input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>1-10
from __future__ import annotations
import uuid
import numpy as np
import pandas as pd
from aistac.handlers.abstract_handlers import ConnectorContract
from ds_discovery.components.abstract_common_component import AbstractCommonComponent
from ds_discovery.components.commons import Commons
from ds_discovery.intent.transition_intent import TransitionIntentModel
from ds_discovery.managers.transition_property_manager import TransitionPropertyManager
__author__ = '<NAME>'
class Transition(AbstractCommonComponent):
    """Component for transitioning raw source data into a canonical form.

    Wires a ``TransitionPropertyManager`` and ``TransitionIntentModel``
    together and adds transition-specific reporting (data quality,
    dictionary, provenance, field descriptions).
    """
    # Canonical report-connector names used by save/report helpers below.
    REPORT_DICTIONARY = 'dictionary'
    REPORT_ANALYSIS = 'analysis'
    REPORT_FIELDS = 'field_description'
    REPORT_QUALITY = 'data_quality'
    REPORT_SUMMARY = 'data_quality_summary'
    REPORT_PROVENANCE = 'provenance'
@classmethod
def from_uri(cls, task_name: str, uri_pm_path: str, username: str, uri_pm_repo: str=None, pm_file_type: str=None,
             pm_module: str=None, pm_handler: str=None, pm_kwargs: dict=None, default_save=None,
             reset_templates: bool=None, template_path: str=None, template_module: str=None,
             template_source_handler: str=None, template_persist_handler: str=None, align_connectors: bool=None,
             default_save_intent: bool=None, default_intent_level: bool=None, order_next_available: bool=None,
             default_replace_intent: bool=None, has_contract: bool=None) -> Transition:
    """Factory method that assembles and returns a Transition component.

    Builds the property manager and intent model for ``task_name``, loads or
    creates the persisted domain contract through the parent class helper,
    then constructs the component instance.

    :param task_name: reference name uniquely identifying a task or subset of the property manager
    :param uri_pm_path: URI identifying the resource path for the property manager
    :param username: user name for this task activity
    :param uri_pm_repo: (optional) repository URI to initially load the property manager from (never saved to)
    :param pm_file_type: (optional) specific file type for the property manager
    :param pm_module: (optional) module or package name where the handler can be found
    :param pm_handler: (optional) handler for retrieving the resource
    :param pm_kwargs: (optional) dictionary of kwargs passed to the property manager
    :param default_save: (optional) if the configuration should be persisted. Default 'True'
    :param reset_templates: (optional) reset connector templates from environ variables. Default True
    :param template_path: (optional) template path used if the environment variable does not exist
    :param template_module: (optional) template module used if the environment variable does not exist
    :param template_source_handler: (optional) template source handler used if no environment variable
    :param template_persist_handler: (optional) template persist handler used if no environment variable
    :param align_connectors: (optional) resets aligned connectors to the template. Default True
    :param default_save_intent: (optional) default action for saving intent in the property manager
    :param default_intent_level: (optional) default level intent should be saved at
    :param order_next_available: (optional) if order should default to the next available order
    :param default_replace_intent: (optional) default replace-existing-intent behaviour
    :param has_contract: (optional) indicates the instance should have a property manager domain contract
    :return: the initialised class instance
    """
    manager = TransitionPropertyManager(task_name=task_name, username=username)
    model = TransitionIntentModel(property_manager=manager,
                                  default_save_intent=default_save_intent,
                                  default_intent_level=default_intent_level,
                                  order_next_available=order_next_available,
                                  default_replace_intent=default_replace_intent)
    # Load (or create) the persisted properties before the component is built.
    super()._init_properties(property_manager=manager, uri_pm_path=uri_pm_path, default_save=default_save,
                             uri_pm_repo=uri_pm_repo, pm_file_type=pm_file_type, pm_module=pm_module,
                             pm_handler=pm_handler, pm_kwargs=pm_kwargs, has_contract=has_contract)
    return cls(property_manager=manager, intent_model=model, default_save=default_save,
               reset_templates=reset_templates, template_path=template_path, template_module=template_module,
               template_source_handler=template_source_handler, template_persist_handler=template_persist_handler,
               align_connectors=align_connectors)
@classmethod
def scratch_pad(cls) -> TransitionIntentModel:
    """A class method to use the Components intent methods as a scratch pad.

    :return: a throw-away TransitionIntentModel not bound to a persisted contract
    """
    return super().scratch_pad()
@property
def intent_model(self) -> TransitionIntentModel:
    """The intent model instance bound to this component."""
    return self._intent_model
@property
def cleaners(self) -> TransitionIntentModel:
    """Alias of ``intent_model``; returns the same intent model instance."""
    return self._intent_model
@property
def pm(self) -> TransitionPropertyManager:
    """The properties manager instance for this component."""
    return self._component_pm
def set_provenance(self, title: str=None, domain: str=None, description: str=None, license_type: str=None,
                   license_name: str=None, license_uri: str=None, cost_price: str = None, cost_code: str = None,
                   cost_type: str = None, provider_name: str=None, provider_uri: str=None, provider_note: str=None,
                   author_name: str=None, author_uri: str=None, author_contact: str=None, save: bool=None):
    """sets the provenance values. Only sets those passed

    :param title: (optional) the title of the provenance
    :param domain: (optional) the domain it sits within
    :param description: (optional) a description of the provenance
    :param license_type: (optional) The type of the license. Default 'PDDL'
    :param license_name: (optional) The full name of the license.
            Default 'Open Data Commons Attribution License'
    :param license_uri: (optional) The license uri.
            Default 'https://opendatacommons.org/licenses/pddl/summary'
    :param cost_price: (optional) a cost price associated with this provenance
    :param cost_code: (optional) a cost centre code or reference code
    :param cost_type: (optional) the cost type or description
    :param provider_name: (optional) the provider system or institution name or title
    :param provider_uri: (optional) a uri reference that helps identify the provider
    :param provider_note: (optional) any notes that might be useful
    :param author_name: (optional) the author of the data
    :param author_uri: (optional) the author uri
    :param author_contact: (optional) the author contact information
    :param save: (optional) if True, save to file. Default is True
    """
    # NOTE(review): the default license_name ('Open Data Commons Attribution
    # License') does not match the default license_type/uri, which point at
    # PDDL (Public Domain Dedication and License) — confirm intended pairing.
    license_type = license_type if license_type else 'PDDL'
    license_name = license_name if license_name else 'Open Data Commons Attribution License'
    license_uri = license_uri if license_uri else 'https://opendatacommons.org/licenses/pddl/summary'
    self.pm.set_provenance(title=title, domain=domain, description=description, license_type=license_type,
                           license_name=license_name, license_uri=license_uri, cost_price=cost_price,
                           cost_code=cost_code, cost_type=cost_type, provider_name=provider_name,
                           provider_uri=provider_uri, provider_note=provider_note, author_name=author_name,
                           author_uri=author_uri, author_contact=author_contact)
    self.pm_persist(save=save)
def reset_provenance(self, save: bool=None):
    """resets the provenance back to its default values

    :param save: (optional) if True, persist the contract. Default is True
    """
    self.pm.reset_provenance()
    self.pm_persist(save)
def save_quality_report(self, canonical: pd.DataFrame=None, file_type: str=None, versioned: bool=None,
                        stamped: str=None):
    """Generate the data-quality report and persist it.

    :param canonical: the canonical to base the report on
    :param file_type: (optional) an alternative file extension to the default 'json' format
    :param versioned: (optional) if the component version should be included as part of the pattern
    :param stamped: (optional) A string of the timestamp options ['days', 'hours', 'minutes', 'seconds', 'ns']
    :return: None
    """
    # Only rebuild the persist connector when a custom file pattern is requested.
    wants_pattern = isinstance(file_type, str) or isinstance(versioned, bool) or isinstance(stamped, str)
    if wants_pattern:
        uri_file = self.pm.file_pattern(self.REPORT_QUALITY, file_type=file_type, versioned=versioned,
                                        stamped=stamped)
        self.set_report_persist(self.REPORT_QUALITY, uri_file=uri_file)
    quality = self.report_quality(canonical=canonical)
    self.save_report_canonical(reports=self.REPORT_QUALITY, report_canonical=quality, auto_connectors=True)
    return
def run_component_pipeline(self, intent_levels: [str, int, list]=None, run_book: str=None, use_default: bool=None):
    """Run the component's pipeline from source to persist.

    :param intent_levels: a single or list of intent levels to run
    :param run_book: a saved runbook to run
    :param use_default: if the default runbook should be used if it exists
    :return: None
    """
    canonical = self.load_source_canonical()
    # Fall back to the primary runbook unless the caller opted out or named one.
    fall_back = use_default if isinstance(use_default, bool) else True
    if fall_back and not isinstance(run_book, str):
        if self.pm.has_run_book(book_name=self.pm.PRIMARY_RUN_BOOK):
            run_book = self.pm.PRIMARY_RUN_BOOK
    result = self.intent_model.run_intent_pipeline(canonical, intent_levels=intent_levels,
                                                   run_book=run_book, inplace=False)
    self.save_persist_canonical(result)
def report_attributes(self, canonical, stylise: bool=True):
    """Report each attribute's dtype alongside any catalogued description.

    :param canonical: the canonical to report on
    :param stylise: if True present the report stylised.
    :return: pd.DataFrame (or a pandas Styler when stylise is True)
    """
    attr_header = f'Attributes ({len(canonical.columns)})'
    headers = [attr_header, 'dType', 'Description']
    rows = []
    for name in canonical.columns.sort_values().values:
        # Descriptions come from the 'attributes' knowledge catalog, joined into one string.
        notes = self.pm.report_notes(catalog='attributes', labels=name, drop_dates=True).get('text', [])
        rows.append([name, str(canonical[name].dtype), ". ".join(notes)])
    df_dd = pd.DataFrame(rows, columns=headers)
    if not stylise:
        return df_dd
    # Hide the index and centre the header cells before colouring dtypes.
    style = [{'selector': 'th', 'props': [('font-size', "120%"), ("text-align", "center")]},
             {'selector': '.row_heading, .blank', 'props': [('display', 'none;')]}]
    df_style = df_dd.style.set_table_styles(style)
    _ = df_style.applymap(self._dtype_color, subset=['dType'])
    _ = df_style.set_properties(subset=['Description'], **{"text-align": "left"})
    _ = df_style.set_properties(subset=[attr_header], **{'font-weight': 'bold',
                                                         'font-size': "120%"})
    return df_style
def report_provenance(self, as_dict: bool=None, stylise: bool=None):
    """Report the provenance recorded in the domain contract.

    :param as_dict: (optional) if the result should be a dictionary. Default is False
    :param stylise: (optional) if as_dict is False, if the return dataFrame should be stylised
    :return: dict, pd.DataFrame or stylised report
    """
    # Non-bool values fall back to the defaults (as_dict=False, stylise=True).
    as_dict = as_dict is True
    stylise = stylise is not False
    report = self.pm.report_provenance()
    if as_dict:
        return report
    df = pd.DataFrame(report, index=['values']).transpose().reset_index()
    df.columns = ['provenance', 'values']
    return Commons.report(df, index_header='provenance') if stylise else df
def report_quality_summary(self, canonical: pd.DataFrame=None, as_dict: bool=None, stylise: bool=None):
""" a summary quality report of the canonical
:param canonical: (optional) the canonical to be sumarised. If not passed then loads the canonical source
:param as_dict: (optional) if the result should be a dictionary. Default is False
:param stylise: (optional) if as_dict is False, if the return dataFrame should be stylised
:return: a dict or pd.DataFrame
"""
as_dict = as_dict if isinstance(as_dict, bool) else False
stylise = stylise if isinstance(stylise, bool) else True
if not isinstance(canonical, pd.DataFrame):
canonical = self._auto_transition()
# provinance
_provenance_headers = ['title', 'license', 'domain', 'description', 'provider', 'author', 'cost']
_provenance_count = len(list(filter(lambda x: x in _provenance_headers, self.pm.provenance.keys())))
_provenance_cost = self.pm.provenance.get('cost', {}).get('price', 'NA')
# descibed
_descibed_keys = self.pm.get_knowledge(catalog='attributes').keys()
_descibed_count = len(list(filter(lambda x: x in canonical.columns, _descibed_keys)))
# dictionary
_dictionary = self.canonical_report(canonical, stylise=False)
_total_fields = _dictionary.shape[0]
_null_total = _dictionary['%_Null'].sum()
_dom_fields = _dictionary['%_Dom'].sum()
_null_columns = _dictionary['%_Null'].where(_dictionary['%_Null'] > 0.98).dropna()
_dom_columns = _dictionary['%_Dom'].where(_dictionary['%_Dom'] > 0.98).dropna()
_usable_fields = set(_null_columns)
_usable_fields.update(_dom_columns)
_numeric_fields = len(Commons.filter_headers(canonical, dtype='number'))
_category_fields = len(Commons.filter_headers(canonical, dtype='category'))
_date_fields = len(Commons.filter_headers(canonical, dtype='datetime'))
_bool_fields = len(Commons.filter_headers(canonical, dtype='bool'))
_other_fields = len(Commons.filter_headers(canonical, dtype=['category', 'datetime', 'bool',
'number'], exclude=True))
_null_avg = _null_total / canonical.shape[1]
_dom_avg = _dom_fields / canonical.shape[1]
_quality_avg = | |
return sqs.order_by(prop_order)
else:
return sqs.order_by('-django_ct')
class LocationAdvancedSearchForm(SearchForm):
    """Search form allows user to search Solr index by property.

    This allows the user to specify the property, type of search and
    AND/OR methods for combining up to three search criteria, plus
    related-object filters (subject, media, person/org, collection).
    """
    keyword = forms.CharField(label='Keywords', required=False)

    # Dynamic Search Fields
    def __init__(self, *args, **kwargs):
        super(LocationAdvancedSearchForm, self).__init__(*args, **kwargs)
        # Add one extra form field per configured 'loc*' result property.
        custom_fields = ResultProperty.objects.filter(display_field__startswith = 'loc', field_type__visible = True)
        if custom_fields:
            for i, custom_field in enumerate(custom_fields):
                if custom_field.field_type:
                    cus_lab = custom_field.field_type.property
                    if custom_field.field_type.control_field:
                        # Controlled vocabulary -> tree choice limited to this type.
                        self.fields['custom_' + custom_field.display_field] = forms.TreeNodeChoiceField(label=cus_lab, required = False, queryset = ControlField.objects.filter(type = custom_field.field_type), empty_label='Any')
                    else:
                        self.fields['custom_' + custom_field.display_field] = forms.CharField(label = cus_lab, required = False)

    # Advanced Search Fields (three query-builder rows joined by op/op2)
    property = forms.ModelChoiceField(label='Field', required=False, queryset=DescriptiveProperty.objects.filter(visible = True).filter(Q(primary_type='SL') | Q(primary_type='AL')), empty_label="Any")
    search_type = forms.ChoiceField(label='Search Type', required=False, choices=SEARCH_TYPE)
    q = forms.CharField(label='Search Terms', required=False)
    op = forms.ChoiceField(label='And/Or', required=False, choices=OPERATOR)
    property2 = forms.ModelChoiceField(label='Field', required=False, queryset=DescriptiveProperty.objects.filter(visible = True).filter(Q(primary_type='SL') | Q(primary_type='AL')), empty_label="Any")
    search_type2 = forms.ChoiceField(label='Search Type', required=False, choices=SEARCH_TYPE)
    q2 = forms.CharField(label='Search Terms', required=False)
    op2 = forms.ChoiceField(label='And/Or', required=False, choices=OPERATOR)
    property3 = forms.ModelChoiceField(label='Field', required=False, queryset=DescriptiveProperty.objects.filter(visible = True).filter(Q(primary_type='SL') | Q(primary_type='AL')), empty_label="Any")
    search_type3 = forms.ChoiceField(label='Search Type', required=False, choices=SEARCH_TYPE)
    q3 = forms.CharField(label='Search Terms', required=False)
    order = forms.ModelChoiceField(label='', required=False, queryset=ResultProperty.objects.filter(display_field__startswith='loc'))

    # filters
    sub = SubjectChoices(
        label = Subject._meta.verbose_name.capitalize(),
        required = False,
        widget = AutoHeavySelect2Widget(
            select2_options = {
                'width': '220px',
                'placeholder': 'Lookup %s ...' % Subject._meta.verbose_name
            }
        )
    )
    med = MediaChoices(
        label = Media._meta.verbose_name.capitalize(),
        required = False,
        widget = AutoHeavySelect2Widget(
            select2_options = {
                'width': '220px',
                'placeholder': 'Lookup %s ...' % Media._meta.verbose_name
            }
        )
    )
    po = PersonOrgChoices(
        label = PersonOrg._meta.verbose_name.capitalize(),
        required = False,
        widget = AutoHeavySelect2Widget(
            select2_options = {
                'width': '220px',
                'placeholder': 'Lookup %s ...' % PersonOrg._meta.verbose_name
            }
        )
    )
    # img = forms.ChoiceField(label='Image', required=False, choices=(('default', '---'), ('yes', 'Yes'), ('no', 'No')))
    col = forms.ModelChoiceField(label='Collection', required=False, queryset=Collection.objects.all())

    def search(self):
        """Run the advanced Location search against a fresh SearchQuerySet.

        This search method starts from a new query of all documents
        in the index instead of getting the existing SearchQuerySet from the
        super class. This is mainly to clear the default query of the index
        for the value of q. HOWEVER, this requires redoing any actions
        normally taken before the SearchForm is called, such as faceting the
        SearchQuerySet.
        """
        sqs = SearchQuerySet()
        sqs = sqs.filter(django_ct = 'base.location')
        # faceting must be done here manually b/c we are creating a new SearchQuerySet
        facet_fields = DescriptiveProperty.objects.filter(control_field = True, visible = True)
        for facet_field in facet_fields:
            sqs = sqs.facet('facet_prop_' + str(facet_field.pk))
        if not self.is_valid():
            return self.no_query_found()
        # Row-wise values from the three query-builder rows.
        prop_list = [self.cleaned_data['property'], self.cleaned_data['property2'], self.cleaned_data['property3']]
        type_list = [self.cleaned_data['search_type'], self.cleaned_data['search_type2'], self.cleaned_data['search_type3']]
        query_list = [self.cleaned_data['q'], self.cleaned_data['q2'], self.cleaned_data['q3']]
        op_list = [self.cleaned_data['op'], self.cleaned_data['op2']]

        # SELECTED FIELDS SEARCH
        custom_fields = ResultProperty.objects.filter(display_field__startswith = 'loc')
        if custom_fields:
            for custom_field in custom_fields:
                # Fields not created in __init__ (hidden types) won't be in cleaned_data.
                if 'custom_' + custom_field.display_field in self.cleaned_data:
                    if custom_field.field_type and custom_field.field_type.control_field and self.cleaned_data['custom_' + custom_field.display_field] != None:
                        # Controlled value: match the selected node or any of its descendants.
                        value_tree = self.cleaned_data['custom_' + custom_field.display_field].get_descendants(include_self=True)
                        tsq = SQ()
                        for index, node in enumerate(value_tree):
                            kwargs = {'facet_prop_' + str(custom_field.field_type.pk) : node.id}
                            if index == 0:
                                tsq = SQ(**kwargs)
                            else:
                                tsq = tsq | SQ(**kwargs)
                        sqs = sqs.filter(tsq)
                    elif self.cleaned_data['custom_' + custom_field.display_field] != '':
                        kwargs = {'prop_' + str(custom_field.field_type.pk) : self.cleaned_data['custom_' + custom_field.display_field]}
                        sqs = sqs.filter(**kwargs)

        # RELATED TABLES FILTER
        sub = self.cleaned_data['sub']
        if sub != None and sub != '':
            sub_rels = Location.objects.filter(locationsubjectrelations__subject_id=sub).values_list('id', flat=True)
            sqs = sqs.filter(django_id__in = sub_rels)
        med = self.cleaned_data['med']
        if med != None and med != '':
            med_rels = Location.objects.filter(medialocationrelations__media_id=med).values_list('id', flat=True)
            sqs = sqs.filter(django_id__in = med_rels)
        po = self.cleaned_data['po']
        if po != None and po != '':
            po_rels = Location.objects.filter(locationpersonorgrelations__person_org_id=po).values_list('id', flat=True)
            sqs = sqs.filter(django_id__in = po_rels)
        # img = self.cleaned_data['img']
        # if img != None and img != '':
            # imgs = ['jpg', 'jpeg', 'png', 'tif', 'JPG', 'JPEG', 'PNG', 'TIF']
            # if img == 'yes':
                # img_locs = Location.objects.filter(locationfile__rsid__filetype__in = imgs).values_list('id', flat=True)
            # else:
                # img_locs = Location.objects.exclude(locationfile__rsid__filetype__in = imgs).values_list('id', flat=True)
            # sqs = sqs.filter(django_id__in = img_locs)
        col = self.cleaned_data['col']
        if col != None and col != '':
            loc_cols = Location.objects.filter(locationcollection__collection_id = col).values_list('id', flat=True)
            sqs = sqs.filter(django_id__in = loc_cols)

        # ADVANCED SEARCH
        # query object for building full advanced query
        sq = SQ()
        modified = False
        for j in range(0, len(prop_list)):
            prop = 'content'
            type = type_list[j]
            query = query_list[j]
            operator = ''
            negate = False
            kwargs = {}
            # check for operator
            if j > 0:
                operator = op_list[j - 1]
            # check for not
            if type.startswith('!'):
                negate = True
                type = type[1:]
            # if this row of query builder is blank, skip
            if (query == '') and (type != 'blank'):
                continue
            else:
                modified = True
            # check if a property was selected
            if prop_list[j] != None:
                if prop_list[j].control_field:
                    prop = 'facet_prop_'+ str(prop_list[j].id)
                else:
                    prop = 'prop_'+ str(prop_list[j].id)
            # check if search type was selected
            if type == '':
                type = 'contains'
            # determine the type of search

            # CONTAINS -> special case misspellings
            if type == 'contains':
                query_text = '('
                # special misspellings
                if prop == 'prop_23':
                    # if doing a contains search for u number, get first instance of numbers followed by a 0 or 1 letter
                    match = re.search(r'(\d+[a-zA-Z]?)', query)
                    if match:
                        query = match.group(0)
                        query_text += (' ' + query + '? OR ')
                else:
                    query = re.sub(r'(\s*)([uU]\s*?\.?\s*)(\d+)([a-zA-Z]*)', r'\1u* *\3*', query)
                    query = re.sub(r'(\s*)([pP][gG]\s*?[\./]?\s*)(\w+)', r'\1pg* *\3*', query)
                query_text += '(' + query + '))'
                kwargs = {str('%s' % prop) : Raw(query_text)}
            # LIKE -> 'a*b' or 'a?b'
            elif type == 'like':
                keywords = query.split()
                if keywords:
                    query_text = '('
                    for i, word in enumerate(keywords):
                        if i > 0:
                            query_text += ' AND '
                        query_text += word
                    query_text += ')'
                kwargs = {str('%s' % prop) : Raw(query_text)}
            # BLANK -> returns all subjects that don't have a value for given property
            elif type == 'blank':
                # FIX: previously this inspected self.cleaned_data['property']
                # (row 1's property) for every row; rows 2 and 3 must check
                # their own selected property instead.
                # If property is Any, then return all b/c query asks for doc
                # with 'any' blank properties.
                if prop_list[j] is None:
                    continue
                # BLANK is a special case negation (essentially a double negative), so handle differently
                if negate:
                    kwargs = {str('%s' % prop) : Raw('[1 TO *]')}
                    negate = False
                else:
                    kwargs = {str('-%s' % prop) : Raw('[* TO *]')}
            # ENDSWITH -> '*abc'
            elif type == 'endswith':
                keywords = query.split()
                if keywords:
                    query_text = '('
                    for i, word in enumerate(keywords):
                        if i > 0:
                            query_text += ' AND '
                        query_text += ('*' + word)
                    query_text += ')'
                kwargs = {str('%s' % prop) : Raw(query_text)}
            else:
                kwargs = {str('%s__%s' % (prop, type)) : str('%s' % query)}
            # Fold this row into the combined query with its AND/OR operator.
            if operator == 'or':
                if negate:
                    sq = sq | ~SQ(**kwargs)
                else:
                    sq = sq | SQ(**kwargs)
            elif operator == 'and':
                if negate:
                    sq = sq & ~SQ(**kwargs)
                else:
                    sq = sq & SQ(**kwargs)
            else:
                if negate:
                    sq = ~SQ(**kwargs)
                else:
                    sq = SQ(**kwargs)
        if modified:
            sqs = sqs.filter(sq)
        # Optional ordering by a configured result property; otherwise by content type.
        if self.cleaned_data['order']:
            prop_order = 'prop_' + str(self.cleaned_data['order'].field_type.id)
            return sqs.order_by(prop_order)
        else:
            return sqs.order_by('-django_ct')
class MediaAdvancedSearchForm(SearchForm):
"""Search form allows user to search Solr index by property
This allows the user to specify the property, type of search and
AND/OR methods for combining searches"""
keyword = forms.CharField(label='Keywords', required=False)
# Dynamic Search Fields
def __init__(self, *args, **kwargs):
    # Add one dynamic form field per configured, visible 'med*' result property.
    super(MediaAdvancedSearchForm, self).__init__(*args, **kwargs)
    custom_fields = ResultProperty.objects.filter(display_field__startswith = 'med', field_type__visible = True)
    if custom_fields:
        for i, custom_field in enumerate(custom_fields):
            if custom_field.field_type:
                cus_lab = custom_field.field_type.property
                if custom_field.field_type.control_field:
                    # Controlled vocabulary -> tree choice limited to this property type.
                    self.fields['custom_' + custom_field.display_field] = forms.TreeNodeChoiceField(label=cus_lab, required = False, queryset = ControlField.objects.filter(type = custom_field.field_type), empty_label='Any')
                else:
                    # Free-text property -> plain text input.
                    self.fields['custom_' + custom_field.display_field] = forms.CharField(label = cus_lab, required = False)
# Advanced Search Fields
property = forms.ModelChoiceField(label='Field', required=False, queryset=DescriptiveProperty.objects.filter(visible = True).filter(Q(primary_type='MP') | Q(primary_type='AL')), empty_label="Any")
search_type = forms.ChoiceField(label='Search Type', required=False, choices=SEARCH_TYPE)
q = forms.CharField(label='Search Terms', required=False)
op = forms.ChoiceField(label='And/Or', required=False, choices=OPERATOR)
property2 = forms.ModelChoiceField(label='Field', required=False, queryset=DescriptiveProperty.objects.filter(visible = True).filter(Q(primary_type='MP') | Q(primary_type='AL')), empty_label="Any")
search_type2 = forms.ChoiceField(label='Search Type', required=False, | |
from direct.showbase.DirectObject import DirectObject
from DirectUtil import *
from DirectGeometry import *
from DirectGlobals import *
from DirectSelection import SelectionRay
from direct.interval.IntervalGlobal import Sequence, Func
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
# Duration (seconds) of scripted camera moves (used as `t` for the hotkey-bound
# zoom/swing actions registered in DirectCameraControl.__init__).
CAM_MOVE_DURATION = 1.2
# Scale factor for the centre-of-attention marker; presumably keeps the marker
# a constant apparent size on screen — confirm against updateCoaMarkerSize().
COA_MARKER_SF = 0.0075
# World up/forward reference axis.
Y_AXIS = Vec3(0, 1, 0)
class DirectCameraControl(DirectObject):
    """Mouse/keyboard camera manipulation for the DIRECT session tools.

    Maintains a centre-of-attention (COA) marker and spawns per-frame
    'manipulateCamera' tasks that translate, pan, zoom or rotate the camera
    in response to DIRECT mouse events.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DirectCameraControl')
def __init__(self):
    """Initialise manipulation state, the COA marker and event tables."""
    # NOTE(review): 'Create the grid' looks like a stale copy-pasted comment;
    # nothing grid-related is created below.
    # Timing of the current mouse interaction (frame time / frame count).
    self.startT = 0.0
    self.startF = 0
    self.orthoViewRoll = 0.0
    self.lastView = 0
    # Centre of attention: the point the camera orbits/zooms about.
    self.coa = Point3(0, 100, 0)
    # Visible marker for the COA; starts fully transparent (alpha 0).
    self.coaMarker = loader.loadModel('models/misc/sphere.bam')
    self.coaMarker.setName('DirectCameraCOAMarker')
    self.coaMarker.setTransparency(1)
    self.coaMarker.setColor(1, 0, 0, 0)
    self.coaMarker.setPos(0, 100, 0)
    useDirectRenderStyle(self.coaMarker)
    self.coaMarkerPos = Point3(0)
    self.coaMarkerColorIval = None
    self.fLockCOA = 0
    self.nullHitPointCount = 0
    # Collision entries recorded at the end of the last fly interaction.
    self.cqEntries = []
    self.coaMarkerRef = base.direct.group.attachNewNode('coaMarkerRef')
    self.camManipRef = base.direct.group.attachNewNode('camManipRef')
    self.switchDirBelowZero = True
    t = CAM_MOVE_DURATION
    # Mouse-button event bindings (DIRECT dispatches these events).
    self.actionEvents = [
        ['DIRECT-mouse1', self.mouseRotateStart],
        ['DIRECT-mouse1Up', self.mouseDollyStop],
        ['DIRECT-mouse2', self.mouseFlyStart],
        ['DIRECT-mouse2Up', self.mouseFlyStop],
        ['DIRECT-mouse3', self.mouseDollyStart],
        ['DIRECT-mouse3Up', self.mouseDollyStop],
        ]
    # [gjeon] moved all of the hotkeys to single place for easy remapping
    ## self.keyEvents = [
    ##     ['c', self.centerCamIn, 0.5],
    ##     ['f', self.fitOnWidget],  # Note: This function doesn't work as intended
    ##     ['h', self.homeCam],
    ##     ['shift-v', self.toggleMarkerVis],
    ##     ['m', self.moveToFit],  # Note: This function doesn't work as intended; the object dissappears and screen flashes
    ##     ['n', self.pickNextCOA],
    ##     ['u', self.orbitUprightCam],
    ##     ['shift-u', self.uprightCam],
    ##     [repr(1), self.spawnMoveToView, 1],
    ##     [repr(2), self.spawnMoveToView, 2],
    ##     [repr(3), self.spawnMoveToView, 3],
    ##     [repr(4), self.spawnMoveToView, 4],
    ##     [repr(5), self.spawnMoveToView, 5],
    ##     [repr(6), self.spawnMoveToView, 6],
    ##     [repr(7), self.spawnMoveToView, 7],
    ##     [repr(8), self.spawnMoveToView, 8],
    ##     ['9', self.swingCamAboutWidget, -90.0, t],
    ##     ['0', self.swingCamAboutWidget, 90.0, t],
    ##     ['`', self.removeManipulateCameraTask],
    ##     ['=', self.zoomCam, 0.5, t],
    ##     ['+', self.zoomCam, 0.5, t],
    ##     ['-', self.zoomCam, -2.0, t],
    ##     ['_', self.zoomCam, -2.0, t],
    ##     ]
    # Remappable hotkey bindings (event names intentionally keep the
    # historical 'spwanMoveToView' spelling — senders use the same string).
    self.keyEvents = [
        ['DIRECT-centerCamIn', self.centerCamIn, 0.5],
        ['DIRECT-fitOnWidget', self.fitOnWidget],  # Note: This function doesn't work as intended
        ['DIRECT-homeCam', self.homeCam],
        ['DIRECT-toggleMarkerVis', self.toggleMarkerVis],
        ['DIRECT-moveToFit', self.moveToFit],  # Note: This function doesn't work as intended; the object dissappears and screen flashes
        ['DIRECT-pickNextCOA', self.pickNextCOA],
        ['DIRECT-orbitUprightCam', self.orbitUprightCam],
        ['DIRECT-uprightCam', self.uprightCam],
        ['DIRECT-spwanMoveToView-1', self.spawnMoveToView, 1],
        ['DIRECT-spwanMoveToView-2', self.spawnMoveToView, 2],
        ['DIRECT-spwanMoveToView-3', self.spawnMoveToView, 3],
        ['DIRECT-spwanMoveToView-4', self.spawnMoveToView, 4],
        ['DIRECT-spwanMoveToView-5', self.spawnMoveToView, 5],
        ['DIRECT-spwanMoveToView-6', self.spawnMoveToView, 6],
        ['DIRECT-spwanMoveToView-7', self.spawnMoveToView, 7],
        ['DIRECT-spwanMoveToView-8', self.spawnMoveToView, 8],
        ['DIRECT-swingCamAboutWidget-0', self.swingCamAboutWidget, -90.0, t],
        ['DIRECT-swingCamAboutWidget-1', self.swingCamAboutWidget, 90.0, t],
        ['DIRECT-removeManipulateCameraTask', self.removeManipulateCameraTask],
        ['DIRECT-zoomInCam', self.zoomCam, 0.5, t],
        ['DIRECT-zoomOutCam', self.zoomCam, -2.0, t],
        ]
    # set this to true to prevent the camera from rolling
    self.lockRoll = False
    # NIK - flag to determine whether to use maya camera controls
    self.useMayaCamControls = 0
    self.altDown = 0
    self.perspCollPlane = None  # [gjeon] used for new LE
    self.perspCollPlane2 = None  # [gjeon] used for new LE
def toggleMarkerVis(self):
## if base.direct.cameraControl.coaMarker.isHidden():
## base.direct.cameraControl.coaMarker.show()
## else:
## base.direct.cameraControl.coaMarker.hide()
if self.coaMarker.isHidden():
self.coaMarker.show()
else:
self.coaMarker.hide()
def mouseRotateStart(self, modifiers):
    """Start a Maya-style camera rotate; only active while ALT (mask 4) is held."""
    if self.useMayaCamControls and modifiers == 4:
        self.spawnMouseRotateTask()
def mouseDollyStart(self, modifiers):
    """Start a dolly/zoom interaction (Maya controls, ALT held)."""
    if not (self.useMayaCamControls and modifiers == 4):
        return
    # Hide the marker for this kind of motion.
    self.coaMarker.hide()
    # Record when the interaction began (time and frame).
    self.startT = globalClock.getFrameTime()
    self.startF = globalClock.getFrameCount()
    # Orthographic viewports get a dedicated zoom task.
    usingOrtho = (hasattr(base.direct, "manipulationControl")
                  and base.direct.manipulationControl.fMultiView
                  and base.direct.camera.getName() != 'persp')
    if usingOrtho:
        self.spawnOrthoZoom()
    else:
        self.spawnHPanYZoom()
def mouseDollyStop(self):
    # End the dolly interaction by killing the per-frame manipulation task.
    taskMgr.remove('manipulateCamera')
def mouseFlyStart(self, modifiers):
    """Begin a camera fly interaction; behaviour depends on control scheme.

    With Maya controls + ALT (modifiers == 4): translate (ortho or XZ).
    Without Maya controls: the display-region position of the mouse picks
    translate/zoom (centre), roll (corners) or rotate (outer frame).
    """
    # Record undo point
    # base.direct.pushUndo([base.direct.camera]) # Wasteful use of undo
    if self.useMayaCamControls and modifiers == 4:  # alt is down, use maya controls
        # Hide the marker for this kind of motion
        self.coaMarker.hide()
        # Record time of start of mouse interaction
        self.startT = globalClock.getFrameTime()
        self.startF = globalClock.getFrameCount()
        # Start manipulation
        # If the cam is orthogonal, spawn differentTask
        if hasattr(base.direct, "manipulationControl") and base.direct.manipulationControl.fMultiView and\
           base.direct.camera.getName() != 'persp':
            self.spawnOrthoTranslate()
        else:
            self.spawnXZTranslate()
        self.altDown = 1
    elif not self.useMayaCamControls:
        # Where are we in the display region?
        if ((abs(base.direct.dr.mouseX) < 0.9) and (abs(base.direct.dr.mouseY) < 0.9)):
            # MOUSE IS IN CENTRAL REGION
            # Hide the marker for this kind of motion
            self.coaMarker.hide()
            # Record time of start of mouse interaction
            self.startT = globalClock.getFrameTime()
            self.startF = globalClock.getFrameCount()
            # Start manipulation
            self.spawnXZTranslateOrHPanYZoom()
            # END MOUSE IN CENTRAL REGION
        else:
            if ((abs(base.direct.dr.mouseX) > 0.9) and
                (abs(base.direct.dr.mouseY) > 0.9)):
                # Mouse is in corners, spawn roll task
                self.spawnMouseRollTask()
            else:
                # Mouse is in outer frame, spawn mouseRotateTask
                self.spawnMouseRotateTask()
    # Track ALT state so mouseFlyStop can tell a camera move from a pick.
    if not modifiers == 4:
        self.altDown = 0
def mouseFlyStop(self):
    """End a fly interaction; on a plain click, re-pick the centre of attention."""
    taskMgr.remove('manipulateCamera')
    # Interaction duration (currently informational; the old quick-click
    # test below is commented out).
    stopT = globalClock.getFrameTime()
    deltaT = stopT - self.startT
    stopF = globalClock.getFrameCount()
    deltaF = stopF - self.startF
    ## No reason this shouldn't work with Maya cam on
    # if not self.useMayaCamControls and (deltaT <= 0.25) or (deltaF <= 1):
    # Do this when not trying to manipulate camera
    if not self.altDown and len(base.direct.selected.getSelectedAsList()) == 0:
        # Check for a hit point based on
        # current mouse position
        # Allow intersection with unpickable objects
        # And then spawn task to determine mouse mode
        # Don't intersect with hidden or backfacing objects
        skipFlags = SKIP_HIDDEN | SKIP_BACKFACE
        # Skip camera (and its children), unless control key is pressed
        skipFlags |= SKIP_CAMERA * (1 - base.getControl())
        self.computeCOA(base.direct.iRay.pickGeom(skipFlags = skipFlags))
        # Record reference point
        self.coaMarkerRef.iPosHprScale(base.cam)
        # Record entries
        self.cqEntries = []
        for i in range(base.direct.iRay.getNumEntries()):
            self.cqEntries.append(base.direct.iRay.getEntry(i))
    # Show the marker
    self.coaMarker.show()
    # Resize it
    self.updateCoaMarkerSize()
def mouseFlyStartTopWin(self):
    # Debug hook for mouse-2 press in the top window (Python 2 print).
    print "Moving mouse 2 in new window"
    #altIsDown = base.getAlt()
    #if altIsDown:
    #    print "Alt is down"
def mouseFlyStopTopWin(self):
    # Debug hook for mouse-2 release in the top window (Python 2 print).
    print "Stopping mouse 2 in new window"
def spawnXZTranslateOrHPanYZoom(self):
# Kill any existing tasks
taskMgr.remove('manipulateCamera')
# Spawn the new task
t = Task.Task(self.XZTranslateOrHPanYZoomTask)
# For HPanYZoom
t.zoomSF = Vec3(self.coaMarker.getPos(base.direct.camera)).length()
taskMgr.add(t, 'manipulateCamera')
def spawnXZTranslateOrHPPan(self):
# Kill any existing tasks
taskMgr.remove('manipulateCamera')
# Spawn new task
taskMgr.add(self.XZTranslateOrHPPanTask,
'manipulateCamera')
def spawnXZTranslate(self):
# Kill any existing tasks
taskMgr.remove('manipulateCamera')
# Spawn new task
taskMgr.add(self.XZTranslateTask, 'manipulateCamera')
def spawnOrthoTranslate(self):
# Kill any existing tasks
taskMgr.remove('manipulateCamera')
# Spawn new task
taskMgr.add(self.OrthoTranslateTask, 'manipulateCamera')
def spawnHPanYZoom(self):
# Kill any existing tasks
taskMgr.remove('manipulateCamera')
# Spawn new task
t = Task.Task(self.HPanYZoomTask)
t.zoomSF = Vec3(self.coaMarker.getPos(base.direct.camera)).length()
taskMgr.add(t, 'manipulateCamera')
def spawnOrthoZoom(self):
# Kill any existing tasks
taskMgr.remove('manipulateCamera')
# Spawn new task
t = Task.Task(self.OrthoZoomTask)
taskMgr.add(t, 'manipulateCamera')
def spawnHPPan(self):
# Kill any existing tasks
taskMgr.remove('manipulateCamera')
# Spawn new task
taskMgr.add(self.HPPanTask, 'manipulateCamera')
def XZTranslateOrHPanYZoomTask(self, state):
if base.direct.fShift:
return self.XZTranslateTask(state)
else:
return self.HPanYZoomTask(state)
def XZTranslateOrHPPanTask(self, state):
if base.direct.fShift:
# Panning action
return self.HPPanTask(state)
else:
# Translation action
return self.XZTranslateTask(state)
def XZTranslateTask(self, state):
coaDist = Vec3(self.coaMarker.getPos(base.direct.camera)).length()
xlateSF = (coaDist / base.direct.dr.near)
base.direct.camera.setPos(base.direct.camera,
(-0.5 * base.direct.dr.mouseDeltaX *
base.direct.dr.nearWidth *
xlateSF),
0.0,
(-0.5 * base.direct.dr.mouseDeltaY *
base.direct.dr.nearHeight *
xlateSF))
return Task.cont
def OrthoTranslateTask(self, state):
# create ray from the camera to detect 3d position
iRay = SelectionRay(base.direct.camera)
iRay.collider.setFromLens(base.direct.camNode, base.direct.dr.mouseX, base.direct.dr.mouseY)
#iRay.collideWithBitMask(1)
iRay.collideWithBitMask(BitMask32.bit(21))
iRay.ct.traverse(base.direct.grid)
entry = iRay.getEntry(0)
hitPt = entry.getSurfacePoint(entry.getFromNodePath())
iRay.collisionNodePath.removeNode()
del iRay
if hasattr(state, 'prevPt'):
base.direct.camera.setPos(base.direct.camera, (state.prevPt - hitPt))
state.prevPt = hitPt
return Task.cont
    def HPanYZoomTask(self, state):
        """Per-frame heading-pan and zoom toward/away from the COA marker.

        Zoom direction follows the marker when control is held, otherwise the
        camera's own Y axis; heading is panned from horizontal mouse motion
        (non-Maya mode only). state.zoomSF is set by the spawning method.
        """
        # If the cam is orthogonal, don't rotate or zoom.
        if (hasattr(base.direct.cam.node(), "getLens") and
            base.direct.cam.node().getLens().__class__.__name__ == "OrthographicLens"):
            return
        if base.direct.fControl:
            # Zoom along the direction of the COA marker.
            moveDir = Vec3(self.coaMarker.getPos(base.direct.camera))
            # If marker is behind camera invert vector
            if moveDir[1] < 0.0:
                moveDir.assign(moveDir * -1)
            moveDir.normalize()
        else:
            moveDir = Vec3(Y_AXIS)
        if self.useMayaCamControls : # use maya controls
            # Maya style: both mouse axes contribute to zoom, no heading pan.
            moveDir.assign(moveDir * ((base.direct.dr.mouseDeltaX -1.0 * base.direct.dr.mouseDeltaY)
                                      * state.zoomSF))
            hVal = 0.0
        else:
            moveDir.assign(moveDir * (-1.0 * base.direct.dr.mouseDeltaY *
                                      state.zoomSF))
            if base.direct.dr.mouseDeltaY > 0.0:
                # NOTE(review): multiplying by 1.0 is a no-op; a different
                # scale factor was probably intended here — confirm.
                moveDir.setY(moveDir[1] * 1.0)
            hVal = 0.5 * base.direct.dr.mouseDeltaX * base.direct.dr.fovH
        base.direct.camera.setPosHpr(base.direct.camera,
                                     moveDir[0],
                                     moveDir[1],
                                     moveDir[2],
                                     hVal,
                                     0.0, 0.0)
        if (self.lockRoll == True):
            # flatten roll
            base.direct.camera.setR(0)
        return Task.cont
def OrthoZoomTask(self, state):
filmSize = base.direct.camNode.getLens().getFilmSize()
factor = (base.direct.dr.mouseDeltaX -1.0 * base.direct.dr.mouseDeltaY) * 0.1
x = base.direct.dr.getWidth()
y = base.direct.dr.getHeight()
base.direct.dr.orthoFactor -= factor
if base.direct.dr.orthoFactor < 0:
base.direct.dr.orthoFactor = 0.0001
base.direct.dr.updateFilmSize(x, y)
return Task.cont
def HPPanTask(self, state):
base.direct.camera.setHpr(base.direct.camera,
(0.5 * base.direct.dr.mouseDeltaX *
base.direct.dr.fovH),
(-0.5 * base.direct.dr.mouseDeltaY *
base.direct.dr.fovV),
0.0)
return Task.cont
    def spawnMouseRotateTask(self):
        """Start the mouse-rotate camera task, re-anchoring the COA marker on
        the perspective collision plane when one is configured.
        """
        # Kill any existing tasks
        taskMgr.remove('manipulateCamera')
        if self.perspCollPlane:
            # Probe straight through the lens center to find where the view
            # axis hits the ground plane (above or below Z=0).
            iRay = SelectionRay(base.direct.camera)
            iRay.collider.setFromLens(base.direct.camNode, 0.0, 0.0)
            iRay.collideWithBitMask(1)
            if base.direct.camera.getPos().getZ() >=0:
                iRay.ct.traverse(self.perspCollPlane)
            else:
                iRay.ct.traverse(self.perspCollPlane2)
            if iRay.getNumEntries() > 0:
                entry = iRay.getEntry(0)
                hitPt = entry.getSurfacePoint(entry.getFromNodePath())
                # create a temp nodePath to get the position
                # (note: local 'np' is a Panda3D NodePath, not numpy)
                np = NodePath('temp')
                np.setPos(base.direct.camera, hitPt)
                self.coaMarkerPos = np.getPos()
                np.remove()
                self.coaMarker.setPos(self.coaMarkerPos)
            # Always clean up the probe ray, hit or not.
            iRay.collisionNodePath.removeNode()
            del iRay
        # Set at markers position in render coordinates
        self.camManipRef.setPos(self.coaMarkerPos)
        self.camManipRef.setHpr(base.direct.camera, ZERO_POINT)
        t = Task.Task(self.mouseRotateTask)
        # Near the left/right screen edges the rotation is constrained to
        # one axis; elsewhere to the other.
        if abs(base.direct.dr.mouseX) > 0.9:
            t.constrainedDir = 'y'
        else:
            t.constrainedDir = 'x'
        taskMgr.add(t, 'manipulateCamera')
def mouseRotateTask(self, state):
# If the cam is orthogonal, don't rotate.
if (hasattr(base.direct.cam.node(), "getLens") and
base.direct.cam.node().getLens().__class__.__name__ == "OrthographicLens"):
return
# If moving outside of center, ignore motion | |
'"y.__rop__(x)" is\n'
' tried. If this is also not implemented or returns '
'"NotImplemented",\n'
' a "TypeError" exception is raised. But see the following '
'exception:\n'
'\n'
'* Exception to the previous item: if the left operand is an '
'instance\n'
' of a built-in type or a new-style class, and the right '
'operand is an\n'
' instance of a proper subclass of that type or class and '
'overrides\n'
' the base\'s "__rop__()" method, the right operand\'s '
'"__rop__()"\n'
' method is tried *before* the left operand\'s "__op__()" '
'method.\n'
'\n'
' This is done so that a subclass can completely override '
'binary\n'
' operators. Otherwise, the left operand\'s "__op__()" '
'method would\n'
' always accept the right operand: when an instance of a '
'given class\n'
' is expected, an instance of a subclass of that class is '
'always\n'
' acceptable.\n'
'\n'
'* When either operand type defines a coercion, this coercion '
'is\n'
' called before that type\'s "__op__()" or "__rop__()" '
'method is\n'
' called, but no sooner. If the coercion returns an object '
'of a\n'
' different type for the operand whose coercion is invoked, '
'part of\n'
' the process is redone using the new object.\n'
'\n'
'* When an in-place operator (like \'"+="\') is used, if the '
'left\n'
' operand implements "__iop__()", it is invoked without any '
'coercion.\n'
' When the operation falls back to "__op__()" and/or '
'"__rop__()", the\n'
' normal coercion rules apply.\n'
'\n'
'* In "x + y", if *x* is a sequence that implements sequence\n'
' concatenation, sequence concatenation is invoked.\n'
'\n'
'* In "x * y", if one operand is a sequence that implements '
'sequence\n'
' repetition, and the other is an integer ("int" or "long"), '
'sequence\n'
' repetition is invoked.\n'
'\n'
'* Rich comparisons (implemented by methods "__eq__()" and so '
'on)\n'
' never use coercion. Three-way comparison (implemented by\n'
' "__cmp__()") does use coercion under the same conditions '
'as other\n'
' binary operations use it.\n'
'\n'
'* In the current implementation, the built-in numeric types '
'"int",\n'
' "long", "float", and "complex" do not use coercion. All '
'these types\n'
' implement a "__coerce__()" method, for use by the '
'built-in\n'
' "coerce()" function.\n'
'\n'
' Changed in version 2.7: The complex type no longer makes '
'implicit\n'
' calls to the "__coerce__()" method for mixed-type binary '
'arithmetic\n'
' operations.\n'
'\n'
'\n'
'With Statement Context Managers\n'
'===============================\n'
'\n'
'New in version 2.5.\n'
'\n'
'A *context manager* is an object that defines the runtime '
'context to\n'
'be established when executing a "with" statement. The '
'context manager\n'
'handles the entry into, and the exit from, the desired '
'runtime context\n'
'for the execution of the block of code. Context managers '
'are normally\n'
'invoked using the "with" statement (described in section The '
'with\n'
'statement), but can also be used by directly invoking their '
'methods.\n'
'\n'
'Typical uses of context managers include saving and '
'restoring various\n'
'kinds of global state, locking and unlocking resources, '
'closing opened\n'
'files, etc.\n'
'\n'
'For more information on context managers, see Context '
'Manager Types.\n'
'\n'
'object.__enter__(self)\n'
'\n'
' Enter the runtime context related to this object. The '
'"with"\n'
" statement will bind this method's return value to the "
'target(s)\n'
' specified in the "as" clause of the statement, if any.\n'
'\n'
'object.__exit__(self, exc_type, exc_value, traceback)\n'
'\n'
' Exit the runtime context related to this object. The '
'parameters\n'
' describe the exception that caused the context to be '
'exited. If the\n'
' context was exited without an exception, all three '
'arguments will\n'
' be "None".\n'
'\n'
' If an exception is supplied, and the method wishes to '
'suppress the\n'
' exception (i.e., prevent it from being propagated), it '
'should\n'
' return a true value. Otherwise, the exception will be '
'processed\n'
' normally upon exit from this method.\n'
'\n'
' Note that "__exit__()" methods should not reraise the '
'passed-in\n'
" exception; this is the caller's responsibility.\n"
'\n'
'See also:\n'
'\n'
' **PEP 343** - The "with" statement\n'
' The specification, background, and examples for the '
'Python "with"\n'
' statement.\n'
'\n'
'\n'
'Special method lookup for old-style classes\n'
'===========================================\n'
'\n'
'For old-style classes, special methods are always looked up '
'in exactly\n'
'the same way as any other method or attribute. This is the '
'case\n'
'regardless of whether the method is being looked up '
'explicitly as in\n'
'"x.__getitem__(i)" or implicitly as in "x[i]".\n'
'\n'
'This behaviour means that special methods may exhibit '
'different\n'
'behaviour for different instances of a single old-style '
'class if the\n'
'appropriate special attributes are set differently:\n'
'\n'
' >>> class C:\n'
' ... pass\n'
' ...\n'
' >>> c1 = C()\n'
' >>> c2 = C()\n'
' >>> c1.__len__ = lambda: 5\n'
' >>> c2.__len__ = lambda: 9\n'
' >>> len(c1)\n'
' 5\n'
' >>> len(c2)\n'
' 9\n'
'\n'
'\n'
'Special method lookup for new-style classes\n'
'===========================================\n'
'\n'
'For new-style classes, implicit invocations of special '
'methods are\n'
"only guaranteed to work correctly if defined on an object's "
'type, not\n'
"in the object's instance dictionary. That behaviour is the "
'reason why\n'
'the following code raises an exception (unlike the '
'equivalent example\n'
'with old-style classes):\n'
'\n'
' >>> class C(object):\n'
' ... pass\n'
' ...\n'
' >>> c = C()\n'
' >>> c.__len__ = lambda: 5\n'
' >>> len(c)\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 1, in <module>\n'
" TypeError: object of type 'C' has no len()\n"
'\n'
'The rationale behind this behaviour lies with a number of '
'special\n'
'methods such as "__hash__()" and "__repr__()" that are '
'implemented by\n'
'all objects, including type objects. If the implicit lookup '
'of these\n'
'methods used the conventional lookup process, they would '
'fail when\n'
'invoked on the type object itself:\n'
'\n'
' >>> 1 .__hash__() == hash(1)\n'
' True\n'
' >>> int.__hash__() == hash(int)\n'
' Traceback (most recent call last):\n'
' File "<stdin>", line 1, in <module>\n'
" TypeError: descriptor '__hash__' of 'int' object needs an "
'argument\n'
'\n'
'Incorrectly attempting to invoke an unbound method of a '
'class in this\n'
"way is sometimes referred to as 'metaclass confusion', and "
'is avoided\n'
'by bypassing the instance when looking up special methods:\n'
'\n'
' >>> type(1).__hash__(1) == hash(1)\n'
' True\n'
' >>> type(int).__hash__(int) == hash(int)\n'
' True\n'
'\n'
'In addition to bypassing any instance attributes in the '
'interest of\n'
'correctness, implicit special method lookup generally also '
'bypasses\n'
'the "__getattribute__()" method even of the object\'s '
'metaclass:\n'
'\n'
' >>> class Meta(type):\n'
' ... def __getattribute__(*args):\n'
' ... print "Metaclass getattribute invoked"\n'
' ... return type.__getattribute__(*args)\n'
' ...\n'
' >>> class C(object):\n'
' ... __metaclass__ = Meta\n'
' ... def __len__(self):\n'
' ... return 10\n'
' ... def __getattribute__(*args):\n'
' ... print "Class getattribute invoked"\n'
' ... return object.__getattribute__(*args)\n'
' ...\n'
' >>> c = C()\n'
' >>> c.__len__() # Explicit lookup via '
'instance\n'
' Class getattribute invoked\n'
' 10\n'
' >>> type(c).__len__(c) # Explicit lookup via '
'type\n'
' Metaclass getattribute invoked\n'
' 10\n'
' >>> len(c) # Implicit lookup\n'
' 10\n'
'\n'
'Bypassing the "__getattribute__()" machinery in this fashion '
'provides\n'
'significant scope for speed optimisations within the '
'interpreter, at\n'
'the cost of some flexibility in the handling of special '
'methods (the\n'
'special method *must* be set on the class object itself in '
'order to be\n'
'consistently invoked by the interpreter).\n'
'\n'
'-[ Footnotes ]-\n'
'\n'
"[1] It *is* possible in some cases to change an object's "
'type,\n'
" under certain controlled conditions. It generally isn't "
| |
# 下加符 = ̪
0xA59C: 0x032C, # 下加符 = ̬
0xA59D: 0x032B, # 下加符 = ̫
0xA59E: 0x0329, # 下加符 = ̩
0xA59F: 0xFF5B, # 左开花括号 = {
0xA5A0: 0xFF5D, # 右闭花括号 = }
0xA5F7: 0x3016, # 左空方圆括号 = 〖
0xA5F8: 0x3017, # 右空方圆括号 = 〗
0xA5F9: 0x29DB, # ⧛
0xA5FA: 0xA5FA, # (vertical ⧛)
0xA5FB: 0x534D, # 卍
0xA5FC: 0xFE47, # 竖排上方括号 = ﹇
0xA5FD: 0xFE48, # 竖排下方括号 = ﹈
0xA5FE: 0x2571, # 斜线 = ╱
})
# Area A6: GB18030 two-byte codes -> Unicode code points (accented Latin
# letters, Greek letter variants, and vertical/centered punctuation).
# Entries mapping a code to itself have no Unicode equivalent and are kept
# as identity placeholders.
_update({
    0xA640: 0x00C5, # foreign-language letter = Å
    0xA641: 0x0100, # foreign-language letter = Ā
    0xA642: 0x00C1, # foreign-language letter = Á
    0xA643: 0x01CD, # foreign-language letter = Ǎ
    0xA644: 0x00C0, # foreign-language letter = À
    0xA645: 0x00C2, # foreign-language letter = Â
    0xA646: 0x00C4, # foreign-language letter = Ä
    0xA647: 0x00C3, # foreign-language letter = Ã
    0xA648: 0x0112, # foreign-language letter = Ē
    0xA649: 0x00C9, # foreign-language letter = É
    0xA64A: 0x011A, # foreign-language letter = Ě
    0xA64B: 0x00C8, # foreign-language letter = È
    0xA64C: 0x00CA, # foreign-language letter = Ê
    0xA64D: 0x00CB, # foreign-language letter = Ë
    0xA64E: 0x1EBC, # foreign-language letter = Ẽ
    0xA64F: 0x012A, # foreign-language letter = Ī
    0xA650: 0x00CD, # foreign-language letter = Í
    0xA651: 0x01CF, # foreign-language letter = Ǐ
    0xA652: 0x00CC, # foreign-language letter = Ì
    0xA653: 0x00CE, # foreign-language letter = Î
    0xA654: 0x00CF, # foreign-language letter = Ï
    0xA655: 0x014C, # foreign-language letter = Ō
    0xA656: 0x00D3, # foreign-language letter = Ó
    0xA657: 0x01D1, # foreign-language letter = Ǒ
    0xA658: 0x00D2, # foreign-language letter = Ò
    0xA659: 0x00D4, # foreign-language letter = Ô
    0xA65A: 0x00D6, # foreign-language letter = Ö
    0xA65B: 0x00D5, # foreign-language letter = Õ
    0xA65C: 0x016A, # foreign-language letter = Ū
    0xA65D: 0x00DA, # foreign-language letter = Ú
    0xA65E: 0x01D3, # foreign-language letter = Ǔ
    0xA65F: 0x00D9, # foreign-language letter = Ù
    0xA660: 0x00DB, # foreign-language letter = Û
    0xA661: 0x00DC, # foreign-language letter = Ü
    0xA662: 0x01D5, # foreign-language letter = Ǖ
    0xA663: 0x01D7, # foreign-language letter = Ǘ
    0xA664: 0x01D9, # foreign-language letter = Ǚ
    0xA665: 0x01DB, # foreign-language letter = Ǜ
    0xA666: 0xA666, # foreign-language letter (Ü̂; no Unicode mapping, identity)
    0xA667: 0x0108, # foreign-language letter = Ĉ
    0xA668: 0x011C, # foreign-language letter = Ĝ
    0xA669: 0x0124, # foreign-language letter = Ĥ
    0xA66A: 0x0134, # foreign-language letter = Ĵ
    0xA66B: 0x0160, # foreign-language letter = Š
    0xA66C: 0x015C, # foreign-language letter = Ŝ
    0xA66D: 0x0178, # foreign-language letter = Ÿ
    0xA66E: 0x017D, # foreign-language letter = Ž
    0xA66F: 0x1E90, # foreign-language letter = Ẑ
    0xA670: 0x0125, # foreign-language letter = ĥ
    0xA671: 0x0135, # foreign-language letter = ĵ
    0xA672: 0x00D1, # foreign-language letter = Ñ
    0xA673: 0x00E1, # á
    0xA674: 0x00E9, # é
    0xA675: 0x00ED, # í
    0xA676: 0x00F3, # ó
    0xA677: 0x00FA, # ú
    0xA678: 0x2339D, # 𣎝
    0xA679: 0x29F15, # 𩼕
    0xA67A: 0x23293, # 𣊓
    0xA67B: 0x3CA0, # 㲠
    0xA67C: 0x2F922, # 牐
    0xA67D: 0x24271, # 𤉱
    0xA67E: 0x2720F, # 𧈏
    0xA680: 0x00C1, # Á
    0xA681: 0x0403, # Ѓ
    0xA682: 0x00C9, # É
    0xA683: 0x040C, # Ќ
    0xA684: 0x00D3, # Ó
    0xA685: 0x00FD, # ý
    0xA686: 0xA686, # (Ы́)
    0xA687: 0xA687, # (Э́)
    0xA688: 0x04EC, # Ӭ
    0xA689: 0xA689, # (Ю́)
    0xA68A: 0xA68A, # (Я́)
    0xA68B: 0xA68B, # (ѣ́)
    0xA68C: 0xA68C, # (Ѣ́)
    0xA68D: 0xA68D, # (И́)
    0xA68E: 0x27E1B, # 𧸛
    0xA68F: 0x910B, # 鄋
    0xA690: 0x29F14, # 𩼔
    0xA691: 0x2A0DF, # 𪃟
    0xA692: 0x20270, # 𠉰
    0xA693: 0x203F1, # 𠏱
    0xA694: 0x211AB, # 𡆫
    0xA695: 0x211E5, # 𡇥
    0xA696: 0x21290, # 𡊐
    0xA697: 0x363E, # 㘾
    0xA698: 0x212DF, # 𡋟
    0xA699: 0x57D7, # 埗
    0xA69A: 0x2165F, # 𡙟
    0xA69B: 0x248C2, # 𤣂
    0xA69C: 0x22288, # 𢊈
    0xA69D: 0x23C62, # 𣱢
    0xA69E: 0x24276, # 𤉶
    0xA69F: 0xFF1A, # colon = :
    0xA6A0: 0xFF1B, # semicolon = ;
    0xA6B9: 0x2202, # lowercase Greek letter = ∂
    0xA6BA: 0x03F5, # lowercase Greek letter = ϵ
    0xA6BB: 0x03D1, # lowercase Greek letter = ϑ
    0xA6BC: 0x03D5, # lowercase Greek letter = ϕ
    0xA6BD: 0x03C6, # lowercase Greek letter = φ
    0xA6BE: 0x03F0, # lowercase Greek letter = ϰ
    0xA6BF: 0x03F1, # lowercase Greek letter = ϱ
    0xA6C0: 0x03C2, # lowercase Greek letter = ς
    0xA6D9: 0xFE10, # vertical comma = ︐
    0xA6DA: 0xFE12, # vertical full stop = ︒
    0xA6DB: 0xFE11, # vertical enumeration comma = ︑
    0xA6DC: 0xFE13, # vertical colon = ︓
    0xA6DD: 0xFE14, # vertical semicolon = ︔
    0xA6DE: 0xFE15, # vertical exclamation mark = ︕
    0xA6DF: 0xFE16, # vertical question mark = ︖
    0xA6EC: 0xFE17, # vertical opening white lenticular bracket = ︗
    0xA6ED: 0xFE18, # vertical closing white lenticular bracket = ︘
    0xA6F3: 0xFE19, # vertical three-dot ellipsis = ︙
    0xA6F6: 0x00B7, # centered interpunct = ·
    0xA6F7: 0xA6F7, # centered comma (middle ,; identity placeholder)
    0xA6F8: 0xA6F8, # centered full stop (middle 。; identity placeholder)
    0xA6F9: 0xA6F9, # centered enumeration comma (middle 、; identity placeholder)
    0xA6FA: 0xA6FA, # centered colon (middle :; identity placeholder)
    0xA6FB: 0xA6FB, # centered semicolon (middle ;; identity placeholder)
    0xA6FC: 0xA6FC, # centered exclamation mark (middle !; identity placeholder)
    0xA6FD: 0xA6FD, # centered question mark (middle ?; identity placeholder)
    0xA6FE: 0xA6FE # ( ͘)
    })
# Area A7: GB18030 two-byte codes -> Unicode code points (rare CJK
# ideographs, CJK extensions B+, and Cyrillic letters). Entries mapping a
# code to itself have no Unicode equivalent and are kept as identity
# placeholders.
_update({
    0xA740: 0x24235, # 𤈵
    0xA741: 0x2431A, # 𤌚
    0xA742: 0x2489B, # 𤢛
    0xA743: 0x4B63, # 䭣
    0xA744: 0x25581, # 𥖁
    0xA745: 0x25BB0, # 𥮰
    0xA746: 0x7C06, # 簆
    0xA747: 0x23388, # 𣎈
    0xA748: 0x26A40, # 𦩀
    0xA749: 0x26F16, # 𦼖
    0xA74A: 0x2717F, # 𧅿
    0xA74B: 0x22A98, # 𢪘
    0xA74C: 0x3005, # 々
    0xA74D: 0x22F7E, # 𢽾
    0xA74E: 0x27BAA, # 𧮪
    0xA74F: 0x20242, # 𠉂
    0xA750: 0x23C5D, # 𣱝
    0xA751: 0x22650, # 𢙐
    0xA752: 0x247EF, # 𤟯
    0xA753: 0x26221, # 𦈡
    0xA754: 0x29A02, # 𩨂
    0xA755: 0x45EA, # 䗪
    0xA756: 0x26B4C, # 𦭌
    0xA757: 0x26D9F, # 𦶟
    0xA758: 0x26ED8, # 𦻘
    0xA759: 0x359E, # 㖞
    0xA75A: 0x20E01, # 𠸁
    0xA75B: 0x20F90, # 𠾐
    0xA75C: 0x3A18, # 㨘
    0xA75D: 0x241A2, # 𤆢
    0xA75E: 0x3B74, # 㭴
    0xA75F: 0x43F2, # 䏲
    0xA760: 0x40DA, # 䃚
    0xA761: 0x3FA6, # 㾦
    0xA762: 0x24ECA, # 𤻊
    0xA763: 0x28C3E, # 𨰾
    0xA764: 0x28C47, # 𨱇
    0xA765: 0x28C4D, # 𨱍
    0xA766: 0x28C4F, # 𨱏
    0xA767: 0x28C4E, # 𨱎
    0xA768: 0x28C54, # 𨱔
    0xA769: 0x28C53, # 𨱓
    0xA76A: 0x25128, # 𥄨
    0xA76B: 0x251A7, # 𥆧
    0xA76C: 0x45AC, # 䖬
    0xA76D: 0x26A2D, # 𦨭
    0xA76E: 0x41F2, # 䇲
    0xA76F: 0x26393, # 𦎓
    0xA770: 0x29F7C, # 𩽼
    0xA771: 0x29F7E, # 𩽾
    0xA772: 0x29F83, # 𩾃
    0xA773: 0x29F87, # 𩾇
    0xA774: 0x29F8C, # 𩾌
    0xA775: 0x27785, # 𧞅
    0xA776: 0x2775E, # 𧝞
    0xA777: 0x28EE7, # 𨻧
    0xA778: 0x290AF, # 𩂯
    0xA779: 0x2070E, # 𠜎
    0xA77A: 0x22AC1, # 𢫁
    0xA77B: 0x20CED, # 𠳭
    0xA77C: 0x3598, # 㖘
    0xA77D: 0x220C7, # 𢃇
    0xA77E: 0x22B43, # 𢭃
    0xA780: 0x4367, # 䍧
    0xA781: 0x20CD3, # 𠳓
    0xA782: 0x20CAC, # 𠲬
    0xA783: 0x36E2, # 㛢
    0xA784: 0x35CE, # 㗎
    0xA785: 0x3B39, # 㬹
    0xA786: 0x44EA, # 䓪
    0xA787: 0x20E96, # 𠺖
    0xA788: 0x20E4C, # 𠹌
    0xA789: 0x35ED, # 㗭
    0xA78A: 0x20EF9, # 𠻹
    0xA78B: 0x24319, # 𤌙
    0xA78C: 0x267CC, # 𦟌
    0xA78D: 0x28056, # 𨁖
    0xA78E: 0x28840, # 𨡀
    0xA78F: 0x20F90, # 𠾐  NOTE(review): same target as 0xA75B — verify against the GB18030 chart
    0xA790: 0x21014, # 𡀔
    0xA791: 0x236DC, # 𣛜
    0xA792: 0x28A17, # 𨨗
    0xA793: 0x28879, # 𨡹
    0xA794: 0x4C9E, # 䲞
    0xA795: 0x20410, # 𠐐
    0xA796: 0x40DF, # 䃟
    0xA797: 0x210BF, # 𡂿
    0xA798: 0x22E0B, # 𢸋
    0xA799: 0x4312, # 䌒
    0xA79A: 0x233AB, # 𣎫
    0xA79B: 0x2812E, # 𨄮
    0xA79C: 0x4A31, # 䨱
    0xA79D: 0x27B48, # 𧭈
    0xA79E: 0x29EAC, # 𩺬
    0xA79F: 0x23822, # 𣠢
    0xA7A0: 0x244CB, # 𤓋
    0xA7C2: 0x0409, # uppercase Cyrillic letter LJE = Љ
    0xA7C3: 0x040A, # uppercase Cyrillic letter NJE = Њ
    0xA7C4: 0x040F, # uppercase Cyrillic letter DZHE = Џ
    0xA7C5: 0x04AE, # uppercase Cyrillic letter = Ү
    0xA7C6: 0x0402, # Cyrillic letter = Ђ
    0xA7C7: 0x040B, # Cyrillic letter = Ћ
    0xA7C8: 0x0474, # Cyrillic letter = Ѵ
    0xA7C9: 0x0462, # Cyrillic letter = Ѣ
    0xA7CA: 0x0463, # Cyrillic letter = ѣ
    0xA7CB: 0x04E8, # Cyrillic letter = Ө
    0xA7CC: 0x0459, # Cyrillic letter = љ
    0xA7CD: 0x045A, # Cyrillic letter = њ
    0xA7CE: 0x045F, # Cyrillic letter = џ
    0xA7CF: 0x04AF, # Cyrillic letter = ү
    0xA7F2: 0x00E1, # Cyrillic letter (stressed vowel) = á
    0xA7F3: 0x00E9, # Cyrillic letter (stressed vowel) = é
    0xA7F4: 0xA7F4, # Cyrillic letter (и́; identity placeholder)
    0xA7F5: 0x00F3, # Cyrillic letter (stressed vowel) = ó
    0xA7F6: 0x00FD, # Cyrillic letter (stressed vowel) = ý
    0xA7F7: 0xA7F7, # Cyrillic letter (ы́; identity placeholder)
    0xA7F8: 0xA7F8, # Cyrillic letter (э́; identity placeholder)
    0xA7F9: 0xA7F9, # Cyrillic letter (ю́; identity placeholder)
    0xA7FA: 0xA7FA, # Cyrillic letter (я́; identity placeholder)
    0xA7FB: 0x0452, # Cyrillic letter = ђ
    0xA7FC: 0x045B, # Cyrillic letter = ћ
    0xA7FD: 0x0475, # Cyrillic letter = ѵ
    0xA7FE: 0x04E9 # Cyrillic letter = ө
    })
# Area A8
_update({
0xA8BC: 0x1E3F, # 汉语拼音(ḿ) = ḿ
0xA8C1: 0xA8C1, # 中文阴圈码十(⏺ + 十)
0xA8C2: 0xA8C2, # 中文阴圈码廿(⏺ + 廿)
0xA8C3: | |
<reponame>mirfan899/MTTS
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
    src.wxgui.clients.dataroamerclient.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GUI management of annotated data.
"""
import os.path
import wx
import wx.lib.scrolledpanel as scrolled
from sppas.src.ui.wxgui.sp_icons import TIER_RENAME
from sppas.src.ui.wxgui.sp_icons import TIER_DELETE
from sppas.src.ui.wxgui.sp_icons import TIER_CUT
from sppas.src.ui.wxgui.sp_icons import TIER_COPY
from sppas.src.ui.wxgui.sp_icons import TIER_PASTE
from sppas.src.ui.wxgui.sp_icons import TIER_DUPLICATE
from sppas.src.ui.wxgui.sp_icons import TIER_MOVE_UP
from sppas.src.ui.wxgui.sp_icons import TIER_MOVE_DOWN
from sppas.src.ui.wxgui.sp_icons import TIER_PREVIEW
from sppas.src.ui.wxgui.sp_icons import TIER_RADIUS
from sppas.src.ui.wxgui.ui.CustomEvents import FileWanderEvent, spEVT_FILE_WANDER
from sppas.src.ui.wxgui.ui.CustomEvents import spEVT_PANEL_SELECTED
from sppas.src.ui.wxgui.ui.CustomEvents import spEVT_SETTINGS
from sppas.src.ui.wxgui.panels.trslist import TrsList
from sppas.src.ui.wxgui.panels.mainbuttons import MainToolbarPanel
from sppas.src.ui.wxgui.structs.files import xFiles
import sppas.src.ui.wxgui.dialogs.filedialogs as filedialogs
from sppas.src.ui.wxgui.dialogs.msgdialogs import ShowInformation
from sppas.src.ui.wxgui.dialogs.msgdialogs import ShowYesNoQuestion
from .baseclient import BaseClient
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
# Custom toolbar action ids (standard actions reuse wx.ID_* constants).
RENAME_ID = wx.NewId()       # rename the selected tier
DUPLICATE_ID = wx.NewId()    # duplicate the selected tier
PREVIEW_ID = wx.NewId()      # preview the selected tier
TIER_RADIUS_ID = wx.NewId()  # fix boundary vagueness (radius)
# ----------------------------------------------------------------------------
# Main class that manage the notebook
# ----------------------------------------------------------------------------
class DataRoamerClient(BaseClient):
    """Notebook manager for the DataRoamer component.

    Each page of the notebook (unless empty) hosts a DataRoamer instance
    that displays the annotated files opened in that page. Several files
    may live in one page.
    """

    def __init__(self, parent, prefsIO):
        BaseClient.__init__(self, parent, prefsIO)
        self._update_members()

    # ------------------------------------------------------------------------

    def _update_members(self):
        """Set members: this client accepts several files per page."""
        self._multiplefiles = True
        # Quick and dirty way to tell the file manager we take multiple files.
        self._prefsIO.SetValue('F_CCB_MULTIPLE', t='bool', v=True, text='')

    # ------------------------------------------------------------------------

    def CreateComponent(self, parent, prefsIO):
        """Return the component displayed in a page of the notebook."""
        return DataRoamer(parent, prefsIO)

    # ------------------------------------------------------------------------

    def New(self):
        """Create a new annotation file and add it into the current page."""
        filename = filedialogs.SaveAsAnnotationFile()
        if filename is None:
            return
        # Hand the new file to the file manager; it will route it back here.
        evt = FileWanderEvent(filename=filename, status=False)
        evt.SetEventObject(self)
        wx.PostEvent(self.GetTopLevelParent(), evt)

    # ------------------------------------------------------------------------

    def Save(self):
        """Save every file displayed in the current page."""
        page = self._notebook.GetCurrentPage()
        for idx in range(self._xfiles.GetSize()):
            if self._xfiles.GetOther(idx) == page:
                self._xfiles.GetObject(idx).Save()

    # ------------------------------------------------------------------------

    def SaveAs(self):
        """Save, under a new name, every file displayed in the current page."""
        page = self._notebook.GetCurrentPage()
        for idx in range(self._xfiles.GetSize()):
            if self._xfiles.GetOther(idx) == page:
                self._xfiles.GetObject(idx).SaveAs()

    # ------------------------------------------------------------------------

    def SaveAll(self):
        """Save the files of all pages."""
        for idx in range(self._xfiles.GetSize()):
            self._xfiles.GetObject(idx).SaveAll()
# ----------------------------------------------------------------------------
# The Component is the content of one page of the notebook.
# ----------------------------------------------------------------------------
class DataRoamer(wx.Panel):
"""
:author: <NAME>
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: <EMAIL>
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 <NAME>
:summary: This component allows to manage annotated files.
"""
    def __init__(self, parent, prefsIO):
        """Create the DataRoamer panel: toolbar on top, scrolled file list below.

        :param parent: (wx.Window) parent window
        :param prefsIO: preferences holder, queried for colours and font
        """
        wx.Panel.__init__(self, parent, -1)
        # members
        self._filetrs = xFiles() # Associate files/trsdata
        self._selection = None # the selected TrsList panel object (not an index)
        self._clipboard = None # Used to cut and paste
        self._prefsIO = prefsIO
        # create the client panel: toolbar (fixed) above the file content area
        sizer = wx.BoxSizer(wx.VERTICAL)
        toolbar = self._create_toolbar()
        sizer.Add(toolbar, proportion=0, flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=4)
        self._trspanel = self._create_content()
        sizer.Add(self._trspanel, proportion=2, flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=4)
        # Bind events (OnSettings is inherited/defined elsewhere in this class)
        self._trspanel.Bind(spEVT_PANEL_SELECTED, self.OnPanelSelection)
        self.Bind(spEVT_FILE_WANDER, self.OnFileWander)
        self.Bind(spEVT_SETTINGS, self.OnSettings)
        self.Bind(wx.EVT_BUTTON, self.ProcessEvent)
        self.SetBackgroundColour(prefsIO.GetValue('M_BG_COLOUR'))
        self.SetForegroundColour(prefsIO.GetValue('M_FG_COLOUR'))
        self.SetFont(prefsIO.GetValue('M_FONT'))
        self.SetSizer(sizer)
        self.Layout()
# ----------------------------------------------------------------------
def _create_toolbar(self):
"""Creates a toolbar panel."""
toolbar = MainToolbarPanel(self, self._prefsIO)
toolbar.AddButton(RENAME_ID,
TIER_RENAME,
'Rename',
tooltip="Rename the selected tier.")
toolbar.AddButton(wx.ID_DELETE,
TIER_DELETE,
'Delete',
tooltip="Delete the selected tier.")
toolbar.AddButton(wx.ID_CUT,
TIER_CUT,
'Cut',
tooltip="Cut the selected tier.")
toolbar.AddButton(wx.ID_COPY,
TIER_COPY,
"Copy",
tooltip="Copy the selected tier.")
toolbar.AddButton(wx.ID_PASTE,
TIER_PASTE,
"Paste",
tooltip="Paste the selected tier.")
toolbar.AddButton(DUPLICATE_ID,
TIER_DUPLICATE,
"Duplicate",
tooltip="Duplicate the selected tier.")
toolbar.AddButton(wx.ID_UP,
TIER_MOVE_UP,
"Move Up",
tooltip="Move up the selected tier.")
toolbar.AddButton(wx.ID_DOWN,
TIER_MOVE_DOWN,
"Move Down",
tooltip="Move down the selected tier.")
toolbar.AddButton(TIER_RADIUS_ID,
TIER_RADIUS,
"Radius",
tooltip="Fix the vagueness of each boundary. "
"Useful only for .xra file format.")
toolbar.AddButton(PREVIEW_ID,
TIER_PREVIEW,
"View",
tooltip="Preview of the selected tier.")
return toolbar
# ----------------------------------------------------------------------
def _create_content(self):
"""Create the panel with files content."""
panel = scrolled.ScrolledPanel(self, -1)
self._trssizer = wx.BoxSizer(wx.VERTICAL)
panel.SetSizerAndFit(self._trssizer)
panel.SetAutoLayout(True)
panel.SetupScrolling()
return panel
# ------------------------------------------------------------------------
# Callbacks to any kind of event
# ------------------------------------------------------------------------
def ProcessEvent(self, event):
"""Processes an event.
Processes an event, searching event tables and calling zero or more
suitable event handler function(s). Note that the ProcessEvent
method is called from the wxPython docview framework directly since
wxPython does not have a virtual ProcessEvent function.
:param event: (wx.Event)
"""
ide = event.GetId()
if ide == RENAME_ID:
self.Rename()
return True
elif ide == wx.ID_DELETE:
self.Delete()
return True
elif ide == wx.ID_CUT:
self.Cut()
return True
elif ide == wx.ID_COPY:
self.Copy()
return True
elif ide == wx.ID_PASTE:
self.Paste()
return True
elif ide == DUPLICATE_ID:
self.Duplicate()
return True
elif ide == wx.ID_UP:
self.MoveUp()
return True
elif ide == wx.ID_DOWN:
self.MoveDown()
return True
elif ide == PREVIEW_ID:
self.Preview()
return True
elif ide == TIER_RADIUS_ID:
self.Radius()
return True
return wx.GetApp().ProcessEvent(event)
# ----------------------------------------------------------------------
# Callbacks
# ----------------------------------------------------------------------
def OnFileWander(self, event):
"""A file was checked/unchecked somewhere else, then set/unset the data.
:param event: (wx.Event)
"""
f = event.filename
s = event.status
if s is True:
r = self.SetData(f)
if r is False:
evt = FileWanderEvent(filename=f, status=False)
evt.SetEventObject(self)
wx.PostEvent(self.GetParent().GetParent().GetParent(), evt)
else:
if f is None:
self.UnsetAllData()
else:
self.UnsetData(f)
evt = FileWanderEvent(filename=f, status=False)
evt.SetEventObject(self)
wx.PostEvent(self.GetParent().GetParent().GetParent(), evt)
# ------------------------------------------------------------------------
def OnPanelSelection(self, event):
"""Change the current selection (the transcription file that was clicked on)."""
sel = event.panel
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p != sel:
p.Deselect()
p.SetBackgroundColour(self._prefsIO.GetValue('M_BG_COLOUR'))
else:
# set the new selection
self._selection = p
p.SetBackgroundColour(wx.Colour(215, 215, 240))
# -----------------------------------------------------------------------
# Functions on a tier...
# -----------------------------------------------------------------------
def Rename(self):
"""Rename a tier."""
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p == self._selection:
p.Rename()
# -----------------------------------------------------------------------
def Delete(self):
"""Delete a tier."""
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p == self._selection:
p.Delete()
# -----------------------------------------------------------------------
def Cut(self):
"""Cut a tier."""
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p == self._selection:
self._clipboard = p.Cut()
# -----------------------------------------------------------------------
def Copy(self):
"""Copy a tier."""
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p == self._selection:
self._clipboard = p.Copy()
# -----------------------------------------------------------------------
def Paste(self):
"""Paste a tier."""
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p == self._selection:
p.Paste(self._clipboard)
# -----------------------------------------------------------------------
def Duplicate(self):
"""Duplicate a tier."""
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p == self._selection:
p.Duplicate()
# -----------------------------------------------------------------------
def MoveUp(self):
"""Move up a tier."""
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p == self._selection:
p.MoveUp()
# -----------------------------------------------------------------------
def MoveDown(self):
    """Forward a move-down request to the currently selected tier panel."""
    total = self._filetrs.GetSize()
    for panel in (self._filetrs.GetObject(i) for i in range(total)):
        if panel == self._selection:
            panel.MoveDown()
# -----------------------------------------------------------------------
def Preview(self):
    """Ask the selected tier panel to open its preview frame."""
    total = self._filetrs.GetSize()
    for panel in (self._filetrs.GetObject(i) for i in range(total)):
        if panel == self._selection:
            panel.Preview()
# -----------------------------------------------------------------------
def Radius(self):
    """Ask the selected tier panel to change its TimePoint radius value."""
    total = self._filetrs.GetSize()
    for panel in (self._filetrs.GetObject(i) for i in range(total)):
        if panel == self._selection:
            panel.Radius()
# ----------------------------------------------------------------------
# Functions on a file...
# ----------------------------------------------------------------------
def Save(self):
    """Save the file which owns the currently selected tier.

    Shows an information dialog and does nothing when no tier is
    selected.
    """
    if self._selection is None:
        ShowInformation(self,
                        self._prefsIO,
                        "No file selected!\n"
                        "Click on a tier to select a file...",
                        style=wx.ICON_INFORMATION)
        return
    total = self._filetrs.GetSize()
    for panel in (self._filetrs.GetObject(i) for i in range(total)):
        if panel == self._selection:
            panel.Save()
# ----------------------------------------------------------------------
def SaveAs(self):
"""Save as... the selected file."""
if self._selection is None:
ShowInformation(self,
self._prefsIO,
"No file selected!\n"
"Click on a tier to select a file...",
style=wx.ICON_INFORMATION)
return
found = -1
for i in range(self._filetrs.GetSize()):
p = self._filetrs.GetObject(i)
if p == self._selection:
found = i
break
if found > -1:
f = self._filetrs.GetFilename(i)
p = self._filetrs.GetObject(i)
# Ask for the new file name
filename = filedialogs.SaveAsAnnotationFile()
if filename is None:
return
| |
:return: Whether the operation succeeded or not
"""
params = {'ImageId' : image_id,
'Attribute' : attribute}
return self.get_status('ResetImageAttribute', params)
# Instance methods
def get_all_instances(self, instance_ids=None):
    """Describe the reservations owned by this account.

    :type instance_ids: list
    :param instance_ids: optional instance IDs used to filter the result
    :rtype: list
    :return: a list of :class:`boto.ec2.instance.Reservation`
    """
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    markers = [('item', Reservation)]
    return self.get_list('DescribeInstances', params, markers)
def run_instances(self, image_id, min_count=1, max_count=1,
                  key_name=None, security_groups=None,
                  user_data=None, addressing_type=None,
                  instance_type='m1.small', placement=None,
                  kernel_id=None, ramdisk_id=None,
                  monitoring_enabled=False, subnet_id=None,
                  block_device_map=None,
                  disable_api_termination=False,
                  instance_initiated_shutdown_behavior=None,
                  private_ip_address=None,
                  placement_group=None):
    """Launch one or more instances of the given image on EC2.

    :type image_id: string
    :param image_id: The ID of the image to run
    :type min_count: int
    :param min_count: The minimum number of instances to launch
    :type max_count: int
    :param max_count: The maximum number of instances to launch
    :type key_name: string
    :param key_name: The name of the key pair with which to launch instances
    :type security_groups: list of strings
    :param security_groups: Names (or SecurityGroup objects) of the groups
                            to associate with the instances
    :type user_data: string
    :param user_data: The user data passed to the launched instances
    :type instance_type: string
    :param instance_type: The type of instance to run, e.g.
                          m1.small | m1.large | m1.xlarge | c1.medium |
                          c1.xlarge | m2.xlarge | m2.2xlarge |
                          m2.4xlarge | cc1.4xlarge
    :type placement: string
    :param placement: The availability zone in which to launch the instances
    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the instances
    :type ramdisk_id: string
    :param ramdisk_id: The ID of the RAM disk with which to launch the
                       instances
    :type monitoring_enabled: bool
    :param monitoring_enabled: Enable CloudWatch monitoring on the instance.
    :type subnet_id: string
    :param subnet_id: The VPC subnet ID within which to launch the instances
    :type private_ip_address: string
    :param private_ip_address: Optional specific IP address from the VPC
                               subnet (e.g., 10.0.0.25)
    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: Describes the EBS volumes associated with
                             the image
    :type disable_api_termination: bool
    :param disable_api_termination: If True, the instances cannot be
                                    terminated via the API
    :type instance_initiated_shutdown_behavior: string
    :param instance_initiated_shutdown_behavior: Whether the instance's EBS
                                                 volumes are stopped or
                                                 terminated on shutdown by
                                                 the owner: stop | terminate
    :type placement_group: string
    :param placement_group: Name of the placement group in which the
                            instance(s) will be launched
    :rtype: Reservation
    :return: The :class:`boto.ec2.instance.Reservation` associated with
             the request for machines
    """
    params = {'ImageId': image_id,
              'MinCount': min_count,
              'MaxCount': max_count}
    # Optional scalar arguments map one-to-one onto request keys; only
    # truthy values are sent, matching the service defaults otherwise.
    optional = (('KeyName', key_name),
                ('AddressingType', addressing_type),
                ('InstanceType', instance_type),
                ('Placement.AvailabilityZone', placement),
                ('Placement.GroupName', placement_group),
                ('KernelId', kernel_id),
                ('RamdiskId', ramdisk_id),
                ('SubnetId', subnet_id),
                ('PrivateIpAddress', private_ip_address),
                ('InstanceInitiatedShutdownBehavior',
                 instance_initiated_shutdown_behavior))
    for key, value in optional:
        if value:
            params[key] = value
    if security_groups:
        # Accept both plain group names and SecurityGroup objects.
        names = [group.name if isinstance(group, SecurityGroup) else group
                 for group in security_groups]
        self.build_list_params(params, names, 'SecurityGroup')
    if user_data:
        # User data travels base64-encoded on the wire.
        params['UserData'] = base64.b64encode(user_data)
    if monitoring_enabled:
        params['Monitoring.Enabled'] = 'true'
    if disable_api_termination:
        params['DisableApiTermination'] = 'true'
    if block_device_map:
        block_device_map.build_list_params(params)
    return self.get_object('RunInstances', params, Reservation, verb='POST')
def terminate_instances(self, instance_ids=None):
    """Terminate the given instances.

    :type instance_ids: list
    :param instance_ids: instance IDs to terminate
    :rtype: list
    :return: the :class:`boto.ec2.instance.Instance` objects terminated
    """
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    markers = [('item', Instance)]
    return self.get_list('TerminateInstances', params, markers)
def stop_instances(self, instance_ids=None, force=False):
    """Stop the given instances.

    :type instance_ids: list
    :param instance_ids: instance IDs to stop
    :type force: bool
    :param force: force the instances to stop
    :rtype: list
    :return: the :class:`boto.ec2.instance.Instance` objects stopped
    """
    params = {'Force': 'true'} if force else {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    return self.get_list('StopInstances', params, [('item', Instance)])
def start_instances(self, instance_ids=None):
    """Start the given (previously stopped) instances.

    :type instance_ids: list
    :param instance_ids: instance IDs to start
    :rtype: list
    :return: the :class:`boto.ec2.instance.Instance` objects started
    """
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    markers = [('item', Instance)]
    return self.get_list('StartInstances', params, markers)
def get_console_output(self, instance_id):
    """Retrieve the console output of one running instance.

    :type instance_id: string
    :param instance_id: the instance whose console output is wanted
    :rtype: :class:`boto.ec2.instance.ConsoleOutput`
    :return: the console output wrapped in a ConsoleOutput object
    """
    # GetConsoleOutput takes exactly one instance, but the wire format
    # still uses the InstanceId.N list convention.
    params = {}
    self.build_list_params(params, [instance_id], 'InstanceId')
    return self.get_object('GetConsoleOutput', params, ConsoleOutput)
def reboot_instances(self, instance_ids=None):
    """Request a reboot of the given instances.

    :type instance_ids: list
    :param instance_ids: instance IDs to reboot
    :return: the status of the RebootInstances request
    """
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    return self.get_status('RebootInstances', params)
def confirm_product_instance(self, product_code, instance_id):
    """Confirm that the given product code is attached to the instance.

    :rtype: tuple
    :return: (status, ownerId) of the ConfirmProductInstance result
    """
    params = {'ProductCode': product_code,
              'InstanceId': instance_id}
    result = self.get_object('ConfirmProductInstance', params, ResultSet)
    return (result.status, result.ownerId)
# InstanceAttribute methods
def get_instance_attribute(self, instance_id, attribute):
    """Describe a single attribute of an instance.

    :type instance_id: string
    :param instance_id: The Amazon id of the instance
    :type attribute: string
    :param attribute: The attribute you need information about.
                      Valid choices are:
                      instanceType|kernel|ramdisk|userData|
                      disableApiTermination|
                      instanceInitiatedShutdownBehavior|
                      rootDeviceName|blockDeviceMapping
    :rtype: :class:`boto.ec2.image.ImageAttribute`
    :return: an ImageAttribute object holding the requested value
    """
    params = {'InstanceId': instance_id}
    if attribute:
        params['Attribute'] = attribute
    return self.get_object('DescribeInstanceAttribute', params,
                           InstanceAttribute)
def modify_instance_attribute(self, instance_id, attribute, value):
    """Change one attribute of an instance.

    :type instance_id: string
    :param instance_id: The instance id you wish to change
    :type attribute: string
    :param attribute: The attribute you wish to change:
                      instanceType - a valid instance type (m1.small)
                      kernel - kernel ID (None)
                      ramdisk - ramdisk ID (None)
                      userData - base64-encoded string (None)
                      disableApiTermination - boolean (true)
                      instanceInitiatedShutdownBehavior - stop|terminate
                      rootDeviceName - device name (None)
    :type value: string
    :param value: The new value for the attribute
    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    # disableApiTermination is boolean on the wire; accept a Python bool
    # and translate it to the lowercase strings EC2 expects.
    if attribute == 'disableApiTermination' and isinstance(value, bool):
        value = 'true' if value else 'false'
    params = {'InstanceId': instance_id,
              'Attribute': attribute,
              'Value': value}
    return self.get_status('ModifyInstanceAttribute', params)
def reset_instance_attribute(self, instance_id, attribute):
    """Reset an instance attribute to its default value.

    :type instance_id: string
    :param instance_id: ID of the instance
    :type attribute: string
    :param attribute: The attribute to reset: kernel|ramdisk
    :rtype: bool
    :return: Whether the operation succeeded or not
    """
    params = {'InstanceId': instance_id,
              'Attribute': attribute}
    return self.get_status('ResetInstanceAttribute', params)
# Spot Instances
def get_all_spot_instance_requests(self, request_ids=None):
    """Describe the spot instance requests owned by this account.

    @type request_ids: list
    @param request_ids: optional spot instance request IDs to filter on
    @rtype: list
    @return: a list of
        :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
    """
    params = {}
    if request_ids:
        self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
    markers = [('item', SpotInstanceRequest)]
    return self.get_list('DescribeSpotInstanceRequests', params, markers)
def get_spot_price_history(self, start_time=None, end_time=None,
                           instance_type=None, product_description=None):
    """Describe the recent spot-instance price history.

    @type start_time: str
    @param start_time: ISO8601 DateTime string; start of the window.
    @type end_time: str
    @param end_time: ISO8601 DateTime string; end of the window.
    @type instance_type: str
    @param instance_type: restrict the results to one instance type.
    @type product_description: str
    @param product_description: restrict the results to one platform;
                                valid values are currently: Linux
    @rtype: list
    @return: a list of price/timestamp entries.
    """
    params = {}
    for key, value in (('StartTime', start_time),
                       ('EndTime', end_time),
                       ('InstanceType', instance_type),
                       ('ProductDescription', product_description)):
        if value:
            params[key] = value
    return self.get_list('DescribeSpotPriceHistory', params,
                         [('item', SpotPriceHistory)])
def request_spot_instances(self, price, image_id, count=1, type=None,
valid_from=None, valid_until=None,
launch_group=None, availability_zone_group=None,
key_name=None, security_groups=None,
user_data=None, addressing_type=None,
instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
block_device_map=None):
"""
Request instances on the spot market at a particular price.
:type price: str
:param price: The maximum price of your bid
:type image_id: string
:param image_id: The ID of the image to run
| |
<reponame>wkerzendorf/forspy<gh_stars>0
try:
import numpy
from pipeline_display import *
from pipeline_product import *
from reflex_plot_widgets import *
from numpy.polynomial import Polynomial
numpy.seterr(invalid='ignore')
except ImportError:
donothing=1
class PlotableReducedArc:
    """Wrap a reduced arc-lamp frame so it can be rendered by ImageDisplay."""

    def __init__(self, fits):
        self.arc = PipelineProduct(fits)
        self.arcdisp = ImageDisplay()
        self.loadFromFits()

    def loadFromFits(self):
        """Read the image pixels and the 2D linear WCS from the product."""
        self.arc.readImage()
        self.arc.read2DLinearWCS()

    def plot(self, subplot, title, tooltip):
        """Render the arc with a wavelength-calibrated X axis."""
        disp = self.arcdisp
        disp.setLabels('Lambda [Angstrom]', 'Y [pix]')
        disp.setZLimits((-100., 1000.))
        disp.setXLinearWCSAxis(self.arc.crval1, self.arc.cdelt1,
                               self.arc.crpix1)
        disp.display(subplot, title, tooltip, self.arc.image)
class PlotableFlat(object):
    """Wrap a flat-field frame plus an optional slit-trace table.

    When a slit-trace table is given, the polynomial slit edges are
    evaluated once per X pixel in loadFromFits() so plot() can overlay
    them on the flat image.
    """

    def __init__(self, fits_flat, fits_slittrace):
        self.flat = PipelineProduct(fits_flat)
        self.slittrace = None
        if fits_slittrace is not None:
            self.slittrace = PipelineProduct(fits_slittrace)
        self.flatdisp = ImageDisplay()
        self.loadFromFits()

    def loadFromFits(self):
        """Read the flat image and, if present, evaluate the slit traces."""
        self.flat.readImage()
        if self.slittrace is not None:
            # One column per polynomial coefficient, minus the leading column.
            ndegree = self.slittrace.getTableNcols(1) - 1
            # Two rows per slit (top edge, bottom edge).  Floor division
            # keeps the count an int under Python 3 (the original '/'
            # would yield a float and break range()); matches the '//'
            # already used elsewhere in this module.
            self.nslits = self.slittrace.getTableNrows(1) // 2
            degreecols = []
            for deg in range(ndegree):
                colname = 'c%d' % deg
                self.slittrace.readTableColumn(1, colname)
                degreecols.append(self.slittrace.column)
            top_trace_polynomials = []
            bottom_trace_polynomials = []
            for slit in range(self.nslits):
                # Even rows hold the top edge, odd rows the bottom edge.
                top_coeff = [degreecols[deg][2 * slit]
                             for deg in range(ndegree)]
                bottom_coeff = [degreecols[deg][2 * slit + 1]
                                for deg in range(ndegree)]
                top_trace_polynomials.append(Polynomial(top_coeff))
                bottom_trace_polynomials.append(Polynomial(bottom_coeff))
            # Evaluate the traces on every X pixel, converting to the
            # 1-based FITS pixel convention in both axes.
            self.xpos_traces = []
            self.ypos_top_traces = []
            self.ypos_bottom_traces = []
            for slit in range(self.nslits):
                ypos_top = []
                ypos_bottom = []
                xpos = []
                for xpix in range(self.flat.image.shape[1]):
                    xpos.append(xpix + 1)
                    ypos_top.append(top_trace_polynomials[slit](xpix) + 1)
                    ypos_bottom.append(bottom_trace_polynomials[slit](xpix) + 1)
                self.xpos_traces.append(xpos)
                self.ypos_top_traces.append(ypos_top)
                self.ypos_bottom_traces.append(ypos_bottom)

    def plot(self, subplot, title, tooltip):
        """Display the flat and overlay the slit edges, if available."""
        self.flatdisp.setLabels('X [pix]', 'Y [pix]')
        self.flatdisp.display(subplot, title, tooltip, self.flat.image)
        if self.slittrace is not None:
            subplot.autoscale(enable=False)
            for slit in range(self.nslits):
                subplot.plot(self.xpos_traces[slit], self.ypos_top_traces[slit],
                             linestyle='solid', color='red')
                subplot.plot(self.xpos_traces[slit], self.ypos_bottom_traces[slit],
                             linestyle='solid', color='darkred')
class PlotableNormFlat(PlotableFlat):
    """A normalised flat: same display as PlotableFlat with tight Z cuts."""

    def __init__(self, fits_flat, fits_slittrace):
        super(PlotableNormFlat, self).__init__(fits_flat, fits_slittrace)

    def plot(self, subplot, title, tooltip):
        """Show the flat around unity after clipping large outliers."""
        self.flatdisp.setZLimits((0.9, 1.1))
        # Pixels far above 1 are artefacts; zero them before displaying.
        self.flat.image[self.flat.image > 5.] = 0
        super(PlotableNormFlat, self).plot(subplot, title, tooltip)
class PlotableRawFlat(PlotableFlat):
    """A raw flat whose slit traces are shifted by the trimmed Y overscan."""

    def __init__(self, fits_flat_raw, fits_master_flat, fits_slittrace):
        super(PlotableRawFlat, self).__init__(fits_flat_raw, fits_slittrace)
        if self.slittrace is not None and fits_master_flat is not None:
            master_flat = PipelineProduct(fits_master_flat)
            self.trimm_lly = master_flat.all_hdu[0].header.get('HIERARCH ESO QC TRIMM LLY')
            # The raw frame still contains the Y overscan that was trimmed
            # from the master flat, so shift every trace accordingly.
            offset = self.trimm_lly - 1
            for ypos_top in self.ypos_top_traces:
                for j, ypos in enumerate(ypos_top):
                    ypos_top[j] = ypos + offset
            for ypos_bottom in self.ypos_bottom_traces:
                for j, ypos in enumerate(ypos_bottom):
                    ypos_bottom[j] = ypos + offset

    def plot(self, subplot, title, tooltip):
        """Delegate to the PlotableFlat display."""
        super(PlotableRawFlat, self).plot(subplot, title, tooltip)
class PlotableSpatialMap:
    """Wrap a spatial-map product for display."""

    def __init__(self, fits_spatialmap):
        self.spatialmap = PipelineProduct(fits_spatialmap)
        self.spatialmapdisp = ImageDisplay()
        self.loadFromFits()

    def loadFromFits(self):
        """Read the spatial-map image pixels."""
        self.spatialmap.readImage()

    def plot(self, subplot, title, tooltip):
        """Show the spatial map with fixed display cuts."""
        disp = self.spatialmapdisp
        disp.setLabels('X', 'Y')
        disp.setZLimits((0., 100))
        disp.display(subplot, title, tooltip, self.spatialmap.image)
class PlotableMappedScience:
    """Wrap a wavelength-mapped science frame plus the object table.

    The object table, when given, provides per slit up to N extracted
    objects as (start_i, end_i) Y ranges; plot() overlays those ranges
    and getObjectInPosition() maps a Y coordinate back to an object.
    """

    def __init__(self, fits_mappedscience, fits_objecttable):
        self.mappedscience = PipelineProduct(fits_mappedscience)
        self.mappedsciencedisp = ImageDisplay()
        if fits_objecttable is not None:
            self.objecttable = PipelineProduct(fits_objecttable)
        else:
            self.objecttable = None
        self.loadFromFits()

    def loadFromFits(self):
        """Read the image and collect the extracted-object Y ranges."""
        self.mappedscience.readImage()
        if self.objecttable is not None:
            nslit = self.objecttable.getTableNrows(1)
            # The table holds 7 fixed columns followed by 4 columns per
            # object slot.  Floor division keeps the count an int under
            # Python 3 (the original '/' would yield a float and break
            # range()); matches the '//' already used in this module.
            maxobjectperslit = (self.objecttable.getTableNcols(1) - 7) // 4
            start_extracted_cols = []
            end_extracted_cols = []
            for obj in range(maxobjectperslit):
                colname = 'start_%d' % (obj + 1)
                self.objecttable.readTableColumn(1, colname)
                start_extracted_cols.append(self.objecttable.column)
                colname = 'end_%d' % (obj + 1)
                self.objecttable.readTableColumn(1, colname)
                end_extracted_cols.append(self.objecttable.column)
            self.ybottom_obj_extract = []
            self.ytop_obj_extract = []
            for slit in range(nslit):
                for obj in range(maxobjectperslit):
                    ybottom = start_extracted_cols[obj][slit]
                    ytop = end_extracted_cols[obj][slit]
                    # Unused object slots are flagged with -1.
                    if ybottom != -1:
                        self.ybottom_obj_extract.append(ybottom)
                        self.ytop_obj_extract.append(ytop)
            self.nobjects = len(self.ybottom_obj_extract)

    def plot(self, subplot, title, tooltip):
        """Display the frame and mark each extraction window."""
        self.mappedsciencedisp.setLabels('X [pix]', 'Y [pix]')
        self.mappedsciencedisp.setZLimits((0., 0.9))
        self.mappedsciencedisp.display(subplot, title, tooltip, self.mappedscience.image)
        if self.objecttable is not None:
            subplot.autoscale(enable=False)
            for obj in range(self.nobjects):
                subplot.axhline(self.ytop_obj_extract[obj], linestyle='solid', color='red')
                subplot.axhline(self.ybottom_obj_extract[obj], linestyle='solid', color='yellow')

    def getObjectInPosition(self, ypos):
        """Return the 1-based object number (counted from the end of the
        extraction list) whose window strictly contains ypos, or -1."""
        for obj in range(self.nobjects):
            if ypos > self.ybottom_obj_extract[obj] and \
               ypos < self.ytop_obj_extract[obj]:
                return self.nobjects - obj
        return -1
class PlotableDispResiduals:
    """Wrap the dispersion-residuals table for scatter plots."""

    def __init__(self, fits_dispresiduals):
        self.dispresiduals = PipelineProduct(fits_dispresiduals)
        self.resdisplay = ScatterDisplay()
        self.loadFromFits()

    def loadFromFits(self):
        """Read the wavelengths and the per-row residual columns."""
        self.dispresiduals.readTableColumn(1, 'wavelength')
        self.wave = self.dispresiduals.column
        nwave = self.dispresiduals.getTableNrows(1)
        ncolumns = self.dispresiduals.getTableNcols(1)
        nselectedrows = (ncolumns - 1) // 3
        self.residuals = []
        self.allwave = []
        self.allypos = []
        self.allresiduals = []
        for row in range(nselectedrows):
            # TODO: Currently the residuals are computed every 10 rows.
            # This is hard-coded in the pipeline.  It would be better to
            # detect the columns whose name starts with 'r'.
            self.dispresiduals.readTableColumn(1, 'r%d' % (row * 10))
            row_residuals = self.dispresiduals.column
            self.residuals.append(row_residuals)
            self.allwave.extend(self.wave)
            self.allresiduals.extend(row_residuals)
            self.allypos.extend([row * 10.] * nwave)

    def plotResVsWave(self, subplot, title, tooltip):
        """Scatter residual [pix] against wavelength [Ang]."""
        self.resdisplay.setLabels('Wavelength [Ang]', 'Residual [pix]')
        self.resdisplay.display(subplot, title, tooltip, self.allwave,
                                self.allresiduals)

    def plotResVsY(self, subplot, title, tooltip):
        """Scatter residual [pix] against detector Y position."""
        self.resdisplay.setLabels('Ypos [pix]', 'Residual [pix]')
        self.resdisplay.display(subplot, title, tooltip, self.allypos,
                                self.allresiduals)

    def getClosestLine(self, wave_selected):
        """Return the catalogue wavelength nearest to wave_selected."""
        distance = numpy.fabs(self.wave - wave_selected)
        return self.wave[numpy.nanargmin(distance)]
class PlotableDetectedLines :
    """Wrap the detected arc-lines table for XY and residual scatter plots.

    Newer products store rectified line positions; older ones only raw
    xpos/ypos.  loadFromFits() falls back to the old column names when
    the rectified ones are missing (KeyError).
    """
    def __init__(self, fits_detectedlines):
        self.detectedlines = PipelineProduct(fits_detectedlines)
        self.xydisplay = ScatterDisplay()
        self.resdisplay = ScatterDisplay()
        self.loadFromFits()
    def loadFromFits(self) :
        """Read line positions (both iterations), identified wavelengths
        and the X residuals from the detected-lines table."""
        # readTableColumn() stores its result in .column, so each read
        # must be copied out before the next one overwrites it.
        try :
            self.detectedlines.readTableColumn(1, 'xpos_rectified')
            self.x_pix = self.detectedlines.column
            self.detectedlines.readTableColumn(1, 'ypos_rectified')
            self.y_pix = self.detectedlines.column
            self.detectedlines.readTableColumn(1, 'xpos_rectified_iter')
            self.x_pix_iter = self.detectedlines.column
            self.detectedlines.readTableColumn(1, 'ypos_rectified_iter')
            self.y_pix_iter = self.detectedlines.column
        except KeyError:
            # Older table layout: positions were never rectified.
            self.detectedlines.readTableColumn(1, 'xpos')
            self.x_pix = self.detectedlines.column
            self.detectedlines.readTableColumn(1, 'ypos')
            self.y_pix = self.detectedlines.column
            self.detectedlines.readTableColumn(1, 'xpos_iter')
            self.x_pix_iter = self.detectedlines.column
            self.detectedlines.readTableColumn(1, 'ypos_iter')
            self.y_pix_iter = self.detectedlines.column
        self.detectedlines.readTableColumn(1, 'wave_ident')
        self.wave = self.detectedlines.column
        self.detectedlines.readTableColumn(1, 'wave_ident_iter')
        self.wave_iter = self.detectedlines.column
        self.detectedlines.readTableColumn(1, 'res_xpos')
        self.res_xpos = self.detectedlines.column
    def plotXVsY(self, subplot, title, tooltip):
        """Scatter all detected lines, highlighting identified ones."""
        #We first plot all the detected lines
        self.xydisplay.setLabels('Xpos [pix]','Ypos [pix]')
        self.xydisplay.setColor('black')
        self.xydisplay.display(subplot, title, tooltip, self.x_pix,
                               self.y_pix)
        #We then overplot the identified lines in the second iteration
        self.xydisplay.setColor('lightgreen')
        self.xydisplay.display(subplot, title, tooltip,
                               self.x_pix_iter[numpy.isfinite(self.wave_iter)],
                               self.y_pix_iter[numpy.isfinite(self.wave_iter)])
        #And then we overplot the identified lines in the first iteration
        self.xydisplay.setColor('green')
        self.xydisplay.display(subplot, title, tooltip,
                               self.x_pix[numpy.isfinite(self.wave)],
                               self.y_pix[numpy.isfinite(self.wave)])
    def plotResVsWave(self, subplot, title, tooltip, excluded_lines = None):
        """Scatter the X residual of each identified line against its
        wavelength, marking excluded lines with vertical red lines."""
        self.resdisplay.setLabels('Wavelength [Ang]','Residual [pix]')
        self.resdisplay.setColor('black')
        self.resdisplay.display(subplot, title, tooltip,
                                self.wave[numpy.isfinite(self.res_xpos)],
                                self.res_xpos[numpy.isfinite(self.res_xpos)])
        if excluded_lines is not None :
            for line in excluded_lines :
                subplot.axvline(line, linestyle='solid',color='red')
class PlotableSkylinesOffsets:
    """Wrap the sky-line offsets table for a residuals scatter plot."""

    def __init__(self, fits_skylines_off):
        self.skylines_off = PipelineProduct(fits_skylines_off)
        self.resdisplay = ScatterDisplay()
        self.loadFromFits()

    def loadFromFits(self):
        """Flatten the per-slit offset columns into parallel lists."""
        # The first column holds the reference wavelengths; every
        # remaining column holds the residuals of one slit.
        nslits = self.skylines_off.getTableNcols(1) - 1
        skylines_wave = self.skylines_off.readTableColumn(1, 'wave')
        self.allskylines_wave = list()
        self.allwave_res = list()
        for col in range(nslits):
            self.allskylines_wave.extend(skylines_wave)
            self.allwave_res.extend(self.skylines_off.readTableColumn(1, col + 1))

    def plot(self, subplot, title, tooltip):
        """Scatter residual [Ang] against wavelength [Ang]."""
        self.resdisplay.setLabels('Wavelength [Ang]', 'Residual [Ang]')
        self.resdisplay.setColor('black')
        self.resdisplay.setPointSize(7)
        self.resdisplay.display(subplot, title, tooltip,
                                self.allskylines_wave, self.allwave_res)
class PlotableExtractedScience :
    """Wrap an extracted-spectra image (one row per object) and plot the
    spectrum of the selected object on a wavelength axis rebuilt from
    the 1D linear WCS keywords."""
    def __init__(self, fits_extractedscience):
        # -1 means "no object chosen yet": loadFromFits() then selects
        # the brightest row.
        self.obj_id = -1
        self.extractedscience = PipelineProduct(fits_extractedscience)
        self.spectrumdisplay = SpectrumDisplay()
        self.loadFromFits()
    def loadFromFits(self) :
        """Read the image, rebuild the wavelength axis and cache the flux
        row of the selected object."""
        self.extractedscience.readImage()
        self.nobj = self.extractedscience.image.shape[0]
        self.crpix1 = self.extractedscience.readKeyword('CRPIX1', 0)
        self.crval1 = self.extractedscience.readKeyword('CRVAL1', 0)
        self.cdelt1 = self.extractedscience.readKeyword('CD1_1', 0)
        self.bunit = self.extractedscience.readKeyword('BUNIT', 0)
        self.nwave = self.extractedscience.image.shape[1]
        # 1-based pixel index -> wavelength via the linear WCS.
        self.wave = numpy.arange(1, self.nwave+1, 1)
        self.wave = (self.wave - self.crpix1) * self.cdelt1 + self.crval1
        if(self.obj_id == -1) : # Select brightest
            self.selectBrightest()
        self.setFluxSelected()
    def selectBrightest(self):
        """Select the object row with the highest median flux.

        NOTE(review): if nobj > 1 and every row's median is <= 0,
        obj_id stays -1 and setFluxSelected() will pick row index -2 —
        confirm whether that case can occur in practice.
        """
        if self.nobj == 1:
            self.obj_id = 1
        median = 0
        for obj in range(self.nobj) :
            new_median = numpy.median(self.extractedscience.image[obj,:])
            if new_median > median :
                median = new_median
                self.obj_id = obj + 1
    def setFluxSelected(self) :
        """Cache the flux row of the currently selected (1-based) object."""
        self.flux = self.extractedscience.image[self.obj_id-1,:]
    def selectObject(self, obj_id):
        """Switch the selection to the given 1-based object id."""
        self.obj_id = obj_id
        self.setFluxSelected()
    def plot(self, subplot, title, tooltip):
        """Plot flux against wavelength for the selected object."""
        self.spectrumdisplay.setLabels('Lambda', 'Total Flux ['+self.bunit+']')
        self.spectrumdisplay.display(subplot, title, tooltip, self.wave, self.flux,
                                autolimits = True)
class PlotableSpecPhot :
    """Wrap a spectro-photometric (response) product.

    Extension 1 holds the per-line standard-star table, extension 2 the
    fitted response sampled on the observed wavelength grid.  Products
    produced with a flat SED store the response under *_FFSED names;
    flat_sed records which variant was found.
    """
    def __init__(self, fits):
        self.resp = PipelineProduct(fits)
        self.respdisp = SpectrumDisplay()
        self.tabdisp = ScatterDisplay()
        self.flat_sed = False
        self.loadFromFits()
    def loadFromFits(self) :
        """Read wavelengths, fluxes and the (raw and fitted) response."""
        self.wave = self.resp.readTableColumn(1, 'WAVE')
        self.wave_obs = self.resp.readTableColumn(2, 'WAVE')
        self.std_ref_flux = self.resp.readTableColumn(1, 'STD_FLUX')
        self.std_obs_flux = self.resp.readTableColumn(1, 'OBS_FLUX')
        # Column names depend on whether a flat SED was used.
        if 'RESPONSE' in self.resp.all_hdu[2].columns.names :
            self.fit_response = self.resp.readTableColumn(2, 'RESPONSE')
            self.raw_response = self.resp.readTableColumn(1, 'RAW_RESPONSE')
        else :
            self.fit_response = self.resp.readTableColumn(2, 'RESPONSE_FFSED')
            self.raw_response = self.resp.readTableColumn(1, 'RAW_RESPONSE_FFSED')
            self.flat_sed = True
        self.used_fit = self.resp.readTableColumn(1, 'USED_FIT')
        # Pre-select the valid (non-null) and fit-selected samples.
        self.raw_response_nonnull = self.raw_response[self.raw_response > 0]
        self.wave_nonnull = self.wave[self.raw_response > 0]
        self.wave_used = self.wave[self.used_fit > 0]
        self.raw_response_used = self.raw_response[self.used_fit > 0]
    def plotResponse(self, subplot, title, tooltip):
        """Plot the fitted response and overlay the raw samples (blue)
        and the samples actually used in the fit (green)."""
        self.respdisp.setLabels('Angstrom','10^ (-16) erg/(cm^ (-2) e-)')
        self.respdisp.flux_lim = 0., numpy.max(self.raw_response_nonnull) * 1.1
        self.respdisp.display(subplot, title, tooltip, self.wave_obs, self.fit_response, autolimits = False)
        subplot.scatter(self.wave_nonnull, self.raw_response_nonnull, color='darkblue')
        subplot.scatter(self.wave_used, self.raw_response_used, color='lightgreen')
    def plotStdExtracted(self, subplot, title, tooltip):
        """Plot the observed (extracted) standard-star flux."""
        self.respdisp.setLabels('Angstrom','e-/ (s Angstrom)')
        std_obs_flux_nonnull = self.std_obs_flux[self.std_obs_flux > 0]
        wave_nonnull = self.wave[self.std_obs_flux > 0]
        self.respdisp.display(subplot, title, tooltip,
                            wave_nonnull, std_obs_flux_nonnull, autolimits = True)
    def plotStdTabulated(self, subplot, title, tooltip):
        """Scatter the tabulated reference flux of the standard star."""
        self.tabdisp.setLabels('Angstrom','10^ (-16) erg/(cm^ (-2) e-)')
        self.tabdisp.display(subplot, title, tooltip, self.wave, self.std_ref_flux)
class PlotableStdTabRedFlux :
def __init__(self, reducedfluxstd_fits, reducedstd_fits, specphot_fits):
    """Bundle the flux-calibrated standard, the uncalibrated standard
    and the spectro-photometric table into one plotable object."""
    self.reducedfluxstd = PlotableExtractedScience(reducedfluxstd_fits)
    self.reducedstd = PlotableExtractedScience(reducedstd_fits)
    self.specphot = PlotableSpecPhot(specphot_fits)
    self.tabstddisp = ScatterDisplay()
    self.stdreddisp = ScatterDisplay()
    self.loadFromFits()
def loadFromFits(self):
    """Reload every component and pre-select the reference-flux samples.

    PlotableExtractedScience picks the brightest spectrum, which is also
    the criterion used to extract the standard star.
    """
    for component in (self.reducedfluxstd, self.reducedstd, self.specphot):
        component.loadFromFits()
    specphot = self.specphot
    self.std_ref_flux_nonnull = specphot.std_ref_flux[specphot.raw_response > 0]
    self.std_ref_flux_used = specphot.std_ref_flux[specphot.used_fit > 0]
def | |
<reponame>sshaky2/python-netflow-v9-softflowd
#!/usr/bin/env python
import json
import time
import requests
import sys
import traceback
import re
import types
from abc import abstractmethod
from bottle import request, response
from ..smcontext import SmContext, ServiceManagerException
from ..smport import PortProvider
# Host/port the service-manager HTTP API listens on (also echoed back to
# clients in the start-response log message).
RUN_ON_PORT = 8085
RUN_ON_HOST = 'localhost'
# NOTE(review): timeout presumably bounds service start-up waits; its use
# is outside this excerpt — confirm before relying on it.
SERVICE_START_TIMEOUT_SECONDS = 90
# Upper bound on user-supplied test identifiers, enforced by
# SmRequest._validate_test_id.
MAX_TEST_ID_LENGTH = 40
# NOTE(review): appears to map legacy runFrom values to their
# replacements; usage is outside this excerpt.
deprecated_release_params = {"SNAPSHOT_JAR": "SNAPSHOT", "RELEASE_JAR": "RELEASE"}
class BadRequestException(Exception):
    """Raised when an incoming request payload is invalid (maps to HTTP 400)."""

    def __init__(self, message):
        super(BadRequestException, self).__init__(message)
class SmResponse:
    """Static helpers building JSON error bodies for the bottle app.

    Each helper sets the HTTP status on the thread-local bottle
    ``response`` object and returns the serialized error body.
    """
    def __init__(self):
        pass  # Nothing to initialise; only static helpers are used.
    @staticmethod
    def bad_request(message):
        # Log the rejection, then answer with a 400 JSON body.
        print "Bad Request: " + message
        response.status = 400
        return json.dumps({"statusCode": 400, "errorMessage": message})
    @staticmethod
    def error_500(message):
        # Internal error: answer with a 500 JSON body.
        response.status = 500
        return json.dumps({"statusCode": 500, "errorMessage": message})
class SmRequest:
    """Base class for service-manager API requests.

    Validates the common payload fields (``testId`` and the optional
    ``features`` list) and builds the SmContext used by subclasses'
    process_request() implementations.
    """
    def __init__(self, server, json_body, offlineMode, show_progress, verbose):
        self.server = server
        self.json_body = json_body
        # testId is mandatory for every request type.
        try:
            test_id = self.json_body["testId"]
        except Exception:
            raise BadRequestException("Missing testId parameter")
        SmRequest._validate_test_id(test_id)
        request_specific_features = SmRequest._extract_and_validate_request_specific_features(self.json_body)
        self.test_id = test_id
        self.context = SmContext(server.application, self.test_id, show_progress=show_progress, request_specific_features=request_specific_features, offline=offlineMode, verbose=verbose)
    @abstractmethod
    def process_request(self):
        """Handle the request; implemented by each concrete subclass."""
        pass
    @staticmethod
    def _extract_and_validate_request_specific_features(json_body):
        """Return the validated 'features' list, or None when absent/empty.

        Raises BadRequestException unless 'features' is a list of strings.
        """
        if not "features" in json_body:
            return None
        request_specific_features = json_body["features"]
        if not request_specific_features:
            return None
        if not isinstance(request_specific_features, list):
            raise BadRequestException("'features' must be a list of strings")
        for feature in request_specific_features:
            # basestring: this module is Python 2.
            if not isinstance(feature, basestring):
                raise BadRequestException("'features' must be a list of strings")
        return request_specific_features
    def _bad_request_exception(self, message):
        """Build a BadRequestException prefixed with this request's test id."""
        return BadRequestException("[%s] %s" % (self.test_id, message))
    def _log(self, message):
        self.context.log(message)
    @staticmethod
    def _validate_test_id(test_id):
        """Reject ids with invalid characters, the reserved 'LOCAL' value,
        or ids longer than MAX_TEST_ID_LENGTH."""
        regex = re.compile("^[a-zA-Z0-9\-_]+$")
        if not regex.match(test_id):
            raise BadRequestException("Invalid parameter 'testId' with value '%s', valid characters are 'a-z', 'A-Z', '0-9', '-' and '_'" % test_id)
        if test_id.upper() == "LOCAL":
            raise BadRequestException("'%s' is not a valid value for testId" % test_id)
        if len(test_id) > MAX_TEST_ID_LENGTH:
            raise BadRequestException("Test id '%s' is too long (%d characters) (maximum is %d characters)" % (test_id, len(test_id), MAX_TEST_ID_LENGTH))
    def _get_or_throw_bad_request(self, obj, key, message):
        """Return obj[key], raising a 400-mapped error when the key is
        missing or its value is falsy."""
        if key not in obj:
            raise self._bad_request_exception(message)
        value = obj[key]
        if not value:
            raise self._bad_request_exception(message)
        return value
    def _stop_services(self, drop_databases):
        """Kill every service of this test and optionally drop its databases.

        Returns the list of errors reported by the kill operation.
        """
        self._log("Stopping services (drop databases = %s)" % drop_databases)
        errors = self.context.kill()
        if drop_databases:
            # Per-service Mongo databases first, then the shared test one.
            for service_name in self.server.service_names_for_test(self.test_id):
                if self.context.service_data(service_name).get("hasMongo", False):
                    self.context.drop_database_for_service(service_name)
            self.context.drop_database_for_test()
        self.server.test_stopped(self.test_id)
        return errors
class SmStartRequest(SmRequest):
    """Handles a /start request: validates the payload, assigns ports, and
    starts (then health-checks) the requested services for a test run."""

    def __init__(self, server, json_request_body, do_not_run_from_source, offlineMode, show_progress, verbose):
        # When True, requests asking for runFrom=SOURCE are rejected
        # (smserver was started with --nosource).
        self.do_not_run_from_source = do_not_run_from_source
        self.json_body = json_request_body
        SmRequest.__init__(self, server, self.json_body, offlineMode, show_progress, verbose)

    def process_request(self):
        """Start all requested services and return a JSON array of
        {"serviceName": ..., "port": ...} entries, or a 500 response
        (after stopping everything) if any service fails to start."""
        # START REQUEST PAYLOAD:
        # {
        #     "testId": "blah",
        #     "features": ["feature1", "feature2", ...],
        #     "services": [
        #         {"serviceName" : "auth", "runFrom" : "SNAPSHOT"},
        #         {"serviceName" : "matching", "runFrom" : "RELEASE", "version" : "3.0.1"},
        #         {"serviceName" : "portal", "runFrom" : "SOURCE"},
        #         {"serviceName" : "nps", "runFrom" : "SOURCE"},
        #         ...
        #     ]
        # }
        self._log("Processing service start request")
        if self.server.is_running(self.test_id):
            raise self._bad_request_exception("Test '%s' is already running" % self.test_id)
        self.server.starting_test(self.test_id)
        services_to_start = self._get_or_throw_bad_request(self.json_body, "services", "'services' missing from request")
        self._log("Service(s) to start: " + str(services_to_start))
        (orchestration_services, service_mapping_ports) = self._validate_start_request_and_assign_ports(services_to_start, self.do_not_run_from_source)
        try:
            self._start_services_for_test(orchestration_services, service_mapping_ports)
            sm_response = []
            for service_mapping_name in service_mapping_ports:
                sm_response += [{"serviceName": service_mapping_name, "port": service_mapping_ports[service_mapping_name]}]
            self._log("All services started! To kill the running processes for this test, POST {\"testId\":\"%s\"} to http://%s:%s/stop" % (self.test_id, RUN_ON_HOST, RUN_ON_PORT))
            return json.dumps(sm_response)
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            # NOTE: e.message is Python 2 only (removed in Python 3).
            return self._stop_services_and_return_500("Unexpected exception: " + e.message)

    # {"AUTH": {"port": 43124, "runFrom":"JAR", "serviceMapping" : "auth"}}
    def _start_services(self, orchestration_services, service_mapping_ports, proxy):
        """Kick off every orchestration service (does not wait for startup)."""
        for service_name in orchestration_services:
            port = orchestration_services[service_name]["port"]
            admin_port = orchestration_services[service_name]["adminPort"]
            run_from = orchestration_services[service_name]["runFrom"]
            classifier = orchestration_services[service_name]["classifier"]
            version = orchestration_services[service_name]["version"]
            append_args = orchestration_services[service_name]["appendArgs"]  # Allows for dynamic config overriding
            # Allow for deprecated run_from values
            if run_from in deprecated_release_params:
                run_from = deprecated_release_params[run_from]
            self.context.start_service(service_name, run_from, proxy, classifier, service_mapping_ports, port, admin_port, version, append_args)

    def _await_service_startup(self, service_name, port, admin_port):
        """Poll the service's healthcheck URL (1s interval) until it matches
        the expected response regex or SERVICE_START_TIMEOUT_SECONDS elapse;
        raises on timeout.  Services without a healthcheck are assumed up."""
        seconds_remaining = SERVICE_START_TIMEOUT_SECONDS
        servicedata = self.context.service_data(service_name)
        if "healthcheck" in servicedata:
            healthcheck_url = servicedata["healthcheck"]["url"].replace("${port}", str(admin_port))
            healthcheck_response_regex = self.context.service_data(service_name)["healthcheck"]["response"]
            while seconds_remaining > 0:
                if (seconds_remaining < 10 or seconds_remaining % 5 == 0) and seconds_remaining != 1:
                    self._log("Waiting for %s to start on port %d, %d seconds before timeout" % (service_name, port, seconds_remaining))
                elif seconds_remaining == 1:
                    self._log("Waiting for %s to start on port %d, 1 second before timeout" % (service_name, port))
                try:
                    ping_response = requests.get(healthcheck_url)
                    response_text = ping_response.text
                    healthcheck = re.search(healthcheck_response_regex, response_text)
                except requests.RequestException:
                    healthcheck = False
                # NOTE(review): "seconds_remaining == 0" can never be true here,
                # since the loop guard requires seconds_remaining > 0.
                if healthcheck or (seconds_remaining == 0):
                    self._log("Service %s health check SUCCESSFUL" % service_name)
                    break
                else:
                    seconds_remaining -= 1
                    time.sleep(1)
            if seconds_remaining <= 0:
                raise self.context.exception("Service %s - healthcheck did not pass within allocated time (%d seconds)" % (service_name, SERVICE_START_TIMEOUT_SECONDS))
        else:
            self._log("There is no health check for '%s'. This is not really advisable we can only assume it has started correctly" % service_name)

    def _start_services_for_test(self, orchestration_services, service_mapping_ports):
        """Start all services, registering each with the server, then wait
        for each one's healthcheck."""
        self._start_services(orchestration_services, service_mapping_ports, None)
        for service_name in orchestration_services:
            self.server.starting_service_for_test(self.test_id, service_name)
            port = orchestration_services[service_name]["port"]
            admin_port = orchestration_services[service_name]["adminPort"]
            self._await_service_startup(service_name, port, admin_port)

    def _stop_services_and_return_500(self, message):
        """Best-effort stop of everything started so far, then a 500 payload."""
        self._log(message)
        errors = self._stop_services(drop_databases=True)
        if errors:
            self._log("Errors during shutdown: %s" % str(errors))
        return SmResponse.error_500(message)

    def _service_mapping_for(self, service_start_request):
        """Resolve one request entry against the application's service
        mappings.

        Returns (service_mapping_name, service_name, classifier, version,
        append_args).  A dict-valued mapping means the service requires a
        classifier; a scalar mapping forbids one.  Raises a bad-request
        error on any mismatch.
        """
        service_mapping_name = self._get_or_throw_bad_request(service_start_request, "serviceName", "Missing 'serviceName' parameter in instruction to start services")
        mapping = self._get_or_throw_bad_request(self.context.application.service_mappings, service_mapping_name, "Unknown service name '%s'" % service_mapping_name)
        need_classifier = isinstance(mapping, dict)
        have_classifier = "classifier" in service_start_request and service_start_request["classifier"]
        version = None
        if "version" in service_start_request and service_start_request["version"]:
            version = service_start_request["version"]
        append_args = service_start_request.get("appendArgs", [])
        if need_classifier:
            valid_classifiers = "[" + (",".join(str(x) for x in mapping.keys())) + "]"
            if not have_classifier:
                raise self._bad_request_exception("Service '%s' requires a classifier (one of: %s)" % (service_mapping_name, valid_classifiers))
            classifier = service_start_request["classifier"]
            if classifier not in mapping:
                raise self._bad_request_exception("Unknown classifier '%s' for service '%s' (expected one of: %s)" % (classifier, service_mapping_name, valid_classifiers))
            service_name = mapping[classifier]
        else:
            if have_classifier:
                raise self._bad_request_exception("Service '%s' does not take classifiers (found: '%s')" % (service_mapping_name, service_start_request["classifier"]))
            service_name = mapping
            classifier = None
        return service_mapping_name, service_name, classifier, version, append_args

    def _validate_start_request_and_assign_ports(self, services_to_start, dontrunfromsource):
        """Validate every entry and assign ports.

        Returns (orchestration_services, service_mapping_ports) where
        orchestration_services maps underlying service name to its start
        parameters and service_mapping_ports maps the requested mapping name
        to its assigned port.  Several mappings may share one underlying
        service, in which case their runFrom/classifier values must agree.
        """
        orchestration_services = {}
        service_mapping_ports = {}
        for service_start_request in services_to_start:
            service_mapping_name, service_name, classifier, version, append_args = self._service_mapping_for(service_start_request)
            # NOTE: types.ListType is Python 2 only (alias of list).
            if append_args and not isinstance(append_args, types.ListType):
                raise self._bad_request_exception("ERROR: I was passed a non list for append args of '" + str(append_args) + "' I dont know what to do with this")
            if service_mapping_name in service_mapping_ports:
                raise self._bad_request_exception("Duplicate entry for service '%s' in start request" % service_mapping_name)
            run_from = self._get_or_throw_bad_request(service_start_request, "runFrom", "Missing 'runFrom' parameter in instruction to start '%s'" % service_mapping_name)
            # NOTE: list + dict.keys() works on Python 2 (keys() is a list there).
            if run_from not in ["SOURCE", "SNAPSHOT", "RELEASE"] + deprecated_release_params.keys():
                raise self._bad_request_exception("runFrom parameter has invalid value '%s' (should be 'SOURCE', 'SNAPSHOT' or 'RELEASE')" % run_from)
            if dontrunfromsource:
                if run_from == "SOURCE":
                    raise self._bad_request_exception("runFrom parameter has value '%s', however --nosource was specified when smserver started" % run_from)
            if append_args and not self.context.get_service_starter(service_name, run_from, None).supports_append_args():
                raise BadRequestException("The service type for '" + service_name + "' does not support append args")
            if service_name in orchestration_services:
                # Underlying service already scheduled: reuse its port, but the
                # requests must not contradict each other.
                existing_entry = orchestration_services[service_name]
                service_mapping_ports[service_mapping_name] = existing_entry["port"]
                if run_from != existing_entry["runFrom"]:
                    raise self._bad_request_exception("Conflicting runFrom values (%s and %s) for underlying service '%s'" % (run_from, existing_entry["runFrom"], service_name))
                if classifier and existing_entry["classifier"] and classifier != existing_entry["classifier"]:
                    raise self._bad_request_exception("Conflicting classifier values (%s and %s) for underlying service '%s'" % (classifier, existing_entry["classifier"], service_name))
            else:
                port = self.server.next_available_port()
                # Dropwizard services get a separate admin port; others reuse
                # the main port.
                admin_port = self.server.next_available_port() if self.context.service_type(service_name) == "dropwizard" else port
                service_mapping_ports[service_mapping_name] = port
                orchestration_services[service_name] = {
                    "port": port,
                    "adminPort": admin_port,
                    "runFrom": run_from,
                    "classifier": classifier,
                    "version": version,
                    "appendArgs": append_args
                }
        return orchestration_services, service_mapping_ports
class SmStopRequest(SmRequest):
    """Handles a /stop request: stops all services for a test and optionally
    drops their databases."""

    def __init__(self, server, json_request_body, offlineMode, show_progress, verbose):
        SmRequest.__init__(self, server, json_request_body, offlineMode, show_progress, verbose)

    def process_request(self):
        """Stop every service belonging to self.test_id.

        Sets HTTP 204 and returns None on success; on shutdown errors sets
        HTTP 500 and returns a JSON error payload.  Raises
        BadRequestException for an unknown test id or a non-boolean
        'dropDatabases' value.
        """
        if not self.server.is_running(self.test_id):
            raise BadRequestException("Invalid test id (or already stopped): %s" % self.test_id)
        self._log("Stopping test")
        drop_databases = self.json_body.get("dropDatabases", True)
        # isinstance is the idiomatic type check; bool has no subclasses in
        # practice, so behaviour is unchanged from "type(...) is not bool".
        if not isinstance(drop_databases, bool):
            raise self._bad_request_exception("dropDatabases parameter must be boolean (value was: %s)" % drop_databases)
        errors = self._stop_services(drop_databases)
        if errors:
            self._log("Completed stopping services - errors occurred: %s" % str(errors))
            response.status = 500
            return json.dumps({"statusCode": 500, "errorMessage": errors})
        else:
            self._log("Successfully stopped services")
            response.status = 204
class SmShutdownRequest():
    """Handles full server shutdown: stops every running test and drops all
    of their databases."""

    def __init__(self, server):
        self.server = server

    def process_request(self):
        """Kill all services and drop all databases for every running test.

        Returns None; progress is reported via print and context logging.
        """
        # Parenthesised print produces identical output for a single argument
        # on Python 2 and is valid Python 3, unlike the bare print statement
        # previously used here (a SyntaxError on Python 3).
        print("shutting down...")
        for test_id in self.server.running_tests:
            context = SmContext(self.server.application, test_id)
            context.log("Killing everything for testId %s..." % test_id)
            context.kill()
            for service_name in self.server.service_names_for_test(test_id):
                if context.service_data(service_name).get("hasMongo", False):
                    context.drop_database_for_service(service_name)
            context.drop_database_for_test()
            context.log("Successfully stopped all services for testId %s..." % test_id)
        print("finished shutting down...")
class SmServer:
def __init__(self, application):
    """Track running tests and hand out ports for application's services."""
    self.application = application
    self.port_provider = PortProvider()
    # Map of test_id to list of service names
    self.running_tests = {}
def next_available_port(self):
    """Return the next free port from the shared PortProvider."""
    return self.port_provider.next_available_port()
def service_names_for_test(self, test_id):
    """Return the service names started for test_id, or [] if the test is
    not currently running."""
    if not self.is_running(test_id):
        return []
    return self.running_tests[test_id]
def is_running(self, test_id):
    """True if test_id currently has an entry in running_tests."""
    return test_id in self.running_tests
def starting_test(self, test_id):
    """Register test_id as running, with no services yet.

    Raises ServiceManagerException if the test is already running.
    """
    if self.is_running(test_id):
        raise ServiceManagerException("Test '%s' is already running" % test_id)
    self.running_tests[test_id] = []
def starting_service_for_test(self, test_id, service_name):
if not self.is_running(test_id):
raise ServiceManagerException("Test '%s' is not running" % test_id)
| |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""An implementation of the session and presentation layers as used in the Debug
Adapter Protocol (DAP): channels and their lifetime, JSON messages, requests,
responses, and events.
https://microsoft.github.io/debug-adapter-protocol/overview#base-protocol
"""
import collections
import contextlib
import functools
import itertools
import os
import socket
import sys
import threading
from debugpy.common import compat, fmt, json, log
from debugpy.common.compat import unicode
class JsonIOError(IOError):
    """Indicates that a read or write operation on JsonIOStream has failed.
    """

    def __init__(self, *args, **kwargs):
        """stream is a required keyword argument; cause is the optional
        underlying exception, used as the message when no positional
        args are given."""
        stream = kwargs.pop("stream")
        cause = kwargs.pop("cause", None)
        # An empty args tuple is falsy; "not len(args)" was redundant.
        if not args and cause is not None:
            args = [str(cause)]
        super(JsonIOError, self).__init__(*args, **kwargs)

        self.stream = stream
        """The stream that couldn't be read or written.

        Set by JsonIOStream.read_json() and JsonIOStream.write_json().

        JsonMessageChannel relies on this value to decide whether a NoMoreMessages
        instance that bubbles up to the message loop is related to that loop.
        """

        self.cause = cause
        """The underlying exception, if any."""
class NoMoreMessages(JsonIOError, EOFError):
    """Indicates that there are no more messages that can be read from or written
    to a stream.
    """

    def __init__(self, *args, **kwargs):
        # Default message when no positional arguments were supplied.
        if not args:
            args = ["No more messages"]
        super(NoMoreMessages, self).__init__(*args, **kwargs)
class JsonIOStream(object):
    """Implements a JSON value stream over two byte streams (input and output).

    Each value is encoded as a DAP packet, with metadata headers and a JSON payload.
    """

    # Maximum accepted Content-Length (~16 MiB); larger bodies are rejected.
    MAX_BODY_SIZE = 0xFFFFFF

    json_decoder_factory = json.JsonDecoder
    """Used by read_json() when decoder is None."""

    json_encoder_factory = json.JsonEncoder
    """Used by write_json() when encoder is None."""

    @classmethod
    def from_stdio(cls, name="stdio"):
        """Creates a new instance that receives messages from sys.stdin, and sends
        them to sys.stdout.

        On Win32, this also sets stdin and stdout to binary mode, since the protocol
        requires that to work properly.
        """
        if sys.version_info >= (3,):
            # On Python 3, the text wrappers must be bypassed for raw bytes.
            stdin = sys.stdin.buffer
            stdout = sys.stdout.buffer
        else:
            stdin = sys.stdin
            stdout = sys.stdout
        if sys.platform == "win32":
            import os, msvcrt

            msvcrt.setmode(stdin.fileno(), os.O_BINARY)
            msvcrt.setmode(stdout.fileno(), os.O_BINARY)
        return cls(stdin, stdout, name)

    @classmethod
    def from_process(cls, process, name="stdio"):
        """Creates a new instance that reads messages from process.stdout, and
        writes them to process.stdin - i.e. it talks to the child process's
        stdio from the parent's side.
        """
        return cls(process.stdout, process.stdin, name)

    @classmethod
    def from_socket(cls, sock, name=None):
        """Creates a new instance that sends and receives messages over a socket.
        """
        sock.settimeout(None)  # make socket blocking
        if name is None:
            name = repr(sock)

        # TODO: investigate switching to buffered sockets; readline() on unbuffered
        # sockets is very slow! Although the implementation of readline() itself is
        # native code, it calls read(1) in a loop - and that then ultimately calls
        # SocketIO.readinto(), which is implemented in Python.
        socket_io = sock.makefile("rwb", 0)

        # SocketIO.close() doesn't close the underlying socket.
        def cleanup():
            try:
                sock.shutdown(socket.SHUT_RDWR)
            except Exception:
                pass
            sock.close()

        return cls(socket_io, socket_io, name, cleanup)

    def __init__(self, reader, writer, name=None, cleanup=lambda: None):
        """Creates a new JsonIOStream.

        reader must be a BytesIO-like object, from which incoming messages will be
        read by read_json().

        writer must be a BytesIO-like object, into which outgoing messages will be
        written by write_json().

        cleanup must be a callable; it will be invoked without arguments when the
        stream is closed.

        reader.readline() must treat "\n" as the line terminator, and must leave "\r"
        as is - it must not replace "\r\n" with "\n" automatically, as TextIO does.
        """
        if name is None:
            name = fmt("reader={0!r}, writer={1!r}", reader, writer)
        self.name = name
        self._reader = reader
        self._writer = writer
        self._cleanup = cleanup
        self._closed = False

    def close(self):
        """Closes the stream, the reader, and the writer.
        """
        if self._closed:
            return
        self._closed = True
        log.debug("Closing {0} message stream", self.name)
        try:
            try:
                # Close the writer first, so that the other end of the connection has
                # its message loop waiting on read() unblocked. If there is an exception
                # while closing the writer, we still want to try to close the reader -
                # only one exception can bubble up, so if both fail, it'll be the one
                # from reader.
                try:
                    self._writer.close()
                finally:
                    if self._reader is not self._writer:
                        self._reader.close()
            finally:
                self._cleanup()
        except Exception:
            # On Python 2, close() will raise an exception if there is a concurrent
            # read() or write(), which is a common and expected occurrence with
            # JsonMessageChannel, so don't even bother logging it.
            if sys.version_info >= (3,):
                log.reraise_exception(
                    "Error while closing {0} message stream", self.name
                )

    def _log_message(self, dir, data, logger=log.debug):
        # "{...!j}" is the debugpy log formatter's JSON conversion; lists are
        # rendered without indentation to keep batches on one line.
        format_string = "{0} {1} " + (
            "{2!j:indent=None}" if isinstance(data, list) else "{2!j}"
        )
        return logger(format_string, self.name, dir, data)

    def _read_line(self, reader):
        # Accumulates reads until a complete CRLF-terminated line is seen,
        # then returns it without the trailing "\r\n".  EOF or a read error
        # surfaces as NoMoreMessages.
        line = b""
        while True:
            try:
                line += reader.readline()
            except Exception as exc:
                raise NoMoreMessages(str(exc), stream=self)
            if not line:
                raise NoMoreMessages(stream=self)
            if line.endswith(b"\r\n"):
                line = line[0:-2]
                return line

    def read_json(self, decoder=None):
        """Read a single JSON value from reader.

        Returns JSON value as parsed by decoder.decode(), or raises NoMoreMessages
        if there are no more values to be read.
        """
        decoder = decoder if decoder is not None else self.json_decoder_factory()
        reader = self._reader
        read_line = functools.partial(self._read_line, reader)

        # If any error occurs while reading and parsing the message, log the original
        # raw message data as is, so that it's possible to diagnose missing or invalid
        # headers, encoding issues, JSON syntax errors etc.
        def log_message_and_reraise_exception(format_string="", *args, **kwargs):
            if format_string:
                format_string += "\n\n"
            format_string += "{name} -->\n{raw_lines}"
            raw_lines = b"".join(raw_chunks).split(b"\n")
            raw_lines = "\n".join(repr(line) for line in raw_lines)
            log.reraise_exception(
                format_string, *args, name=self.name, raw_lines=raw_lines, **kwargs
            )

        raw_chunks = []
        headers = {}
        # Read the header block: "Key: value" lines terminated by a blank line.
        while True:
            try:
                line = read_line()
            except Exception:
                # Only log it if we have already read some headers, and are looking
                # for a blank line terminating them. If this is the very first read,
                # there's no message data to log in any case, and the caller might
                # be anticipating the error - e.g. NoMoreMessages on disconnect.
                if headers:
                    log_message_and_reraise_exception(
                        "Error while reading message headers:"
                    )
                else:
                    raise
            raw_chunks += [line, b"\n"]
            if line == b"":
                break
            key, _, value = line.partition(b":")
            headers[key] = value

        try:
            length = int(headers[b"Content-Length"])
            if not (0 <= length <= self.MAX_BODY_SIZE):
                raise ValueError
        except (KeyError, ValueError):
            try:
                raise IOError("Content-Length is missing or invalid:")
            except Exception:
                log_message_and_reraise_exception()

        # Read exactly `length` bytes of body; reader.read() may return less
        # than requested, so loop until done.
        body_start = len(raw_chunks)
        body_remaining = length
        while body_remaining > 0:
            try:
                chunk = reader.read(body_remaining)
                if not chunk:
                    raise EOFError
            except Exception as exc:
                # Not logged due to https://github.com/microsoft/ptvsd/issues/1699
                raise NoMoreMessages(str(exc), stream=self)
            raw_chunks.append(chunk)
            body_remaining -= len(chunk)
        assert body_remaining == 0

        body = b"".join(raw_chunks[body_start:])
        try:
            body = body.decode("utf-8")
        except Exception:
            log_message_and_reraise_exception()

        try:
            body = decoder.decode(body)
        except Exception:
            log_message_and_reraise_exception()

        # If parsed successfully, log as JSON for readability.
        self._log_message("-->", body)
        return body

    def write_json(self, value, encoder=None):
        """Write a single JSON value into writer.

        Value is written as encoded by encoder.encode().
        """
        if self._closed:
            # Don't log this - it's a common pattern to write to a stream while
            # anticipating EOFError from it in case it got closed concurrently.
            raise NoMoreMessages(stream=self)

        encoder = encoder if encoder is not None else self.json_encoder_factory()
        writer = self._writer

        # Format the value as a message, and try to log any failures using as much
        # information as we already have at the point of the failure. For example,
        # if it fails after it is serialized to JSON, log that JSON.
        try:
            body = encoder.encode(value)
        except Exception:
            raise self._log_message("<--", value, logger=log.exception)
        if not isinstance(body, bytes):
            body = body.encode("utf-8")

        header = fmt("Content-Length: {0}\r\n\r\n", len(body))
        header = header.encode("ascii")

        data = header + body
        data_written = 0
        try:
            while data_written < len(data):
                written = writer.write(data[data_written:])
                # On Python 2, socket.makefile().write() does not properly implement
                # BytesIO.write(), and always returns None instead of the number of
                # bytes written - but also guarantees that it is always a full write.
                if written is None:
                    break
                data_written += written
            writer.flush()
        except Exception as exc:
            self._log_message("<--", value, logger=log.exception)
            raise JsonIOError(stream=self, cause=exc)

        self._log_message("<--", value)

    def __repr__(self):
        return fmt("{0}({1!r})", type(self).__name__, self.name)
class MessageDict(collections.OrderedDict):
"""A specialized dict that is used for JSON message payloads - Request.arguments,
Response.body, and Event.body.
For all members that normally throw KeyError when a requested key | |
"FEGO": (8.90691666667, 22.7976666667),
"FEGR": (4.98863888889, 23.9280833333),
"FEGU": (6.51705555556, 18.2725555556),
"FEGZ": (6.34416666667, 16.3220555556),
"FGAB": (-1.41027777778, 5.62194444444),
"FGBT": (1.90527777778, 9.80555555556),
"FGSL": (3.75527777778, 8.70861111111),
"FHAW": (-7.96944444444, -14.3936111111),
"FIMP": (-20.43, 57.6833333333),
"FIMR": (-19.7577777778, 63.3619444444),
"FJDG": (-7.31305555556, 72.4108333333),
"FKKB": (2.87433333333, 9.97716666667),
"FKKC": (4.08916666667, 9.36027777778),
"FKKD": (4.00583333333, 9.71944444444),
"FKKF": (5.7045, 9.30577777778),
"FKKG": (5.89536111111, 10.0338888889),
"FKKH": (10.0928888889, 14.4445),
"FKKI": (4.47305555556, 14.3636111111),
"FKKJ": (10.3565277778, 15.2376944444),
"FKKL": (10.4513888889, 14.2572222222),
"FKKM": (5.63666666667, 10.7505555556),
"FKKN": (7.35694444444, 13.5591666667),
"FKKO": (4.54991666667, 13.7258888889),
"FKKR": (9.33583333333, 13.37),
"FKKS": (5.44747222222, 10.0679444444),
"FKKU": (5.53694444444, 10.3541666667),
"FKKV": (6.03916666667, 10.1225),
"FKKW": (2.87738888889, 11.1841666667),
"FKKY": (3.83527777778, 11.5236111111),
"FKYS": (3.7225, 11.5533333333),
"FLCP": (-13.5569444444, 32.5869444444),
"FLKE": (-12.5727777778, 27.8938888889),
"FLKL": (-14.9975, 22.6475),
"FLLC": (-15.4138888889, 28.3305555556),
"FLLI": (-17.8216666667, 25.8225),
"FLLS": (-15.3305555556, 28.4525),
"FLMA": (-11.1380555556, 28.875),
"FLMF": (-13.2586111111, 31.9363888889),
"FLMG": (-15.2544444444, 23.1622222222),
"FLML": (-12.5647222222, 28.2986111111),
"FLND": (-12.9980555556, 28.6647222222),
"FLSO": (-12.9002777778, 28.1497222222),
"FLSW": (-12.1741666667, 26.3666666667),
"FLZB": (-13.5386111111, 23.1097222222),
"FMCH": (-11.5336111111, 43.2716666667),
"FMCI": (-12.2980555556, 43.7663888889),
"FMCN": (-11.7105555556, 43.2436111111),
"FMCV": (-12.1316666667, 44.4302777778),
"FMCZ": (-12.8047222222, 45.2811111111),
"FMEE": (-20.8869444444, 55.5102777778),
"FMEP": (-21.3208333333, 55.4247222222),
"FMMA": (-19.0291666667, 47.1719444444),
"FMMG": (-18.7, 44.6169444444),
"FMMI": (-18.7966666667, 47.4786111111),
"FMMK": (-18.8, 45.2833333333),
"FMML": (-19.6866666667, 44.5419444444),
"FMMN": (-19.5627777778, 45.4508333333),
"FMMO": (-18.0505555556, 44.0322222222),
"FMMR": (-17.85, 44.9208333333),
"FMMS": (-17.0938888889, 49.8158333333),
"FMMT": (-18.1094444444, 49.3925),
"FMMU": (-17.4761111111, 43.9730555556),
"FMMV": (-20.2847222222, 44.3175),
"FMMX": (-18.7636111111, 46.0519444444),
"FMMZ": (-17.7958333333, 48.4433333333),
"FMNA": (-12.3491666667, 49.2916666667),
"FMNC": (-16.1638888889, 49.7736111111),
"FMND": (-14.6516666667, 49.6205555556),
"FMNE": (-13.1883333333, 48.9877777778),
"FMNG": (-15.5833333333, 47.6166666667),
"FMNH": (-14.9991666667, 50.32),
"FMNJ": (-13.6333333333, 48.45),
"FMNL": (-14.6294444444, 47.7636111111),
"FMNM": (-15.6672222222, 46.3516666667),
"FMNN": (-13.3122222222, 48.3138888889),
"FMNO": (-16.0830555556, 45.3669444444),
"FMNQ": (-16.7419444444, 44.4813888889),
"FMNR": (-15.4366666667, 49.6883333333),
"FMNS": (-14.2786111111, 50.1747222222),
"FMNT": (-16.75, 47.6169444444),
"FMNV": (-13.3758333333, 50.0027777778),
"FMNW": (-14.8986111111, 47.9938888889),
"FMNZ": (-13.4847222222, 48.6325),
"FMSD": (-25.0380555556, 46.9561111111),
"FMSF": (-21.4413888889, 47.1116666667),
"FMSG": (-22.8052777778, 47.8205555556),
"FMSJ": (-21.4166666667, 44.3166666667),
"FMSK": (-22.1197222222, 48.0216666667),
"FMSM": (-21.2016666667, 48.3580555556),
"FMSR": (-21.7536111111, 43.3752777778),
"FMST": (-23.3833333333, 43.7283333333),
"FNAM": (-7.86266666667, 13.1156111111),
"FNBC": (-6.26972222222, 14.2469444444),
"FNBG": (-12.6088888889, 13.4036111111),
"FNCA": (-5.59694444444, 12.1883333333),
"FNCF": (-8.78347222222, 17.9905555556),
"FNCH": (-7.3575, 20.8036111111),
"FNCT": (-9.42947222222, 20.3111111111),
"FNCV": (-15.1604722222, 19.1571944444),
"FNCX": (-8.37361111111, 18.9236111111),
"FNCZ": (-11.8936111111, 22.9161111111),
"FNDU": (-7.40063888889, 20.8185277778),
"FNGI": (-17.0447222222, 15.6869444444),
"FNHU": (-12.8086111111, 15.7602777778),
"FNKU": (-12.4044444444, 16.9472222222),
"FNLB": (-12.3711111111, 13.5363888889),
"FNLK": (-8.44152777778, 20.7328333333),
"FNLU": (-8.85833333333, 13.2311111111),
"FNMA": (-9.525, 16.3122222222),
"FNME": (-14.6575, 17.7197222222),
"FNMO": (-15.2611111111, 12.1466666667),
"FNNG": (-7.75444444444, 15.2875),
"FNPA": (-10.7219444444, 13.7652777778),
"FNSA": (-9.68888888889, 20.4316666667),
"FNSO": (-6.14083333333, 12.3716666667),
"FNTO": (-7.14722222222, 14.2480555556),
"FNUA": (-10.7151944444, 22.2305),
"FNUB": (-14.92675, 13.57675),
"FNUE": (-11.7680555556, 19.8975),
"FNUG": (-7.60305555556, 15.0277777778),
"FNWK": (-11.4264166667, 15.1015),
"FNXA": (-16.7552777778, 14.9652777778),
"FNZE": (-7.25902777778, 12.8625833333),
"FNZG": (-7.71652777778, 21.3578611111),
"FOGA": (-1.13972222222, 13.9033333333),
"FOGO": (1.54111111111, 11.5808333333),
"FOGQ": (-0.665277777778, 13.6730555556),
"FOGR": (-0.704166666667, 10.2455555556),
"FOOB": (2.07555555556, 11.4930555556),
"FOOD": (-1.5375, 13.2691666667),
"FOOG": (-0.711666666667, 8.75416666667),
"FOOH": (-1.57472222222, 9.26277777778),
"FOOK": (0.579166666667, 12.8908333333),
"FOOL": (0.458333333333, 9.41222222222),
"FOOM": (0.775555555556, 11.5525),
"FOON": (-1.65611111111, 13.4377777778),
"FOOR": (-0.826388888889, 12.7466666667),
"FOOT": (-2.88888888889, 10.9194444444),
"FPPA": (0.0333333333333, 6.51666666667),
"FPPR": (1.66277777778, 7.41166666667),
"FPST": (0.378055555556, 6.71194444444),
"FQAG": (-16.1819444444, 39.9447222222),
"FQBR": (-19.7963888889, 34.9075),
"FQCB": (-14.82, 36.5319444444),
"FQCH": (-19.1511111111, 33.4288888889),
"FQES": (-15.7341666667, 32.7566666667),
"FQIN": (-23.8763888889, 35.4083333333),
"FQLC": (-13.2738888889, 35.2661111111),
"FQLU": (-15.0330555556, 40.6716666667),
"FQMA": (-25.9208333333, 32.5725),
"FQMD": (-11.6727777778, 39.5630555556),
"FQMP": (-11.3616666667, 40.3547222222),
"FQMR": (-13.225, 37.5519444444),
"FQNC": (-14.4880555556, 40.7122222222),
"FQNP": (-15.1055555556, 39.2816666667),
"FQPB": (-12.9866666667, 40.5222222222),
"FQQL": (-17.8555555556, 36.8691666667),
"FQSG": (-15.6025, 32.7730555556),
"FQTT": (-16.1047222222, 33.64),
"FQUG": (-14.7044444444, 34.3522222222),
"FQVL": (-22.0183333333, 35.3130555556),
"FSAL": (-7.00472222222, 52.7261111111),
"FSAS": (-9.73485, 46.5002916667),
"FSDR": (-5.69583333333, 53.6544444444),
"FSFA": (-10.1094444444, 51.1761111111),
"FSIA": (-4.67416666667, 55.5216666667),
"FSPP": (-4.31916666667, 55.6913888889),
"FSSB": (-3.72146944444, 55.2086972222),
"FSSC": (-7.16416666667, 56.2638888889),
"FSSD": (-3.800325, 55.6644527778),
"FSSF": (-4.58388611111, 55.9462722222),
"FTTA": (9.15111111111, 18.3794444444),
"FTTB": (10.2883333333, 15.3797222222),
"FTTC": (13.8469444444, 20.8441666667),
"FTTD": (8.62027777778, 16.0683333333),
"FTTE": (14.5239722222, 20.9116666667),
"FTTF": (17.1883333333, 21.5116666667),
"FTTG": (12.2107222222, 21.4588333333),
"FTTH": (9.39766666667, 16.3124166667),
"FTTI": (13.2397222222, 18.3135833333),
"FTTJ": (12.1336111111, 15.0338888889),
"FTTK": (12.3855, 17.0709444444),
"FTTL": (13.4435277778, 14.73925),
"FTTM": (12.17025, 18.6753611111),
"FTTN": (11.0354722222, 20.2749444444),
"FTTP": (9.37916666667, 14.9258333333),
"FTTR": (20.449, 16.5705555556),
"FTTS": (10.4916944444, 16.7204444444),
"FTTU": (14.1464444444, 15.3151388889),
"FTTY": (17.9169444444, 19.1108333333),
"FTTZ": (21.4511944444, 17.0595),
"FVBU": (-20.0172222222, 28.6177777778),
"FVCP": (-17.7513888889, 30.9244444444),
"FVCZ": (-21.0080555556, 31.5783333333),
"FVFA": (-18.0961111111, 25.8391666667),
"FVGR": (-18.9775, 32.4505555556),
"FVHA": (-17.9316666667, 31.0927777778),
"FVKB": (-16.5197222222, 28.8847222222),
"FVMT": (-17.4316666667, 32.1844444444),
"FVMU": (-18.9975, 32.6272222222),
"FVMV": (-20.0552777778, 30.8588888889),
"FVOT": (-16.985, 32.6727777778),
"FVSH": (-20.2894444444, 30.0883333333),
"FVTL": (-19.4366666667, 29.8616666667),
"FVWN": (-18.6297222222, 27.0208333333),
"FVZC": (-19.0286111111, 29.7219444444),
"FWCL": (-15.6788888889, 34.9738888889),
"FWCM": (-14.3069444444, 35.1325),
"FWKA": (-9.95333333333, 33.8927777778),
"FWKG": (-13.0144444444, 33.4683333333),
"FWKI": (-13.7891666667, 33.7808333333),
"FWMG": (30.8377777778, -85.1816666667),
"FWUU": (-11.4447222222, 34.0116666667),
"FWZA": (-15.3852777778, 35.3844444444),
"FXMM": (-29.4622222222, 27.5525),
"FXMU": (-29.3038888889, 27.5033333333),
"FXSM": (-29.8383333333, 28.06),
"FYAR": (-22.4622222222, 14.98),
"FYEN": (-17.4830555556, 16.3219444444),
"FYGF": (-19.6019444444, 18.1225),
"FYKT": (-26.5397222222, 18.1113888889),
"FYLZ": (-26.6875, 15.2427777778),
"FYMP": (-17.6341666667, 24.1763888889),
"FYOA": (-17.8780555556, 15.9525),
"FYOG": (-28.5847222222, 16.4466666667),
"FYRU": (-17.9563888889, 19.7194444444),
"FYSM": (-22.6583333333, 14.5666666667),
"FYTM": (-19.2616666667, 17.7325),
"FYWB": (-22.9797222222, 14.6452777778),
"FYWE": (-22.6122222222, 17.0805555556),
"FYWH": (-22.4866666667, 17.4625),
"FZAA": (-4.38555555556, 15.4444444444),
"FZAB": (-4.32472222222, 15.3283333333),
"FZAG": (-5.93083333333, 12.3516666667),
"FZAI": (-5.91805555556, 12.4475),
"FZAJ": (-5.85416666667, 13.0636111111),
"FZAM": (-5.79944444444, 13.4408333333),
"FZBA": (-1.94722222222, 18.2858333333),
"FZBI": (-2.7175, 17.6847222222),
"FZBO": (-3.31111111111, 17.3816666667),
"FZBT": (-1.45, 19.0),
"FZCA": (-5.03555555556, 18.7855555556),
"FZCB": (-5.0, 19.6),
"FZDO": (-1.53777777778, 13.2694444444),
"FZEA": (0.0225, 18.2886111111),
"FZEN": (1.22472222222, 19.7888888889),
"FZFD": (4.25305555556, 20.9752777778),
"FZFK": (3.23527777778, 19.7711111111),
"FZFP": (4.1575, 21.6508333333),
"FZFU": (2.18277777778, 22.4816666667),
"FZGA": (2.17055555556, 21.4966666667),
"FZGN": (-0.283333333333, 20.8833333333),
"FZIA": (0.5175, 25.155),
"FZIC": (0.481666666667, 25.3380555556),
"FZIR": (0.8, 24.45),
"FZJH": (2.8275, 27.5880555556),
"FZKA": (1.56555555556, 30.2208333333),
"FZKJ": (2.81777777778, 24.7938888889),
"FZMA": (-2.30888888889, 28.8086111111),
"FZNA": (-1.67055555556, 29.2383333333),
"FZNP": (0.575, 29.4738888889),
"FZOA": (-2.91916666667, 25.9152777778),
"FZQA": (-11.5911111111, 27.5308333333),
"FZQC": (-8.46861111111, 28.8897222222),
"FZQM": (-10.7658333333, 25.5055555556),
"FZRF": (-5.87555555556, 29.25),
"FZRQ": (-5.39444444444, 26.99),
"FZSA": (-8.64194444444, 25.2527777778),
"FZSB": (-8.728889, 24.992222),
"FZTL": (-9.46944444444, 25.7588888889),
"FZUA": (-5.9, 22.4691666667),
"FZUK": (-6.43833333333, 20.7947222222),
"FZVA": (-3.39722222222, 23.4444444444),
"FZVI": (-4.96166666667, 23.3783333333),
"FZVR": (-4.31666666667, 20.4333333333),
"FZWA": (-6.12111111111, 23.5688888889),
"FZWC": (-6.73333333333, 23.95),
"GAAO": (15.7, 0.5),
"GABD": (14.3330555556, -3.6),
"GABF": (13.8, -10.85),
"GABG": (11.45, -7.51694444444),
"GABR": (17.0330555556, -0.4),
"GABS": (12.5333333333, -7.94972222222),
"GADZ": (15.0, -2.91694444444),
"GAGM": (16.3613888889, -3.59972222222),
"GAGO": (16.2483333333, -0.00527777777778),
"GAKA": (12.8391666667, -11.2527777778),
"GAKD": (14.4811111111, -11.4044444444),
"GAKL": (18.4330555556, 1.41694444444),
"GAKN": (13.5330555556, -8.05),
"GAKO": (12.3830555556, -5.46694444444),
"GAKT": (13.0666666667, -9.48333333333),
"GAKY": (14.4311111111, -11.4394444444),
"GAMA": (13.7, -6.06694444444),
"GAMB": (14.5127777778, -4.07944444444),
"GAMK": (15.85, 2.43333333333),
"GANF": (15.9333333333, -4.01666666667),
"GANK": (15.2286111111, -7.26138888889),
"GANR": (15.2386111111, -9.57638888889),
"GASK": (11.3330555556, -5.7),
"GATB": (16.7302777778, -3.0075),
"GATS": (20.2461111111, 0.980833333333),
"GAYE": (15.1330555556, -10.5669444444),
"GBYD": (13.3377777778, -16.6519444444),
"GCFV": (28.4525, -13.8636111111),
"GCGM": (28.0297222222, -17.2147222222),
"GCHI": (27.8147222222, -17.8869444444),
"GCLA": (28.6263888889, -17.7555555556),
"GCLP": (27.9316666667, -15.3863888889),
"GCRR": (28.9452777778, -13.605),
"GCTS": (28.0444444444, -16.5722222222),
"GCXO": (28.4825, -16.3413888889),
"GECE": (35.8925, -5.30583333333),
"GEML": (35.2797222222, -2.95611111111),
"GFBN": (7.53194444444, -12.5183333333),
"GFBO": (7.94388888889, -11.7616666667),
"GFGK": (7.76694444444, -12.3830555556),
"GFHA": (8.39444444444, -13.1283333333),
"GFKB": (9.638056, -11.515556),
"GFKE": (7.89277777778, -11.175),
"GFLL": (8.61638888889, -13.1952777778),
"GFYE": (8.61530833333, -11.0471916667),
"GGBU": (11.2972222222, -15.8380555556),
"GGCF": (11.2880555556, -15.1805555556),
"GGOV": (11.8947222222, -15.6536111111),
"GLBU": (5.90388888889, -10.0575),
"GLCP": (4.37888888889, -7.69694444444),
"GLGE": (5.03444444444, -9.06666666667),
"GLLB": (5.86694444444, -10.05),
"GLMR": (6.28888888889, -10.7586111111),
"GLNA": (7.5, -8.6),
"GLRB": (6.23361111111, -10.3622222222),
"GLST": (4.66666666667, -8.43333333333),
"GLTN": (6.06638888889, -8.13666666667),
"GLVA": (8.4, -9.76694444444),
"GMAA": (30.3811111111, -9.54611111111),
"GMAD": (30.325, -9.41305555556),
"GMAT": (28.4480555556, -11.1611111111),
"GMFF": (33.9272222222, -4.97777777778),
"GMFI": (33.5052777778, -5.15277777778),
"GMFK": (31.9488888889, -4.40055555556),
"GMFM": (33.8788888889, -5.515),
"GMFN": (35.1533333333, -2.92),
"GMFO": (34.7869444444, -1.92388888889),
"GMMB": (33.6555555556, -7.22138888889),
"GMMC": (33.5533333333, -7.66138888889),
"GMME": (34.0513888889, -6.75138888889),
"GMMF": (29.3688888889, -10.18),
"GMMI": (31.4038888889, -9.68472222222),
"GMMN": (33.3677777778, -7.58777777778),
"GMMW": (34.9888888889, -3.02833333333),
"GMMX": (31.6066666667, -8.03611111111),
"GMMY": (34.2988888889, -6.59583333333),
"GMMZ": (30.9388888889, -6.90916666667),
"GMSL": (34.2305555556, -6.05027777778),
"GMTA": (35.1769444444, -3.83944444444),
"GMTN": (35.5941666667, -5.32),
"GMTT": (35.7266666667, -5.91666666667),
"GOGG": (12.5555555556, -16.2816666667),
"GOGK": (12.88, -14.9552777778),
"GOGS": (12.41, -16.7461111111),
"GOOK": (14.1466666667, -16.0511111111),
"GOOY": (14.7394444444, -17.49),
"GOSM": (15.5936111111, -13.3227777778),
"GOSP": (16.6780555556, -14.965),
"GOSR": (16.4375, -15.6572222222),
"GOSS": (16.0497222222, -16.4611111111),
"GOTB": (14.8472222222, -12.4680555556),
"GOTK": (12.5722222222, -12.2202777778),
"GOTS": (13.0466666667, -13.2947222222),
"GOTT": (13.7366666667, -13.6530555556),
"GQNA": (16.7111111111, -9.63777777778),
"GQNB": (17.5330555556, -14.6830555556),
"GQNC": (18.45, -9.51694444444),
"GQND": (18.5702777778, -11.4230555556),
"GQNE": (16.6333333333, -14.2),
"GQNF": (16.5897222222, -11.4061111111),
"GQNH": (16.2330555556, -8.16694444444),
"GQNI": (16.6219444444, -7.31444444444),
"GQNJ": (19.7330555556, -14.3833333333),
"GQNK": (16.1594444444, -13.5075),
"GQNL": (17.75, -12.5),
"GQNM": (16.3, -8.05),
"GQNN": (18.0977777778, -15.9477777778),
"GQNS": (15.1794444444, -12.2072222222),
"GQNT": (17.2330555556, -10.8169444444),
"GQPA": (20.5066666667, -13.0430555556),
"GQPF": (22.6669444444, -12.7330555556),
"GQPP": (20.9283333333, -17.0311111111),
"GQPT": (25.2366666667, -11.5886111111),
"GQPZ": (22.7566666667, -12.0411111111),
"GSMA": (26.7316666667, -11.6844444444),
"GSVO": (23.7180555556, -15.9319444444),
"GUCY": (9.57688888889, -13.6119611111),
"GUFA": (10.3505555556, -13.5691666667),
"GUFH": (10.0355555556, -10.7697222222),
"GUGO": (9.24611111111, -9.29527777778),
"GUKR": (10.6511111111, -14.5336111111),
"GUKU": (9.16055555556, -10.1244444444),
"GULB": (11.3261111111, -12.2869444444),
"GUMA": (8.48111111111, -9.52583333333),
"GUNZ": (7.80583333333, -8.70166666667),
"GUOK": (10.9658333333, -14.2811111111),
"GUSA": (11.1169444444, -13.8330555556),
"GUSB": (12.5727777778, -13.3586111111),
"GUSI": (11.4330555556, -9.16694444444),
"GUXN": (10.4485, -9.22733333333),
"GVAC": (16.7413888889, -22.9494444444),
"GVAN": (17.2030555556, -25.0908333333),
"GVBA": (16.1366666667, -22.8888888889),
"GVFM": (14.9244444444, -23.4933333333),
"GVMA": (15.1558333333, -23.2136111111),
"GVMT": (15.0451111111, -24.34),
"GVNP": (14.9242, -23.4939),
"GVSF": (14.883, -24.48),
"GVSN": (16.5883333333, -24.2844444444),
"GVSV": (16.8338888889, -25.0566666667),
"HAAB": (8.97694444444, 38.8),
"HAAL": (9.00361111111, 38.7255555556),
"HAAM": (6.03972222222, 37.5902777778),
"HAAX": (14.1369444444, 38.7761111111),
"HABD": (11.6080555556, 37.3213888889),
"HABE": (9.39194444444, 34.5191666667),
"HADC": (11.0825, 39.7113888889),
"HADD": (8.55, 34.85),
"HADM": (10.3166666667, 37.7333333333),
"HADR": (9.625, | |
pd.read_csv('https://covidtracking.com/api/v1/states/daily.csv');
except:
print("ERROR: there was a problem loading the CTP dataset. Details: ")
for i in range(len(sys.exc_info())):
print(" " + str(sys.exc_info()[i]))
if verbose:
print('transforming CTP data...')
# transform our date to the standard format we're using (M/D/YY)
ctpStatesData['date'] = ctpStatesData['date'].astype(str)
ctpStatesData['date'] = ctpStatesData['date'].str[4:6].astype(int).astype(str) + '/' + ctpStatesData['date'].str[6:8].astype(int).astype(str) + '/' + ctpStatesData['date'].str[2:4]
# CTP uses state abbreviations, whereas our standard uses full names. Fix that with the 'us' module.
ctpStatesData['state'] = ctpStatesData['state'].apply(lambda x: str(us.states.lookup(x)))
ctpStatesData = ctpStatesData.rename(columns={"Long_": "Long", "Province_State": "Province/State", "Country_Region": "Country/Region", "Admin2": "County"})
# now, we need to transform our data into several timeseries, similar to what JHU has.
# to do this, we'll create a series of dataframes for each metric, pivot them, flesh them out with standard columns, and append them to our unified dataframe.
for i in range(len(ctpMetrics)):
workingDataframe = ctpStatesData[['state', ctpMetrics[i], 'date']]
workingDataframe = workingDataframe.pivot(index='state', columns='date', values=workingDataframe.columns[1]).fillna(0).reset_index()
workingDataframe.insert(1, 'Long', np.nan, True)
workingDataframe.insert(1, 'Lat', np.nan, True)
workingDataframe.insert(1, 'Population', np.nan, True)
workingDataframe.insert(1, 'County', '', True)
workingDataframe.insert(0, 'Country/Region', 'US', True)
workingDataframe.insert(0, 'Metric', ctpMetricEnums[i].value, True)
workingDataframe.insert(0, 'Source', CORVISDatasources.CTP.value, True)
workingDataframe = workingDataframe.rename(columns={"state": "Province/State"})
ctpDataframes.append(workingDataframe)
try:
pd.set_option('mode.chained_assignment', None) # temporarily disable 'SettingWithCopyWarning' message
for i in range(len(ctpMetrics)):
ctpDataframes[i].to_csv(dataPath+'.cpt_timeSeries' + ctpMetrics[i] + '.csv', index=False)
pd.set_option('mode.chained_assignment', 'warn')
except NameError:
print("ERROR: one or more CTP datasources failed to download. Please re-run this script.")
return;
if currentRepoInfoCTP is not None:
currentRepoInfoCTP.to_json(dataPath+'.ctpRepoInfo.json')
#####################################################
##### END OF LOAD BLOCK. PROCEED WITH CLEANING. #####
#####################################################
if verbose:
print('Data loading complete. Building unified CORVIS dataframe...')
# construct return dataframe
returnDataframe = pd.DataFrame(columns=CORVISBaselineColumnNames)
if ((datasourceToLoad == CORVISDatasources.ALL) | (datasourceToLoad == CORVISDatasources.JHU)):
returnDataframe = returnDataframe.append([timeSeriesConfirmedUS, timeSeriesDeathUS, timeSeriesConfirmedGlobal, timeSeriesDeathGlobal, timeSeriesRecoveredGlobal], ignore_index=True)
if ((datasourceToLoad == CORVISDatasources.ALL) | (datasourceToLoad == CORVISDatasources.CTP)):
returnDataframe = returnDataframe.append(ctpDataframes, ignore_index=True)
# strip out "unnamed: 0" index column that was imported with original data
returnDataframe = returnDataframe.loc[:, ~returnDataframe.columns.str.match('Unnamed')]
# Clean up data
returnDataframe['County'] = returnDataframe['County'].fillna('')
returnDataframe['Province/State'] = returnDataframe['Province/State'].fillna('')
# set string columns to type string, just to be extra cautious
returnDataframe['Source'] = returnDataframe['Source'].astype("string")
returnDataframe['Metric'] = returnDataframe['Metric'].astype("string")
returnDataframe['County'] = returnDataframe['County'].astype("string")
returnDataframe['Province/State'] = returnDataframe['Province/State'].astype("string")
returnDataframe['Country/Region'] = returnDataframe['Country/Region'].astype("string")
returnDataframe['Population'] = returnDataframe['Population'].replace(np.nan, 0)
if ((datasourceToLoad == CORVISDatasources.ALL) | (datasourceToLoad == CORVISDatasources.JHU)):
returnDataframe['Population'] = returnDataframe.apply(GetCORVISPopulationLambda, args=(lookupTable,), axis=1)
else:
print("WARNING: 'Population' is only calculated with the JHU dataset.")
returnDataframe['Population'] = returnDataframe['Population'].fillna(-1)
returnDataframe['Lat'] = returnDataframe['Lat'].fillna(1000) # fill with easy-to-catch junk data
returnDataframe['Long'] = returnDataframe['Long'].fillna(1000) # fill with easy-to-catch junk data
returnDataframe['Population'] = returnDataframe['Population'].astype(int)
returnDataframe['Lat'] = returnDataframe['Lat'].astype(float)
returnDataframe['Long'] = returnDataframe['Long'].astype(float)
# scrub any countries that double-report at the national level.
for nationalException in CORVISIgnoreStatesForNationalCount:
returnDataframe = returnDataframe.drop(returnDataframe[(returnDataframe['Country/Region'] == nationalException) & (returnDataframe['Province/State'] == '')].index, axis=0)
if verbose:
print('CORVIS data successfully loaded. Ready.')
return returnDataframe
def FilterCORVISData(sourceCORVISDataframe, country=None, state=None, county=None, region=None, province=None, aggregateBy=CORVISAggregations.NONE, metric=None, filterMissingPopulation=False, sourceData=CORVISDatasources.ALL, combineDatasources=None, allowStateCodesInFilters=True):
VerifyCORVISDataframe(sourceCORVISDataframe)
# first, check to see if our aliases are in use. If so, confirm that our primary entries aren't, then reassign accordingly.
if (region is not None):
if (country is not None):
# we have entries for BOTH country AND province. Do not allow this.
raise ValueError("ERROR: Ambiguous value: 'region' is an alias for 'country'. You cannot use both at the same time.")
else:
country = region
if (province is not None):
if (state is not None):
# we have entries for BOTH country AND province. Do not allow this.
raise ValueError("ERROR: Ambiguous value: 'province' is an alias for 'state'. You cannot use both at the same time.")
else:
state = province
if (metric is None):
metric = CORVISMetrics.ALL
if (combineDatasources is None):
combineDatasources = CORVISCombineDatasourcesBy.NONE
if isinstance(metric, CORVISMetrics):
metric = [metric]
# if country, county, or state is as a string, convert it to a 1-item list.
if isinstance(country, str):
country = [country]
if isinstance(county, str):
county = [county]
if isinstance(state, str):
state = [state]
if country == ['']:
country = []
if county == ['']:
county = []
if state == ['']:
state = []
if country is None:
country = []
if county is None:
county = []
if state is None:
state = []
# next, sort our lists so we can compare them.
country.sort()
county.sort()
state.sort()
# get our datasource, and warn if it isn't the enumerated type.
if isinstance(sourceData, CORVISDatasources):
sourceData = sourceData.value
else:
print("Note: we recommend using one of the following enumerated values for the 'metric' parameter:")
print(" CORVISDatasources.ALL, CORVISDatasources.JHU, CORVISDatasources.CTP")
# get the value of our CORVISMetrics enumerated type, if that's what was passed.
# Print a reminder to use the enumerated type if they don't.
hasWarnedOnMetric = False
for i in range(len(metric)):
if isinstance(metric[i], CORVISMetrics):
metric[i] = metric[i].value
elif hasWarnedOnMetric:
print("Note: we recommend using one of the following enumerated values for the 'metric' parameter:")
print(" CORVISMetrics.ALL, CORVISMetrics.CONFIRMED, CORVISMetrics.DEATH, or CORVISMetrics.RECOVERED")
hasWarnedOnMetric = True
# confirm that our metric will work in its current context
if not (metric[i] in ['Confirmed', 'Death', 'Recovered', 'all']):
raise ValueError("'metric' must be one of the following values: 'confirmed', 'death', 'recovered', 'all' (default: 'all')")
# get the value of our CORVISAggregations enumerated type, if that's what was passed.
# Print a reminder to use the enumerated type if they don't.
if isinstance(aggregateBy, CORVISAggregations):
aggregateBy = aggregateBy.value
elif aggregateBy is None:
# do nothing
pass
else:
print("Note: we recommend using one of the following enumerated values for the 'aggregateBy' parameter:")
print(" CORVISAggregations.GLOBAL, CORVISAggregations.COUNTRY, CORVISAggregations.STATE, CORVISAggregations.COUNTY, CORVISAggregations.NONE")
# confirm that our metric will work in its current context
if not (aggregateBy in ['global', 'country', 'state', None]):
raise ValueError("'aggregateBy' must be one of the following values: 'global', 'country', 'state', None (default)")
# get the value of our CORVISCombineDatasourcesBy enumerated type, if that's what was passed.
# Print a reminder to use the enumerated type if they don't.
if isinstance(combineDatasources, CORVISCombineDatasourcesBy):
combineDatasources = combineDatasources.value
elif combineDatasources is None:
# do nothing
pass
else:
print("Note: we recommend using one of the following enumerated values for the 'combineDatasources' parameter:")
print(" CORVISCombineDatasourcesBy.MIN, CORVISCombineDatasourcesBy.MAX, CORVISCombineDatasourcesBy.MEAN, CORVISAggregations.NONE")
# confirm that our metric will work in its current context
if not (combineDatasources in ['min', 'max', 'mean', None]):
raise ValueError("'combineDatasources' must be one of the following values: 'min', 'max', 'mean', None (default)")
returnDataframe = sourceCORVISDataframe.copy()
# loop through our filter lists and extract any string that begins with '!'.
# Put these values into a "does not include" filter list after removing the
# '!' at the front.
filterCountry = country
filterState = state
filterCounty = county
notCountry = []
notState = []
notCounty = []
country = []
state = []
county = []
for currentFilterItem in filterCountry:
if len(currentFilterItem) > 0:
if (currentFilterItem[0] != '!'):
country.append(currentFilterItem)
else:
currentFilterItem = currentFilterItem[1:]
notCountry.append(currentFilterItem)
for currentFilterItem in filterState:
if len(currentFilterItem) > 0:
if (currentFilterItem[0] != '!'):
state.append(currentFilterItem)
else:
currentFilterItem = currentFilterItem[1:]
notState.append(currentFilterItem)
for currentFilterItem in filterCounty:
if len(currentFilterItem) > 0:
if (currentFilterItem[0] != '!'):
county.append(currentFilterItem)
else:
currentFilterItem = currentFilterItem[1:]
notCounty.append(currentFilterItem)
# if requested, convert state codes to states. Will also properly capitalize other requests.
if allowStateCodesInFilters:
for i in range(len(state)):
if us.states.lookup(state[i]):
state[i] = str(us.states.lookup(state[i]))
for i in range(len(notState)):
if us.states.lookup(notState[i]):
notState[i] = str(us.states.lookup(notState[i]))
# filter before we aggregate: it's faster!
# if we need to filter our values, go in order of coarsest to finest: country, then state, then county.
if (country != []):
returnDataframe = returnDataframe[returnDataframe['Country/Region'].isin(country)]
if (state != []):
returnDataframe = returnDataframe[returnDataframe['Province/State'].isin(state)]
if (county != []):
returnDataframe = returnDataframe[returnDataframe['County'].isin(county)]
if (notCountry != []):
returnDataframe = returnDataframe[~returnDataframe['Country/Region'].isin(notCountry)]
if (notState != []):
returnDataframe = returnDataframe[~returnDataframe['Province/State'].isin(notState)]
if (notCounty != []):
returnDataframe = returnDataframe[~returnDataframe['County'].isin(notCounty)]
if (filterMissingPopulation):
returnDataframe = returnDataframe[returnDataframe['Population'] > 0]
if (metric != [CORVISMetrics.ALL.value]):
returnDataframe = returnDataframe[returnDataframe['Metric'].isin(metric)]
if (sourceData != CORVISDatasources.ALL.value):
returnDataframe = returnDataframe[returnDataframe['Source'] == str(sourceData)]
try:
VerifyCORVISDataframe(sourceCORVISDataframe)
except ValueError:
raise ValueError("ERROR in FilterCORVISData(): no data met the filtering criteria.")
if (aggregateBy is not None):
# set up our aggregators!
# In the event we need to group our results, we need to be able to aggregate columns.
# We have a LOT of columns, and the number grows every day.
# | |
дивизионной артиллерии, различные боеприпасы. Масса 40-60 кг. Дальность огня: 20 км.',
'wpn_troops_type':'ВПК',
'wpn_cost':1000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.02,
'wpn_name_new':'Снаряды 150-мм новые',
'wpn_name_mid':'Снаряды 150-мм устаревшие',
'wpn_name_old':'Снаряды 150-мм под списание',
'wpn_age_mid':20,
'wpn_age_old':40,
'wpn_a':0.1,
'wpn_b':0.0002,
'wpn_c':1.4,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
# AGS (automatic grenade launcher) rounds, registered into metadict_wpn.
# NOTE(review): 'wpn_name' was destroyed by an anonymisation pass (the
# '<NAME>' placeholder). Judging by 'wpn_name_new'/'wpn_name_mid' below,
# the original value was probably 'Выстрелы АГС' -- restore from upstream
# history before relying on name-based lookups.
dict_wpn = {
    'wpn_name':'<NAME>',
    # Description (RU): rounds for automatic grenade systems;
    # calibre 30x30 mm, 350-gram cartridge.
    'wpn_name_comment':'Для автоматических гранатомётных систем. Калибр 30x30 мм, патрон 350 грамм.',
    'wpn_troops_type':'ВПК',  # branch: military-industrial complex
    'wpn_cost':50,            # unit cost, in the currency below
    'wpn_cost_currency':'Эквестрийские биты',
    'wpn_budget':0.004,       # share of the procurement budget
    # Display names for the three age tiers (new / obsolete / write-off):
    'wpn_name_new':'Выстрелы АГС новые',
    'wpn_name_mid':'Выстрелы АГС устаревшие',
    'wpn_name_old':'Выстрелы АГС под списание',
    'wpn_age_mid':10,         # years until "obsolete"
    'wpn_age_old':40,         # years until "write-off"
    # wpn_a/wpn_b/wpn_c: presumably Gompertz-style attrition coefficients
    # (cf. gompertz_distribution below) -- TODO confirm against the consumer.
    'wpn_a':0.1,
    'wpn_b':0.0002,
    'wpn_c':1.4,
    }
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Миномётные мины 120-мм',
'wpn_name_comment':'Для станковых миномётов и 120-мм артиллерии. Масса 15-25 кг',
'wpn_troops_type':'ВПК',
'wpn_cost':500,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.02,
'wpn_name_new':'Миномётные мины 120-мм новые',
'wpn_name_mid':'Миномётные мины 120-мм устаревшие',
'wpn_name_old':'Миномётные мины 120-мм под списание',
'wpn_age_mid':10,
'wpn_age_old':40,
'wpn_a':0.1,
'wpn_b':0.0002,
'wpn_c':1.4,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Реактивные гранаты',
'wpn_name_comment':'Для переносных гранатомётов. Осколочные, куммулятивные, термобарические. Масса 3-6 кг',
'wpn_troops_type':'ВПК',
'wpn_cost':500,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.001,
'wpn_name_new':'Реактивные гранаты новые',
'wpn_name_mid':'Реактивные гранаты устаревшие',
'wpn_name_old':'Реактивные гранаты под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.1,
'wpn_b':0.0002,
'wpn_c':1.4,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Реактивные снаряды 210-мм',
'wpn_name_comment':'Для реактивной артиллерии. Различные типы снарядов, масса 200 кг (50 кг БЧ) Дальность огня до 60 км.',
'wpn_troops_type':'ВПК',
'wpn_cost':5000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.025,
'wpn_name_new':'Реактивные снаряды 210-мм новые',
'wpn_name_mid':'Реактивные снаряды 210-мм устаревшие',
'wpn_name_old':'Реактивные снаряды 210-мм под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.1,
'wpn_b':0.0002,
'wpn_c':1.4,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Управляемые ракеты',
'wpn_name_comment':'Противотанковые ракеты управляемые с пусковой установки. Масса 30 кг. Дальность 10 км',
'wpn_troops_type':'ВПК',
'wpn_cost':25000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.02,
'wpn_name_new':'Управляемые ракеты новые',
'wpn_name_mid':'Управляемые ракеты устаревшие',
'wpn_name_old':'Управляемые ракеты под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.2,
'wpn_b':0.0004,
'wpn_c':1.6,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Ракеты воздух-воздух',
'wpn_name_comment':'Управляемые авиационные ракеты. Масса: 500 кг (БЧ 50 кг) Дальность: 120 км.',
'wpn_troops_type':'ВПК',
'wpn_cost':250000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.003,
'wpn_name_new':'Ракеты воздух-воздух новые',
'wpn_name_mid':'Ракеты воздух-воздух устаревшие',
'wpn_name_old':'Ракеты воздух-воздух под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.1,
'wpn_b':0.0004,
'wpn_c':1.6,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Зенитные ракеты малой дальности',
'wpn_name_comment':'Противовоздушные ракеты малого радиуса. Масса 60 кг; высота: 10 км; дальность: 20 км.',
'wpn_troops_type':'ВПК',
'wpn_cost':50000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.002,
'wpn_name_new':'Зенитные ракеты малой дальности новые',
'wpn_name_mid':'Зенитные ракеты малой дальности устаревшие',
'wpn_name_old':'Зенитные ракеты малой дальности под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.05,
'wpn_b':0.0004,
'wpn_c':1.6,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Зенитные ракеты средней дальности',
'wpn_name_comment':'Противовоздушные ракеты среднего радиуса. Масса 500 кг; высота: 30; дальность 150 км.',
'wpn_troops_type':'ВПК',
'wpn_cost':250000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.0015,
'wpn_name_new':'Зенитные ракеты средней дальности новые',
'wpn_name_mid':'Зенитные ракеты средней дальности устаревшие',
'wpn_name_old':'Зенитные ракеты средней дальности под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.1,
'wpn_b':0.0004,
'wpn_c':1.6,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Зенитные ракеты большой дальности',
'wpn_name_comment':'Ракеты ПВО/ПРО большого радиуса с ядерной БЧ. Масса 2000 кг; высота: 250 км; дальность: 500 км.',
'wpn_troops_type':'ВПК',
'wpn_cost':5000000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.003,
'wpn_name_new':'Зенитные ракеты большой дальности новые',
'wpn_name_mid':'Зенитные ракеты большой дальности устаревшие',
'wpn_name_old':'Зенитные ракеты большой дальности под списание',
'wpn_age_mid':5,
'wpn_age_old':10,
'wpn_a':0.01,
'wpn_b':0.0006,
'wpn_c':1.8,
'wpn_ammo_1_name':'Нейтронные бомбы',
'wpn_ammo_1_capacity':1,
'wpn_ammo_1_expense':1,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Торпеды большой дальности',
'wpn_name_comment':'Торпеды с ядерными боеголовками. Масса: 4000 кг (200 кг БЧ); скорость 50 узлов; дальность 50 км.',
'wpn_troops_type':'ВПК',
'wpn_cost':1000000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.0001,
'wpn_name_new':'Торпеды большой дальности новые',
'wpn_name_mid':'Торпеды большой дальности устаревшие',
'wpn_name_old':'Торпеды большой дальности под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.02,
'wpn_b':0.0004,
'wpn_c':1.6,
'wpn_ammo_1_name':'Ядерные бомбы',
'wpn_ammo_1_capacity':1,
'wpn_ammo_1_expense':1,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
# Tactical nuclear cruise missiles (mass 2000 kg, range 2500 km),
# registered into metadict_wpn.
dict_wpn = {
    'wpn_name':'Тактические ядерные ракеты',
    'wpn_name_comment':'Крылатые ракеты с ядерными боеголовками. Масса 2000 кг. Дальность 2500 км.',
    'wpn_troops_type':'ВПК',
    'wpn_cost':1000000,
    'wpn_cost_currency':'Эквестрийские биты',
    'wpn_budget':0.002,
    'wpn_name_new':'Тактические ядерные ракеты новые',
    'wpn_name_mid':'Тактические ядерные ракеты устаревшие',
    'wpn_name_old':'Тактические ядерные ракеты под списание',
    'wpn_age_mid':10,
    'wpn_age_old':20,
    'wpn_a':0.00,   # zero age-independent attrition, unlike conventional stock
    'wpn_b':0.0004,
    'wpn_c':1.6,
    # NOTE(review): the ammo name below was mangled by an anonymisation
    # pass ('<NAME>ы'). Sibling entries reference ammo by exact
    # 'wpn_name' string (e.g. 'Ядерные бомбы'), so this broken value
    # likely breaks the ammo lookup -- most plausibly it was
    # 'Ядерные бомбы'; restore from upstream history.
    'wpn_ammo_1_name':'<NAME>ы',
    'wpn_ammo_1_capacity':1,  # warheads carried
    'wpn_ammo_1_expense':1,   # warheads consumed per use
    }
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Стратегические ядерные ракеты',
'wpn_name_comment':'Баллистические ракеты с БЧ на шесть ядерных боеголовок. Масса 50 тонн. Дальность 10 000 км.',
'wpn_troops_type':'ВПК',
'wpn_cost':20000000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.011,
'wpn_name_new':'Стратегические ядерные ракеты новые',
'wpn_name_mid':'Стратегические ядерные ракеты устаревшие',
'wpn_name_old':'Стратегические ядерные ракеты под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.00,
'wpn_b':0.0004,
'wpn_c':1.6,
'wpn_ammo_1_name':'Ядерные бомбы',
'wpn_ammo_1_capacity':6,
'wpn_ammo_1_expense':6,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Ядерные бомбы',
'wpn_name_comment':'Ядерные/термоядерные заряды изменяемой мощности, 5-500 килотонн. Масса 200 килограмм.',
'wpn_troops_type':'ВПК',
'wpn_cost':5000000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.02,
'wpn_name_new':'Ядерные бомбы новые',
'wpn_name_mid':'Ядерные бомбы устаревшие',
'wpn_name_old':'Ядерные бомбы под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.00,
'wpn_b':0.0004,
'wpn_c':1.6,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
dict_wpn = {
'wpn_name':'Нейтронные бомбы',
'wpn_name_comment':'Заряды килотонной мощности, 80% нейтронного излучения. Радиус поражения: 1500 метров. Масса: 100 килограмм.',
'wpn_troops_type':'ВПК',
'wpn_cost':5000000,
'wpn_cost_currency':'Эквестрийские биты',
'wpn_budget':0.015,
'wpn_name_new':'Нейтронные бомбы новые',
'wpn_name_mid':'Нейтронные бомбы устаревшие',
'wpn_name_old':'Нейтронные бомбы под списание',
'wpn_age_mid':10,
'wpn_age_old':20,
'wpn_a':0.00,
'wpn_b':0.0004,
'wpn_c':1.6,
}
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
# Single-use laser crystals, registered into metadict_wpn.
# NOTE(review): 'wpn_name' says '300-ГВт' but the age-tier names below say
# '3-ГВт', and the stated physics (3000 J over one microsecond) works out
# to 3 GW -- the '300' prefix looks wrong; confirm which name downstream
# code keys on before renaming (runtime strings left untouched here).
dict_wpn = {
    'wpn_name':'300-ГВт лазеры',
    # Description (RU): single-use crystals; one-microsecond pulse,
    # 3000 J energy, 1% efficiency; "recoil" equals 70 g of TNT.
    'wpn_name_comment':'Одноразовые кристаллы. Длительность импульса — одна микросекунда, энергия 3000 Дж, КПД 1%; «отдача» — 70 г тротила.',
    'wpn_troops_type':'ВПК',
    'wpn_cost':10000,
    'wpn_cost_currency':'Эквестрийские биты',
    'wpn_budget':0.003,
    'wpn_name_new':'3-ГВт лазеры новые',
    'wpn_name_mid':'3-ГВт лазеры устаревшие',
    # NOTE(review): 'не существующие' ("non-existent") breaks the
    # 'под списание' ("write-off") convention used by every other entry --
    # possibly deliberate (crystals expire outright); confirm.
    'wpn_name_old':'3-ГВт лазеры не существующие',
    'wpn_age_mid':5,
    'wpn_age_old':10,
    'wpn_a':0.1,
    'wpn_b':0.0006,
    'wpn_c':1.8,
    }
metadict_wpn[dict_wpn_key] = dict_wpn
dict_wpn_key = dict_wpn_key + 1
# Heavier single-use laser crystals.
# NOTE(review): same prefix mismatch as above -- '300-ТВт' in 'wpn_name'
# vs '3-ТВт' in the tier names; 3 MJ over one microsecond is 3 TW.
dict_wpn = {
    'wpn_name':'300-ТВт лазеры',
    # Description (RU): single-use crystals; one-microsecond shot,
    # 3 MJ energy, 1% efficiency; "recoil" equals 70 kg of TNT.
    'wpn_name_comment':'Одноразовые кристаллы. Длительность выстрела — одна микросекунда, энергия 3 МДж, КПД 1%; «отдача» — 70 кг тротила.',
    'wpn_troops_type':'ВПК',
    'wpn_cost':10000000,
    'wpn_cost_currency':'Эквестрийские биты',
    'wpn_budget':0.02,
    'wpn_name_new':'3-ТВт лазеры новые',
    'wpn_name_mid':'3-ТВт лазеры устаревшие',
    'wpn_name_old':'3-ТВт лазеры не существующие',
    'wpn_age_mid':5,
    'wpn_age_old':10,
    'wpn_a':0.1,
    'wpn_b':0.0006,
    'wpn_c':1.8,
    }
metadict_wpn[dict_wpn_key] = dict_wpn
#-------------------------------------------------------------------------
# Internal variables.
# Short working aliases for the option constants (for convenience):
year_real = YEAR_START
age_real = AGE_END          # cohort age used by the demographic functions below
pop = POPULATION
fert = FERTILITY_RATE
mort = MORTALITY_RATE
# Gompertz-Makeham mortality coefficients (see gompertz_distribution below):
a = COMPONENT_A             # age-independent mortality component
b = COEFFICIENT_B
c = COEFFICIENT_C
#-------------------------------------------------------------------------
# Functions and subroutines. Later ones call earlier ones.
def population_size(year):
    """Return the population size ``year`` years before the reference point.

    Population growth is a geometric progression, e.g.:
        100000 * 1.002 ** (100 - 1) = 121872
    (initial size, annual growth, a hundred-year span).
    Knowing the final size (POPULATION), this inverts the progression:
        121872 * 1.002 ** (1 - 100) = 100000
    """
    # Net annual growth factor: births minus deaths, plus one.
    growth_factor = FERTILITY_RATE - MORTALITY_RATE + 1
    return round(POPULATION * (growth_factor ** (-year)))
def generation_size(year, percent):
    """Return the size of one cohort: ``percent`` of the population in ``year``.

    With a 0.02 birth rate, for example:
        121872 * 1.002 ** (1 - 100) * 0.02 = 2000   (2% newborns)
    The same works for deaths, net growth, or composition:
        121872 * 1.002 ** (1 - 100) * 0.02 * 0.5 = 1000   (50% of newborns male)
    """
    return round(population_size(year) * percent)
def GDP_size(year):
    """Return the country's GDP ``year`` years before the reference point.

    Wealth follows the same geometric progression as population:
        10000 * 1.03 ** (1 - 100) = 536
    i.e. per-capita GDP grew from $536 to $10,000 over a century.
    """
    # Per-capita GDP back-projected from today's GDP_RATE, then scaled
    # by the population of that year.
    per_capita = GDP_RATE * ((GDP_GROWTH + 1) ** (-year))
    return round(per_capita * population_size(year))
def gompertz_distribution(a, b, c, age):
    """Gompertz law: probability of dying within a year at a given age.

    The Gompertz-Makeham distribution works reasonably well in
    demographic calculations for very different populations. Its one
    weakness is that it understates mortality early and overstates it
    late (it is an exponential, after all); for human populations it
    fits best in the 30-70 year range.

    Formula: p = a + b * (c ** x), where
        p -- probability of death,
        a -- age-independent risk (e.g. 0.002),
        b -- coefficient 2 (e.g. 0.000350),
        c -- coefficient 3 (e.g. 1.08),
        x -- age in years.
    Coefficients chosen per "Parametric models for life insurance
    mortality data: Gompertz's law over time".

    The result is capped at 1 (certain death).
    """
    p = a + b * (c ** age)
    # Probabilities above 1 make no sense; clamp to certain death.
    return p if p <= 1 else 1
def generation_alive(generation, a, b, c, age_real):
    """Return how many members of a cohort are still alive at ``age_real``.

    Applies the Gompertz mortality rate year by year, removing the dying
    fraction from the cohort each year, and clamps at zero once everyone
    is dead.

    NOTE(review): mortality is charged for ages 0 through age_real + 1
    inclusive (the loop increments the age before subtracting), so the
    final year is charged once more than a naive reading suggests --
    preserved exactly as in the original; confirm intent before changing.
    """
    # Year zero is charged before the loop starts.
    survivors = generation - generation * gompertz_distribution(a, b, c, 0)
    age = 0
    while age <= age_real:
        age += 1
        survivors -= survivors * gompertz_distribution(a, b, c, age)
        if survivors <= 0:
            # The whole cohort is dead; stop early.
            survivors = 0
            break
    return round(survivors)
def generation_profession(prof_percent, prof_hazard):
    """Return the headcount of one profession, adjusted for its risk.

    Takes the cohort's survivor count from the module-level
    ``dict_population`` and attritions it a second time through
    ``generation_alive``, substituting the profession's own hazard rate
    for the baseline age-independent risk ``a``.

    Relies on module globals: dict_population, MALE_PERCENT,
    FEMALE_PERCENT, prof_male_switch, prof_female_switch,
    prof_age_apprentice, b, c, age_real -- presumably set by the
    surrounding script before this is called (not visible here).
    """
    # Attrition starts at the apprenticeship age, not at birth.
    working_years = age_real - prof_age_apprentice
    headcount = 0
    if prof_male_switch != 0:
        headcount += generation_alive(
            dict_population['generation_alive'] * MALE_PERCENT * prof_percent,
            prof_hazard, b, c, working_years)
    if prof_female_switch != 0:
        headcount += generation_alive(
            dict_population['generation_alive'] * FEMALE_PERCENT * prof_percent,
            prof_hazard, b, c, working_years)
    return headcount
#-------------------------------------------------------------------------
# | |
fail = False
for dirname in examples:
print >>sys.stderr, "Testing %s..." % dirname
sys.stderr.flush()
try:
run(arguments=["test",
"--pkgdir",
os.path.join(examples_dir, dirname)],
defaults=defaults,
env_root=env_root)
except SystemExit, e:
fail = (e.code != 0) or fail
if fail and defaults.get("stopOnError"):
break
if fail:
print >>sys.stderr, "Some examples tests were unsuccessful."
sys.exit(-1)
def test_all_packages(env_root, defaults):
packages_dir = os.path.join(env_root, "packages")
if os.path.isdir(packages_dir):
packages = [dirname for dirname in os.listdir(packages_dir)
if os.path.isdir(os.path.join(packages_dir, dirname))]
else:
packages = []
packages.append(env_root)
packages.sort()
print >>sys.stderr, "Testing all available packages: %s." % (", ".join(packages))
sys.stderr.flush()
fail = False
for dirname in packages:
print >>sys.stderr, "Testing %s..." % dirname
sys.stderr.flush()
try:
run(arguments=["test",
"--pkgdir",
os.path.join(packages_dir, dirname)],
defaults=defaults,
env_root=env_root)
except SystemExit, e:
fail = (e.code != 0) or fail
if fail and defaults.get('stopOnError'):
break
if fail:
print >>sys.stderr, "Some package tests were unsuccessful."
sys.exit(-1)
def get_config_args(name, env_root):
    """Return the extra cfx argument list for configuration *name*.

    Reads <env_root>/local.json and looks up its 'configs' mapping.
    The 'default' config silently yields [] when the file or the entry
    is missing; any other missing name is fatal (sys.exit(1)).
    (Python 2 module: keeps py2 print syntax.)
    """
    # Keep the path and the parsed document in separate names; the
    # original rebound `local_json` from a path string to a parsed object.
    local_json_path = os.path.join(env_root, "local.json")
    if not (os.path.exists(local_json_path) and
            os.path.isfile(local_json_path)):
        if name == "default":
            return []
        else:
            print >>sys.stderr, "File does not exist: %s" % local_json_path
            sys.exit(1)
    local_json = packaging.load_json_file(local_json_path)
    if 'configs' not in local_json:
        print >>sys.stderr, "'configs' key not found in local.json."
        sys.exit(1)
    if name not in local_json.configs:
        if name == "default":
            return []
        else:
            print >>sys.stderr, "No config found for '%s'." % name
            sys.exit(1)
    config = local_json.configs[name]
    # isinstance (not `type(...) ==`) so list subclasses are accepted too.
    if not isinstance(config, list):
        print >>sys.stderr, "Config for '%s' must be a list of strings." % name
        sys.exit(1)
    return config
def initializer(env_root, args, out=sys.stdout, err=sys.stderr):
    """Create a skeleton add-on package in the current (or named) directory.

    :param args: ["init"] or ["init", subdir].
    :param out, err: writable streams for progress / error messages.
    :return: {"result": 0, "jid": ...} on success, {"result": 1} on error.
    (Python 2 module: keeps py2 print syntax.)
    """
    from templates import PACKAGE_JSON, TEST_MAIN_JS
    from preflight import create_jid
    path = os.getcwd()
    addon = os.path.basename(path)
    # if more than two arguments
    if len(args) > 2:
        print >>err, 'Too many arguments.'
        return {"result":1}
    if len(args) == 2:
        path = os.path.join(path,args[1])
        try:
            os.mkdir(path)
            print >>out, '*', args[1], 'package directory created'
        except OSError:
            print >>out, '*', args[1], 'already exists, testing if directory is empty'
    # avoid clobbering existing files, but we tolerate things like .git
    existing = [fn for fn in os.listdir(path) if not fn.startswith(".")]
    if existing:
        print >>err, 'This command must be run in an empty directory.'
        return {"result":1}
    for d in ['lib','data','test','doc']:
        os.mkdir(os.path.join(path,d))
        print >>out, '*', d, 'directory created'
    # Use 'with' so every file handle is closed promptly; the original
    # open(...).write(...) chains leaked handles (relied on refcounting).
    with open(os.path.join(path,'README.md'),'w') as f:
        f.write('')
    print >>out, '* README.md written'
    jid = create_jid()
    print >>out, '* generated jID automatically:', jid
    with open(os.path.join(path,'package.json'),'w') as f:
        f.write(PACKAGE_JSON % {'name':addon.lower(),
                                'fullName':addon,
                                'id':jid })
    print >>out, '* package.json written'
    with open(os.path.join(path,'test','test-main.js'),'w') as f:
        f.write(TEST_MAIN_JS)
    print >>out, '* test/test-main.js written'
    with open(os.path.join(path,'lib','main.js'),'w') as f:
        f.write('')
    print >>out, '* lib/main.js written'
    with open(os.path.join(path,'doc','main.md'),'w') as f:
        f.write('')
    print >>out, '* doc/main.md written'
    if len(args) == 1:
        print >>out, '\nYour sample add-on is now ready.'
        print >>out, 'Do "cfx test" to test it and "cfx run" to try it. Have fun!'
    else:
        print >>out, '\nYour sample add-on is now ready in the \'' + args[1] + '\' directory.'
        print >>out, 'Change to that directory, then do "cfx test" to test it, \nand "cfx run" to try it. Have fun!'
    return {"result":0, "jid":jid}
def buildJID(target_cfg):
    """Return the add-on's jetpack ID.

    Uses target_cfg["id"] when present, otherwise a freshly generated
    UUID.  The "@jetpack" suffix is appended unless the id already
    contains an '@' or is a GUID-style "{...}" string.
    """
    jid = target_cfg["id"] if "id" in target_cfg else None
    if jid is None:
        import uuid
        jid = str(uuid.uuid4())
    if "@" not in jid and not jid.startswith("{"):
        jid = jid + "@jetpack"
    return jid
def run(arguments=sys.argv[1:], target_cfg=None, pkg_cfg=None,
defaults=None, env_root=os.environ.get('CUDDLEFISH_ROOT'),
stdout=sys.stdout):
versions = get_versions()
sdk_version = versions["version"]
display_version = "Add-on SDK %s (%s)" % (sdk_version, versions["full"])
parser_kwargs = dict(arguments=arguments,
global_options=global_options,
parser_groups=parser_groups,
usage=usage,
version=display_version,
defaults=defaults)
(options, args) = parse_args(**parser_kwargs)
config_args = get_config_args(options.config, env_root);
# reparse configs with arguments from local.json
if config_args:
parser_kwargs['arguments'] += config_args
(options, args) = parse_args(**parser_kwargs)
command = args[0]
if command == "init":
initializer(env_root, args)
return
if command == "testpkgs":
test_all_packages(env_root, defaults=options.__dict__)
return
elif command == "testaddons":
test_all_testaddons(env_root, defaults=options.__dict__)
return
elif command == "testex":
test_all_examples(env_root, defaults=options.__dict__)
return
elif command == "testall":
test_all(env_root, defaults=options.__dict__)
return
elif command == "testcfx":
if options.filter:
print >>sys.stderr, "The filter option is not valid with the testcfx command"
return
test_cfx(env_root, options.verbose)
return
elif command == "docs":
from cuddlefish.docs import generate
if len(args) > 1:
docs_home = generate.generate_named_file(env_root, filename_and_path=args[1])
else:
docs_home = generate.generate_local_docs(env_root)
webbrowser.open(docs_home)
return
elif command == "sdocs":
from cuddlefish.docs import generate
filename=""
if options.override_version:
filename = generate.generate_static_docs(env_root, override_version=options.override_version)
else:
filename = generate.generate_static_docs(env_root)
print >>stdout, "Wrote %s." % filename
return
elif command not in ["xpi", "test", "run"]:
print >>sys.stderr, "Unknown command: %s" % command
print >>sys.stderr, "Try using '--help' for assistance."
sys.exit(1)
target_cfg_json = None
if not target_cfg:
if not options.pkgdir:
options.pkgdir = find_parent_package(os.getcwd())
if not options.pkgdir:
print >>sys.stderr, ("cannot find 'package.json' in the"
" current directory or any parent.")
sys.exit(1)
else:
options.pkgdir = os.path.abspath(options.pkgdir)
if not os.path.exists(os.path.join(options.pkgdir, 'package.json')):
print >>sys.stderr, ("cannot find 'package.json' in"
" %s." % options.pkgdir)
sys.exit(1)
target_cfg_json = os.path.join(options.pkgdir, 'package.json')
target_cfg = packaging.get_config_in_dir(options.pkgdir)
# At this point, we're either building an XPI or running Jetpack code in
# a Mozilla application (which includes running tests).
use_main = False
inherited_options = ['verbose', 'enable_e10s']
enforce_timeouts = False
if command == "xpi":
use_main = True
elif command == "test":
if 'tests' not in target_cfg:
target_cfg['tests'] = []
inherited_options.extend(['iterations', 'filter', 'profileMemory',
'stopOnError', 'parseable'])
enforce_timeouts = True
elif command == "run":
use_main = True
else:
assert 0, "shouldn't get here"
if use_main and 'main' not in target_cfg:
# If the user supplies a template dir, then the main
# program may be contained in the template.
if not options.templatedir:
print >>sys.stderr, "package.json does not have a 'main' entry."
sys.exit(1)
if not pkg_cfg:
pkg_cfg = packaging.build_config(env_root, target_cfg, options.packagepath)
target = target_cfg.name
# TODO: Consider keeping a cache of dynamic UUIDs, based
# on absolute filesystem pathname, in the root directory
# or something.
if command in ('xpi', 'run'):
from cuddlefish.preflight import preflight_config
if target_cfg_json:
config_was_ok, modified = preflight_config(target_cfg,
target_cfg_json)
if not config_was_ok:
if modified:
# we need to re-read package.json . The safest approach
# is to re-run the "cfx xpi"/"cfx run" command.
print >>sys.stderr, ("package.json modified: please re-run"
" 'cfx %s'" % command)
else:
print >>sys.stderr, ("package.json needs modification:"
" please update it and then re-run"
" 'cfx %s'" % command)
sys.exit(1)
# if we make it this far, we have a JID
else:
assert command == "test"
jid = buildJID(target_cfg)
targets = [target]
if command == "test":
targets.append(options.test_runner_pkg)
extra_packages = []
if options.extra_packages:
extra_packages = options.extra_packages.split(",")
if extra_packages:
targets.extend(extra_packages)
target_cfg.extra_dependencies = extra_packages
deps = packaging.get_deps_for_targets(pkg_cfg, targets)
from cuddlefish.manifest import build_manifest, ModuleNotFoundError, \
BadChromeMarkerError
# Figure out what loader files should be scanned. This is normally
# computed inside packaging.generate_build_for_target(), by the first
# dependent package that defines a "loader" property in its package.json.
# This property is interpreted as a filename relative to the top of that
# file, and stored as a path in build.loader . generate_build_for_target()
# cannot be called yet (it needs the list of used_deps that
# build_manifest() computes, but build_manifest() needs the list of
# loader files that it computes). We could duplicate or factor out this
# build.loader logic, but that would be messy, so instead we hard-code
# the choice of loader for manifest-generation purposes. In practice,
# this means that alternative loaders probably won't work with
# --strip-xpi.
assert packaging.DEFAULT_LOADER == "addon-sdk"
assert pkg_cfg.packages["addon-sdk"].loader == "lib/sdk/loader/cuddlefish.js"
cuddlefish_js_path = os.path.join(pkg_cfg.packages["addon-sdk"].root_dir,
"lib", "sdk", "loader", "cuddlefish.js")
loader_modules = [("addon-sdk", "lib", "sdk/loader/cuddlefish", cuddlefish_js_path)]
scan_tests = command == "test"
test_filter_re = None
if scan_tests and options.filter:
test_filter_re = options.filter
if ":" in options.filter:
test_filter_re = options.filter.split(":")[0]
try:
manifest = build_manifest(target_cfg, pkg_cfg, deps,
scan_tests, test_filter_re,
loader_modules)
except ModuleNotFoundError, e:
print str(e)
sys.exit(1)
except BadChromeMarkerError, e:
# An error had already been displayed on stderr in manifest code
sys.exit(1)
used_deps = manifest.get_used_packages()
if command == "test":
# The test runner doesn't appear to link against any actual packages,
# because it loads everything at runtime (invisible to the linker).
# If we believe that, we won't set up URI mappings for anything, and
# tests won't be able to run.
used_deps = deps
for xp in extra_packages:
if xp not in used_deps:
used_deps.append(xp)
build = packaging.generate_build_for_target(
pkg_cfg, target, used_deps,
include_dep_tests=options.dep_tests,
is_running_tests=(command == "test")
)
harness_options = {
'jetpackID': jid,
'staticArgs': options.static_args,
'name': target,
}
harness_options.update(build)
# When cfx is run from sdk root directory, we will strip sdk modules and
# override them with local | |
import numpy as np
def get_pieces_count(state):
    """Count the pieces in a board-state string (every letter is a piece;
    digits and '/' separators are not)."""
    return sum(1 for ch in state if ch.isalpha())
def is_kill_move(state_prev, state_next):
    """Difference in piece count between two states; a positive value
    means the move captured that many pieces."""
    pieces_before = sum(1 for ch in state_prev if ch.isalpha())
    pieces_after = sum(1 for ch in state_next if ch.isalpha())
    return pieces_before - pieces_after
def create_position_labels():
    """Return the 90 board coordinates 'a0'..'i9' in column-major order
    (a0, a1, ..., a9, b0, ..., i9).

    Note: the original reversed the file list and then indexed it with
    ``8 - l1``; the two reversals cancel, so plain order is equivalent.
    """
    files = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
    return [f + str(rank) for f in files for rank in range(10)]
def create_position_labels_reverse():
    """Return the 90 board coordinates ordered a9..a0, b9..b0, ..., i9..i0
    (files ascending, ranks descending within each file)."""
    files = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
    # Build i0..i9, h0..h9, ..., a0..a9 and then reverse the whole list,
    # exactly as the original did.
    labels = [f + str(rank) for f in reversed(files) for rank in range(10)]
    labels.reverse()
    return labels
class GameBoard(object):
board_pos_name = np.array(create_position_labels()).reshape(9,10).transpose()
Ny = 10
Nx = 9
    def __init__(self):
        """Set up a new game in the standard opening position.

        The state string holds 10 ranks separated by '/', read top to
        bottom; digits encode runs of empty squares.  Lowercase letters
        are black pieces, uppercase are red pieces (translated from the
        original Chinese comment).
        """
        self.state = "RNBAKABNR/9/1C5C1/P1P1P1P1P/9/9/p1p1p1p1p/1c5c1/9/rnbakabnr"#"rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR" #
        # Move counter, starting at round 1.
        self.round = 1
        # 'w' moves first; elsewhere in the class 'w' pairs with the
        # uppercase (red) pieces and 'b' with the lowercase (black) ones.
        self.current_player = "w"
        # NOTE(review): appears to count rounds under some repetition /
        # no-capture restriction -- confirm against callers.
        self.restrict_round = 0
def reload(self):
self.state = "RNBAKABNR/9/1C5C1/P1P1P1P1P/9/9/p1p1p1p1p/1c5c1/9/rnbakabnr"#"rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR" #
self.round = 1
self.current_player = "w"
self.restrict_round = 0
@staticmethod
def print_borad(board, action = None):
def string_reverse(string):
# return ''.join(string[len(string) - i] for i in range(1, len(string)+1))
return ''.join(string[i] for i in range(len(string) - 1, -1, -1))
x_trans = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8}
if(action != None):
src = action[0:2]
src_x = int(x_trans[src[0]])
src_y = int(src[1])
# board = string_reverse(board)
board = board.replace("1", " ")
board = board.replace("2", " ")
board = board.replace("3", " ")
board = board.replace("4", " ")
board = board.replace("5", " ")
board = board.replace("6", " ")
board = board.replace("7", " ")
board = board.replace("8", " ")
board = board.replace("9", " ")
board = board.split('/')
# board = board.replace("/", "\n")
print(" abcdefghi")
for i,line in enumerate(board):
if (action != None):
if(i == src_y):
s = list(line)
s[src_x] = 'x'
line = ''.join(s)
print(i,line)
# print(board)
@staticmethod
def sim_do_action(in_action, in_state):
x_trans = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5, 'g':6, 'h':7, 'i':8}
src = in_action[0:2]
dst = in_action[2:4]
src_x = int(x_trans[src[0]])
src_y = int(src[1])
dst_x = int(x_trans[dst[0]])
dst_y = int(dst[1])
# GameBoard.print_borad(in_state)
# print("sim_do_action : ", in_action)
# print(dst_y, dst_x, src_y, src_x)
board_positions = GameBoard.board_to_pos_name(in_state)
line_lst = []
for line in board_positions:
line_lst.append(list(line))
lines = np.array(line_lst)
# print(lines.shape)
# print(board_positions[src_y])
# print("before board_positions[dst_y] = ",board_positions[dst_y])
lines[dst_y][dst_x] = lines[src_y][src_x]
lines[src_y][src_x] = '1'
board_positions[dst_y] = ''.join(lines[dst_y])
board_positions[src_y] = ''.join(lines[src_y])
# src_str = list(board_positions[src_y])
# dst_str = list(board_positions[dst_y])
# print("src_str[src_x] = ", src_str[src_x])
# print("dst_str[dst_x] = ", dst_str[dst_x])
# c = copy.deepcopy(src_str[src_x])
# dst_str[dst_x] = c
# src_str[src_x] = '1'
# board_positions[dst_y] = ''.join(dst_str)
# board_positions[src_y] = ''.join(src_str)
# print("after board_positions[dst_y] = ", board_positions[dst_y])
# board_positions[dst_y][dst_x] = board_positions[src_y][src_x]
# board_positions[src_y][src_x] = '1'
board = "/".join(board_positions)
board = board.replace("111111111", "9")
board = board.replace("11111111", "8")
board = board.replace("1111111", "7")
board = board.replace("111111", "6")
board = board.replace("11111", "5")
board = board.replace("1111", "4")
board = board.replace("111", "3")
board = board.replace("11", "2")
# GameBoard.print_borad(board)
return board
@staticmethod
def board_to_pos_name(board):
board = board.replace("2", "11")
board = board.replace("3", "111")
board = board.replace("4", "1111")
board = board.replace("5", "11111")
board = board.replace("6", "111111")
board = board.replace("7", "1111111")
board = board.replace("8", "11111111")
board = board.replace("9", "111111111")
return board.split("/")
@staticmethod
def check_bounds(toY, toX):
if toY < 0 or toX < 0:
return False
if toY >= GameBoard.Ny or toX >= GameBoard.Nx:
return False
return True
@staticmethod
def validate_move(c, upper=True):
if (c.isalpha()):
if (upper == True):
if (c.islower()):
return True
else:
return False
else:
if (c.isupper()):
return True
else:
return False
else:
return True
@staticmethod
def get_legal_moves(state, current_player):
moves = []
k_x = None
k_y = None
K_x = None
K_y = None
face_to_face = False
board_positions = np.array(GameBoard.board_to_pos_name(state))
for y in range(board_positions.shape[0]):
for x in range(len(board_positions[y])):
if(board_positions[y][x].isalpha()):
if(board_positions[y][x] == 'r' and current_player == 'b'):
toY = y
for toX in range(x - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
moves.append(m)
for toX in range(x + 1, GameBoard.Nx):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
moves.append(m)
toX = x
for toY in range(y - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
moves.append(m)
for toY in range(y + 1, GameBoard.Ny):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].isupper()):
moves.append(m)
break
moves.append(m)
elif(board_positions[y][x] == 'R' and current_player == 'w'):
toY = y
for toX in range(x - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
moves.append(m)
for toX in range(x + 1, GameBoard.Nx):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
moves.append(m)
toX = x
for toY in range(y - 1, -1, -1):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
moves.append(m)
for toY in range(y + 1, GameBoard.Ny):
m = GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX]
if (board_positions[toY][toX].isalpha()):
if (board_positions[toY][toX].islower()):
moves.append(m)
break
moves.append(m)
elif ((board_positions[y][x] == 'n' or board_positions[y][x] == 'h') and current_player == 'b'):
for i in range(-1, 3, 2):
for j in range(-1, 3, 2):
toY = y + 2 * i
toX = x + 1 * j
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=False) and board_positions[toY - i][x].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + 1 * i
toX = x + 2 * j
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=False) and board_positions[y][toX - j].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif ((board_positions[y][x] == 'N' or board_positions[y][x] == 'H') and current_player == 'w'):
for i in range(-1, 3, 2):
for j in range(-1, 3, 2):
toY = y + 2 * i
toX = x + 1 * j
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=True) and board_positions[toY - i][x].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + 1 * i
toX = x + 2 * j
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX], upper=True) and board_positions[y][toX - j].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif ((board_positions[y][x] == 'b' or board_positions[y][x] == 'e') and current_player == 'b'):
for i in range(-2, 3, 4):
toY = y + i
toX = x + i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 5 and \
board_positions[y + i // 2][x + i // 2].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + i
toX = x - i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 5 and \
board_positions[y + i // 2][x - i // 2].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif ((board_positions[y][x] == 'B' or board_positions[y][x] == 'E') and current_player == 'w'):
for i in range(-2, 3, 4):
toY = y + i
toX = x + i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=True) and toY <= 4 and \
board_positions[y + i // 2][x + i // 2].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + i
toX = x - i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=True) and toY <= 4 and \
board_positions[y + i // 2][x - i // 2].isalpha() == False:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif (board_positions[y][x] == 'a' and current_player == 'b'):
for i in range(-1, 3, 2):
toY = y + i
toX = x + i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 7 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = y + i
toX = x - i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=False) and toY >= 7 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
elif (board_positions[y][x] == 'A' and current_player == 'w'):
for i in range(-1, 3, 2):
toY = y + i
toX = x + i
if GameBoard.check_bounds(toY, toX) and GameBoard.validate_move(board_positions[toY][toX],
upper=True) and toY <= 2 and toX >= 3 and toX <= 5:
moves.append(GameBoard.board_pos_name[y][x] + GameBoard.board_pos_name[toY][toX])
toY = | |
# Source: vishaltiwari/XNet -- tools/thermo/plot_expansion_no_parser.py
import os,sys,itertools
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import matplotlib.gridspec as gridspec
import matplotlib.axes as plax
from datetime import datetime
from scipy import interpolate
from scipy import signal
import scipy.io as scio
import numpy.matlib
from math import floor
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
import md5, sha
from get_time_input import get_time_input
from read_tracer_temp import *
from read_tracer_fate import *
'''Port of a MATLAB script that imported all data via a parser: here we
initialize the equivalent variables and call the helper functions directly.'''
# --- Model selection, interpolation time grid, and tracer input --------
model_mass = '12'
model_name = 'B'+str(model_mass)+'-WH07'
time_start = 0.0
time_bounce = 2.63176547E-01
time_final = 1.67357
N = 5000
N_bounce = 10
# Non-uniform time grid: N points overall, refined near bounce.
time_interp = get_time_input(time_start, time_final, time_bounce, N, N_bounce)
# NOTE(review): hard-coded Windows paths to one user's MATLAB data tree.
path_temp = 'C:\\Users\\taylor\\Documents\\MATLAB\\B12\\B12-WH07\\full_profile\\p_row44\\temp_S12-0'
path_fate = 'C:\\Users\\taylor\\Documents\\MATLAB\\B12\\B12-WH07\\full_profile\\p_row44\\fate_S12-0'
# Tracer particle ids to process.
plist = np.arange(1721,1761)
temp_fname_base = path_temp
fate_fname_base = path_fate
# Read hydro quantities for every tracer, interpolated onto time_interp.
# (Note: time_, temp, density, ye are immediately overwritten by the
# read_tracer_temp call below.)
(time_,radius,theta,v_rad,v_theta,temp,density,ye,enpy,pe_int,pe_bind,press,lapse,dpe_nuc,dpe_neut) = read_tracer_fate(fate_fname_base, plist, time_interp)
(time_, temp, density, ye, flxtot, nu_temp) = read_tracer_temp(temp_fname_base, plist, time_interp)
# --- Extrapolation / smoothing parameters ------------------------------
time_extrap_min = 0.0025
time_extrap_max = 0.15
adiabatic_tol = 0.05
temp_nse = 8.0            # NSE threshold temperature [GK]
temp_final = 0.5          # temperature at which post-processing stops [GK]
sgwin_tau = 0.025         # Savitzky-Golay window [s] for the density timescale
sgorder_tau = 6
sgwin_adi = 0.05          # Savitzky-Golay window [s] for the adiabatic curve
sgorder_adi = 2
tau_rho_min = 0.01
tau_rho_max = 1.0
min_jumpdiff = 5
change_min = 0.001
t_extend = 0.0
temp_min = 0.02
output_tdel = 0.0
nu_time_stop = 10.0
# Last time each tracer's temperature leaves the NSE regime.
t_start_array = []
# For each particle, find indices where its temp exceeds the NSE temp and
# take the time of the last such index (0 if it never reaches NSE).
for k in range(0,len(plist)):
    temp_per_p = temp[:,k]
    index = np.where(temp_per_p>=temp_nse)[0]
    # NOTE(review): np.where always returns an ndarray, so this int
    # branch looks dead; the ndarray branch below is the live path.
    if type(index) == int:
        if index or index==0:
            i = int(index[len(index)-1])
            t_start = time_[i]
            t_start_array = np.append(t_start_array,t_start)
        else:
            t_start_array = np.append(t_start_array,0)
    # `numpy` is in scope via the `import numpy.matlib` at the top.
    elif type(index) == numpy.ndarray:
        if bool(index.any()):
            i = int(index[len(index)-1])
            t_start = time_[i]
            t_start_array = np.append(t_start_array,t_start)
        else:
            t_start_array = np.append(t_start_array,0)
#times from which to start extrapolation if extrap_flag==true:
t_stop_array = [time_[len(time_)-1]] #default
#Initialize more variables:
print_flag = False
write_flag = True
extrap_flag = True
plot_flag = True
prc_min = 25
prc_max = 75
time_extrap_max0 = time_extrap_max;
#Make sure t_stop_array is the final time value.
if t_stop_array[0] == 0.0:
t_stop_array[0] = time_[len(time_)]
if time_final > 0.0:
#t_stop_array[t_stop_array > time_final] = time_final
for x in range(0,len(t_stop_array)):
if t_stop_array[x] > time_final:
t_stop_array[x] = time_final
plot_folder = '.\\' #unnecessary in OG code
if print_flag:
plot_flag = True
if not os.path.isdir(plot_folder):
os.mkdir(plot_folder,0777)
if write_flag:
profile_folder = os.getcwd()
if not os.path.isdir(profile_folder):
os.mkdir(profile_folder)
th_format = [np.tile('{:15.7e}',(1, 4)), np.tile('{:12.3e}', (1, 8)), '\n' ]
th_fname_base = profile_folder+'\\th_profile\\th-'
adiabatic_string = "%s%g%s" % (r'${\Delta}(T{\rho}^{-1/3}) <$ ',adiabatic_tol,r'$\times(T{\rho}^{-1/3})_{\mathrm{f}}$' )
cm = plt.get_cmap('gist_rainbow')
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.set_color_cycle([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
#for i in range(NUM_COLORS):
#ax.plot(np.arange(10)*(i+1))
color_array = []
NUM_COLORS = len(t_stop_array)
#Assign colors to an array for plotting later: 1 color per time value to start
#extrapolation from.
if len(t_stop_array) > 6:
for k in range(NUM_COLORS):
color_array = np.append(color_array, cm(1.*k/NUM_COLORS))
color_array = color_array.reshape(NUM_COLORS,4)
elif len(t_stop_array) < 1:
color_array=[]
elif len(t_stop_array)==1:
color_array=[('0.','0.','1.','1.')]
elif len(t_stop_array)==2:
color_array=[[('0.','0.','1.','1.')], [('1.','0.','0.','1.')]]
elif len(t_stop_array)==3:
color_array=[[('0.','0.','1.','1.')], [('1.','0.','0.','1.')], [('0.','0.5','0.','1.')]]
elif len(t_stop_array)==4:
color_array=[[('0.','0.','1.','1.')], [('1.','0.','0.','1.')], [('0.','0.','0.5','1.')], [('0.','0.75','0.75','1.')]]
elif len(t_stop_array)==5:
color_array=[[('0.','0.','1.','1.')], [('1.','0.','0.','1.')], [('0.','0.','0.5','1.')], [('0.','0.75','0.75','1.')], [('0.75','0.','0.75','1.')]]
elif len(t_stop_array)==6:
color_array=[[('0.','0.','1.','1.')], [('1.','0.','0.','1.')], [('0.','0.','0.5','1.')], [('0.','0.75','0.75','1.')], [('0.75','0.','0.75','1.')], [('0.75','0.75','0.','1.')]]
a = np.arange(0,1)
#Set t_start_array value equal to the initial for every particle if len==1.
if len(t_start_array) == 1 and len(plist) > 1:
for i in range(1,len(plist)):
t_start_array = np.concatenate((t_start_array,a))
t_start_array[i] = t_start_array[0]
i_peak = np.ones(plist.shape)
#temp_peak is an array of the maximum temp values per particle
temp_peak = np.zeros((1,len(plist)))
for _id in range(0,len(plist)):
temp_peak[0,_id] = np.max(temp[:,_id])
for id_ in range (0,len(plist)):
if len(plist) == 1:
p_id = 0
else:
p_id = id_
if t_start_array[p_id] > 0.0 :
time_minus_t_start = abs( time_ - t_start_array[p_id] )
i_nse = np.argmin(time_minus_t_start)
i_peak[p_id] = i_nse
else:
i_nse = 0
if temp_peak[0,p_id] >= temp_nse and temp[len(time_)-1,p_id] < temp_nse:
i_nse = np.where(temp[:,p_id] > temp_nse)[0]
if i_nse or i_nse==0:
i_nse = int(i_nse[len(i_nse)-1])
elif temp[len(temp)-1,p_id] >= temp_nse:
i_nse = len(time_)-1
elif temp_peak[0,p_id] < temp_nse:
i_nse = 0
if type(i_nse) == int:
if i_nse or i_nse==0:
t_start_array[p_id] = time_[i_nse]
i_peak[p_id] = i_nse
else:
if i_nse.any():
t_start_array[p_id] = time_[i_nse]
i_peak[p_id] = i_nse
tau = np.zeros((len(t_stop_array),len(plist)))
th_out = 0
for _id in range(0,len(plist)):
#for _id in range(3,4):
if len(plist) == 1:
p_id = 0
else:
p_id = _id
print p_id
#Reset extrap flag to initial value
extrap = extrap_flag
t_stop_max = np.max(t_stop_array)
time_minus_t_stop = abs( time_ - t_stop_max)
istop = np.argmin(time_minus_t_stop)
if temp_final >= temp[istop,p_id]:
temp_over_temp_final = np.where(temp[:,p_id] >= temp_final)[0]
temp_over_temp_final = temp_over_temp_final[len(temp_over_temp_final)-1]
if (np.all(temp_over_temp_final==0)):
istop = temp_over_temp_final[0]
t_stop = time_[istop]
extrap = False
elif temp[istop,p_id] >= temp_nse:
t_stop = time_[istop]
extrap = False
else:
t_stop = time_[istop]
#This is the time which is written to the profile to begin post-processing
t_start = t_start_array[p_id]
if istop <= i_peak[p_id]:
peak = 1
else:
peak = i_peak[p_id]
#This is the time from which we may extrapolate
t_peak = time_[peak]
if extrap or plot_flag:
rho = density[:,p_id]
max_peak_times = max(t_stop - time_extrap_max, t_peak)
time_difference = abs(time_-max_peak_times)
iwin_min = np.argmin(time_difference)
span_tau = floor( sgwin_tau * (istop - iwin_min + 1) / ( time_[istop] - time_[iwin_min] ) )
span_tau = int(span_tau)
if (span_tau%2) == 0:
span_tau = span_tau + 1
span_tau = max( span_tau, sgorder_tau+1 )
iwin_min = max( iwin_min-span_tau, 1 )
iwin_max = min( istop+span_tau, len(time_) )
rho[iwin_min:iwin_max] = signal.savgol_filter(density[iwin_min:iwin_max, p_id], span_tau, sgorder_tau)
#rho(iwin_min:iwin_max) = smooth( time(iwin_min:iwin_max), density(iwin_min:iwin_max,p_id), span_tau, 'sgolay', sgorder_tau );
rhodot = (np.gradient(rho))/(np.gradient(time_))
tau_rho = (-rho) / rhodot
adiabatic_raw = 0.34 * (temp[:,p_id]**3) / density[:,p_id]
adiabatic = adiabatic_raw
iwin_min = np.argmin(time_difference)
span_adi = floor( sgwin_adi * (istop - iwin_min + 1) / ( time_[istop] - time_[iwin_min] ) )
span_adi = int(span_adi)
if (span_adi%2) == 0:
span_adi = span_adi + 1
span_adi = max( span_adi, sgorder_adi+1 )
iwin_min = max( iwin_min-span_adi, 1 )
iwin_max = min( istop+span_adi, len(time_) )
adiabatic[iwin_min:iwin_max] = signal.savgol_filter(adiabatic_raw[iwin_min:iwin_max], span_adi, sgorder_adi )
# rhodot = gradient( density(:,p_id), time );
# rhodot = [ 0; diff(density(:,p_id)) ./ diff(time) ];
# rhodot = deriv_3pt( density(:,p_id), time );
# [~,span_tau] = min( abs( time - t_stop + sgwin_tau ) );
# span_tau = max( istop - span_tau + 1, sgorder_tau + 1 )
# tic;
# rho = smooth( time, density(:,p_id), span_tau, 'sgolay', sgorder_tau );
# toc;
# rhodot = gradient( rho, time );
# rhodot = deriv_5pt( rho, time );
#
# tau_rho = -density(:,p_id) ./ rhodot;
# tau_rho = -rho ./ rhodot;
#
# temp_smooth = smooth( time, temp(:,p_id), sgwin_adi, 'sgolay', sgorder_adi );
#
# [~,span_adi] = min( abs( time - t_stop + sgwin_adi ) );
# span_adi = max( istop - span_adi + 1, sgorder_adi + 1 )
#
# adiabatic_raw = 0.34 * temp(:,p_id).^3 ./ density(:,p_id);
# adiabatic_raw = temp_smooth .* rho.^(-1/3);
#
# tic;
# adiabatic = smooth( time, adiabatic_raw, span_adi, 'sgolay', sgorder_adi );
# toc;
# adiabatic = smooth( adiabatic_raw, 'rlowess', sgwin_adi );
# adiabatic = adiabatic_raw;
if plot_flag:
# tau_rho_avg = smooth( tau_rho, 'rlowess' );
# tau_rho_avg = smooth( tau_rho, sgwin_tau, 'sgolay', sgorder_tau);
tau_rho_avg = tau_rho
h_ratio = [3,1]
total_height = h_ratio[0] + h_ratio[1]
gs = gridspec.GridSpec(total_height, 1)
fig_h = plt.figure()
axis_h1 = plt.subplot(gs[:h_ratio[0], :])
axis_h1 = plt.gca()
plax.Axes.set_yscale(axis_h1,'linear')
ylim1 = plax.Axes.set_ylim(axis_h1,0,temp_nse)
plt.yticks(np.arange(0,9))
plax.Axes.set_xscale(axis_h1,'linear')
xlim1 = plax.Axes.set_xlim(axis_h1,t_peak-time_bounce, t_stop-time_bounce)
axis_h1.grid(True)
#plt.minorticks_on()
#plt.tick_params(axis='y', which='minor', direction='out')
axis_h2 = plt.subplot(gs[h_ratio[0],:],sharex=axis_h1)
plt.subplots_adjust(wspace=0, hspace=0)
axis_h2 = plt.gca()
plax.Axes.set_yscale(axis_h2,'linear')
axis_h2 = plt.gca()
plax.Axes.set_ylim(axis_h2,-2.9999,2.9999)
plt.yticks([-2.0,2.0])
plax.Axes.set_xscale(axis_h2,'linear')
plax.Axes.set_xlim(axis_h2,t_peak-time_bounce, t_stop-time_bounce)
axis_h2.grid(True)
#plt.minorticks_on()
#plt.tick_params(axis='x', which='minor', direction='out')
if time_bounce != 0.0:
axis_h2.set_xlabel('Time after bounce [s]' )
else:
axis_h2.set_xlabel('Elapsed time [s]' )
axis_h1.set_ylabel('Temperature [GK]')
axis_h2.set_ylabel('Expansion Timescale [s]')
#Plot the non-extrapolated temperature profile
handle1, = axis_h1.plot((time_[peak:istop] - time_bounce),temp[peak:istop,p_id],'k-', linewidth = 1.5, label='Temperature')
##############
raw_data_dict = {}
data_path = 'C:\\Users\\taylor\\Documents\\MATLAB\\B12\\B12-WH07\\raw_data'
raw_data_dict = scio.loadmat(data_path, appendmat=True, variable_names=('temp','time'))
temp_from_raw = raw_data_dict['temp']
time_from_raw = raw_data_dict['time']
'''
axis_h1.plot((time_from_raw[peak:istop]-time_bounce),temp_from_raw[peak:istop,p_id],'r--',linewidth=1.5)
#^We will leave this up for now, but this reads and plots MATLAB data for comparison.
'''##############
#Plot the expansion timescale: change back to 'k--'
handle2, = axis_h2.plot((time_[(peak+5):istop]-time_bounce),tau_rho_avg[(peak+5):istop],'k--',label=r'$\tau_{\mathrm{exp}}$')
'''#########
#This reads tau_rho_avg per particle from the MATLAB script and plots for comparison.
tau_rho_avg_dict = {}
data_path = 'C:\\Users\\taylor\\Documents\\MATLAB\\B12\\B12-WH07\\rho_values'+str(p_id+1)
tau_rho_avg_dict = scio.loadmat(data_path, appendmat=True, variable_names=('tau_rho_avg'))
tau_rho_avg_tracer1 = tau_rho_avg_dict['tau_rho_avg']
axis_h2.plot((time_from_raw[(peak+5):istop]-time_bounce),tau_rho_avg_tracer1[(peak+5):istop],'r--')
'''###########
axis_h3 = axis_h1.twinx()
ylim3=plax.Axes.set_ylim(axis_h3,min(adiabatic[peak:istop]),max(adiabatic[peak:istop]))
#Plot the isentropic curve. Change back to 'k-.' when no longer comparing!
handle3, = axis_h3.plot(time_[peak:istop]-time_bounce, adiabatic[peak:istop],'k-.',linewidth=0.5, label=r'$0.34 T_{9}^{3} / \rho_{5}$')
num_ticks = len(axis_h1.get_yticks())
ytick = np.linspace( ylim3[0], ylim3[1], num_ticks - 2)
plax.Axes.set_yticks(axis_h3,ytick)
plax.Axes.set_ylim(axis_h3, ylim3[0] - (ytick[1]-ytick[0]), ylim3[1] + (ytick[len(ytick)-1]-ytick[len(ytick)-2]) )
axis_h3.set_ylabel(r'$0.34 T_{9}^{3} / \rho_{5}$')
#plax.Axes.set_yticks(axis_h3,np.linspace(axis_h3.get_yticks()[0],axis_h3.get_yticks()[-1],len(axis_h1.get_yticks())))
'''###########
#This reads adiabatic data from MATLAB script per particle and plots for comparison.
adiabatic_dict = {}
data_path | |
plot_format: Whether the plot is shown vertical or horizontal. \
Vertical is default and denoted as `vert` else horizontal is `hoz`
:type file_path: String
:type plot_format: String. Default 'vert'
:returns: Nothing. Saves the visual to the file path given.
:rtype: None
'''
if self.model is None:
raise ValueError('The model has to be fitted before being able '\
'to visulaise it.')
rankdir = 'TB'
if plot_format == 'hoz':
rankdir = 'LR'
plot_model(self.model, to_file=file_path, show_shapes=True,
show_layer_names=True, rankdir=rankdir)
    def __repr__(self):
        """Return the canonical name of this model architecture."""
        return 'LSTM'
class TDLSTM(LSTM):
    '''Target-Dependent LSTM: two LSTMs run over the left and right contexts
    of the target word and their outputs are concatenated for classification.
    '''
    def __init__(self, tokeniser, embeddings, pad_size=-1, lower=False,
                 inc_target=True):
        '''
        :param pad_size: Applies to both the right and left hand side. However \
        if -1 is set then the left and right maximum pad size is found \
        independently.
        :param inc_target: Whether the target word(s) are included in the \
        left and right contexts.
        :type pad_size: int
        :type inc_target: bool. Default True
        '''
        super().__init__(tokeniser, embeddings, pad_size=pad_size, lower=lower)
        # The left and right contexts are processed by two independent LSTMs,
        # hence each side keeps its own train and test pad size.
        self.left_pad_size = pad_size
        self.left_test_pad_size = 0
        self.right_pad_size = pad_size
        self.right_test_pad_size = 0
        self.inc_target = inc_target
    def load_model(self, model_arch_fp, model_weights_fp, verbose=0):
        '''Load a saved model and restore both context pad sizes.
        :param model_arch_fp: File path to the saved model architecture.
        :param model_weights_fp: File path to the saved model weights.
        :param verbose: Verbosity level passed to the parent loader.
        :rtype: None
        '''
        super().load_model(model_arch_fp, model_weights_fp, verbose=verbose)
        # The loaded architecture fixes the input lengths, therefore both
        # sides must use the pad size stored with the model.
        self.left_test_pad_size = self.pad_size
        self.right_test_pad_size = self.pad_size
    def predict(self, test_data):
        '''
        :param test_data: Test features. Specifically a list of dict like \
        structures that contain `text` key.
        :type test_data: list
        :returns: A list of predicted samples for the test data.
        :rtype: numpy.ndarray
        '''
        if self.model is None:
            raise ValueError('The model has not been fitted please run the '\
                             '`fit` method.')
        # Convert from a sequence of dictionaries into texts and then integers
        # that represent the tokens in the text within the embedding space.
        left_sequence, right_sequence = self._pre_process(test_data,
                                                          training=False)
        return self.model.predict({'left_text_input' : left_sequence,
                                   'right_text_input' : right_sequence})
    def _pre_process(self, data_dicts, training=False):
        '''Convert data dicts into padded left and right integer sequences.
        When `training` is True the pad sizes are learnt from the data,
        otherwise the previously stored test pad sizes are re-used.
        '''
        def context_texts(context_data_dicts):
            # Context returns all of the left and right context occurrences
            # therefore if a target is mentioned Twice and are associated then
            # for a single text two left and right occurrences are returned.
            # Thus these are a list of lists we therefore chose only the
            # first mentioned target as the paper linked to this method does
            # not specify which they used.
            left_texts = [context(data, 'left', inc_target=self.inc_target) \
                          for data in context_data_dicts]
            right_texts = [context(data, 'right', inc_target=self.inc_target) \
                           for data in context_data_dicts]
            left_texts = [texts[0] for texts in left_texts]
            right_texts = [texts[0] for texts in right_texts]
            return left_texts, right_texts
        # Convert from a sequence of dictionaries into texts and then integers
        # that represent the tokens in the text within the embedding space.
        # Get left and right contexts
        left_text, right_text = context_texts(data_dicts)
        if training:
            if self.model is not None:
                raise ValueError('When pre-process the data for training the '\
                                 'the model should be None not {}'\
                                 .format(self.model))
            left_pad_sequence = self.process_text(left_text, self.left_pad_size)
            self.left_test_pad_size, left_sequence = left_pad_sequence
            # The right context is padded/truncated at the end as it is read
            # towards the target word.
            right_pad_sequence = self.process_text(right_text, self.right_pad_size,
                                                   padding='post', truncate='post')
            self.right_test_pad_size, right_sequence = right_pad_sequence
            return left_sequence, right_sequence
        else:
            left_pad_sequence = self.process_text(left_text,
                                                  self.left_test_pad_size)
            _, left_sequence = left_pad_sequence
            right_pad_sequence = self.process_text(right_text,
                                                   self.right_test_pad_size,
                                                   padding='post', truncate='post')
            _, right_sequence = right_pad_sequence
            return left_sequence, right_sequence
    def create_training_text(self, train_data, validation_data):
        '''
        :param train_data: Training features. Specifically a list of dict like \
        structures that contain `text` key.
        :param validation_data: Validation features. Same structure as \
        `train_data`.
        :type train_data: list
        :type validation_data: list
        :returns: A tuple of length 2 containing the left and right context \
        sequences. Each value is a matrix of Integers that represent the words \
        in the text features where each Integer corresponds to a Word Vector \
        in the embedding vector. The first rows hold the training data and the \
        remaining rows the validation data.
        :rtype: tuple
        '''
        train_sequences = self._pre_process(train_data, training=True)
        left_sequence_train, right_sequence_train = train_sequences
        validation_sequences = self._pre_process(validation_data, training=False)
        left_sequence_val, right_sequence_val = validation_sequences
        # Stack the validation data below the training data to comply with
        # Keras' `validation_split` which takes the last fraction of the data.
        left_data = np.vstack((left_sequence_train, left_sequence_val))
        right_data = np.vstack((right_sequence_train, right_sequence_val))
        return left_data, right_data
    def fit(self, train_data, train_y, validation_size=0.2, verbose=0,
            reproducible=True, embedding_layer_trainable=False,
            lstm_dimension=None, optimiser=None, patience=None,
            batch_size=32, epochs=100, org_initialisers=True):
        '''
        :param train_data: Training features. Specifically a list of dict like \
        structures that contain `text` key.
        :param train_y: Target values of the training data
        :param validation_size: The fraction of the training data to be set aside \
        for validation data
        :param verbose: Verbosity of the training the model. 0=silent, \
        1=progress bar, and 2=one line per epoch
        :param reproducible: Whether or not to make the model to be reproducible. \
        This will slow down the training.
        :param embedding_layer_trainable: Whether the word embeddings weights \
        are updated during training.
        :param lstm_dimension: Output of the LSTM layer. If None it is the \
        which is the default then the dimension will be the same as the \
        embedding vector.
        :param optimiser: Optimiser to for the LSTM default is SGD. Accepts any \
        `keras optimiser <https://keras.io/optimizers/>`_.
        :param patience: Whether or not to use EarlyStopping default is not \
        stated by the None value. If so this is the patience value e.g. 5.
        :param batch_size: Number of samples per gradient update
        :param epochs: Number of epochs to train the model.
        :param org_initialisers: Whether to use the original weight initializers \
        that were stated in the paper. If False then use Keras default initializers.
        :type train_data: list
        :type train_y: list
        :type validation_size: float. Default 0.2
        :type verbose: int. Default 1
        :type reproducible: bool. Default True.
        :type embedding_layer_trainable: bool. Default False
        :type lstm_dimension: int. Default None
        :type optimiser: Keras optimiser. Default None which uses SDG.
        :type patience: int. Default None.
        :type batch_size: int. Default 32
        :type epochs: int. Default 100.
        :type org_initialisers: bool. Default True
        :returns: Nothing. The self.model will be fitted.
        :rtype: None
        '''
        self.model = None
        self._to_be_reproducible(reproducible)
        # Data pre-processing
        data = self.validation_split(train_data, train_y,
                                     validation_size=validation_size,
                                     reproducible=reproducible)
        temp_train, temp_train_y, validation_data, validation_y = data
        left_data, right_data = self.create_training_text(temp_train, validation_data)
        all_y = self.create_training_y(temp_train_y, validation_y)
        num_classes = all_y.shape[1]
        # LSTM model
        embedding_matrix = self.embeddings.embedding_matrix
        vocab_size, vector_size = embedding_matrix.shape
        if lstm_dimension is None:
            lstm_dimension = vector_size
        if optimiser is None:
            optimiser = optimizers.SGD(lr=0.01)
        # Optional original-paper weight initialisers. NOTE: building the
        # initialiser kwargs up front and creating every layer exactly once
        # fixes a bug in the previous implementation where the re-created
        # initialised layers were orphaned (the model graph still flowed
        # through the first, default-initialised layers), so the
        # `org_initialisers` flag silently had no effect.
        embedding_init = {}
        lstm_init = {}
        dense_init = {}
        if org_initialisers:
            uniform_init = initializers.RandomUniform(minval=-0.003, maxval=0.003)
            lstm_init = {'kernel_initializer' : uniform_init,
                         'recurrent_initializer' : uniform_init,
                         'bias_initializer' : uniform_init}
            dense_init = {'kernel_initializer' : uniform_init,
                          'bias_initializer' : uniform_init}
            embedding_init = {'embeddings_initializer' : uniform_init}
        # Model layers
        # Left LSTM
        left_input = layers.Input(shape=(self.left_test_pad_size,),
                                  name='left_text_input')
        left_embedding_layer = layers\
                               .Embedding(input_dim=vocab_size,
                                          output_dim=vector_size,
                                          input_length=self.left_test_pad_size,
                                          trainable=embedding_layer_trainable,
                                          weights=[embedding_matrix],
                                          name='left_embedding_layer',
                                          **embedding_init)(left_input)
        left_lstm_layer = layers.LSTM(lstm_dimension, name='left_lstm_layer',
                                      **lstm_init)(left_embedding_layer)
        # Right LSTM
        right_input = layers.Input(shape=(self.right_test_pad_size,),
                                   name='right_text_input')
        right_embedding_layer = layers\
                                .Embedding(input_dim=vocab_size,
                                           output_dim=vector_size,
                                           input_length=self.right_test_pad_size,
                                           trainable=embedding_layer_trainable,
                                           weights=[embedding_matrix],
                                           name='right_embedding_layer',
                                           **embedding_init)(right_input)
        right_lstm_layer = layers.LSTM(lstm_dimension, name='right_lstm_layer',
                                       **lstm_init)(right_embedding_layer)
        # Merge the outputs of the left and right LSTMs
        merge_layer = layers.concatenate([left_lstm_layer, right_lstm_layer],
                                         name='left_right_lstm_merge')
        predictions = layers.Dense(num_classes, activation='softmax',
                                   name='output', **dense_init)(merge_layer)
        model = models.Model(inputs=[left_input, right_input],
                             outputs=predictions)
        model.compile(optimizer=optimiser, metrics=['accuracy'],
                      loss='categorical_crossentropy')
        with tempfile.NamedTemporaryFile() as weight_file:
            # Set up the callbacks
            callbacks = None
            if patience is not None:
                model_checkpoint = ModelCheckpoint(weight_file.name,
                                                   monitor='val_loss',
                                                   save_best_only=True,
                                                   save_weights_only=True,
                                                   mode='min')
                early_stopping = EarlyStopping(monitor='val_loss', mode='min',
                                               patience=patience)
                callbacks = [early_stopping, model_checkpoint]
            history = model.fit([left_data, right_data], all_y,
                                validation_split=validation_size,
                                epochs=epochs, callbacks=callbacks,
                                verbose=verbose, batch_size=batch_size)
            # Load the best model from the saved weight file
            if patience is not None:
                model.load_weights(weight_file.name)
        self.model = model
        return history
    def __repr__(self):
        """Return the canonical name of this model architecture."""
        return 'TDLSTM'
class TCLSTM(TDLSTM):
def __init__(self, tokeniser, embeddings, pad_size=-1, lower=False,
inc_target=True):
'''
:param pad_size: Applies to both the right and left hand side. However \
if -1 is set then the left and right maximum pad size is found \
independently.
:type pad_size: int
'''
super().__init__(tokeniser, embeddings, pad_size=pad_size, lower=lower)
self.left_pad_size = pad_size
self.left_test_pad_size = 0
self.right_pad_size = pad_size
self.right_test_pad_size = 0
self.inc_target = inc_target
def predict(self, test_data):
'''
:param test_y: Test features. Specifically a list of dict like \
structures that contain `text` key.
:type test_y: list
:returns: A list of predicted samples for the test data.
:rtype: numpy.ndarray
'''
if self.model is None:
raise ValueError('The model has not been fitted please | |
= Constraint(expr= m.b4 - m.b5 + m.b91 <= 1)
m.c68 = Constraint(expr= m.b4 - m.b6 + m.b92 <= 1)
m.c69 = Constraint(expr= m.b4 - m.b7 + m.b93 <= 1)
m.c70 = Constraint(expr= m.b4 - m.b8 + m.b94 <= 1)
m.c71 = Constraint(expr= m.b4 - m.b9 + m.b95 <= 1)
m.c72 = Constraint(expr= m.b4 - m.b10 + m.b96 <= 1)
m.c73 = Constraint(expr= m.b4 - m.b11 + m.b97 <= 1)
m.c74 = Constraint(expr= m.b4 - m.b12 + m.b98 <= 1)
m.c75 = Constraint(expr= m.b4 - m.b13 + m.b99 <= 1)
m.c76 = Constraint(expr= m.b4 - m.b14 + m.b100 <= 1)
m.c77 = Constraint(expr= m.b4 - m.b15 + m.b101 <= 1)
m.c78 = Constraint(expr= m.b4 - m.b16 + m.b102 <= 1)
m.c79 = Constraint(expr= m.b4 - m.b17 + m.b103 <= 1)
m.c80 = Constraint(expr= m.b4 - m.b18 + m.b104 <= 1)
m.c81 = Constraint(expr= m.b4 - m.b19 + m.b105 <= 1)
m.c82 = Constraint(expr= m.b4 - m.b20 + m.b106 <= 1)
m.c83 = Constraint(expr= m.b4 - m.b21 + m.b107 <= 1)
m.c84 = Constraint(expr= m.b4 - m.b22 + m.b108 <= 1)
m.c85 = Constraint(expr= m.b4 - m.b23 + m.b109 <= 1)
m.c86 = Constraint(expr= m.b4 - m.b24 + m.b110 <= 1)
m.c87 = Constraint(expr= m.b5 - m.b6 + m.b111 <= 1)
m.c88 = Constraint(expr= m.b5 - m.b7 + m.b112 <= 1)
m.c89 = Constraint(expr= m.b5 - m.b8 + m.b113 <= 1)
m.c90 = Constraint(expr= m.b5 - m.b9 + m.b114 <= 1)
m.c91 = Constraint(expr= m.b5 - m.b10 + m.b115 <= 1)
m.c92 = Constraint(expr= m.b5 - m.b11 + m.b116 <= 1)
m.c93 = Constraint(expr= m.b5 - m.b12 + m.b117 <= 1)
m.c94 = Constraint(expr= m.b5 - m.b13 + m.b118 <= 1)
m.c95 = Constraint(expr= m.b5 - m.b14 + m.b119 <= 1)
m.c96 = Constraint(expr= m.b5 - m.b15 + m.b120 <= 1)
m.c97 = Constraint(expr= m.b5 - m.b16 + m.b121 <= 1)
m.c98 = Constraint(expr= m.b5 - m.b17 + m.b122 <= 1)
m.c99 = Constraint(expr= m.b5 - m.b18 + m.b123 <= 1)
m.c100 = Constraint(expr= m.b5 - m.b19 + m.b124 <= 1)
m.c101 = Constraint(expr= m.b5 - m.b20 + m.b125 <= 1)
m.c102 = Constraint(expr= m.b5 - m.b21 + m.b126 <= 1)
m.c103 = Constraint(expr= m.b5 - m.b22 + m.b127 <= 1)
m.c104 = Constraint(expr= m.b5 - m.b23 + m.b128 <= 1)
m.c105 = Constraint(expr= m.b5 - m.b24 + m.b129 <= 1)
m.c106 = Constraint(expr= m.b6 - m.b7 + m.b130 <= 1)
m.c107 = Constraint(expr= m.b6 - m.b8 + m.b131 <= 1)
m.c108 = Constraint(expr= m.b6 - m.b9 + m.b132 <= 1)
m.c109 = Constraint(expr= m.b6 - m.b10 + m.b133 <= 1)
m.c110 = Constraint(expr= m.b6 - m.b11 + m.b134 <= 1)
m.c111 = Constraint(expr= m.b6 - m.b12 + m.b135 <= 1)
m.c112 = Constraint(expr= m.b6 - m.b13 + m.b136 <= 1)
m.c113 = Constraint(expr= m.b6 - m.b14 + m.b137 <= 1)
m.c114 = Constraint(expr= m.b6 - m.b15 + m.b138 <= 1)
m.c115 = Constraint(expr= m.b6 - m.b16 + m.b139 <= 1)
m.c116 = Constraint(expr= m.b6 - m.b17 + m.b140 <= 1)
m.c117 = Constraint(expr= m.b6 - m.b18 + m.b141 <= 1)
m.c118 = Constraint(expr= m.b6 - m.b19 + m.b142 <= 1)
m.c119 = Constraint(expr= m.b6 - m.b20 + m.b143 <= 1)
m.c120 = Constraint(expr= m.b6 - m.b21 + m.b144 <= 1)
m.c121 = Constraint(expr= m.b6 - m.b22 + m.b145 <= 1)
m.c122 = Constraint(expr= m.b6 - m.b23 + m.b146 <= 1)
m.c123 = Constraint(expr= m.b6 - m.b24 + m.b147 <= 1)
m.c124 = Constraint(expr= m.b7 - m.b8 + m.b148 <= 1)
m.c125 = Constraint(expr= m.b7 - m.b9 + m.b149 <= 1)
m.c126 = Constraint(expr= m.b7 - m.b10 + m.b150 <= 1)
m.c127 = Constraint(expr= m.b7 - m.b11 + m.b151 <= 1)
m.c128 = Constraint(expr= m.b7 - m.b12 + m.b152 <= 1)
m.c129 = Constraint(expr= m.b7 - m.b13 + m.b153 <= 1)
m.c130 = Constraint(expr= m.b7 - m.b14 + m.b154 <= 1)
m.c131 = Constraint(expr= m.b7 - m.b15 + m.b155 <= 1)
m.c132 = Constraint(expr= m.b7 - m.b16 + m.b156 <= 1)
m.c133 = Constraint(expr= m.b7 - m.b17 + m.b157 <= 1)
m.c134 = Constraint(expr= m.b7 - m.b18 + m.b158 <= 1)
m.c135 = Constraint(expr= m.b7 - m.b19 + m.b159 <= 1)
m.c136 = Constraint(expr= m.b7 - m.b20 + m.b160 <= 1)
m.c137 = Constraint(expr= m.b7 - m.b21 + m.b161 <= 1)
m.c138 = Constraint(expr= m.b7 - m.b22 + m.b162 <= 1)
m.c139 = Constraint(expr= m.b7 - m.b23 + m.b163 <= 1)
m.c140 = Constraint(expr= m.b7 - m.b24 + m.b164 <= 1)
m.c141 = Constraint(expr= m.b8 - m.b9 + m.b165 <= 1)
m.c142 = Constraint(expr= m.b8 - m.b10 + m.b166 <= 1)
m.c143 = Constraint(expr= m.b8 - m.b11 + m.b167 <= 1)
m.c144 = Constraint(expr= m.b8 - m.b12 + m.b168 <= 1)
m.c145 = Constraint(expr= m.b8 - m.b13 + m.b169 <= 1)
m.c146 = Constraint(expr= m.b8 - m.b14 + m.b170 <= 1)
m.c147 = Constraint(expr= m.b8 - m.b15 + m.b171 <= 1)
m.c148 = Constraint(expr= m.b8 - m.b16 + m.b172 <= 1)
m.c149 = Constraint(expr= m.b8 - m.b17 + m.b173 <= 1)
m.c150 = Constraint(expr= m.b8 - m.b18 + m.b174 <= 1)
m.c151 = Constraint(expr= m.b8 - m.b19 + m.b175 <= 1)
m.c152 = Constraint(expr= m.b8 - m.b20 + m.b176 <= 1)
m.c153 = Constraint(expr= m.b8 - m.b21 + m.b177 <= 1)
m.c154 = Constraint(expr= m.b8 - m.b22 + m.b178 <= 1)
m.c155 = Constraint(expr= m.b8 - m.b23 + m.b179 <= 1)
m.c156 = Constraint(expr= m.b8 - m.b24 + m.b180 <= 1)
m.c157 = Constraint(expr= m.b9 - m.b10 + m.b181 <= 1)
m.c158 = Constraint(expr= m.b9 - m.b11 + m.b182 <= 1)
m.c159 = Constraint(expr= m.b9 - m.b12 + m.b183 <= 1)
m.c160 = Constraint(expr= m.b9 - m.b13 + m.b184 <= 1)
m.c161 = Constraint(expr= m.b9 - m.b14 + m.b185 <= 1)
m.c162 = Constraint(expr= m.b9 - m.b15 + m.b186 <= 1)
m.c163 = Constraint(expr= m.b9 - m.b16 + m.b187 <= 1)
m.c164 = Constraint(expr= m.b9 - m.b17 + m.b188 <= 1)
m.c165 = Constraint(expr= m.b9 - m.b18 + m.b189 <= 1)
m.c166 = Constraint(expr= m.b9 - m.b19 + m.b190 <= 1)
m.c167 = Constraint(expr= m.b9 - m.b20 + m.b191 <= 1)
m.c168 = Constraint(expr= m.b9 - m.b21 + m.b192 <= 1)
m.c169 = Constraint(expr= m.b9 - m.b22 + m.b193 <= 1)
m.c170 = Constraint(expr= m.b9 - m.b23 + m.b194 <= 1)
m.c171 = Constraint(expr= m.b9 - m.b24 + m.b195 <= 1)
m.c172 = Constraint(expr= m.b10 - m.b11 + m.b196 <= 1)
m.c173 = Constraint(expr= m.b10 - m.b12 + m.b197 <= 1)
m.c174 = Constraint(expr= m.b10 - m.b13 + m.b198 <= 1)
m.c175 = Constraint(expr= m.b10 - m.b14 + m.b199 <= 1)
m.c176 = Constraint(expr= m.b10 - m.b15 + m.b200 <= 1)
m.c177 = Constraint(expr= m.b10 - m.b16 + m.b201 <= 1)
m.c178 = Constraint(expr= m.b10 - m.b17 + m.b202 <= 1)
m.c179 = Constraint(expr= m.b10 - m.b18 + m.b203 <= 1)
m.c180 = Constraint(expr= m.b10 - m.b19 + m.b204 <= 1)
m.c181 = Constraint(expr= m.b10 - m.b20 + m.b205 <= 1)
m.c182 = Constraint(expr= m.b10 - m.b21 + m.b206 <= 1)
m.c183 = Constraint(expr= m.b10 - m.b22 + m.b207 <= 1)
m.c184 = Constraint(expr= m.b10 - m.b23 + m.b208 <= 1)
m.c185 = Constraint(expr= m.b10 - m.b24 + m.b209 <= 1)
m.c186 = Constraint(expr= m.b11 - m.b12 + m.b210 <= 1)
m.c187 = Constraint(expr= m.b11 - m.b13 + m.b211 <= 1)
m.c188 = Constraint(expr= m.b11 - m.b14 + m.b212 <= 1)
m.c189 = Constraint(expr= m.b11 - m.b15 + m.b213 <= 1)
m.c190 = Constraint(expr= m.b11 - m.b16 + m.b214 <= 1)
m.c191 = Constraint(expr= m.b11 - m.b17 + m.b215 <= 1)
m.c192 = Constraint(expr= m.b11 - m.b18 + m.b216 <= 1)
m.c193 = Constraint(expr= m.b11 - m.b19 + m.b217 <= 1)
m.c194 = Constraint(expr= m.b11 - m.b20 + m.b218 <= 1)
m.c195 = Constraint(expr= m.b11 - m.b21 + m.b219 <= 1)
m.c196 = Constraint(expr= m.b11 - m.b22 + m.b220 <= 1)
m.c197 = Constraint(expr= m.b11 - m.b23 + m.b221 <= 1)
m.c198 = Constraint(expr= m.b11 - m.b24 + m.b222 <= 1)
m.c199 = Constraint(expr= m.b12 - m.b13 + m.b223 <= 1)
m.c200 = Constraint(expr= m.b12 - m.b14 + m.b224 <= 1)
m.c201 = Constraint(expr= m.b12 - m.b15 + m.b225 <= 1)
m.c202 = Constraint(expr= m.b12 - m.b16 + m.b226 <= 1)
m.c203 = Constraint(expr= m.b12 - m.b17 + m.b227 <= 1)
m.c204 = Constraint(expr= m.b12 - m.b18 + m.b228 <= 1)
m.c205 = Constraint(expr= m.b12 - m.b19 + m.b229 <= 1)
m.c206 = Constraint(expr= m.b12 - m.b20 + m.b230 <= 1)
m.c207 = Constraint(expr= m.b12 - m.b21 + m.b231 <= 1)
m.c208 = Constraint(expr= m.b12 - m.b22 + m.b232 <= 1)
m.c209 = Constraint(expr= | |
role mentions.
""" # noqa: E501 - Line too long
    @property
    @abc.abstractmethod
    def user_mentions(
        self,
    ) -> undefined.UndefinedOr[typing.Union[snowflakes.SnowflakeishSequence[users.PartialUser], bool]]:
        """Whether and what user mentions should be enabled for this response.
        Returns
        -------
        hikari.undefined.UndefinedOr[typing.Union[hikari.snowflakes.SnowflakeishSequence[hikari.users.PartialUser], builtins.bool]]
            Either a sequence of objects/IDs of the users mentions should be enabled for,
            `builtins.False` or `hikari.undefined.UNDEFINED` to disallow any user
            mentions or `builtins.True` to allow all user mentions.
        """  # noqa: E501 - Line too long
    @abc.abstractmethod
    def add_component(self: _T, component: ComponentBuilder, /) -> _T:
        """Add a component to this response.
        Parameters
        ----------
        component : ComponentBuilder
            The component builder to add to this response.
        Returns
        -------
        InteractionMessageBuilder
            Object of this builder to allow chained calls.
        """
    @abc.abstractmethod
    def add_embed(self: _T, embed: embeds_.Embed, /) -> _T:
        """Add an embed to this response.
        Parameters
        ----------
        embed : hikari.embeds.Embed
            Object of the embed to add to this response.
        Returns
        -------
        InteractionMessageBuilder
            Object of this builder to allow chained calls.
        """
    @abc.abstractmethod
    def set_content(self: _T, content: undefined.UndefinedOr[str], /) -> _T:
        """Set the response's message content.
        Parameters
        ----------
        content : hikari.undefined.UndefinedOr[builtins.str]
            The message content to set for this response, or
            `hikari.undefined.UNDEFINED` to leave it unset.
        Returns
        -------
        InteractionMessageBuilder
            Object of this builder to allow chained calls.
        """
    @abc.abstractmethod
    def set_flags(self: _T, flags: typing.Union[undefined.UndefinedType, int, messages.MessageFlag], /) -> _T:
        """Set message flags for this response.
        !!! note
            As of writing, the only message flag which can be set is EPHEMERAL.
        Parameters
        ----------
        flags : typing.Union[hikari.undefined.UndefinedType, builtins.int, hikari.messages.MessageFlag]
            The message flags to set for this response.
        Returns
        -------
        InteractionMessageBuilder
            Object of this builder to allow chained calls.
        """
    @abc.abstractmethod
    def set_tts(self: _T, tts: undefined.UndefinedOr[bool], /) -> _T:
        """Set whether this response should trigger text-to-speech processing.
        Parameters
        ----------
        tts : hikari.undefined.UndefinedOr[builtins.bool]
            Whether this response should trigger text-to-speech processing.
        Returns
        -------
        InteractionMessageBuilder
            Object of this builder to allow chained calls.
        """
    @abc.abstractmethod
    def set_mentions_everyone(self: _T, mentions: undefined.UndefinedOr[bool] = undefined.UNDEFINED, /) -> _T:
        """Set whether this response should be able to mention @everyone/@here.
        Parameters
        ----------
        mentions : hikari.undefined.UndefinedOr[builtins.bool]
            Whether this response should be able to mention @everyone/@here.
        Returns
        -------
        InteractionMessageBuilder
            Object of this builder to allow chained calls.
        """
    @abc.abstractmethod
    def set_role_mentions(
        self: _T,
        mentions: undefined.UndefinedOr[
            typing.Union[snowflakes.SnowflakeishSequence[guilds.PartialRole], bool]
        ] = undefined.UNDEFINED,
        /,
    ) -> _T:
        """Set whether and what role mentions should be possible for this response.
        Parameters
        ----------
        mentions : hikari.undefined.UndefinedOr[typing.Union[hikari.snowflakes.SnowflakeishSequence[hikari.guilds.PartialRole], builtins.bool]]
            Either a sequence of objects/IDs of the roles mentions should be enabled for,
            `builtins.False` or `hikari.undefined.UNDEFINED` to disallow any role
            mentions or `builtins.True` to allow all role mentions.
        Returns
        -------
        InteractionMessageBuilder
            Object of this builder to allow chained calls.
        """  # noqa: E501 - Line too long
    @abc.abstractmethod
    def set_user_mentions(
        self: _T,
        mentions: undefined.UndefinedOr[
            typing.Union[snowflakes.SnowflakeishSequence[users.PartialUser], bool]
        ] = undefined.UNDEFINED,
        /,
    ) -> _T:
        """Set whether and what user mentions should be possible for this response.
        Parameters
        ----------
        mentions : hikari.undefined.UndefinedOr[typing.Union[hikari.snowflakes.SnowflakeishSequence[hikari.users.PartialUser], builtins.bool]]
            Either a sequence of objects/IDs of the users mentions should be enabled for,
            `builtins.False` or `hikari.undefined.UNDEFINED` to disallow any user
            mentions or `builtins.True` to allow all user mentions.
        Returns
        -------
        InteractionMessageBuilder
            Object of this builder to allow chained calls.
        """  # noqa: E501 - Line too long
class CommandBuilder(abc.ABC):
    """Interface of a command builder used when bulk creating commands over REST."""
    __slots__: typing.Sequence[str] = ()
    @property
    @abc.abstractmethod
    def name(self) -> str:
        r"""Name to set for this command.
        !!! warning
            This should match the regex `^[\w-]{1,32}$` in Unicode mode
            and must be lowercase.
        Returns
        -------
        builtins.str
            The name to set for this command.
        """
    @property
    @abc.abstractmethod
    def type(self) -> undefined.UndefinedOr[commands.CommandType]:
        """Return the type of this command.
        Returns
        -------
        hikari.undefined.UndefinedOr[hikari.commands.CommandType]
            The type of this command if set, else `hikari.undefined.UNDEFINED`.
        """
    @property
    @abc.abstractmethod
    def id(self) -> undefined.UndefinedOr[snowflakes.Snowflake]:
        """ID of this command.
        Returns
        -------
        hikari.undefined.UndefinedOr[hikari.snowflakes.Snowflake]
            The ID of this command if set.
        """
    @property
    @abc.abstractmethod
    def default_permission(self) -> undefined.UndefinedOr[bool]:
        """Whether the command should be enabled by default (without any permissions).
        Defaults to `builtins.True`.
        Returns
        -------
        undefined.UndefinedOr[builtins.bool]
            Whether the command should be enabled by default (without any permissions).
        """
    @abc.abstractmethod
    def set_id(self: _T, id_: undefined.UndefinedOr[snowflakes.Snowflakeish], /) -> _T:
        """Set the ID of this command.
        Parameters
        ----------
        id_ : hikari.undefined.UndefinedOr[hikari.snowflakes.Snowflakeish]
            The ID to set for this command.
        Returns
        -------
        CommandBuilder
            Object of this command builder.
        """
    @abc.abstractmethod
    def set_default_permission(self: _T, state: undefined.UndefinedOr[bool], /) -> _T:
        """Whether this command should be enabled by default (without any permissions).
        Parameters
        ----------
        state : hikari.undefined.UndefinedOr[builtins.bool]
            Whether this command should be enabled by default.
        Returns
        -------
        CommandBuilder
            Object of this command builder for chained calls.
        """
    @abc.abstractmethod
    def build(self, entity_factory: entity_factory_.EntityFactory, /) -> data_binding.JSONObject:
        """Build a JSON object from this builder.
        Parameters
        ----------
        entity_factory : hikari.api.entity_factory.EntityFactory
            The entity factory to use to serialize entities within this builder.
        Returns
        -------
        hikari.internal.data_binding.JSONObject
            The built JSON object representation of this builder.
        """
class SlashCommandBuilder(CommandBuilder):
    """Builder interface for slash commands."""
    @property
    @abc.abstractmethod
    def description(self) -> str:
        """Return the description to set for this command.
        !!! warning
            This should be inclusively between 1-100 characters in length.
        Returns
        -------
        builtins.str
            The description to set for this command.
        """
    @property
    @abc.abstractmethod
    def options(self) -> typing.Sequence[commands.CommandOption]:
        """Sequence of up to 25 of the options set for this command.
        Returns
        -------
        typing.Sequence[hikari.commands.CommandOption]
            A sequence of up to 25 of the options set for this command.
        """
    @abc.abstractmethod
    def add_option(self: _T, option: commands.CommandOption) -> _T:
        """Add an option to this command.
        !!! note
            A command can have up to 25 options.
        Parameters
        ----------
        option : hikari.commands.CommandOption
            The option to add to this command.
        Returns
        -------
        SlashCommandBuilder
            Object of this command builder for chained calls.
        """
class ContextMenuCommandBuilder(CommandBuilder):
    """Builder interface for context menu commands."""
class ComponentBuilder(abc.ABC):
    """Base class for all component builder classes."""
    __slots__: typing.Sequence[str] = ()
    @abc.abstractmethod
    def build(self) -> data_binding.JSONObject:
        """Build a JSON object from this builder.
        Returns
        -------
        hikari.internal.data_binding.JSONObject
            The built JSON object representation of this builder.
        """
class ButtonBuilder(ComponentBuilder, abc.ABC, typing.Generic[_ContainerT]):
    """Builder class for a message button component."""
    __slots__: typing.Sequence[str] = ()
    @property
    @abc.abstractmethod
    def style(self) -> typing.Union[messages.ButtonStyle, int]:
        """Button's style.
        Returns
        -------
        typing.Union[builtins.int, hikari.messages.ButtonStyle]
            The button's style.
        """
    @property
    @abc.abstractmethod
    def emoji(self) -> typing.Union[snowflakes.Snowflakeish, emojis.Emoji, str, undefined.UndefinedType]:
        """Emoji which should appear on this button.
        Returns
        -------
        typing.Union[hikari.snowflakes.Snowflakeish, hikari.emojis.Emoji, builtins.str, hikari.undefined.UndefinedType]
            Object or ID or raw string of the emoji which should be displayed
            on this button if set.
        """
    @property
    @abc.abstractmethod
    def label(self) -> undefined.UndefinedOr[str]:
        """Text label which should appear on this button.
        !!! note
            The text label that should appear on this button. This may be
            up to 80 characters long.
        Returns
        -------
        hikari.undefined.UndefinedOr[builtins.str]
            Text label which should appear on this button.
        """
    @property
    @abc.abstractmethod
    def is_disabled(self) -> bool:
        """Whether the button should be marked as disabled.
        !!! note
            Defaults to `builtins.False`.
        Returns
        -------
        builtins.bool
            Whether the button should be marked as disabled.
        """
    @abc.abstractmethod
    def set_emoji(
        self: _T, emoji: typing.Union[snowflakes.Snowflakeish, emojis.Emoji, str, undefined.UndefinedType], /
    ) -> _T:
        """Set the emoji to display on this button.
        Parameters
        ----------
        emoji : typing.Union[hikari.snowflakes.Snowflakeish, hikari.emojis.Emoji, builtins.str, hikari.undefined.UndefinedType]
            Object, ID or raw string of the emoji which should be displayed on
            this button.
        Returns
        -------
        ButtonBuilder
            The builder object to enable chained calls.
        """  # noqa: E501 - Line too long
    @abc.abstractmethod
    def set_label(self: _T, label: undefined.UndefinedOr[str], /) -> _T:
        """Set the text label which should be displayed on this button.
        Parameters
        ----------
        label : hikari.undefined.UndefinedOr[builtins.str]
            The text label to show on this button.
            This may be up to 80 characters long.
        Returns
        -------
        ButtonBuilder
            The builder object to enable chained calls.
        """
    @abc.abstractmethod
    def set_is_disabled(self: _T, state: bool, /) -> _T:
        """Set whether this button should be disabled.
        Parameters
        ----------
        state : builtins.bool
            Whether this button should be disabled.
        Returns
        -------
        ButtonBuilder
            The builder object to enable chained calls.
        """
    @abc.abstractmethod
    def add_to_container(self) -> _ContainerT:
        """Add this button to the container component it belongs to.
        This is used as the finalising call during chained calls.
        Returns
        -------
        _ContainerT
            The container component that owns this button.
        """
class LinkButtonBuilder(ButtonBuilder[_ContainerT], abc.ABC):
    """Builder interface for link buttons."""
    @property
    @abc.abstractmethod
    def url(self) -> str:
        """Url this button should link to when pressed.

        Returns
        -------
        builtins.str
            Url this button should link to when pressed.
        """
class InteractiveButtonBuilder(ButtonBuilder[_ContainerT], abc.ABC):
"""Builder interface for interactive buttons."""
@property
@abc.abstractmethod
def custom_id(self) -> str:
"""Developer set custom ID used for identifying interactions with this button.
Returns
-------
builtins.str
Developer set custom ID used | |
\
hl.size != (ml,1):
raise TypeError("'hl' must be a dense 'd' matrix of " \
"size (%d,1)" %ml)
if Gq is None: Gq = []
if type(Gq) is not list or [ G for G in Gq if (type(G) is not matrix
and type(G) is not spmatrix) or G.typecode != 'd' or
G.size[1] != n ]:
raise TypeError("'Gq' must be a list of sparse or dense 'd' "\
"matrices with %d columns" %n)
mq = [ G.size[0] for G in Gq ]
a = [ k for k in range(len(mq)) if mq[k] == 0 ]
if a: raise TypeError("the number of rows of Gq[%d] is zero" %a[0])
if hq is None: hq = []
if type(hq) is not list or len(hq) != len(mq) or [ h for h in hq if
(type(h) is not matrix and type(h) is not spmatrix) or
h.typecode != 'd' ]:
raise TypeError("'hq' must be a list of %d dense or sparse "\
"'d' matrices" %len(mq))
a = [ k for k in range(len(mq)) if hq[k].size != (mq[k], 1) ]
if a:
k = a[0]
raise TypeError("'hq[%d]' has size (%d,%d). Expected size "\
"is (%d,1)." %(k, hq[k].size[0], hq[k].size[1], mq[k]))
N = ml + sum(mq)
h = matrix(0.0, (N,1))
if type(Gl) is matrix or [ Gk for Gk in Gq if type(Gk) is matrix ]:
G = matrix(0.0, (N, n))
else:
G = spmatrix([], [], [], (N, n), 'd')
h[:ml] = hl
G[:ml,:] = Gl
ind = ml
for k in range(len(mq)):
h[ind : ind + mq[k]] = hq[k]
G[ind : ind + mq[k], :] = Gq[k]
ind += mq[k]
bkc = n*[ mosek.boundkey.fx ]
blc = list(-c)
buc = list(-c)
bkx = ml*[ mosek.boundkey.lo ] + sum(mq)*[ mosek.boundkey.fr ]
blx = ml*[ 0.0 ] + sum(mq)*[ -inf ]
bux = N*[ +inf ]
c = -h
colptr, asub, acof = sparse([G.T]).CCS
aptrb, aptre = colptr[:-1], colptr[1:]
with env.Task(0,0) as task:
task.set_Stream (mosek.streamtype.log, streamprinter)
# set MOSEK options
for (param, val) in options.items():
if str(param)[:6] == "iparam":
task.putintparam(param, val)
elif str(param)[:6] == "dparam":
task.putdouparam(param, val)
elif str(param)[:6] == "sparam":
task.putstrparam(param, val)
else:
raise ValueError("invalid MOSEK parameter: "+str(param))
task.inputdata (n, # number of constraints
N, # number of variables
list(c), # linear objective coefficients
0.0, # objective fixed value
list(aptrb),
list(aptre),
list(asub),
list(acof),
bkc,
blc,
buc,
bkx,
blx,
bux)
task.putobjsense(mosek.objsense.maximize)
for k in range(len(mq)):
task.appendcone(mosek.conetype.quad, 0.0,
list(range(ml+sum(mq[:k]),ml+sum(mq[:k+1]))))
if taskfile:
task.writetask(taskfile)
task.optimize()
task.solutionsummary (mosek.streamtype.msg);
solsta = task.getsolsta(mosek.soltype.itr)
xu, xl, zq = n*[0.0], n*[0.0], sum(mq)*[0.0]
task.getsolutionslice(mosek.soltype.itr, mosek.solitem.slc, 0, n, xl)
task.getsolutionslice(mosek.soltype.itr, mosek.solitem.suc, 0, n, xu)
task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, ml, N, zq)
x = matrix(xu) - matrix(xl)
zq = [ matrix(zq[sum(mq[:k]):sum(mq[:k+1])]) for k in range(len(mq)) ]
if ml:
zl = ml*[0.0]
task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, 0, ml,
zl)
zl = matrix(zl)
else:
zl = matrix(0.0, (0,1))
if (solsta is mosek.solsta.unknown):
return (solsta, None, None, None)
else:
return (solsta, x, zl, zq)
def qp(P, q, G=None, h=None, A=None, b=None, taskfile=None):
    """
    Solves a quadratic program

        minimize    (1/2)*x'*P*x + q'*x
        subject to  G*x <= h
                    A*x = b.

    using MOSEK 8.0.

    solsta, x, z, y = qp(P, q, G=None, h=None, A=None, b=None, taskfile=None)

    Return values

        solsta is a MOSEK solution status key.

            If solsta is mosek.solsta.optimal, then (x, y, z) contains the
                primal-dual solution.
            If solsta is mosek.solsta.prim_infeas_cer, then (x, y, z) is a
                certificate of primal infeasibility.
            If solsta is mosek.solsta.dual_infeas_cer, then (x, y, z) is a
                certificate of dual infeasibility.
            If solsta is mosek.solsta.unknown, then (x, y, z) are all None.

            Other return values for solsta include:
                mosek.solsta.dual_feas
                mosek.solsta.near_dual_feas
                mosek.solsta.near_optimal
                mosek.solsta.near_prim_and_dual_feas
                mosek.solsta.near_prim_feas
                mosek.solsta.prim_and_dual_feas
                mosek.solsta.prim_feas
            in which case the (x,y,z) value may not be well-defined.

        x, z, y the primal-dual solution.

    Options are passed to MOSEK solvers via the msk.options dictionary,
    e.g., the following turns off output from the MOSEK solvers

        >>> msk.options = {mosek.iparam.log: 0}

    see the MOSEK Python API manual.

    Optionally, the interface can write a .task file, required for
    support questions on the MOSEK solver.
    """
    with mosek.Env() as env:
        # --- Validate problem data (types, typecodes and dimensions). ---
        if (type(P) is not matrix and type(P) is not spmatrix) or \
            P.typecode != 'd' or P.size[0] != P.size[1]:
            raise TypeError("'P' must be a square dense or sparse 'd' matrix ")
        n = P.size[0]
        if n < 1: raise ValueError("number of variables must be at least 1")
        if type(q) is not matrix or q.typecode != 'd' or q.size != (n,1):
            raise TypeError("'q' must be a 'd' matrix of size (%d,1)" %n)
        if G is None: G = spmatrix([], [], [], (0,n), 'd')
        if (type(G) is not matrix and type(G) is not spmatrix) or \
            G.typecode != 'd' or G.size[1] != n:
            raise TypeError("'G' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        m = G.size[0]
        if h is None: h = matrix(0.0, (0,1))
        if type(h) is not matrix or h.typecode != 'd' or h.size != (m,1):
            raise TypeError("'h' must be a 'd' matrix of size (%d,1)" %m)
        if A is None: A = spmatrix([], [], [], (0,n), 'd')
        if (type(A) is not matrix and type(A) is not spmatrix) or \
            A.typecode != 'd' or A.size[1] != n:
            raise TypeError("'A' must be a dense or sparse 'd' matrix "\
                "with %d columns" %n)
        p = A.size[0]
        if b is None: b = matrix(0.0, (0,1))
        if type(b) is not matrix or b.typecode != 'd' or b.size != (p,1):
            raise TypeError("'b' must be a dense matrix of size (%d,1)" %p)
        # BUG FIX: was 'm+p is 0' -- identity comparison with an int literal
        # relies on CPython small-int caching and raises a SyntaxWarning on
        # Python >= 3.8; value equality is the correct test.
        if m + p == 0: raise ValueError("m + p must be greater than 0")
        # --- Assemble bound keys/values for the MOSEK task:
        # inequality rows get an upper bound, equality rows are fixed,
        # variables are free.
        c = list(q)
        bkc = m*[ mosek.boundkey.up ] + p*[ mosek.boundkey.fx ]
        blc = m*[ -inf ] + [ bi for bi in b ]
        buc = list(h)+list(b)
        bkx = n*[mosek.boundkey.fr]
        blx = n*[ -inf ]
        bux = n*[ +inf ]
        # Column-compressed storage of the stacked constraint matrix [G; A].
        colptr, asub, acof = sparse([G,A]).CCS
        aptrb, aptre = colptr[:-1], colptr[1:]
        with env.Task(0,0) as task:
            task.set_Stream(mosek.streamtype.log, streamprinter)
            # Forward user-supplied MOSEK options to the task.
            for (param, val) in options.items():
                if str(param)[:6] == "iparam":
                    task.putintparam(param, val)
                elif str(param)[:6] == "dparam":
                    task.putdouparam(param, val)
                elif str(param)[:6] == "sparam":
                    task.putstrparam(param, val)
                else:
                    raise ValueError("invalid MOSEK parameter: "+str(param))
            task.inputdata(m+p, # number of constraints
                           n,   # number of variables
                           c,   # linear objective coefficients
                           0.0, # objective fixed value
                           list(aptrb),
                           list(aptre),
                           list(asub),
                           list(acof),
                           bkc,
                           blc,
                           buc,
                           bkx,
                           blx,
                           bux)
            # MOSEK expects only the lower triangle of the symmetric P.
            Ps = sparse(P)
            I, J = Ps.I, Ps.J
            tril = [ k for k in range(len(I)) if I[k] >= J[k] ]
            task.putqobj(list(I[tril]), list(J[tril]), list(Ps.V[tril]))
            task.putobjsense(mosek.objsense.minimize)
            if taskfile:
                task.writetask(taskfile)
            task.optimize()
            task.solutionsummary(mosek.streamtype.msg)
            solsta = task.getsolsta(mosek.soltype.itr)
            x = n*[ 0.0 ]
            task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, 0, n, x)
            x = matrix(x)
            # BUG FIX: was 'm is not 0' -- use '!=' for integer comparison.
            if m != 0:
                z = m*[0.0]
                task.getsolutionslice(mosek.soltype.itr, mosek.solitem.suc, 0, m,
                                      z)
                z = matrix(z)
            else:
                z = matrix(0.0, (0,1))
            # BUG FIX: was 'p is not 0' -- use '!=' for integer comparison.
            if p != 0:
                yu, yl = p*[0.0], p*[0.0]
                task.getsolutionslice(mosek.soltype.itr, mosek.solitem.suc, m, m+p,
                                      yu)
                task.getsolutionslice(mosek.soltype.itr, mosek.solitem.slc, m, m+p,
                                      yl)
                # Equality-constraint multipliers: upper minus lower duals.
                y = matrix(yu) - matrix(yl)
            else:
                y = matrix(0.0, (0,1))
            if solsta is mosek.solsta.unknown:
                return (solsta, None, None, None)
            else:
                return (solsta, x, z, y)
def ilp(c, G, h, A=None, b=None, I=None, taskfile=None):
"""
Solves the mixed integer LP
minimize c'*x
subject to G*x + s = h
A*x = b
s >= 0
xi integer, forall i in I
using MOSEK 8.0.
solsta, x = ilp(c, G, h, A=None, b=None, I=None, taskfile=None).
Input arguments
G is m x n, h is m x 1, A is p x n, b is p x 1. G and A must be
dense or sparse 'd' matrices. h and b are dense 'd' matrices
with one column. The default | |
connection, preferred_phy, accepted_phy)
# --- BGAPI command serialisers ---
# Every frame is struct.pack'ed as: 0x20 (command message type), payload
# length in bytes, command-class id, method id, then the payload fields.
# Variable-length payloads carry a one-byte length prefix before the data.
# GATT client commands (class id 9).
def gecko_cmd_gatt_set_max_mtu(self, max_mtu):
    return struct.pack('<4BH', 0x20, 2, 9, 0, max_mtu)
def gecko_cmd_gatt_discover_primary_services(self, connection):
    return struct.pack('<4BB', 0x20, 1, 9, 1, connection)
def gecko_cmd_gatt_discover_primary_services_by_uuid(self, connection, uuid):
    return struct.pack('<4BBB' + str(len(uuid)) + 's', 0x20, 2 + len(uuid), 9, 2, connection, len(uuid), bytes(i for i in uuid))
def gecko_cmd_gatt_discover_characteristics(self, connection, service):
    return struct.pack('<4BBI', 0x20, 5, 9, 3, connection, service)
def gecko_cmd_gatt_discover_characteristics_by_uuid(self, connection, service, uuid):
    return struct.pack('<4BBIB' + str(len(uuid)) + 's', 0x20, 6 + len(uuid), 9, 4, connection, service, len(uuid), bytes(i for i in uuid))
def gecko_cmd_gatt_set_characteristic_notification(self, connection, characteristic, flags):
    return struct.pack('<4BBHB', 0x20, 4, 9, 5, connection, characteristic, flags)
def gecko_cmd_gatt_discover_descriptors(self, connection, characteristic):
    return struct.pack('<4BBH', 0x20, 3, 9, 6, connection, characteristic)
def gecko_cmd_gatt_read_characteristic_value(self, connection, characteristic):
    return struct.pack('<4BBH', 0x20, 3, 9, 7, connection, characteristic)
def gecko_cmd_gatt_read_characteristic_value_by_uuid(self, connection, service, uuid):
    return struct.pack('<4BBIB' + str(len(uuid)) + 's', 0x20, 6 + len(uuid), 9, 8, connection, service, len(uuid), bytes(i for i in uuid))
def gecko_cmd_gatt_write_characteristic_value(self, connection, characteristic, value):
    return struct.pack('<4BBHB' + str(len(value)) + 's', 0x20, 4 + len(value), 9, 9, connection, characteristic, len(value), bytes(i for i in value))
def gecko_cmd_gatt_write_characteristic_value_without_response(self, connection, characteristic, value):
    return struct.pack('<4BBHB' + str(len(value)) + 's', 0x20, 4 + len(value), 9, 10, connection, characteristic, len(value), bytes(i for i in value))
def gecko_cmd_gatt_prepare_characteristic_value_write(self, connection, characteristic, offset, value):
    return struct.pack('<4BBHHB' + str(len(value)) + 's', 0x20, 6 + len(value), 9, 11, connection, characteristic, offset, len(value), bytes(i for i in value))
def gecko_cmd_gatt_execute_characteristic_value_write(self, connection, flags):
    return struct.pack('<4BBB', 0x20, 2, 9, 12, connection, flags)
def gecko_cmd_gatt_send_characteristic_confirmation(self, connection):
    return struct.pack('<4BB', 0x20, 1, 9, 13, connection)
def gecko_cmd_gatt_read_descriptor_value(self, connection, descriptor):
    return struct.pack('<4BBH', 0x20, 3, 9, 14, connection, descriptor)
def gecko_cmd_gatt_write_descriptor_value(self, connection, descriptor, value):
    return struct.pack('<4BBHB' + str(len(value)) + 's', 0x20, 4 + len(value), 9, 15, connection, descriptor, len(value), bytes(i for i in value))
def gecko_cmd_gatt_find_included_services(self, connection, service):
    return struct.pack('<4BBI', 0x20, 5, 9, 16, connection, service)
def gecko_cmd_gatt_read_multiple_characteristic_values(self, connection, characteristic_list):
    return struct.pack('<4BBB' + str(len(characteristic_list)) + 's', 0x20, 2 + len(characteristic_list), 9, 17, connection, len(characteristic_list), bytes(i for i in characteristic_list))
def gecko_cmd_gatt_read_characteristic_value_from_offset(self, connection, characteristic, offset, maxlen):
    return struct.pack('<4BBHHH', 0x20, 7, 9, 18, connection, characteristic, offset, maxlen)
def gecko_cmd_gatt_prepare_characteristic_value_reliable_write(self, connection, characteristic, offset, value):
    return struct.pack('<4BBHHB' + str(len(value)) + 's', 0x20, 6 + len(value), 9, 19, connection, characteristic, offset, len(value), bytes(i for i in value))
# GATT server commands (class id 10).
def gecko_cmd_gatt_server_read_attribute_value(self, attribute, offset):
    return struct.pack('<4BHH', 0x20, 4, 10, 0, attribute, offset)
def gecko_cmd_gatt_server_read_attribute_type(self, attribute):
    return struct.pack('<4BH', 0x20, 2, 10, 1, attribute)
def gecko_cmd_gatt_server_write_attribute_value(self, attribute, offset, value):
    return struct.pack('<4BHHB' + str(len(value)) + 's', 0x20, 5 + len(value), 10, 2, attribute, offset, len(value), bytes(i for i in value))
def gecko_cmd_gatt_server_send_user_read_response(self, connection, characteristic, att_errorcode, value):
    return struct.pack('<4BBHBB' + str(len(value)) + 's', 0x20, 5 + len(value), 10, 3, connection, characteristic, att_errorcode, len(value), bytes(i for i in value))
def gecko_cmd_gatt_server_send_user_write_response(self, connection, characteristic, att_errorcode):
    return struct.pack('<4BBHB', 0x20, 4, 10, 4, connection, characteristic, att_errorcode)
def gecko_cmd_gatt_server_send_characteristic_notification(self, connection, characteristic, value):
    return struct.pack('<4BBHB' + str(len(value)) + 's', 0x20, 4 + len(value), 10, 5, connection, characteristic, len(value), bytes(i for i in value))
def gecko_cmd_gatt_server_find_attribute(self, start, type):
    # NOTE: parameter name 'type' shadows the builtin; kept for API stability.
    return struct.pack('<4BHB' + str(len(type)) + 's', 0x20, 3 + len(type), 10, 6, start, len(type), bytes(i for i in type))
def gecko_cmd_gatt_server_set_capabilities(self, caps, reserved):
    return struct.pack('<4BII', 0x20, 8, 10, 8, caps, reserved)
def gecko_cmd_gatt_server_find_primary_service(self, start, uuid):
    return struct.pack('<4BHB' + str(len(uuid)) + 's', 0x20, 3 + len(uuid), 10, 9, start, len(uuid), bytes(i for i in uuid))
def gecko_cmd_gatt_server_set_max_mtu(self, max_mtu):
    return struct.pack('<4BH', 0x20, 2, 10, 10, max_mtu)
def gecko_cmd_gatt_server_get_mtu(self, connection):
    return struct.pack('<4BB', 0x20, 1, 10, 11, connection)
def gecko_cmd_gatt_server_enable_capabilities(self, caps):
    return struct.pack('<4BI', 0x20, 4, 10, 12, caps)
def gecko_cmd_gatt_server_disable_capabilities(self, caps):
    return struct.pack('<4BI', 0x20, 4, 10, 13, caps)
def gecko_cmd_gatt_server_get_enabled_capabilities(self):
    return struct.pack('<4B', 0x20, 0, 10, 14)
# Hardware commands (class id 12).
def gecko_cmd_hardware_set_soft_timer(self, time, handle, single_shot):
    return struct.pack('<4BIBB', 0x20, 6, 12, 0, time, handle, single_shot)
def gecko_cmd_hardware_get_time(self):
    return struct.pack('<4B', 0x20, 0, 12, 11)
def gecko_cmd_hardware_set_lazy_soft_timer(self, time, slack, handle, single_shot):
    return struct.pack('<4BIIBB', 0x20, 10, 12, 12, time, slack, handle, single_shot)
# Persistent store (flash) commands (class id 13).
def gecko_cmd_flash_ps_erase_all(self):
    return struct.pack('<4B', 0x20, 0, 13, 1)
def gecko_cmd_flash_ps_save(self, key, value):
    return struct.pack('<4BHB' + str(len(value)) + 's', 0x20, 3 + len(value), 13, 2, key, len(value), bytes(i for i in value))
def gecko_cmd_flash_ps_load(self, key):
    return struct.pack('<4BH', 0x20, 2, 13, 3, key)
def gecko_cmd_flash_ps_erase(self, key):
    return struct.pack('<4BH', 0x20, 2, 13, 4, key)
# Direct test mode / debug commands (class id 14).
def gecko_cmd_test_dtm_tx(self, packet_type, length, channel, phy):
    return struct.pack('<4BBBBB', 0x20, 4, 14, 0, packet_type, length, channel, phy)
def gecko_cmd_test_dtm_rx(self, channel, phy):
    return struct.pack('<4BBB', 0x20, 2, 14, 1, channel, phy)
def gecko_cmd_test_dtm_end(self):
    return struct.pack('<4B', 0x20, 0, 14, 2)
def gecko_cmd_test_debug_command(self, id, debugdata):
    return struct.pack('<4BBB' + str(len(debugdata)) + 's', 0x20, 2 + len(debugdata), 14, 7, id, len(debugdata), bytes(i for i in debugdata))
def gecko_cmd_test_debug_counter(self, id):
    return struct.pack('<4BI', 0x20, 4, 14, 12, id)
# Security manager commands (class id 15).
def gecko_cmd_sm_set_bondable_mode(self, bondable):
    return struct.pack('<4BB', 0x20, 1, 15, 0, bondable)
def gecko_cmd_sm_configure(self, flags, io_capabilities):
    return struct.pack('<4BBB', 0x20, 2, 15, 1, flags, io_capabilities)
def gecko_cmd_sm_store_bonding_configuration(self, max_bonding_count, policy_flags):
    return struct.pack('<4BBB', 0x20, 2, 15, 2, max_bonding_count, policy_flags)
def gecko_cmd_sm_increase_security(self, connection):
    return struct.pack('<4BB', 0x20, 1, 15, 4, connection)
def gecko_cmd_sm_delete_bonding(self, bonding):
    return struct.pack('<4BB', 0x20, 1, 15, 6, bonding)
def gecko_cmd_sm_delete_bondings(self):
    return struct.pack('<4B', 0x20, 0, 15, 7)
def gecko_cmd_sm_enter_passkey(self, connection, passkey):
    """Serialise the BGAPI sm_enter_passkey command.

    BUG FIX: the 'passkey' argument was previously ignored and only the
    connection handle was packed (1-byte payload). Per the BGAPI security
    manager command set, the frame carries the connection handle followed
    by a signed 32-bit passkey (-1 cancels the passkey entry), i.e. a
    5-byte payload.
    """
    return struct.pack('<4BBi', 0x20, 5, 15, 8, connection, passkey)
# Security manager commands (class id 15), continued. Same BGAPI framing:
# 0x20, payload length, class id, method id, payload.
def gecko_cmd_sm_passkey_confirm(self, connection, confirm):
    return struct.pack('<4BBB', 0x20, 2, 15, 9, connection, confirm)
def gecko_cmd_sm_set_oob_data(self, oob_data):
    return struct.pack('<4BB' + str(len(oob_data)) + 's', 0x20, 1 + len(oob_data), 15, 10, len(oob_data), bytes(i for i in oob_data))
def gecko_cmd_sm_list_all_bondings(self):
    return struct.pack('<4B', 0x20, 0, 15, 11)
def gecko_cmd_sm_bonding_confirm(self, connection, confirm):
    return struct.pack('<4BBB', 0x20, 2, 15, 14, connection, confirm)
def gecko_cmd_sm_set_debug_mode(self):
    return struct.pack('<4B', 0x20, 0, 15, 15)
def gecko_cmd_sm_set_passkey(self, passkey):
    """Serialise the BGAPI sm_set_passkey command.

    BUG FIX: the 'passkey' argument was previously ignored and an empty
    payload was packed. Per the BGAPI security manager command set, the
    frame carries a signed 32-bit passkey (-1 restores a random passkey),
    i.e. a 4-byte payload.
    """
    return struct.pack('<4Bi', 0x20, 4, 15, 16, passkey)
# Security manager (15), HomeKit (19), coexistence (32) and L2CAP (67)
# command serialisers. Same BGAPI framing: 0x20, payload length, class id,
# method id, payload; variable-length fields carry a one-byte length prefix.
def gecko_cmd_sm_use_sc_oob(self, enable):
    return struct.pack('<4BB', 0x20, 1, 15, 17, enable)
def gecko_cmd_sm_set_sc_remote_oob_data(self, oob_data):
    return struct.pack('<4BB' + str(len(oob_data)) + 's', 0x20, 1 + len(oob_data), 15, 18, len(oob_data), bytes(i for i in oob_data))
def gecko_cmd_sm_add_to_whitelist(self, address, address_type):
    # 'address' is a fixed 6-byte Bluetooth address, hence the '6s' field.
    return struct.pack('<4B6sB', 0x20, 7, 15, 19, bytes(i for i in address), address_type)
def gecko_cmd_sm_set_minimum_key_size(self, minimum_key_size):
    return struct.pack('<4BB', 0x20, 1, 15, 20, minimum_key_size)
def gecko_cmd_homekit_configure(self, i2c_address, support_display, hap_attribute_features, category, configuration_number, fast_advert_interval, fast_advert_timeout, flag, broadcast_advert_timeout, model_name):
    return struct.pack('<4BBBBHBHHIHB' + str(len(model_name)) + 's', 0x20, 17 + len(model_name), 19, 0, i2c_address, support_display, hap_attribute_features, category, configuration_number, fast_advert_interval, fast_advert_timeout, flag, broadcast_advert_timeout, len(model_name), bytes(i for i in model_name))
def gecko_cmd_homekit_advertise(self, enable, interval_min, interval_max, channel_map):
    return struct.pack('<4BBHHB', 0x20, 6, 19, 1, enable, interval_min, interval_max, channel_map)
def gecko_cmd_homekit_delete_pairings(self):
    return struct.pack('<4B', 0x20, 0, 19, 2)
def gecko_cmd_homekit_check_authcp(self):
    return struct.pack('<4B', 0x20, 0, 19, 3)
def gecko_cmd_homekit_get_pairing_id(self, connection):
    return struct.pack('<4BB', 0x20, 1, 19, 4, connection)
def gecko_cmd_homekit_send_write_response(self, connection, characteristic, status_code):
    return struct.pack('<4BBHB', 0x20, 4, 19, 5, connection, characteristic, status_code)
def gecko_cmd_homekit_send_read_response(self, connection, characteristic, status_code, attribute_size, value):
    return struct.pack('<4BBHBHB' + str(len(value)) + 's', 0x20, 7 + len(value), 19, 6, connection, characteristic, status_code, attribute_size, len(value), bytes(i for i in value))
def gecko_cmd_homekit_gsn_action(self, action):
    return struct.pack('<4BB', 0x20, 1, 19, 7, action)
def gecko_cmd_homekit_event_notification(self, connection, characteristic, change_originator, value):
    return struct.pack('<4BBHBB' + str(len(value)) + 's', 0x20, 5 + len(value), 19, 8, connection, characteristic, change_originator, len(value), bytes(i for i in value))
def gecko_cmd_homekit_broadcast_action(self, action, params):
    return struct.pack('<4BBB' + str(len(params)) + 's', 0x20, 2 + len(params), 19, 9, action, len(params), bytes(i for i in params))
def gecko_cmd_homekit_configure_product_data(self, product_data):
    return struct.pack('<4BB' + str(len(product_data)) + 's', 0x20, 1 + len(product_data), 19, 10, len(product_data), bytes(i for i in product_data))
def gecko_cmd_coex_set_options(self, mask, options):
    return struct.pack('<4BII', 0x20, 8, 32, 0, mask, options)
def gecko_cmd_coex_get_counters(self, reset):
    return struct.pack('<4BB', 0x20, 1, 32, 1, reset)
def gecko_cmd_coex_set_parameters(self, priority, request, pwm_period, pwm_dutycycle):
    return struct.pack('<4BBBBB', 0x20, 4, 32, 2, priority, request, pwm_period, pwm_dutycycle)
def gecko_cmd_coex_set_directional_priority_pulse(self, pulse):
    return struct.pack('<4BB', 0x20, 1, 32, 3, pulse)
def gecko_cmd_l2cap_coc_send_connection_request(self, connection, le_psm, mtu, mps, initial_credit):
    return struct.pack('<4BBHHHH', 0x20, 9, 67, 1, connection, le_psm, mtu, mps, initial_credit)
def gecko_cmd_l2cap_coc_send_connection_response(self, connection, cid, mtu, mps, initial_credit, result):
    return struct.pack('<4BBHHHHH', 0x20, 11, 67, 2, connection, cid, mtu, mps, initial_credit, result)
def gecko_cmd_l2cap_coc_send_le_flow_control_credit(self, connection, cid, credits):
    return struct.pack('<4BBHH', 0x20, 5, 67, 3, connection, cid, credits)
def gecko_cmd_l2cap_coc_send_disconnection_request(self, connection, cid):
    return struct.pack('<4BBH', 0x20, 3, 67, 4, connection, cid)
def gecko_cmd_l2cap_coc_send_data(self, connection, cid, data):
    return struct.pack('<4BBHB' + str(len(data)) + 's', 0x20, 4 + len(data), 67, 5, connection, cid, len(data), bytes(i for i in data))
def | |
<reponame>overmeulen/qpid-dispatch<filename>tests/system_tests_drain_support.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from proton.handlers import MessagingHandler
from proton.reactor import Container
from proton import Message, Endpoint
from system_test import main_module, TIMEOUT
from system_test import unittest
class Timeout(object):
    """Reactor-schedulable object that relays timer expiry to its owner."""
    def __init__(self, parent):
        self.parent = parent
    def on_timer_task(self, event):
        # The reactor invokes this when the scheduled interval elapses;
        # delegate to the owning handler so it can fail/close the test.
        self.parent.timeout()
class DrainMessagesHandler(MessagingHandler):
    """Send 10 messages and, after the 4th delivery, drain the receiver.

    Success criterion: a drain(20) issued at the 4th message still yields
    all 10 sent messages, after which the link credit drops to zero,
    proving the peer honoured the drain request.
    """
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.sent_count = 0
        self.received_count = 0
        self.address = address
        # Set to None on success; replaced by a timeout message on failure.
        self.error = "Unexpected Exit"
    def timeout(self):
        # Called by the scheduled Timeout helper if the test stalls.
        self.error = "Timeout Expired: sent: %d rcvd: %d" % (self.sent_count, self.received_count)
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        # Hand out a single credit so deliveries arrive one at a time.
        self.receiver.flow(1)
    def on_link_flow(self, event):
        if event.link.is_sender and event.link.credit \
           and event.link.state & Endpoint.LOCAL_ACTIVE \
           and event.link.state & Endpoint.REMOTE_ACTIVE :
            self.on_sendable(event)
        # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
        # messages. That along with 10 messages received indicates that the drain worked and we can
        # declare that the test is successful
        if self.received_count == 10 and event.link.credit == 0:
            self.error = None
            self.timer.cancel()
            self.receiver.close()
            self.sender.close()
            self.conn.close()
    def on_sendable(self, event):
        if self.sent_count < 10:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1
    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1
                if self.received_count < 4:
                    # Keep trickling credit one at a time at first.
                    event.receiver.flow(1)
                elif self.received_count == 4:
                    # We are issuing a drain of 20. This means that we will receive all the 10 messages
                    # that the sender is sending. The router will also send back a response flow frame with
                    # drain=True but I don't have any way of making sure that the response frame reached the
                    # receiver
                    event.receiver.drain(20)
    def run(self):
        Container(self).run()
class DrainOneMessageHandler(DrainMessagesHandler):
    """Variant of DrainMessagesHandler that drains only one extra message.

    After the 4th delivery it issues drain(1), so exactly one further
    message (5 in total) should arrive before the credit drops to zero.
    """
    def __init__(self, address):
        super(DrainOneMessageHandler, self).__init__(address)
    def on_message(self, event):
        if event.receiver == self.receiver:
            if "Hello World" == event.message.body:
                self.received_count += 1
                if self.received_count < 4:
                    event.receiver.flow(1)
                elif self.received_count == 4:
                    # We are issuing a drain of 1 after we receive the 4th message.
                    # This means that going forward, we will receive only one more message.
                    event.receiver.drain(1)
                # The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
                # messages. That along with 5 messages received (4 earlier messages and 1 extra message for drain=1)
                # indicates that the drain worked and we can declare that the test is successful
                if self.received_count == 5 and event.link.credit == 0:
                    self.error = None
                    self.timer.cancel()
                    self.receiver.close()
                    self.sender.close()
                    self.conn.close()
class DrainNoMessagesHandler(MessagingHandler):
    """Issue a drain while no messages are pending.

    The peer must immediately echo the drain, driving the receiver's
    credit to zero without any message being delivered.
    """
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainNoMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.address = address
        # Set to None on success; replaced by a timeout message on failure.
        self.error = "Unexpected Exit"
    def timeout(self):
        self.error = "Timeout Expired"
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)
    def on_sendable(self, event):
        # Nothing is ever sent; as soon as the sender link is live, drain.
        self.receiver.drain(1)
    def on_link_flow(self, event):
        # Credit reaching zero confirms the drain round-trip completed.
        if self.receiver.credit == 0:
            self.error = None
            self.timer.cancel()
            self.conn.close()
    def run(self):
        Container(self).run()
class DrainNoMoreMessagesHandler(MessagingHandler):
    """Drain after the only queued message has already been delivered.

    One message is sent and received; once the delivery settles, a drain
    is issued and must drive the remaining credit to zero.
    """
    def __init__(self, address):
        # prefetch is set to zero so that proton does not automatically issue 10 credits.
        super(DrainNoMoreMessagesHandler, self).__init__(prefetch=0)
        self.conn = None
        self.sender = None
        self.receiver = None
        self.address = address
        self.sent = 0
        self.rcvd = 0
        # Set to None on success; replaced by a timeout message on failure.
        self.error = "Unexpected Exit"
    def timeout(self):
        self.error = "Timeout Expired: sent=%d rcvd=%d" % (self.sent, self.rcvd)
        self.conn.close()
    def on_start(self, event):
        self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
        self.conn = event.container.connect(self.address)
        # Create a sender and a receiver. They are both listening on the same address
        self.receiver = event.container.create_receiver(self.conn, "org.apache.dev")
        self.sender = event.container.create_sender(self.conn, "org.apache.dev")
        self.receiver.flow(1)
    def on_sendable(self, event):
        # Send exactly one message.
        if self.sent == 0:
            msg = Message(body="Hello World")
            event.sender.send(msg)
            self.sent += 1
    def on_message(self, event):
        self.rcvd += 1
    def on_settled(self, event):
        # The lone delivery has settled; drain any remaining credit.
        self.receiver.drain(1)
    def on_link_flow(self, event):
        if self.receiver.credit == 0:
            self.error = None
            self.timer.cancel()
            self.conn.close()
    def run(self):
        Container(self).run()
class DrainMessagesMoreHandler(MessagingHandler):
"""
Make sure the clients can send/receive after going through a drain cycle.
Send phase
1. Sender sending first 10 messages
2. Sender paused waiting for drain to finish
3. Sender is sending second 10 messages
4. Sender is done.
Receive phase
1. Receiver receiving first four messages; At #4 receiver issues drain 4,20
2. Reciever receives messages 5..10.
When 10 messages have been received and link credit =0 the drain is done
Receiver issues 10 credits
3. Receiver recieves messages 11..20.
4. Receiver is done
At issue in DISPATCH-1055 is that the 10 credits issued in Receive step 2
are never propagated across a link route to the 'broker'.
This code is instantiated with and without the link route to demonstrate that
it works properly when the 'test-router' is handling the drain by itself
and that it fails only on the link route.
"""
def __init__(self, address, route_name):
# prefetch is set to zero so that proton does not automatically issue 10 credits.
super(DrainMessagesMoreHandler, self).__init__(prefetch=0)
self.conn = None
self.sender = None
self.receiver = None
self.sent_count = 0
self.received_count = 0
self.address = address
self.error = "Unexpected Exit"
self.send_phase = 1
self.recv_phase = 1
self.route_name = route_name
self.verbose_printing = False
def show_state(self):
return str("send_phase:" + str(self.send_phase)
+ ", sent_count:" + str(self.sent_count)
+ ", recv_phase:" + str(self.recv_phase)
+ ", receive_count:" + str(self.received_count)
+ ", receiver_credit:" + str(self.receiver.credit)
+ ", sender_credit:" + str(self.sender.credit))
def printme(self, str):
if (self.verbose_printing):
print (str + " " + self.show_state())
def timeout(self):
self.error = "Timeout Expired: sent: %d rcvd: %d" % (self.sent_count, self.received_count)
self.conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
self.conn = event.container.connect(self.address)
# Create a sender and a receiver. They are both listening on the same address
self.receiver = event.container.create_receiver(self.conn, source=self.route_name)
self.sender = event.container.create_sender(self.conn, target=self.route_name)
self.receiver.flow(1)
def on_link_flow(self, event):
if event.link.is_sender and event.link.credit \
and event.link.state & Endpoint.LOCAL_ACTIVE \
and event.link.state & Endpoint.REMOTE_ACTIVE :
self.on_sendable(event)
# The fact that the event.link.credit is 0 means that the receiver will not be receiving any more
# messages. That along with 10 messages received indicates that the drain worked.
if self.send_phase == 2 and self.received_count == 10 and event.link.credit == 0:
self.printme ("sender transitions to phase 3 - drain completed, send new flow now")
self.receiver.flow(10)
self.send_phase = 3
if event.link.is_sender and event.link.credit \
and event.link.state & Endpoint.LOCAL_ACTIVE \
and event.link.state & Endpoint.REMOTE_ACTIVE :
self.on_sendable(event)
self.printme (("sender " if event.link.is_sender else "receiver ") + "exit on_link_flow:")
def on_sendable(self, event):
    """Send the next message when the sender has credit.

    Phase 1 sends messages 0-9 and then waits for the drain (phase 2);
    phase 3 sends messages 10-19 and then finishes (phase 4). The two
    phases previously used duplicated, near-identical branches; they are
    merged here with per-phase limits and transition messages.
    """
    if event.link.is_sender and self.send_phase in (1, 3):
        if self.send_phase == 1:
            limit, next_phase = 10, 2
            transition_note = "sender transitions to phase 2 - wait for drain to finish"
        else:
            limit, next_phase = 20, 4
            transition_note = "sender transitions to phase 4 - done sending"
        if self.sent_count < limit:
            msg = Message(body="Hello World", properties={'seq': self.sent_count})
            dlv = event.sender.send(msg)
            dlv.settle()
            self.sent_count += 1
            if self.sent_count == limit:
                self.printme(transition_note)
                self.send_phase = next_phase
    self.printme(("sender " if event.link.is_sender else "receiver ") + "exit on_sendable:")
def on_message(self, event):
if event.receiver == self.receiver:
if "Hello World" == event.message.body:
self.received_count | |
def enter_drain(self):
    """Transition to DRAIN: pace below the estimated rate to empty the queue."""
    self.state = BBRMode.BBR_DRAIN
    self.pacing_gain = 1 / BBR_HIGH_GAIN  # pace slowly
    self.cwnd_gain = BBR_HIGH_GAIN  # maintain cwnd
def check_drain(self):
    """Enter DRAIN once startup has filled the pipe; leave it once drained."""
    if self.state == BBRMode.BBR_STARTUP and self.filled_pipe:
        self.enter_drain()
    # Inflight has fallen to the estimated BDP (gain 1.0): queue is empty.
    if self.state == BBRMode.BBR_DRAIN and self.bytes_in_flight <= self.inflight(1.0):
        self.enter_probe_bw()  # we estimate queue is drained
def enter_probe_bw(self):
    """Enter PROBE_BW and kick off the pacing-gain cycle."""
    self.state = BBRMode.BBR_PROBE_BW
    self.pacing_gain = 1
    self.cwnd_gain = 2
    # Start the cycle at a randomized index (presumably to de-synchronize
    # competing flows, as in the BBR reference -- confirm against the draft).
    self.cycle_index = BBR_GAIN_CYCLE_LEN - 1 - self.prng.randint(0, 6)
    self.advance_cycle_phase()
def check_cycle_phase(self):
    """While in PROBE_BW, rotate to the next pacing-gain phase when due."""
    if self.state == BBRMode.BBR_PROBE_BW and self.is_next_cycle_phase():
        self.advance_cycle_phase()
def advance_cycle_phase(self):
    """Step to the next slot of the PROBE_BW pacing-gain cycle."""
    self.cycle_stamp = self.get_cur_time()
    self.cycle_index = (self.cycle_index + 1) % BBR_GAIN_CYCLE_LEN
    # One probing slot (5/4), one compensating slot (3/4), then cruise at 1.
    pacing_gain_cycle = [5/4, 3/4, 1, 1, 1, 1, 1, 1]
    self.pacing_gain = pacing_gain_cycle[self.cycle_index]
def is_next_cycle_phase(self):
    """Decide whether PROBE_BW should rotate to its next pacing-gain phase."""
    elapsed = self.get_cur_time() - self.cycle_stamp
    full_length = elapsed > self.rtprop
    gain = self.pacing_gain
    if gain == 1:
        # Cruise phase: advance purely on elapsed time.
        return full_length
    if gain > 1:
        # Probing up: also require evidence the raised rate had an effect
        # (losses, or inflight reached the target for this gain).
        return full_length and (self.rs.losses > 0 or
                                self.rs.prior_in_flight >= self.inflight(gain))
    # gain < 1: may leave early once inflight has fallen to the gain-1 target.
    return full_length or self.rs.prior_in_flight <= self.inflight(1)
def handle_restart_from_idle(self):
    """On transmit after an idle (app-limited, nothing in flight) period,
    re-prime pacing so the restart is not treated as a normal sample.

    Bug fix: the flag was written to ``self.idle_start``, but every reader in
    this class (check_probe_rtt) tests ``self.idle_restart``; the flag is now
    written to ``self.idle_restart`` so the suppression actually takes effect.
    """
    packets_in_flight = self.bytes_in_flight / BYTES_PER_PACKET
    if packets_in_flight == 0 and self.app_limited:
        self.idle_restart = True
        if self.state == BBRMode.BBR_PROBE_BW:
            # Restart pacing at gain 1 so we do not burst after the idle gap.
            self.set_pacing_rate_with_gain(1)
def check_probe_rtt(self):
    """Enter/maintain PROBE_RTT when the min-RTT estimate has gone stale."""
    if self.state != BBRMode.BBR_PROBE_RTT and self.rtprop_expired and not self.idle_restart:
        self.enter_probe_rtt()
        self.save_cwnd()  # remember cwnd so it can be restored on exit
        self.probe_rtt_done_stamp = 0  # 0 = dwell timer not started yet
    if self.state == BBRMode.BBR_PROBE_RTT:
        self.handle_probe_rtt()
    self.idle_restart = False
def enter_probe_rtt(self):
    """Enter PROBE_RTT: neutral gains while probing for the true minimum RTT."""
    self.state = BBRMode.BBR_PROBE_RTT
    self.pacing_gain = 1
    self.cwnd_gain = 1
def handle_probe_rtt(self):
    """Run the PROBE_RTT dwell: wait for inflight to drain, hold, then exit."""
    # Ignore low rate samples during ProbeRTT:
    packets_in_flight = self.bytes_in_flight / BYTES_PER_PACKET
    self.app_limited = False  # assume always have available data to send from app
    # instead of (BW.delivered + packets_in_flight) ? : 1
    if self.probe_rtt_done_stamp == 0 and packets_in_flight <= BBR_MIN_PIPE_CWND:
        # Inflight has drained to the floor: start the dwell timer and
        # arm detection of one full round trip at this reduced inflight.
        self.probe_rtt_done_stamp = self.get_cur_time() + PROBE_RTT_DURATION
        self.probe_rtt_round_done = False
        self.next_round_delivered = self.conn_state.delivered
    elif self.probe_rtt_done_stamp != 0:
        if self.round_start:
            self.probe_rtt_round_done = True
        # Exit only after both a full round trip and the dwell time elapse.
        if self.probe_rtt_round_done and self.get_cur_time() > self.probe_rtt_done_stamp:
            self.rtprop_stamp = self.get_cur_time()  # min-RTT estimate is fresh now
            self.restore_cwnd()
            self.exit_probe_rtt()
def exit_probe_rtt(self):
    """Leave PROBE_RTT for the state matching the current pipe estimate."""
    next_state = self.enter_probe_bw if self.filled_pipe else self.enter_startup
    next_state()
def update_on_ack(self, pkt: BBRPacket):
    """Per-ACK entry point: refresh the model/state, then the control knobs."""
    self.update_model_and_state(pkt)
    self.update_control_parameters()
def update_model_and_state(self, pkt):
    """Update the BBR model and state machine from one ACK.

    The call order matters: bandwidth first, then gain cycling, pipe-fill
    detection, drain, min-RTT, and finally the PROBE_RTT check.
    """
    self.update_btlbw(pkt)
    self.check_cycle_phase()
    self.check_full_pipe()
    self.check_drain()
    self.update_rtprop(pkt)
    self.check_probe_rtt()
def update_control_parameters(self):
    """Recompute the three control outputs: pacing rate, send quantum, cwnd."""
    self.set_pacing_rate()
    self.set_send_quantum()
    self.set_cwnd()
def on_transmit(self):
    """Hook called before sending; handles restart after an idle period."""
    self.handle_restart_from_idle()
def send_packet(self, pkt: BBRPacket):
    """Stamp *pkt* with the delivery-rate bookkeeping consulted on ACK."""
    # self.pipe = self.bytes_in_flight / BYTES_PER_PACKET
    if self.bytes_in_flight / BYTES_PER_PACKET == 0:
        # Nothing in flight: reset the send/delivery clocks to now so an
        # idle gap is not counted in the next rate sample.
        self.conn_state.first_sent_time = self.get_cur_time()
        self.conn_state.delivered_time = self.get_cur_time()
    # Snapshot connection-level state onto the packet for update_rate_sample().
    pkt.first_sent_time = self.conn_state.first_sent_time
    pkt.delivered_time = self.conn_state.delivered_time
    pkt.delivered = self.conn_state.delivered
    pkt.is_app_limited = False  # (self.app_limited != 0)
# Upon receiving ACK, fill in delivery rate sample rs.
def generate_rate_sample(self, pkt: BBRPacket):
    """Fill in the delivery-rate sample ``self.rs`` from the ACKed *pkt*.

    Returns True when ``rs`` holds a usable sample. Mirrors the kernel's
    delivery-rate estimation; several guards from the reference are left
    commented out below (with the author's TODOs) -- do not remove them.
    """
    # for each newly SACKed or ACKed packet P:
    # self.update_rate_sample(P, rs)
    self.update_rate_sample(pkt)
    # TODO: BUG!! uncomment the following 2 lines to fix the btlbw overestimation bug
    # if not self.update_rate_sample(pkt):
    #     return False
    # Clear app-limited field if bubble is ACKed and gone.
    if self.conn_state.app_limited and self.conn_state.delivered > self.conn_state.app_limited:
        self.app_limited = 0
    # TODO: comment out and need to recheck
    # if self.rs.prior_time == 0:
    #     return False  # nothing delivered on this ACK
    # Use the longer of the send_elapsed and ack_elapsed
    self.rs.interval = max(self.rs.send_elapsed, self.rs.ack_elapsed)
    # print(self.rs.send_elapsed, self.rs.ack_elapsed)
    self.rs.delivered = self.conn_state.delivered - self.rs.prior_delivered
    # print("C.delivered: {}, rs.prior_delivered: {}".format(self.delivered, self.rs.prior_delivered))
    # Normally we expect interval >= MinRTT.
    # Note that rate may still be over-estimated when a spuriously
    # retransmitted skb was first (s)acked because "interval"
    # is under-estimated (up to an RTT). However, continuously
    # measuring the delivery rate during loss recovery is crucial
    # for connections suffer heavy or prolonged losses.
    #
    # TODO: uncomment this
    # if self.rs.interval < MinRTT(tp):
    #     self.rs.interval = -1
    #     return False  # no reliable sample
    if self.rs.interval != 0:
        # Rate = data delivered over the sampled interval (bytes/sec).
        self.rs.delivery_rate = self.rs.delivered / self.rs.interval
        # if self.rs.delivery_rate * 8 / 1e6 > 1.2:
        #     print("C.delivered:", self.conn_state.delivered, "rs.prior_delivered:", self.rs.prior_delivered, "rs.delivered:", self.rs.delivered, "rs.interval:", self.rs.interval, "rs.delivery_rate:", self.rs.delivery_rate * 8 / 1e6)
    return True  # we filled in rs with a rate sample
# Update rs when packet is SACKed or ACKed.
def update_rate_sample(self, pkt: BBRPacket):
    """Advance delivery bookkeeping for *pkt* and refresh ``self.rs``.

    Returns True when *pkt* carried newer delivery state than the current
    sample (and rs was updated), False otherwise.
    """
    # comment out because we don't need this in the simulator.
    # if pkt.delivered_time == 0:
    #     return  # P already SACKed
    self.rs.prior_in_flight = self.bytes_in_flight
    self.conn_state.delivered += pkt.pkt_size
    self.conn_state.delivered_time = self.get_cur_time()
    # Update info using the newest packet:
    # print(pkt.delivered, self.rs.prior_delivered)
    # if pkt.delivered > self.rs.prior_delivered:
    if (not self.rs.prior_delivered) or pkt.delivered > self.rs.prior_delivered:
        self.rs.prior_delivered = pkt.delivered
        self.rs.prior_time = pkt.delivered_time
        self.rs.is_app_limited = pkt.is_app_limited
        # Elapsed times on the send and ACK sides of the sampled interval.
        self.rs.send_elapsed = pkt.sent_time - pkt.first_sent_time
        self.rs.ack_elapsed = self.conn_state.delivered_time - pkt.delivered_time
        # print("pkt.sent_time:", pkt.sent_time, "pkt.first_sent_time:", pkt.first_sent_time, "send_elapsed:", self.rs.send_elapsed)
        # print("C.delivered_time:", self.conn_state.delivered_time, "P.delivered_time:", pkt.delivered_time, "ack_elapsed:", self.rs.ack_elapsed)
        self.conn_state.first_sent_time = pkt.sent_time
        return True
    return False
    # pkt.debug_print()
    # Mark the packet as delivered once it's SACKed to
    # avoid being used again when it's cumulatively acked.
    # pkt.delivered_time = 0
def can_send_packet(self):
    """Return True when the congestion window allows sending another packet."""
    # the commented logic is from bbr paper, uncomment if the current
    # implementation is not working well
    # if not self.srtt or self.btlbw == 0:  # no valid rtt measurement yet
    #     estimated_bdp = TCP_INIT_CWND
    #     cwnd_gain = 1
    #
    # else:
    #     estimated_bdp = self.btlbw * self.rtprop / BYTES_PER_PACKET
    #     cwnd_gain = self.cwnd_gain
    # if self.bytes_in_flight >= cwnd_gain * estimated_bdp * BYTES_PER_PACKET:
    if self.bytes_in_flight >= self.cwnd * BYTES_PER_PACKET:
        # wait for ack or timeout
        return False
    # for debug purpose
    # self.bbr_log.append([self.get_cur_time(), self.pacing_gain,
    #     self.pacing_rate * BITS_PER_BYTE / 1e6, self.cwnd_gain, self.cwnd,
    #     self.target_cwnd, self.prior_cwnd, self.btlbw * BITS_PER_BYTE / 1e6,
    #     self.rtprop, self.full_bw * BITS_PER_BYTE / 1e6, self.state.value,
    #     self.bytes_in_flight / BYTES_PER_PACKET,
    #     int(self.in_fast_recovery_mode),
    #     self.rs.delivery_rate * BITS_PER_BYTE / 1e6, self.round_start,
    #     self.round_count, self.rto, self.net.links[0].pkt_in_queue,
    #     self.conn_state.delivered])
    return True
def schedule_send(self, first_pkt: bool = False, on_ack: bool = False):
    """Enqueue the next packet-send event into the simulated network.

    first_pkt: send immediately (time 0). on_ack: currently a no-op (see
    note below). Otherwise the next send is spaced by the pacing rate.
    """
    assert self.net, "network is not registered in sender."
    if first_pkt:
        self.next_send_time = 0
    elif on_ack:
        if self.get_cur_time() < self.next_send_time:
            return
        # NOTE(review): this return executes unconditionally, so the on_ack
        # path never schedules a packet and the commented rescheduling below
        # is dead -- confirm this is intentional.
        return
        # self.next_send_time = self.get_cur_time()
    else:
        # Pace one packet per (packet size / pacing rate) seconds.
        self.next_send_time = self.get_cur_time() + BYTES_PER_PACKET / self.pacing_rate  # (5 * 1e6 / 8)#
    next_pkt = BBRPacket(self.next_send_time, self, 0)
    self.net.add_packet(next_pkt)
def on_packet_sent(self, pkt: BBRPacket) -> None:
    """Stamp and account an outgoing packet; records app-limited idle gaps."""
    # if self.get_cur_time() >= self.next_send_time:
    # packet = nextPacketToSend()  # assume always a packet to send from app
    if not pkt:
        # No packet available: remember how much was in flight so the
        # app-limited period can be recognized later.
        self.app_limited_until = self.bytes_in_flight
        return
    self.send_packet(pkt)
    # ship(packet)  # no need to do this in the simulator.
    super().on_packet_sent(pkt)
    # self.next_send_time = self.net.get_cur_time() + pkt.pkt_size / \
    #     (self.pacing_gain * self.btlbw)
    # else:
    #     ipdb.set_trace()
    # timerCallbackAt(send, nextSendTime)
# TODO: potential bug here if previous call return at if inflight < cwnd
def on_packet_acked(self, pkt: BBRPacket) -> None:
    """Per-ACK processing: rate sample, base bookkeeping, BBR model update."""
    if not self.net:
        raise RuntimeError("network is not registered in sender.")
    if not self.in_fast_recovery_mode:
        # Loss counter only accumulates while in fast recovery.
        self.rs.losses = 0
    self.generate_rate_sample(pkt)
    super().on_packet_acked(pkt)
    self.update_on_ack(pkt)
    # Leave fast recovery once its exit timestamp has passed.
    if self.in_fast_recovery_mode and self.get_cur_time() >= self.exit_fast_recovery_ts:
        self.packet_conservation = False
        self.on_exit_fast_recovery()
def on_packet_lost(self, pkt: BBRPacket) -> None:
    """Count the loss and enter fast recovery."""
    if not self.net:
        raise RuntimeError("network is not registered in sender.")
    super().on_packet_lost(pkt)
    self.rs.losses += 1
    self.on_enter_fast_recovery(pkt)
def reset(self):
    """Reset the sender to its initial (fresh-connection) state."""
    super().reset()
    self.cwnd = TCP_INIT_CWND
    self.conn_state = ConnectionState()
    self.rs = RateSample()
    self.btlbw = 0  # bottleneck bw in bytes/sec
    self.app_limited_until = 0
    self.next_send_time = 0
    self.pacing_gain = BBR_HIGH_GAIN  # startup pacing gain
    self.target_cwnd = 0
    self.in_fast_recovery_mode = False
    self.init()
def debug_print(self):
    """Print a one-line snapshot of the full BBR state (rates in Mbps)."""
    print("ts: {:.3f}, round_count: {}, pacing_gain:{}, "
          "pacing_rate: {:.3f}Mbps, next_send_time: {:.3f}, cwnd_gain: {}, "
          "cwnd: {}, target_cwnd: {}, bbr_state: {}, btlbw: {:.3f}Mbps, "
          "rtprop: {:.3f}, rs.delivery_rate: {:.3f}Mbps, "
          "can_send_packet: {}, pkt_in_flight: {}, full_bw: {:.3f}Mbps, "
          "full_bw_count: {}, filled_pipe: {}, C.delivered: {}, "
          "next_round_delivered: {}, round_start: {}, prior_delivered: {}".format(
              self.get_cur_time(), self.round_count, self.pacing_gain,
              self.pacing_rate * BITS_PER_BYTE / 1e6, self.next_send_time,
              self.cwnd_gain, self.cwnd, self.target_cwnd, self.state.value,
              self.btlbw * BITS_PER_BYTE / 1e6, self.rtprop,
              self.rs.delivery_rate * BITS_PER_BYTE / 1e6,
              self.can_send_packet(),
              self.bytes_in_flight / BYTES_PER_PACKET,
              self.full_bw * BITS_PER_BYTE / 1e6, self.full_bw_count,
              self.filled_pipe, self.conn_state.delivered,
              self.next_round_delivered, self.round_start,
              self.rs.prior_delivered))
class BBR_old:
cc_name = 'bbr_old'
def __init__(self, record_pkt_log: bool = False, seed: int = 42):
    """Store test configuration.

    record_pkt_log: keep a per-packet log during test() runs.
    seed: RNG seed, for reproducible traces.
    """
    self.seed = seed
    self.record_pkt_log = record_pkt_log
def test(self, trace: Trace, save_dir: str, plot_flag: bool = False) -> Tuple[float, float]:
"""Test a network trace and return rewards.
The 1st return value is the reward in Monitor Interval(MI) level and
the length of MI is 1 srtt. The 2nd return value is the reward in
packet level. It is computed by using throughput, average rtt, and
loss rate in each 500ms bin of the packet log. The 2nd value will be 0
if record_pkt_log flag is False.
Args:
trace: network trace.
save_dir: where a MI level log will be saved if save_dir is a
valid path. A packet level log will be saved if record_pkt_log
flag is True and save_dir is | |
<filename>emma/core/metadata.py
#!/usr/bin/env python
# encoding: utf-8
"""
metadata.py
Created by <NAME> on 2008-02-11.
Copyright (c) 2008, 2009 <NAME> Web Studio, 2010, 2011, 2012 Django Web Studio. All rights reserved.
"""
import string
import sys
import os
import unittest
import subprocess
import re
import datetime
import time
from utes import *
try:
import json
except:
import simplejson as json
class Metadata(object):
def __init__(self):  # stateless: every method shells out to stat/exiftool per call
    pass
def stat(self, path):
    """
    On OSX, st_birthtime is the only file time that accurately
    reflects the original creation time of a file. Even
    st_ctime gets updated once in a while, for example
    when the metadata is updated.
    Sadly, st_birthtime is not exposed to python on all OSX versions.
    Notably not on 10.5, but it is on 10.7 (have not checked 10.6).
    Other platforms use st_ctime more correctly, but have no st_birthtime.
    This function is an alternative for os.stat(<path>).st_birthtime.
    It leaves testing the platform and/or the existence of the st_birthtime
    method up to you.
    Returns a dict containing the response of stat -s <file>.
    The return value will be something like this:
    {'st_rdev': '0', 'st_ctime': '1339613207', 'st_mtime': '1339613207',
     'st_blocks': '31432', 'st_nlink': '1', 'st_flags': '0', 'st_gid': '20',
     'st_blksize': '4096', 'st_dev': '234881026', 'st_size': '16089528',
     'st_mode': '0100667', 'st_uid': '501', 'st_birthtime': '1108716743',
     'st_ino': '102959153', 'st_atime': '1339613537'}
    You will need to test for the exact response for your target
    platform.
    Note that all values are strings, exactly as printed by stat.
    """
    # BSD/macOS `stat -s` prints shell-style `name=value` pairs on one line.
    r = subprocess.Popen(['stat', '-s', path], stdout=subprocess.PIPE).communicate()[0]
    # Split the space-separated pairs, then each pair on '=' to build the dict.
    d = dict([x.split('=') for x in r.rstrip().split(' ')])
    return d
def exifGrepForCopyright(self, fileToCheck):
    """ gets metadata from a file
        takes a full path
        this function is very specific for the way the copyright info has been (wrongly) written to file
        uses shell grep, not python grep
        requests metadata using exiftool (http://www.sno.phy.queensu.ca/~phil/exiftool/)
        returns 1 or 0

        NOTE(review): what is actually returned is grep's stdout (the `-c`
        match count as a string), not the int 1/0 the line above claims --
        confirm how callers interpret it.
    """
    # grep -w -c: count whole-word matches of the (mis-)written copyright tag.
    grepStatement = 'grep -w -c "[cC]opyright[s]:[ ]ja"'
    # Pipe exiftool's description output through grep in a shell.
    proc = subprocess.Popen('exiftool' + ' -f -description ' + '\"' + fileToCheck + '\"' + ' | ' + grepStatement,
                            shell=True,
                            stdout=subprocess.PIPE,
                            )
    return proc.communicate()[0]
def exifGrepForCopyrightFromDescription(self, stringToGrep):
    """Classify the copyright marker inside a description string.

    Returns 1 for an affirmative marker ("copyright(s): ja|yes|vrij"),
    0 for a negative one ("copyright(s): nee|no"), 2 when neither is found.

    Bug fix: the previous patterns ``copyrights*:\\s*ja|yes|vrij`` applied the
    alternation to the whole expression, so a bare "yes"/"vrij" (or "no")
    anywhere in the text matched regardless of the "copyright:" prefix. The
    alternatives are now grouped so the prefix is always required.
    NOTE(review): "vrij" (Dutch: free) counts as affirmative here but maps to
    0 in copyright_case() -- confirm which classification is intended.
    """
    yes = re.compile(r'copyrights*:\s*(?:ja|yes|vrij)', re.IGNORECASE)
    if yes.search(stringToGrep):
        return 1
    no = re.compile(r'copyrights*:\s*(?:nee|no)', re.IGNORECASE)
    if no.search(stringToGrep):
        return 0
    return 2
def exifFromDescription(self, stringToGrep, t=0):
"""
Migration script
----------------
Try to populate standards-compliant metadata set from 1st generation metadata.
This project has quite a history, and a web tool existed many years prior to the
release of emma / beeldnet. By "1st generation" is meant metadata entered into the
predecessor application called Cumulus. Metadata was often incorrectly entered, so
a migration script was necessary.
At first release of emma / beeldnet, this migration script was used to migrate metadata
from the older images.
-title (images_metadata.subject)*
-keywords (images_keyword.keywords, images_metadata.keywords)
-description (images_metadata.description)
-copyright (images_keyword.cright)
-instructions (images_metadata.instructions)
-icc_profile:colorspacedata (images_keyword.profile)
-creator (images_metadata.creator)
-urgency (images_metadata.urgency)
-captionwriter (images_metadata.captionwriter)
-source (images_metadata.source)
-DateTimeOriginal (images_metadata.datetimeoriginal)
-credit (images_metadata.credit)
-location (images_metadata.location)
(see exifAll)
* Unavailable
Todo: combine this function with exifAll
"""
# Add a 'keywords' key if there is none
if not re.compile('^Keywords:', re.IGNORECASE).match(stringToGrep): stringToGrep = 'Keywords: ' + stringToGrep
s = stringToGrep.splitlines()
d = re.compile('(^.+?)(:)(.+$)', re.IGNORECASE)
rdict = {}
cdict = {}
for item in s:
m = d.match(item)
try:
rdict[m.group(1).lower()] = m.group(3).strip()
except:
pass
# Pass non-compliant instructions-related fieldnames to instructions key/value pair
instructions = []
for a in rdict.iterkeys():
if re.compile(r'^toestemming').match(a):
instructions.append(a)
instructions.append(rdict[a])
for b in rdict.iterkeys():
if re.compile(r'voorbeeld').match(b):
instructions.append(b)
instructions.append(rdict[b])
for c in rdict.iterkeys():
if re.compile(r'naamsvermelding').match(c):
instructions.append(c)
instructions.append(rdict[c])
cdict['instructions'] = ','.join(instructions)
cdict['credit'] = rdict['fotobureau'] if rdict.has_key('fotobureau') == True else rdict['fotograaf'] if rdict.has_key('fotograaf') == True else ''
cdict['creator'] = rdict['fotograaf'] if rdict.has_key('fotograaf') == True else ''
cdict['keywords'] = rdict['keywords'] if rdict.has_key('keywords') == True else ''
cdict['location'] = rdict['locatie'] if rdict.has_key('locatie') == True else ''
cdict['softdate'] = rdict['datum'] if rdict.has_key('datum') == True else ''
cdict['source'] = rdict['stocknummer'] if rdict.has_key('stocknummer') == True else ''
# copyright is somewhat more complicated
# sometimes the plural is used as key
cdict['copyright'] = rdict['copyright'] if rdict.has_key('copyright') == True else rdict['copyrights'] if rdict.has_key('copyrights') == True else ''
# and then users give a range of values...
copyright = cdict['copyright']
if copyright: cdict['copyright'] = self.copyright_case(copyright)
if t == 1:
return cdict, rdict
else:
return cdict
def getInlineThumbnail(self, fileToCheck, out_path):
    """ gets inline thumbnail from a file's metadata
        takes the source file path and an output path for the thumbnail
        extracts the embedded thumbnail (binary, -b) with exiftool
        (http://www.sno.phy.queensu.ca/~phil/exiftool/) via a shell
        redirect and returns out_path

        NOTE(review): out_path is interpolated into a shell command without
        quoting -- paths with spaces or shell metacharacters will break or be
        interpreted by the shell; consider a quoted/list form.
    """
    cmd = "exiftool -b " + " -thumbnailsimage " + "\"" + fileToCheck + "\" > " + out_path
    proc = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE,)
    return out_path
def copyright_case(self, copyright):
    """Convert all known free-form copyright values to a numeric code.

    Returns 1 (copyrighted) for "yes"/"ja" and values starting with "ja",
    0 (free) for "no"/"nee"/"vrij" and values starting with "nee",
    2 for anything unrecognized.

    Rewritten with plain conditionals: the previous version depended on the
    non-standard ``switch``/``case`` helper imported from utes; the logic is
    identical but now self-contained.
    """
    # Normalize: lowercase, trim, and drop stray double quotes.
    c = copyright.lower().strip().replace('"', '')
    if c in ('yes', 'ja'):
        return 1
    if c in ('no', 'nee', 'vrij'):
        return 0
    # Fall back to prefix matching for values like "ja, met naamsvermelding".
    if c.startswith('ja'):
        return 1
    if c.startswith('nee'):
        return 0
    return 2
def exif(self, attr, fileToCheck):
    """ gets metadata from a file
        takes an attribute to request and a full path
        returns the raw (binary, -b) value of that single tag using exiftool
        (http://www.sno.phy.queensu.ca/~phil/exiftool/)
    """
    # -b prints the bare value only, without the "Tag Name :" prefix.
    cmd = "exiftool -b " + " -" + attr + " " + "\"" + fileToCheck + "\""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            )
    return proc.communicate()[0]
def ex(self, attr, fileToCheck):
    """ gets metadata from a file
        takes an attribute to request and a full path
        same as exif() but without -b, so exiftool's normal labelled
        output is returned (http://www.sno.phy.queensu.ca/~phil/exiftool/)"""
    cmd = "exiftool " + " -" + attr + " " + "\"" + fileToCheck + "\""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            )
    return proc.communicate()[0]
def exifWrite(self, attr, stringToWrite, writeToFile, concat=False):
    """ writes metadata to a file
        Takes 1) an attribute (str) to write to, 2) a string to write, and 3) a full path.
        Optionally 4) a concat is done (boolean), adding to the attribute instead of wiping it clean and starting over.
        Hardcoded within this function:
        -P option set to ensure mtime is not altered
        -overwrite_original_in_place means that no backup will be made
        (Exiftool normally writes a backup file ((filename)._original))
        You should REALLY, REALLY make a copy of the tree you are processing first!
        returns exiftool's stdout (http://www.sno.phy.queensu.ca/~phil/exiftool/)

        Bug fix: the replace (non-concat) branch previously passed an argument
        list to Popen(shell=True) with a malformed "%s='%s" fragment (missing
        the leading '-' and a closing quote), so the tag was never written.
        Both branches now build a proper argument list and run with
        shell=False, which also avoids quoting problems in paths and values.
    """
    op = '+=' if concat else '='
    cmd = ['exiftool', '-P', '-overwrite_original_in_place',
           '-%s%s%s' % (attr, op, stringToWrite), writeToFile]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    return proc.communicate()[0]
def exifWriteAll(self, cmdDict, writeToFile):
    """ writes metadata to a file
        takes a dict of attributes to write to and a full path
        -P option set to ensure mtime is not altered
        -overwrite_original_in_place leaves you with nothing at all if this action goes south.
        A better option would be to leave the _original backup files for a bit and clean them up at night.
        returns exiftool's stdout (http://www.sno.phy.queensu.ca/~phil/exiftool/)

        Fix: uses .items() instead of the Python-2-only .iteritems() and
        string formatting instead of the removed unicode() builtin, so the
        module stays importable on Python 3 as well.
    """
    cmdList = ['exiftool', '-P', '-overwrite_original_in_place']
    # One "-tag=value" argument per dict entry.
    cmdList.extend(u'-%s=%s' % (a, b) for a, b in cmdDict.items())
    cmdList.append(writeToFile)
    return subprocess.Popen(cmdList, stdout=subprocess.PIPE,).stdout.read()
def description(self, fileToCheck):
    """
    # gets the description metadata from a file
    # takes a full path
    # returns the raw -description tag via exiftool, falling back to the
    # IPTC -caption-abstract tag when the description is empty
    # (http://www.sno.phy.queensu.ca/~phil/exiftool/)
    """
    cmd = "exiftool -b -description " + "\"" + fileToCheck + "\""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            )
    results = proc.communicate()[0]
    if results == '':
        # No XMP/EXIF description: try the IPTC caption-abstract instead.
        cmd = "exiftool -b -caption-abstract " + "\"" + fileToCheck + "\""
        proc = subprocess.Popen(cmd,
                                shell=True,
                                stdout=subprocess.PIPE,
                                )
        results = proc.communicate()[0]
    return results
def exifRAW(self, path):
    """Return exiftool's full tag dump for *path* as a dict, or None on failure.

    Just a simple wrapper around ``exiftool -all -j`` (JSON output,
    http://www.sno.phy.queensu.ca/~phil/exiftool/); the JSON is a
    one-element list, so element 0 is returned.
    """
    cmd = ['exiftool', '-all', '-j', path]
    r = subprocess.Popen(cmd, stdout=subprocess.PIPE,).stdout.read()
    try:
        return json.loads(r)[0]
    except (ValueError, IndexError):
        # Narrowed from a bare except: only swallow a parse failure (exiftool
        # emitted an error message instead of JSON) or an empty result list.
        return None
def exifJSON(self, path, key):
""" Wrapper to exiftool. Returns JSON for a single key """
k = '-%s' % key
cmd = ['exiftool', k, | |
"""
An n-th order discontinuous Galerkin solver for 1D scalar advection and inviscid Burgers eqn.
"""
from typing import NamedTuple
from sailfish.mesh import PlanarCartesianMesh
from sailfish.solver import SolverBase
from sailfish.kernel.library import Library
from numpy.polynomial.legendre import leggauss, Legendre
import numpy as np
NUM_CONS = 1  # number of conserved fields (scalar equations only)


class CellData:
    """
    Gauss weights, quadrature points, and tabulated Legendre polynomials.

    This class works for n-th order Gaussian quadrature in 1D. The basis
    functions are normalized Legendre polynomials sqrt(2n+1) * P_n(xsi)
    on the reference element xsi in [-1, 1].
    """

    def __init__(self, order=1):
        if order <= 0:
            raise ValueError("cell order must be at least 1")

        def leg(x, n, m=0):
            # m-th derivative of the normalized degree-n Legendre polynomial.
            # Bug fix: compare the degree with == rather than `is`; identity
            # comparison of integers relies on CPython's small-int caching and
            # silently breaks for n > 256 or on other interpreters.
            c = [(2 * n + 1) ** 0.5 if i == n else 0.0 for i in range(n + 1)]
            return Legendre(c).deriv(m)(x)

        f = [-1.0, 1.0]  # xsi-coordinate of faces
        g, w = leggauss(order)
        self.gauss_points = g
        self.weights = w
        # Basis functions tabulated at the faces, the interior quadrature
        # nodes, and their first derivatives at the nodes.
        self.phi_faces = np.array([[leg(x, n, m=0) for n in range(order)] for x in f])
        self.phi_value = np.array([[leg(x, n, m=0) for n in range(order)] for x in g])
        self.phi_deriv = np.array([[leg(x, n, m=1) for n in range(order)] for x in g])
        self.order = order

    def to_weights(self, ux):
        """Project nodal values ux[q, j] onto modal weights uw[q, n]."""
        w = self.weights
        p = self.phi_value
        uw = np.zeros([NUM_CONS, self.order])

        for q in range(NUM_CONS):
            for n in range(self.order):
                for j in range(self.num_points):
                    uw[q, n] += ux[q, j] * p[j][n] * w[j] * 0.5
        return uw

    def sample(self, uw, j):
        """Evaluate the modal expansion uw at interior quadrature node j."""
        ux = np.zeros(NUM_CONS)
        for q in range(NUM_CONS):
            for n in range(self.order):
                ux[q] += uw[q, n] * self.phi_value[j, n]
        return ux

    def sample_face(self, uw, j):
        """Evaluate the modal expansion uw at face j (0: left, 1: right)."""
        ux = np.zeros(NUM_CONS)
        for q in range(NUM_CONS):
            for n in range(self.order):
                ux[q] += uw[q, n] * self.phi_faces[j, n]
        return ux

    @property
    def num_points(self):
        """Number of interior quadrature points (equal to the order)."""
        return self.order
# def limit_troubled_cells(u):
# def minmod(w1, w0l, w0, w0r):
# BETA_TVB = 1.0
# a = w1 * (3.0 ** 0.5)
# b = (w0 - w0l) * BETA_TVB
# c = (w0r - w0) * BETA_TVB
# return (
# (0.25 / (3.0 ** 0.5))
# * abs(np.sign(a) + np.sign(b))
# * (np.sign(a) + np.sign(c))
# * min(abs(a), abs(b), abs(c))
# )
# nz = u.shape[0]
# for i in range(nz):
# im1 = (i - 1 + nz) % nz
# ip1 = (i + 1 + nz) % nz
# # integrating polynomial extended from left zone into this zone
# a = (
# 1.0 * u[im1, 0]
# + 2.0 * (3.0 ** 0.5) * u[im1, 1]
# + 5.0 * (5.0 ** 0.5) / 3.0 * u[im1, 2]
# )
# # integrating polynomial extended from right zone into this zone
# b = (
# 1.0 * u[ip1, 0]
# - 2.0 * (3.0 ** 0.5) * u[ip1, 1]
# + 5.0 * (5.0 ** 0.5) / 3.0 * u[ip1, 2]
# )
# tci = (abs(u[i, 0] - a) + abs(u[i, 0] - b)) / max(
# abs(u[im1, 0]), abs(u[i, 0]), abs(u[ip1, 0])
# )
# if tci > 0.1:
# w1t = minmod(u[i, 1], u[im1, 0], u[i, 0], u[ip1, 0])
# if u[i, 1] != w1t:
# u[i, 1] = w1t
# u[i, 2] = 0.0
def rhs(physics, uw, cell, dx, uwdot):
    """Compute the DG residual du/dt for every zone (periodic boundaries).

    physics selects the flux (advection or Burgers); uw[i, q, n] are the modal
    weights; dx is the (uniform) zone width; the result is written into
    uwdot in place, shape matching uw.
    """
    if physics.equation == "advection":
        wavespeed = physics.wavespeed

        def flux(ux):
            return wavespeed * ux

        def upwind(ul, ur):
            # Upwinding follows the sign of the (constant) wave speed.
            if wavespeed > 0.0:
                return flux(ul)
            else:
                return flux(ur)

    elif physics.equation == "burgers":

        def flux(ux):
            return np.array([0.5 * ux[0] * ux[0]])

        def upwind(ul, ur):
            # Godunov-style selection based on the signs of the two states;
            # opposite signs (rarefaction through zero) give zero flux.
            al = ul[0]
            ar = ur[0]

            if al > 0.0 and ar > 0.0:
                return flux(ul)
            elif al < 0.0 and ar < 0.0:
                return flux(ur)
            else:
                return np.array([0.0])

    nz = uw.shape[0]
    pv = cell.phi_value
    pf = cell.phi_faces
    pd = cell.phi_deriv
    w = cell.weights
    nhat = np.array([-1.0, 1.0])  # outward face normals (left, right)

    for i in range(nz):
        # Periodic neighbor indices.
        im1 = (i - 1 + nz) % nz
        ip1 = (i + 1 + nz) % nz
        # Left/right traces at both faces of zone i.
        uimh_l = cell.sample_face(uw[im1], 1)
        uimh_r = cell.sample_face(uw[i], 0)
        uiph_l = cell.sample_face(uw[i], 1)
        uiph_r = cell.sample_face(uw[ip1], 0)
        fimh = upwind(uimh_l, uimh_r)
        fiph = upwind(uiph_l, uiph_r)
        fs = np.array([fimh, fiph]).T
        ux = np.array([cell.sample(uw[i], j) for j in range(cell.order)]).T
        fx = np.array([flux(u) for u in ux.T]).T

        for n in range(cell.order):
            udot_s = 0.0  # surface (face flux) contribution
            udot_v = 0.0  # volume (interior flux) contribution

            for j in range(2):
                udot_s -= fs[0, j] * pf[j, n] * nhat[j] / dx
            for j in range(cell.num_points):
                udot_v += fx[0, j] * pd[j, n] * w[j] / dx
            uwdot[i, 0, n] = udot_s + udot_v
class Options(NamedTuple):
    """User-tunable solver options (constructed from the ``options`` dict)."""
    order: int = 1  # polynomial order of the DG scheme (>= 1)
    integrator: str = "rk2"  # time integrator; validated in Solver.__init__
class Physics(NamedTuple):
    """Physical model parameters (constructed from the ``physics`` dict)."""
    wavespeed: float = 1.0  # advection speed; unused for Burgers
    equation: str = "advection"  # or burgers
class Solver(SolverBase):
"""
An n-th order, discontinuous Galerkin solver for 1D scalar advection.
Time-advance integrator options:
- :code:`rk1`: Forward Euler
- :code:`rk2`: SSP-RK2 of Shu & Osher (1988; Eq. 2.15)
- :code:`rk3`: SSP-RK3 of Shu & Osher (1988; Eq. 2.18)
- :code:`rk3-sr02`: four-stage 3rd Order SSP-4RK3 of Spiteri & Ruuth (2002)
"""
def __init__(
    self,
    setup=None,
    mesh=None,
    time=0.0,
    solution=None,
    num_patches=1,
    mode="cpu",
    physics=dict(),
    options=dict(),
):
    """Validate the configuration, load the C kernel, and build or adopt
    the initial solution (modal weights per zone).

    NOTE(review): the mutable default args (physics/options) are benign
    here because both are immediately rebound, never mutated.
    """
    options = Options(**options)
    physics = Physics(**physics)
    cell = CellData(order=options.order)

    # Reject every configuration this solver does not support.
    if num_patches != 1:
        raise ValueError("only works on one patch")
    if type(mesh) != PlanarCartesianMesh:
        raise ValueError("only the planar cartesian mesh is supported")
    if mode != "cpu":
        raise ValueError("only cpu mode is supported")
    if setup.boundary_condition != "periodic":
        raise ValueError("only periodic boundaries are supported")
    if physics.equation not in ["advection", "burgers"]:
        raise ValueError("physics.equation must be advection or burgers")
    if options.integrator not in [
        "rk1",
        "rk2",
        "rk3",
        "rk3-sr02",
        "SSPRK32",
        "SSPRK43",
        "SSPRK53",
        "SSPRK54",
    ]:
        raise ValueError(
            "options.integrator must be "
            "rk1|rk2|rk3|rk3-sr02|SSPRK32|SSPRK43|SSPRK53|SSPRK54"
        )
    if options.order <= 0:
        raise ValueError("option.order must be greater than 0")

    # The compiled kernel source lives next to this module (same basename, .c).
    with open(__file__.replace(".py", ".c"), "r") as f:
        source = f.read()
    self.lib = Library(source, mode=mode, debug=True)

    if solution is None:
        # Sample the setup's primitive at each quadrature node, then project
        # the nodal values onto modal weights zone by zone.
        num_zones = mesh.shape[0]
        xf = mesh.faces(0, num_zones)  # face coordinates
        px = np.zeros([num_zones, 1, cell.num_points])
        ux = np.zeros([num_zones, 1, cell.num_points])
        uw = np.zeros([num_zones, 1, cell.order])
        dx = mesh.dx

        for i in range(num_zones):
            for j in range(cell.num_points):
                xsi = cell.gauss_points[j]
                xj = xf[i] + (xsi + 1.0) * 0.5 * dx
                setup.primitive(time, xj, px[i, :, j])

        ux[...] = px[...]  # the conserved variable is also the primitive

        for i in range(num_zones):
            uw[i] = cell.to_weights(ux[i])
        self.conserved_w = uw
    else:
        self.conserved_w = solution

    self.t = time
    self.mesh = mesh
    self.cell = cell
    self._options = options
    self._physics = physics
@property
def solution(self):
    """The solver state: modal weights, shape (num_zones, 1, order)."""
    return self.conserved_w

@property
def primitive(self):
    """Per-zone modal weights of the single field; the conserved variable
    equals the primitive for these scalar equations (see __init__)."""
    return self.conserved_w[:, 0]

@property
def time(self):
    """Current simulation time."""
    return self.t
# NOTE: a trivial `maximum_cfl` property (returning 1.0) used to be defined
# here; it was dead code because the integrator-aware definition later in
# the class body shadows it, so it has been removed.

@property
def options(self):
    """Solver options as a plain dict."""
    return self._options._asdict()

@property
def physics(self):
    """Physics parameters as a plain dict."""
    return self._physics._asdict()
@property
def maximum_cfl(self):
    """Largest stable CFL number for the configured integrator and order."""
    k = self.cell.order - 1
    # Numerator of the CFL bound; the denominator (2k+1) is the usual RKDG
    # linear-stability scaling with polynomial degree k.
    numerators = {
        "rk1": 1.0,
        "rk2": 1.0,
        "rk3": 1.0,
        "rk3-sr02": 2.0,
        "SSPRK32": 2.0,  # up to 2.2 / (2 * k + 1) seems to work
        "SSPRK43": 2.0,  # C = 1.683339717642499
        "SSPRK53": 2.387300839230550,  # C = 2.387300839230550
        "SSPRK54": 1.5,  # up to 1.7 / (2 * k + 1) seems to work
    }
    numerator = numerators.get(self._options.integrator)
    if numerator is None:
        # Unknown integrator: preserve the original fall-through (None).
        return None
    return numerator / (2 * k + 1)
def maximum_wavespeed(self):
    """Fastest characteristic speed, used for the time-step restriction."""
    equation = self._physics.equation
    if equation == "advection":
        return abs(self._physics.wavespeed)
    if equation == "burgers":
        # For Burgers the wave speed is the solution itself: take the largest
        # magnitude of the n=0 (cell-average) modal weights.
        return abs(self.conserved_w[:, 0]).max()
def advance(self, dt):
def udot(u):
udot = np.zeros_like(u)
# rhs(self._physics, u, self.cell, self.mesh.dx, udot)
self.lib.scdg_1d_udot[u.shape[0]](u, udot, self.mesh.dx)
return udot
if self._options.integrator == "rk1":
u = self.conserved_w
u += dt * udot(u)
if self._options.integrator == "rk2":
b1 = 0.0
b2 = 0.5
u = u0 = self.conserved_w.copy()
u = u0 * b1 + (1.0 - b1) * (u + dt * udot(u))
u = u0 * b2 + (1.0 - b2) * (u + dt * udot(u))
if self._options.integrator == "rk3":
b1 = 0.0
b2 = 3.0 / 4.0
b3 = 1.0 / 3.0
u = u0 = self.conserved_w.copy()
u = u0 * b1 + (1.0 - b1) * (u + dt * udot(u))
u = u0 * b2 + (1.0 - b2) * (u + dt * udot(u))
u = u0 * b3 + (1.0 - b3) * (u + dt * udot(u))
if self._options.integrator == "rk3-sr02":
u = | |
<filename>template/template.py<gh_stars>0
# -*- coding: utf-8 -*-
# ---
# jupyter:
# celltoolbar: Edit Metadata
# hide_input: false
# ipub:
# bibliography: ExampleBib
# listcode: false
# listfigures: false
# listtables: false
# titlepage:
# address:
# - <NAME>
# - <NAME> Hall 322
# - Princeton University
# - Princeton, NJ 08540
# - <EMAIL>
# author: <NAME>
# email: <EMAIL>
# institution:
# - Princeton University
# logo: princeton_logo.pdf
# running_head: PsiPyPublish
# subtitle: ''
# tagline: ''
# title: 'PsiPyPublish: An IPyPublish Template for Psychological Research'
# version: Template file -- not for submission
# word_count: XXX (Main text + abstract)
# toc: true
# jupytext:
# metadata_filter:
# cells:
# additional: all
# notebook:
# additional: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.7.2
# toc:
# nav_menu: {}
# number_sections: true
# sideBar: true
# skip_h1_title: false
# toc_cell: false
# toc_position:
# height: calc(100% - 180px)
# left: 10px
# top: 150px
# width: 234.6px
# toc_section_display: block
# toc_window_display: true
# varInspector:
# cols:
# lenName: 16
# lenType: 16
# lenVar: 40
# kernels_config:
# python:
# delete_cmd_postfix: ''
# delete_cmd_prefix: 'del '
# library: var_list.py
# varRefreshCmd: print(var_dic_list())
# r:
# delete_cmd_postfix: ') '
# delete_cmd_prefix: rm(
# library: var_list.r
# varRefreshCmd: 'cat(var_dic_list()) '
# types_to_exclude:
# - module
# - function
# - builtin_function_or_method
# - instance
# - _Feature
# window_display: false
# ---
# %%
# My default imports for data analysis
# %reset -f
# %matplotlib inline
# %config InlineBackend.figure_format = "retina" # High-res graphs (rendered irrelevant by svg option below)
# %config InlineBackend.print_figure_kwargs = {"bbox_inches":"tight"} # No extra white space
# %config InlineBackend.figure_format = "svg" # 'png' is default
import warnings
warnings.filterwarnings("ignore") # Because we are adults
from IPython.core.debugger import set_trace
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from IPython.display import SVG, display, Latex
from dfply import *
# ipypublish imports
# See the imported script for changes to matplotlib settings
# Has helpful commands and settings for making the final pdf
from ipypublish.scripts.ipynb_latex_setup import *
# %% [markdown]
# \renewcommand{\baselinestretch}{1.5} % make PDF's line spacing a little roomier
# %% [markdown]
# # Introduction
# This is a template for an (approximately) APA-style [iPyPublish](https://github.com/chrisjsewell/ipypublish) manuscript. Feel free to check out the documentation and examples at that link; it's all very good. There you can find information on how to embed figures, code, tables, and more. References are managed using [Zotero](https://www.zotero.org/) in concert with [Better BibTex](https://github.com/retorquere/zotero-better-bibtex/). For now, you're going to want to edit the notebook's metadata in order to change what appears on the title page. In addition, the metadata includes `jupytext` configuration, so that you can automatically generate markdown and py:percent versions of this notebook automatically on saving -- assuming you have `jupytext` installed and correctly configured, that is!
# %% [markdown]
# ## Configuration
# My working configuration files for Jupyter (with Jupytext) and iPyPublish can be found in this repository. Naturally, you will need to replace your computer's original versions of these files with the new ones included here. For example, if using Anaconda, your iPyPublish installation can be found at `your_environment_name/Lib/site-packages/ipypublish`.
#
# - `biblio_natbib.py`, `doc_article.py`, and `front_pages.py` all live in `ipypublish\latex\ipypublish`
# - `ipynb_latex_setup.py` lives in `ipypublish\scripts`
# - `latex_ipypublish_main.py` lives in `ipypublish\export_plugins`
# %% [markdown]
# ## Caveats
# Since the creation of this template, `ipypublish` has been upgraded to version 0.9.0. This template was designed to work with version 0.6.7, and suits my needs; as such, it may take some time before I update this guide to deal with the latest version — if any changes are even needed at all, since I haven't had a chance to try out the latest edition.
# %% [markdown]
# ## Troubleshooting
#
# ### Jupytext
# - If saving to `Rmd` format, beware using single quotes within figure caption metadata, since R Markdown uses single quotes for metadata and not double quotes, which creates issues when you want to include apostrophes.
# - If you are encountering problems opening or even "trusting" a notebook that was previously working fine, simply delete the other non-ipynb representations of the notebook. I encounter this issue most often when synchronizing notebook files via Dropbox.
# %% [markdown]
# # Notes
# %% [markdown]
# ## Production
# Produce a notebook in the terminal with the command `nbpublish -pdf -pbug file_name.ipynb` [^1]. Outputs to `converted` folder at the `.ipynb` file's location.[^2]
#
# [^1]: \hphantom{} Technically `-pbug` is optional so you can see verbose output, but nbpublish seems to work more reliably with this option enabled.
# [^2]: \hphantom{} `nbpublish` requires a lot of different technologies to work together. As such, if a build fails, simply try running the same command once more to see if that fixes the issue before moving on to more intense debugging.
# %% [markdown]
# ## Markdown
# - Headings and sub-headings (and so on) are made by prefacing text with `#`. The more `#`s, the greater the level of heading.
# - Unordered lists are made by prefacing text with a "-".
# 1. Numbered lists start with a number and dot.
# 2. Create sublists via tabbed indentation.
# - Footnote links are made with `[^X]` (where `X` is some number). Footnote content is placed below with `[^X]: Content goes here`. Here's an example.[^2]
# - Correct formatting only appears after running `nbpublish`.
# - [Links](https://google.com) can be generated with the following syntax: `[link](http://www.website.com)`
# - `Code` can be placed between backticks (the character to the left of the `1` key at the top of your keyboard).
# - Place it between 3 backticks (with an optional language name) and you get (syntax-highlighted) block code.[^3]
# ```python
# print(foo)
# ```
# - *Italic*, __bold__, and ***bolded italic*** text can be created by sandwiching text between 1, 2, or 3 `*`s or `_`s respectively.
# - > Blockquotes are made by prefacing text with `>` .
#
# \todo[inline]{Get inline todos with \LaTeX's "todo" command.}
#
# [^2]: \hphantom{} Footnote content goes here!
# [^3]: \hphantom{} Note, however, that one should not use this for displaying large chunks of code in an nbpublish PDF. Instead, see code cell \cref{code:example_list_comp} below for an example of how to place code in the PDF
# %% [markdown] {"variables": {"str(2 + 2)": "4"}}
# ## Templating — Pass Variables into Markdown
# - Using the [Python Markdown Extension](https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/nbextensions/python-markdown/readme.html), you can pipe valid Python code into markdown cells directly by sandwiching it between two curly braces: E.g., 2 + 2 = {{str(2 + 2)}}. (You should see `2 + 2 = 4` in the PDF output, despite the fact that I never typed out `4` at all.)
# - Note that the notebook needs to be `Trusted`; look to the top right to see if it is and simply click on `Not Trusted` to change that.
#
# %% [markdown]
# ## Latex
# - Render arbitrary \LaTeX \hphantom{} math by sandwiching it between dollar signs: $a = b+c$
# - Alternatively, use `Latex()` command from `ipypublish` within a code cell.
# - \LaTeX's `hphantom` command is useful when you just want a little more horizontal space between items.
# %% [markdown]
# ## Citations and References
# - First, specify the `bibliography` entry in the notebook metadata to the correct bibliography file (Edit --> Edit Notebook Metadata). _Leave out the `.bib` extension from this file name!_ It should look like `path/to/bibFileName` .
# - If nbpublish is having problems finding the `.bib` file, I have had success by placing a copy in the `converted/notebook_name_files/` directory, as well as placing the file in the same folder as the actual notebook. This makes set up for the notebook's bibliography metadata especially easy.
# - Citations can be input with citation keys and standard \LaTeX \hphantom{} commands (e.g., `\cite{citationKey}`).
# - I've had success with citation keys generated via Zotero Better BibTex, like so \citep{uddenbergTelefaceSerialReproduction2018}. Note that you won't see the final formatted output until you run `nbpublish`.
# - See a [cheat sheet of valid cite commands here](http://merkel.texture.rocks/Latex/natbib.php).
# %% [markdown]
# ## Terminal commands
# - Execute terminal commands in Jupyter by prefacing code with `!` .
# - For example, you can export this notebook with the following code cell (uncommented, of course):
# %%
# !nbpublish -pdf -pbug template.ipynb
# %% [markdown]
# ## Figures
# - Figures can be displayed with commands | |
{
"color": "morado",
"size": "m",
"brand": "zara",
"precio": 4444
}
},
{
"id": 253,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "polo",
"precio": 2333
}
},
{
"id": 254,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "nike",
"precio": 4447
}
},
{
"id": 255,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "blue",
"precio": 9500
}
},
{
"id": 256,
"atrr": {
"color": "morado",
"size": "xl",
"brand": "polo",
"precio": 4447
}
},
{
"id": 257,
"atrr": {
"color": "rojo",
"size": "s",
"brand": "polo",
"precio": 4444
}
},
{
"id": 258,
"atrr": {
"color": "azul",
"size": "xl",
"brand": "blue",
"precio": 4569
}
},
{
"id": 259,
"atrr": {
"color": "azul",
"size": "m",
"brand": "blue",
"precio": 14000
}
},
{
"id": 260,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "polo",
"precio": 10000
}
},
{
"id": 261,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "polo",
"precio": 2333
}
},
{
"id": 262,
"atrr": {
"color": "verde",
"size": "s",
"brand": "polo",
"precio": 4447
}
},
{
"id": 263,
"atrr": {
"color": "rojo",
"size": "xs",
"brand": "blue",
"precio": 4558
}
},
{
"id": 264,
"atrr": {
"color": "morado",
"size": "xxl",
"brand": "nike",
"precio": 4789
}
},
{
"id": 265,
"atrr": {
"color": "azul",
"size": "xl",
"brand": "blue",
"precio": 14000
}
},
{
"id": 266,
"atrr": {
"color": "rojo",
"size": "xl",
"brand": "polo",
"precio": 4444
}
},
{
"id": 267,
"atrr": {
"color": "verde",
"size": "xl",
"brand": "zara",
"precio": 2333
}
},
{
"id": 268,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "zara",
"precio": 8889
}
},
{
"id": 269,
"atrr": {
"color": "rojo",
"size": "xs",
"brand": "blue",
"precio": 8889
}
},
{
"id": 270,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "adidas",
"precio": 1540
}
},
{
"id": 271,
"atrr": {
"color": "verde",
"size": "xxl",
"brand": "adidas",
"precio": 4447
}
},
{
"id": 272,
"atrr": {
"color": "morado",
"size": "xl",
"brand": "blue",
"precio": 4447
}
},
{
"id": 273,
"atrr": {
"color": "morado",
"size": "m",
"brand": "zara",
"precio": 4569
}
},
{
"id": 274,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "zara",
"precio": 4558
}
},
{
"id": 275,
"atrr": {
"color": "rojo",
"size": "xl",
"brand": "polo",
"precio": 4447
}
},
{
"id": 276,
"atrr": {
"color": "morado",
"size": "xxl",
"brand": "nike",
"precio": 15000
}
},
{
"id": 277,
"atrr": {
"color": "naranja",
"size": "l",
"brand": "nike",
"precio": 4569
}
},
{
"id": 278,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "nike",
"precio": 11000
}
},
{
"id": 279,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "polo",
"precio": 4569
}
},
{
"id": 280,
"atrr": {
"color": "naranja",
"size": "xl",
"brand": "adidas",
"precio": 4447
}
},
{
"id": 281,
"atrr": {
"color": "amarillo",
"size": "s",
"brand": "nike",
"precio": 14000
}
},
{
"id": 282,
"atrr": {
"color": "azul",
"size": "xxl",
"brand": "polo",
"precio": 4558
}
},
{
"id": 283,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "zara",
"precio": 9500
}
},
{
"id": 284,
"atrr": {
"color": "amarillo",
"size": "xl",
"brand": "adidas",
"precio": 8889
}
},
{
"id": 285,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "adidas",
"precio": 2333
}
},
{
"id": 286,
"atrr": {
"color": "naranja",
"size": "s",
"brand": "adidas",
"precio": 4447
}
},
{
"id": 287,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "zara",
"precio": 9500
}
},
{
"id": 288,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "nike",
"precio": 14000
}
},
{
"id": 289,
"atrr": {
"color": "rojo",
"size": "xl",
"brand": "blue",
"precio": 4447
}
},
{
"id": 290,
"atrr": {
"color": "morado",
"size": "xxl",
"brand": "adidas",
"precio": 15000
}
},
{
"id": 291,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "polo",
"precio": 11000
}
},
{
"id": 292,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "blue",
"precio": 11000
}
},
{
"id": 293,
"atrr": {
"color": "verde",
"size": "xxl",
"brand": "zara",
"precio": 8889
}
},
{
"id": 294,
"atrr": {
"color": "amarillo",
"size": "l",
"brand": "polo",
"precio": 1540
}
},
{
"id": 295,
"atrr": {
"color": "azul",
"size": "xl",
"brand": "nike",
"precio": 1540
}
},
{
"id": 296,
"atrr": {
"color": "amarillo",
"size": "m",
"brand": "polo",
"precio": 8889
}
},
{
"id": 297,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "adidas",
"precio": 11000
}
},
{
"id": 298,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "adidas",
"precio": 4569
}
},
{
"id": 299,
"atrr": {
"color": "verde",
"size": "l",
"brand": "blue",
"precio": 8889
}
},
{
"id": 300,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "blue",
"precio": 11000
}
},
{
"id": 301,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "nike",
"precio": 4447
}
},
{
"id": 302,
"atrr": {
"color": "azul",
"size": "xl",
"brand": "nike",
"precio": 4789
}
},
{
"id": 303,
"atrr": {
"color": "verde",
"size": "xxl",
"brand": "blue",
"precio": 9500
}
},
{
"id": 304,
"atrr": {
"color": "naranja",
"size": "xxl",
"brand": "zara",
"precio": 9500
}
},
{
"id": 305,
"atrr": {
"color": "morado",
"size": "m",
"brand": "zara",
"precio": 4569
}
},
{
"id": 306,
"atrr": {
"color": "naranja",
"size": "s",
"brand": "polo",
"precio": 4569
}
},
{
"id": 307,
"atrr": {
"color": "rojo",
"size": "xxl",
"brand": "polo",
"precio": 4789
}
},
{
"id": 308,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "zara",
"precio": 10000
}
},
{
"id": 309,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "polo",
"precio": 8889
}
},
{
"id": 310,
"atrr": {
"color": "rojo",
"size": "m",
"brand": "zara",
"precio": 4444
}
},
{
"id": 311,
"atrr": {
"color": "amarillo",
"size": "l",
"brand": "adidas",
"precio": 11000
}
},
{
"id": 312,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "polo",
"precio": 4569
}
},
{
"id": 313,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "blue",
"precio": 10000
}
},
{
"id": 314,
"atrr": {
"color": "rojo",
"size": "xl",
"brand": "adidas",
"precio": 11000
}
},
{
"id": 315,
"atrr": {
"color": "azul",
"size": "xl",
"brand": "zara",
"precio": 11000
}
},
{
"id": 316,
"atrr": {
"color": "amarillo",
"size": "xs",
"brand": "zara",
"precio": 15000
}
},
{
"id": 317,
"atrr": {
"color": "amarillo",
"size": "l",
"brand": "adidas",
"precio": 8889
}
},
{
"id": 318,
"atrr": {
"color": "morado",
"size": "m",
"brand": "nike",
"precio": 11000
}
},
{
"id": 319,
"atrr": {
"color": "amarillo",
"size": "l",
"brand": "nike",
"precio": 4569
}
},
{
"id": 320,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "nike",
"precio": 2333
}
},
{
"id": 321,
"atrr": {
"color": "morado",
"size": "xl",
"brand": "zara",
"precio": 4789
}
},
{
"id": 322,
"atrr": {
"color": "rojo",
"size": "l",
"brand": "nike",
"precio": 4789
}
},
{
"id": 323,
"atrr": {
"color": "azul",
"size": "xs",
"brand": "polo",
"precio": 10000
}
},
{
"id": 324,
"atrr": {
"color": "morado",
"size": "s",
"brand": "blue",
"precio": 4447
}
},
{
"id": 325,
"atrr": {
"color": "rojo",
"size": "xl",
"brand": "adidas",
"precio": 4447
}
},
{
"id": 326,
"atrr": {
"color": "verde",
"size": "xs",
"brand": "zara",
"precio": 4569
}
},
{
"id": 327,
"atrr": {
"color": "azul",
"size": "xxl",
"brand": "adidas",
"precio": 9500
}
},
{
"id": 328,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "zara",
"precio": 14000
}
},
{
"id": 329,
"atrr": {
"color": "morado",
"size": "xs",
"brand": "adidas",
"precio": 4569
}
},
{
"id": 330,
"atrr": {
"color": "morado",
"size": "xxl",
"brand": "polo",
"precio": 4447
}
},
{
"id": 331,
"atrr": {
"color": "naranja",
"size": "xs",
"brand": "blue",
"precio": 2333
}
},
{
"id": 332,
"atrr": {
"color": "amarillo",
"size": "xl",
"brand": "blue",
"precio": 8889
}
},
{
"id": 333,
"atrr": {
"color": "verde",
"size": "xl",
"brand": "blue",
"precio": 4444
}
},
{
"id": 334,
"atrr": {
"color": "rojo",
"size": "xxl",
"brand": "nike",
"precio": 4447
}
},
{
"id": 335,
"atrr": {
"color": "naranja",
"size": "l",
"brand": "blue",
"precio": 10000
}
},
{
"id": 336,
"atrr": {
"color": "rojo",
"size": "xs",
"brand": "blue",
"precio": 10000
}
},
{
"id": 337,
"atrr": {
"color": "rojo",
"size": "xs",
| |
city, Colorado",28002),
("Laird CDP, Colorado",52),
("La Jara town, Colorado",835),
("La Junta city, Colorado",6914),
("La Junta Gardens CDP, Colorado",90),
("Lake City town, Colorado",454),
("Lakeside town, Colorado",8),
("Lakewood city, Colorado",153522),
("Lamar city, Colorado",7606),
("Laporte CDP, Colorado",2411),
("Larkspur town, Colorado",257),
("La Salle town, Colorado",2539),
("Las Animas city, Colorado",2065),
("La Veta town, Colorado",772),
("Lazy Acres CDP, Colorado",868),
("Leadville city, Colorado",2679),
("Leadville North CDP, Colorado",2031),
("Lewis CDP, Colorado",121),
("Leyner CDP, Colorado",0),
("Limon town, Colorado",1269),
("Lincoln Park CDP, Colorado",3290),
("Littleton city, Colorado",47035),
("Lochbuie town, Colorado",6036),
("Loghill Village CDP, Colorado",604),
("Log Lane Village town, Colorado",1224),
("Loma CDP, Colorado",1161),
("Lone Tree city, Colorado",14209),
("Longmont city, Colorado",93244),
("Louisville city, Colorado",20705),
("Louviers CDP, Colorado",307),
("Loveland city, Colorado",75395),
("Lynn CDP, Colorado",57),
("Lyons town, Colorado",2114),
("McCoy CDP, Colorado",41),
("Manassa town, Colorado",1020),
("Mancos town, Colorado",1742),
("Manitou Springs city, Colorado",5280),
("Manzanola town, Colorado",478),
("Marble town, Colorado",105),
("Maybell CDP, Colorado",34),
("Maysville CDP, Colorado",157),
("Mead town, Colorado",4523),
("Meeker town, Colorado",2591),
("Meridian CDP, Colorado",3615),
("Merino town, Colorado",249),
("Midland CDP, Colorado",404),
("Milliken town, Colorado",6773),
("Minturn town, Colorado",1090),
("Moffat town, Colorado",135),
("Monte Vista city, Colorado",4169),
("Montezuma town, Colorado",78),
("Montrose city, Colorado",19096),
("Monument town, Colorado",7065),
("Morgan Heights CDP, Colorado",267),
("Morrison town, Colorado",388),
("Mountain Meadows CDP, Colorado",98),
("Mountain View town, Colorado",614),
("Mountain Village town, Colorado",1767),
("Mount Crested Butte town, Colorado",967),
("Mulford CDP, Colorado",407),
("Naturita town, Colorado",484),
("Nederland town, Colorado",1385),
("New Castle town, Colorado",4767),
("Niwot CDP, Colorado",4106),
("No Name CDP, Colorado",53),
("Norrie CDP, Colorado",0),
("Northglenn city, Colorado",38918),
("North La Junta CDP, Colorado",313),
("North Washington CDP, Colorado",553),
("Norwood town, Colorado",619),
("Nucla town, Colorado",518),
("Nunn town, Colorado",577),
("Oak Creek town, Colorado",894),
("Olathe town, Colorado",1882),
("Olney Springs town, Colorado",539),
("Ophir town, Colorado",170),
("Orchard CDP, Colorado",115),
("Orchard City town, Colorado",3069),
("Orchard Mesa CDP, Colorado",6601),
("Ordway town, Colorado",1472),
("Otis town, Colorado",530),
("Ouray city, Colorado",883),
("Ovid town, Colorado",216),
("Padroni CDP, Colorado",97),
("Pagosa Springs town, Colorado",2064),
("Palisade town, Colorado",2686),
("Palmer Lake town, Colorado",2684),
("Paoli town, Colorado",71),
("Paonia town, Colorado",1374),
("Parachute town, Colorado",1242),
("Paragon Estates CDP, Colorado",876),
("Parker town, Colorado",52563),
("Parshall CDP, Colorado",51),
("Peetz town, Colorado",203),
("Penrose CDP, Colorado",2990),
("Peoria CDP, Colorado",221),
("Perry Park CDP, Colorado",1917),
("Peyton CDP, Colorado",281),
("Phippsburg CDP, Colorado",225),
("Piedra CDP, Colorado",35),
("Pierce town, Colorado",1031),
("Pine Brook Hill CDP, Colorado",1045),
("Pitkin town, Colorado",100),
("Platteville town, Colorado",2707),
("Poncha Springs town, Colorado",778),
("Ponderosa Park CDP, Colorado",3617),
("Portland CDP, Colorado",88),
("Pritchett town, Colorado",104),
("Pueblo city, Colorado",109985),
("Pueblo West CDP, Colorado",31860),
("Ramah town, Colorado",156),
("Rangely town, Colorado",2289),
("Raymer (New Raymer) town, Colorado",62),
("Red Cliff town, Colorado",366),
("Red Feather Lakes CDP, Colorado",443),
("Redlands CDP, Colorado",9036),
("Redstone CDP, Colorado",72),
("Redvale CDP, Colorado",241),
("Rico town, Colorado",174),
("Ridgway town, Colorado",1060),
("Rifle city, Colorado",9600),
("Rock Creek Park CDP, Colorado",111),
("Rockvale town, Colorado",516),
("Rocky Ford city, Colorado",3824),
("Rollinsville CDP, Colorado",218),
("Romeo town, Colorado",312),
("Roxborough Park CDP, Colorado",9452),
("Rye town, Colorado",185),
("Saddle Ridge CDP, Colorado",84),
("Saguache town, Colorado",526),
("St. Ann Highlands CDP, Colorado",440),
("St. Mary's CDP, Colorado",516),
("Salida city, Colorado",5642),
("Salt Creek CDP, Colorado",601),
("San Acacio CDP, Colorado",91),
("Sanford town, Colorado",935),
("San Luis town, Colorado",741),
("Sawpit town, Colorado",43),
("Security-Widefield CDP, Colorado",38134),
("Sedalia CDP, Colorado",113),
("Sedgwick town, Colorado",135),
("Segundo CDP, Colorado",143),
("Seibert town, Colorado",119),
("Seven Hills CDP, Colorado",167),
("Severance town, Colorado",4187),
("Shaw Heights CDP, Colorado",5459),
("Sheridan city, Colorado",6056),
("Sheridan Lake town, Colorado",95),
("Sherrelwood CDP, Colorado",19243),
("Silt town, Colorado",3091),
("Silver Cliff town, Colorado",614),
("Silver Plume town, Colorado",163),
("Silverthorne town, Colorado",4559),
("Silverton town, Colorado",490),
("Simla town, Colorado",693),
("Smeltertown CDP, Colorado",52),
("Snowmass Village town, Colorado",2824),
("Snyder CDP, Colorado",147),
("Southern Ute CDP, Colorado",137),
("South Fork town, Colorado",332),
("Springfield town, Colorado",1347),
("Starkville town, Colorado",83),
("Steamboat Springs city, Colorado",12777),
("Sterling city, Colorado",13775),
("Stonegate CDP, Colorado",9511),
("Stonewall Gap CDP, Colorado",40),
("Strasburg CDP, Colorado",2705),
("Stratmoor CDP, Colorado",5883),
("Stratton town, Colorado",650),
("Sugar City town, Colorado",519),
("Sugarloaf CDP, Colorado",393),
("Sunshine CDP, Colorado",223),
("Superior town, Colorado",13014),
("Swink town, Colorado",766),
("Tabernash CDP, Colorado",400),
("Tall Timber CDP, Colorado",179),
("Telluride town, Colorado",1826),
("The Pinery CDP, Colorado",11754),
("Thornton city, Colorado",134588),
("Timnath town, Colorado",2922),
("Todd Creek CDP, Colorado",4286),
("Towaoc CDP, Colorado",1099),
("Towner CDP, Colorado",18),
("Trail Side CDP, Colorado",148),
("Trinidad city, Colorado",8043),
("Twin Lakes CDP (Adams County), Colorado",7586),
("Twin Lakes CDP (Lake County), Colorado",185),
("Two Buttes town, Colorado",41),
("Upper Bear Creek CDP, Colorado",1013),
("Vail town, Colorado",5469),
("Valdez CDP, Colorado",14),
("Valmont CDP, Colorado",48),
("Vernon CDP, Colorado",0),
("Victor city, Colorado",429),
("Vilas town, Colorado",90),
("Vineland CDP, Colorado",273),
("Vona town, Colorado",103),
("Walden town, Colorado",579),
("Walsenburg city, Colorado",2931),
("Walsh town, Colorado",638),
("Ward town, Colorado",137),
("Watkins CDP, Colorado",598),
("Welby CDP, Colorado",16352),
("Weldona CDP, Colorado",146),
("Wellington town, Colorado",8571),
("Westcliffe town, Colorado",378),
("Westcreek CDP, Colorado",179),
("Westminster city, Colorado",112747),
("Weston CDP, Colorado",23),
("West Pleasant View CDP, Colorado",4691),
("Wheat Ridge city, Colorado",31323),
("Wiggins town, Colorado",1102),
("Wiley town, Colorado",309),
("Williamsburg town, Colorado",549),
("Windsor town, Colorado",25232),
("Winter Park town, Colorado",650),
("Wolcott CDP, Colorado",0),
("Woodland Park city, Colorado",7421),
("Woodmoor CDP, Colorado",9117),
("Woody Creek CDP, Colorado",182),
("Wray city, Colorado",2430),
("Yampa town, Colorado",452),
("Yuma city, Colorado",3482),
("Ansonia city, Connecticut",18860),
("Baltic CDP, Connecticut",1182),
("Bantam borough, Connecticut",735),
("Bethel CDP, Connecticut",9659),
("Bethlehem Village CDP, Connecticut",1798),
("Blue Hills CDP, Connecticut",2753),
("Branford Center CDP, Connecticut",6413),
("Bridgeport city, Connecticut",146417),
("Bristol city, Connecticut",60308),
("Broad Brook CDP, Connecticut",4766),
("Brooklyn CDP, Connecticut",965),
("Byram CDP, Connecticut",4216),
("Canaan CDP, Connecticut",1227),
("Cannondale CDP, Connecticut",29),
("Canton Valley CDP, Connecticut",1525),
("Cheshire Village CDP, Connecticut",6484),
("Chester Center CDP, Connecticut",1653),
("Clinton CDP, Connecticut",3453),
("Colchester CDP, Connecticut",5048),
("Collinsville CDP, Connecticut",3514),
("Conning Towers Nautilus Park CDP, Connecticut",9749),
("Cos Cob CDP, Connecticut",6959),
("Coventry Lake CDP, Connecticut",2749),
("Crystal Lake CDP, Connecticut",2231),
("Danbury city, Connecticut",84479),
("Danielson borough, Connecticut",3983),
("Darien CDP, Connecticut",21759),
("Deep River Center CDP, Connecticut",2448),
("Derby city, Connecticut",12596),
("Durham CDP, Connecticut",2631),
("East Brooklyn CDP, Connecticut",1350),
("East Hampton CDP, Connecticut",2760),
("East Hartford CDP, Connecticut",50453),
("East Haven CDP, Connecticut",28860),
("Essex Village CDP, Connecticut",2595),
("Falls Village CDP, Connecticut",478),
("Fenwick borough, Connecticut",69),
("Gales Ferry CDP, Connecticut",862),
("Georgetown CDP, Connecticut",1824),
("Glastonbury Center CDP, Connecticut",7742),
("Glenville CDP, Connecticut",2499),
("Greenwich CDP, Connecticut",13303),
("Groton city, Connecticut",9109),
("Groton Long Point borough, Connecticut",499),
("Guilford Center CDP, Connecticut",2618),
("Hartford city, Connecticut",123628),
("Hazardville CDP, Connecticut",4896),
("Heritage Village CDP, Connecticut",4143),
("Higganum CDP, Connecticut",1496),
("Jewett City borough, Connecticut",3411),
("Kensington CDP, Connecticut",8190),
("Lake Pocotopaug CDP, Connecticut",3413),
("Lakeville CDP, Connecticut",1154),
("Litchfield borough, Connecticut",1244),
("Long Hill CDP, Connecticut",4191),
("Madison Center CDP, Connecticut",2519),
("Manchester CDP, Connecticut",29899),
("Mansfield Center CDP, Connecticut",913),
("Mashantucket CDP, Connecticut",216),
("Meriden city, Connecticut",59864),
("Middletown city, Connecticut",46473),
("Milford city (balance), Connecticut",52349),
("Moodus CDP, Connecticut",1610),
("Moosup CDP, Connecticut",3943),
("Mystic CDP, Connecticut",4221),
("Naugatuck borough, Connecticut",31481),
("New Britain city, Connecticut",72839),
("New Hartford Center CDP, Connecticut",1061),
("New Haven city, Connecticut",130529),
("Newington CDP, Connecticut",30323),
("New London city, Connecticut",27032),
("New Milford CDP, Connecticut",6737),
("New Preston CDP, Connecticut",908),
("Newtown borough, Connecticut",1949),
("Niantic CDP, Connecticut",2872),
("Noank CDP, Connecticut",1487),
("Norfolk CDP, Connecticut",496),
("North Granby CDP, Connecticut",1657),
("North Grosvenor Dale CDP, Connecticut",1271),
("North Haven CDP, Connecticut",23786),
("Northwest Harwinton CDP, Connecticut",3244),
("Norwalk city, Connecticut",88436),
("Norwich city, Connecticut",39567),
("Oakville CDP, Connecticut",8929),
("Old Greenwich CDP, Connecticut",6819),
("Old Mystic CDP, Connecticut",3513),
("Old Saybrook Center CDP, Connecticut",1881),
("Orange CDP, Connecticut",13937),
("Oxoboxo River CDP, Connecticut",2906),
("Pawcatuck CDP, Connecticut",5249),
("Pemberwick CDP, Connecticut",4114),
("Plainfield Village CDP, Connecticut",2113),
("Plantsville CDP, Connecticut",1726),
("Poquonock Bridge CDP, Connecticut",2450),
("Portland CDP, Connecticut",5572),
("Putnam CDP, Connecticut",6772),
("Quinebaug CDP, Connecticut",1327),
("Ridgefield CDP, Connecticut",7963),
("Riverside CDP, Connecticut",8414),
("Rockville CDP, Connecticut",7332),
("Salmon Brook CDP, Connecticut",2438),
("Saybrook Manor CDP, Connecticut",1103),
("Sharon CDP, Connecticut",724),
("Shelton city, Connecticut",41155),
("Sherwood Manor CDP, Connecticut",5438),
("Simsbury Center CDP, Connecticut",6091),
("Somers CDP, Connecticut",1887),
("South Coventry CDP, Connecticut",1117),
("Southport CDP, Connecticut",1664),
("South Windham CDP, Connecticut",1533),
("Southwood Acres CDP, Connecticut",7715),
("South Woodstock CDP, Connecticut",1181),
("Stafford Springs CDP, Connecticut",4947),
("Stamford city, Connecticut",129026),
("Stonington borough, Connecticut",906),
("Storrs CDP, Connecticut",16516),
("Stratford CDP, Connecticut",52279),
("Suffield Depot CDP, Connecticut",1334),
("Tariffville CDP, Connecticut",1391),
("Terramuggus CDP, Connecticut",1136),
("Terryville CDP, Connecticut",4900),
("Thomaston CDP, Connecticut",1793),
("Thompsonville CDP, Connecticut",8439),
("Torrington city, Connecticut",34737),
("Trumbull CDP, Connecticut",36174),
("Wallingford Center CDP, Connecticut",18322),
("Waterbury city, Connecticut",108672),
("Waterford CDP, Connecticut",2796),
("Watertown CDP, Connecticut",3324),
("Wauregan CDP, Connecticut",1321),
("Weatogue CDP, Connecticut",2788),
("Westbrook Center CDP, Connecticut",2075),
("West Hartford CDP, Connecticut",63127),
("West Haven city, Connecticut",54918),
("Westport CDP, Connecticut",27840),
("West Simsbury CDP, Connecticut",2523),
("Wethersfield CDP, Connecticut",26267),
("Willimantic CDP, Connecticut",17184),
("Wilton Center CDP, Connecticut",824),
("Windsor Locks CDP, Connecticut",12613),
("Winsted CDP, Connecticut",7367),
("Woodbury Center CDP, Connecticut",1449),
("Woodmont borough, Connecticut",1698),
("Washington city, District of Columbia",684498),
("Arden village, Delaware",388),
("Ardencroft village, Delaware",207),
("Ardentown village, Delaware",259),
("Bear CDP, Delaware",21362),
("Bellefonte town, Delaware",1140),
("Bethany Beach town, Delaware",1006),
("Bethel town, Delaware",191),
("Blades town, Delaware",1366),
("Bowers town, Delaware",345),
("Bridgeville town, Delaware",2696),
("Brookside CDP, Delaware",13191),
("Camden town, Delaware",3469),
("Cheswold town, Delaware",1313),
("Claymont CDP, Delaware",8676),
("Clayton town, Delaware",3237),
("Dagsboro town, Delaware",856),
("Delaware City city, Delaware",1719),
("Delmar town, Delaware",1790),
("Dewey Beach town, Delaware",306),
("Dover city, Delaware",37331),
("Dover Base Housing CDP, Delaware",3467),
("Edgemoor CDP, Delaware",6471),
("Ellendale town, Delaware",436),
("Elsmere town, Delaware",6049),
("Farmington town, Delaware",100),
("Felton town, Delaware",1478),
("Fenwick Island town, Delaware",461),
("Frankford town, Delaware",984),
("Frederica town, Delaware",995),
("Georgetown town, Delaware",7123),
("Glasgow CDP, Delaware",14342),
("Greenville CDP, Delaware",2333),
("Greenwood town, Delaware",1017),
("Harrington city, Delaware",3667),
("Hartly town, Delaware",59),
("Henlopen Acres town, Delaware",204),
("Highland Acres CDP, Delaware",3645),
("Hockessin CDP, Delaware",13472),
("Houston town, Delaware",409),
("Kent Acres CDP, Delaware",2429),
("Kenton town, Delaware",208),
("Laurel town, Delaware",4211),
("Leipsic town, Delaware",144),
("Lewes city, Delaware",3118),
("Little Creek town, Delaware",169),
("Long Neck CDP, Delaware",2556),
("Magnolia town, Delaware",346),
("Middletown town, Delaware",21250),
("Milford city, Delaware",10835),
("Millsboro town, Delaware",4238),
("Millville town, Delaware",1877),
("Milton town, Delaware",2840),
("Newark city, Delaware",33352),
("New Castle city, Delaware",5359),
("Newport town, Delaware",1335),
("North Star CDP, Delaware",7441),
("Ocean View town, Delaware",2198),
("Odessa town, Delaware",565),
("Pike Creek CDP, Delaware",8312),
("Pike Creek Valley CDP, Delaware",10664),
("Rehoboth Beach city, Delaware",1281),
("Rising Sun-Lebanon CDP, Delaware",4263),
("Riverview CDP, Delaware",2684),
("Rodney Village CDP, Delaware",1370),
("St. Georges CDP, Delaware",795),
("Seaford city, Delaware",7572),
("Selbyville town, Delaware",2252),
("Slaughter Beach town, Delaware",231),
("Smyrna town, Delaware",11333),
("South Bethany town, Delaware",426),
("Townsend town, Delaware",2332),
("Viola town, Delaware",165),
("Wilmington city, Delaware",70904),
("Wilmington Manor CDP, Delaware",7902),
("Woodside town, Delaware",164),
("Woodside East CDP, Delaware",2555),
("Wyoming town, Delaware",1437),
("Acacia Villas CDP, Florida",459),
("Alachua city, Florida",9794),
("Alafaya CDP, Florida",88542),
("Alford town, Florida",662),
("Allentown CDP, Florida",1002),
("Altamonte Springs city, Florida",43426),
("Altha town, Florida",598),
("Altoona CDP, Florida",100),
("Alturas CDP, Florida",3673),
("Alva CDP, Florida",2724),
("Andrews CDP, Florida",720),
("Anna Maria city, Florida",869),
("Apalachicola city, Florida",2477),
("Apollo Beach CDP, Florida",19457),
("Apopka city, Florida",50529),
("Arcadia city, Florida",7943),
("Archer city, Florida",1336),
("Aripeka CDP, Florida",142),
("Asbury Lake CDP, Florida",9510),
("Astatula town, Florida",1637),
("Astor CDP, Florida",1418),
("Atlantic Beach city, Florida",13398),
("Atlantis city, Florida",2079),
("Auburndale city, Florida",15343),
("Aucilla CDP, Florida",175),
("Avalon CDP, Florida",469),
("Aventura city, Florida",37780),
("Avon Park city, Florida",10386),
("Azalea Park CDP, Florida",14817),
("Babson Park CDP, Florida",1167),
("Bagdad CDP, Florida",3474),
("Baldwin town, Florida",1641),
("Bal Harbour village, Florida",3000),
("Balm CDP, Florida",3745),
("Bardmoor CDP, Florida",9514),
("Bartow city, Florida",19063),
("Bascom town, Florida",56),
("Bay Harbor Islands town, Florida",5938),
("Bay Hill CDP, Florida",5066),
("Bay Lake city, Florida",71),
("Bayonet Point CDP, Florida",26740),
("Bay Pines CDP, Florida",3202),
("Bayport CDP, Florida",63),
("Bayshore Gardens CDP, Florida",20293),
("Beacon Square CDP, Florida",6873),
("Bear Creek CDP, Florida",1720),
("Bee Ridge CDP, Florida",10012),
("Bell town, Florida",634),
("Bellair-Meadowbrook Terrace CDP, Florida",15117),
("Belleair town, Florida",4097),
("Belleair Beach city, Florida",1545),
("Belleair Bluffs city, Florida",2273),
("Belleair Shore town, Florida",87),
("Belle Glade city, Florida",19358),
("Belle Isle city, Florida",6865),
("Belleview city, Florida",4844),
("Bellview CDP, Florida",21269),
("Berrydale CDP, Florida",326),
("Beverly Beach town, Florida",481),
("Beverly Hills CDP, Florida",9381),
("Big Coppitt Key CDP, Florida",2951),
("Big Pine Key CDP, Florida",4887),
("Biscayne Park village, Florida",3175),
("Bithlo CDP, Florida",9457),
("Black Diamond CDP, Florida",753),
("Black Hammock CDP, Florida",1327),
("Bloomingdale CDP, Florida",23758),
("Blountstown city, Florida",2843),
("Boca Raton city, Florida",95745),
("Bokeelia CDP, Florida",1420),
("Bonifay city, Florida",2693),
("Bonita Springs city, Florida",53812),
("Boulevard Gardens CDP, Florida",1567),
("Bowling Green city, Florida",2890),
("Boynton Beach city, Florida",75720),
("Bradenton city, Florida",55059),
("Bradenton Beach city, Florida",929),
("Bradley Junction CDP, Florida",587),
("Brandon CDP, Florida",113279),
("Branford town, Florida",796),
("Brent CDP, Florida",22056),
("Briny Breezes town, Florida",876),
("Bristol city, | |
"""Load :class:`lime.lime_tabular.LimeTabularExplainer`."""
x_train = self.get_x_array('train', impute_nans=True)
y_train = self.get_y_array('train', impute_nans=True)
verbosity = self._get_verbosity_parameters(LimeTabularExplainer,
boolean=True)
for param in verbosity:
verbosity[param] = False
categorical_features_idx = [
int(np.where(self.features == tag)[0][0])
for tag in self.categorical_features
]
self._lime_explainer = LimeTabularExplainer(
x_train,
mode='regression',
training_labels=y_train,
feature_names=self.features,
categorical_features=categorical_features_idx,
discretize_continuous=False,
sample_around_instance=True,
**verbosity,
)
logger.debug(
"Loaded %s with new training data", str(LimeTabularExplainer))
def _mask_prediction_array(self, y_pred, ref_cube):
"""Apply mask of reference cube to prediction array."""
mask = np.ma.getmaskarray(ref_cube.data).ravel()
if y_pred.ndim == 1 and y_pred.shape[0] != mask.shape[0]:
new_y_pred = np.empty(mask.shape[0], dtype=self._cfg['dtype'])
new_y_pred[mask] = np.nan
new_y_pred[~mask] = y_pred
else:
new_y_pred = y_pred
return np.ma.masked_invalid(new_y_pred)
def _plot_feature_importance(self, feature_importance_dict, colors,
                             plot_path):
    """Plot global feature importance as a horizontal bar chart.

    The plot is saved twice (once with a linear and once with a
    logarithmic x-axis) and provenance is recorded for both files via a
    1D cube of the importance values.

    Parameters
    ----------
    feature_importance_dict : dict
        Mapping from feature name to its relative importance.
    colors : dict
        Mapping from feature name to the bar color used for it.
    plot_path : str
        Output path of the plot; the logarithmic variant gets a
        ``_log`` suffix inserted before the file extension.
    """
    logger.info("Plotting feature importance")
    (_, axes) = plt.subplots()

    # Sort data and get position of bars (least important at the bottom)
    features = np.array(list(feature_importance_dict.keys()))
    feature_importances = np.array(list(feature_importance_dict.values()))
    sorted_idx = np.argsort(feature_importances)
    pos = np.arange(sorted_idx.shape[0]) + 0.5

    # Write cube with feature importance for provenance tracking
    ancestors = self.get_ancestors(prediction_names=[])
    cube = mlr.get_1d_cube(
        features,
        feature_importances,
        x_kwargs={'var_name': 'feature',
                  'long_name': 'Feature name',
                  'units': 'no unit'},
        y_kwargs={'var_name': 'feature_importance',
                  'long_name': 'Relative Feature Importance',
                  'units': '1',
                  'attributes': {'project': '', 'dataset': ''}},
    )

    # Plot one bar per feature in ascending order of importance
    for (idx, importance) in enumerate(feature_importances[sorted_idx]):
        feature = features[sorted_idx][idx]
        axes.barh(pos[idx], importance, align='center',
                  color=colors[feature])

    # Plot appearance
    axes.tick_params(axis='y', which='minor', left=False, right=False)
    axes.tick_params(axis='y', which='major', left=True, right=False)
    title = f"Global feature importance ({self._cfg['mlr_model_name']})"
    axes.set_title(title)
    axes.set_xlabel('Relative Importance')
    axes.set_yticks(pos)
    axes.set_yticklabels(features[sorted_idx])

    # Save plot and provenance
    plt.savefig(plot_path, **self._cfg['savefig_kwargs'])
    logger.info("Wrote %s", plot_path)
    self._write_plot_provenance(cube, plot_path, ancestors=ancestors,
                                caption=title + '.', plot_types=['bar'])

    # Save additional plot with logarithmic X axis (same figure, only
    # the axis scale and tick formatter are changed before re-saving)
    axes.set_xscale('log')
    axes.xaxis.set_major_formatter(ScalarFormatter())
    ext = os.path.splitext(plot_path)[1]
    # NOTE(review): str.replace substitutes every occurrence of the
    # extension substring in the path, not only the trailing one
    plot_path_log = plot_path.replace(ext, f'_log{ext}')
    plt.savefig(plot_path_log, **self._cfg['savefig_kwargs'])
    logger.info("Wrote %s", plot_path_log)
    self._write_plot_provenance(cube, plot_path_log, ancestors=ancestors,
                                caption=title + '.', plot_types=['bar'])
    plt.close()
def _prediction_to_dict(self, pred_out, **kwargs):
"""Convert output of final regressor's ``predict()`` to :obj:`dict`."""
if not isinstance(pred_out, (list, tuple)):
pred_out = [pred_out]
idx_to_name = {0: None}
if 'return_var' in kwargs:
idx_to_name[1] = 'var'
elif 'return_cov' in kwargs:
idx_to_name[1] = 'cov'
pred_dict = {}
for (idx, pred) in enumerate(pred_out):
pred = pred.astype(self._cfg['dtype'], casting='same_kind')
if pred.ndim == 2 and pred.shape[1] == 1:
logger.warning(
"Prediction output is 2D and length of second axis is 1, "
"squeezing second axis")
pred = np.squeeze(pred, axis=1)
pred_dict[idx_to_name.get(idx, idx)] = pred
return pred_dict
def _pred_type_to_metadata(self, pred_type, cube):
    """Get correct :mod:`iris.cube.CubeMetadata` of prediction cube.

    Parameters
    ----------
    pred_type : None, int or str
        Type of the prediction: ``None`` for the prediction itself, an
        integer for unnamed additional outputs, or one of the known
        string identifiers (an error type, a
        ``'lime_importance___<feature>'`` tag or ``'residual'``).
    cube : iris.cube.Cube
        Cube whose current metadata is the basis for the new metadata.
        NOTE: ``cube.attributes`` is modified in place here, because
        ``attributes`` below aliases (does not copy) that dict.

    Returns
    -------
    iris.cube.CubeMetadata
        New metadata for the prediction cube.

    Raises
    ------
    ValueError
        If ``pred_type`` is an unrecognized string.
    """
    standard_name = cube.standard_name
    var_name = cube.var_name
    long_name = cube.long_name
    units = cube.units
    # Alias, not a copy: assignments below mutate the cube's attributes
    attributes = cube.attributes
    suffix = '' if pred_type is None else f'_{pred_type}'
    # Human-readable long-name suffixes for the known error types
    error_types = {
        'var': ' (variance)',
        'cov': ' (covariance)',
        'squared_mlr_model_error_estim': (' (squared MLR model error '
                                          'estimation using hold-out test '
                                          'data set)'),
        'squared_propagated_input_error': (' (squared propagated error of '
                                           'prediction input estimated by '
                                           'LIME)'),
    }
    if pred_type is None:
        # Plain prediction: names and units stay unchanged
        attributes['var_type'] = 'prediction_output'
    elif isinstance(pred_type, int):
        # Unnamed extra output: tag names with the numeric index
        var_name += '_{:d}'.format(pred_type)
        long_name += ' {:d}'.format(pred_type)
        logger.warning("Got unknown prediction type with index %i",
                       pred_type)
        attributes['var_type'] = 'prediction_output_misc'
    elif pred_type in error_types:
        # Error estimates are squared quantities -> squared units
        var_name += suffix
        long_name += error_types[pred_type]
        units = mlr.units_power(cube.units, 2)
        attributes['var_type'] = 'prediction_output_error'
        attributes['squared'] = 1
    elif 'lime_importance___' in pred_type:
        # Local (per-point) LIME feature importance: dimensionless
        standard_name = None
        feature = pred_type.replace('lime_importance___', '')
        var_name = f'importance_of_feature_{feature}'
        long_name = (f'Local importance of feature {feature} for '
                     f'predicting {self.label} given by LIME')
        units = Unit('1')
        attributes['var_type'] = 'prediction_output_misc'
    elif pred_type == 'residual':
        var_name += suffix
        long_name += ' (residual)'
        attributes['residual'] = 'true minus predicted values'
        attributes['var_type'] = 'prediction_residual'
    else:
        raise ValueError(f"Got unknown prediction type '{pred_type}'")
    return iris.cube.CubeMetadata(
        standard_name=standard_name,
        long_name=long_name,
        var_name=var_name,
        units=units,
        attributes=attributes,
        cell_methods=cube.cell_methods,
    )
def _print_metrics(self, regression_metrics, data_type):
    """Log regression metrics for a given data type.

    Each name in ``regression_metrics`` is resolved in
    :mod:`sklearn.metrics` and evaluated on true vs. predicted values;
    metrics whose name contains ``'squared'`` are reported as their
    square root with a ``root_`` prefix.  If sample weights are
    available, weighted versions are logged as well.
    """
    if data_type not in self.data:
        return
    logger.info("Evaluating regression metrics for %s data", data_type)
    x_data = self.data[data_type].x
    y_true = self.get_y_array(data_type)
    y_pred = self._clf.predict(x_data)
    sample_weights = self._get_sample_weights(data_type)

    def _log_metric(name, weights, fmt):
        """Evaluate one metric (optionally weighted) and log it."""
        metric_function = getattr(metrics, name)
        if weights is None:
            value = metric_function(y_true, y_pred)
        else:
            value = metric_function(y_true, y_pred, sample_weight=weights)
        if 'squared' in name:
            value = np.sqrt(value)
            name = f'root_{name}'
        logger.info(fmt, name, value)

    for metric in regression_metrics:
        _log_metric(metric, None, "%s: %s")
    if sample_weights is None:
        return
    for metric in regression_metrics:
        _log_metric(metric, sample_weights, "Weighted %s: %s")
def _propagate_input_errors(self, x_pred, x_err):
    """Propagate errors from prediction input.

    For every prediction input point a local linear approximation of
    the model is obtained with LIME, and the squared error is
    propagated through the linear coefficients (sum of squared terms,
    i.e. covariances between features are neglected).  Categorical
    features are skipped.

    Parameters
    ----------
    x_pred : object with ``.values``, e.g. :class:`pandas.DataFrame`
        Prediction input data.
    x_err : object with ``.values``, e.g. :class:`pandas.DataFrame`
        Errors associated with ``x_pred`` (NaNs are treated as zero).

    Returns
    -------
    numpy.ndarray
        Squared propagated error for each prediction input point.
    """
    logger.info(
        "Propagating prediction input errors using LIME (this may take a "
        "while...)")
    if 'feature_selection' in self._clf.named_steps:
        logger.warning(
            "Propagating input errors might not work correctly when a "
            "'feature_selection' step is present (usually because of "
            "calling rfecv())")
    x_pred = self._impute_nans(x_pred)

    # Propagated error for single input
    def _propagated_error(x_single_pred, x_single_err, explainer,
                          predict_fn, features, categorical_features):
        """Get propagated prediction input error for single input."""
        exp = explainer.explain_instance(x_single_pred, predict_fn)
        # Missing input errors contribute nothing
        x_single_err = np.nan_to_num(x_single_err)
        # Undo the explainer's internal feature scaling so the
        # coefficients and errors live in the same space
        x_err_scaled = x_single_err / explainer.scaler.scale_
        squared_error = 0.0
        # exp.local_exp maps a label to (feature index, coefficient)
        # pairs; presumably key 1 is the regression output label --
        # TODO confirm against the LIME version in use
        for (idx, coef) in exp.local_exp[1]:
            if features[idx] in categorical_features:
                continue
            squared_error += (x_err_scaled[idx] * coef)**2
        return squared_error

    # Apply on whole input (using multiple processes)
    parallel = Parallel(n_jobs=self._cfg['n_jobs'])
    errors = parallel(
        [delayed(_propagated_error)(
            x, x_e, explainer=self._lime_explainer,
            predict_fn=self._clf.predict,
            features=self.features,
            categorical_features=self.categorical_features,
        ) for (x, x_e) in zip(x_pred.values, x_err.values)]
    )
    return np.array(errors, dtype=self._cfg['dtype'])
def _remove_missing_features(self, x_data, y_data, sample_weights):
"""Remove missing values in the features data (if desired)."""
mask = self._get_mask(x_data, 'training')
x_data = x_data[~mask]
y_data = y_data[~mask]
if sample_weights is not None:
sample_weights = sample_weights[~mask]
diff = mask.sum()
if diff:
msg = ('Removed %i training point(s) where features were '
'missing')
if self._cfg.get('accept_only_scalar_data'):
removed_groups = self.group_attributes[mask]
msg += f' ({removed_groups})'
self._classes['group_attributes'] = (
self.group_attributes[~mask])
logger.info(msg, diff)
return (x_data, y_data, sample_weights)
def _remove_missing_pred_input(self, x_pred, x_err=None, y_ref=None):
"""Remove missing values in the prediction input data."""
mask = self._get_mask(x_pred, 'prediction input')
x_pred = x_pred[~mask]
if x_err is not None:
x_err = x_err[~mask]
if y_ref is not None:
y_ref = y_ref[~mask]
diff = mask.sum()
if diff:
logger.info(
"Removed %i prediction input point(s) where features were "
"missing", diff)
return (x_pred, x_err, y_ref, mask)
def _save_prediction_cubes(self, pred_dict, pred_name, x_cube):
    """Save (multi-dimensional) prediction output.

    Each entry of ``pred_dict`` is written to its own netCDF file.  If
    the (masked) prediction array matches the total size of ``x_cube``,
    the cube's shape and coordinates are reused; otherwise a new cube
    with generic index coordinates is built.  Provenance is recorded
    for every written file.

    Parameters
    ----------
    pred_dict : dict
        Mapping from prediction type (``None``, ``'var'``,
        ``'residual'``, ...) to the corresponding prediction array.
    pred_name : str
        Name of the prediction.
    x_cube : iris.cube.Cube
        Cube of the prediction input, used as template for mask, shape
        and coordinates.
    """
    logger.debug("Creating output cubes")
    for (pred_type, y_pred) in pred_dict.items():
        y_pred = self._mask_prediction_array(y_pred, x_cube)
        # FIX: use builtin ``int`` instead of ``np.int``, which was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24 (it was a
        # plain alias of the builtin, so behavior is unchanged)
        if y_pred.size == np.prod(x_cube.shape, dtype=int):
            pred_cube = x_cube.copy(y_pred.reshape(x_cube.shape))
        else:
            # Prediction does not fit the input grid: build a cube
            # with plain index dimension coordinates
            dim_coords = []
            for (dim_idx, dim_size) in enumerate(y_pred.shape):
                dim_coords.append((iris.coords.DimCoord(
                    np.arange(dim_size, dtype=np.float64),
                    long_name=f'MLR prediction index {dim_idx}',
                    var_name=f'idx_{dim_idx}'), dim_idx))
            pred_cube = iris.cube.Cube(y_pred,
                                       dim_coords_and_dims=dim_coords)
        new_path = self._set_prediction_cube_attributes(
            pred_cube, pred_type, pred_name=pred_name)
        io.iris_save(pred_cube, new_path)

        # Save provenance
        ancestors = self.get_ancestors(
            prediction_names=[pred_name],
            prediction_reference=pred_type == 'residual')
        record = {
            'ancestors': ancestors,
            'authors': ['schlund_manuel'],
            'caption': (f"{pred_cube.long_name} of MLR model "
                        f"{self._cfg['mlr_model_name']} for prediction "
                        f"{pred_name}."),
            'references': ['schlund20jgr'],
        }
        with ProvenanceLogger(self._cfg) as provenance_logger:
            provenance_logger.log(new_path, record)
def _save_csv_file(self, data_type, filename, pred_name=None):
"""Save CSV file."""
if data_type not in self.data:
return
if data_type == 'pred':
csv_data = self.data[data_type][pred_name]
else:
csv_data = self.data[data_type]
# Filename and path
if filename is None:
if data_type == 'pred':
filename = '{data_type}_{pred_name}.csv'
format_kwargs = {
'data_type': data_type,
'pred_name': self._get_name(pred_name),
}
else:
filename = '{data_type}.csv'
format_kwargs = {'data_type': data_type}
filename = filename.format(**format_kwargs)
path = os.path.join(self._cfg['mlr_work_dir'], filename)
# Save file
csv_data.to_csv(path, na_rep='nan')
logger.info("Wrote %s", path)
def _set_default_settings(self):
    """Set default (non-``False``) keyword arguments.

    Every option is only set if it is not already present in the user
    configuration (``dict.setdefault``).  Afterwards the imputation
    strategy and any additional ``fit()`` keyword arguments are logged.
    """
    defaults = {
        'weighted_samples': {'area_weighted': True,
                             'time_weighted': True},
        'cache_intermediate_results': True,
        'dtype': 'float64',
        'fit_kwargs': {},
        'group_datasets_by_attributes': [],
        'imputation_strategy': 'remove',
        'log_level': 'info',
        'mlr_model_name': f'{self._CLF_TYPE} model',
        'n_jobs': 1,
        'output_file_type': 'png',
        'parameters': {},
        'plot_dir': os.path.expanduser(os.path.join('~', 'plots')),
        'plot_units': {},
        'savefig_kwargs': {
            'bbox_inches': 'tight',
            'dpi': 300,
            'orientation': 'landscape',
        },
        'standardize_data': True,
        'sub_dir': '',
        'test_size': 0.25,
        'work_dir': os.path.expanduser(os.path.join('~', 'work')),
        'write_plots': True,
    }
    for (option, default) in defaults.items():
        self._cfg.setdefault(option, default)
    logger.info("Using imputation strategy '%s'",
                self._cfg['imputation_strategy'])
    if self._cfg['fit_kwargs']:
        logger.info(
            "Using additional keyword argument(s) %s for fit() function",
            self._cfg['fit_kwargs'])
def _set_prediction_cube_attributes(self, cube, pred_type, pred_name=None):
    """Set attributes and metadata of a prediction cube.

    Model information and parameters are written into the cube's
    attributes, names/units are copied from the label dataset and then
    adapted to ``pred_type``.  Returns the output path for the cube.
    """
    cube.cell_methods = None
    cube.attributes = {
        'description': 'MLR model prediction',
        'mlr_model_name': self._cfg['mlr_model_name'],
        'mlr_model_type': self.mlr_model_type,
        'final_regressor': str(self._CLF_TYPE),
        'prediction_name': self._get_name(pred_name),
        'tag': self.label,
    }
    cube.attributes.update(self._get_prediction_properties())
    for (key, val) in self.parameters.items():
        cube.attributes[key] = str(val)
    cube.attributes['mlr_parameters'] = list(self.parameters.keys())

    # Copy names and units from the label dataset
    reference_cube = self._load_cube(self._datasets['label'][0])
    for attr_name in ('standard_name', 'var_name', 'long_name', 'units'):
        setattr(cube, attr_name, getattr(reference_cube, attr_name))

    # Modify cube metadata depending on prediction type
    cube.metadata = self._pred_type_to_metadata(pred_type, cube)

    # Get new path
    type_suffix = '' if pred_type is None else f'_{pred_type}'
    name_part = f'_for_prediction_{self._get_name(pred_name)}'
    group_part = ('' if self._cfg['sub_dir'] == '' else
                  f"_of_group_{self._cfg['sub_dir']}")
    filename = (f'{self.mlr_model_type}_{self.label}_prediction'
                f'{type_suffix}{name_part}{group_part}.nc')
    new_path = os.path.join(self._cfg['mlr_work_dir'], filename)
    cube.attributes['filename'] = new_path
    return new_path
def _update_fit_kwargs(self, fit_kwargs):
"""Check and update fit kwargs."""
new_fit_kwargs = {}
# Sort out wrong fit kwargs
for (param_name, param_val) in fit_kwargs.items():
step = param_name.split('__')[0]
if step in self._clf.named_steps:
new_fit_kwargs[param_name] = param_val
else:
raise ValueError(
f"Got invalid pipeline step '{step}' in fit parameter "
f"'{param_name}'")
# Add sample weights if possible
allowed_fit_kwargs = getfullargspec(self._CLF_TYPE.fit).args
| |
# -*- coding: utf-8 -*-
"""Cisco Identity Services Engine Node Services API wrapper.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
get_next_page,
)
class NodeServices(object):
"""Identity Services Engine Node Services API (version: 3.1.1).
Wraps the Identity Services Engine Node Services
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
    """Initialize a new NodeServices object with the provided RestSession.

    Args:
        session(RestSession): The RESTful session object to be used for
            API calls to the Identity Services Engine service.
        object_factory(callable): Factory that wraps raw API responses
            into model objects.
        request_validator(callable): Factory returning JSON-schema
            validators for request payloads.

    Raises:
        TypeError: If the parameter types are incorrect.
    """
    # Fail fast if the session is not a RestSession instance
    check_type(session, RestSession)
    super(NodeServices, self).__init__()
    self._session = session
    self._object_factory = object_factory
    self._request_validator = request_validator
def get_interfaces(self,
                   hostname,
                   headers=None,
                   **query_parameters):
    """Retrieve the list of interfaces on a node in a cluster.

    Args:
        hostname(basestring): hostname path parameter. Hostname of the
            node.
        headers(dict): Dictionary of HTTP Headers to send with the
            Request.
        **query_parameters: Additional query parameters (provides
            support for parameters that may be added in the future).

    Returns:
        RestResponse: REST response with following properties:
          - headers(MyDict): response headers.
          - response(MyDict): response body as a MyDict object.
          - content(bytes): representation of the request's response
          - text(str): representation of the request's response

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the Identity Services Engine cloud returns an
            error.
    """
    check_type(headers, dict)

    # Merge caller-supplied headers with the session defaults
    request_headers = self._session.headers or {}
    use_custom_headers = bool(headers)
    if headers:
        request_headers.update(dict_of_str(headers))

    check_type(hostname, basestring,
               may_be_none=False)

    # Drop query parameters whose value is None
    params = dict_from_items_with_values(dict(query_parameters))

    url = apply_path_params('/api/v1/node/{hostname}/interface',
                            {'hostname': hostname})
    if use_custom_headers:
        api_response = self._session.get(url, params=params,
                                         headers=request_headers)
    else:
        api_response = self._session.get(url, params=params)

    return self._object_factory(
        'bpm_f6f429e124ea58ba85f0b34296d61300_v3_1_1', api_response)
def get_sxp_interface(self,
                      hostname,
                      headers=None,
                      **query_parameters):
    """Retrieve the SXP interface of a node.

    Args:
        hostname(basestring): hostname path parameter. Hostname of the
            node.
        headers(dict): Dictionary of HTTP Headers to send with the
            Request.
        **query_parameters: Additional query parameters (provides
            support for parameters that may be added in the future).

    Returns:
        RestResponse: REST response with following properties:
          - headers(MyDict): response headers.
          - response(MyDict): response body as a MyDict object.
          - content(bytes): representation of the request's response
          - text(str): representation of the request's response

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the Identity Services Engine cloud returns an
            error.
    """
    check_type(headers, dict)

    # Merge caller-supplied headers with the session defaults
    request_headers = self._session.headers or {}
    use_custom_headers = bool(headers)
    if headers:
        request_headers.update(dict_of_str(headers))

    check_type(hostname, basestring,
               may_be_none=False)

    # Drop query parameters whose value is None
    params = dict_from_items_with_values(dict(query_parameters))

    url = apply_path_params('/api/v1/node/{hostname}/sxp-interface',
                            {'hostname': hostname})
    if use_custom_headers:
        api_response = self._session.get(url, params=params,
                                         headers=request_headers)
    else:
        api_response = self._session.get(url, params=params)

    return self._object_factory(
        'bpm_ba4b550caf3845b4cbe1074d_v3_1_1', api_response)
def set_sxp_interface(self,
                      hostname,
                      interface=None,
                      headers=None,
                      payload=None,
                      active_validation=True,
                      **query_parameters):
    """Configure the SXP interface of a node.

    Args:
        interface(string): interface, property of the request body.
        hostname(basestring): hostname path parameter. Hostname of the
            node.
        headers(dict): Dictionary of HTTP Headers to send with the
            Request.
        payload(dict): A JSON serializable Python object to send in the
            body of the Request.
        active_validation(bool): Enable/Disable payload validation.
            Defaults to True.
        **query_parameters: Additional query parameters (provides
            support for parameters that may be added in the future).

    Returns:
        RestResponse: REST response with following properties:
          - headers(MyDict): response headers.
          - response(MyDict): response body as a MyDict object.
          - content(bytes): representation of the request's response
          - text(str): representation of the request's response

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the Identity Services Engine cloud returns an
            error.
    """
    check_type(headers, dict)

    # Merge caller-supplied headers with the session defaults
    request_headers = self._session.headers or {}
    use_custom_headers = bool(headers)
    if headers:
        request_headers.update(dict_of_str(headers))

    # XML payloads are passed through verbatim; JSON payloads are
    # assembled from the keyword arguments below
    is_xml_payload = 'application/xml' in request_headers.get(
        'Content-Type', [])
    if active_validation and is_xml_payload:
        check_type(payload, basestring)
    if active_validation and not is_xml_payload:
        check_type(payload, dict)
    check_type(hostname, basestring,
               may_be_none=False)

    # Drop query parameters whose value is None
    params = dict_from_items_with_values(dict(query_parameters))

    if is_xml_payload:
        request_body = payload
    else:
        request_body = {
            'interface':
            interface,
        }
        request_body.update(payload or {})
        request_body = dict_from_items_with_values(request_body)
        if active_validation:
            self._request_validator(
                'jsd_c6d188a13915253869849c4b0be7759_v3_1_1')\
                .validate(request_body)

    url = apply_path_params('/api/v1/node/{hostname}/sxp-interface',
                            {'hostname': hostname})
    request_params = ({'data': request_body} if is_xml_payload
                      else {'json': request_body})
    if use_custom_headers:
        api_response = self._session.put(url, params=params,
                                         headers=request_headers,
                                         **request_params)
    else:
        api_response = self._session.put(url, params=params,
                                         **request_params)

    return self._object_factory(
        'bpm_c6d188a13915253869849c4b0be7759_v3_1_1', api_response)
def get_profiler_probe_config(self,
                              hostname,
                              headers=None,
                              **query_parameters):
    """Retrieve the profiler probe configuration of a PSN.

    Args:
        hostname(basestring): hostname path parameter. Hostname of the
            node.
        headers(dict): Dictionary of HTTP Headers to send with the
            Request.
        **query_parameters: Additional query parameters (provides
            support for parameters that may be added in the future).

    Returns:
        RestResponse: REST response with following properties:
          - headers(MyDict): response headers.
          - response(MyDict): response body as a MyDict object.
          - content(bytes): representation of the request's response
          - text(str): representation of the request's response

    Raises:
        TypeError: If the parameter types are incorrect.
        MalformedRequest: If the request body created is invalid.
        ApiError: If the Identity Services Engine cloud returns an
            error.
    """
    check_type(headers, dict)

    # Merge caller-supplied headers with the session defaults
    request_headers = self._session.headers or {}
    use_custom_headers = bool(headers)
    if headers:
        request_headers.update(dict_of_str(headers))

    check_type(hostname, basestring,
               may_be_none=False)

    # Drop query parameters whose value is None
    params = dict_from_items_with_values(dict(query_parameters))

    url = apply_path_params('/api/v1/profile/{hostname}',
                            {'hostname': hostname})
    if use_custom_headers:
        api_response = self._session.get(url, params=params,
                                         headers=request_headers)
    else:
        api_response = self._session.get(url, params=params)

    return self._object_factory(
        'bpm_bfa308ed7b5fb6acde734f6267b4e3_v3_1_1', api_response)
def set_profiler_probe_config(self,
hostname,
active_directory=None,
dhcp=None,
dhcp_span=None,
dns=None,
http=None,
netflow=None,
nmap=None,
pxgrid=None,
radius=None,
snmp_query=None,
snmp_trap=None,
headers=None,
payload=None,
active_validation=True,
**query_parameters):
"""This API updates the profiler probe configuration of a PSN. Set
probe value as null to disable probe. Ex: Below
payload will disable NMAP, PxGrid and SNMPTRAP probes {
"activeDirectory": { "daysBeforeRescan": 1 }, "dhcp":
{ "interfaces": "[{"interface":"GigabitEthernet 0"}]",
"port": 0 }, "dhcpSpan": { "interfaces":
"[{"interface":"GigabitEthernet 0"}]" }, "dns": {
"timeout": 2 }, "http": { "interfaces":
"[{"interface":"GigabitEthernet 0"}]" }, "netflow": {
"interfaces": "[{"interface":"GigabitEthernet 0"}]",
"port": 0 }, "nmap": null , "pxgrid": null ,
"radius": [], "snmpQuery": { "eventTimeout": 30,
"retries": 2, "timeout": 1000 }, "snmpTrap": null
} .
Args:
active_directory(object): The Active Directory probe
queries the Active Directory for Windows
information., property of the request
body.
dhcp(object): The DHCP probe listens for DHCP packets
from IP helpers., property of the
request body.
dhcp_span(object): The DHCP SPAN probe collects DHCP
packets., property of the request body.
dns(object): The DNS probe performs a DNS lookup for the
FQDN., property of the request body.
http(object): The HTTP probe receives and parses HTTP
packets., property of the request body.
netflow(object): The | |
'DQX',
'\xe5\xa4\xa7\xe5\x86\xb6\xe5\x8c\x97': 'DBN',
'\xe5\xa4\xa7\xe5\x8f\xa3\xe5\xb1\xaf': 'DKP',
'\xe5\xa4\xa7\xe5\x90\x8c': 'DTV',
'\xe5\xa4\xa7\xe5\x9d\x9d': 'DBJ',
'\xe5\xa4\xa7\xe5\x9f\x94': 'DPI',
'\xe5\xa4\xa7\xe5\xa0\xa1': 'DVT',
'\xe5\xa4\xa7\xe5\xad\xa4\xe5\xb1\xb1': 'RMT',
'\xe5\xa4\xa7\xe5\xae\x89': 'RAT',
'\xe5\xa4\xa7\xe5\xae\x89\xe5\x8c\x97': 'RNT',
'\xe5\xa4\xa7\xe5\xae\x98\xe5\xb1\xaf': 'DTT',
'\xe5\xa4\xa7\xe5\xb1\xaf': 'DNT',
'\xe5\xa4\xa7\xe5\xb7\xb4': 'DBD',
'\xe5\xa4\xa7\xe5\xb9\xb3\xe6\x88\xbf': 'DPD',
'\xe5\xa4\xa7\xe5\xba\x86': 'DZX',
'\xe5\xa4\xa7\xe5\xba\x86\xe4\xb8\x9c': 'LFX',
'\xe5\xa4\xa7\xe5\xba\x86\xe8\xa5\xbf': 'RHX',
'\xe5\xa4\xa7\xe6\x88\x90': 'DCT',
'\xe5\xa4\xa7\xe6\x88\x98\xe5\x9c\xba': 'DTJ',
'\xe5\xa4\xa7\xe6\x8b\x9f': 'DNZ',
'\xe5\xa4\xa7\xe6\x96\xb9\xe5\x8d\x97': 'DNE',
'\xe5\xa4\xa7\xe6\x97\xba': 'WWQ',
'\xe5\xa4\xa7\xe6\x9d\x96\xe5\xad\x90': 'DAP',
'\xe5\xa4\xa7\xe6\x9d\xa8\xe6\xa0\x91': 'DUX',
'\xe5\xa4\xa7\xe6\x9d\xbf': 'DBC',
'\xe5\xa4\xa7\xe6\x9e\x97': 'DLD',
'\xe5\xa4\xa7\xe6\xad\xa6\xe5\x8f\xa3': 'DFJ',
'\xe5\xa4\xa7\xe6\xb6\xa7': 'DFP',
'\xe5\xa4\xa7\xe6\xb9\xbe\xe5\xad\x90': 'DFM',
'\xe5\xa4\xa7\xe7\x81\xb0\xe5\x8e\x82': 'DHP',
'\xe5\xa4\xa7\xe7\x8e\x8b\xe6\xbb\xa9': 'DZZ',
'\xe5\xa4\xa7\xe7\x90\x86': 'DKM',
'\xe5\xa4\xa7\xe7\x94\xb0\xe8\xbe\xb9': 'DBM',
'\xe5\xa4\xa7\xe7\x9b\x98\xe7\x9f\xb3': 'RPP',
'\xe5\xa4\xa7\xe7\x9f\xb3\xe5\xa4\xb4': 'DSL',
'\xe5\xa4\xa7\xe7\x9f\xb3\xe5\xa4\xb4\xe5\x8d\x97': 'DAL',
'\xe5\xa4\xa7\xe7\x9f\xb3\xe5\xaf\xa8': 'RZT',
'\xe5\xa4\xa7\xe7\x9f\xb3\xe6\xa1\xa5': 'DQT',
'\xe5\xa4\xa7\xe7\xa3\xb4\xe6\xb2\x9f': 'DKJ',
'\xe5\xa4\xa7\xe7\xa6\xbe\xe5\xa1\x98': 'SOQ',
'\xe5\xa4\xa7\xe7\xab\xb9\xe5\x9b\xad': 'DZY',
'\xe5\xa4\xa7\xe7\xba\xa2\xe6\x97\x97': 'DQD',
'\xe5\xa4\xa7\xe8\x8b\xb1\xe4\xb8\x9c': 'IAW',
'\xe5\xa4\xa7\xe8\x8b\xb4': 'DIM',
'\xe5\xa4\xa7\xe8\x8d\x94': 'DNY',
'\xe5\xa4\xa7\xe8\x90\xa5': 'DYV',
'\xe5\xa4\xa7\xe8\x90\xa5\xe5\xad\x90': 'DZD',
'\xe5\xa4\xa7\xe8\x90\xa5\xe9\x95\x87': 'DJP',
'\xe5\xa4\xa7\xe8\x99\x8e\xe5\xb1\xb1': 'DHD',
'\xe5\xa4\xa7\xe8\xb6\xb3\xe5\x8d\x97': 'FQW',
'\xe5\xa4\xa7\xe8\xbf\x9e': 'DLT',
'\xe5\xa4\xa7\xe8\xbf\x9e\xe5\x8c\x97': 'DFT',
'\xe5\xa4\xa7\xe9\x80\x9a\xe8\xa5\xbf': 'DTO',
'\xe5\xa4\xa7\xe9\x99\x86\xe5\x8f\xb7': 'DLC',
'\xe5\xa4\xa7\xe9\x9b\x81': 'DYX',
'\xe5\xa4\xa7\xe9\x9d\x92\xe6\xb2\x9f': 'DSD',
'\xe5\xa4\xa9\xe4\xb9\x89': 'TND',
'\xe5\xa4\xa9\xe5\xb2\x97': 'TGL',
'\xe5\xa4\xa9\xe6\x9f\xb1\xe5\xb1\xb1': 'QWH',
'\xe5\xa4\xa9\xe6\xa1\xa5\xe5\xb2\xad': 'TQL',
'\xe5\xa4\xa9\xe6\xb0\xb4': 'TSJ',
'\xe5\xa4\xa9\xe6\xb2\xb3\xe6\x9c\xba\xe5\x9c\xba': 'TJN',
'\xe5\xa4\xa9\xe6\xb2\xb3\xe8\xa1\x97': 'TEN',
'\xe5\xa4\xa9\xe6\xb4\xa5': 'TJP',
'\xe5\xa4\xa9\xe6\xb4\xa5\xe5\x8c\x97': 'TBP',
'\xe5\xa4\xa9\xe6\xb4\xa5\xe5\x8d\x97': 'TIP',
'\xe5\xa4\xa9\xe6\xb4\xa5\xe8\xa5\xbf': 'TXP',
'\xe5\xa4\xa9\xe7\xa5\x9d': 'TZJ',
'\xe5\xa4\xa9\xe9\x95\x87': 'TZV',
'\xe5\xa4\xa9\xe9\x97\xa8': 'TMN',
'\xe5\xa4\xa9\xe9\x97\xa8\xe5\x8d\x97': 'TNN',
'\xe5\xa4\xaa\xe5\x8e\x9f': 'TYV',
'\xe5\xa4\xaa\xe5\x8e\x9f\xe4\xb8\x9c': 'TDV',
'\xe5\xa4\xaa\xe5\x8e\x9f\xe5\x8c\x97': 'TBV',
'\xe5\xa4\xaa\xe5\x8e\x9f\xe5\x8d\x97': 'TNV',
'\xe5\xa4\xaa\xe5\xa7\xa5\xe5\xb1\xb1': 'TLS',
'\xe5\xa4\xaa\xe5\xb9\xb3\xe5\xb7\x9d': 'TIT',
'\xe5\xa4\xaa\xe5\xb9\xb3\xe9\x95\x87': 'TEB',
'\xe5\xa4\xaa\xe6\xb9\x96': 'TKH',
'\xe5\xa4\xaa\xe8\xb0\xb7': 'TGV',
'\xe5\xa4\xaa\xe8\xb0\xb7\xe8\xa5\xbf': 'TIV',
'\xe5\xa4\xaa\xe9\x98\xb3\xe5\x8d\x87': 'TQT',
'\xe5\xa4\xaa\xe9\x98\xb3\xe5\xb1\xb1': 'TYJ',
'\xe5\xa4\xb9\xe5\xbf\x83\xe5\xad\x90': 'JXT',
'\xe5\xa5\x87\xe5\xb3\xb0\xe5\xa1\x94': 'QVP',
'\xe5\xa5\x88\xe6\x9b\xbc': 'NMD',
'\xe5\xa5\x89\xe5\x8c\x96': 'FHH',
'\xe5\xa5\x8e\xe5\xb1\xaf': 'KTR',
'\xe5\xa5\x8e\xe5\xb1\xb1': 'KAB',
'\xe5\xa6\x82\xe4\xb8\x9c': 'RIH',
'\xe5\xa6\x82\xe7\x9a\x8b': 'RBH',
'\xe5\xa7\x8b\xe5\x85\xb4': 'IPQ',
'\xe5\xa7\x9a\xe5\x8d\x83\xe6\x88\xb7\xe5\xb1\xaf': 'YQT',
'\xe5\xa7\x9a\xe5\xae\x89': 'YAC',
'\xe5\xa7\x9a\xe5\xae\xb6': 'YAT',
'\xe5\xa7\x9c\xe5\xa0\xb0': 'UEH',
'\xe5\xa7\x9c\xe5\xae\xb6': 'JJB',
'\xe5\xa8\x81\xe6\xb5\xb7': 'WKK',
'\xe5\xa8\x81\xe6\xb5\xb7\xe5\x8c\x97': 'WHK',
'\xe5\xa8\x81\xe7\xae\x90': 'WAM',
'\xe5\xa8\x81\xe8\x88\x8d': 'WSM',
'\xe5\xa8\x81\xe8\x99\x8e\xe5\xb2\xad\xe5\x8c\x97': 'WBL',
'\xe5\xa8\x84\xe5\xba\x95': 'LDQ',
'\xe5\xa8\x84\xe5\xba\x95\xe5\x8d\x97': 'UOQ',
'\xe5\xa8\x98\xe5\xad\x90\xe5\x85\xb3': 'NIP',
'\xe5\xa9\xba\xe6\xba\x90': 'WYG',
'\xe5\xab\xa9\xe6\xb1\x9f': 'NGX',
'\xe5\xad\x90\xe6\xb4\xb2': 'ZZY',
'\xe5\xad\x90\xe9\x95\xbf': 'ZHY',
'\xe5\xad\x99\xe5\x90\xb4': 'SKB',
'\xe5\xad\x99\xe5\xae\xb6': 'SUB',
'\xe5\xad\x99\xe9\x95\x87': 'OZY',
'\xe5\xad\x9d\xe5\x8d\x97': 'XNV',
'\xe5\xad\x9d\xe6\x84\x9f': 'XGN',
'\xe5\xad\x9d\xe6\x84\x9f\xe4\xb8\x9c': 'GDN',
'\xe5\xad\x9d\xe6\x84\x9f\xe5\x8c\x97': 'XJN',
'\xe5\xad\x9d\xe8\xa5\xbf': 'XOV',
'\xe5\xad\x9f\xe5\xae\xb6\xe5\xb2\x97': 'MGB',
'\xe5\xad\x9f\xe5\xba\x84': 'MZF',
'\xe5\xad\xa4\xe5\xae\xb6\xe5\xad\x90': 'GKT',
'\xe5\xad\xa4\xe5\xb1\xb1\xe5\x8f\xa3': 'GSP',
'\xe5\xae\x81\xe4\xb8\x9c': 'NOJ',
'\xe5\xae\x81\xe4\xb8\x9c\xe5\x8d\x97': 'NDJ',
'\xe5\xae\x81\xe4\xb9\xa1': 'NXQ',
'\xe5\xae\x81\xe5\x9b\xbd': 'NNH',
'\xe5\xae\x81\xe5\xae\x89': 'NAB',
'\xe5\xae\x81\xe5\xae\xb6': 'NVT',
'\xe5\xae\x81\xe5\xbe\xb7': 'NES',
'\xe5\xae\x81\xe6\x98\x8e': 'NMZ',
'\xe5\xae\x81\xe6\x9d\x91': 'NCZ',
'\xe5\xae\x81\xe6\xad\xa6': 'NWV',
'\xe5\xae\x81\xe6\xb3\xa2': 'NGH',
'\xe5\xae\x81\xe6\xb3\xa2\xe4\xb8\x9c': 'NVH',
'\xe5\xae\x81\xe6\xb5\xb7': 'NHH',
'\xe5\xae\x81\xe9\x99\xb5\xe5\x8e\xbf': 'NLF',
'\xe5\xae\x89\xe4\xba\xad\xe5\x8c\x97': 'ASH',
'\xe5\xae\x89\xe4\xbb\x81': 'ARG',
'\xe5\xae\x89\xe5\x8c\x96': 'PKQ',
'\xe5\xae\x89\xe5\x8f\xa3\xe7\xaa\x91': 'AYY',
'\xe5\xae\x89\xe5\x9b\xbe': 'ATL',
'\xe5\xae\x89\xe5\x9b\xbe\xe8\xa5\xbf': 'AXL',
'\xe5\xae\x89\xe5\xa1\x98': 'ATV',
'\xe5\xae\x89\xe5\xae\x9a': 'ADP',
'\xe5\xae\x89\xe5\xae\xb6': 'AJB',
'\xe5\xae\x89\xe5\xb9\xb3': 'APT',
'\xe5\xae\x89\xe5\xb9\xbf': 'AGT',
'\xe5\xae\x89\xe5\xba\x86': 'AQH',
'\xe5\xae\x89\xe5\xba\x86\xe8\xa5\xbf': 'APH',
'\xe5\xae\x89\xe5\xba\xb7': 'AKY',
'\xe5\xae\x89\xe5\xbe\xb7': 'ARW',
'\xe5\xae\x89\xe6\xba\xaa': 'AXS',
'\xe5\xae\x89\xe8\xbe\xbe': 'ADX',
'\xe5\xae\x89\xe9\x98\xb3': 'AYF',
'\xe5\xae\x89\xe9\x98\xb3\xe4\xb8\x9c': 'ADF',
'\xe5\xae\x89\xe9\x99\x86': 'ALN',
'\xe5\xae\x89\xe9\xa1\xba': 'ASW',
'\xe5\xae\x89\xe9\xa1\xba\xe8\xa5\xbf': 'ASE',
'\xe5\xae\x89\xe9\xbe\x99': 'AUZ',
'\xe5\xae\x8b': 'SOB',
'\xe5\xae\x8b\xe5\x9f\x8e\xe8\xb7\xaf': 'SFF',
'\xe5\xae\x8f\xe5\xba\x86': 'HEY',
'\xe5\xae\x98\xe5\x8e\x85': 'GTP',
'\xe5\xae\x98\xe5\x8e\x85\xe8\xa5\xbf': 'KEP',
'\xe5\xae\x98\xe5\xad\x97\xe4\xba\x95': 'GOT',
'\xe5\xae\x98\xe9\xab\x98': 'GVP',
'\xe5\xae\x9a\xe5\x8d\x97': 'DNG',
'\xe5\xae\x9a\xe5\xb7\x9e': 'DXP',
'\xe5\xae\x9a\xe5\xb7\x9e\xe4\xb8\x9c': 'DOP',
'\xe5\xae\x9a\xe8\xa5\x84': 'DXV',
'\xe5\xae\x9a\xe8\xa5\xbf': 'DSJ',
'\xe5\xae\x9a\xe8\xbe\xb9': 'DYJ',
'\xe5\xae\x9a\xe8\xbf\x9c': 'EWH',
'\xe5\xae\x9a\xe9\x99\xb6': 'DQK',
'\xe5\xae\x9c\xe5\x85\xb4': 'YUH',
'\xe5\xae\x9c\xe5\x9f\x8e': 'YIN',
'\xe5\xae\x9c\xe5\xae\xbe': 'YBW',
'\xe5\xae\x9c\xe5\xb7\x9e': 'YSZ',
'\xe5\xae\x9c\xe6\x98\x8c': 'YCN',
'\xe5\xae\x9c\xe6\x98\x8c\xe4\xb8\x9c': 'HAN',
'\xe5\xae\x9c\xe6\x98\xa5': 'YEG',
'\xe5\xae\x9c\xe6\x98\xa5\xe8\xa5\xbf': 'YCG',
'\xe5\xae\x9c\xe8\x80\x90': 'YVM',
'\xe5\xae\x9c\xe8\x89\xaf\xe5\x8c\x97': 'YSM',
'\xe5\xae\x9d\xe5\x8d\x8e\xe5\xb1\xb1': 'BWH',
'\xe5\xae\x9d\xe5\x9d\xbb': 'BPP',
'\xe5\xae\x9d\xe6\x8b\x89\xe6\xa0\xbc': 'BQC',
'\xe5\xae\x9d\xe6\x9e\x97': 'BNB',
'\xe5\xae\x9d\xe6\xb3\x89\xe5\xb2\xad': 'BQB',
'\xe5\xae\x9d\xe6\xb8\x85': 'BUB',
'\xe5\xae\x9d\xe9\xb8\xa1': 'BJY',
'\xe5\xae\x9d\xe9\xb8\xa1\xe5\x8d\x97': 'BBY',
'\xe5\xae\x9d\xe9\xbe\x99\xe5\xb1\xb1': 'BND',
'\xe5\xae\xa3\xe5\x8c\x96': 'XHP',
'\xe5\xae\xa3\xe5\x92\x8c': 'XWJ',
'\xe5\xae\xa3\xe5\x9f\x8e': 'ECH',
'\xe5\xae\xa3\xe5\xa8\x81': 'XWM',
'\xe5\xae\xa3\xe6\xb1\x89': 'XHY',
'\xe5\xae\xb9\xe5\x8e\xbf': 'RXZ',
'\xe5\xae\xb9\xe6\xa1\x82': 'RUQ',
'\xe5\xae\xbd\xe7\x94\xb8': 'KDT',
'\xe5\xae\xbe\xe9\x98\xb3': 'UKZ',
'\xe5\xae\xbf\xe5\xb7\x9e': 'OXH',
'\xe5\xae\xbf\xe5\xb7\x9e\xe4\xb8\x9c': 'SRH',
'\xe5\xae\xbf\xe6\x9d\xbe': 'OAH',
'\xe5\xaf\x86\xe4\xba\x91\xe5\x8c\x97': 'MUP',
'\xe5\xaf\x86\xe5\xb1\xb1': 'MSB',
'\xe5\xaf\x8c\xe5\x8e\xbf': 'FEY',
'\xe5\xaf\x8c\xe5\x8e\xbf\xe4\xb8\x9c': 'FDY',
'\xe5\xaf\x8c\xe5\xb7\x9d': 'FDZ',
'\xe5\xaf\x8c\xe6\x8b\x89\xe5\xb0\x94\xe5\x9f\xba': 'FRX',
'\xe5\xaf\x8c\xe6\xb5\xb7': 'FHX',
'\xe5\xaf\x8c\xe6\xba\x90': 'FYM',
'\xe5\xaf\x8c\xe8\xa3\x95': 'FYX',
'\xe5\xaf\x8c\xe9\x94\xa6': 'FIB',
'\xe5\xaf\x92\xe5\xb2\xad': 'HAT',
'\xe5\xaf\x92\xe8\x91\xb1\xe6\xb2\x9f': 'HKB',
'\xe5\xaf\x9f\xe7\xb4\xa0\xe9\xbd\x90': 'CSC',
'\xe5\xaf\xb9\xe9\x9d\x92\xe5\xb1\xb1': 'DQB',
'\xe5\xaf\xbf\xe9\x98\xb3': 'SYV',
'\xe5\xb0\x86\xe4\xb9\x90': 'JLS',
'\xe5\xb0\x8f\xe4\xb8\x9c': 'XOD',
'\xe5\xb0\x8f\xe5\x93\xa8': 'XAM',
'\xe5\xb0\x8f\xe5\xaf\xba\xe6\xb2\x9f': 'ESP',
'\xe5\xb0\x8f\xe5\xb2\xad': 'XLB',
'\xe5\xb0\x8f\xe5\xb8\x82': 'XST',
'\xe5\xb0\x8f\xe5\xbe\x97\xe6\xb1\x9f': 'EJM',
'\xe5\xb0\x8f\xe6\x89\xac\xe6\xb0\x94': 'XYX',
'\xe5\xb0\x8f\xe6\x96\xb0\xe8\xa1\x97': 'XXM',
'\xe5\xb0\x8f\xe6\x9c\x88\xe6\x97\xa7': 'XFM',
'\xe5\xb0\x8f\xe6\x9d\x91': 'XEM',
'\xe5\xb0\x8f\xe6\xa6\x84': 'EAQ',
'\xe5\xb0\x8f\xe6\xb2\xb3\xe6\xb2\xbf': 'XYD',
'\xe5\xb0\x8f\xe6\xb2\xb3\xe9\x95\x87': 'EKY',
'\xe5\xb0\x8f\xe8\x91\xa3': 'XEZ',
'\xe5\xb0\x8f\xe8\xa5\xbf\xe5\xba\x84': 'XXP',
'\xe5\xb0\x8f\xe9\x87\x91\xe5\x8f\xa3': 'NKQ',
'\xe5\xb0\x8f\xe9\x9b\xa8\xe8\xb0\xb7': 'XHM',
'\xe5\xb0\x96\xe5\xb3\xb0': 'PFQ',
'\xe5\xb0\x9a\xe5\xae\xb6': 'SJB',
'\xe5\xb0\x9a\xe5\xbf\x97': 'SZB',
'\xe5\xb0\xa4\xe6\xba\xaa': 'YXS',
'\xe5\xb0\xb9\xe5\x9c\xb0': 'YDM',
'\xe5\xb0\xbc\xe5\x8b\x92\xe5\x85\x8b': 'NIR',
'\xe5\xb0\xbc\xe6\x9c\xa8': 'NMO',
'\xe5\xb1\x8f\xe8\xbe\xb9': 'PBM',
'\xe5\xb1\xb1\xe4\xb8\xb9': 'SDJ',
'\xe5\xb1\xb1\xe5\x9d\xa1\xe4\xb8\x9c': 'SBN',
'\xe5\xb1\xb1\xe5\x9f\x8e\xe9\x95\x87': 'SCL',
'\xe5\xb1\xb1\xe5\xb8\x82': 'SQB',
'\xe5\xb1\xb1\xe6\xb2\xb3\xe5\xb1\xaf': 'SHL',
'\xe5\xb1\xb1\xe6\xb5\xb7\xe5\x85\xb3': 'SHD',
'\xe5\xb1\xb1\xe9\x98\xb4': 'SNV',
'\xe5\xb2\x90\xe5\xb1\xb1': 'QAY',
'\xe5\xb2\x91\xe6\xba\xaa': 'CNZ',
'\xe5\xb2\x94\xe6\xb1\x9f': 'CAM',
'\xe5\xb2\xa2\xe5\xb2\x9a': 'KLV',
'\xe5\xb2\xa9\xe4\xbc\x9a': 'AEP',
'\xe5\xb2\xb1\xe5\xb2\xb3': 'RYV',
'\xe5\xb2\xb3\xe5\xae\xb6\xe4\xba\x95': 'YGJ',
'\xe5\xb2\xb3\xe6\xb1\xa0': 'AWW',
'\xe5\xb2\xb3\xe9\x98\xb3': 'YYQ',
'\xe5\xb2\xb3\xe9\x98\xb3\xe4\xb8\x9c': 'YIQ',
'\xe5\xb3\xa1\xe6\xb1\x9f': 'EJG',
'\xe5\xb3\xa8\xe7\x9c\x89': 'EMW',
'\xe5\xb3\xa8\xe7\x9c\x89\xe5\xb1\xb1': 'IXW',
'\xe5\xb3\xa8\xe8\xbe\xb9': 'EBW',
'\xe5\xb3\xbb\xe5\xbe\xb7': 'JDB',
'\xe5\xb4\x87\xe4\xbb\x81': 'CRG',
'\xe5\xb4\x87\xe5\xb7\xa6': 'CZZ',
'\xe5\xb4\x94\xe9\xbb\x84\xe5\x8f\xa3': 'CHP',
'\xe5\xb4\x96\xe5\xb7\x9e': 'YUQ',
'\xe5\xb5\xaf\xe5\xb2\x97': 'CAX',
'\xe5\xb7\xa2\xe6\xb9\x96': 'CIH',
'\xe5\xb7\xa2\xe6\xb9\x96\xe4\xb8\x9c': 'GUH',
'\xe5\xb7\xa5\xe5\x86\x9c\xe6\xb9\x96': 'GRT',
'\xe5\xb7\xa6\xe5\xb2\xad': 'ZSN',
'\xe5\xb7\xa8\xe5\xae\x9d': 'JRT',
'\xe5\xb7\xa8\xe9\x87\x8e': 'JYK',
'\xe5\xb7\xa9\xe4\xb9\x89': 'GXF',
'\xe5\xb7\xa9\xe4\xb9\x89\xe5\x8d\x97': 'GYF',
'\xe5\xb7\xb4\xe4\xb8\x9c': 'BNN',
'\xe5\xb7\xb4\xe4\xb8\xad': 'IEW',
'\xe5\xb7\xb4\xe4\xb8\xad\xe4\xb8\x9c': 'BDE',
'\xe5\xb7\xb4\xe5\xb1\xb1': 'BAY',
'\xe5\xb7\xb4\xe5\xbd\xa6\xe9\xab\x98\xe5\x8b\x92': 'BAC',
'\xe5\xb7\xb4\xe6\x9e\x97': 'BLX',
'\xe5\xb7\xb4\xe6\xa5\x9a': 'BCR',
'\xe5\xb8\x83\xe5\x88\x97\xe5\xbc\x80': 'BLR',
'\xe5\xb8\x83\xe6\xb5\xb7': 'BUT',
'\xe5\xb8\x88\xe5\xae\x97': 'SEM',
'\xe5\xb8\x88\xe5\xba\x84': 'SNM',
'\xe5\xb8\xa6\xe5\xb2\xad': 'DLB',
'\xe5\xb8\xb8\xe5\xb7\x9e': 'CZH',
'\xe5\xb8\xb8\xe5\xb7\x9e\xe5\x8c\x97': 'ESH',
'\xe5\xb8\xb8\xe5\xb9\xb3': 'DAQ',
'\xe5\xb8\xb8\xe5\xb9\xb3\xe4\xb8\x9c': 'FQQ',
'\xe5\xb8\xb8\xe5\xba\x84': 'CVK',
'\xe5\xb8\xb8\xe5\xbe\xb7': 'VGQ',
'\xe5\xb8\xbd\xe5\x84\xbf\xe5\xb1\xb1': 'MRB',
'\xe5\xb9\xb2\xe5\xa1\x98': 'GNJ',
'\xe5\xb9\xb2\xe6\xb2\x9f': 'GGL',
'\xe5\xb9\xb3\xe5\x85\xb3': 'PGM',
'\xe5\xb9\xb3\xe5\x87\x89': 'PIJ',
'\xe5\xb9\xb3\xe5\x87\x89\xe5\x8d\x97': 'POJ',
'\xe5\xb9\xb3\xe5\x8d\x97\xe5\x8d\x97': 'PAZ',
'\xe5\xb9\xb3\xe5\x8e\x9f': 'PYK',
'\xe5\xb9\xb3\xe5\x8e\x9f\xe5\xa0\xa1': 'PPJ',
'\xe5\xb9\xb3\xe5\x8f\xb0': 'PVT',
'\xe5\xb9\xb3\xe5\x9d\x9d\xe5\x8d\x97': 'PBE',
'\xe5\xb9\xb3\xe5\x9e\x8b\xe5\x85\xb3': 'PGV',
'\xe5\xb9\xb3\xe5\xae\x89': 'PAL',
'\xe5\xb9\xb3\xe5\xae\x89\xe9\x95\x87': 'PZT',
'\xe5\xb9\xb3\xe5\xae\x89\xe9\xa9\xbf': 'PNO',
'\xe5\xb9\xb3\xe5\xb1\xb1': 'PSB',
'\xe5\xb9\xb3\xe5\xb2\x97': 'PGL',
'\xe5\xb9\xb3\xe5\xb3\xaa': 'PYP',
'\xe5\xb9\xb3\xe5\xba\x84': 'PZD',
'\xe5\xb9\xb3\xe5\xba\x84\xe5\x8d\x97': 'PND',
'\xe5\xb9\xb3\xe5\xba\xa6': 'PAK',
'\xe5\xb9\xb3\xe6\x88\xbf': 'PFB',
'\xe5\xb9\xb3\xe6\x97\xba': 'PWV',
'\xe5\xb9\xb3\xe6\x98\x8c': 'PCE',
'\xe5\xb9\xb3\xe6\x9e\x9c': 'PGZ',
'\xe5\xb9\xb3\xe6\xb2\xb3\xe5\x8f\xa3': 'PHM',
'\xe5\xb9\xb3\xe6\xb3\x89': 'PQP',
'\xe5\xb9\xb3\xe6\xb4\x8b': 'PYX',
'\xe5\xb9\xb3\xe6\xb9\x96': 'PHQ',
'\xe5\xb9\xb3\xe7\x94\xb0': 'PTM',
'\xe5\xb9\xb3\xe7\xa4\xbe': 'PSV',
'\xe5\xb9\xb3\xe9\x81\xa5': 'PYV',
'\xe5\xb9\xb3\xe9\x81\xa5\xe5\x8f\xa4\xe5\x9f\x8e': 'PDV',
'\xe5\xb9\xb3\xe9\x82\x91': 'PIK',
'\xe5\xb9\xb3\xe9\xa1\xb6\xe5\xb1\xb1': 'PEN',
'\xe5\xb9\xb3\xe9\xa1\xb6\xe5\xb1\xb1\xe8\xa5\xbf': 'BFF',
'\xe5\xb9\xbf\xe5\x85\x83': 'GYW',
'\xe5\xb9\xbf\xe5\x85\x83\xe5\x8d\x97': 'GAW',
'\xe5\xb9\xbf\xe5\x8d\x97\xe5\x8d\xab': 'GNM',
'\xe5\xb9\xbf\xe5\xae\x81': 'FBQ',
'\xe5\xb9\xbf\xe5\xae\x81\xe5\xaf\xba': 'GQT',
'\xe5\xb9\xbf\xe5\xae\x81\xe5\xaf\xba\xe5\x8d\x97': 'GNT',
'\xe5\xb9\xbf\xe5\xae\x89': 'VJW',
'\xe5\xb9\xbf\xe5\xae\x89\xe5\x8d\x97': 'VUW',
'\xe5\xb9\xbf\xe5\xb7\x9e': 'GZQ',
'\xe5\xb9\xbf\xe5\xb7\x9e\xe4\xb8\x9c': 'GGQ',
'\xe5\xb9\xbf\xe5\xb7\x9e\xe5\x8c\x97': 'GBQ',
'\xe5\xb9\xbf\xe5\xb7\x9e\xe5\x8d\x97': 'IZQ',
'\xe5\xb9\xbf\xe5\xb7\x9e\xe8\xa5\xbf': 'GXQ',
'\xe5\xb9\xbf\xe5\xbe\xb7': 'GRH',
'\xe5\xb9\xbf\xe6\xb0\xb4': 'GSN',
'\xe5\xb9\xbf\xe6\xb1\x89': 'GHW',
'\xe5\xb9\xbf\xe6\xb1\x89\xe5\x8c\x97': 'GVW',
'\xe5\xb9\xbf\xe9\x80\x9a\xe5\x8c\x97': 'GPM',
'\xe5\xba\x84\xe6\xa1\xa5': 'ZQH',
'\xe5\xba\x84\xe6\xb2\xb3\xe5\x8c\x97': 'ZUT',
'\xe5\xba\x86\xe4\xb8\xb0': 'QFT',
'\xe5\xba\x86\xe5\xae\x89': 'QAB',
'\xe5\xba\x86\xe7\x9b\x9b': 'QSQ',
'\xe5\xba\x86\xe9\x98\xb3\xe5\xb1\xb1': 'QSJ',
'\xe5\xba\x90\xe5\xb1\xb1': 'LSG',
'\xe5\xba\x90\xe6\xb1\x9f': 'UJH',
'\xe5\xba\x93\xe4\xbc\xa6': 'KLD',
'\xe5\xba\x93\xe5\xb0\x94\xe5\x8b\x92': 'KLR',
'\xe5\xba\x93\xe8\xbd\xa6': 'KCR',
'\xe5\xba\x93\xe9\x83\xbd\xe5\xb0\x94': 'KDX',
'\xe5\xba\x94\xe5\x8e\xbf': 'YZV',
'\xe5\xba\x94\xe5\x9f\x8e': 'YHN',
'\xe5\xba\x99\xe5\x9f\x8e': 'MAP',
'\xe5\xba\x99\xe5\xb1\xb1': 'MSN',
'\xe5\xba\x99\xe5\xb2\xad': 'MLL',
'\xe5\xba\x99\xe5\xba\x84': 'MZJ',
'\xe5\xba\xb7\xe5\x9f\x8e': 'KCP',
'\xe5\xba\xb7\xe5\xba\x84': 'KZP',
'\xe5\xba\xb7\xe7\x86\x99\xe5\xb2\xad': 'KXZ',
'\xe5\xba\xb7\xe9\x87\x91\xe4\xba\x95': 'KJB',
'\xe5\xbb\x89\xe6\xb1\x9f': 'LJZ',
'\xe5\xbb\x8a\xe5\x9d\x8a': 'LJP',
'\xe5\xbb\x8a\xe5\x9d\x8a\xe5\x8c\x97': 'LFP',
'\xe5\xbb\xb6\xe5\x90\x89': 'YJL',
'\xe5\xbb\xb6\xe5\x90\x89\xe8\xa5\xbf': 'YXL',
'\xe5\xbb\xb6\xe5\xae\x89': 'YWY',
'\xe5\xbb\xb6\xe5\xba\x86': 'YNP',
'\xe5\xbb\xba\xe4\xb8\x89\xe6\xb1\x9f': 'JIB',
'\xe5\xbb\xba\xe5\xa7\x8b': 'JRN',
'\xe5\xbb\xba\xe5\xae\x81\xe5\x8e\xbf\xe5\x8c\x97': 'JCS',
'\xe5\xbb\xba\xe6\x98\x8c': 'JFD',
'\xe5\xbb\xba\xe6\xb0\xb4': 'JSM',
'\xe5\xbb\xba\xe6\xb9\x96': 'AJH',
'\xe5\xbb\xba\xe7\x93\xaf': 'JVS',
'\xe5\xbb\xba\xe7\x93\xaf\xe8\xa5\xbf': 'JUS',
'\xe5\xbb\xba\xe8\xae\xbe': 'JET',
'\xe5\xbb\xba\xe9\x98\xb3': 'JYS',
'\xe5\xbc\x80\xe5\x8e\x9f': 'KYT',
'\xe5\xbc\x80\xe5\x8e\x9f\xe8\xa5\xbf': 'KXT',
'\xe5\xbc\x80\xe5\xae\x89': 'KAT',
'\xe5\xbc\x80\xe5\xb0\x81': 'KFF',
'\xe5\xbc\x80\xe5\xb0\x81\xe5\x8c\x97': 'KBF',
'\xe5\xbc\x80\xe6\xb1\x9f': 'KAW',
'\xe5\xbc\x80\xe9\x80\x9a': 'KTT',
'\xe5\xbc\x80\xe9\x98\xb3': 'KVW',
'\xe5\xbc\x80\xe9\xb2\x81': 'KLC',
'\xe5\xbc\x8b\xe6\xb1\x9f': 'RVH',
'\xe5\xbc\x8b\xe9\x98\xb3': 'YIG',
'\xe5\xbc\x93\xe6\xa3\x9a\xe5\xad\x90': 'GPT',
'\xe5\xbc\xa0\xe5\x85\xb0': 'ZLV',
'\xe5\xbc\xa0\xe5\xae\xb6\xe5\x8f\xa3': 'ZKP',
'\xe5\xbc\xa0\xe5\xae\xb6\xe5\x8f\xa3\xe5\x8d\x97': 'ZMP',
'\xe5\xbc\xa0\xe5\xae\xb6\xe7\x95\x8c': 'DIQ',
'\xe5\xbc\xa0\xe6\x8e\x96': 'ZYJ',
'\xe5\xbc\xa0\xe6\x8e\x96\xe8\xa5\xbf': 'ZEJ',
'\xe5\xbc\xa0\xe6\xa1\xa5': 'ZQY',
'\xe5\xbc\xa0\xe7\x99\xbe\xe6\xb9\xbe': 'ZUP',
'\xe5\xbc\xa0\xe7\xbb\xb4\xe5\xb1\xaf': 'ZWB',
'\xe5\xbc\xa0\xe8\xbe\x9b': 'ZIP',
'\xe5\xbc\xa5\xe6\xb8\xa1': 'MDF',
'\xe5\xbd\x92\xe6\xb5\x81\xe6\xb2\xb3': 'GHT',
'\xe5\xbd\x93\xe6\xb6\x82\xe4\xb8\x9c': 'OWH',
'\xe5\xbd\x93\xe9\x98\xb3': 'DYN',
'\xe5\xbd\x9d\xe8\x89\xaf': 'ALW',
'\xe5\xbd\xac\xe5\x8e\xbf': 'BXY',
'\xe5\xbd\xad\xe5\xb1\xb1': 'PSW',
'\xe5\xbd\xad\xe5\xb1\xb1\xe5\x8c\x97': 'PPW',
'\xe5\xbd\xad\xe5\xb7\x9e': 'PMW',
'\xe5\xbd\xad\xe6\xb0\xb4': 'PHW',
'\xe5\xbd\xad\xe6\xb3\xbd': 'PZG',
'\xe5\xbd\xad\xe9\x98\xb3': 'PYJ',
'\xe5\xbd\xb0\xe6\xad\xa6': 'ZWD',
'\xe5\xbe\x90\xe5\xae\xb6': 'XJB',
'\xe5\xbe\x90\xe5\xb7\x9e': 'XCH',
'\xe5\xbe\x90\xe5\xb7\x9e\xe4\xb8\x9c': 'UUH',
'\xe5\xbe\x90\xe6\xb0\xb4': 'XSP',
'\xe5\xbe\x90\xe9\x97\xbb': 'XJQ',
'\xe5\xbe\x97\xe8\x80\xb3\xe5\xb8\x83\xe5\xb0\x94': 'DRX',
'\xe5\xbe\x98\xe5\xbe\x8a\xe5\x8c\x97': 'PHP',
'\xe5\xbe\xae\xe5\xad\x90\xe9\x95\x87': 'WQP',
'\xe5\xbe\xb7\xe4\xbb\xa4\xe5\x93\x88': 'DHO',
'\xe5\xbe\xb7\xe4\xbc\xaf\xe6\x96\xaf': 'RDT',
'\xe5\xbe\xb7\xe4\xbf\x9d': 'RBZ',
'\xe5\xbe\xb7\xe5\x85\xb4': 'DWG',
'\xe5\xbe\xb7\xe5\xae\x89': 'DAG',
'\xe5\xbe\xb7\xe5\xb7\x9e': 'DZP',
'\xe5\xbe\xb7\xe5\xb7\x9e\xe4\xb8\x9c': 'DIP',
'\xe5\xbe\xb7\xe6\x83\xa0': 'DHT',
'\xe5\xbe\xb7\xe6\x83\xa0\xe8\xa5\xbf': 'DXT',
'\xe5\xbe\xb7\xe6\x98\x8c': 'DVW',
'\xe5\xbe\xb7\xe6\xb8\x85': 'DRH',
'\xe5\xbe\xb7\xe6\xb8\x85\xe8\xa5\xbf': 'MOH',
'\xe5\xbe\xb7\xe9\x98\xb3': 'DYW',
'\xe5\xbe\xbd\xe5\x8e\xbf': 'HYY',
'\xe5\xbf\xbb\xe5\xb7\x9e': 'XXV',
'\xe6\x80\x80\xe4\xbb\x81': 'HRV',
'\xe6\x80\x80\xe4\xbb\x81\xe4\xb8\x9c': 'HFV',
'\xe6\x80\x80\xe5\x8c\x96': 'HHQ',
'\xe6\x80\x80\xe5\x8c\x96\xe5\x8d\x97': 'KAQ',
'\xe6\x80\x80\xe6\x9f\x94': 'HRP',
'\xe6\x80\x80\xe6\x9f\x94\xe5\x8c\x97': 'HBP',
'\xe6\x80\x80\xe9\x9b\x86': 'FAQ',
'\xe6\x81\xa9\xe6\x96\xbd': 'ESN',
'\xe6\x81\xad\xe5\x9f\x8e': 'GCZ',
'\xe6\x81\xaf\xe5\x8e\xbf': 'ENN',
'\xe6\x81\xaf\xe7\x83\xbd': 'XFW',
'\xe6\x82\xac\xe9\x92\x9f': 'XRP',
'\xe6\x83\xa0\xe4\xb8\x9c': 'KDQ',
'\xe6\x83\xa0\xe5\x86\x9c': 'HMJ',
'\xe6\x83\xa0\xe5\xae\x89': 'HNS',
'\xe6\x83\xa0\xe5\xb1\xb1': 'VCH',
'\xe6\x83\xa0\xe5\xb7\x9e': 'HCQ',
'\xe6\x83\xa0\xe5\xb7\x9e\xe5\x8d\x97': 'KNQ',
'\xe6\x83\xa0\xe5\xb7\x9e\xe8\xa5\xbf': 'VXQ',
'\xe6\x83\xa0\xe7\x8e\xaf': 'KHQ',
'\xe6\x85\x88\xe5\x88\xa9': 'CUQ',
'\xe6\x88\x90\xe5\x90\x89\xe6\x80\x9d\xe6\xb1\x97': 'CJX',
'\xe6\x88\x90\xe9\x83\xbd': 'CDW',
'\xe6\x88\x90\xe9\x83\xbd\xe4\xb8\x9c': 'ICW',
'\xe6\x88\x90\xe9\x83\xbd\xe5\x8d\x97': 'CNW',
'\xe6\x88\x90\xe9\xab\x98\xe5\xad\x90': 'CZB',
'\xe6\x88\x9a\xe5\xa2\x85\xe5\xa0\xb0': 'QYH',
'\xe6\x89\x8e\xe5\x85\xb0\xe5\xb1\xaf': 'ZTX',
'\xe6\x89\x8e\xe8\xb5\x89\xe8\xaf\xba\xe5\xb0\x94\xe8\xa5\xbf': 'ZXX',
'\xe6\x89\x8e\xe9\xb2\x81\xe7\x89\xb9': 'ZLD',
'\xe6\x89\x93\xe6\x9f\xb4\xe6\xb2\x9f': 'DGJ',
'\xe6\x89\xac\xe5\xb7\x9e': 'YLH',
'\xe6\x89\xb6\xe4\xbd\x99': 'FYT',
'\xe6\x89\xb6\xe4\xbd\x99\xe5\x8c\x97': 'FBT',
'\xe6\x89\xb6\xe7\xbb\xa5': 'FSZ',
'\xe6\x89\xbf\xe5\xbe\xb7': 'CDP',
'\xe6\x89\xbf\xe5\xbe\xb7\xe4\xb8\x9c': 'CCP',
'\xe6\x8a\x9a\xe5\xae\x81': 'FNP',
'\xe6\x8a\x9a\xe5\xb7\x9e': 'FZG',
'\xe6\x8a\x9a\xe5\xb7\x9e\xe4\xb8\x9c': 'FDG',
'\xe6\x8a\x9a\xe5\xb7\x9e\xe5\x8c\x97': 'FBG',
'\xe6\x8a\x9a\xe6\x9d\xbe': 'FSL',
'\xe6\x8a\x9a\xe8\xbf\x9c': 'FYB',
'\xe6\x8a\x9a\xe9\xa1\xba': 'FST',
'\xe6\x8a\x9a\xe9\xa1\xba\xe5\x8c\x97': 'FET',
'\xe6\x8b\x89\xe5\x8f\xa4': 'LGB',
'\xe6\x8b\x89\xe5\x93\x88': 'LHX',
'\xe6\x8b\x89\xe6\x9e\x97': 'LAB',
'\xe6\x8b\x89\xe8\x90\xa8': 'LSO',
'\xe6\x8b\x89\xe9\xb2\x8a': 'LEM',
'\xe6\x8b\x9b\xe6\x9f\x8f': 'ZBP',
'\xe6\x8d\xa2\xe6\x96\xb0\xe5\xa4\xa9': 'VTB',
'\xe6\x8f\xad\xe9\x98\xb3': 'JRQ',
'\xe6\x94\x80\xe6\x9e\x9d\xe8\x8a\xb1': 'PRW',
'\xe6\x94\xb8\xe5\x8e\xbf': 'YOG',
'\xe6\x94\xb8\xe5\x8e\xbf\xe5\x8d\x97': 'YXG',
'\xe6\x95\x96\xe5\x8a\x9b\xe5\xb8\x83\xe5\x91\x8a': 'ALD',
'\xe6\x95\xa6\xe5\x8c\x96': 'DHL',
'\xe6\x95\xa6\xe7\x85\x8c': 'DHJ',
'\xe6\x96\x87\xe5\x9c\xb0': 'WNZ',
'\xe6\x96\x87\xe5\xae\x89': 'WBP',
'\xe6\x96\x87\xe6\x98\x8c': 'WEQ',
'\xe6\x96\x87\xe6\xb0\xb4': 'WEV',
'\xe6\x96\x87\xe7\x99\xbb': 'WBK',
'\xe6\x96\x87\xe7\x99\xbb\xe4\xb8\x9c': 'WGK',
'\xe6\x96\x9c\xe6\xb2\xb3\xe6\xb6\xa7': 'EEP',
'\xe6\x96\xb0\xe4\xb9\x90': 'ELP',
'\xe6\x96\xb0\xe4\xb9\xa1': 'XXF',
'\xe6\x96\xb0\xe4\xb9\xa1\xe4\xb8\x9c': 'EGF',
'\xe6\x96\xb0\xe4\xbc\x9a': 'EFQ',
'\xe6\x96\xb0\xe4\xbd\x99': 'XUG',
'\xe6\x96\xb0\xe4\xbd\x99\xe5\x8c\x97': 'XBG',
'\xe6\x96\xb0\xe4\xbf\x9d\xe5\xae\x89': 'XAP',
'\xe6\x96\xb0\xe5\x85\xb4\xe5\x8e\xbf': 'XGQ',
'\xe6\x96\xb0\xe5\x8c\x96': 'EHQ',
'\xe6\x96\xb0\xe5\x8c\x96\xe5\x8d\x97': 'EJQ',
'\xe6\x96\xb0\xe5\x8d\x8e': 'XHB',
'\xe6\x96\xb0\xe5\x8d\x8e\xe5\xb1\xaf': 'XAX',
'\xe6\x96\xb0\xe5\x8e\xbf': 'XSN',
'\xe6\x96\xb0\xe5\x8f\x8b\xe8\xb0\x8a': 'EYB',
'\xe6\x96\xb0\xe5\x92\x8c': 'XIR',
'\xe6\x96\xb0\xe5\x9d\xaa\xe7\x94\xb0': 'XPM',
'\xe6\x96\xb0\xe5\x9f\x8e\xe5\xad\x90': 'XCT',
'\xe6\x96\xb0\xe5\xae\x89': 'EAM',
'\xe6\x96\xb0\xe5\xae\x89\xe5\x8e\xbf': 'XAF',
'\xe6\x96\xb0\xe5\xb8\x90\xe6\x88\xbf': 'XZX',
'\xe6\x96\xb0\xe5\xb9\xb2': 'EGG',
'\xe6\x96\xb0\xe6\x99\x83': 'XLQ',
'\xe6\x96\xb0\xe6\x99\x83\xe8\xa5\xbf': 'EWQ',
'\xe6\x96\xb0\xe6\x9d\x8e': 'XLJ',
'\xe6\x96\xb0\xe6\x9d\x96\xe5\xad\x90': 'ERP',
'\xe6\x96\xb0\xe6\x9d\xbe\xe6\xb5\xa6': 'XOB',
'\xe6\x96\xb0\xe6\x9e\x97': 'XPX',
'\xe6\x96\xb0\xe6\xb0\x91': 'XMD',
'\xe6\x96\xb0\xe6\xb1\x9f': 'XJM',
'\xe6\x96\xb0\xe6\xb2\x82': 'VIH',
'\xe6\x96\xb0\xe6\xb4\xa5': 'IRW',
'\xe6\x96\xb0\xe6\xb4\xa5\xe5\x8d\x97': 'ITW',
'\xe6\x96\xb0\xe7\xaa\x9d\xe9\x93\xba': 'EPD',
'\xe6\x96\xb0\xe7\xab\x8b\xe5\xb1\xaf': 'XLD',
'\xe6\x96\xb0\xe7\xab\x8b\xe9\x95\x87': 'XGT',
'\xe6\x96\xb0\xe7\xbb\x9b': 'XJV',
'\xe6\x96\xb0\xe7\xbb\xb0\xe6\xba\x90': 'XRX',
'\xe6\x96\xb0\xe8\x82\x87': 'XZT',
'\xe6\x96\xb0\xe9\x82\xb1': 'XQD',
'\xe6\x96\xb0\xe9\x83\x91\xe6\x9c\xba\xe5\x9c\xba': 'EZF',
'\xe6\x96\xb0\xe9\x83\xbd\xe4\xb8\x9c': 'EWW',
'\xe6\x96\xb0\xe9\x98\xb3\xe9\x95\x87': 'XZJ',
'\xe6\x96\xb0\xe9\x9d\x92': 'XQB',
'\xe6\x96\xbd\xe5\xae\xb6\xe5\x98\xb4': 'SHM',
'\xe6\x96\xbd\xe7\xa7\x89': 'AQW',
'\xe6\x97\x85\xe9\xa1\xba': 'LST',
'\xe6\x97\x8c\xe5\xbe\xb7': 'NSH',
'\xe6\x97\x97\xe4\xb8\x8b\xe8\x90\xa5': 'QXC',
'\xe6\x97\xa0\xe4\xb8\xba': 'IIH',
'\xe6\x97\xa0\xe9\x94\xa1': 'WXH',
'\xe6\x97\xa0\xe9\x94\xa1\xe4\xb8\x9c': 'WGH',
'\xe6\x97\xa0\xe9\x94\xa1\xe6\x96\xb0\xe5\x8c\xba': 'IFH',
'\xe6\x97\xa5\xe5\x96\x80\xe5\x88\x99': 'RKO',
'\xe6\x97\xa5\xe7\x85\xa7': 'RZK',
'\xe6\x97\xa7\xe5\xba\x84\xe7\xaa\x9d': 'JVP',
'\xe6\x97\xac\xe9\x98\xb3': 'XUY',
'\xe6\x97\xac\xe9\x98\xb3\xe5\x8c\x97': 'XBY',
'\xe6\x97\xba\xe8\x8b\x8d': 'WEW',
'\xe6\x98\x82\xe6\x98\x82\xe6\xba\xaa': 'AAX',
'\xe6\x98\x86\xe5\xb1\xb1': 'KSH',
'\xe6\x98\x86\xe5\xb1\xb1\xe5\x8d\x97': 'KNH',
'\xe6\x98\x86\xe6\x98\x8e': 'KMM',
'\xe6\x98\x86\xe6\x98\x8e\xe8\xa5\xbf': 'KXM',
'\xe6\x98\x86\xe7\x8b\xac\xe4\xbb\x91\xe5\x8f\xac': 'KDC',
'\xe6\x98\x86\xe9\x98\xb3': 'KAM',
'\xe6\x98\x8c\xe4\xb9\x90': 'CLK',
'\xe6\x98\x8c\xe5\x9b\xbe': 'CTT',
'\xe6\x98\x8c\xe5\x9b\xbe\xe8\xa5\xbf': 'CPT',
'\xe6\x98\x8c\xe5\xb9\xb3': 'CPP',
'\xe6\x98\x8c\xe5\xb9\xb3\xe5\x8c\x97': 'VBP',
'\xe6\x98\x8c\xe9\xbb\x8e': 'CLP',
'\xe6\x98\x8e\xe5\x85\x89': 'MGH',
'\xe6\x98\x8e\xe5\x9f\x8e': 'MCL',
'\xe6\x98\x8e\xe6\xb0\xb4\xe6\xb2\xb3': 'MUT',
'\xe6\x98\x8e\xe6\xb8\xaf': 'MGN',
'\xe6\x98\x8e\xe6\xb8\xaf\xe4\xb8\x9c': 'MDN',
'\xe6\x98\x8e\xe7\x8f\xa0': 'MFQ',
'\xe6\x98\xa5\xe6\xb9\xbe': 'CQQ',
'\xe6\x98\xa5\xe9\x98\xb3': 'CAL',
'\xe6\x98\xad\xe5\x8c\x96': 'ZHW',
'\xe6\x98\xad\xe9\x80\x9a': 'ZDW',
'\xe6\x99\x8b\xe4\xb8\xad': 'JZV',
'\xe6\x99\x8b\xe5\x9f\x8e': 'JCF',
'\xe6\x99\x8b\xe5\x9f\x8e\xe5\x8c\x97': 'JEF',
'\xe6\x99\x8b\xe5\xb7\x9e': 'JXP',
'\xe6\x99\x8b\xe6\xb1\x9f': 'JJS',
'\xe6\x99\x8f\xe5\x9f\x8e': 'YEK',
'\xe6\x99\xa8\xe6\x98\x8e': 'CMB',
'\xe6\x99\xae\xe5\x85\xb0\xe5\xba\x97': 'PLT',
'\xe6\x99\xae\xe5\xae\x81': 'PEQ',
'\xe6\x99\xae\xe5\xae\x89': 'PAN',
'\xe6\x99\xae\xe5\xae\x89\xe5\x8e\xbf': 'PUE',
'\xe6\x99\xae\xe5\xae\x9a': 'PGW',
'\xe6\x99\xae\xe6\xb9\xbe': 'PWT',
'\xe6\x99\xae\xe9\x9b\x84': 'POW',
'\xe6\x99\xaf\xe5\xbe\xb7\xe9\x95\x87': 'JCG',
'\xe6\x99\xaf\xe6\xb3\xb0': 'JTJ',
'\xe6\x9a\x96\xe6\xb3\x89': 'NQJ',
'\xe6\x9b\xb2\xe6\xb0\xb4\xe5\x8e\xbf': 'QSO',
'\xe6\x9b\xb2\xe6\xb1\x9f': 'QIM',
'\xe6\x9b\xb2\xe9\x98\x9c': 'QFK',
'\xe6\x9b\xb2\xe9\x98\x9c\xe4\xb8\x9c': 'QAK',
'\xe6\x9b\xb2\xe9\x9d\x96': 'QJM',
'\xe6\x9b\xb9\xe5\x8e\xbf': 'CXK',
'\xe6\x9b\xb9\xe5\xad\x90\xe9\x87\x8c': 'CFP',
'\xe6\x9b\xbe\xe5\x8f\xa3': 'ZKE',
'\xe6\x9b\xbe\xe5\xae\xb6\xe5\x9d\xaa\xe5\xad\x90': 'ZBW',
'\xe6\x9c\x88\xe4\xba\xae\xe7\x94\xb0': 'YUM',
'\xe6\x9c\x88\xe5\xb1\xb1': 'YBF',
'\xe6\x9c\x94\xe5\xb7\x9e': 'SUV',
'\xe6\x9c\x97\xe4\xb9\xa1': 'LXB',
'\xe6\x9c\x9b\xe9\x83\xbd': 'WDP',
'\xe6\x9c\x9d\xe9\x98\xb3': 'CYD',
'\xe6\x9c\x9d\xe9\x98\xb3\xe5\x9c\xb0': 'CDD',
'\xe6\x9c\x9d\xe9\x98\xb3\xe5\xb7\x9d': 'CYL',
'\xe6\x9c\x9d\xe9\x98\xb3\xe9\x95\x87': 'CZL',
'\xe6\x9c\xa8\xe9\x87\x8c\xe5\x9b\xbe': 'MUD',
'\xe6\x9c\xac\xe6\xba\xaa': 'BXT',
'\xe6\x9c\xac\xe6\xba\xaa\xe6\x96\xb0\xe5\x9f\x8e': 'BVT',
'\xe6\x9c\xac\xe6\xba\xaa\xe6\xb9\x96': 'BHT',
'\xe6\x9c\xb1\xe5\xae\xb6\xe6\xb2\x9f': 'ZUB',
'\xe6\x9c\xb1\xe5\xae\xb6\xe7\xaa\x91': 'ZUJ',
'\xe6\x9c\xb1\xe6\x97\xa5\xe5\x92\x8c': 'ZRC',
'\xe6\x9c\xb1\xe6\x9d\xa8\xe6\xba\xaa': 'ZXW',
'\xe6\x9d\x8e\xe5\xae\xb6': 'LJB',
'\xe6\x9d\x8e\xe5\xae\xb6\xe5\x9d\xaa': | |
or text.lower() == 'sticker on':
if wait["selfbot"] == True:
if msg._from in owner:
wait["sticker"] = True
cl.sendMessage(msg.to,"Deteksi sticker diaktifkan")
elif cmd == "sticker off" or text.lower() == 'sticker off':
if wait["selfbot"] == True:
if msg._from in owner:
wait["sticker"] = False
cl.sendMessage(msg.to,"Deteksi sticker dinonaktifkan")
elif cmd == "jointicket on" or text.lower() == 'jointicket on':
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
settings["autoJoinTicket"] = True
cl.sendMessage(msg.to,"Join ticket diaktifkan")
elif cmd == "jointicket off" or text.lower() == 'jointicket off':
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
settings["autoJoinTicket"] = False
cl.sendMessage(msg.to,"Notag dinonaktifkan")
#===========COMMAND BLACKLIST============#
elif cmd == "ban all":
if msg.toType == 2:
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
targets.append(g.mid)
targets.remove(mid)
if targets == []:
cl.sendMessage(msg.to,"gak ada orang")
else:
for target in targets:
if target not in wait["selfbot"] or target not in Bots:
try:
wait["blacklist"][target] = True
cl.sendMessage(msg.to,"Anda ternoda")
except:
pass
elif cmd == "unban all":
if msg.toType == 2:
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
targets.append(g.mid)
targets.remove(mid)
if targets == []:
cl.sendMessage(msg.to,"gak ada orang")
else:
for target in targets:
try:
del wait["blacklist"][target]
cl.sendMessage(msg.to,"Anda ternoda")
except:
pass
elif ("Talkban:on " in msg.text):
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Team:
try:
wait["Talkblacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Untalkban:on " in msg.text):
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["Talkblacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "talkban:on" or text.lower() == 'talkban:on':
if wait["selfbot"] == True:
if msg._from in owner:
wait["Talkwblacklist"] = True
cl.sendMessage(msg.to,"Send contact")
elif cmd == "untalkban:on" or text.lower() == 'untalkban:on':
if wait["selfbot"] == True:
if msg._from in owner:
wait["Talkdblacklist"] = True
cl.sendMessage(msg.to,"Send contact")
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Team:
try:
wait["blacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin or msg._from in staff:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "ban:on" or text.lower() == 'ban:on':
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
wait["wblacklist"] = True
cl.sendMessage(msg.to,"Send contact")
elif cmd == "unban:on" or text.lower() == 'unban:on':
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
wait["dblacklist"] = True
cl.sendMessage(msg.to,"Send contact")
elif cmd == "banlist" or text.lower() == 'banlist':
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Nothing blacklist")
else:
ma = ""
a = 0
for m_id in wait["blacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"Blacklist\n\n"+ma+"\n %s User" %(str(len(wait["blacklist"]))))
elif cmd == "talkbanlist" or text.lower() == 'talkbanlist':
if wait["selfbot"] == True:
if msg._from in owner:
if wait["Talkblacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada Talkban user")
else:
ma = ""
a = 0
for m_id in wait["Talkblacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to," Talkban User\n\n"+ma+"\nTotal「%s」Talkban User" %(str(len(wait["Talkblacklist"]))))
elif cmd == "bl" or text.lower() == 'bl':
if wait["selfbot"] == True:
if msg._from in owner:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
for i in wait["blacklist"]:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "clearban" or text.lower() == 'clearban':
if wait["selfbot"] == True:
if msg._from in owner or msg._from in admin:
wait["blacklist"] = {}
ragets = cl.getContacts(wait["blacklist"])
mc = "%i" % len(ragets)
cl.sendMessage(msg.to,"Succes clearall " +mc)
elif text.lower() == 'dz':
cl.sendMessage(msg.to, "cie pake sc nya dhenza ya")
#===========COMMAND SET============#
elif 'Spesan: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Spesan: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Pesan Msg")
else:
wait["message"] = spl
cl.sendMessage(msg.to, "「Pesan Msg」\nPesan Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Swelcome: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Swelcome: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Welcome Msg")
else:
wait["welcome"] = spl
cl.sendMessage(msg.to, "「Welcome Msg」\nWelcome Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Srespon: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Srespon: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag"] = spl
cl.sendMessage(msg.to, "「Respon Msg」\nRespon Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Sspam: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Sspam: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Spam")
else:
Setmain["RAmessage1"] = spl
cl.sendMessage(msg.to, "「Spam Msg」\nSpam Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Ssider: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Ssider: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Sider Msg")
else:
wait["mention"] = spl
cl.sendMessage(msg.to, "「Sider Msg」\nSider Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif text.lower() == "cpesan":
if msg._from in admin:
cl.sendMessage(msg.to, "「Pesan Msg」\nPesan Msg mu :\n\n「 " + str(wait["message"]) + " 」")
elif text.lower() == "cwelcome":
if msg._from in admin:
cl.sendMessage(msg.to, "「Welcome Msg」\nWelcome Msg mu :\n\n「 " + str(wait["welcome"]) + " 」")
elif text.lower() == "crespon":
if msg._from in admin:
cl.sendMessage(msg.to, "「Respon Msg」\nRespon Msg mu :\n\n「 " + str(wait["Respontag"]) + " 」")
elif text.lower() == "cspam":
if msg._from in admin:
cl.sendMessage(msg.to, "「Spam Msg」\nSpam Msg mu :\n\n「 " + str(Setmain["RAmessage1"]) + " 」")
elif text.lower() == "csider":
if msg._from in admin:
cl.sendMessage(msg.to, "「Sider Msg」\nSider Msg mu :\n\n「 " + str(wait["mention"]) + " 」")
elif cmd == "batre":
if msg._from in admin or msg._from in owner:
try:cl.inviteIntoGroup(to, ["u45882d0ead1703855dbc60d40e37bec7"]);has = "OK"
except:has = "NOT"
try:cl.kickoutFromGroup(to, ["u45882d0ead1703855dbc60d40e37bec7"]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
cl.sendMessage(to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:ki.inviteIntoGroup(to, ["u2bf37dc8bb9ac850615395a9e15850f9"]);has = "OK"
except:has = "NOT"
try:ki.kickoutFromGroup(to, ["u2bf37dc8bb9ac850615395a9e15850f9"]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒ Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
ki.sendMessage(to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:kk.inviteIntoGroup(to, ["u0a5ee8d796e3677a56b84ff03b6564ec"]);has = "OK"
except:has = "NOT"
try:kk.kickoutFromGroup(to, ["u0a5ee8d796e3677a56b84ff03b6564ec"]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
kk.sendMessage(to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:kc.inviteIntoGroup(to, ["u29b16f0e99cfdf0e7d7b8170f7cdc1a7"]);has = "OK"
except:has = "NOT"
try:kc.kickoutFromGroup(to, ["u29b16f0e99cfdf0e7d7b8170f7cdc1a7"]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒. Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
kc.sendMessage(to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:kc.inviteIntoGroup(to, ["udfad8056476f3e76903575513cc8aebe"]);has = "OK"
except:has = "NOT"
try:km.kickoutFromGroup(to, ["udfad8056476f3e76903575513cc8aebe"]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒ Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
km.sendMessage(to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
try:kb.inviteIntoGroup(to, ["uea5fe04e39713e6768cf5687bc5ac7aa"]);has = "OK"
except:has = "NOT"
try:kb.kickoutFromGroup(to, ["uea5fe04e39713e6768cf5687bc5ac7aa"]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "🔋██ full 100%"
else:sil = "🔌█▒ Low 0%"
if has1 == "OK":sil1 = "🔋██ full 100%"
else:sil1 = "🔌█▒ Low 0%"
kb.sendMessage(to, "Status:\n\n🔴Kick : {} \n🔴Invite : {}".format(sil1,sil))
#===========JOIN TICKET============#
elif "/ti/g/" in msg.text.lower():
if wait["selfbot"] == True:
if msg._from in admin or msg._from in owner:
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
| |
"""Integration for chat integrations"""
from __future__ import unicode_literals
import logging
from djblets.db.query import get_object_or_none
from djblets.util.templatetags.djblets_utils import user_displayname
from reviewboard.accounts.models import Trophy
from reviewboard.admin.server import build_server_url
from reviewboard.extensions.hooks import SignalHook
from reviewboard.integrations import Integration
from reviewboard.reviews.models import (BaseComment, Comment,
FileAttachmentComment,
GeneralComment,
ReviewRequest,
ScreenshotComment)
from reviewboard.reviews.signals import (review_request_closed,
review_request_published,
review_request_reopened,
review_published,
reply_published)
from reviewboard.site.urlresolvers import local_site_reverse
class BaseChatIntegration(Integration):
"""Integrates Review Board with chat applications.
This will handle updating chat channels when review requests are posted,
changed, or closed, and when there's new activity on the review request.
"""
def initialize(self):
"""Initialize the integration hooks."""
hooks = (
(review_request_closed, self._on_review_request_closed),
(review_request_published, self._on_review_request_published),
(review_request_reopened, self._on_review_request_reopened),
(review_published, self._on_review_published),
(reply_published, self._on_reply_published),
)
for signal, handler in hooks:
SignalHook(self, signal, handler)
def notify(self, title, title_link, fallback_text, local_site,
review_request, event_name=None, fields={}, pre_text=None,
body=None, color=None, thumb_url=None, image_url=None):
"""Send a webhook notification to chat application.
This will post the given message to any channels configured to
receive it.
Args:
title (unicode):
The title for the message.
title_link (unicode):
The link for the title of the message.
fallback_text (unicode):
The non-rich fallback text to display in the chat, for use in
IRC and other services.
fields (dict):
The fields comprising the rich message to display in chat.
local_site (reviewboard.site.models.LocalSite):
The Local Site for the review request or review emitting
the message. Only integration configurations matching this
Local Site will be processed.
review_request (reviewboard.reviews.models.ReviewRequest):
The review request the notification is bound to.
event_name (unicode):
The name of the event triggering this notification.
pre_text (unicode, optional):
Text to display before the rest of the message.
body (unicode, optional):
The body of the message.
color (unicode, optional):
A color string or RGB hex value for the message.
thumb_url (unicode, optional):
URL of an image to show on the side of the message.
image_url (unicode, optional):
URL of an image to show in the message.
"""
raise NotImplementedError(
'%s must implement notify' % type(self).__name__)
def notify_review_or_reply(self, user, review, pre_text, fallback_text,
                           event_name, first_comment=None, **kwargs):
    """Notify the chat application of a new posted review or reply.

    This performs the common work of notifying configured channels
    when there's a review or a reply.

    Args:
        user (django.contrib.auth.models.User):
            The user who posted the review or reply.

        review (reviewboard.reviews.models.Review):
            The review or reply that was posted.

        pre_text (unicode):
            Text to show before the message attachments.

        fallback_text (unicode):
            Text to show in the fallback text, before the review URL and
            after the review request ID.

        event_name (unicode):
            The name of the event triggering this notification.

        first_comment (reviewboard.reviews.models.BaseComment, optional):
            The first comment in a review, to generate the body message
            from. This is optional, and will be computed if needed.

        **kwargs (dict):
            Other keyword arguments to pass to :py:meth:`notify`.
    """
    review_request = review.review_request
    review_url = build_server_url(review.get_absolute_url())
    fallback_text = '#%s: %s: %s' % (review_request.display_id,
                                     fallback_text, review_url)

    # Bug fix: ``body`` was previously left unassigned when the review
    # had no body_top and no comments, causing an UnboundLocalError at
    # the notify() call below.
    body = None

    if review.body_top:
        body = review.body_top

        # This is silly to show twice.
        if review.ship_it and body == 'Ship It!':
            body = ''
    else:
        if not first_comment:
            # Find the first comment of any kind on the review; only the
            # text field is needed for the message body.
            for comment_cls in (Comment, FileAttachmentComment,
                                ScreenshotComment, GeneralComment):
                try:
                    first_comment = (
                        comment_cls.objects
                        .filter(review=review)
                        .only('text')
                    )[0]
                    break
                except IndexError:
                    pass

        if first_comment:
            body = first_comment.text

    self.notify(title=self.get_review_request_title(review_request),
                title_link=review_url,
                fallback_text=fallback_text,
                pre_text=pre_text,
                body=body,
                local_site=review.review_request.local_site,
                review_request=review_request,
                event_name=event_name,
                **kwargs)
def notify_review_request(self, review_request, fallback_text, event_name,
                          **kwargs):
    """Notify the chat application of a review request update.

    This performs the common work of notifying configured channels
    when there's a new review request or an update to one.

    Args:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request.

        fallback_text (unicode):
            Text to show in the fallback text, before the review URL and
            after the review request ID.

        event_name (unicode):
            The name of the event triggering this notification.

        **kwargs (dict):
            Other keyword arguments to pass to :py:meth:`notify`.
    """
    url = self.get_review_request_url(review_request)
    prefixed_fallback = '#%s: %s: %s' % (review_request.display_id,
                                         fallback_text,
                                         url)

    self.notify(title=self.get_review_request_title(review_request),
                title_link=url,
                fallback_text=prefixed_fallback,
                review_request=review_request,
                event_name=event_name,
                **kwargs)
def format_link(self, path, text):
    """Format a URL and text into the chat service's link syntax.

    Subclasses combine the URL parts (method, domain, path) into the
    service-specific markup (e.g. Slack's URL syntax).

    Args:
        path (unicode):
            The path on the Review Board server.

        text (unicode):
            The text for the link.

    Returns:
        unicode:
        The formatted link.

    Raises:
        NotImplementedError:
            Always; subclasses are required to implement this.
    """
    cls_name = type(self).__name__
    raise NotImplementedError('%s must implement format_link' % cls_name)
def get_user_text_url(self, user, local_site):
    """Return the URL to a user's page.

    Args:
        user (django.contrib.auth.models.User):
            The user being linked to.

        local_site (reviewboard.site.models.LocalSite):
            The local site for the link, if any.

    Returns:
        unicode:
        The URL to the user page.
    """
    # Deliberately not user.get_absolute_url(): that would omit site
    # roots and Local Site names from the URL.
    url_kwargs = {'username': user.username}

    return local_site_reverse('user',
                              local_site=local_site,
                              kwargs=url_kwargs)
def get_user_text_link(self, user, local_site):
    """Return the chat-formatted link to a user's page.

    Args:
        user (django.contrib.auth.models.User):
            The user being linked to.

        local_site (reviewboard.site.models.LocalSite):
            The local site for the link, if any.

    Returns:
        unicode:
        The formatted link to the user page.
    """
    url = self.get_user_text_url(user, local_site)
    label = user.get_full_name() or user.username

    return self.format_link(url, label)
def get_review_request_title(self, review_request):
    """Return the title for a review request message.

    Args:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request.

    Returns:
        unicode:
        The title for the message, as ``#<id>: <summary>``.
    """
    return '#{0}: {1}'.format(review_request.display_id,
                              review_request.summary)
def get_review_request_text_link(self, review_request):
    """Return the chat-formatted link to a review request.

    Args:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request being linked to.

    Returns:
        unicode:
        The formatted link to the review request.
    """
    url = review_request.get_absolute_url()

    return self.format_link(url, review_request.summary)
def get_review_request_url(self, review_request):
    """Return the absolute URL to a review request.

    Args:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request being linked to.

    Returns:
        unicode:
        The absolute URL to the review request.
    """
    relative_url = review_request.get_absolute_url()

    return build_server_url(relative_url)
def _on_review_request_closed(self, user, review_request, close_type,
                              description=None, **kwargs):
    """Handler for when review requests are closed.

    This will send a notification to any configured channels when
    a review request is closed.

    Args:
        user (django.contrib.auth.models.User):
            The user who closed the review request. May be ``None``, in
            which case the submitter is used.

        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request that was closed.

        close_type (unicode):
            The close type (``ReviewRequest.DISCARDED`` or
            ``ReviewRequest.SUBMITTED``).

        description (unicode, optional):
            The close message.

        **kwargs (dict):
            Additional keyword arguments passed to the handler.
    """
    if not user:
        user = review_request.submitter

    user_link = self.get_user_text_link(user, review_request.local_site)

    if close_type == ReviewRequest.DISCARDED:
        pre_text = 'Discarded by %s' % user_link
        fallback_text = 'Discarded by %s' % user_displayname(user)
    elif close_type == ReviewRequest.SUBMITTED:
        pre_text = 'Closed as completed by %s' % user_link
        fallback_text = 'Closed as completed by %s' % \
                        user_displayname(user)
    else:
        # Fixed a doubled space in this log message (the old string
        # concatenation produced "for  review request").
        logging.error('Tried to notify on review_request_closed for '
                      'review request pk=%d with unknown close type "%s"',
                      review_request.pk, close_type)
        return

    # A second, duplicated "if not user" fallback used to live here; it
    # was dead code since user is already resolved above.
    self.notify_review_request(review_request,
                               fallback_text=fallback_text,
                               body=description,
                               pre_text=pre_text,
                               local_site=review_request.local_site,
                               event_name='review_request_closed')
def _on_review_request_published(self, user, review_request, changedesc,
**kwargs):
"""Handler for when review requests are published.
This will send a notification to any configured channels when
a review request is published.
Args:
user (django.contrib.auth.models.User):
The user who published the review request.
review_request (reviewboard.reviews.models.ReviewRequest):
The review request that was published.
changedesc (reviewboard.changedescs.models.ChangeDescription):
The change description for the update, if any.
**kwargs (dict):
Additional keyword arguments passed to the handler.
"""
user_link = self.get_user_text_link(user, review_request.local_site)
fields = []
if changedesc:
fallback_text = 'New update from %s' % user_displayname(user)
pre_text = 'New update from %s' % user_link
# This might be empty, which is fine. We won't show an update
# at that point.
body = changedesc.text
else:
fallback_text = 'New review request from %s' % \
user_displayname(user)
pre_text = 'New review request from %s' % user_link
body = None
fields.append({
'short': False,
'title': 'Description',
'value': review_request.description,
})
# Link to the diff in the update, if any.
diffset = review_request.get_latest_diffset()
if diffset:
diff_url = local_site_reverse(
'view-diff-revision',
local_site=review_request.local_site,
kwargs={
'review_request_id': review_request.display_id,
'revision': diffset.revision,
})
fields.append({
'short': True,
'title': 'Diff',
'value': self.format_link(diff_url,
'Revision %s' % diffset.revision),
})
if review_request.repository:
fields.append({
'short': True,
'title': 'Repository',
'value': review_request.repository.name,
})
if review_request.branch:
fields.append({
'short': True,
'title': 'Branch',
'value': review_request.branch,
})
# See if there are any new interesting file attachments to show.
# These will only show up if the file is accessible.
attachment = None
if changedesc:
# Only show new files added in this change.
try:
new_files = changedesc.fields_changed['files']['added']
except KeyError:
new_files = []
for file_info in new_files:
if (len(file_info) >= 3 and
file_info[1].endswith(self.VALID_IMAGE_URL_EXTS)):
# This one wins. Show it.
attachment = get_object_or_none(
| |
wantedTypes, branchList, pointer(listLen) )
for i in range(listLen.value):
print("Branch=" + OlxAPI.FullBranchName(branchList[i]))
return
def testOlxAPIGetSetObjTagsMemo():
    """Exercise OlxAPI tag and memo accessors on every TC_LINE object.

    Walks all lines in the loaded network, prints any existing tags/memo,
    prepends a test tag and a test memo via SetObjTags/SetObjMemo, and
    reads them back to confirm the write took effect.
    """
    # Test GetObjTags and GetObjMemo
    argsGetEquipment = {}
    argsGetEquipment["tc"] = TC_LINE
    argsGetEquipment["hnd"] = 0 # Get all lines
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        # get_equipment() advances "hnd" to the next line each call.
        lineHnd = argsGetEquipment["hnd"]
        print(OlxAPI.FullBranchName(lineHnd))
        aLine1 = OlxAPI.GetObjTags(lineHnd)
        aLine2 = OlxAPI.GetObjMemo(lineHnd)
        if (aLine1 != "") or (aLine2 != ""):
            print(( "Line: " + OlxAPI.FullBranchName(lineHnd) ))
            if aLine1 != "":
                print(( " Existing tags=" + aLine1 ))
                # NOTE(review): c_char_p over a Python str only works on
                # Python 2 (bytes); Python 3 would need an .encode().
                if OLXAPI_OK != OlxAPI.SetObjTags(lineHnd, c_char_p("NewTag;" + aLine1 ) ):
                    raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
                aLine1 = OlxAPI.GetObjTags(lineHnd)
                print(( " New tags=" + aLine1 ))
            if aLine2 != "":
                print(( " Existing memo=" + aLine2 ))
                if OLXAPI_OK != OlxAPI.SetObjMemo(lineHnd, c_char_p("New memo: line 1\r\nLine2\r\n" + aLine2 ) ):
                    raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
                aLine2 = OlxAPI.GetObjMemo(lineHnd)
                print(( " New memo=" + aLine2 ))
    return 0
def testFaultSimulation():
    """Run a bus fault and a stepped-event analysis at the NEVADA bus.

    Enumerates every bus, printing each one, and for the bus whose
    printout contains "NEVADA" runs a bus-fault simulation followed by a
    stepped-event (SEA) simulation.
    """
    # Test Fault simulation
    argsGetEquipment = {}
    argsGetEquipment["tc"] = TC_BUS
    argsGetEquipment["hnd"] = 0  # 0 = enumerate from the first bus
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        busHnd = argsGetEquipment["hnd"]
        sObj = str(OlxAPI.PrintObj1LPF(busHnd))
        print(sObj)
        if sObj.find("NEVADA") > -1:
            print("\n>>>>>>Bus fault at: " + sObj)
            OlxAPILib.run_busFault(busHnd)
            print ("\n>>>>>>Test bus fault SEA")
            OlxAPILib.run_steppedEvent(busHnd)
    return 0
def testBoundaryEquivalent(OlrFileName):
    """Create a boundary-equivalent network around CLAYTOR/NEVADA 132 kV.

    Args:
        OlrFileName: Path of the loaded OLR file. The equivalent network
            is written alongside it with an ``_eq.olr`` suffix.

    Returns:
        0 on success.

    Raises:
        OlxAPI.OlxAPIException: If a bus lookup or the equivalencing
            call fails.
    """
    def _find_bus(name, kv):
        # Lookup-or-raise helper; replaces two copy-pasted lookup blocks.
        hnd = OlxAPI.FindBus(name, kv)
        if hnd == OLXAPI_FAILURE:
            raise OlxAPI.OlxAPIException("Bus ", name, kv, " not found")
        return hnd

    EquFileName = OlrFileName.lower().replace( ".olr", "_eq.olr" )
    FltOpt = (c_double*3)(99,0,0)
    BusList = (c_int*3)(0)
    BusList[0] = c_int(_find_bus("CLAYTOR", 132.0))
    BusList[1] = c_int(_find_bus("NEVADA", 132.0))
    BusList[2] = c_int(-1)  # -1 terminates the bus list
    if OLXAPI_OK != OlxAPI.BoundaryEquivalent(c_char_p(EquFileName), BusList, FltOpt):
        raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
    print("Success. Equivalent is in " + EquFileName)
    return 0
def testDoBreakerRating():
    """Run the OneLiner breaker-rating study and write a text report."""
    scope = (c_int*3)(0, 1, 1)
    rating_threshold = c_double(70)
    output_opt = c_double(1)
    optional_report = c_int(1 + 2 + 4)
    report_txt = c_char_p("bkrratingreport.txt")
    report_csv = c_char_p("")
    config_file = c_char_p("")

    result = OlxAPI.DoBreakerRating(scope, rating_threshold, output_opt,
                                    optional_report, report_txt,
                                    report_csv, config_file)
    if result != OLXAPI_OK:
        raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())

    print("Success. Report is in " + report_txt.value)
    return 0
def testGetData_BUS():
    """Print the name and nominal kV of every bus via GetData.

    NOTE(review): the "<PASSWORD>" token below is anonymization residue
    from data scrubbing; judging by the paired BUS_sName token, it was
    almost certainly BUS (making the token BUS_dKVnominal). The file
    does not parse until it is restored.
    """
    # Test GetData
    argsGetEquipment = {}
    argsGetEquipment["tc"] = TC_BUS
    argsGetEquipment["hnd"] = 0
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        busHnd = argsGetEquipment["hnd"]
        argsGetData = {}
        argsGetData["hnd"] = busHnd
        argsGetData["token"] = BUS_sName
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        busName = argsGetData["data"]
        argsGetData["token"] = <PASSWORD>_dKVnominal
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        busKV = argsGetData["data"]
        print(( busName, busKV ))
    return 0
def testGetData_DSRLY():
    """Print one data field for every distance relay (TC_RLYDSP).

    NOTE(review): "<PASSWORD>" is anonymization residue; the original was
    an OlxAPI DS-relay token constant. Restore it before running.
    """
    argsGetEquipment = {}
    argsGetEquipment["tc"] = TC_RLYDSP
    argsGetEquipment["hnd"] = 0
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        argsGetData = {}
        argsGetData["hnd"] = argsGetEquipment["hnd"]
        argsGetData["token"] = <PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        val = argsGetData["data"]
        print(val)
    return 0
def testGetData_GENUNIT():
    """Print reactance and online status for every generator unit.

    NOTE(review): both "<PASSWORD>" tokens are anonymization residue;
    the printed labels suggest they were GU_vdX and GU_nOnline-style
    OlxAPI token constants. Restore them before running.
    """
    argsGetEquipment = {}
    argsGetEquipment["tc"] = TC_GENUNIT
    argsGetEquipment["hnd"] = 0
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        argsGetData = {}
        argsGetData["hnd"] = argsGetEquipment["hnd"]
        argsGetData["token"] = <PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        val = argsGetData["data"]
        print('X= ' + str(val))
        argsGetData = {}
        argsGetData["hnd"] = argsGetEquipment["hnd"]
        argsGetData["token"] = <PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        val = argsGetData["data"]
        print('nOnline= ' + str(val))
        print(val)
    return 0
def testGetData_BREAKER():
    """Print one data field for every breaker (TC_BREAKER).

    NOTE(review): "<PASSWORD>" is anonymization residue for an OlxAPI
    breaker token constant; restore it before running.
    """
    argsGetEquipment = {}
    argsGetEquipment["tc"] = TC_BREAKER
    argsGetEquipment["hnd"] = 0
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        argsGetData = {}
        argsGetData["hnd"] = argsGetEquipment["hnd"]
        argsGetData["token"] = <PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        val = argsGetData["data"]
        print(val)
    return 0
def testGetData_SCHEME():
    """Print every logic scheme, twice: directly and via relay groups.

    First pass enumerates TC_SCHEME objects directly; second pass walks
    TC_RLYGROUP objects and uses GetLogicScheme to visit each scheme,
    printing ID, equation and variables for both.

    NOTE(review): the "<PASSWORD>" tokens are anonymization residue for
    OlxAPI logic-scheme token constants (the surviving LS_sVariables and
    the partially surviving ...Equation suggest the LS_* family).
    Restore them before running.
    """
    # Using getequipment
    argsGetEquipment = {}
    argsGetData = {}
    argsGetEquipment["tc"] = TC_SCHEME
    argsGetEquipment["hnd"] = 0
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        argsGetData["hnd"] = argsGetEquipment["hnd"]
        argsGetData["token"] = <PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        rlyGrpHnd = argsGetData["data"]
        argsGetData["token"] = <PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        sID = argsGetData["data"]
        argsGetData["token"] = <PASSWORD>Equation
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        sEqu = argsGetData["data"]
        argsGetData["token"] = LS_sVariables
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        sVariables = argsGetData["data"]
        print('Scheme: ' + sID + '@' + OlxAPI.FullBranchName(rlyGrpHnd) + "\n" + \
              sEqu + "\n" + sVariables)
    # Through relay groups
    argsGetEquipment["tc"] = TC_RLYGROUP
    argsGetEquipment["hnd"] = 0
    argsGetLogicScheme = {}
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        schemeHnd = c_int(0)
        while (OLXAPI_OK == OlxAPI.GetLogicScheme(argsGetEquipment["hnd"], byref(schemeHnd) )):
            argsGetData["hnd"] = schemeHnd.value
            argsGetData["token"] = <PASSWORD>
            if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
                raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
            rlyGrpHnd = argsGetData["data"]
            argsGetData["token"] = <PASSWORD>
            if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
                raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
            sID = argsGetData["data"]
            argsGetData["token"] = <PASSWORD>
            if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
                raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
            sEqu = argsGetData["data"]
            argsGetData["token"] = <PASSWORD>
            if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
                raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
            sVariables = argsGetData["data"]
            print('Scheme: ' + sID + '@' + OlxAPI.FullBranchName(rlyGrpHnd) + "\n" + \
                  sEqu + "\n" + sVariables)
    return 0
def testSaveDataFile(olrFilePath):
    """Apply the matching .chf change file, then save the network as *x.olr.

    Args:
        olrFilePath: Path to the OLR file; case-insensitive extension.

    Returns:
        0 on success.

    Raises:
        OlxAPI.OlxAPIException: If SaveDataFile fails.
    """
    olrFilePath = olrFilePath.lower()
    testReadChangeFile(str(olrFilePath).replace( ".olr", ".chf"))
    # The path is already lower-cased above, so a single ".olr" replace
    # suffices; the old follow-up ".OLR" replace could never match and
    # has been removed.
    olrFilePathNew = str(olrFilePath).replace( ".olr", "x.olr" )
    if OLXAPI_FAILURE == OlxAPI.SaveDataFile(olrFilePathNew):
        raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
    print(olrFilePathNew + " had been saved successfully")
    return 0
def testFindObj():
    """Exercise FindBus, FindBusNo and FindEquipmentByTag.

    NOTE(review): the "<PASSWORD>" tokens are anonymization residue for
    OlxAPI BUS_* token constants (number, name and nominal kV, judging
    from the surrounding variable names). Restore them before running.
    """
    # Test OlxAPI.FindBus()
    bsName = "CLAYTOR"
    bsKV = 132.0
    hnd = OlxAPI.FindBus( bsName, bsKV )
    if hnd == OLXAPI_FAILURE:
        print("Bus ", bsName, bsKV, " not found")
    else:
        argsGetData = {}
        argsGetData["hnd"] = hnd
        argsGetData["token"] = <PASSWORD>_<PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        bsNo = argsGetData["data"]
        print("hnd= ", hnd, "Bus ", bsNo, " ", bsName, bsKV)
        print(OlxAPI.PrintObj1LPF(hnd))
    # Test OlxAPI.FindBusNo()
    bsNo = 99
    hnd = OlxAPI.FindBusNo( bsNo )
    if hnd == OLXAPI_FAILURE:
        print("Bus ", bsNo, " not found")
    else:
        argsGetData = {}
        argsGetData["hnd"] = hnd
        argsGetData["token"] = <PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        bsName = argsGetData["data"]
        argsGetData["hnd"] = hnd
        argsGetData["token"] = <PASSWORD>_d<PASSWORD>
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        bsKV = argsGetData["data"]
        print("hnd= ", hnd, " Bus ", bsNo, " ", bsName, " ", bsKV)
    # Test OlxAPI.FindEquipmentByTag
    tags = c_char_p("tagS")
    equType = c_int(0)  # 0 = any equipment type
    equHnd = (c_int*1)(0)
    count = 0
    while OLXAPI_OK == OlxAPI.FindEquipmentByTag( tags, equType, equHnd ):
        print(OlxAPI.PrintObj1LPF(equHnd[0]))
        count = count + 1
    print("Objects with tag " + tags.value + ": " + str(count))
    return 0
def testDeleteEquipment(olrFilePath):
    """Delete the first few buses from the network and save it as *x.olr.

    Args:
        olrFilePath: Path to the loaded OLR file; the modified network is
            saved next to it with an ``x.olr`` suffix.
    """
    hnd = (c_int*1)(0)
    # Countdown from 5 with a post-delete break at 0: deletes up to six
    # buses (ii = 5, 4, 3, 2, 1, 0) or fewer if the network runs out.
    ii = 5
    while (OLXAPI_OK == OlxAPI.GetEquipment(TC_BUS,hnd)):
        busHnd = hnd[0]
        print("Delete " + OlxAPI.PrintObj1LPF(busHnd))
        OlxAPI.DeleteEquipment(busHnd)
        if ii == 0:
            break
        ii = ii - 1
    olrFilePathNew = olrFilePath.lower()
    olrFilePathNew = olrFilePathNew.replace( ".olr", "x.olr" )
    if OLXAPI_FAILURE == OlxAPI.SaveDataFile(olrFilePathNew):
        raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
    print(olrFilePathNew + " had been saved successfully")
def testGetData_SetData():
    """Round-trip GetData/SetData/PostData for buses and generator units.

    First renames/renumbers every bus (appending a counter to the name),
    then overwrites each generator unit's reactance vector and reads it
    back.

    NOTE(review): the "<PASSWORD>" tokens are anonymization residue for
    OlxAPI token constants (a system bus-count token and GU_vdX, per the
    surviving code around them). Also, ``set_data`` here is unqualified
    while gets go through OlxAPILib.get_data -- presumably a
    module-level helper imported elsewhere; verify against the full file.
    """
    argsGetEquipment = {}
    argsGetData = {}
    # Test GetData with special handles
    argsGetData["hnd"] = HND_SYS
    argsGetData["token"] = <PASSWORD>
    if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
        raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
    print("Number of buses: ", argsGetData["data"])
    # Test SetData and GetData
    argsGetEquipment["tc"] = TC_BUS
    argsGetEquipment["hnd"] = 0
    ii = 100  # counter appended to renamed buses / used as new bus number
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        busHnd = argsGetEquipment["hnd"]
        argsGetData = {}
        argsGetData["hnd"] = busHnd
        argsGetData["token"] = BUS_dKVnominal
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        busKV = argsGetData["data"]
        argsGetData["token"] = BUS_sName
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        busNameOld = argsGetData["data"]
        argsSetData = {}
        argsSetData["hnd"] = busHnd
        argsSetData["token"] = BUS_sName
        argsSetData["data"] = busNameOld+str(ii+1)
        if OLXAPI_OK != set_data(argsSetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        argsGetData["token"] = BUS_nNumber
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        busNumberOld = argsGetData["data"]
        argsSetData["token"] = BUS_nNumber
        argsSetData["data"] = ii+1
        if OLXAPI_OK != set_data(argsSetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        # PostData commits the staged SetData changes to the object.
        if OLXAPI_OK != OlxAPI.PostData(busHnd):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        argsGetData["token"] = BUS_sName
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        busNameNew = argsGetData["data"]
        argsGetData["token"] = BUS_nNumber
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        busNumberNew = argsGetData["data"]
        print("Old:", busNumberOld, busNameOld, busKV, "kV -> New: ", busNumberNew, busNameNew, busKV, "kV")
        ii = ii + 1
    argsGetEquipment["tc"] = TC_GENUNIT
    argsGetEquipment["hnd"] = 0
    while (OLXAPI_OK == OlxAPILib.get_equipment(argsGetEquipment)):
        argsGetData = {}
        hnd = argsGetEquipment["hnd"]
        argsGetData["hnd"] = hnd
        argsGetData["token"] = GU_vdX
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        print(OlxAPI.PrintObj1LPF(hnd) + " X=", argsGetData["data"])
        argsSetData = {}
        argsSetData["hnd"] = hnd
        argsSetData["token"] = <PASSWORD>
        argsSetData["data"] = [0.21,0.22,0.23,0.24,0.25]
        if OLXAPI_OK != set_data(argsSetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        if OLXAPI_OK != OlxAPI.PostData(hnd):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        if OLXAPI_OK != OlxAPILib.get_data(argsGetData):
            raise OlxAPI.OlxAPIException(OlxAPI.ErrorString())
        print(OlxAPI.PrintObj1LPF(hnd) + " Xnew=", argsGetData["data"])
    return 0
def testGetData():
    """Dispatcher for the GetData demos; only the GENUNIT one is enabled.

    The other demos are kept here commented out so they can be toggled
    on quickly during manual testing.
    """
    #testGetData_SetData()
    #testGetData_SCHEME()
    #testGetData_BREAKER()
    testGetData_GENUNIT()
    #testGetData_DSRLY()
    #testGetData_BUS()
    #testGetRelay()
    #testGetJournalRecord()
    return 0
def testOlxAPI():
try:
if len(sys.argv) == 1:
print("Usage: " + sys.argv[0] + " YourNetwork.olr")
return 0
olrFilePath = sys.argv[1]
"""
if (not os.path.isfile(olrFilePath)):
Tkinter.Tk().withdraw() # Close the root window
opts = {}
opts['filetypes'] = [('ASPEN OneLiner file',('.olr'))]
opts['title'] = 'Open OneLiner Network'
olrFilePath = str(tkFileDialog.askopenfilename(**opts))
"""
if not os.path.isfile(olrFilePath):
print("OLR file does not exit: | |
<reponame>prometheusresearch/rios.core
#
# Copyright (c) 2015, Prometheus Research, LLC
#
import re
from copy import deepcopy
import colander
from six import iteritems, iterkeys, string_types
from .common import ValidationError, RE_IDENTIFIER, IdentifierString, \
sub_schema, AnyType, OneOfType, StrictBooleanType, OptionalStringType, \
MetadataCollection, RE_PRODUCT_TOKENS
# Public API of this module.
__all__ = (
    'TYPES_SIMPLE',
    'TYPES_COMPLEX',
    'TYPES_ALL',
    'CONSTRAINTS_ALL',
    'TYPES_CONSTRAINED',
    'TYPES_CONSTRAINED_REQUIRED',
    'RE_ENUMERATION_ID',
    'METADATA_PROPS',
    'get_full_type_definition',
    'InstrumentIdentifier',
    'InstrumentReference',
    'Version',
    'Description',
    'EnumerationIdentifier',
    'Enumeration',
    'EnumerationCollection',
    'BoundConstraint',
    'IntegerBoundConstraint',
    'Column',
    'ColumnCollection',
    'Row',
    'RowCollection',
    'TypeDefinition',
    'RequiredOptionalField',
    'InstrumentTypes',
    'FieldType',
    'Field',
    'Record',
    'Instrument',
)

# Scalar field data types.
TYPES_SIMPLE = (
    'text',
    'integer',
    'float',
    'boolean',
    'enumeration',
    'enumerationSet',
    'date',
    'time',
    'dateTime',
)

# Container field data types (Instrument forbids nesting them in each
# other; see Instrument._check_complex_subfields).
TYPES_COMPLEX = (
    'recordList',
    'matrix',
)

TYPES_ALL = TYPES_SIMPLE + TYPES_COMPLEX

# Every constraint keyword a type definition may carry.
CONSTRAINTS_ALL = (
    'range',
    'length',
    'pattern',
    'enumerations',
    'record',
    'columns',
    'rows',
)

# Constraints each base type is ALLOWED to use.
TYPES_CONSTRAINED = {
    'integer': [
        'range',
    ],
    'float': [
        'range',
    ],
    'date': [
        'range',
    ],
    'time': [
        'range',
    ],
    'dateTime': [
        'range',
    ],
    'text': [
        'length',
        'pattern',
    ],
    'enumeration': [
        'enumerations',
    ],
    'enumerationSet': [
        'length',
        'enumerations',
    ],
    'recordList': [
        'length',
        'record',
    ],
    'matrix': [
        'rows',
        'columns',
    ],
}

# Constraints each base type MUST provide.
TYPES_CONSTRAINED_REQUIRED = {
    'enumeration': [
        'enumerations',
    ],
    'enumerationSet': [
        'enumerations',
    ],
    'recordList': [
        'record',
    ],
    'matrix': [
        'rows',
        'columns',
    ],
}

# colander types used to validate 'range' bound values per base type.
RANGE_CONSTRAINT_TYPES = {
    'integer': colander.Integer(),
    'float': colander.Float(),
    'date': colander.Date(),
    'time': colander.Time(),
    'dateTime': colander.DateTime(),
}

# Schema nodes for the recognized top-level metadata properties.
METADATA_PROPS = {
    'author': colander.SchemaNode(
        colander.String(),
    ),
    'copyright': colander.SchemaNode(
        colander.String(),
    ),
    'homepage': colander.SchemaNode(
        colander.String(),
        validator=colander.url,
    ),
    'generator': colander.SchemaNode(
        colander.String(),
        validator=colander.Regex(RE_PRODUCT_TOKENS),
    ),
}

# "major.minor" version strings, no leading zeros.
RE_VERSION = re.compile(r'(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)')

# Enumeration IDs: lowercase alphanumerics; internal '-'/'_' separators
# allowed but never doubled, and never at either end.
RE_ENUMERATION_ID = re.compile(
    r'^(?:[a-z0-9]{1,2}|[a-z0-9](?:[a-z0-9]|[_-](?![_-]))+[a-z0-9])$'
)
# pylint: disable=abstract-method
class Uri(object):
    """Colander validator that requires a value to be a URI with a scheme."""

    # From https://tools.ietf.org/html/rfc3986#appendix-B
    RE_ID = re.compile(
        r'^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?'
    )

    def __call__(self, node, value):
        match = self.RE_ID.match(value)
        if not match:
            raise colander.Invalid(node, 'Value does not resemble a URI')

        scheme = match.groups()[1]
        if not scheme:
            raise colander.Invalid(node, 'No scheme specified in URI')
class InstrumentIdentifier(colander.SchemaNode):
    """String node that must be a URI with an explicit scheme."""
    schema_type = colander.String
    validator = Uri()
class Version(colander.SchemaNode):
    """String node matching a "major.minor" version (RE_VERSION)."""
    schema_type = colander.String
    validator = colander.Regex(RE_VERSION)
class InstrumentReference(colander.SchemaNode):
    """Mapping of {id, version} referencing an instrument definition."""
    id = InstrumentIdentifier()
    version = Version()
    def __init__(self, *args, **kwargs):
        # Reject any keys beyond id/version.
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(InstrumentReference, self).__init__(*args, **kwargs)
class Description(colander.SchemaNode):
    """Optional, non-empty human-readable description string."""
    schema_type = OptionalStringType
    validator = colander.Length(min=1)
    missing = colander.drop
class Enumeration(colander.SchemaNode):
    """A single enumeration entry: a mapping with an optional description."""
    description = Description()
    def __init__(self, *args, **kwargs):
        # Reject any keys beyond description.
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Enumeration, self).__init__(*args, **kwargs)
class EnumerationIdentifier(colander.SchemaNode):
    """String node matching the enumeration-ID pattern (RE_ENUMERATION_ID)."""
    schema_type = colander.String
    validator = colander.Regex(RE_ENUMERATION_ID)
class EnumerationCollection(colander.SchemaNode):
    """Non-empty mapping of enumeration ID -> Enumeration (or null)."""
    def __init__(self, *args, **kwargs):
        # Keys are arbitrary enumeration IDs, so keep unknown entries and
        # validate them by hand below.
        kwargs['typ'] = colander.Mapping(unknown='preserve')
        super(EnumerationCollection, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        """Require at least one entry; check each ID and definition."""
        cstruct = cstruct or {}
        if not cstruct:
            raise ValidationError(
                node,
                'At least one Enumeration must be defined',
            )
        for enum_id, enum_def in iteritems(cstruct):
            sub_schema(EnumerationIdentifier, node, enum_id)
            # A null definition is allowed (ID only, no description).
            if enum_def is not None:
                sub_schema(Enumeration, node, enum_def)
class BoundConstraint(colander.SchemaNode):
    """A {min, max} mapping; both optional, but at least one required.

    When a concrete ``schema_type`` is supplied, min/max values are
    parsed with it and min must not exceed max.
    """
    def __init__(self, schema_type=None, **kwargs):
        # Keep the caller's type (may be None) so validator() knows
        # whether ordering of the bounds can be compared meaningfully.
        self.schema_type = schema_type
        schema_type = schema_type or AnyType()
        super(BoundConstraint, self).__init__(
            colander.Mapping(unknown='raise'),
            colander.SchemaNode(
                schema_type,
                name='min',
                missing=colander.drop,
            ),
            colander.SchemaNode(
                schema_type,
                name='max',
                missing=colander.drop,
            ),
            **kwargs
        )
    def validator(self, node, cstruct):
        """Require at least one bound; enforce min <= max when typed."""
        if len(cstruct) < 1:
            raise ValidationError(
                node,
                'At least one bound must be specified',
            )
        # Only compare bounds when a concrete type was given; AnyType
        # values are not reliably orderable.
        if self.schema_type:
            min_value = cstruct.get('min', None)
            max_value = cstruct.get('max', None)
            if min_value is not None \
                    and max_value is not None \
                    and min_value > max_value:
                raise ValidationError(
                    node,
                    'The minimum bound must be lower than'
                    ' the maximum: %s < %s' % (min_value, max_value),
                )
class IntegerBoundConstraint(BoundConstraint):
    """A BoundConstraint whose min/max must be integers (used by 'length')."""
    def __init__(self, *args, **kwargs):
        super(IntegerBoundConstraint, self).__init__(
            *args,
            schema_type=colander.Integer(),
            **kwargs
        )
class FieldType(colander.SchemaNode):
    """A field's type: either a type-name string or an inline definition."""
    def __init__(self, *args, **kwargs):
        # Accept a bare string (base/custom type name) or a mapping
        # (inline TypeDefinition).
        kwargs['typ'] = OneOfType(
            colander.String,
            colander.Mapping(unknown='preserve'),
        )
        super(FieldType, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        """Check the string is a known/valid type ID, or validate the
        mapping as a full TypeDefinition."""
        if isinstance(cstruct, string_types):
            # A string must be a built-in type or a legal identifier for
            # a custom type declared in the instrument's "types" section.
            if cstruct not in TYPES_ALL \
                    and not RE_IDENTIFIER.match(cstruct):
                raise ValidationError(
                    node,
                    '"%r" is not a valid type identifier' % (cstruct,),
                )
        else:
            sub_schema(TypeDefinition, node, cstruct)
class Column(colander.SchemaNode):
    """A matrix column definition: id, type, and optional flags."""
    id = IdentifierString()
    description = Description()
    required = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    identifiable = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Column, self).__init__(*args, **kwargs)
        # 'type' is attached at construction time rather than declared as
        # a class attribute like the other children.
        self.add(FieldType(name='type'))
class ColumnCollection(colander.SequenceSchema):
    """A non-empty sequence of matrix Column definitions with unique IDs."""

    column = Column()

    def validator(self, node, cstruct):
        """Require at least one column and reject duplicated column IDs."""
        if len(cstruct) < 1:
            raise ValidationError(
                node,
                'Shorter than minimum length 1',
            )

        # Single O(n) pass; the previous ids.count() scan was O(n^2).
        # sorted() also makes the error message deterministic.
        seen = set()
        duplicates = set()
        for col in cstruct:
            col_id = col['id']
            if col_id in seen:
                duplicates.add(col_id)
            else:
                seen.add(col_id)
        if duplicates:
            raise ValidationError(
                node,
                'Column IDs must be unique within a collection:'
                ' %s' % ', '.join(sorted(duplicates)),
            )
class Row(colander.SchemaNode):
    """A matrix row definition: id, optional description/required flag."""
    id = IdentifierString()
    description = Description()
    required = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Row, self).__init__(*args, **kwargs)
class RowCollection(colander.SequenceSchema):
    """A non-empty sequence of matrix Row definitions with unique IDs."""

    row = Row()

    def validator(self, node, cstruct):
        """Require at least one row and reject duplicated row IDs."""
        if len(cstruct) < 1:
            raise ValidationError(
                node,
                'Shorter than minimum length 1',
            )

        # Single O(n) pass; the previous ids.count() scan was O(n^2).
        # sorted() also makes the error message deterministic.
        seen = set()
        duplicates = set()
        for row in cstruct:
            row_id = row['id']
            if row_id in seen:
                duplicates.add(row_id)
            else:
                seen.add(row_id)
        if duplicates:
            raise ValidationError(
                node,
                'Row IDs must be unique within a collection:'
                ' %s' % ', '.join(sorted(duplicates)),
            )
class RangeConstraint(BoundConstraint):
    """A min/max bound constraint for range-capable field types.

    Identical in behavior to :class:`BoundConstraint` with no pinned
    ``schema_type``; the previous pass-through ``__init__`` override was
    a no-op and has been removed. (Range bounds are re-validated against
    the proper colander type later, in Instrument._check_range_constraints.)
    """
class TypeDefinition(colander.SchemaNode):
    """A (possibly custom) field type: a base type plus its constraints.

    Which constraints are legal/required for a given base is enforced
    later by Instrument.check_type, not here.
    """
    base = colander.SchemaNode(colander.String())
    range = RangeConstraint(missing=colander.drop)
    length = IntegerBoundConstraint(missing=colander.drop)
    pattern = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
    )
    enumerations = EnumerationCollection(missing=colander.drop)
    columns = ColumnCollection(missing=colander.drop)
    rows = RowCollection(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(TypeDefinition, self).__init__(*args, **kwargs)
        # Record is defined later in this module, so the 'record'
        # constraint child is attached at construction time.
        self.add(Record(name='record', missing=colander.drop))
class RequiredOptionalField(colander.SchemaNode):
    """Optional string node limited to required/optional/none."""
    schema_type = colander.String
    validator = colander.OneOf([
        'required',
        'optional',
        'none',
    ])
    missing = colander.drop
class Field(colander.SchemaNode):
    """A single instrument field: id, type, and optional behavior flags."""
    id = IdentifierString()
    description = Description()
    type = FieldType()
    required = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    identifiable = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    annotation = RequiredOptionalField()
    explanation = RequiredOptionalField()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Field, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        """Forbid an annotation (other than 'none') on a required field."""
        if 'annotation' in cstruct and cstruct['annotation'] != 'none':
            if 'required' in cstruct and cstruct['required']:
                raise ValidationError(
                    node,
                    'A Field cannot have an annotation'
                    ' if it is required: %s' % cstruct['id'],
                )
class Record(colander.SequenceSchema):
    """A non-empty list of Field definitions with unique field IDs."""

    field = Field()

    def validator(self, node, cstruct):
        """Require at least one field and reject duplicated field IDs."""
        if len(cstruct) < 1:
            raise ValidationError(
                node,
                'Shorter than minimum length 1',
            )

        # Single O(n) pass; the previous ids.count() scan was O(n^2).
        # sorted() also makes the error message deterministic.
        seen = set()
        duplicates = set()
        for field in cstruct:
            field_id = field['id']
            if field_id in seen:
                duplicates.add(field_id)
            else:
                seen.add(field_id)
        if duplicates:
            raise ValidationError(
                node,
                'Field IDs must be unique within a record:'
                ' %s' % ', '.join(sorted(duplicates)),
            )
class InstrumentTypes(colander.SchemaNode):
    """Mapping of custom type ID -> TypeDefinition."""
    def __init__(self, *args, **kwargs):
        # Keys are arbitrary custom-type IDs; validate them by hand below.
        kwargs['typ'] = colander.Mapping(unknown='preserve')
        super(InstrumentTypes, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        """Check each custom type ID is legal (and not a built-in name),
        and validate each definition as a TypeDefinition."""
        cstruct = cstruct or {}
        for type_id, type_def in iteritems(cstruct):
            # Custom types must not shadow built-in type names.
            if type_id in TYPES_ALL or not RE_IDENTIFIER.match(type_id):
                raise ValidationError(
                    node,
                    '"%r" is not a valid custom type ID' % type_id,
                )
            sub_schema(TypeDefinition, node, type_def)
class Instrument(colander.SchemaNode):
    """Top-level schema for a RIOS instrument definition document.

    Beyond structural validation of id/version/title/types/record/meta,
    the validator resolves every type reference to its full definition
    and enforces the per-base-type constraint rules declared in the
    module-level tables (TYPES_CONSTRAINED*, RANGE_CONSTRAINT_TYPES).
    """
    id = InstrumentIdentifier()
    version = Version()
    title = colander.SchemaNode(colander.String())
    description = Description()
    types = InstrumentTypes(missing=colander.drop)
    record = Record()
    meta = MetadataCollection(
        METADATA_PROPS,
        missing=colander.drop,
    )
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Instrument, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        """Deep-check every custom type and every field's type."""
        for _, type_def in iteritems(cstruct.get('types', {})):
            self.check_type(type_def, node.get('types'), cstruct)
        for field in cstruct['record']:
            self.check_type(field['type'], node.get('record'), cstruct)
    def check_type(self, type_def, node, cstruct):
        """Resolve a type to its full definition and run all checks.

        Returns the fully-merged type definition on success; raises
        ValidationError otherwise.
        """
        try:
            # Merges inherited constraints from parent custom types.
            full_type_def = get_full_type_definition(cstruct, type_def)
        except Exception as exc:
            raise ValidationError(
                node,
                str(exc),
            )
        self._check_required_constraints(full_type_def, node, type_def)
        self._check_appropriate_constraints(full_type_def, node)
        self._check_range_constraints(full_type_def, node)
        self._check_complex_subfields(full_type_def, node, cstruct)
        return full_type_def
    def _check_required_constraints(self, full_type_def, node, cstruct):
        # Some base types (e.g. enumeration, matrix) mandate constraints.
        if full_type_def['base'] in iterkeys(TYPES_CONSTRAINED_REQUIRED):
            for con in TYPES_CONSTRAINED_REQUIRED[full_type_def['base']]:
                if con not in full_type_def:
                    raise ValidationError(
                        node,
                        'Type definition "%r" missing required constraint'
                        ' "%s"' % (
                            cstruct,
                            con,
                        ),
                    )
    def _check_appropriate_constraints(self, full_type_def, node):
        # Every present constraint must be legal for the base type.
        for con in CONSTRAINTS_ALL:
            if con in full_type_def:
                if con not in TYPES_CONSTRAINED.get(full_type_def['base'], []):
                    raise ValidationError(
                        node,
                        'Constraint "%s" cannot be used on types based on'
                        ' "%s"' % (
                            con,
                            full_type_def['base'],
                        ),
                    )
    def _check_range_constraints(self, full_type_def, node):
        # Re-validate 'range' bounds with the base type's colander type
        # (the generic RangeConstraint accepted any value shape).
        if 'range' in full_type_def \
                and full_type_def['base'] in RANGE_CONSTRAINT_TYPES:
            sub_schema(
                BoundConstraint(RANGE_CONSTRAINT_TYPES[full_type_def['base']]),
                node,
                full_type_def['range'],
            )
    def _check_complex_subfields(self, full_type_def, node, instrument):
        # recordList records and matrix columns may not themselves be
        # complex types (no nesting of complex in complex).
        for sub_field_constraint in ('record', 'columns'):
            if sub_field_constraint in full_type_def:
                for field in full_type_def[sub_field_constraint]:
                    sub_type = self.check_type(field['type'], node, instrument)
                    if sub_type['base'] in TYPES_COMPLEX:
                        raise ValidationError(
                            node,
                            'Complex types cannot contain other complex'
                            ' types.',
                        )
def get_full_type_definition(instrument, type_def):
"""
Returns a fully merged version of an Instrument Type Object that
includes all constraints inherited from parent data types.
The ``base`` property of this object will always reflect the base RIOS
data type that the specified type definition is an implementation of.
:param instrument:
the full Instrument definition that the Field in question is a part of
:type instrument: dict
:param type_def:
the contents of the ``type`` property from an Instrument Field
definition
:type type_def: dict or str
:rtype: dict
"""
if isinstance(type_def, string_types):
if type_def in TYPES_ALL:
return {
'base': type_def
}
if type_def in iterkeys(instrument.get('types', {})):
return get_full_type_definition(
instrument,
instrument['types'][type_def],
)
raise ValueError(
'no type is defined for identifier "%s"' % (
type_def,
)
)
if isinstance(type_def, dict):
type_def = deepcopy(type_def)
base_type = type_def.pop('base')
try:
parent_type_def = get_full_type_definition(instrument, base_type)
except ValueError:
raise ValueError(
'invalid definition, references undefined base type "%s"' % (
base_type,
)
)
parent_type_def.update(type_def)
return parent_type_def
raise TypeError(
'type_def must be | |
0
self.nsample_array = np.abs(raw_data_array[:, :, :, :, 2])
if fix_old_proj:
self.fix_phase(use_ant_pos=fix_use_ant_pos)
# check if object has all required UVParameters set
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
allow_flip_conj=True,
)
def read_uvfits(
self,
filename,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
time_range=None,
lsts=None,
lst_range=None,
polarizations=None,
blt_inds=None,
keep_all_metadata=True,
read_data=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
fix_old_proj=False,
fix_use_ant_pos=True,
):
"""
Read in header, metadata and data from a uvfits file.
Supports reading only selected portions of the data.
Parameters
----------
filename : str
The uvfits file to read from.
antenna_nums : array_like of int, optional
The antennas numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided. Ignored if read_data is False.
antenna_names : array_like of str, optional
The antennas names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided. Ignored if read_data is False.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None. Ignored if read_data is False.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is a ValueError will be raised. Ignored if read_data is False.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array. Ignored if
read_data is False.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array.
time_range : array_like of float, optional
The time range in Julian Date to keep in the object, must be
length 2. Some of the times in the object should fall between the
first and last elements. Cannot be used with `times`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int, optional
The polarizations numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
Ignored if read_data is False.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used. Ignored if read_data is False.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the visibility, nsample and flag data. If set to False, only
the metadata will be read in. Setting read_data to False results in
a metadata only object.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
Raises
------
IOError
If filename doesn't exist.
ValueError
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data have multi spw with different channel widths.
If the metadata are not internally consistent or missing.
"""
# update filename attribute
basename = os.path.basename(filename)
self.filename = [basename]
self._filename.form = (1,)
with fits.open(filename, memmap=True) as hdu_list:
vis_hdu = hdu_list[0] # assumes the visibilities are in the primary hdu
vis_hdr = vis_hdu.header.copy()
hdunames = uvutils._fits_indexhdus(hdu_list) # find the rest of the tables
# First get everything we can out of the header.
self._set_phased()
# check if we have an spw dimension
if vis_hdr["NAXIS"] == 7:
self.Nspws = vis_hdr.pop("NAXIS5")
self.spw_array = (
uvutils._fits_gethduaxis(vis_hdu, 5).astype(np.int64) - 1
)
# the axis number for phase center depends on if the spw exists
self.phase_center_ra_degrees = float(vis_hdr.pop("CRVAL6"))
self.phase_center_dec_degrees = float(vis_hdr.pop("CRVAL7"))
else:
self.Nspws = 1
self.spw_array = np.array([np.int64(0)])
# the axis number for phase center depends on if the spw exists
self.phase_center_ra_degrees = float(vis_hdr.pop("CRVAL5"))
self.phase_center_dec_degrees = float(vis_hdr.pop("CRVAL6"))
# get shapes
self.Npols = vis_hdr.pop("NAXIS3")
self.Nblts = vis_hdr.pop("GCOUNT")
if self.Nspws > 1:
# If this is multi-spw, use the 'flexible' spectral window setup
self._set_flex_spw()
uvfits_nchan = vis_hdr.pop("NAXIS4")
self.Nfreqs = uvfits_nchan * self.Nspws
self.flex_spw_id_array = np.transpose(
np.tile(np.arange(self.Nspws), (uvfits_nchan, 1))
).flatten()
fq_hdu = hdu_list[hdunames["AIPS FQ"]]
assert self.Nspws == fq_hdu.header["NO_IF"]
# TODO: This is fine for now, although I (karto) think that this
# is relative to the ref_freq, which can be specified as part of
# the AIPS SU table.
# Get rest freq value
ref_freq = uvutils._fits_gethduaxis(vis_hdu, 4)[0]
self.channel_width = np.transpose(
np.tile(abs(fq_hdu.data["CH WIDTH"]), (uvfits_nchan, 1))
).flatten()
self.freq_array = np.reshape(
np.transpose(
(
ref_freq
+ fq_hdu.data["IF FREQ"]
+ np.outer(np.arange(uvfits_nchan), fq_hdu.data["CH WIDTH"])
)
),
(1, -1),
)
else:
self.Nfreqs = vis_hdr.pop("NAXIS4")
self.freq_array = uvutils._fits_gethduaxis(vis_hdu, 4)
# TODO: Spw axis to be collapsed in future release
self.freq_array.shape = (1,) + self.freq_array.shape
self.channel_width = vis_hdr.pop("CDELT4")
self.polarization_array = np.int32(uvutils._fits_gethduaxis(vis_hdu, 3))
# other info -- not required but frequently used
self.object_name = vis_hdr.pop("OBJECT", None)
self.telescope_name = vis_hdr.pop("TELESCOP", None)
self.instrument = vis_hdr.pop("INSTRUME", None)
latitude_degrees = vis_hdr.pop("LAT", None)
longitude_degrees = vis_hdr.pop("LON", | |
<gh_stars>1-10
"""
Created on May 23, 2010
@author: <NAME>
"""
import unittest
from qal.sql.types import *
from qal.dal.types import *
from qal.sql.sql import *
# from qal.tools.diff import diff_strings # Use when debugging tests
r_create_table_mysql = "CREATE TABLE Table1 (" + DEFAULT_ROWSEP + "\
`Table1ID` INTEGER AUTO_INCREMENT NOT NULL," + DEFAULT_ROWSEP + "\
`Table1Name` VARCHAR(400) NULL," + DEFAULT_ROWSEP + "\
`Table1Changed` TIMESTAMP DEFAULT CURRENT_TIMESTAMP NULL," + DEFAULT_ROWSEP + "\
CONSTRAINT `PK_Table1_Table1ID` PRIMARY KEY (Table1ID)," + DEFAULT_ROWSEP + "\
CONSTRAINT `FK_Table1_Table1ID_Table2_Table2ID` FOREIGN KEY (Table1ID) REFERENCES Table2(Table2ID)," + DEFAULT_ROWSEP + "\
CONSTRAINT `CK_Table1_Name` CHECK ((1.3 > 2.4) AND (T1.firstname LIKE '%icklas'))," + DEFAULT_ROWSEP + "\
CONSTRAINT `UQ_Table1_Name` UNIQUE (Table1ID)" + DEFAULT_ROWSEP + "\
) ENGINE=InnoDB"
r_create_table_oracle = "CREATE TABLE \"Table1\" (" + DEFAULT_ROWSEP + "\
\"Table1ID\" integer NOT NULL," + DEFAULT_ROWSEP + "\
\"Table1Name\" VARCHAR2(400) NULL," + DEFAULT_ROWSEP + "\
\"Table1Changed\" TIMESTAMP DEFAULT (CURRENT_TIMESTAMP) NULL," + DEFAULT_ROWSEP + "\
CONSTRAINT \"PK_Table1_Table1ID\" PRIMARY KEY (\"Table1ID\")," + DEFAULT_ROWSEP + "\
CONSTRAINT \"FK_Table1_Table1ID_Table2_Tabl\" FOREIGN KEY (\"Table1ID\") REFERENCES \"Table2\"(\"Table2ID\")," + DEFAULT_ROWSEP + "\
CONSTRAINT \"CK_Table1_Name\" CHECK ((1.3 > 2.4) AND (T1.\"firstname\" LIKE '%icklas'))," + DEFAULT_ROWSEP + "\
CONSTRAINT \"UQ_Table1_Name\" UNIQUE (\"Table1ID\")" + DEFAULT_ROWSEP + "\
)"
"""
#TODO: Should the following too be tested?
"+DEFAULT_ROWSEP + "\
CREATE SEQUENCE seq_Table1_Table1ID_DAL_serial"+DEFAULT_ROWSEP + "\
start with 1"+DEFAULT_ROWSEP + "\
increment by 1 "+DEFAULT_ROWSEP + "\
nomaxvalue;"+DEFAULT_ROWSEP + "\
CREATE TRIGGER tr_Table1_Table1ID_DAL_serial"+DEFAULT_ROWSEP + "\
BEFORE INSERT ON Table1 FOR EACH ROW BEGIN"+DEFAULT_ROWSEP +
"SELECT seq_Table1_Table1ID_DAL_serial.nextval INTO :new.id FROM dual;"+DEFAULT_ROWSEP + "\
END;"+DEFAULT_ROWSEP
"""
r_create_table_postgresql = "CREATE TABLE \"Table1\" (" + DEFAULT_ROWSEP + "\
\"Table1ID\" serial NOT NULL," + DEFAULT_ROWSEP + "\
\"Table1Name\" varchar(400) NULL," + DEFAULT_ROWSEP + "\
\"Table1Changed\" timestamp DEFAULT (current_timestamp) NULL," + DEFAULT_ROWSEP + "\
CONSTRAINT \"PK_Table1_Table1ID\" PRIMARY KEY (\"Table1ID\")," + DEFAULT_ROWSEP + "\
CONSTRAINT \"FK_Table1_Table1ID_Table2_Table2ID\" FOREIGN KEY (\"Table1ID\") REFERENCES \"Table2\"(\"Table2ID\")," + DEFAULT_ROWSEP + "\
CONSTRAINT \"CK_Table1_Name\" CHECK ((1.3 > 2.4) AND (T1.\"firstname\" ILIKE '%icklas'))," + DEFAULT_ROWSEP + "\
CONSTRAINT \"UQ_Table1_Name\" UNIQUE (\"Table1ID\")" + DEFAULT_ROWSEP + "\
)"
r_create_table_db2 = "CREATE TABLE \"Table1\" (" + DEFAULT_ROWSEP + "\
\"Table1ID\" INT GENERATED ALWAYS AS IDENTITY NOT NULL," + DEFAULT_ROWSEP + "\
\"Table1Name\" VARCHAR(400) NULL," + DEFAULT_ROWSEP + "\
\"Table1Changed\" TIMESTAMP DEFAULT CURRENT_TIMESTAMP NULL," + DEFAULT_ROWSEP + "\
CONSTRAINT \"PK_Table1_Table1ID\" PRIMARY KEY (\"Table1ID\")," + DEFAULT_ROWSEP + "\
CONSTRAINT \"FK_Table1_Table1ID_Table2_Table2ID\" FOREIGN KEY (\"Table1ID\") REFERENCES \"Table2\"(\"Table2ID\")," + DEFAULT_ROWSEP + "\
CONSTRAINT \"CK_Table1_Name\" CHECK ((1.3 > 2.4) AND (T1.\"firstname\" LIKE '%icklas'))," + DEFAULT_ROWSEP + "\
CONSTRAINT \"UQ_Table1_Name\" UNIQUE (\"Table1ID\")" + DEFAULT_ROWSEP + "\
)"
r_create_table_sqlserver = "CREATE TABLE Table1 (" + DEFAULT_ROWSEP + "\
[Table1ID] int IDENTITY(1,1) NOT NULL," + DEFAULT_ROWSEP + "\
[Table1Name] varchar(400) NULL," + DEFAULT_ROWSEP + "\
[Table1Changed] DATETIME DEFAULT (GETDATE()) NULL," + DEFAULT_ROWSEP + "\
CONSTRAINT [PK_Table1_Table1ID] PRIMARY KEY (Table1ID)," + DEFAULT_ROWSEP + "\
CONSTRAINT [FK_Table1_Table1ID_Table2_Table2ID] FOREIGN KEY (Table1ID) REFERENCES Table2(Table2ID)," + DEFAULT_ROWSEP + "\
CONSTRAINT [CK_Table1_Name] CHECK ((1.3 > 2.4) AND (T1.firstname LIKE '%icklas'))," + DEFAULT_ROWSEP + "\
CONSTRAINT [UQ_Table1_Name] UNIQUE (Table1ID)" + DEFAULT_ROWSEP + "\
)"
# noinspection PyPep8
r_SELECT_SQL = "SELECT (T1.CountryPrefix + '+' + T1.PhoneNumber + Simple(CAST((T2.CountryPrefix + '+' + T2.PhoneNumber) AS VARCHAR(200)), (T2.CountryPrefix + '+' + T2.PhoneNumber))) AS Field1, (T2.CountryPrefix + '+' + T2.PhoneNumber) AS Field2 FROM testtable AS T1 JOIN testtable AS T2 ON ((1.3 > 2.4) AND (T1.firstname LIKE '%icklas')) WHERE ((1.3 > 2.4) AND (T1.firstname LIKE '%icklas')) ORDER BY T1.Field1 desc, T2.Field1 asc" + DEFAULT_ROWSEP + "LIMIT 1"
# noinspection PyPep8
r_SELECT_DB_DB2 = "SELECT (T1.\"CountryPrefix\" || '+' || T1.\"PhoneNumber\" || Simple(CAST((T2.\"CountryPrefix\" || '+' || T2.\"PhoneNumber\") AS VARCHAR(200)), (T2.\"CountryPrefix\" || '+' || T2.\"PhoneNumber\"))) AS Field1, (T2.\"CountryPrefix\" || '+' || T2.\"PhoneNumber\") AS Field2 FROM \"testtable\" AS T1 JOIN \"testtable\" AS T2 ON ((1.3 > 2.4) AND (T1.\"firstname\" LIKE '%icklas')) WHERE ((1.3 > 2.4) AND (T1.\"firstname\" LIKE '%icklas')) ORDER BY T1.\"Field1\" desc, T2.\"Field1\" asc" + DEFAULT_ROWSEP + "FETCH FIRST 1 ROWS ONLY "
# noinspection PyPep8
r_SELECT_postgresql = "SELECT (T1.\"CountryPrefix\" || '+' || T1.\"PhoneNumber\" || Simple(CAST((T2.\"CountryPrefix\" || '+' || T2.\"PhoneNumber\") AS varchar(200)), (T2.\"CountryPrefix\" || '+' || T2.\"PhoneNumber\"))) AS Field1, (T2.\"CountryPrefix\" || '+' || T2.\"PhoneNumber\") AS Field2 FROM \"testtable\" AS T1 JOIN \"testtable\" AS T2 ON ((1.3 > 2.4) AND (T1.\"firstname\" ILIKE '%icklas')) WHERE ((1.3 > 2.4) AND (T1.\"firstname\" ILIKE '%icklas')) ORDER BY T1.\"Field1\" desc, T2.\"Field1\" asc" + DEFAULT_ROWSEP + "LIMIT 1"
# noinspection PyPep8
r_SELECT_oracle = "SELECT (T1.\"CountryPrefix\" + '+' + T1.\"PhoneNumber\" + Simple(CAST((T2.\"CountryPrefix\" + '+' + T2.\"PhoneNumber\") AS VARCHAR2(200)), (T2.\"CountryPrefix\" + '+' + T2.\"PhoneNumber\"))) AS Field1, (T2.\"CountryPrefix\" + '+' + T2.\"PhoneNumber\") AS Field2 FROM \"testtable\" T1 JOIN \"testtable\" T2 ON ((1.3 > 2.4) AND (T1.\"firstname\" LIKE '%icklas')) WHERE ((1.3 > 2.4) AND (T1.\"firstname\" LIKE '%icklas')) AND (ROWNUM < 2) ORDER BY T1.\"Field1\" desc, T2.\"Field1\" asc"
# noinspection PyPep8
r_SELECT_SQL_Server = "SELECT TOP 1 (T1.CountryPrefix + '+' + T1.PhoneNumber + Simple(CAST((T2.CountryPrefix + '+' + T2.PhoneNumber) AS varchar(200)), (T2.CountryPrefix + '+' + T2.PhoneNumber))) AS Field1, (T2.CountryPrefix + '+' + T2.PhoneNumber) AS Field2 FROM testtable AS T1 JOIN testtable AS T2 ON ((1.3 > 2.4) AND (T1.firstname LIKE '%icklas')) WHERE ((1.3 > 2.4) AND (T1.firstname LIKE '%icklas')) ORDER BY T1.Field1 desc, T2.Field1 asc"
# noinspection PyPep8
r_UPDATE_my_sql = "SET" + DEFAULT_ROWSEP + "dest_column = 'Hello'" + DEFAULT_ROWSEP + "WHERE ((col_1 = '1') AND (col_2 = '1'))"
# noinspection PyPep8
r_UPDATE_DB2 = "SET" + DEFAULT_ROWSEP + "\"dest_column\" = 'Hello'" + DEFAULT_ROWSEP + "WHERE ((\"col_1\" = '1') AND (\"col_2\" = '1'))"
# noinspection PyPep8
r_UPDATE_postgresql = "SET" + DEFAULT_ROWSEP + "\"dest_column\" = 'Hello'" + DEFAULT_ROWSEP + "WHERE ((\"col_1\" = '1') AND (\"col_2\" = '1'))"
# noinspection PyPep8
r_UPDATE_oracle = "SET" + DEFAULT_ROWSEP + "\"dest_column\" = 'Hello'" + DEFAULT_ROWSEP + "WHERE ((\"col_1\" = '1') AND (\"col_2\" = '1'))"
# noinspection PyPep8
r_UPDATE_SQL_Server = "SET" + DEFAULT_ROWSEP + "dest_column = 'Hello'" + DEFAULT_ROWSEP + "WHERE ((col_1 = '1') AND (col_2 = '1'))"
# Generate test objects.
def gen_simple_condition_1():
    """Build the numeric condition ``(1.3 > 2.4)``."""
    condition = ParameterCondition(_operator='>', _and_or='AND')
    condition.left.append(ParameterNumeric(1.3, '+'))
    condition.right.append(ParameterNumeric(2.4, '+'))
    return condition
def gen_simple_condition_2():
    """Build the LIKE condition ``(T1.firstname LIKE '%icklas')``."""
    condition = ParameterCondition(_operator='LIKE', _and_or='AND')
    condition.left.append(ParameterIdentifier('firstname', 'C', 'T1'))
    condition.right.append(ParameterString('%icklas', '+'))
    return condition
def gen_simple_conditions():
    """Bundle the two simple conditions into a ParameterConditions list."""
    conditions = ParameterConditions()
    for part in (gen_simple_condition_1(), gen_simple_condition_2()):
        conditions.append(part)
    return conditions
def gen_complex_conditions():
    """Conditions list holding both simple conditions plus a nested list."""
    conditions = ParameterConditions()
    for part in (gen_simple_condition_1(),
                 gen_simple_condition_2(),
                 gen_simple_conditions()):
        conditions.append(part)
    return conditions
def gen_simple_function():
    """Build the ``Simple(...)`` function call used in SELECT expressions."""
    function = ParameterFunction(_name='Simple', _operator='C')
    function.parameters.append(gen_simple_cast())
    function.parameters.append(gen_simple_expression_2())
    return function
def gen_simpleexpression_1():
    """Concatenate T1's phone-number parts plus the Simple() call."""
    expression = ParameterExpression(_operator='+')
    for item in (ParameterIdentifier('CountryPrefix', 'C', 'T1'),
                 ParameterString('+', 'C'),
                 ParameterIdentifier('PhoneNumber', 'C', 'T1'),
                 gen_simple_function()):
        expression.expressionitems.append(item)
    return expression
def gen_simple_expression_2():
    """Concatenate T2's country prefix, a literal '+', and phone number."""
    expression = ParameterExpression(_operator='+')
    for item in (ParameterIdentifier('CountryPrefix', 'C', 'T2'),
                 ParameterString('+', 'C'),
                 ParameterIdentifier('PhoneNumber', 'C', 'T2')):
        expression.expressionitems.append(item)
    return expression
def gen_simple_cast():
    """Build ``CAST(<expression 2> AS string(200))``."""
    cast = ParameterCast(None, 'string(200)', 'C')
    cast.expression.append(gen_simple_expression_2())
    return cast
def gen_complex_expression():
    """Expression holding an empty sub-expression, a '+' literal and T1.PhoneNumber."""
    outer = ParameterExpression()
    # the nested expression is deliberately left empty by this fixture
    outer.expressionitems.append(ParameterExpression())
    outer.expressionitems.append(ParameterString('+', 'C', '\\'))
    outer.expressionitems.append(ParameterIdentifier('PhoneNumber', 'C', 'T1'))
    return outer
def gen_complex_function():
    """Build the ``Test(...)`` function call with two expression arguments."""
    function = ParameterFunction(_name='Test', _operator='+')
    function.parameters.append(gen_simpleexpression_1())
    function.parameters.append(gen_complex_expression())
    return function
def gen_simple_case():
    """Build a CASE with two WHEN branches and an ELSE."""
    case = ParameterCase()
    case.when_statements.append(
        ParameterWhen(gen_simple_conditions(), gen_simpleexpression_1()))
    case.when_statements.append(
        ParameterWhen(gen_simple_conditions(), gen_simple_expression_2()))
    case.else_statement = gen_simple_expression_2()
    return case
def gen_simple_field_1():
    """Field aliased ``Field1`` wrapping expression 1."""
    field = ParameterField(None, _alias='Field1')
    field.expression.append(gen_simpleexpression_1())
    return field
def gen_simple_field_2():
    """Field aliased ``Field2`` wrapping expression 2."""
    field = ParameterField(None, _alias='Field2')
    field.expression.append(gen_simple_expression_2())
    return field
def gen_simple_source_1():
    """Source ``testtable AS T1`` joined on the simple conditions."""
    source = ParameterSource(None, gen_simple_conditions(), 'T1')
    source.expression.append(ParameterIdentifier('testtable', 'C'))
    return source
def gen_simple_source_2():
    """Source ``testtable AS T2`` joined on the simple conditions."""
    source = ParameterSource(None, gen_simple_conditions(), 'T2')
    source.expression.append(ParameterIdentifier('testtable', 'C'))
    return source
def gen_simple_select():
    """SELECT with two fields, two sources, a two-key ORDER BY and LIMIT 1."""
    select = VerbSelect(_operator='C')
    select.top_limit = 1
    select.fields.append(gen_simple_field_1())
    select.fields.append(gen_simple_field_2())
    select.sources.append(gen_simple_source_1())
    select.sources.append(gen_simple_source_2())
    # ORDER BY T1.Field1 desc, T2.Field1 asc
    for prefix, direction in (('T1', 'desc'), ('T2', 'asc')):
        order_item = ParameterOrderByItem(_direction=direction)
        order_item.expressionitems.append(
            ParameterIdentifier('Field1', _prefix=prefix))
        select.order_by.append(order_item)
    return select
def gen_simple_insert():
    """INSERT INTO test (three columns) fed by the simple SELECT."""
    insert = VerbInsert()
    insert.destination_identifier = ParameterIdentifier('test')
    insert.data = gen_simple_select()
    for column_name in ('Table1ID', 'Table1Name', 'Table1Changed'):
        insert.column_identifiers.append(ParameterIdentifier(column_name))
    return insert
def gen_simple_update():
    """UPDATE test SET dest_column='Hello' WHERE col_1='1' AND col_2='1'."""
    conditions = ParameterConditions()
    conditions.append(
        ParameterCondition(ParameterIdentifier("col_1"), ParameterString("1"), "="))
    conditions.append(
        ParameterCondition(ParameterIdentifier("col_2"), ParameterString("1"), "=", "AND"))
    assignments = SqlList()
    assignments.append(
        ParameterAssignment(_left=ParameterIdentifier("dest_column"),
                            _right=ParameterString("Hello")))
    return VerbUpdate(_table_identifier=ParameterIdentifier("test"),
                      _assignments=assignments,
                      _conditions=conditions)
def gen_simple_create():
    """CREATE TABLE Table1 with serial PK, FK, CHECK and UNIQUE constraints."""
    pk = ParameterConstraint('PK_Table1_Table1ID', "PRIMARY KEY",
                             [ParameterIdentifier('Table1ID')])
    fk = ParameterConstraint('FK_Table1_Table1ID_Table2_Table2ID', "FOREIGN KEY",
                             [ParameterIdentifier('Table1ID'),
                              ParameterIdentifier('Table2'),
                              ParameterIdentifier('Table2ID')])
    check = ParameterConstraint('CK_Table1_Name', "CHECK",
                                [ParameterIdentifier('Table1ID')])
    check.checkconditions = gen_simple_conditions()
    unique = ParameterConstraint('UQ_Table1_Name', "UNIQUE",
                                 [ParameterIdentifier('Table1ID')])
    changed_column = ParameterColumndefinition('Table1Changed', 'timestamp', False)
    changed_column.default = '::currdatetime::'
    table = VerbCreateTable('Table1')
    table.columns.append(ParameterColumndefinition('Table1ID', 'serial', True))
    table.columns.append(ParameterColumndefinition('Table1Name', 'string(400)', False))
    table.columns.append(changed_column)
    for constraint in (pk, fk, check, unique):
        table.constraints.append(constraint)
    return table
class ParameterTest(unittest.TestCase):
def test_00_ParameterCondition_simple(self):
self.maxDiff = None
param = gen_simple_condition_1()
paramclass = param.__class__.__name__
_testvalue = '(1.3 > 2.4)'
self.assertEqual(param.as_sql(DB_MYSQL), _testvalue, paramclass + '.as_sql(DB_MYSQL) failed.')
self.assertEqual(param.as_sql(DB_ORACLE), _testvalue, paramclass + '.as_sql(DB_ORACLE) failed.')
self.assertEqual(param.as_sql(DB_POSTGRESQL), _testvalue, paramclass + '.as_sql(DB_POSTGRESQL) failed.')
self.assertEqual(param.as_sql(DB_DB2), _testvalue, paramclass + '.as_sql(DB_DB2) failed.')
self.assertEqual(param.as_sql(DB_SQLSERVER), _testvalue, paramclass + '.as_sql(DB_SQLSERVER) failed.')
# noinspection PyPep8
def test_01_ParameterCondition_complex(self):
self.maxDiff = None
param = gen_complex_conditions()
paramclass = param.__class__.__name__
_testvalue = "((1.3 > 2.4) AND (T1.firstname LIKE '%icklas') AND ((1.3 > 2.4) AND (T1.firstname LIKE '%icklas')))"
_testvalue_Oracle = "((1.3 > 2.4) AND (T1.\"firstname\" LIKE '%icklas') AND ((1.3 > 2.4) AND (T1.\"firstname\" LIKE '%icklas')))"
_testvalue_PostgreSQL = "((1.3 > 2.4) AND (T1.\"firstname\" ILIKE '%icklas') AND ((1.3 > | |
including) _crsr_loc
self._crsr_loc : Tuple[int, int] # cursor position in logical buffer (lines can be longer than screen width)
# _crsr_loc gets moved by printing, and also by the screen editor.
# It may sit exactly on terminal border (col == _term_size[0]);
# in this case, it will wrap once written to.
self._term_loc : Tuple[int, int] # cursor position on terminal screen (kept in sync with _crsr_loc)
self._term_size : Tuple[int, int] # current terminal width and height in chars
self._term_top_loc : Tuple[int, int] # buffer location of first visible row/col (col != 0 for over-wide lines)
self._dirty : int = 0 # this many chars left of cursor pos have not been drawn yet, and have uniform characteristics
self._cursor_visible : bool = False # true if cursor is visible
self._insert_printable = False # flag for editor that printable chars should be inserted on-screen
# window
self._cell_size : Tuple[int, int] # size of a character in pixels
self._wndw_size : Tuple[int, int] # window size in pixels
self._windowsurface : pygame.Surface # ...correct?
self._term_surface : pygame.Surface # ...what is this?
self._pending_events : List[pygame.event] = []
self._cursor_buffer : pygame.Surface # pixels under cursor are stored here; valid if _cursor_visible
# modifyable user options
self._font : pygame.Font
self._current_colors : Tuple[pygame.Color, pygame.Color] # current fg and bg color
self._current_colors_reversed : Tuple[pygame.Color, pygame.Color] # current fg and bg color
self._rev_mode : bool = False
self._current_colors_or_reversed : Tuple[pygame.Color, pygame.Color] # current fg and bg color reflecting _rev_mode
self._tabsize : int = 8 # how many spaces a tab inserts.
self._autoupdate : bool = True
# modifyable user state
self._input_mode : int = 0 # -1: screen editor; 0..: constrained with #chars of prompt; None: raw
self._escString : str = None
# immutable user options (passed to or determined in constructor)
self._half_height : bool = True
self._managesdisplay : bool
self._autodisplayupdate : bool
self._autoblit : bool
def _update_colors_or_reversed(self):
self._current_colors_or_reversed = self._current_colors if not self._rev_mode else \
self._current_colors_reversed
def _set_colors(self, colors : Tuple[pygame.Color, pygame.Color]):
self._current_colors = colors
self._current_colors_reversed = (colors[1], colors[0])
self._update_colors_or_reversed()
    def _set_rev_mode(self, rev_mode : bool):
        """Enable/disable reverse video and refresh the effective colors."""
        self._rev_mode = rev_mode
        self._update_colors_or_reversed()
# invalidate character left of current location; render if going beyond one line
    def _add_to_dirty(self, n : int):
        """Mark *n* more characters left of the cursor as not yet drawn."""
        self._dirty += n
    def _flush_dirty(self):
        """Render the pending 'dirty' run of characters left of the cursor.

        ``_dirty`` counts how many characters immediately left of
        ``_crsr_loc`` still need drawing; per the class invariant they
        share uniform characteristics, so each row segment is drawn with
        the color attribute of its first cell.
        """
        if self._dirty:
            current_row = self._content[self._crsr_loc[1]]
            # now draw s to left of _term_loc. If s spans multiple lines, do multiple draws
            crsr_x = self._crsr_loc[0]
            term_x = self._term_loc[0]
            term_y = self._term_loc[1]
            # the dirty section may span multiple terminal lines, which we will draw one by one
            while self._dirty > 0:
                term_y = self._lazy_scroll(term_y)  # make sure this row is visible
                # chars to draw on this terminal row (at most one row's width)
                n = (self._dirty - 1) % self._term_size[0] + 1
                term_x -= n
                crsr_x -= n
                dirty_cells = current_row[crsr_x : crsr_x + n]
                s = "".join(cell_tuple[0] for cell_tuple in dirty_cells)
                # run is color-uniform; take the first cell's colors
                colors = current_row[crsr_x][1]
                self._draw_text((term_x, term_y), colors, s)
                self._dirty -= n
                term_y -= 1  # in case string is longer, position new end at right of screen one row up
                term_x = self._term_size[0]
    def _draw_text(self, term_loc : Tuple[int, int], colors, s : str):
        """Draw string *s* at terminal cell *term_loc* using (fg, bg) *colors*.

        *term_loc* is a (col, row) pair in terminal cells; *s* is drawn
        left-to-right starting at that cell.
        """
        if s[0] in C64_INV_CHARS: # control chars such as color-change are rendered in reverse
            assert len(s) == 1
            s = C64_INV_CHARS[s]
        x = self._cell_size[0] * term_loc[0]
        y = self._cell_size[1] * term_loc[1]
        cell_rect = pygame.Rect(x, y, self._cell_size[0] * len(s), self._cell_size[1])
        # render the character and draw it to the surface
        char_surf = self._font.render(s, 1, colors[0], colors[1])
        tgt_rect = char_surf.get_rect()
        if self._half_height:
            # squash the glyph to half height before blitting
            char_surf = pygame.transform.scale(char_surf, (tgt_rect.width, tgt_rect.height // 2))
            tgt_rect = char_surf.get_rect()
        tgt_rect.topleft = (x, y)
        # @TODO: try .bottom, maybe it works better re rounding for small fonts
        if cell_rect != tgt_rect:
            # glyph doesn't exactly cover the cell: clear the cell first
            self._term_surface.fill(colors[1], cell_rect)
        self._term_surface.blit(char_surf, tgt_rect)
        # @BUGBUG: seems rendered chars are 1 pixel narrower
# if terminal cursor has moved out of the screen, scroll to bring it back
def _lazy_scroll(self, term_y : int):
scroll = 0
if term_y < 0:
scroll = term_y # negative, means scroll down
elif term_y >= self._term_size[1]: # _term_loc[1] always becomes visible
scroll = term_y - self._term_size[1] + 1
if scroll != 0:
self._scroll_vert(scroll)
return term_y - scroll
def _redraw_current_line(self):
rel_loc = self._current_rel_term_loc()
term_y0 = self._term_loc[1] - rel_loc[1]
term_y1 = term_y0 + rel_loc[2]
self._redraw_term_rows((term_y0, term_y1))
def _redraw_screen(self):
self._redraw_term_rows((0, self._term_size[1]))
self._blit_to_screen()
def _redraw_term_rows(self, term_y_range : Tuple[int, int]):
    """Redraw terminal rows in the half-open range [term_y_range[0], term_y_range[1]).

    Walks screen rows from the top of the window, mapping each one back
    into buffer ('crsr') coordinates, drawing the visible slice of each
    logical line and clearing the remainder of the row with the current
    background color.  Also re-derives _term_loc from _crsr_loc as a
    fix-up after resizes.
    """
    self._show_cursor(False)
    # loop over all term rows and redraw those that intersect
    crsr_loc = self._term_top_loc
    w = self._term_size[0]
    for y in range(self._term_size[1]):
        if y >= term_y_range[1]:
            break
        has_line = crsr_loc[1] < len(self._content)
        if has_line:
            line = self._content[crsr_loc[1]]
            rel_loc = self._rel_term_loc(crsr_loc, line)
        if y >= term_y_range[0]:
            if has_line:
                # x0/x1 are buffer-column bounds of the slice shown on this row
                x0 = rel_loc[1] * w
                # redraw from crsr_loc to end of line
                x1 = x0 + w
                if x1 > len(line):
                    x1 = len(line)
                for x in range(x0,x1):
                    cell = line[x]
                    self._draw_text((x - x0,y), cell[1], cell[0])
                # this function is called after a resize; take chance to fix up term_loc
                if crsr_loc[1] == self._crsr_loc[1] and self._crsr_loc[0] >= x0 and self._crsr_loc[0] <= x1:
                    self._term_loc = (self._crsr_loc[0] - x0, y)
            else:
                x0 = 0
                x1 = 0
            # clear the rest
            # NOTE(review): x1 is a buffer column but w is the screen width;
            # for wrapped rows (x0 > 0) this looks like it should compare
            # (x1 - x0) < w -- confirm before changing
            if x1 < w:
                bg_rect = (self._cell_size[0] * x1, self._cell_size[1] * y, self._cell_size[0] * (w - x1), self._cell_size[1])
                self._term_surface.fill(self._current_colors[1], bg_rect)
        if has_line: # advance one term row in crsr space
            if rel_loc[1] == rel_loc[2] - 1: # in last row of line: next line
                crsr_loc = (0, crsr_loc[1] + 1)
            else: # otherwise step through line by term width
                crsr_loc = (crsr_loc[0] + w, crsr_loc[1])
# shift (=scroll) up the terminal window into the buffer by 'rows' (which may be negative -> shift down)
def _scroll_vert(self, rows):
    """Shift the visible terminal window by *rows* (positive = up, negative = down).

    Scrolls the pixel surface, updates _term_loc and _term_top_loc to
    match, then redraws the rows that were newly exposed by the scroll.
    """
    # scroll visually
    self._term_surface.scroll(0, -rows * self._cell_size[1])
    # screen-row range exposed by the scroll (top rows when scrolling down,
    # bottom rows when scrolling up)
    term_y0 = 0 if rows < 0 else \
        self._term_size[1] - rows
    term_y1 = term_y0 + abs(rows)
    # adjust the terminal location
    self._term_loc = (self._term_loc[0], self._term_loc[1] - rows)
    # adjust the top location
    while rows > 0: # scrolling up
        self._term_top_loc = (self._term_top_loc[0] + self._term_size[0], self._term_top_loc[1])
        if self._term_top_loc[0] >= len(self._content[self._term_top_loc[1]]): # hit end
            self._term_top_loc = (0, self._term_top_loc[1] + 1) # top is start of next logical line
        rows -= 1
    while rows < 0: # scrolling down
        self._term_top_loc = (self._term_top_loc[0] - self._term_size[0], self._term_top_loc[1])
        if self._term_top_loc[0] < 0: # ran beyond start
            # NOTE(review): multiplies by _term_size[1] (height) while every
            # other wrap computation uses _term_size[0] (width) -- looks
            # suspicious; confirm before changing
            self._term_top_loc = ((self._current_rel_term_loc()[2] - 1) * self._term_size[1],
                                  self._term_top_loc[1] - 1)
        rows += 1
    # redraw the newly exposed rows (needed to get all state in sync first)
    self._redraw_term_rows((term_y0, term_y1))
# make sure cursor location exists in content
def _pad_rows(self):
    """Extend self._content with empty rows so the cursor row exists.

    Bugfix: the previous ``[[]] * missing_rows`` repeated ONE shared list
    object, so mutating any padded row (e.g. via _pad_current_row's
    in-place ``+=``) mutated all of them.  Each row must be a fresh list.
    """
    missing_rows = 1 + self._crsr_loc[1] - len(self._content)
    if missing_rows > 0:
        # one distinct list per row; also makes the current row itself exist
        self._content += [[] for _ in range(missing_rows)]
# @TODO: if it gets too long, then drop lines from start (need to adjust _crsr_loc[1] and _term_first_loc[1])
def _pad_current_row(self):
    """Pad the cursor's row with blank cells up to (but not including) the cursor column."""
    row = self._content[self._crsr_loc[1]]
    shortfall = self._crsr_loc[0] - len(row)
    if shortfall > 0:
        blank = (' ', self._current_colors_or_reversed, True)
        # cells are immutable tuples, so repeating the same one is safe;
        # deliberately do NOT create the cell at the cursor column itself
        row.extend([blank] * shortfall)
# move cursor to a completely different location
def _move_term_loc_to(self, term_loc):
    # @TODO: not implemented yet -- goto_xy() depends on this doing the actual move
    pass
# move cursor to an on-screen character cell
# Notes:
# - cell may be inside a wrapped line; must handle that
# - location may be off-screen; this forces a scroll
def goto_xy(self, loc):
    """Move the cursor to on-screen cell *loc*; translates screen to buffer coordinates."""
    # @TODO: I think this line is nonsense
    self._move_term_loc_to((loc[0] + self._term_top_loc[0], loc[1] + self._term_top_loc[1]))
# determine relative terminal coordinates within current line (used e.g. when moving the cursor)
# returns (rel term_x, rel term_y, total number of term rows)
# @TODO: rel term_x == _crsr_loc[0] i.e. redundant; maybe we can remove it
def _current_rel_term_loc(self):
    """(rel term_x, rel term_y, total term rows) of the cursor within its logical line."""
    return self._rel_term_loc(self._crsr_loc, self._current_line())
def _rel_term_loc(self, crsr_loc : Tuple[int, int], current_line : list):
    """Map a cursor position to (column, row, total rows) within its wrapped line."""
    width = self._term_size[0]
    col = crsr_loc[0] % width
    row = crsr_loc[0] // width
    total = self._num_term_rows(current_line)
    if row == total:
        # cursor sits exactly at the line end: keep it on the right margin
        # of the last row instead of wrapping to a non-existent row
        col, row = width, total - 1
    return (col, row, total)
def _num_term_rows(self, current_line : list): # how many terminal rows does 'line' occupy?
return 1 if not current_line else \
(len(current_line) - | |
<gh_stars>1-10
#!/usr/bin/env python
#nknguyen at soe ucsc edu
#Tue Jul 17 09:09:24 PDT 2012
#Immuno-seq pipeline
'''
Overlap stats
'''
import os, sys, re, time, gzip, random
import random as rand
import cPickle as pickle
from optparse import OptionParser
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
from sonLib.bioio import logger
from sonLib.bioio import system
import immunoseqLib as iseqlib
from immunoseq.lib.overlapLib import *
from immunoseq.lib.overlap2Lib import *
from immunoseq.lib.lendistLib import *
import numpy as np
from scipy.stats import ttest_ind
###################################
############# OBJECTS #############
###################################
class PairOverlapStats():
    '''Read/clone overlap statistics for one pair of samples.

    Stores raw totals, overlapping counts for each direction, their
    percentages (via iseqlib.getPc) and the averages over the two
    directions.
    '''
    def __init__(self, reads1, reads2, clones1, clones2, oreads1, oreads2, oclones1, oclones2):
        self.reads1 = reads1
        self.reads2 = reads2
        self.oreads1 = oreads1
        self.oreads2 = oreads2
        self.oreadsAvr = float(oreads1 + oreads2)/2.0
        #percentage of each sample's reads that overlap the other sample
        self.oreads1pc = iseqlib.getPc(oreads1, reads1)
        self.oreads2pc = iseqlib.getPc(oreads2, reads2)
        self.oreadsAvrPc = (self.oreads1pc + self.oreads2pc)/2.0
        self.clones1 = clones1
        self.clones2 = clones2
        self.oclones1 = oclones1
        self.oclones2 = oclones2
        self.oclonesAvr = float(oclones1 + oclones2)/2.0
        #percentage of each sample's clones that overlap the other sample
        self.oclones1pc = iseqlib.getPc(oclones1, clones1)
        self.oclones2pc = iseqlib.getPc(oclones2, clones2)
        self.oclonesAvrPc = (self.oclones1pc + self.oclones2pc)/2.0
##############################################################################################
#========================= MAIN PIPELINE ====================================================#
##############################################################################################
class Setup(Target):
    '''First target of the pipeline: spawn one ReadFasta child per input
    fasta file, then hand over to SamplingAndAnalyses once all are read.
    '''
    def __init__(self, options):
        Target.__init__(self)
        self.options = options
    def run(self):
        tempdir = self.getGlobalTempDir()
        #one child target per .fa file in the input directory
        for fastafile in iseqlib.getfiles(self.options.indir, 'fa'):
            path = os.path.join(self.options.indir, fastafile)
            self.addChildTarget( ReadFasta(path, tempdir, self.options.minReadCount) )
        self.setFollowOnTarget( SamplingAndAnalyses(tempdir, self.options) )
class ReadFasta(Target):
    '''Read one input fasta file and pickle the resulting Sample object
    (sample.seqs = {header: Seq}) into <outdir>/<name>.pickle.
    '''
    def __init__(self, file, outdir, mincount):
        Target.__init__(self)
        self.file = file
        self.outdir = outdir
        self.mincount = mincount
    def run(self):
        name = os.path.basename(self.file).split('.')[0]
        sample = iseqlib.Sample(name)
        mode = 1 #seqs = {header: seq}
        seqs, total = iseqlib.readFile( self.file, self.mincount, mode)
        #convert raw counts to frequencies
        for s in seqs.values():
            s.setFreq(total)
        sample.seqs = seqs
        sample.setTotal(total)
        picklefile = os.path.join(self.outdir, "%s.pickle" %name)
        #bugfix: close the gzip stream explicitly -- the old code left the
        #handle dangling, relying on GC to flush the pickle to disk
        f = gzip.open(picklefile, "wb")
        try:
            pickle.dump( sample, f )
        finally:
            f.close()
class SamplingAndAnalyses(Target):
    '''Pick the sample sets, then either subsample each sample a number of
    times (options.sampling) or filter and analyze them directly; finally
    aggregate the per-sampling results via AverageResults.
    '''
    def __init__(self, indir, options):
        Target.__init__(self)
        self.indir = indir
        self.options = options
    def run(self):
        globalTempDir = self.getGlobalTempDir()
        #Before sampling, chose 'numcombs' of sets of samples:
        group2samplesList = [] #list of group2subsetofsamples
        if self.options.numsamples:
            #draw 'numcombs' random subsets of 'numsamples' samples per group
            for i in xrange(self.options.numcombs):
                pickedGroup2samples = pickSamples(self.options.group2samples, self.options.numsamples)
                group2samplesList.append( pickedGroup2samples )
        else:
            group2samplesList.append(self.options.group2samples)
        self.options.g2slist = group2samplesList
        #print sampleSetId, group2subsetofsamples of that set:
        outfile = os.path.join(self.options.outdir, 'sampleSets.txt')
        f = open(outfile, 'w')
        for i, g2s in enumerate(group2samplesList):
            f.write("#Set %d\n" %i)
            for g, subsamples in g2s.iteritems():
                f.write('%s\t%s\n' %(g, ','.join(subsamples)))
            f.write("#\n")
        f.close()
        if self.options.sampling:
            #each sampling iteration gets its own temp subdirectory
            for i in xrange(self.options.numsam): #sampling a number of times
                samplingdir = os.path.join(globalTempDir, "%d" %i) #temp/i/
                self.addChildTarget( SamplingSamples(self.indir, samplingdir, self.options) ) #seqdir, outdir, options
        else:
            tempoutdir = os.path.join(globalTempDir, "0")
            system("mkdir -p %s" %tempoutdir)
            filterSamples(self.indir, self.options.vs, self.options.js)
            self.addChildTarget( Analyses(self.indir, tempoutdir, self.options) )
        #Calculate means & standard deviations of samplings
        self.setFollowOnTarget( AverageResults(globalTempDir, self.options) )
class SamplingSamples(Target):
    '''Subsample every sample pickle found in indir (one Sampling child
    per sample), then run the analyses on the subsampled set.
    '''
    def __init__(self, indir, statsOutdir, options):
        Target.__init__(self)
        self.indir = indir
        self.outdir = statsOutdir
        self.options = options
    def run(self):
        tempdir = self.getGlobalTempDir()
        for fname in iseqlib.getfiles(self.indir, "pickle"):
            name = '.'.join(fname.split('.')[:-1]) #strip the .pickle suffix
            infile = os.path.join(self.indir, "%s.pickle" %name)
            outfile = os.path.join(tempdir, "%s.pickle" %name) #temp/sample.pickle
            self.addChildTarget( Sampling(infile, outfile, self.options) )
        self.setFollowOnTarget( Analyses(tempdir, self.outdir, self.options) )
class Sampling(Target):
    '''Subsample one sample pickle down to options.sampling sequences,
    filter by the selected V/J genes, and pickle the result to outfile.
    '''
    def __init__(self, samplefile, outfile, options):
        Target.__init__(self)
        self.samplefile = samplefile
        self.outfile = outfile
        self.options = options
        self.size = options.sampling #number of sequences to draw
    def run(self):
        sample = pickle.load( gzip.open(self.samplefile, "rb") )
        if self.options.uniq:
            #presumably samples unique clones weighted by read count -- see iseqlib
            subsample = iseqlib.samplingSample_weightedUniq(sample, self.size)
            #subsample = iseqlib.samplingSample_uniq(sample, self.size)
        else:
            subsample = iseqlib.samplingSample(sample, self.size)
        #filtering if selected Vs and/or selected Js were specified
        subsample = iseqlib.filterSampleByGenes(subsample, self.options.vs, self.options.js)
        pickle.dump(subsample, gzip.open(self.outfile, "wb"))
class Analyses(Target):
    '''Fan out one RunSampleSetAnalyses child per chosen sample set.
    '''
    def __init__(self, indir, outdir, options):
        Target.__init__(self)
        self.indir = indir
        self.outdir = outdir #temp/sampling#
        self.options = options
    def run(self):
        for setid, group2samples in enumerate(self.options.g2slist):
            setdir = os.path.join(self.outdir, "%d" %setid) #outdir/sampling#/set#
            system('mkdir -p %s' %setdir)
            self.addChildTarget( RunSampleSetAnalyses(self.indir, setdir, group2samples, self.options) )
####################################
############# ANALYSES #############
####################################
class RunSampleSetAnalyses(Target):
    '''Run the analyses requested in options.analyses (pairwise overlap
    and/or number-of-samples-vs-clones) for one set of samples.
    '''
    def __init__(self, indir, outdir, group2samples, options):
        Target.__init__(self)
        self.indir = indir
        self.outdir = outdir
        self.g2s = group2samples
        self.options = options
    def run(self):
        #Pairwise overlap:
        if 'pairwiseOverlap' in self.options.analyses:
            pairOutfile = os.path.join(self.outdir, "pairOverlap.pickle")
            self.addChildTarget( PairwiseOverlap(self.indir, pairOutfile, self.g2s, self.options) )
        #number of samples versus percentage of clones
        if 'numsamVsClones' in self.options.analyses:
            numSamOutfile = os.path.join(self.outdir, "numSamVsClones.pickle")
            lendistdir = os.path.join(self.outdir, 'lendist')
            system('mkdir -p %s' %lendistdir)
            self.addChildTarget( NumSamplesVsClones(self.indir, numSamOutfile, lendistdir, self.g2s, self.options) )
class PairwiseOverlap(Target):
    '''Compute read/clone overlap statistics for every pair of samples
    within each group -- and across groups when options.crossGroup is set
    -- and pickle {group: {pair: PairOverlapStats}} to outfile.
    '''
    def __init__(self, indir, outfile, group2samples, options):
        Target.__init__(self)
        self.indir = indir
        self.outfile = outfile
        self.g2s = group2samples
        self.options = options
    def run(self):
        #Load the samples:
        g2s = {} #key = group, val = list of Sample objects
        for g, names in self.g2s.iteritems():
            g2s[g] = []
            for s in names:
                picklefile = os.path.join(self.indir, "%s.pickle" %s)
                sample = pickle.load( gzip.open(picklefile, "rb") )
                g2s[g].append(sample)
        g2stats = {} #key = group, val = {pair: stats}
        #parameters forwarded to getPairwiseOverlap (see overlapLib)
        cutoffs = [0.0]
        mode = 2
        discrete = False
        #Pairs of intra-group samples
        for g, samples in g2s.iteritems():
            g2stats[g] = {}
            sample2aa2v2j = iseqlib.getsample2aa2v2j(samples)
            #all posible pairs
            for i in xrange(0, len(samples) -1):
                s1 = samples[i]
                aa2v2j1 = sample2aa2v2j[ s1.name ]
                for j in xrange(i+1, len(samples)):
                    s2 = samples[j]
                    pair = '-'.join( sorted([s1.name, s2.name]) ) #pair = name1,name2
                    aa2v2j2 = sample2aa2v2j[ s2.name ]
                    reads1, reads2, clones1, clones2, stats1, stats2 = getPairwiseOverlap(s1.seqs, s2.seqs, aa2v2j1, aa2v2j2, cutoffs, mode, discrete)
                    #index [0] corresponds to the single 0.0 cutoff requested above
                    stats = PairOverlapStats( reads1[0], reads2[0], clones1[0], clones2[0], stats1['oreads'][0], stats2['oreads'][0], stats1['oclones'][0], stats2['oclones'][0] )
                    g2stats[g][pair] = stats
        #Pair of inter-group samples:
        if self.options.crossGroup:
            groups = g2s.keys()
            for i1 in xrange( len(groups) -1 ):
                g1 = groups[i1]
                samples1 = g2s[g1]
                sample2aa2v2j1 = iseqlib.getsample2aa2v2j(samples1)
                for i2 in xrange( i1+1, len(groups) ):
                    g2 = groups[i2]
                    samples2 = g2s[g2]
                    sample2aa2v2j2 = iseqlib.getsample2aa2v2j(samples2)
                    #cross-group results are stored under the combined group key
                    g = '-'.join( sorted([g1, g2]) )
                    g2stats[g] = {}
                    for s1 in samples1:
                        aa2v2j1 = sample2aa2v2j1[s1.name]
                        for s2 in samples2:
                            aa2v2j2 = sample2aa2v2j2[s2.name]
                            pair = '-'.join( sorted([s1.name, s2.name]) ) #pair = name1-name2
                            reads1, reads2, clones1, clones2, stats1, stats2 = getPairwiseOverlap(s1.seqs, s2.seqs, aa2v2j1, aa2v2j2, cutoffs, mode, discrete)
                            stats = PairOverlapStats( reads1[0], reads2[0], clones1[0], clones2[0], stats1['oreads'][0], stats2['oreads'][0], stats1['oclones'][0], stats2['oclones'][0] )
                            g2stats[g][pair] = stats
        #pickle group2stats
        pickle.dump(g2stats, gzip.open(self.outfile, "wb"))
class NumSamplesVsClones(Target):
    '''Per group: distribution of clones by the number of samples sharing
    them (unique-clone and read-weighted), plus length distributions of
    sequences shared by 1, 2, 3, ... samples.
    '''
    def __init__(self, indir, outfile, lendistdir, group2samples, options):
        Target.__init__(self)
        self.indir = indir
        self.outfile = outfile
        self.lendistdir = lendistdir
        self.g2s = group2samples
        self.options = options
    def run(self):
        #Load the samples:
        g2s = {} #key = group, val = list of Sample objects
        for g, names in self.g2s.iteritems():
            g2s[g] = []
            for s in names:
                picklefile = os.path.join(self.indir, "%s.pickle" %s)
                sample = pickle.load( gzip.open(picklefile, "rb") )
                g2s[g].append(sample)
        #For each group, calculate the clone vs number of sample shared distribution
        g2stats = {} #NOTE(review): never written to below -- appears unused
        groupsamples = []
        for g, samples in g2s.iteritems():
            #merge each group's samples into one combined Sample
            groupsample = combineSamples(samples, g)
            groupsamples.append(groupsample)
        uniq = True
        group2numsam2uniq = getSharedSeqDist(groupsamples, uniq)
        group2numsam2count = getSharedSeqDist(groupsamples, not uniq)
        stats = {'uniq': group2numsam2uniq, 'reads': group2numsam2count}
        pickle.dump(stats, gzip.open(self.outfile, 'wb'))
        #Length distribution for sequences that are shared by 1, 2, 3, ... samples
        g2numsam2lenfreq = getGroup2numsam2len2freq(groupsamples)
        for g, numsam2lenfreq in g2numsam2lenfreq.iteritems():
            for numsam, lenfreq in numsam2lenfreq.iteritems():
                #one subdirectory per sharing level: lendist/<numsam>/<group>.pickle
                lendistdir = os.path.join(self.lendistdir, '%d' %numsam)
                system("mkdir -p %s" % lendistdir)
                picklefile = os.path.join(lendistdir, "%s.pickle" %g)
                pickle.dump(lenfreq, gzip.open(picklefile, 'wb'))
##########################################################
####### COMPUTE MEANS & STDS OF THE SAMPLINGS ############
##########################################################
class AverageResults(Target):
    '''Aggregate per-sampling results into summary statistics.

    Expected directory layout:
        Indir/
            Sampling#/
                SampleSet#/
                    pairOverlap.pickle
                    numSamVsClones.pickle
                    lendist/
                        numsam1/
                            group1.pickle
                            group2.pickle
    '''
    def __init__(self, indir, options):
        Target.__init__(self)
        self.indir = indir
        self.options = options
    def run(self):
        requested = self.options.analyses
        if 'pairwiseOverlap' in requested:
            self.addChildTarget( PairwiseOverlapSummary(self.indir, self.options) )
        if 'numsamVsClones' in requested:
            self.addChildTarget( NumSamplesVsClonesSummary(self.indir, self.options) )
            self.addChildTarget( NumSamplesLendistSummary(self.indir, self.options) )
########################################
########### SUMMARY ####################
########################################
class PairwiseOverlapSummary(Target):
'''
Indir/
Sampling#/
SampleSet#/
pairOverlap.pickle
'''
def __init__(self, indir, options):
Target.__init__(self)
self.indir = indir
self.options = options
def run(self):
#uniq = True
outdir = os.path.join(self.options.outdir, "pairwiseOverlap")
system("mkdir -p %s" % outdir)
aggSet2group2pair2stats = {}
#samplings = os.listdir(self.indir)
samplings = finddirs(self.indir)
#Accumulate samplings
for i, sampling in enumerate(samplings):
samplingdir = os.path.join(self.indir, sampling)
#samplesets = os.listdir(samplingdir)
samplesets = finddirs(samplingdir)
for samset in samplesets:
samsetdir = os.path.join(samplingdir, samset)
picklefile = os.path.join(samsetdir, 'pairOverlap.pickle')
group2pair2stats = pickle.load( gzip.open(picklefile, 'rb') )
if samset not in aggSet2group2pair2stats:
aggSet2group2pair2stats[samset] = {}
for group, pair2stats in group2pair2stats.iteritems():
if group not | |
import os
import stat
import re
import pdb
from pathlib import Path
import getpass
import numpy as np
import h5py as h5
import pandas as pd
import subprocess
from glob import glob
from tqdm import tqdm
from .dataset import _submit_slurm_job
import shutil
import yaml
import threading
import time
from datetime import timedelta, datetime
import IPython.display
from IPython.display import display
from ipywidgets import Layout
import ipywidgets as widgets
from .plotting import AnaFile
def get_running_jobs():
    """Return the current user's SLURM queue as a DataFrame indexed by job id.

    Columns: partition, name, status, runtime, node.  An empty DataFrame
    (with the same columns) is returned when no jobs are queued.
    """
    cmd = f"squeue -u {getpass.getuser()} | awk '{{print $1, $2, $3, $5, $6, $8}}'"
    raw = subprocess.check_output([cmd], shell=True)
    # drop the header line and the trailing empty line
    lines = raw.decode("utf-8").split("\n")[1:-1]
    columns = ["id", "partition", "name", "status", "runtime", "node"]
    rows = np.array([line.split() for line in lines])
    if not len(rows):
        return pd.DataFrame(columns=columns)
    table = pd.DataFrame(columns=columns, data=rows)
    table["id"] = pd.to_numeric(table["id"])
    return table.set_index("id")
def find_jobid(output):
    """Extract the 8-digit SLURM job id following 'SLURM_JOB_ID' in *output*.

    Returns 0 when no id is found.
    """
    # raw string: "\s"/"\d" in a plain literal are invalid escape sequences
    sres = re.search(r"(?<=SLURM_JOB_ID)\s*\d{8}", output)
    return int(sres.group(0)) if sres else 0
def get_fileid(output):
    """Extract the 3-digit file counter from the .h5 filename mentioned in *output*.

    Returns np.nan when no filename or counter can be found.
    """
    filename = get_h5filename(output)
    if filename:
        # raw string so \d is a real regex escape, not an invalid str escape
        sres = re.search(r"\d{3}(?=\.h5)", filename)
        if sres:
            return int(sres.group(0))
    return np.nan
def get_datdir(output):
    """Return the data directory named after 'Analyzing ' in *output* ('' if absent)."""
    match = re.search("(?<=Analyzing ).*", output)
    return match.group(0) if match else ""
def is_running(jobid, jobs):
    """True if *jobid* appears in the index of the *jobs* DataFrame."""
    # the membership test already yields a bool; 'True if X else False' was redundant
    return jobid in jobs.index.values
def is_saved(s):
    """True if the log text *s* reports that results were saved."""
    # 'True if X else False' collapsed to a direct boolean
    return re.search("Results saved", s) is not None
def get_parser_args(s):
    """Parse the 'midtools ...' command line embedded in log text *s*.

    Returns (run_number, args): args maps 'setupfile'/'analysis' (the two
    positionals) and every later option name (leading dashes stripped) to
    its value.  The run number is stored under the literal key '-r' and
    popped before returning.
    """
    tokens = re.search("midtools .*", s).group(0).split()[1:]
    # label the two positionals so the token list alternates key/value
    tokens.insert(0, "setupfile")
    tokens.insert(2, "analysis")
    # strip leading dashes from every option name after the '-r' pair
    tokens[6::2] = [t.lstrip("-") for t in tokens[6::2]]
    args = dict(zip(tokens[0::2], tokens[1::2]))
    run = int(args.pop("-r"))
    return run, args
def get_status(df):
    """Fill/refresh the 'status' column of the job table *df* in place.

    running  - the job's SLURM id is still listed by squeue
    complete - the stdout file reports 'Results saved'
    error    - stdout exists but is neither running nor complete
    unknown  - no stdout file yet
    """
    jobs = get_running_jobs()
    for idx, row in df.iterrows():
        outfile = row["outfile"]
        if outfile.is_file():
            with open(outfile) as f:
                jobc = f.read()
            if is_running(row["slurm-id"], jobs):
                status = "running"
            elif is_saved(jobc):
                status = "complete"
            else:
                status = "error"
        else:
            status = "unknown"
        df.loc[idx, "status"] = status
    return df
def get_h5filename(s):
    """Return the .h5 result filename mentioned in log text *s*, or None."""
    for pattern in ("(?<=Filename: ).*", "(?<=Results saved under ).*"):
        match = re.search(pattern, s)
        if match:
            return match.group(0)
    return None
def get_proposal(s):
    """Extract the proposal number from a '/p00XXXX/' path component in *s* (None if absent)."""
    # raw string: \d in a plain literal is an invalid escape sequence
    out = re.search(r"(?<=/p00)\d{4}(?=/)", s)
    return int(out.group(0)) if out else None
def get_walltime(s):
    """Return the reported elapsed time in *s* as a timedelta, or None if absent."""
    # raw string for the regex; local renamed so it no longer shadows the `time` module
    match = re.search(r"(?<=Finished: elapsed time: )\d{1,}\.\d{1,}(?=min)", s)
    return timedelta(minutes=round(float(match.group(0)), 1)) if match else None
def make_jobtable(jobdir):
    """Build a DataFrame describing every '*job' file found in *jobdir*.

    Columns: status, run, file-id, slurm-id, runtime, datdir plus the
    job/out/err file paths.  'runtime' is either the squeue runtime (for
    jobs still queued) or the stdout file's modification time.
    Raises FileNotFoundError when *jobdir* does not exist.
    """
    if not os.path.isdir(jobdir):
        raise FileNotFoundError(f"Directory {jobdir} does not exist")
    if not len(os.listdir(jobdir)):
        print("Jobdir is empty.")
        return pd.DataFrame()
    entries = {}
    entries["run"] = []
    entries["file-id"] = []
    entries["slurm-id"] = []
    entries["runtime"] = []
    entries["datdir"] = []
    jobf = list(filter(lambda x: x.endswith("job"), os.listdir(jobdir)))
    jobf = [Path(jobdir).joinpath(x) for x in jobf]
    jobf = sorted(jobf)
    entries.update(
        {
            "jobfile": jobf,
            "outfile": [Path(str(s) + ".out") for s in jobf],
            "errfile": [Path(str(s) + ".err") for s in jobf],
        }
    )
    jobs = get_running_jobs()
    for jobfile, outfile in zip(entries["jobfile"], entries["outfile"]):
        with open(jobfile) as f:
            jobc = f.read()
        run, args = get_parser_args(jobc)
        entries["run"].append(run)
        # defaults for jobs that have not produced any stdout yet (pending)
        slurm_id = 0
        t = "PD"
        datdir = ""
        file_id = -1
        if outfile.is_file():
            with open(outfile) as f:
                outc = f.read()
            file_id = args.get("file-identifier", get_fileid(outc))
            datdir = get_datdir(outc)
            slurm_id = find_jobid(outc)
            if slurm_id in jobs.index:
                # still queued: report the squeue runtime
                t = jobs.loc[slurm_id, "runtime"]
            else:
                # job gone from the queue: use stdout mtime as a finish stamp
                mtime = os.path.getmtime(outfile)
                t = datetime.fromtimestamp(mtime).strftime("%Y-%m-%d %H:%M")
        entries["file-id"].append(file_id)
        entries["datdir"].append(datdir)
        entries["slurm-id"].append(slurm_id)
        entries["runtime"].append(t)
    df = pd.DataFrame(entries)
    df = get_status(df)
    # move the status column to the front
    df = df.reindex(columns=["status", *df.drop(columns=["status"]).columns])
    return df.sort_values(by=["status", "jobfile", "run", "file-id"])
def get_tcp(s):
    """Return the first tcp://host:port address found in *s*, or None."""
    # raw string: \d in a plain literal is an invalid escape sequence
    out = re.search(r"tcp://(\d{1,3}\.?){4}:\d{1,6}", s)
    if out:
        return out.group(0)
    return None
from shutil import SameFileError
def log_error(df, jobdir):
    """Record jobs with status 'error' in failed-jobs/failed-jobs.yml.

    For every failed job the SLURM id, scheduler tcp address, error-log
    path and a timestamp are stored, and the stderr file is copied next
    to the yaml database for later inspection.
    """
    failed_jobs_file = (
        Path(jobdir).parent.joinpath("failed-jobs").joinpath("failed-jobs.yml")
    )
    if not failed_jobs_file.is_file():
        # make sure the parent directory exists before touching the file
        failed_jobs_file.parent.mkdir(parents=True, exist_ok=True)
        failed_jobs_file.touch()
    with open(failed_jobs_file, "r+") as f:
        failed = yaml.load(f, Loader=yaml.FullLoader)
    if not failed:
        failed = {}
    errors = df[df["status"] == "error"]
    for _, row in errors.iterrows():
        fcontent = {}
        for key in ["job", "out", "err"]:
            fname = row[key + "file"]
            if os.path.isfile(fname):
                with open(fname) as f:
                    fcontent[key] = f.read()
                if key == "err":
                    # bugfix: the old code copied the stderr log only in the
                    # branch where it did NOT exist, which always raised
                    shutil.copy(fname, failed_jobs_file.parent.joinpath(fname.name))
            else:
                fcontent[key] = ""
        failed[find_jobid(fcontent["out"])] = {
            "tcp": get_tcp(fcontent["out"]),
            "errlog": row["errfile"],
            "time": datetime.now().strftime("%m/%d/%Y, %H:%M:%S"),
        }
    with open(failed_jobs_file, "w") as f:
        yaml.dump(failed, f)
def handle_failed(df, remove=True, resubmit=True, run=None, subset="error"):
    """Remove and/or resubmit jobs whose status equals *subset*.

    remove   - delete the (partial) analysis result of each matching job
    resubmit - submit the job to SLURM again with its original arguments
    run      - restrict the operation to a single run number
    """
    if run is not None:
        df = df[df["run"] == run]
    for i, row in (
        df[df["status"] == subset].drop_duplicates(subset=["run", "file-id"]).iterrows()
    ):
        with open(row["jobfile"]) as f:
            jobc = f.read()
        run_number, args = get_parser_args(jobc)
        args["job_dir"] = str(row["jobfile"].parent)
        if remove:
            if not np.isnan(row["file-id"]):
                try:
                    anafile = AnaFile(
                        (int(run_number), int(row["file-id"])), dirname=args["out-dir"]
                    )
                    anafile.remove()
                except FileNotFoundError:
                    # bugfix: the continuation string lacked the f-prefix, so
                    # '{int(run_number)}' was printed literally
                    print(f"Tried to delete analysis result for run "
                          f"{int(run_number)}, but file does not exist.")
        if resubmit:
            _submit_slurm_job(run_number, args)
    if not remove and not resubmit:
        print("Set either 'remove' or 'resubmit' to True. Otherwise "
              "handle_failed will do nothing.")
def get_completed_analysis_jobs(df):
    """Reduce the job table to completed jobs and enrich it with result metadata.

    Adds filename, file counter (idx), analysis flags, proposal number and
    walltime parsed from each job's stdout, then drops the file-path columns.
    """
    if not len(df):
        return df
    # work on an explicit copy: assigning into a boolean-mask slice of the
    # caller's frame triggers pandas' SettingWithCopy behaviour
    df = df[df["status"] == "complete"].copy()
    for i, row in df.drop_duplicates(subset=["run", "file-id"]).iterrows():
        with open(row["jobfile"]) as f:
            jobc = f.read()
        run_number, args = get_parser_args(jobc)
        with open(row["outfile"]) as f:
            outc = f.read()
        filename = AnaFile(get_h5filename(outc))
        df.loc[i, "filename"] = filename.fullname
        df.loc[i, "idx"] = int(filename.counter)
        df.loc[i, "analysis"] = args["analysis"]
        df.loc[i, "proposal"] = get_proposal(outc)
        df.loc[i, "walltime"] = get_walltime(outc)
    if len(df):
        df["analysis"] = df["analysis"].astype(str)
        df["idx"] = df["idx"].astype("uint16")
        df["proposal"] = df["proposal"].astype("uint16")
        # put the identifying columns first
        cols = ["run", "idx", "analysis"]
        cols.extend(df.drop(columns=cols).columns)
        df = df.reindex(columns=cols)
    df.drop(columns=["jobfile", "outfile", "errfile"], inplace=True)
    return df
def stop_running(df):
    """Cancel (scancel) every SLURM job whose status is 'running'."""
    running_ids = df.loc[df["status"] == "running", "slurm-id"]
    for jobid in running_ids:
        subprocess.run(["scancel", str(jobid)])
def clean_jobdir(df, jobdir, subset=None, run=None):
    """Delete job/out/err files for the given statuses (default: ['error']).

    run restricts the clean-up to a single run number.  jobdir is unused
    but kept for interface compatibility.
    """
    if run is not None:
        df = df[df["run"] == run]
    statuses = ["error"] if subset is None else subset
    for status in statuses:
        matching = df[df["status"] == status]
        for col in ("jobfile", "outfile", "errfile"):
            for path in matching[col].values:
                if os.path.isfile(path):
                    os.remove(path)
def merge_files(outfile, filenames, h5_structure, delete_file=False):
    """Merge existing HDF5 files for a run into *outfile*.

    h5_structure maps method name -> {dataset key: (fixed_size_flag, ...)}.
    Fixed-size datasets are written once (first occurrence wins); growable
    ones are concatenated along axis 0.  With delete_file=True each source
    file is removed (via AnaFile) after it has been merged.
    (Removed the unused 'keys' list the old version computed up front.)
    """
    with h5.File(outfile, "w") as F:
        for filename in filenames:
            with h5.File(filename, "r") as f:
                for method in h5_structure:
                    for key, value in h5_structure[method].items():
                        fixed_size = bool(value[0])
                        if key in f:
                            data = f[key]
                            s = data.shape
                            if key not in F:
                                # scalars are copied verbatim; arrays get a
                                # growable (maxshape None) dataset
                                if len(s) == 0:
                                    F[key] = np.array(data)
                                else:
                                    F.create_dataset(
                                        key,
                                        data=data,
                                        compression="gzip",
                                        chunks=True,
                                        maxshape=(None, *s[1:]),
                                    )
                            else:
                                if not fixed_size:
                                    # append along the first axis
                                    F[key].resize((F[key].shape[0] + s[0]), axis=0)
                                    F[key][-s[0]:] = data
            if delete_file:
                anafile = AnaFile(filename)
                anafile.remove()
def load_yaml_to_dict(filename):
    """Read a yaml file and return its contents (None for an empty file)."""
    with open(filename) as f:
        return yaml.load(f, Loader=yaml.FullLoader)
def get_closest_entry(d, proposal, run, verbose=True):
    """Return the entry of nested dict d[proposal][run] whose proposal and
    run numbers are numerically closest to the requested ones."""
    def _nearest(candidates, target):
        arr = np.asarray(list(candidates))
        return arr[np.argmin(np.abs(arr - target))]

    proposal = _nearest(d.keys(), proposal)
    run = _nearest(d[proposal].keys(), run)
    if verbose:
        print(
            f"Closest database entry found for proposal: {proposal}, run:{run}")
    return d[proposal][run]
def get_setupfile_content(proposal, run, masksfile=None, quadposfile=None):
    """Fetch mask and quadrant-position entries for (proposal, run).

    Each key is looked up in its yaml database, falling back to the
    closest available proposal/run entry.  The default database paths
    point into the author's scratch space -- TODO: make configurable.
    """
    if masksfile is None:
        masksfile = r'/home/reiserm/scratch/mid-proteins/masks/masks.yml'
    if quadposfile is None:
        quadposfile = r'/home/reiserm/scratch/mid-proteins/geoms/quadrant-positions.yml'
    out = {}
    keys = ['quadrant_positions', 'mask']
    files = [quadposfile, masksfile]
    for key, f in zip(keys, files):
        print(f"Searching for '{key}' in database")
        d = load_yaml_to_dict(f)
        out[key] = get_closest_entry(d, proposal, run)[key]
    return out
def write_tmp_setupfile(d, tmpdir='./tmp/'):
    """Dump dict *d* to a timestamped yaml file inside *tmpdir*; return its path."""
    tmpdir = os.path.abspath(tmpdir)
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    stamp = time.strftime("%Y-%m-%d_T%H-%M-%S")
    tmpname = os.path.join(tmpdir, "tmp_setup-config_%s.yml" % stamp)
    with open(tmpname, 'w') as f:
        yaml.dump(d, f)
    return tmpname
def create_setupfile(proposal, run, basefile=None, tmpdir='./tmp/', search_database=False):
    """Write a temporary midtools setup file for (proposal, run); return its path.

    Starts from *basefile* (default: the packaged setup_config/setup.yml)
    and, when search_database is True, overrides mask/quadrant entries
    from the yaml databases via get_setupfile_content.
    """
    if basefile is None:
        basefile = "/".join(__file__.split('/')[:-1]) + "/setup_config/setup.yml"
    d = load_yaml_to_dict(basefile)
    if search_database:
        d.update(get_setupfile_content(proposal, run))
    return write_tmp_setupfile(d, tmpdir)
def start_midtools(
    proposal,
    run_number,
    setupfile=None,
    pulses_per_train=None,
    datdir=None,
    first_train=0,
    last_train=1000,
    first_cell=0,
    pulse_step=1,
    test=False,
    jobdir='./jobs',
    base_dir='./analyzed_runs/',
    saxs=True,
    xpcs=True,
):
    """Create a setup file and submit a midtools SLURM job for one run.

    saxs/xpcs toggle the corresponding analyses; test=True does a dry run
    (forwarded to _submit_slurm_job).  Results go to base_dir/p<proposal>.
    (Dropped the pointless f-prefix on the base_dir default literal.)
    """
    out_dir = os.path.join(base_dir, f'p{proposal:06d}')
    # exist_ok avoids the check-then-create race of the old isdir/makedirs pair
    os.makedirs(out_dir, exist_ok=True)
    setupfile = create_setupfile(
        proposal, run_number, basefile=setupfile, tmpdir='./tmp/')
    # analysis flag string: '1<saxs><xpcs>0'
    analysis = f'1{int(saxs)}{int(xpcs)}0'
    args = {'setupfile': setupfile,
            'analysis': analysis,
            'job_dir': jobdir,
            'out-dir': out_dir,
            'pulses_per_train': pulses_per_train,
            'pulse_step': pulse_step,
            'datdir': datdir,
            'first-train': first_train,
            'last-train': last_train,
            'first-cell': first_cell,
            }
    _submit_slurm_job(run_number, args, test=test)
class Scheduler:
def __init__(self, jobdir):
jobdir = os.path.abspath(jobdir)
if not os.path.exists(jobdir):
os.mkdir(jobdir)
self.jobdir = jobdir
self.df = None
self.sel_run = None
def gui(
self,
):
def on_button_clicked_update_jobtable(b):
with output:
IPython.display.clear_output()
# print(f"Run Number is {run_number_IntText.value}")
self.df = make_jobtable(self.jobdir)
display(self.df.tail(table_length_IntText.value))
def on_button_clicked_clean_jobdir(b):
run = run_number_IntText.value if run_number_IntText.value >= 0 else None
clean_jobdir(self.df, self.jobdir, subset=[subset_Dropdown.value], run=run)
def on_button_clicked_handle_failed(b):
run = run_number_IntText.value if run_number_IntText.value >= 0 else None
with output:
handle_failed(
self.df,
remove=remove_checkbox.value,
resubmit=resubmit_checkbox.value,
run=run,
subset=subset_Dropdown.value,
)
def on_button_clicked_print_file(b):
index = table_index_IntText.value
s = "errfile"
if "out" in print_file_Dropdown.value:
s = "outfile"
elif "job" in print_file_Dropdown.value:
s = "jobfile"
with open(self.df.loc[table_index_IntText.value, s]) as f:
jobc = f.read()
# print(f"Job_ID: {find_jobid(jobc)}")
# lastline = list(filter(lambda x: len(x), jobc.split("\n")))[-10:]
lastline = list(filter(lambda x: "[" not in x, jobc.split("\n")))
# jobc = re.sub("(^\[\#{0,39}\s*\].*$)*", "", jobc, re.MULTILINE)
with output:
IPython.display.clear_output()
# print(jobc)
print("\n".join(lastline))
def on_button_clicked_stop_running_job(b):
table_index = int(table_index_IntText.value)
jobid = int(self.df.loc[table_index, 'slurm-id'])
subprocess.run(["scancel", str(jobid)])
def on_button_clicked_list_jobs(b):
with output:
IPython.display.clear_output()
subout = subprocess.check_output(f"squeue -u {getpass.getuser()}", shell=True)
print(subout.decode('utf-8'))
jobtable_button = widgets.Button(
description="Update Job Table / Clear Output", layout=Layout(flex="1 0 auto", width="auto")
)
clean_jobdir_button = widgets.Button(
description="Clean Jobdir", layout=Layout(flex="1 0 auto", width="auto")
)
print_file_button = widgets.Button(
description="Print File", layout=Layout(flex="1 0 | |
= ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
auto_edge = ET.SubElement(port, "auto-edge")
auto_edge.text = kwargs.pop('auto_edge')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_admin_edge(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
admin_edge = ET.SubElement(port, "admin-edge")
admin_edge.text = kwargs.pop('admin_edge')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_edge_delay(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
edge_delay = ET.SubElement(port, "edge-delay")
edge_delay.text = kwargs.pop('edge_delay')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_configured_root_guard(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
configured_root_guard = ET.SubElement(port, "configured-root-guard")
configured_root_guard.text = kwargs.pop('configured_root_guard')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_oper_root_guard(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
oper_root_guard = ET.SubElement(port, "oper-root-guard")
oper_root_guard.text = kwargs.pop('oper_root_guard')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_boundary_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
boundary_port = ET.SubElement(port, "boundary-port")
boundary_port.text = kwargs.pop('boundary_port')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_oper_bpdu_guard(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
oper_bpdu_guard = ET.SubElement(port, "oper-bpdu-guard")
oper_bpdu_guard.text = kwargs.pop('oper_bpdu_guard')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_oper_bpdu_filter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
oper_bpdu_filter = ET.SubElement(port, "oper-bpdu-filter")
oper_bpdu_filter.text = kwargs.pop('oper_bpdu_filter')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_link_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
link_type = ET.SubElement(port, "link-type")
link_type.text = kwargs.pop('link_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_rx_bpdu_count(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
rx_bpdu_count = ET.SubElement(port, "rx-bpdu-count")
rx_bpdu_count.text = kwargs.pop('rx_bpdu_count')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_rstp_rstp_port_tx_bpdu_count(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
rstp = ET.SubElement(spanning_tree_mode, "rstp")
rstp = ET.SubElement(rstp, "rstp")
port = ET.SubElement(rstp, "port")
tx_bpdu_count = ET.SubElement(port, "tx-bpdu-count")
tx_bpdu_count.text = kwargs.pop('tx_bpdu_count')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_root_bridge_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
root_bridge = ET.SubElement(mstp, "root-bridge")
priority = ET.SubElement(root_bridge, "priority")
priority.text = kwargs.pop('priority')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_root_bridge_bridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
root_bridge = ET.SubElement(mstp, "root-bridge")
bridge_id = ET.SubElement(root_bridge, "bridge-id")
bridge_id.text = kwargs.pop('bridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_root_bridge_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
root_bridge = ET.SubElement(mstp, "root-bridge")
hello_time = ET.SubElement(root_bridge, "hello-time")
hello_time.text = kwargs.pop('hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_root_bridge_max_age(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
root_bridge = ET.SubElement(mstp, "root-bridge")
max_age = ET.SubElement(root_bridge, "max-age")
max_age.text = kwargs.pop('max_age')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_root_bridge_forward_delay(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
root_bridge = ET.SubElement(mstp, "root-bridge")
forward_delay = ET.SubElement(root_bridge, "forward-delay")
forward_delay.text = kwargs.pop('forward_delay')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_bridge_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
bridge = ET.SubElement(mstp, "bridge")
priority = ET.SubElement(bridge, "priority")
priority.text = kwargs.pop('priority')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_bridge_bridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
bridge = ET.SubElement(mstp, "bridge")
bridge_id = ET.SubElement(bridge, "bridge-id")
bridge_id.text = kwargs.pop('bridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_bridge_hello_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
bridge = ET.SubElement(mstp, "bridge")
hello_time = ET.SubElement(bridge, "hello-time")
hello_time.text = kwargs.pop('hello_time')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_bridge_max_age(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
bridge = ET.SubElement(mstp, "bridge")
max_age = ET.SubElement(bridge, "max-age")
max_age.text = kwargs.pop('max_age')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_bridge_forward_delay(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
bridge = ET.SubElement(mstp, "bridge")
forward_delay = ET.SubElement(bridge, "forward-delay")
forward_delay.text = kwargs.pop('forward_delay')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_transmit_hold_count(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
transmit_hold_count = ET.SubElement(mstp, "transmit-hold-count")
transmit_hold_count.text = kwargs.pop('transmit_hold_count')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_migrate_time(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
migrate_time = ET.SubElement(mstp, "migrate-time")
migrate_time.text = kwargs.pop('migrate_time')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_port_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
config = get_stp_brief_info
output = ET.SubElement(get_stp_brief_info, "output")
spanning_tree_info = ET.SubElement(output, "spanning-tree-info")
spanning_tree_mode = ET.SubElement(spanning_tree_info, "spanning-tree-mode")
mstp = ET.SubElement(spanning_tree_mode, "mstp")
mstp = ET.SubElement(mstp, "mstp")
port = ET.SubElement(mstp, "port")
interface_type = ET.SubElement(port, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_stp_brief_info_output_spanning_tree_info_spanning_tree_mode_mstp_mstp_port_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_brief_info = ET.Element("get_stp_brief_info")
| |
name, **kwargs): # noqa: E501
"""get device property by name # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_property_by_name(device_id, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param str name: (required)
:param str fields:
:return: EntityProperty
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_property_by_name_with_http_info(device_id, name, **kwargs) # noqa: E501
else:
(data) = self.get_device_property_by_name_with_http_info(device_id, name, **kwargs) # noqa: E501
return data
def get_device_property_by_name_with_http_info(self, device_id, name, **kwargs):  # noqa: E501
    """get device property by name  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_device_property_by_name_with_http_info(device_id, name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int device_id: (required)
    :param str name: (required)
    :param str fields:
    :return: EntityProperty
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['device_id', 'name', 'fields']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # dict.items() replaces six.iteritems — equivalent on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_device_property_by_name" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'device_id' is set
    if ('device_id' not in params or
            params['device_id'] is None):
        raise ValueError("Missing the required parameter `device_id` when calling `get_device_property_by_name`")  # noqa: E501
    # verify the required parameter 'name' is set
    if ('name' not in params or
            params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `get_device_property_by_name`")  # noqa: E501

    # Raw strings fix the invalid '\d' escape sequences (SyntaxWarning on 3.12+).
    if 'device_id' in params and not re.search(r'\d+', params['device_id'] if isinstance(params['device_id'], str) else str(params['device_id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `device_id` when calling `get_device_property_by_name`, must conform to the pattern `/\d+/`")  # noqa: E501
    if 'name' in params and not re.search(r'[^\/]+', params['name'] if isinstance(params['name'], str) else str(params['name'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `name` when calling `get_device_property_by_name`, must conform to the pattern `/[^\/]+/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'device_id' in params:
        path_params['deviceId'] = params['device_id']  # noqa: E501
    if 'name' in params:
        path_params['name'] = params['name']  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/device/devices/{deviceId}/properties/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EntityProperty',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_device_property_list(self, device_id, **kwargs):  # noqa: E501
    """get device properties  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_device_property_list(device_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int device_id: (required)
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: PropertyPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both sync and async modes delegate to the *_with_http_info worker; when
    # async_req is set the worker returns the request thread instead of data.
    return self.get_device_property_list_with_http_info(device_id, **kwargs)  # noqa: E501
def get_device_property_list_with_http_info(self, device_id, **kwargs):  # noqa: E501
    """get device properties  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_device_property_list_with_http_info(device_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int device_id: (required)
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: PropertyPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['device_id', 'fields', 'size', 'offset', 'filter']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # dict.items() replaces six.iteritems — equivalent on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_device_property_list" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'device_id' is set
    if ('device_id' not in params or
            params['device_id'] is None):
        raise ValueError("Missing the required parameter `device_id` when calling `get_device_property_list`")  # noqa: E501

    # Raw strings fix the invalid '\d' escape sequences (SyntaxWarning on 3.12+).
    if 'device_id' in params and not re.search(r'\d+', params['device_id'] if isinstance(params['device_id'], str) else str(params['device_id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `device_id` when calling `get_device_property_list`, must conform to the pattern `/\d+/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'device_id' in params:
        path_params['deviceId'] = params['device_id']  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501
    if 'size' in params:
        query_params.append(('size', params['size']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501
    if 'filter' in params:
        query_params.append(('filter', params['filter']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/device/devices/{deviceId}/properties', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PropertyPaginationResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_escalation_chain_by_id(self, id, **kwargs):  # noqa: E501
    """get escalation chain by id  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_escalation_chain_by_id(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :return: EscalatingChain
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both sync and async modes delegate to the *_with_http_info worker; when
    # async_req is set the worker returns the request thread instead of data.
    return self.get_escalation_chain_by_id_with_http_info(id, **kwargs)  # noqa: E501
def get_escalation_chain_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """get escalation chain by id  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_escalation_chain_by_id_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int id: (required)
    :param str fields:
    :return: EscalatingChain
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id', 'fields']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # dict.items() replaces six.iteritems — equivalent on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_escalation_chain_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_escalation_chain_by_id`")  # noqa: E501

    # Raw strings fix the invalid '\d' escape sequences (SyntaxWarning on 3.12+).
    if 'id' in params and not re.search(r'\d+', params['id'] if isinstance(params['id'], str) else str(params['id'])):  # noqa: E501
        raise ValueError(r"Invalid value for parameter `id` when calling `get_escalation_chain_by_id`, must conform to the pattern `/\d+/`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    if 'fields' in params:
        query_params.append(('fields', params['fields']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['LMv1']  # noqa: E501

    return self.api_client.call_api(
        '/setting/alert/chains/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EscalatingChain',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_escalation_chain_list(self, **kwargs):  # noqa: E501
    """get escalation chain list  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_escalation_chain_list(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str fields:
    :param int size:
    :param int offset:
    :param str filter:
    :return: EscalationChainPaginationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both sync and async modes delegate to the *_with_http_info worker; when
    # async_req is set the worker returns the request thread instead of data.
    return self.get_escalation_chain_list_with_http_info(**kwargs)  # noqa: E501
def get_escalation_chain_list_with_http_info(self, **kwargs): # noqa: E501
"""get escalation chain list # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request | |
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
\brief Code to prepare data.
\copyright Copyright (c) 2021 Visual Computing group of Ulm University,
Germany. See the LICENSE file at the top-level directory of
this distribution.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import sys, argparse, os, math, random, time
#import tensorflow as tf
import numpy as np
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#from PIL import Image
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
BASE_DIR = os.path.abspath('.')
PROJECT_DIR = os.path.dirname(BASE_DIR)
ROOT_DIR = os.path.dirname(PROJECT_DIR)
#sys.path.append(ROOT_DIR + '/MCCNN/utils')
sys.path.append('../../code/helpers')
sys.path.append('../../code')
from VQs import *
from MeshHelpers2 import *
#from MCConvBuilder import PointHierarchy
current_milli_time = lambda: time.time() * 1000.0
#### small helper functions
def getPz(model):
    """Load the precomputed pz array for *model* from ./param/pz/.

    The first 11 characters of *model* (a path prefix) and its 4-character
    extension are stripped to form the cache file stem.
    """
    stem = model[11:-4]
    return np.load('./param/pz/' + stem + '.npz.npy')
def mkdir(path):
    """Create *path* (including missing parents) if it does not exist.

    Fix: ``exist_ok=True`` removes the check-then-create race of the previous
    ``if not os.path.exists(path): os.makedirs(path)`` guard, which could
    raise if another process created the directory between the two calls.
    """
    os.makedirs(path, exist_ok=True)
def load_data_from_disk(modelPath, delimiter=','):
    """Read a delimited text file into a float ndarray.

    Single-column files are flattened to a 1-D array; otherwise the result
    is 2-D with one row per line.
    """
    with open(modelPath, 'r') as model_file:
        rows = [line.replace("\n", "").split(delimiter) for line in model_file]
    data = np.array(rows, dtype=float)
    if data.shape[1] == 1:
        data = data.reshape(-1)
    return data
def create_dirs(path, folder_list, subfolders=('',)):
    """Create the output directory tree ``path + sub + folder`` used by generate_views.

    Fixes: the default argument is now an immutable tuple instead of a
    mutable list, and directories are created with ``exist_ok=True`` to
    avoid the check-then-create race of the previous ``mkdir`` helper.
    Paths are built by plain string concatenation, matching the callers,
    which pass folder names with trailing slashes.
    """
    os.makedirs(path, exist_ok=True)
    for sub in subfolders:
        os.makedirs(path + sub, exist_ok=True)
        for folder in folder_list:
            os.makedirs(path + sub + folder, exist_ok=True)
def load_file_names(DATA_DIR, subset, obj=False):
    """Read model file names from ``DATA_DIR + subset + '_small.txt'``.

    :param DATA_DIR: dataset directory (with trailing slash)
    :param subset:   subset name; the listing file is ``<subset>_small.txt``
    :param obj:      if True, rewrite each entry's extension to ``.obj``
    :return: (file_list, folder_list) — entries and their unique folder prefixes

    Fix: the original returned ``folder_list`` without ever defining it,
    which raised NameError on every call. It is reconstructed here as the
    ordered unique directory prefixes (up to and including the last '/')
    of the listed entries — NOTE(review): confirm this matches what callers
    expect, e.g. against the folder lists produced by ``list_files``.
    """
    file_list = []
    folder_list = []
    with open(DATA_DIR + subset + '_small.txt', 'r') as f:
        for line in f:
            if obj == True:
                name = line[:-4] + 'obj'
            else:
                name = line[:-1]
            file_list.append(name)
            folder = name[:name.rfind('/') + 1] if '/' in name else ''
            if folder and folder not in folder_list:
                folder_list.append(folder)
    return file_list, folder_list
def list_files(in_direction):
    """Recursively walk *in_direction*, returning relative file and folder paths.

    :return: (file_list, folder_list) — file paths and visited folder paths,
             both with the ``in_direction`` prefix stripped.
    """
    prefix_len = len(in_direction)
    file_list = []
    folder_list = []
    for root, _subdirs, names in os.walk(in_direction):
        file_list.extend(os.path.join(root, name)[prefix_len:] for name in names)
        rel_root = root[prefix_len:]
        if rel_root not in folder_list:
            folder_list.append(rel_root)
    return file_list, folder_list
def acc_rej_sampling(pdf, n=1000):
    """Acceptance-rejection sampling of indices proportional to *pdf*.

    :param pdf: list of (unnormalized) acceptance probabilities in [0, 1]
    :param n:   number of indices to draw
    :return: list of exactly n sampled indices

    Fix: the original only tested ``len(ind) < n`` between full passes over
    the pdf, so it could return up to ``len(pdf) - 1`` extra samples; the
    length is now checked after each acceptance so exactly n are returned.
    Note: an all-zero pdf still loops forever, as before.
    """
    ind = []
    while len(ind) < n:
        for i, p in enumerate(pdf):
            if np.random.rand() < p:
                ind.append(i)
                if len(ind) == n:
                    break
    return ind
def cdf_sampling(pdf, n=1000):
    """Inverse-CDF sampling: draw n indices with probability proportional to *pdf*.

    :param pdf: list of (unnormalized) probabilities
    :param n:   number of indices to draw
    :return: list of n sampled indices
    """
    cdf = np.cumsum(pdf) / np.sum(pdf)
    samples = []
    for _ in range(n):
        u = np.random.rand()
        samples.append(np.argmax(cdf > u))
    return samples
def cart2pol(xyz):
    """Convert a cartesian unit vector to polar coordinates.

    :param xyz: 3-element sequence (x, y, z) on the unit sphere
    :return: (longitude in [0, 2*pi), latitude in [-pi/2, pi/2])
    """
    longitude = np.arctan2(xyz[1], xyz[0])
    # arctan2 yields (-pi, pi]; shift negative angles into [0, 2*pi).
    longitude = longitude if longitude >= 0 else longitude + 2 * np.pi
    latitude = np.arcsin(xyz[2])
    return longitude, latitude
def fibonacci_sphere(samples=1000, randomize=False):
    """Generate near-uniformly distributed points on the unit sphere.

    :param samples:   number of points to generate
    :param randomize: if True, apply a random rotation offset to the spiral
    :return: (samples, 3) numpy array of unit vectors
    """
    rnd = random.random() * samples if randomize else 1.
    step = 2. / samples
    golden_angle = math.pi * (3. - math.sqrt(5.))
    points = []
    for i in range(samples):
        y = (i * step) - 1 + (step / 2)
        radius = math.sqrt(1 - pow(y, 2))
        theta = ((i + rnd) % samples) * golden_angle
        points.append([math.cos(theta) * radius, y, math.sin(theta) * radius])
    return np.array(points)
#def visualize_fibonacci_sphere(dim = '2D', samples=1000):
## plots a fibonacci sphere using a mercator projection
#views = fibonacci_sphere(samples)
#print(len(views))
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.set_xlabel('longitude')
#ax.set_ylabel('latitude')
#ax.set_xlim((0,2*np.pi))
#ax.set_ylim((-0.5*np.pi,0.5*np.pi))
#points_x = np.zeros(len(views))
#points_y = np.zeros(len(views))
#for index, view in enumerate(views):
#points_x[index], points_y[index] = cart2pol(view)
#ax.plot(points_x, points_y, 'ko', ms=3)
#fig.show()
#### generative function
def generate_views(DATA_DIR, objfolder, dimension = '3D', resolution=1024, reverse =False, recalc = False, n_points = 1000, subset = 'full', skip = False):
# Samples the view quality measures on a unifrom grid(2D) or a fibonacci sphere (3D).
# Currently supports VQ4, VQ5, VQ7, VQ8 .
# Saves results in 'DATA_DIR/best_views/vqx/dimension'
# Also calculates the highest and lowest VQ-value for each model and saves them in 'DATA_DIR/best_views/vqx/best' .
# INPUT:
# DATA_DIR: string - direction to the dataset
# objfolder: string - name of the folder in DATA_DIR containing the objfiles
# dimension: string - if '3D': evaluates the VQ-measures on points on a fibonacci sphere (for label generation)
# if '2D' evaluates the VQ-measures on a uniform grid in polar coordinates (for contour plots)
# resolution: int - the VQ-measures will be evaluated on resolution x resolution images
# reverse: bool - reverses the order in which the objfiles are processed
# recalc: bool - if True existing results will be overwritter
# n_points: int - number of points to be sampled
from Application import GLScene
print('--Generating View Quality Meshes---')
param_dir = DATA_DIR + 'param/'
mkdir('best_views')
# input direction with .obj files for image rendering
input_dir = DATA_DIR + objfolder + '/'
# get .obj files
# all files from the directory
fileList_1, folderList = list_files(input_dir)
fileList_1=[]
fileList = []
# only files with a limited number of faces (stored in small.txt
with open('small.txt','r') as myFile:
for line in myFile:
fileList.append(line[9:-1])
if reverse:
fileList = fileList[::-1]
if skip:
fileList = fileList[1::2]
# output direction
create_dirs(param_dir, folderList, ['area/','pv/','pz/'])
output_dir4 = DATA_DIR + 'best_views/' + str(resolution) +'vq' + '4' + '/'
output_dir5 = DATA_DIR + 'best_views/' + str(resolution) +'vq' + '5' + '/'
output_dir7 = DATA_DIR + 'best_views/' + str(resolution) +'vq' + '7' + '/'
output_dir8 = DATA_DIR + 'best_views/' + str(resolution) +'vq' + '8' + '/'
if args.resolution == 1024:
output_dir4 = DATA_DIR + 'best_views/' + 'vq' + '4' + '/'
output_dir5 = DATA_DIR + 'best_views/' + 'vq' + '5' + '/'
output_dir7 = DATA_DIR + 'best_views/' + 'vq' + '7' + '/'
output_dir8 = DATA_DIR + 'best_views/' + 'vq' + '8' + '/'
# if recalc is True existing files will be ignored and overwritten
MyGL = GLScene(resolution, resolution)
# create output path if not existent
create_dirs(output_dir4, folderList, ['3D/','2D/','best/'])
create_dirs(output_dir5, folderList, ['3D/','2D/','best/'])
create_dirs(output_dir7, folderList, ['3D/','2D/','best/'])
create_dirs(output_dir8, folderList, ['3D/','2D/','best/'])
if dimension == '3D':
numModels = len(fileList)
# create grid on unit sphere
views = np.array(fibonacci_sphere(n_points))
np.savetxt(output_dir4 + '3D/_point_list.txt', views, delimiter=',', fmt = '%s')
np.savetxt(output_dir5 + '3D/_point_list.txt', views, delimiter=',', fmt = '%s')
np.savetxt(output_dir7 + '3D/_point_list.txt', views, delimiter=',', fmt = '%s')
np.savetxt(output_dir8 + '3D/_point_list.txt', views, delimiter=',', fmt = '%s')
# main part of the code for view quality calculation
for index, file in enumerate(fileList):# get starting time
print(file + ' | %3d / %3d' %(index+1, numModels))
start_time = current_milli_time()
model = input_dir + file
print(model)
# check if current model was already processed
if os.path.exists(output_dir8 + '3D/' + file[:-4] + '_vq_list.txt') and recalc==False:
continue
# loads the model parameters ( True for train set, False for test set)
model_params = read_and_generate_buffers(model)
A_t, A_z = getAs(model)
# reset variables
vqs4 = np.zeros([n_points])
vqs5 = np.zeros([n_points])
vqs7 = np.zeros([n_points])
vqs8 = np.zeros([n_points])
min_vq4 = np.ones(1)
max_vq4 = np.zeros(1)
min_vq5 = np.ones(1)
max_vq5 = np.zeros(1)
min_vq7 = np.ones(1)
max_vq7 = np.zeros(1)
min_vq8 = np.ones(1)
max_vq8 = np.zeros(1)
# go through the grid
numTriangles = len(model_params[2])/3
a_t_list = np.zeros(n_points)
a_z_list = np.zeros([numTriangles, n_points])
pzv_list = np.zeros([numTriangles, n_points])
pv_list = np.zeros(n_points)
pz_list = np.zeros(numTriangles)
for i, curr_view in enumerate(views):
#if i%100 == 0:
#curr_time = (current_milli_time()-start_time)/1000
#print('Step: %3d' %(i))
#print('VQ4: %.6f %.6f' %(max_vq4, min_vq4))
#print('VQ5: %.6f %.6f' %(max_vq5, min_vq5))
#print('VQ7: %.6f %.6f' %(max_vq7, min_vq7))
# calculate the loss
texIds,_,_,numFaces = getIds(model_params, viewDir = curr_view, MyGL = MyGL)
# VQ4
a_t, a_z, vis_z = getProb_and_visz(texIds, numFaces)
curr_vq4 = vq4(A_z, A_t, vis_z)
#VQ5
curr_vq5 = vq5(a_z[1:], a_t)
#VQ7
#A_t, A_z = getAs(model)
#a_z = np.zeros(numFaces+1)
#texIds2 = (texIds).reshape(resolution**2)
#for ind in range(resolution**2):
# a_z[texIds2[ind]] += 1
#a_t = np.sum(texIds!=0)
a_z_list[:,i] = a_z[1:]
a_t_list[i]= a_t.copy()
if a_t != 0:
pzv_list[:,i] = a_z[1:]/a_t
curr_vq7 = vq7(a_z[1:], a_t, A_z, A_t)
vqs4[i] = curr_vq4.copy()
vqs5[i] = curr_vq5.copy()
vqs7[i] = curr_vq7.copy()
if i == 0:
min_vq4 = curr_vq4
max_vq4 = curr_vq4
best_view4 = curr_view.copy()
min_vq5 = curr_vq5
max_vq5 = curr_vq5
best_view5 = curr_view.copy()
min_vq7 = curr_vq7
max_vq7 = curr_vq7
best_view7 = curr_view.copy()
if curr_vq4 | |
None, None, None]], [[6.6, 7.7, 8.8, 9.9, None], [None, None, None, None, None]]]
def test_rpad_regular_array():
    """rpad on RegularArray: pads with None (no clipping) along axes 0, 1 and 2.

    First case: a RegularArray of size 3 over an IndexedOptionArray64, so
    existing missing values (index -1) stay None alongside the padding.
    Second case: a RegularArray of size 2 over a ListOffsetArray64, to check
    padding at depth 2 as well.
    """
    # Option-type content: index value -1 marks a missing entry.
    content = awkward1.layout.NumpyArray(numpy.array([2.1, 8.4, 7.4, 1.6, 2.2, 3.4, 6.2, 5.4, 1.5, 3.9, 3.8, 3.0, 8.5, 6.9, 4.3, 3.6, 6.7, 1.8, 3.2]))
    index = awkward1.layout.Index64(numpy.array([13, 9, 13, 4, 8, 3, 15, -1, 16, 2, 8], dtype=numpy.int64))
    indexedarray = awkward1.layout.IndexedOptionArray64(index, content)
    array = awkward1.layout.RegularArray(indexedarray, 3)
    # axis 0: appends whole-None entries; a target <= current length is a no-op.
    assert awkward1.to_list(array.rpad(5, 0)) == [[6.9, 3.9, 6.9], [2.2, 1.5, 1.6], [3.6, None, 6.7], None, None]
    assert awkward1.to_list(array.rpad(4, 0)) == [[6.9, 3.9, 6.9], [2.2, 1.5, 1.6], [3.6, None, 6.7], None]
    assert awkward1.to_list(array.rpad(3, 0)) == [[6.9, 3.9, 6.9], [2.2, 1.5, 1.6], [3.6, None, 6.7]]
    assert awkward1.to_list(array.rpad(1, 0)) == [[6.9, 3.9, 6.9], [2.2, 1.5, 1.6], [3.6, None, 6.7]]
    # axis 1: pads each regular-size-3 sublist with None.
    assert awkward1.to_list(array.rpad(5, 1)) == [[6.9, 3.9, 6.9, None, None], [2.2, 1.5, 1.6, None, None], [3.6, None, 6.7, None, None]]
    assert awkward1.to_list(array.rpad(4, 1)) == [[6.9, 3.9, 6.9, None], [2.2, 1.5, 1.6, None], [3.6, None, 6.7, None]]
    assert awkward1.to_list(array.rpad(3, 1)) == [[6.9, 3.9, 6.9], [2.2, 1.5, 1.6], [3.6, None, 6.7]]
    assert awkward1.to_list(array.rpad(1, 1)) == [[6.9, 3.9, 6.9], [2.2, 1.5, 1.6], [3.6, None, 6.7]]
    # Depth-2 case: RegularArray of size 2 over a jagged (ListOffset) array.
    content = awkward1.layout.NumpyArray(numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    offsets = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6, 10, 10]))
    listoffsetarray = awkward1.layout.ListOffsetArray64(offsets, content)
    regulararray = awkward1.layout.RegularArray(listoffsetarray, 2)
    assert awkward1.to_list(regulararray.rpad(1, 0)) == [[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]]
    assert awkward1.to_list(regulararray.rpad(3, 0)) == [[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]]
    assert awkward1.to_list(regulararray.rpad(4, 0)) == [[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []], None]
    assert awkward1.to_list(regulararray.rpad(7, 0)) == [[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []], None, None, None, None]
    assert awkward1.to_list(regulararray.rpad(1, 1)) == [[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]]
    assert awkward1.to_list(regulararray.rpad(2, 1)) == [[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]]
    assert awkward1.to_list(regulararray.rpad(3, 1)) == [[[0.0, 1.1, 2.2], [], None], [[3.3, 4.4], [5.5], None], [[6.6, 7.7, 8.8, 9.9], [], None]]
    assert awkward1.to_list(regulararray.rpad(5, 1)) == [[[0.0, 1.1, 2.2], [], None, None, None], [[3.3, 4.4], [5.5], None, None, None], [[6.6, 7.7, 8.8, 9.9], [], None, None, None]]
    assert awkward1.to_list(regulararray.rpad(7, 1)) == [[[0.0, 1.1, 2.2], [], None, None, None, None, None], [[3.3, 4.4], [5.5], None, None, None, None, None], [[6.6, 7.7, 8.8, 9.9], [], None, None, None, None, None]]
    # axis 2: pads the innermost variable-length lists (only where shorter than target).
    assert awkward1.to_list(regulararray.rpad(1, 2)) == [[[0.0, 1.1, 2.2], [None]], [[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], [None]]]
    assert awkward1.to_list(regulararray.rpad(2, 2)) == [[[0.0, 1.1, 2.2], [None, None]], [[3.3, 4.4], [5.5, None]], [[6.6, 7.7, 8.8, 9.9], [None, None]]]
    assert awkward1.to_list(regulararray.rpad(3, 2)) == [[[0.0, 1.1, 2.2], [None, None, None]], [[3.3, 4.4, None], [5.5, None, None]], [[6.6, 7.7, 8.8, 9.9], [None, None, None]]]
    assert awkward1.to_list(regulararray.rpad(4, 2)) == [[[0.0, 1.1, 2.2, None], [None, None, None, None]], [[3.3, 4.4, None, None], [5.5, None, None, None]], [[6.6, 7.7, 8.8, 9.9], [None, None, None, None]]]
def test_rpad_and_clip_listoffset_array():
    """rpad_and_clip on ListOffsetArray64: pads with None AND clips to the target length."""
    data = awkward1.layout.NumpyArray(numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    offs = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6, 10, 10]))
    jagged = awkward1.layout.ListOffsetArray64(offs, data)
    assert awkward1.to_list(jagged) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], []]
    # axis 0: result is exactly `target` entries, option type either way.
    assert awkward1.to_list(jagged.rpad_and_clip(3, 0)) == [[0.0, 1.1, 2.2], [], [3.3, 4.4]]
    assert f"option[{awkward1.type(jagged)}]" == str(awkward1.type(jagged.rpad_and_clip(3, 0)))
    assert awkward1.to_list(jagged.rpad_and_clip(7, 0)) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], [], None]
    assert f"option[{awkward1.type(jagged)}]" == str(awkward1.type(jagged.rpad_and_clip(7, 0)))
    # axis 1: every sublist becomes exactly `target` long; type becomes regular.
    assert awkward1.to_list(jagged.rpad_and_clip(5, 1)) == [[0.0, 1.1, 2.2, None, None], [None, None, None, None, None], [3.3, 4.4, None, None, None], [5.5, None, None, None, None], [6.6, 7.7, 8.8, 9.9, None], [None, None, None, None, None]]
    assert str(awkward1.type(jagged.rpad(5, 1))) == "var * ?float64"
    assert str(awkward1.type(jagged.rpad_and_clip(5, 1))) == "5 * ?float64"
    assert awkward1.to_list(jagged.rpad_and_clip(1, 1)) == [[0.0], [None], [3.3], [5.5], [6.6], [None]]
    # Same checks over option-type content (negative indices mark missing values).
    data = awkward1.layout.NumpyArray(numpy.array([1.5, 3.3]))
    opt_index = awkward1.layout.Index64(numpy.array([0, -3, 1, -2, 1, 0, 0, -3, -13, 0, 1, 1, 0, 1, 1, 1, 1, -10, 0, -1, 0, 0, 0, 1, -1, 1, 1]))
    opt = awkward1.layout.IndexedOptionArray64(opt_index, data)
    offs = awkward1.layout.Index64(numpy.array([14, 15, 15, 15, 26, 26, 26]))
    jagged = awkward1.layout.ListOffsetArray64(offs, opt)
    assert awkward1.to_list(jagged) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], []]
    assert awkward1.to_list(jagged.rpad_and_clip(1, 0)) == [[3.3]]
    assert awkward1.to_list(jagged.rpad_and_clip(1, 1)) == [[3.3], [None], [None], [3.3], [None], [None]]
def test_rpad_listoffset_array():
    """rpad on ListOffsetArray64: pads with None, never clips, and only wraps the
    type in option[...] when axis-0 padding actually lengthens the array.
    """
    content = awkward1.layout.NumpyArray(numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    offsets = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6, 10, 10]))
    listoffsetarray = awkward1.layout.ListOffsetArray64(offsets, content)
    assert awkward1.to_list(listoffsetarray) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], []]
    # axis 0, target <= length: no-op and the type is unchanged.
    assert awkward1.to_list(listoffsetarray.rpad(3,0)) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], []]
    assert str(awkward1.type(listoffsetarray)) == str(awkward1.type(listoffsetarray.rpad(3,0)))
    # axis 0, target > length: appended None entries make the type option[...].
    assert awkward1.to_list(listoffsetarray.rpad(7,0)) == [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], [], None]
    assert str("option[") + str(awkward1.type(listoffsetarray)) + str("]") == str(awkward1.type(listoffsetarray.rpad(7,0)))
    # axis 1: each sublist padded with None up to the target (longer lists untouched).
    assert awkward1.to_list(listoffsetarray.rpad(5,1)) == [[0.0, 1.1, 2.2, None, None], [None, None, None, None, None], [3.3, 4.4, None, None, None], [5.5, None, None, None, None], [6.6, 7.7, 8.8, 9.9, None], [None, None, None, None, None]]
    assert awkward1.to_list(listoffsetarray.rpad(1,1)) == [[0.0, 1.1, 2.2], [None], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9], [None]]
    # Same checks over option-type content (negative indices mark missing values).
    content = awkward1.layout.NumpyArray(numpy.array([1.5, 3.3]))
    index = awkward1.layout.Index64(numpy.array([0, -3, 1, -2, 1, 0, 0, -3, -13, 0, 1, 1, 0, 1, 1, 1, 1, -10, 0, -1, 0, 0, 0, 1, -1, 1, 1]))
    indexedarray = awkward1.layout.IndexedOptionArray64(index, content)
    offsets = awkward1.layout.Index64(numpy.array([14, 15, 15, 15, 26, 26, 26]))
    listoffsetarray = awkward1.layout.ListOffsetArray64(offsets, indexedarray)
    assert awkward1.to_list(listoffsetarray) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], []]
    assert awkward1.to_list(listoffsetarray.rpad(1,0)) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], []]
    assert str(awkward1.type(listoffsetarray)) == str(awkward1.type(listoffsetarray.rpad(1,0)))
    # NOTE: even when rpad(6,0) changes nothing visible, the static type still
    # becomes option[...] (the library promotes on the rpad(>=length?) call below).
    assert awkward1.to_list(listoffsetarray.rpad(6,0)) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], []]
    assert str("option[") + str(awkward1.type(listoffsetarray)) + str("]") == str(awkward1.type(listoffsetarray.rpad(6,0)))
    assert awkward1.to_list(listoffsetarray.rpad(7,0)) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], [], None]
    assert str("option[") + str(awkward1.type(listoffsetarray)) + str("]") == str(awkward1.type(listoffsetarray.rpad(7,0)))
    assert awkward1.to_list(listoffsetarray.rpad(9,0)) == [[3.3], [], [], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [], [], None, None, None]
    assert str("option[") + str(awkward1.type(listoffsetarray)) + str("]") == str(awkward1.type(listoffsetarray.rpad(9,0)))
    assert awkward1.to_list(listoffsetarray.rpad(1,1)) == [[3.3], [None], [None], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [None], [None]]
    assert str(awkward1.type(listoffsetarray)) == str(awkward1.type(listoffsetarray.rpad(1,1)))
    assert awkward1.to_list(listoffsetarray.rpad(4,1)) == [[3.3, None, None, None], [None, None, None, None], [None, None, None, None], [3.3, 3.3, None, 1.5, None, 1.5, 1.5, 1.5, 3.3, None, 3.3], [None, None, None, None], [None, None, None, None]]
    assert str(awkward1.type(listoffsetarray)) == str(awkward1.type(listoffsetarray.rpad(4,1)))
def test_rpad_list_array():
    """rpad on ListArray64: axis-0 pads whole sublists, axis-1 pads inside them."""
    data = awkward1.layout.NumpyArray(numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
    begins = awkward1.layout.Index64(numpy.array([0, 3, 4, 5, 8]))
    ends = awkward1.layout.Index64(numpy.array([3, 3, 6, 8, 9]))
    jagged = awkward1.layout.ListArray64(begins, ends, data)
    assert awkward1.to_list(jagged) == [[0.0, 1.1, 2.2], [], [4.4, 5.5], [5.5, 6.6, 7.7], [8.8]]
    # axis 0: no-op (type preserved) until the target exceeds the current length
    assert awkward1.to_list(jagged.rpad(1, 0)) == [[0.0, 1.1, 2.2], [], [4.4, 5.5], [5.5, 6.6, 7.7], [8.8]]
    assert str(awkward1.type(jagged)) == str(awkward1.type(jagged.rpad(1, 0)))
    assert awkward1.to_list(jagged.rpad(2, 0)) == [[0.0, 1.1, 2.2], [], [4.4, 5.5], [5.5, 6.6, 7.7], [8.8]]
    assert str(awkward1.type(jagged)) == str(awkward1.type(jagged.rpad(2, 0)))
    assert awkward1.to_list(jagged.rpad(7, 0)) == [[0.0, 1.1, 2.2], [], [4.4, 5.5], [5.5, 6.6, 7.7], [8.8], None, None]
    assert f"option[{awkward1.type(jagged)}]" == str(awkward1.type(jagged.rpad(7, 0)))
    # axis 1: each sublist padded with None up to the target length
    assert awkward1.to_list(jagged.rpad(1, 1)) == [[0.0, 1.1, 2.2], [None], [4.4, 5.5], [5.5, 6.6, 7.7], [8.8]]
    assert awkward1.to_list(jagged.rpad(2, 1)) == [[0.0, 1.1, 2.2], [None, None], [4.4, 5.5], [5.5, 6.6, 7.7], [8.8, None]]
    assert awkward1.to_list(jagged.rpad(3, 1)) == [[0.0, 1.1, 2.2], [None, None, None], [4.4, 5.5, None], [5.5, 6.6, 7.7], [8.8, None, None]]
    assert awkward1.to_list(jagged.rpad(4, 1)) == [[0.0, 1.1, 2.2, None], [None, None, None, None], [4.4, 5.5, None, None], [5.5, 6.6, 7.7, None], [8.8, None, None, None]]
def test_rpad_and_clip_list_array():
content = awkward1.layout.NumpyArray(numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
starts = awkward1.layout.Index64(numpy.array([0, 3, 4, 5, 8]))
stops = awkward1.layout.Index64(numpy.array([3, 3, 6, 8, 9]))
array = awkward1.layout.ListArray64(starts, stops, content)
assert awkward1.to_list(array) == [[0.0, 1.1, 2.2], [], [4.4, 5.5], [5.5, 6.6, 7.7], [8.8]]
assert awkward1.to_list(array.rpad_and_clip(1,0)) == [[0.0, 1.1, 2.2]]
assert str("option[") + str(awkward1.type(array)) | |
game.game_type == config['GAMETYPE']['PRESEASON']:
details_game = ("PRESEASON | {} | {}"
.format(game_date_short, game_time))
full_details = "{}\n{}\n{}".format(details_game, game.venue, game.game_hashtag)
details_coords = (145, 160)
else:
details_game = ("{} of 82 | {} | {}"
.format(game.preferred_team.games + 1, game_date_short, game_time))
full_details = "{}\n{}\n{}".format(details_game, game.venue, game.game_hashtag)
details_coords = (145, 160)
# Calculate Font Sizes
teams_length = len(teams_text)
teams_font_size = math.floor(1440 / teams_length)
longest_details = 0
for line in iter(full_details.splitlines()):
longest_details = len(line) if len(line) > longest_details else longest_details
details_font_size = math.floor(1100 / longest_details)
font_large = ImageFont.truetype(teams_font, teams_font_size)
font_small = ImageFont.truetype(details_font, details_font_size)
draw = ImageDraw.Draw(bg)
team_coords = (40, 20)
draw.text(team_coords, teams_text, font_black, font_large)
draw.multiline_text(details_coords, full_details, font_black, font_small, None, 10, "center")
return bg
def final_image(game, boxscore_preferred, boxscore_other):
    """Generates the final boxscore image to send in the GAME_END tweet.

    Composites team logos, the final score, updated records and six team
    stats (shots, PK, PP, faceoff %, hits, blocks) onto a pre-rendered
    background template. All pixel coordinates below are tuned to that
    template image.

    Args:
        game (Game): The current game instance.
        boxscore_preferred (dict): The boxscore JSON dictionary of preferred team.
        boxscore_other (dict): The boxscore JSON dictionary of other team.

    Returns:
        Image: Image object (from PIL library) to be sent to Twitter.
    """
    teams_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Adidas.otf')
    details_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Impact.ttf')
    bg = Image.open(os.path.join(PROJECT_ROOT, 'resources/images/GamedayFinalPrudentialBlank.jpg'))
    # Get Game Info for Updated Record
    _, schedule_json = is_game_today(get_team(TEAM_BOT))
    # NOTE(review): `pref` / `other` are assigned but never referenced below
    # (records are computed via get_new_record / get_new_playoff_series) —
    # presumably leftover code; confirm before removing.
    if game.home_team.preferred:
        pref = schedule_json["teams"]["home"]
        other = schedule_json["teams"]["away"]
    else:
        pref = schedule_json["teams"]["away"]
        other = schedule_json["teams"]["home"]
    # Load & Resize Logos (logo filename = team name with spaces stripped)
    pref_logo = Image.open(os.path.join(PROJECT_ROOT, 'resources/logos/{}.png'
                           .format(game.preferred_team.team_name.replace(" ", ""))))
    other_logo = Image.open(os.path.join(PROJECT_ROOT, 'resources/logos/{}.png'
                            .format(game.other_team.team_name.replace(" ", ""))))
    resize = (125, 125)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use Image.LANCZOS
    # if the Pillow dependency is ever upgraded).
    pref_logo.thumbnail(resize, Image.ANTIALIAS)
    other_logo.thumbnail(resize, Image.ANTIALIAS)
    font_large = ImageFont.truetype(teams_font, 80)
    font_small = ImageFont.truetype(details_font, 40)
    font_smaller = ImageFont.truetype(details_font, 20)
    font_black = (0, 0, 0)
    # Setup Coordinates (pixel positions on the background template)
    coords_pref_score = (241, 238)
    coords_pref_logo = (279, 240)
    coords_pref_record = (270, 328)
    coords_other_score = (703, 238)
    coords_other_logo = (584, 240)
    coords_other_record = (648, 328)
    coords_shots = (242, 439)
    coords_pk = (465, 439)
    coords_pp = (676, 439)
    coords_faceoff = (215, 520)
    coords_hits = (478, 520)
    coords_blocks = (693, 520)
    # Setup Text Elements
    preferred_team = game.preferred_team
    other_team = game.other_team
    preferred_stats = boxscore_preferred["teamStats"]["teamSkaterStats"]
    other_stats = boxscore_other["teamStats"]["teamSkaterStats"]
    preferred_stats_faceoff_percent = float(preferred_stats["faceOffWinPercentage"])
    preferred_stats_hits = preferred_stats["hits"]
    preferred_stats_ppg = int(preferred_stats["powerPlayGoals"])
    preferred_stats_pp = int(preferred_stats["powerPlayOpportunities"])
    preferred_stats_blocked = preferred_stats["blocked"]
    # Penalty kill is derived from the OTHER team's power plays.
    preferred_stats_pk_against = int(other_stats["powerPlayOpportunities"])
    preferred_stats_pk_killed = preferred_stats_pk_against - int(other_stats["powerPlayGoals"])
    # Score & Record
    text_pref_score = game.preferred_team.score
    text_other_score = game.other_team.score
    # Update records & get new for final image (Playoffs)
    if game.game_type == "P":
        # Playoffs: records shown are the series tallies, not W-L-OTL.
        if game.preferred_team.score > game.other_team.score:
            pref_outcome = "win"
            other_outcome = "loss"
        else:
            other_outcome = "win"
            pref_outcome = "loss"
        pref_record_str = preferred_team.get_new_playoff_series(pref_outcome)
        other_record_str = other_team.get_new_playoff_series(other_outcome)
    else:
        # Regular season: a loss decided in period 4+ (OT/SO) counts as "ot".
        if game.preferred_team.score > game.other_team.score:
            pref_outcome = "win"
            other_outcome = "loss" if game.period.current < 4 else "ot"
        else:
            other_outcome = "win"
            pref_outcome = "loss" if game.period.current < 4 else "ot"
        pref_record_str = preferred_team.get_new_record(pref_outcome)
        other_record_str = other_team.get_new_record(other_outcome)
    text_shots = preferred_team.shots
    text_pk = "{} / {}".format(preferred_stats_pk_killed, preferred_stats_pk_against)
    text_pp = "{} / {}".format(preferred_stats_ppg, preferred_stats_pp)
    text_faceoff = "{}%".format(preferred_stats_faceoff_percent)
    text_hits = preferred_stats_hits
    text_blocks = preferred_stats_blocked
    # Third argument = logo again as the transparency mask (keeps alpha).
    bg.paste(pref_logo, coords_pref_logo, pref_logo)
    bg.paste(other_logo, coords_other_logo, other_logo)
    draw = ImageDraw.Draw(bg)
    # draw.text positional args are (xy, text, fill, font): font_black is the fill.
    draw.text(coords_pref_score, str(text_pref_score), font_black, font_large)
    draw.text(coords_other_score, str(text_other_score), font_black, font_large)
    draw.text(coords_pref_record, pref_record_str, font_black, font_smaller)
    draw.text(coords_other_record, other_record_str, font_black, font_smaller)
    draw.text(coords_shots, str(text_shots), font_black, font_small)
    draw.text(coords_pk, str(text_pk), font_black, font_small)
    draw.text(coords_pp, str(text_pp), font_black, font_small)
    draw.text(coords_faceoff, str(text_faceoff), font_black, font_small)
    draw.text(coords_hits, str(text_hits), font_black, font_small)
    draw.text(coords_blocks, str(text_blocks), font_black, font_small)
    return bg
def stats_image_bar_generator(draw, stat, pref_stat_value, other_stat_value,
                              pref_colors, other_colors):
    """Draw one two-color stacked stat bar (and its label) onto the stats image.

    The bar is split horizontally in proportion to each team's share of the
    stat total. All pixel coordinates are tuned to the background template.

    Args:
        draw (ImageDraw.Draw): drawing context for the stats background image.
        stat (str): one of "shots", "blocked shots", "hits", "power play"
            or "penalty minutes".
        pref_stat_value: preferred team's value; for "power play" a tuple of
            (power-play opportunities, power-play goals), otherwise a number.
        other_stat_value: other team's value (same shape as pref_stat_value).
        pref_colors (dict): preferred team colors ({"bg": ..., "text": ...}).
        other_colors (dict): other team colors ({"bg": ..., "text": ...}).

    Raises:
        ValueError: if ``stat`` is not one of the supported stat names.
    """
    logging.debug("Stats Bar Generator: stat - %s, pref_value - %s, other_value - %s, pref_colors - %s, other_colors - %s",
                  stat, pref_stat_value, other_stat_value, pref_colors, other_colors)
    # Only the fonts/sizes this function actually draws with are loaded here
    # (the original loaded a dozen unused fonts on every call).
    font_opensans_bold = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Bold.ttf')
    font_opensans_bolditalic = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-BoldItalic.ttf')
    font_opensans_bold_xs = ImageFont.truetype(font_opensans_bold, 27)
    font_opensans_boldit_smaller = ImageFont.truetype(font_opensans_bolditalic, 30)
    # Bar geometry & colors (coordinates tuned to the background template).
    STATS_RECT_WIDTH = 437
    STATS_RECT_TOPLEFT_X = 279
    STATS_RECT_HEIGHT = 49
    FONT_WHITE = (255, 255, 255)
    # Per-stat label text, label font, label position and bar row position.
    if stat == "shots":
        stat_total = pref_stat_value + other_stat_value
        stat_total_text = f"SHOTS: {stat_total}"
        stat_total_text_coords = (50, 243)
        stat_total_text_font = font_opensans_boldit_smaller
        stat_rect_pref_topleft_y = 241
    elif stat == "blocked shots":
        stat_total = pref_stat_value + other_stat_value
        stat_total_text = f"BLOCKED SHOTS: {stat_total}"
        stat_total_text_font = custom_font_size(font_opensans_bolditalic, 23)
        stat_total_text_coords = (50, 335)
        stat_rect_pref_topleft_y = 328
    elif stat == "hits":
        stat_total = pref_stat_value + other_stat_value
        stat_total_text = f"HITS: {stat_total}"
        stat_total_text_font = font_opensans_boldit_smaller
        stat_total_text_coords = (50, 510)
        stat_rect_pref_topleft_y = 505
    elif stat == "power play":
        pref_powerplays, pref_ppg = pref_stat_value
        other_powerplays, other_ppg = other_stat_value
        power_play_pref = f"{int(pref_ppg)} / {int(pref_powerplays)}"
        power_play_other = f"{int(other_ppg)} / {int(other_powerplays)}"
        # Bar widths are split by power-play opportunities, not by goals.
        pref_stat_value = pref_powerplays
        other_stat_value = other_powerplays
        stat_total = pref_powerplays + other_powerplays
        stat_total_text = f"POWER PLAYS: {int(stat_total)}"
        stat_total_text_font = custom_font_size(font_opensans_bolditalic, 23)
        stat_total_text_coords = (50, 423)
        stat_rect_pref_topleft_y = 416
    elif stat == "penalty minutes":
        stat_total = pref_stat_value + other_stat_value
        stat_total_text = f"PENALTY MINUTES: {stat_total}"
        stat_total_text_font = custom_font_size(font_opensans_bolditalic, 20)
        stat_total_text_coords = (50, 603)
        stat_rect_pref_topleft_y = 592
    else:
        # Previously an unknown stat fell through to a NameError below; fail clearly.
        raise ValueError(f"unsupported stat type: {stat!r}")
    # Split the bar proportionally. If neither team has recorded this stat yet
    # (e.g. 0 hits early in a game), split evenly instead of dividing by zero.
    if stat_total:
        stat_rect_width_pref = STATS_RECT_WIDTH * (pref_stat_value / stat_total)
        stat_rect_width_other = STATS_RECT_WIDTH * (other_stat_value / stat_total)
    else:
        stat_rect_width_pref = stat_rect_width_other = STATS_RECT_WIDTH / 2
    # Calculate the remainder of the coordinates
    stat_rect_pref_topleft_x = STATS_RECT_TOPLEFT_X
    stat_rect_pref_bottomright_x = stat_rect_pref_topleft_x + stat_rect_width_pref
    stat_rect_pref_bottomright_y = stat_rect_pref_topleft_y + STATS_RECT_HEIGHT
    stat_text_pref_coords = (stat_rect_pref_topleft_x + 10, stat_rect_pref_topleft_y + 6)
    stat_rect_other_topleft_x = stat_rect_pref_bottomright_x
    stat_rect_other_topleft_y = stat_rect_pref_topleft_y
    stat_rect_other_bottomright_x = stat_rect_other_topleft_x + stat_rect_width_other
    stat_rect_other_bottomright_y = stat_rect_pref_bottomright_y
    stat_text_other_coords = (stat_rect_other_topleft_x + 10, stat_rect_other_topleft_y + 6)
    # Draw the bars, then the per-team values, then the label.
    draw.rectangle([stat_rect_pref_topleft_x, stat_rect_pref_topleft_y, stat_rect_pref_bottomright_x,
                    stat_rect_pref_bottomright_y], outline=None, fill=pref_colors["bg"])
    draw.rectangle([stat_rect_other_topleft_x, stat_rect_other_topleft_y, stat_rect_other_bottomright_x,
                    stat_rect_other_bottomright_y], outline=None, fill=other_colors["bg"])
    if stat == "power play":
        # Skip the "G / PP" text on a zero-width segment (no opportunities).
        if pref_powerplays != 0:
            draw.text(stat_text_pref_coords, power_play_pref, pref_colors["text"], font_opensans_bold_xs)
        if other_powerplays != 0:
            draw.text(stat_text_other_coords, power_play_other, other_colors["text"], font_opensans_bold_xs)
    else:
        draw.text(stat_text_pref_coords, str(pref_stat_value), pref_colors["text"], font_opensans_bold_xs)
        draw.text(stat_text_other_coords, str(other_stat_value), other_colors["text"], font_opensans_bold_xs)
    draw.text(stat_total_text_coords, stat_total_text, FONT_WHITE, stat_total_text_font)
def stats_image_generator(game, bg_type, boxscore_preferred, boxscore_other):
logging.debug("Stats Image Generator Game: %s", game)
logging.debug("Stats Image Generator BG: %s", bg_type)
# logging.debug("Stats Image Generator BOXPREF: %s", boxscore_preferred)
# logging.debug("Stats Image Generator BOXOTHER: %s", boxscore_other)
# Define static values, text strings & coordinates
STATS_RECT_WIDTH = 437
STATS_RECT_TOPLEFT_X = 279
STATS_RECT_HEIGHT = 49
FONT_BLACK = (0, 0, 0)
FONT_WHITE = (255, 255, 255)
COORDS_PREF_LOGO = (840, 120)
COORDS_OTHER_LOGO = (1015, 120)
COORDS_PREF_RECORD = (910, 135)
COORDS_OTHER_RECORD = (1110, 135)
COORDS_LOGO_VS = (960, 130)
COORDS_TEAMS_VS_Y = 198
COORDS_TEAMS_VS_X = 275
WIDTH_TEAMS_VS = 447
COORDS_TEAMS_VS = (335, 198)
TEAMS_VS_W, TEAMS_VS_H = (447, 39)
# Load & Resize Logos
pref_logo = Image.open(os.path.join(PROJECT_ROOT, 'resources/logos/{}.png'
.format(game.preferred_team.team_name.replace(" ", ""))))
other_logo = Image.open(os.path.join(PROJECT_ROOT, 'resources/logos/{}.png'
.format(game.other_team.team_name.replace(" ", ""))))
resize = (120, 120)
pref_logo.thumbnail(resize, Image.ANTIALIAS)
other_logo.thumbnail(resize, Image.ANTIALIAS)
# Change background image based on intermission or game final
# Also change the "losing team" image to grayscale for final
if bg_type == "intermission":
bg = Image.open(os.path.join(PROJECT_ROOT, 'resources/images/GamedayIntermissionFinal-V3Larger.png'))
bg.paste(pref_logo, COORDS_PREF_LOGO, pref_logo)
bg.paste(other_logo, COORDS_OTHER_LOGO, other_logo)
else:
bg = Image.open(os.path.join(PROJECT_ROOT, 'resources/images/GamedayRecapFinalV3-Larger.png'))
COORDS_PREF_LOGO = (780, 120)
COORDS_OTHER_LOGO = (985, 120)
COORDS_LOGO_VS = (-100, -100)
if game.preferred_team.score > game.other_team.score:
bg.paste(pref_logo, COORDS_PREF_LOGO, pref_logo)
bg.paste(other_logo.convert('LA'), COORDS_OTHER_LOGO, other_logo)
else:
bg.paste(pref_logo.convert('LA'), COORDS_PREF_LOGO, pref_logo)
bg.paste(other_logo, COORDS_OTHER_LOGO, other_logo)
# Load all fonts to be used within the image generator
teams_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Adidas.otf')
details_font = os.path.join(PROJECT_ROOT, 'resources/fonts/Impact.ttf')
font_opensans_regular = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Regular.ttf')
font_opensans_italic = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Italic.ttf')
font_opensans_bold = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-Bold.ttf')
font_opensans_bolditalic = os.path.join(PROJECT_ROOT, 'resources/fonts/OpenSans-BoldItalic.ttf')
# Static Font Sizes
font_opensans_regular_large = ImageFont.truetype(font_opensans_regular, 80)
font_opensans_regular_small = ImageFont.truetype(font_opensans_regular, 40)
font_opensans_regular_smaller = ImageFont.truetype(font_opensans_regular, 30)
font_opensans_regular_xxs = ImageFont.truetype(font_opensans_regular, 20)
font_opensans_italic_xs = ImageFont.truetype(font_opensans_italic, 25)
font_opensans_italic_xxs = ImageFont.truetype(font_opensans_italic, 20)
font_opensans_bold_large = ImageFont.truetype(font_opensans_bold, 90)
font_opensans_bold_small = ImageFont.truetype(font_opensans_bold, 40)
font_opensans_bold_smaller = ImageFont.truetype(font_opensans_bold, 30)
font_opensans_bold_xs = ImageFont.truetype(font_opensans_bold, 27)
font_opensans_boldit_small = ImageFont.truetype(font_opensans_bolditalic, 40)
font_opensans_boldit_smallish = ImageFont.truetype(font_opensans_bolditalic, 35)
font_opensans_boldit_smaller = ImageFont.truetype(font_opensans_bolditalic, 30)
font_opensans_boldit_xs = ImageFont.truetype(font_opensans_bolditalic, 25)
font_opensans_boldit_xxs = ImageFont.truetype(font_opensans_bolditalic, 20)
# Setup Colors (via functions)
pref_colors = nhl_game_events.team_colors(game.preferred_team.team_name)
other_colors = nhl_game_events.team_colors(game.other_team.team_name)
logging.debug("Pref Colors - %s // Other Colors - %s", pref_colors, other_colors)
if are_colors_similar(pref_colors["primary"]["bg"], other_colors["primary"]["bg"]):
logging.debug("Primary Colors are Similar!")
pref_colors_all = pref_colors["primary"]
pref_colors_bg = pref_colors["primary"]["bg"]
pref_colors_text = pref_colors["primary"]["text"]
other_colors_all = other_colors["secondary"]
other_colors_bg = other_colors["secondary"]["bg"]
other_colors_text = other_colors["secondary"]["text"]
else:
pref_colors_all = pref_colors["primary"]
pref_colors_bg = pref_colors["primary"]["bg"]
pref_colors_text = pref_colors["primary"]["text"]
other_colors_all = other_colors["primary"]
other_colors_bg = | |
of second basis functions: 3
Open in the first direction
Open in the second direction
Polynomial
Periodic in the first direction
Periodic in the second direction
Knot 1: [0. 0. 0. 0. 1. 1. 1. 1.]
Knot 2: [0. 0. 0. 0. 1. 1. 1. 1.]
u0: 1.000000
u1: 0.000000
v0: 1.000000
v1: 128.000000
Control Points: 16
>>> bsurf.control_points
array([[-26.90290533, -16.51153913, -8.87632351],
[-25.85182035, -15.86644037, -21.16779478],
[-25.99572556, -15.95476156, -33.51982653],
[-27.33276363, -16.77536276, -45.77299513],
[-28.23297477, -14.34440426, -8.87632351],
[-27.12992455, -13.78397453, -21.16779478],
[-27.28094438, -13.86070358, -33.51982653],
[-28.6840851 , -14.57360111, -45.77299513],
[-29.29280315, -12.03305788, -8.87632351],
[-28.14834588, -11.56293146, -21.16779478],
[-28.3050348 , -11.62729699, -33.51982653],
[-29.76084756, -12.22532372, -45.77299513],
[-30.06701039, -9.61104189, -8.87632351],
[-28.89230518, -9.2355426 , -21.16779478],
[-29.05313537, -9.28695263, -33.51982653],
[-30.54742519, -9.76460843, -45.77299513]])
"""
    @property
    def k1(self):
        """Upper index of the first sum (IGES K1)."""
        return self._k1
    @property
    def k2(self):
        """Upper index of the second sum (IGES K2)."""
        return self._k2
    @property
    def m1(self):
        """Degree of the basis functions in the first direction (IGES M1)."""
        return self._m1
    @property
    def m2(self):
        """Degree of the basis functions in the second direction (IGES M2)."""
        return self._m2
    @property
    def flag1(self):
        """Closure flag for the first parametric direction (PROP1).

        NOTE(review): polarity is documented inconsistently in this file
        (here "closed" when True, but the parsing comment said 0=closed) —
        confirm against the IGES type-128 specification.
        """
        return self._flag1
    @property
    def flag2(self):
        """Closure flag for the second parametric direction (PROP2).

        NOTE(review): see the polarity caveat on ``flag1``.
        """
        return self._flag2
    @property
    def flag3(self):
        """Polynomial flag (PROP3).

        ``False`` - rational
        ``True``  - polynomial
        """
        return self._flag3
    @property
    def flag4(self):
        """True when the surface is periodic in the first direction (PROP4)."""
        return self._flag4
    @property
    def flag5(self):
        """True when the surface is periodic in the second direction (PROP5)."""
        return self._flag5
    @property
    def knot1(self):
        """Knot sequence for the first parametric direction."""
        return self._knot1
    @property
    def knot2(self):
        """Knot sequence for the second parametric direction."""
        return self._knot2
    @property
    def weights(self):
        """Control-point weights (one per control point)."""
        return self._weights
def control_points(self):
"""Control points"""
return self._cp
    @property
    def u0(self):
        """Start value of the first (u) parameter range."""
        return self._u0
    @property
    def u1(self):
        """End value of the first (u) parameter range."""
        return self._u1
    @property
    def v0(self):
        """Start value of the second (v) parameter range."""
        return self._v0
    @property
    def v1(self):
        """End value of the second (v) parameter range."""
        return self._v1
def _add_parameters(self, input_parameters):
parameters = np.array([parse_float(param) for param in input_parameters], dtype=float)
self._k1 = int(parameters[1]) # Upper index of first sum
self._k2 = int(parameters[2]) # Upper index of second sum
self._m1 = int(parameters[3]) # Degree of first basis functions
self._m2 = int(parameters[4]) # Degree of second basis functions
self._flag1 = bool(parameters[5]) # 0=closed in first direction, 1=not closed
self._flag2 = bool(parameters[6]) # 0=closed in second direction, 1=not closed
self._flag3 = bool(parameters[7]) # 0=rational, 1=polynomial
self._flag4 = bool(parameters[8]) # 0=nonperiodic in first direction , 1=periodic
self._flag5 = bool(parameters[9]) # 0=nonperiodic in second direction , 1=periodic
# load knot sequences
self._knot1 = parameters[10:12 + self._k1 + self._m1]
self._knot2 = parameters[12 + self._k1 + self._m1: 14 + self._k2 + self._m1 + self._k1 + self._m2]
# weights
st = 14 + self._k2 + self._m1 + self._k1 + self._m2
en = st + (1 + self._k2)*(1 + self._k1)
self._weights = parameters[st:en]
# control points
st = 14 + self._k2 + self._k1 + self._m1 + self._m2 + (1 + self._k2)*(1 + self._k1)
en = st + 3*(1 + self._k2)*(1 + self._k1)
self._cp = parameters[st:en].reshape(-1, 3)
self._u0 = parameters[-3] # Start first parameter value
self._u1 = parameters[-2] # End first parameter value
self._v0 = parameters[-1] # Start second parameter value
self._v1 = parameters[-0] # End second parameter value
def __repr__(self):
info = 'Rational B-Spline Surface\n'
info += ' Upper index of first sum: %d\n' % self._k1
info += ' Upper index of second sum: %d\n' % self._k2
info += ' Degree of first basis functions: %d\n' % self._m1
info += ' Degree of second basis functions: %d\n' % self._m2
if self.flag1:
info += ' Closed in the first direction\n'
else:
info += ' Open in the first direction\n'
if self.flag2:
info += ' Closed in the second direction\n'
else:
info += ' Open in the second direction\n'
if self.flag3:
info += ' Rational\n'
else:
info += ' Polynomial\n'
if self.flag4:
info += ' Nonperiodic in first direction\n'
else:
info += ' Periodic in the first direction\n'
if self.flag5:
info += ' Nonperiodic in second direction\n'
else:
info += ' Periodic in the second direction\n'
info += ' Knot 1: %s\n' % str(self.knot1)
info += ' Knot 2: %s\n' % str(self.knot2)
info += ' u0: %f\n' % self.u0
info += ' u1: %f\n' % self.u1
info += ' v0: %f\n' % self.v0
info += ' v1: %f\n' % self.v1
info += ' Control Points: %d' % len(self._cp)
def to_geomdl(self):
"""Return a ``geommdl.BSpline.Surface``"""
surf = BSpline.Surface()
# Set degrees
surf.degree_u = self._m2
surf.degree_v = self._m1
# set control points and knots
cp2d = self._cp.reshape(self._k2 + 1, self._k1 + 1, 3)
surf.ctrlpts2d = cp2d.tolist()
surf.knotvector_u = self._knot2
surf.knotvector_v = self._knot1
# set weights
surf.weights = self._weights
return surf
def to_vtk(self, delta=0.025):
"""Return a pyvista.PolyData Mesh
Parameters
----------
delta : float, optional
Resolution of the surface. Higher number result in a
denser mesh at the cost of compute time.
Returns
-------
mesh : ``pyvista.PolyData``
``pyvista`` mesh
Examples
--------
>>> mesh = bsurf.to_vtk()
>>> mesh.plot()
"""
surf = self.to_geomdl()
# Set evaluation delta
surf.delta = delta
# Evaluate surface points
surf.evaluate()
faces = []
for face in surf.faces:
faces.extend([3] + face.vertex_ids)
return pv.PolyData(np.array(surf.vertices), np.array(faces))
class CircularArc(Entity):
    """Circular Arc

    Type 100: Simple circular arc of constant radius. Usually defined
    with a Transformation Matrix Entity (Type 124).
    """

    def _add_parameters(self, parameters):
        """Parse the IGES type-100 parameter data.

        Index in list  Type  Name  Description
        1              REAL  Z     z displacement on XT,YT plane
        2              REAL  X     x coordinate of center
        3              REAL  Y     y coordinate of center
        4              REAL  X1    x coordinate of start
        5              REAL  Y1    y coordinate of start
        6              REAL  X2    x coordinate of end
        7              REAL  Y2    y coordinate of end
        """
        self.z = parse_float(parameters[1])
        self.x = parse_float(parameters[2])
        self.y = parse_float(parameters[3])
        self.x1 = parse_float(parameters[4])
        self.y1 = parse_float(parameters[5])
        self.x2 = parse_float(parameters[6])
        self.y2 = parse_float(parameters[7])
        # Optional reference to a transformation matrix entity.
        # NOTE(review): assumes ``self.d`` is populated by Entity — confirm.
        self._transform = self.d.get('transform', None)

    def to_vtk(self, resolution=20):
        """Circular arc represented as a ``pyvista.PolyData`` mesh.

        Parameters
        ----------
        resolution : int, optional
            Number of segments used to approximate the arc.

        Returns
        -------
        mesh : ``pyvista.PolyData``
            ``pyvista`` mesh
        """
        start = [self.x1, self.y1, 0]
        end = [self.x2, self.y2, 0]
        # BUG FIX: a stray trailing comma previously made ``center`` a
        # one-element tuple containing the list, not the [x, y, 0] point.
        center = [self.x, self.y, 0]
        arc = pv.CircularArc(center=center,
                             pointa=start,
                             pointb=end,
                             resolution=resolution)
        # apply the z displacement off the XT,YT plane
        arc.points += [0, 0, self.z]
        if self.transform is not None:
            arc.transform(self.transform._to_vtk())
        return arc

    @property
    def transform(self):
        """Associated transformation matrix entity, or ``None``."""
        if self._transform is not None:
            return self.iges[self._transform]

    def __repr__(self):
        info = 'Circular Arc\nIGES Type 100\n'
        info += 'Center: (%f, %f)\n' % (self.x, self.y)
        info += 'Start: (%f, %f)\n' % (self.x1, self.y1)
        info += 'End: (%f, %f)\n' % (self.x2, self.y2)
        info += 'Z Disp: %f' % self.z
        return info
class Face(Entity):
    """Defines a bound portion of three dimensional space (R^3) which
    has a finite area. Used to construct B-Rep Geometries."""

    def _add_parameters(self, parameters):
        """Parse the IGES type-510 parameter data.

        Index  Type     Name     Description
        1      Pointer  Surface  Underlying surface
        2      INT      N        Number of loops
        3      BOOL     Flag     Outer loop flag:
                                 True indicates Loop1 is outer loop.
                                 False indicates no outer loop.
        4      Pointer  Loop1    Pointer to first loop of the face
        3+N    Pointer  LoopN    Pointer to last loop of the face
        """
        self.surf_pointer = int(parameters[1])
        self.n_loops = int(parameters[2])
        self.outer_loop_flag = bool(parameters[3])
        # collect the N loop pointers that follow the outer-loop flag
        self.loop_pointers = [int(parameters[4 + i]) for i in range(self.n_loops)]

    @property
    def loops(self):
        """Loop entities bounding this face, resolved from their pointers."""
        return [self.iges.from_pointer(ptr) for ptr in self.loop_pointers]

    def __repr__(self):
        # BUG FIX: removed dead commented-out code copy-pasted from
        # CircularArc.__repr__ (center/start/end/z fields that do not
        # exist on a Face).
        info = 'IGES Type 510: Face\n'
        return info
class Loop(Entity):
"""Defines a loop, specifying a bounded face, for B-Rep
geometries."""
def _add_parameters(self, parameters):
"""Parameter Data
Index Type Name Description
1 INT N N Edges in loop
2 INT Type1 Type of Edge 1
0 = Edge
1 = Vertex
3 Pointer E1 First vertex list or edge list
4 INT Index1 Index of edge/vertex in E1
5 BOOL Flag1 Orientation flag -
True = Agrees with model curve
6 INT K1 Number of parametric space curves
7 BOOL ISO(1, 1) Isoparametric flag of first
parameter space curve
8 Pointer PSC(1, 1) First parametric space curve of E1
.
6+2K1 Pointer PSC(1, K1) Last parametric space curve of E1
7+2K1 INT Type2 Type of Edge 2
"""
self.parameters = parameters
self.n_edges = int(self.parameters[1])
self._edges = []
c = 0
for i in range(self.n_edges):
edge = {'type': int(self.parameters[2 + c]),
'e1': int(self.parameters[3 + c]), # first vertex or edge list
'index1': int(self.parameters[4 + | |
= "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> image_features = model.get_image_features(**inputs)
```""".format(
_CHECKPOINT_FOR_DOC
)
image_outputs = self.image_model(
pixel_values=pixel_values,
bool_masked_pos=bool_masked_pos,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
pooled_output = image_outputs[0] # last_hidden_state
image_features = self.image_projection(pooled_output)
return image_features
    @add_start_docstrings_to_model_forward(
        FLAVA_MODEL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
    )
    @replace_return_docstrings(output_type=FlavaModelOutput, config_class=FlavaConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        image_attention_mask: Optional[torch.Tensor] = None,
        skip_multimodal_encoder: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: bool = True,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FlavaModelOutput]:
        r"""
        Returns:
        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import FlavaProcessor, FlavaModel
        >>> model = FlavaModel.from_pretrained("facebook/flava-full")
        >>> processor = FlavaProcessor.from_pretrained("facebook/flava-full")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.contrastive_logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```
        """
        # fall back to the model config when the caller does not specify
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        # The multimodal projections below consume hidden_states[-1] of each
        # encoder, so hidden states must always be returned.
        if not output_hidden_states:
            raise ValueError("FLAVA model requires hidden states to work. Please set `output_hidden_states=True`")
        # Image branch: run the image encoder and project its final hidden
        # state into the multimodal space (only when pixel values are given).
        image_embeddings = None
        image_states = None
        image_mm_projection = None
        image_output = None
        if pixel_values is not None:
            image_output = self.image_model(
                pixel_values=pixel_values,
                bool_masked_pos=bool_masked_pos,
                attention_mask=image_attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            # index 0 = last_hidden_state, index 2 = all hidden states
            image_embeddings, image_states = image_output[0], image_output[2]
            # Note that these states don't use final layernorm in the transformer model
            image_mm_projection = self.image_to_mm_projection(image_states[-1])
        # Text branch: mirror of the image branch for token inputs.
        text_embeddings = None
        text_states = None
        text_mm_projection = None
        text_output = None
        if input_ids is not None:
            text_output = self.text_model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            text_embeddings, text_states = text_output[0], text_output[2]
            # Note that these states don't use final layernorm in the transformer model
            text_mm_projection = self.text_to_mm_projection(text_states[-1])
        # Fuse both projections through the multimodal encoder, unless the
        # caller skipped it or one of the modalities is missing.
        multimodal_embeddings = None
        multimodal_output = None
        if image_mm_projection is not None and text_mm_projection is not None and not skip_multimodal_encoder:
            # image patches first, then text tokens, along the sequence dim
            multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1)
            multimodal_output = self.multimodal_model(multimodal_input, return_dict=return_dict)
            multimodal_embeddings = multimodal_output[0]
        # tuple output mirrors the dataclass field order below
        if not return_dict:
            return (
                image_embeddings,
                image_output,
                text_embeddings,
                text_output,
                multimodal_embeddings,
                multimodal_output,
            )
        return FlavaModelOutput(
            image_embeddings=image_embeddings,
            image_output=image_output,
            text_embeddings=text_embeddings,
            text_output=text_output,
            multimodal_embeddings=multimodal_embeddings,
            multimodal_output=multimodal_output,
        )
class FlavaImageCodebookResPath(nn.Module):
    """Residual path of a codebook block.

    A ReLU/conv stack (three 3x3 convs through a ``out_size // 4`` bottleneck,
    then a 1x1 conv) mapping ``in_size`` channels to ``out_size`` channels.
    """

    def __init__(self, in_size: int, out_size: int, **kwargs):
        super().__init__()
        hid_size = out_size // 4
        layers = OrderedDict(
            [
                ("relu_1", nn.ReLU()),
                ("conv_1", nn.Conv2d(in_size, hid_size, kernel_size=3, padding=1)),
                ("relu_2", nn.ReLU()),
                ("conv_2", nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)),
                ("relu_3", nn.ReLU()),
                ("conv_3", nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)),
                ("relu_4", nn.ReLU()),
                ("conv_4", nn.Conv2d(hid_size, out_size, kernel_size=1, padding=0)),
            ]
        )
        self.path = nn.Sequential(layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.path(x)
class FlavaImageCodebookBlock(nn.Module):
    """Residual codebook block: shortcut plus a depth-scaled residual path."""

    def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs):
        super().__init__()
        # scale the residual branch down with total network depth
        self.post_gain = 1 / (num_layers**2)
        # project the shortcut with a 1x1 conv only when channels change
        self.id_path = (
            nn.Conv2d(in_size, out_size, kernel_size=1, padding=0)
            if in_size != out_size
            else nn.Identity()
        )
        self.res_path = FlavaImageCodebookResPath(in_size, out_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shortcut = self.id_path(x)
        residual = self.res_path(x)
        return shortcut + self.post_gain * residual
class FlavaImageCodebookLayerGroup(nn.Module):
    """A sequence of codebook blocks, optionally followed by 2x max-pooling."""

    def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool = True):
        super().__init__()
        stages = OrderedDict()
        for idx in range(num_blocks):
            # only the first block changes the channel count
            src_channels = in_size if idx == 0 else out_size
            stages[f"block_{idx + 1}"] = FlavaImageCodebookBlock(src_channels, out_size, num_layers)
        if use_pool:
            stages["pool"] = nn.MaxPool2d(kernel_size=2)
        self.group = nn.Sequential(stages)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.group(x)
# Inspired by DALLE Encoder in https://github.com/openai/DALL-E/blob/5be4b236bc3ade6943662354117a0e83752cc322/dall_e/encoder.py#L42
@add_start_docstrings(
    """
    The FLAVA's image codebook model inspired from DALL-E's original encoder. Outputs raw hidden states and can be used
    to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use
    `get_codebook_indices` to get image tokens for an image.
    """,
    FLAVA_START_DOCSTRING.format(config="FlavaImageCodebookConfig"),
)
class FlavaImageCodebook(FlavaPreTrainedModel):
    base_model_prefix = ""
    config_class = FlavaImageCodebookConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    def __init__(
        self,
        config: FlavaImageCodebookConfig,
        **kwargs: Any,
    ):
        super().__init__(config)
        self.config = config
        self.num_groups = config.num_groups
        self.input_channels = config.input_channels
        self.num_blocks_per_group = config.num_blocks_per_group
        self.hidden_size = config.hidden_size
        self.vocab_size = config.vocab_size
        # total number of blocks; passed to every group so each block can
        # scale its residual branch by depth
        num_layers = self.num_groups * self.num_blocks_per_group
        # head: ReLU + 1x1 conv producing one logit per vocabulary entry
        output_blocks = OrderedDict()
        output_blocks["relu"] = nn.ReLU()
        output_blocks["conv"] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0)
        # encoder: 7x7 stem conv, then four groups that progressively widen
        # channels (1x -> 2x -> 4x -> 8x hidden size); all but the last also
        # halve the spatial resolution via max-pooling
        blocks = OrderedDict()
        blocks["input"] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3)
        blocks["group_1"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size
        )
        blocks["group_2"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size
        )
        blocks["group_3"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size
        )
        blocks["group_4"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False
        )
        blocks["output"] = nn.Sequential(output_blocks)
        self.blocks = nn.Sequential(blocks)
        self.post_init()
        # optionally freeze every parameter; the codebook is used to
        # generate MIM labels (see the class docstring)
        if self.config.freeze:
            for param in self.parameters():
                param.requires_grad = False
    def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor:
        # NOTE(review): the string below is consumed by `.format(...)`, so it
        # is a plain expression statement, not a docstring (it never reaches
        # `__doc__`).
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Pixel values. Codebook pixel values can be obtained using [`FlavaFeatureExtractor`] by passing
                `return_codebook_pixels=True`. See [`FlavaFeatureExtractor.__call__`] for details.
        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import FlavaFeatureExtractor, FlavaImageCodebook
        >>> model = FlavaImageCodebook.from_pretrained("{0}")
        >>> feature_extractor = FlavaFeatureExtractor.from_pretrained("{0}")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = feature_extractor([image], return_codebook_pixels=True, return_tensors="pt")
        >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)
        >>> outputs = model.get_codebook_indices(**inputs)
        ```
        """.format(
            _CHECKPOINT_FOR_CODEBOOK_DOC
        )
        # logits over the image vocabulary; argmax over the channel
        # dimension yields one token id per spatial position
        z_logits = self.blocks(pixel_values)
        return torch.argmax(z_logits, axis=1)
    def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor:
        # softmax over the vocabulary (channel) dimension
        z_logits = self.blocks(pixel_values)
        return nn.Softmax(dim=1)(z_logits)
    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        # NOTE(review): as above, this string is a `.format` expression, not
        # a docstring.
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Pixel values. Codebook pixel values can be obtained using [`FlavaFeatureExtractor`] by passing
                `return_codebook_pixels=True`. See [`FlavaFeatureExtractor.__call__`] for details.
        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import FlavaFeatureExtractor, FlavaImageCodebook
        >>> model = FlavaImageCodebook.from_pretrained("{0}")
        >>> feature_extractor = FlavaFeatureExtractor.from_pretrained("{0}")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = feature_extractor([image], return_codebook_pixels=True, return_tensors="pt")
        >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)
        >>> outputs = model(**inputs)
        >>> print(outputs.shape)
        (1, 196)
        ```
        """.format(
            _CHECKPOINT_FOR_CODEBOOK_DOC
        )
        # validate the NCHW input before running the encoder
        if len(pixel_values.shape) != 4:
            raise ValueError(f"input shape {pixel_values.shape} is not 4d")
        if pixel_values.shape[1] != self.input_channels:
            raise ValueError(f"input has {pixel_values.shape[1]} channels but model built for {self.input_channels}")
        return self.blocks(pixel_values)
class FlavaPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm transform applied ahead of a
    prediction decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # the activation can be given as a string name or as a callable
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.dense(hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)
class FlavaMaskedPredictionHead(nn.Module):
    """Masked-prediction head: a transform followed by a bias-tied linear
    decoder over the vocabulary."""

    def __init__(self, config, weight=None):
        super().__init__()
        self.config = config
        self.transform = FlavaPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # optionally tie the decoder weight to an embedding matrix
        if weight is not None:
            self.decoder.weight = weight
        # Need a link between the two variables so that the bias is correctly
        # resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, x):
        return self.decoder(self.transform(x))
class FlavaITMHead(nn.Module):
    """Image-text matching head: pools the sequence and emits two logits."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pooler = FlavaPooler(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, x):
        pooled = self.pooler(x)
        return self.seq_relationship(pooled)
class FlavaGlobalContrastiveHead(nn.Module):
    """Global (cross-device) contrastive logits between image and text embeddings.

    When ``torch.distributed`` is not initialized the local batch is used
    as-is; otherwise embeddings are gathered from all ranks so every device
    contrasts its local batch against the global batch.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.global_backprop_contrastive = config.global_backprop_contrastive

    def forward(self, image_embeddings, text_embeddings, logit_scale):
        temperature = torch.exp(logit_scale)
        if not torch.distributed.is_available() or not torch.distributed.is_initialized():
            # single-process: contrast within the local batch only
            labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device)
            image_embeddings_all = [image_embeddings]
            text_embeddings_all = [text_embeddings]
        else:
            local_batch_size = image_embeddings.size(0)
            world_size = torch.distributed.get_world_size()
            if self.global_backprop_contrastive:
                # BUG FIX: `all_gather_with_backprop` does not exist in torch;
                # the differentiable all-gather is
                # `torch.distributed.nn.functional.all_gather`.
                image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings)
                text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings)
            else:
                # BUG FIX: the zeros_like templates were swapped (image buffers
                # built from text embeddings and vice versa); that only worked
                # by accident when both had identical shapes.
                image_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)]
                text_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)]
                torch.distributed.all_gather(image_embeddings_all, image_embeddings)
                torch.distributed.all_gather(text_embeddings_all, text_embeddings)
            # each rank's positives sit at an offset of rank * local_batch_size
            labels = local_batch_size * torch.distributed.get_rank() + torch.arange(
                local_batch_size, device=image_embeddings.device
            )
        image_embeddings_all = torch.cat(image_embeddings_all)
        text_embeddings_all = torch.cat(text_embeddings_all)
        logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature
        logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature
        return logits_per_image, logits_per_text, labels
@add_start_docstrings(
"""
The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.
""",
FLAVA_START_DOCSTRING.format(config="FlavaConfig") + FLAVA_PRETRAINING_START_DOCSTRING_EXTRA,
)
class FlavaForPreTraining(FlavaPreTrainedModel):
def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module] = None):
super().__init__(config)
self.flava = FlavaModel(config)
self.image_codebook = image_codebook
if self.image_codebook is None and config.init_codebook:
self.image_codebook = FlavaImageCodebook(config.image_codebook_config)
# Levarage text and image encoder configs to create the masked
# head since it has the right vocab
self.mim_head = FlavaMaskedPredictionHead(config.image_config)
self.mlm_head = FlavaMaskedPredictionHead(config.text_config)
self.itm_head = FlavaITMHead(config)
self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config)
self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config)
self.global_contrastive_head = FlavaGlobalContrastiveHead(config)
self.image_vocab_size = config.image_config.vocab_size
self.text_vocab_size = config.text_config.vocab_size
self.mlm_weight = config.mlm_weight
self.mim_weight = config.mim_weight
self.global_contrastive_weight = config.global_contrastive_weight
self.ce_ignore_index = config.ce_ignore_index
self.itm_weight = config.itm_weight
self.mmm_image_weight = config.mmm_image_weight
self.mmm_text_weight = config.mmm_text_weight
self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder
self.post_init()
def _resize_to_2d(self, x: torch.Tensor):
if x.dim() > 2:
x = x.view(x.size(0), -1)
return x
@add_start_docstrings_to_model_forward(
FLAVA_PRETRAINING_INPUTS_DOCSTRING.format("batch_size, text_seq_len", "batch_size, image_num_patches")
)
@replace_return_docstrings(output_type=FlavaForPreTrainingOutput, config_class=FlavaConfig)
def forward(
self,
| |
have taken it up, we have lunatics coming out of the woodwork to confess.",
"Every man-jack in the east end pointing the finger at everyone else.",
"Were they drained of blood? Excuse me? Were they drained of blood? No.",
"How old was she, the daughter? Seven.",
"What was her name? Charlotte.",
"Charlotte.",
"Is it the Ripper, back again? Unlikely.",
"He only did whores.",
"When the next one occurs, I shall need to see the crime scene immediately.",
"Will there be a next time? Assuredly.",
"Thank you for your time, Inspector.",
"When I have information to share with you, I promise I shall.",
"I'll tell you one thing though, and you can take it to heart, if you don't change your tactics, you'll never stop him.",
"You see, you're hunting for a man; you need to start hunting for a beast.",
"Would you remove your corset, <NAME>? There's blood.",
"Is it consumption? Shall I go, sir? Not unless you want to.",
"May I? Sir, you shouldn't.",
"I don't know the word.",
"I mean, my sickness, sir.",
"Expose your plates, Mr.",
"Frawley.",
"You don't mind? No.",
"I've never fucked a dying creature before.",
"Do you feel things more deeply, I wonder? Do you feel pain? - Do you? - Find out.",
"Excellent.",
"Stamp this for me, will you? Cable for you, sir.",
"Ethan, it's time for you to come home.",
"You can't run away forever.",
"I can handle your legal problems.",
"The Federal Marshall has been paid.",
"Stop your foolishness and do as I instruct.",
"Your father.",
"oh, the times wuz hard and the wages low leave her, Johnny, leave her but now once more, ashore we'll go and it's time for us to leave her Do you recognize that? Boa - Boat.",
"- Boat.",
"Boat.",
"Do you recognize that? That animal? Whale.",
"Yes, it's a whale.",
"A sea creature.",
"Whale.",
"Whale.",
"Hunting.",
"It's all right.",
"Shhh calm down.",
"Did you did you kill a whale? You know the word.",
"- Proteus - Perhaps this was your trade.",
"You could've been a whaler as I am a doctor.",
"You needn't feel ashamed of that.",
"Kill.",
"Yes, I know.",
"As you grow up, you'll learn we all do things which cause us shame.",
"Sins we have committed.",
"Thank you, Proteus.",
"Victor.",
"Leave her but now once more ashore we'll go and it's time for us to leave her Sir Malcolm! Sir Malcolm.",
"Miss Ives.",
"I'm so glad you could attend.",
"Thank you for coming to my wee fête.",
"Thank you for having us.",
"I have the additional photographs in my coach.",
"How forthright he is.",
"Must have served him well in Darkest Africa.",
"Uh, my wife is somewhere about, close by the gin, I would hazard.",
"In the parlor, perhaps.",
"Why don't you make an introduction? Of course, Sir Malcolm.",
"Come along.",
"Let's just stroll, see what wickedness we can find.",
"Come! You're too sweet.",
"My name is <NAME>.",
"<NAME>.",
"It's a pleasure, <NAME>.",
"I couldn't help but notice your skepticism.",
"Am I skeptical? About the room.",
"Rather aggressive in the chinoiserie and geographically capricious to say the least.",
"In this one room, there's Japanese, Siamese, Egyptian, Balinese, and something I take to be last season's Panto of Aladdin.",
"Are you a friend of Mr.",
"Lyle's? Never met him before tonight.",
"It was more of a random invitation.",
"Do you get many of those? Entirely.",
"You could say 'no.'",
" I never say 'no.'",
" I wasn't skeptical about the room.",
"What then? Shall I guess? Have on, Mr.",
"Gray.",
"You do not belong here.",
"Even less than I.",
"You are not frivolous.",
"Your eye is careful and appraising.",
"This is not a careful room, although there is much to appraise.",
"That can divert you for only so long.",
"You do not like it here.",
"You are closed to it.",
"Yet you're the only woman in this house not wearing gloves.",
"Your hands want to touch, but your head wants to appraise.",
"Your heart is torn between the two.",
"You were skeptical because you thought This was going to be a wasted evening, but now you're not so sure.",
"Ladies and gentlemen, your attention please.",
"Your attention pl please, you must pay attention to me.",
"My friends, our guest of honor has arrived.",
"May I present the renowned Madame Kali! So come along, come along around the table.",
"Those of you without fear, we need at least eight people of courage.",
"Well done, well done, well done.",
"Excellent! Sit.",
"Sir Malcolm, take a seat.",
"Splendid.",
"Sit.",
"Sit.",
"Gentlemen, please remove your jewelry.",
"What's going on? Ladies, please remove your gloves.",
"I believe we're about to commune with the spirits.",
"This is such fun.",
"Please join hands.",
"Join hands I ask forbearance.",
"I ask you to suspend your disbelief and imagine your minds floating in the darkness of time.",
"Let your imaginations be liberate and roam with me back in time to the ancient seas.",
"Back in time to the time before when the spirits walked, when the sun was new and the old gods walked.",
"I call forth Mut, mother goddess.",
"I call to the speakers of the dead.",
"Come to me, come to me.",
"What summoned me? I speak for the dead.",
"For the undying.",
"There's another here.",
"There's another here.",
"What does she mean? Amunet Amunet Amunet.",
"Amunet.",
"Serpent.",
"Hidden one.",
"Know your master.",
"Your lover.",
"Your master.",
"Father.",
"Father, mine, let me come with you.",
"What a ripping time we'll have.",
"Let me come with you.",
"It'll be an adventure.",
"You'll teach me! I'll prove myself a proper explorer.",
"Peter loves you, Father.",
"But Father, if the porters go, how are we to survive? I'm not frightened, I'm not.",
"What an adventure.",
"It's so green, so beautiful.",
"But the porters are dying and I can't go on, I'm sick.",
"Is it the dysentery? I'm bleeding.",
"Oh God, I'm bleeding.",
"I'm shitting blood now.",
"I have no more clean trousers.",
"I'm sorry.",
"I'll stay at base camp.",
"You go.",
"Leave me.",
"Will you name a mountain after me? Are you proud of me? Go! Goodbye.",
"I'll see you soon, Father.",
"Father.",
"Cold.",
"close the window, my true love and gently drops the rain I have never had but one true love I am weak.",
"Can't feel my hands.",
"There's no water.",
"I can't swallow.",
"You knew I was dying didn't you, Father? Did you name a mountain after me? Oh! Amunet, girl? No, much older.",
"Father, Mina's waiting.",
"No! Shhh I wonder I wonder when was the moment you knew you wanted to fuck her? Why were you not more discrete? Vanessa heard you.",
"The two of you.",
"She heard you fucking and she was curious.",
"She walked closer, she rounded the corner and discovered you, the two of you, you know, fucking.",
"Fucking her cunt.",
"Vanessa saw that.",
"Fucking animal.",
"You man.",
"You animal.",
"You man.",
"You animal.",
"Betrayer! Creature! I look into his eyes and they are red with blood like from Peter's ass.",
"His lips are red like blood from her cunt when you fucked her.",
"His teeth are sharp like yours when you bit her cunt and it's so cold and dark and wet like the jungle.",
"Like tears, I am crying.",
"I am so afraid, Father.",
"Find me! Find me! Save me! Save me!",
"Breakfast, Miss Croft? Our usual repast? I won't say 'no.'",
" And call me Bronagh, will ya? The way you say that is ridiculous.",
"<NAME>.",
"There's no 'Z' in it.",
"Just being laconic, I suppose.",
"- Oh, like all Americans.",
"- Precisely.",
"So how goes the job hunt? Employment on your feet is hard to come by.",
"I've found a bit of a sideline though.",
"Photographic subject, don't you know.",
"For the calendars and such? If you keep your calendar in a whorehouse.",
"The pictures are a tad risque.",
"What will the bishop think? And what about you? Don't you need to be pursuing work? I have work.",
"I am a maritime supervisor.",
"I sit here and make sure all the boats get past okay.",
"Almost had a collision last night.",
"It was thrilling.",
"- You eaten? - No.",
"Let me buy us some real breakfast.",
"I've got to sleep, but thanks.",
"Well, how about dinner tonight? You're not mistaking me for a sweet little debutante at the summer fair, are you? That kind never did take my interest.",
"Oh, you like things back-alley.",
"I like things to be what they are.",
"Then dinner it is.",
"Anywhere but here.",
"Well, I don't know London much.",
"I do.",
"I'm not asking her back.",
"Although she put Madame Kali to shame, I must say.",
"It was a riveting performance.",
"The dilettantes among the company were outraged, but if one is to engage with the primordial forces of darkness, one must expect a bit of social awkwardness.",
"Although the language tsk-tsk-tsk-tsk! My apologies.",
"My wife mortified, although the gin helped.",
"All right, then, | |
<gh_stars>1-10
import asyncio
import logging
import re
from cogs.utils.checks import *
from cogs.utils.mojang import *
log = logging.getLogger(__name__)
def list_factory(data):
    """Return the first element of each row in *data*.

    Typically used to flatten single-column database query results
    (sequences of rows) into a plain list of values.

    :param data: iterable of indexable rows (e.g. asyncpg Records/tuples)
    :return: list containing ``row[0]`` for every row
    """
    # A comprehension replaces the manual append loop (same order, same items).
    return [row[0] for row in data]
class Hypixel(commands.Cog):
def __init__(self, bot):
    # Keep a handle on the bot so commands can reach its pool and helpers.
    self.bot = bot
@commands.command()
async def description(self, ctx, *, text):
    """Sets up a description which is shown on the hypixel command."""
    # Owners bypass the donor check; everyone else must have a donator row.
    is_owner = await self.bot.is_owner(ctx.author)
    if not is_owner:
        donator = await self.bot.pool.fetchrow('select * from donators where userid=$1', ctx.author.id)
        if not donator:
            return await ctx.send("You don\'t seem to qualify to use this command.\nThis feature is limited to donors only.")
    if len(text) > 150:
        return await ctx.send('Oops, that description looks bigger than what I can handle. (Max. 150)')
    # Upsert: update the existing donor row, otherwise insert a fresh one.
    await self.bot.pool.execute("""with upsert as (update donators set phrase=$2 where userid=$1 returning *)
        insert into donators (userid, phrase) select $1, $2 where not exists (select * from upsert)""", ctx.author.id, text)
    await ctx.send(f'Your description has successfully been set to "{text}"')
@commands.group(invoke_without_command = True, name = 'hypixel', aliases=['h'], case_insensitive=True)
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cHypixel(self, ctx, *, user: str = ''):
    """Type `[p]help hypixel` for more information about the command.
    Use hypixel [gamemode] [user] to get game specific stats"""
    # Delegate to the shared hidden implementation of the base stats command.
    hidden = self.bot.get_command("HiddenHypixel")
    return await ctx.invoke(hidden, user)
### Bedwars ###
@cHypixel.group(name ='bedwars', aliases = ['bw', 'bedw', 'bwars'], invoke_without_command=True, case_insensitive=True)
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cBedwars(self, ctx, user: str = ''):
    # Forward to the shared "HiddenBedwars" implementation.
    hidden = self.bot.get_command("HiddenBedwars")
    return await ctx.invoke(hidden, user)
@cBedwars.command(name='compare', aliases = ["c"])
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cBedwarsCompare(self, ctx, user1=None, user2=None):
    # Forward both player names to the shared comparison implementation.
    hidden = self.bot.get_command("HiddenBedwarsCompare")
    return await ctx.invoke(hidden, user1, user2)
### The Pit ###
@cHypixel.group(name ='pit', invoke_without_command=True)
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPit(self, ctx, user: str = ''):
    # Forward to the shared "HiddenPit" implementation.
    hidden = self.bot.get_command("HiddenPit")
    return await ctx.invoke(hidden, user)
@cPit.command(name='contracts')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitContracts(self, ctx, user: str = ''):
    # Forward to the shared "HiddenPitContract" implementation.
    hidden = self.bot.get_command("HiddenPitContract")
    return await ctx.invoke(hidden, user)
@cPit.command(name='lastkills')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitKills(self, ctx, user: str = ''):
    # Forward to the shared "HiddenPitKills" implementation.
    hidden = self.bot.get_command("HiddenPitKills")
    return await ctx.invoke(hidden, user)
@cPit.command(name='position')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitPosition(self, ctx, user: str = ''):
    # Forward to the shared "HiddenPitPosition" implementation.
    hidden = self.bot.get_command("HiddenPitPosition")
    return await ctx.invoke(hidden, user)
@cPit.command(name='progress')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitProgress(self, ctx, user: str = ''):
    # Forward to the shared "HiddenPitProgress" implementation.
    hidden = self.bot.get_command("HiddenPitProgress")
    return await ctx.invoke(hidden, user)
@cPit.group(name='top')
async def cPitTop(self, ctx):
    """Shows the leaderboard for the specified game"""
    # A bare `pit top` (no subcommand) just shows the group's help page.
    if ctx.invoked_subcommand is not None:
        return
    return await ctx.send_help(ctx.command)
@cPitTop.command(name='exp')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopExp(self, ctx):
    # Forward to the shared "HiddenPitTopExp" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopExp")
    return await ctx.invoke(hidden)
@cPitTop.command(name='gold')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopCash(self, ctx):
    # Forward to the shared "HiddenPitTopCash" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopCash")
    return await ctx.invoke(hidden)
@cPitTop.command(name='playtime')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopPlaytime(self, ctx):
    # Forward to the shared "HiddenPitTopPlaytime" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopPlaytime")
    return await ctx.invoke(hidden)
@cPitTop.command(name='kills')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopKills(self, ctx):
    # Forward to the shared "HiddenPitTopKills" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopKills")
    return await ctx.invoke(hidden)
@cPitTop.command(name='renown')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopRenown(self, ctx):
    # Forward to the shared "HiddenPitTopRenown" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopRenown")
    return await ctx.invoke(hidden)
@cPitTop.command(name='clicks')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopClicks(self, ctx):
    # Forward to the shared "HiddenPitTopClicks" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopClicks")
    return await ctx.invoke(hidden)
@cPitTop.command(name='messages')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopMessages(self, ctx):
    # Forward to the shared "HiddenPitTopMessages" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopMessages")
    return await ctx.invoke(hidden)
@cPitTop.command(name='contracts')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopContracts(self, ctx):
    # Forward to the shared "HiddenPitTopContracts" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopContracts")
    return await ctx.invoke(hidden)
@cPitTop.command(name='bounty')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopBounty(self, ctx):
    # Forward to the shared "HiddenPitTopBounty" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopBounty")
    return await ctx.invoke(hidden)
@cPitTop.command(name='deaths')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopDeaths(self, ctx):
    # Forward to the shared "HiddenPitTopDeaths" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopDeaths")
    return await ctx.invoke(hidden)
@cPitTop.command(name='streak')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def cPitTopStreak(self, ctx):
    # Forward to the shared "HiddenPitTopStreak" leaderboard implementation.
    hidden = self.bot.get_command("HiddenPitTopStreak")
    return await ctx.invoke(hidden)
### Murder Mystery ###
@cHypixel.command(name ='murdermystery', aliases = ['mm', 'murderm', 'mmystery'])
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_mmystery(self, ctx, user: str = ''):
    # Forward to the shared "HiddenMurderMystery" implementation.
    hidden = self.bot.get_command("HiddenMurderMystery")
    return await ctx.invoke(hidden, user)
### Arcade ###
@cHypixel.command(name ='arcade')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_arcade(self, ctx, user: str = ''):
    # Forward to the shared "HiddenArcade" implementation.
    hidden = self.bot.get_command("HiddenArcade")
    return await ctx.invoke(hidden, user)
### Blitz Survival Games ###
@cHypixel.command(name ='blitz', aliases = ['survivalgames', 'hungergames', 'bsg'])
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_blitz(self, ctx, user: str = ''):
    # Forward to the shared "HiddenBlitz" implementation.
    hidden = self.bot.get_command("HiddenBlitz")
    return await ctx.invoke(hidden, user)
### Cops And Crims ###
@cHypixel.command(name ='copsandcrims', aliases = ['cc', 'copsncrims', 'cvc'])
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_copsncrims(self, ctx, user: str = ''):
    # Forward to the shared "HiddenCopsAndCrims" implementation.
    hidden = self.bot.get_command("HiddenCopsAndCrims")
    return await ctx.invoke(hidden, user)
### Crazy Walls ###
@cHypixel.command(name ='crazywalls', aliases = ['cw'])
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_crazywalls(self, ctx, user: str = ''):
    # Forward to the shared "HiddenCrazyWalls" implementation.
    hidden = self.bot.get_command("HiddenCrazyWalls")
    return await ctx.invoke(hidden, user)
### Mega Walls ###
@cHypixel.command(name ='megawalls', aliases = ['mw'])
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_megawalls(self, ctx, user:str = ''):
    # Forward to the shared "HiddenMegaWalls" implementation.
    # Fix: every sibling game command carries the 1-per-5s per-user cooldown;
    # this one was missing it, leaving the command without rate limiting.
    return await ctx.invoke(self.bot.get_command("HiddenMegaWalls"), user)
### The Walls ###
@cHypixel.command(name ='walls')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_walls(self, ctx, user: str = ''):
    # Forward to the shared "HiddenWalls" implementation.
    hidden = self.bot.get_command("HiddenWalls")
    return await ctx.invoke(hidden, user)
### SkyClash ###
# @cHypixel.command(name ='skyclash', aliases=['sc'])
# @commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
# async def _hypixel_skyclash(self, ctx, user:str = ''):
#     return await ctx.invoke(self.bot.get_command("HiddenSkyClash"), user)
### SkyWars ###
@cHypixel.command(name ='skywars', aliases = ['sw', 'skyw', 'swars'], no_pm = True)
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_skywars(self, ctx, user: str = ''):
    # Forward to the shared "HiddenSkyWars" implementation.
    hidden = self.bot.get_command("HiddenSkyWars")
    return await ctx.invoke(hidden, user)
### Smash Heroes ###
@cHypixel.command(name ='smashheroes', aliases=['sh'])
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
@commands.is_owner()
async def _hypixel_smashheroes(self, ctx, user: str = ''):
    # NOTE(review): unlike its siblings this command is gated to the bot
    # owner (is_owner) — presumably still being tested; confirm intent.
    hidden = self.bot.get_command("HiddenSmashHeroes")
    return await ctx.invoke(hidden, user)
### Speed UHC ###
@cHypixel.command(name ='speeduhc', aliases=['suhc'])
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_speeduhc(self, ctx, user: str = ''):
    # Forward to the shared "HiddenSpeedUHC" implementation.
    hidden = self.bot.get_command("HiddenSpeedUHC")
    return await ctx.invoke(hidden, user)
### UHC ###
@cHypixel.command(name ='uhc')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_uhc(self, ctx, user: str = ''):
    # Forward to the shared "HiddenUHC" implementation.
    hidden = self.bot.get_command("HiddenUHC")
    return await ctx.invoke(hidden, user)
### Duels ###
@cHypixel.command(name ='duels')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_duels(self, ctx, user: str = ''):
    # Forward to the shared "HiddenDuels" implementation.
    hidden = self.bot.get_command("HiddenDuels")
    return await ctx.invoke(hidden, user)
### Paintball ###
@cHypixel.command(name='paintball')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_paintball(self, ctx, user: str = ''):
    # Forward to the shared "HiddenPaintball" implementation.
    hidden = self.bot.get_command("HiddenPaintball")
    return await ctx.invoke(hidden, user)
### Quake ###
@cHypixel.command(name='quake')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_quake(self, ctx, user: str = ''):
    # Forward to the shared "HiddenQuake" implementation.
    hidden = self.bot.get_command("HiddenQuake")
    return await ctx.invoke(hidden, user)
### VampireZ ###
@cHypixel.command(name='vampirez')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_vampirez(self, ctx, user: str = ''):
    # Forward to the shared "HiddenVampireZ" implementation.
    hidden = self.bot.get_command("HiddenVampireZ")
    return await ctx.invoke(hidden, user)
### BuildBattle ###
@cHypixel.command(name='buildbattle')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_buildbattle(self, ctx, user: str = ''):
    # Forward to the shared "HiddenBuildBattle" implementation.
    hidden = self.bot.get_command("HiddenBuildBattle")
    return await ctx.invoke(hidden, user)
### TnT Games ###
@cHypixel.command(name='tntgames')
@commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
async def _hypixel_tntgames(self, ctx, user: str = ''):
    # Forward to the shared "HiddenTnTGames" implementation.
    hidden = self.bot.get_command("HiddenTnTGames")
    return await ctx.invoke(hidden, user)
### Verify ###
@commands.command(name='verify')
@commands.cooldown(rate = 1, per = 10, type = commands.BucketType.user)
async def _verify(self, ctx, *, InGameName:str=None):
    """Verifies your Minecraft account so you don't need to enter your username to check your stats"""
    # Forward the in-game name to the shared hidden implementation.
    hidden = self.bot.get_command("HiddenVerify")
    return await ctx.invoke(hidden, InGameName)
@commands.command(name='unverify')
@commands.cooldown(rate = 1, per = 10, type = commands.BucketType.user)
async def _unverify(self, ctx):
    """Unverifies your Minecraft account."""
    # NOTE(review): "HiddenUnerify" looks like a typo of "HiddenUnverify";
    # the hidden command is registered elsewhere, so confirm its actual
    # name before renaming this lookup string.
    hidden = self.bot.get_command("HiddenUnerify")
    return await ctx.invoke(hidden)
# @commands.group('leaderboard', invoke_without_command = True, aliases=['lb'], case_insensitive=True)
# @has_voted()
# async def _leaderboard(self, ctx):
# """Shows the leaderboard for the specified game"""
# if ctx.invoked_subcommand is None: return await ctx.send_help(ctx.command)
#
# @_leaderboard.group('bedwars', invoke_without_command=True, aliases=['bw'], case_insensitive=True)
# @has_voted()
# async def bw_lb(self, ctx):
# if ctx.invoked_subcommand is None: return await ctx.send_help(ctx.command)
#
# @bw_lb.command('level', aliases=['lvl'])
# @commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
# @has_voted()
# async def bw_lb_level(self, ctx):
# """Returns Bedwars Level Leaderboard"""
# start = datetime.datetime.now()
# permissions = ctx.channel.permissions_for(ctx.me)
# if not permissions.send_messages: return await ctx.author.send(f'The bot doesn\'t have permissions to send messages in <#{ctx.channel.id}>')
# if not permissions.embed_links: return await ctx.send('Bot does not have embed links permission.')
# if not permissions.add_reactions: return await ctx.send('Bot does not have add reactions permission.')
# if not permissions.read_message_history: return await ctx.send('Bot does not have permissions to read message history.')
# leaderboard = hypixel.Leaderboards().Bedwars()
# leaders = hypixel.GetUsers(leaderboard.bedwars_level()['leaders'])
# top = '\n'.join([f'{i+1}. {escape(m)}' for i, m in enumerate(leaders)])
# em = discord.Embed(title='Bedwars Level Leaderboard')
# em.description = top
# message = await ctx.send(embed = em)
# for i, leader in enumerate(leaders):
# start = datetime.datetime.now()
# try:
# player = self.bot.hypixel.Player(leader)
# stats = round(float(player.getBedwarsLevel()))
# except: stats = 'Non'
# leaders[i] = f'{leader} - {stats}'
# em.description = '\n'.join([f'{i+1}. {escape(m)}' for i, m in enumerate(leaders)])
# time_taken = (datetime.datetime.now() - start).total_seconds()
# if time_taken > 0.5: await message.edit(content=None, embed = em)
# await message.edit(content=None, embed=em)
# time_taken = datetime.datetime.now() - start
# logging.info(f'{ctx.message.content} - {time_taken.total_seconds()}s [{ctx.message.author.id}]')
#
# @bw_lb.command('wins')
# @commands.cooldown(rate=1, per=5, type=commands.BucketType.user)
# @has_voted()
# async def bw_lb_wins(self, ctx, default:str = 'overall'):
# """Returns Bedwars Wins Leaderboard. Modes: overall/weekly"""
# start = datetime.datetime.now()
# permissions = ctx.channel.permissions_for(ctx.me)
# if not permissions.send_messages: return await ctx.author.send(f'The bot doesn\'t have permissions to send messages in <#{ctx.channel.id}>')
# if not permissions.embed_links: return await ctx.send('Bot does not have embed links permission.')
# if not permissions.add_reactions: return await ctx.send('Bot does not have add reactions permission.')
# if not permissions.read_message_history: return await ctx.send('Bot does not have permissions to read message history.')
# leaderboard = hypixel.Leaderboards().Bedwars()
# if default == 'weekly':
# leaders = hypixel.GetUsers(leaderboard.wins_1()['leaders'])
# default = 'Weekly'
# else:
# leaders = hypixel.GetUsers(leaderboard.wins()['leaders'])
# default = 'Overall'
# top = '\n'.join([f'{i+1}. {escape(m)}' for i, m in enumerate(leaders)])
# em = discord.Embed(title=f'Bedwars {default} Wins Leaderboard')
# em.description = top
# message = await ctx.send(embed = em)
# for i, leader in enumerate(leaders):
# start = datetime.datetime.now()
# try:
# player = self.bot.hypixel.Player(leader)
# stats | |
\
'<tr>' + \
'<td>Supplies expense</td>' + \
'<td>' + r22c1 + '</td>' + \
'<td>' + r22c2 + '</td>' + \
'<td>' + r22c3 + '</td>' + \
'<td>' + r22c4 + '</td>' + \
'<td>' + r22c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Travel expense</td>' + \
'<td>' + r23c1 + '</td>' + \
'<td>' + r23c2 + '</td>' + \
'<td>' + r23c3 + '</td>' + \
'<td>' + r23c4 + '</td>' + \
'<td>' + r23c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Other expense</td>' + \
'<td>' + r24c1 + '</td>' + \
'<td>' + r24c2 + '</td>' + \
'<td>' + r24c3 + '</td>' + \
'<td>' + r24c4 + '</td>' + \
'<td>' + r24c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Incomes totals</td>' + \
'<td>' + r25c1 + '</td>' + \
'<td>' + r25c2 + '</td>' + \
'<td>' + r25c3 + '</td>' + \
'<td>' + r25c4 + '</td>' + \
'<td>' + r25c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Expenses totals</td>' + \
'<td>' + r26c1 + '</td>' + \
'<td>' + r26c2 + '</td>' + \
'<td>' + r26c3 + '</td>' + \
'<td>' + r26c4 + '</td>' + \
'<td>' + r26c5 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Total</td>' + \
'<td>' + r27c1 + '</td>' + \
'<td>' + r27c2 + '</td>' + \
'<td>' + r27c3 + '</td>' + \
'<td>' + r27c4 + '</td>' + \
'<td>' + r27c5 + '</td>' + \
'</tr>' + \
'</tbody>' + \
'</table>'
body += '<br>' + \
'</div>' + \
'</div>' + \
'</div>' + \
'<br>' + \
'<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'</body>' + \
'</html>'
options = {
'page-size': 'A4',
'header-center': 'Department budget',
'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']',
'footer-right': '[page] sur [topage]',
'encoding': 'UTF-8',
'no-outline': None,
'custom-header': [
('Accept-Encoding', 'pdf')
]
}
# path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe'
# config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
# output = pdfkit.from_string(body, output_path=False, configuration=config, options=options)
output = pdfkit.from_string(body, output_path=False, options=options)
response = HttpResponse(output, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="department_budget.pdf"'
return response
def twelve_month_operating_budget(request):
    """Render the empty twelve-month operating budget form page."""
    template_name = 'reporting/twelve_month_operating_budget.html'
    return render(request, template_name)
def generate_html_to_pdf_twelve_month_operating_budget(request):
company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c4 = request.POST.get('r1c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c5 = request.POST.get('r1c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c6 = request.POST.get('r1c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c7 = request.POST.get('r1c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c8 = request.POST.get('r1c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c9 = request.POST.get('r1c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c10 = request.POST.get('r1c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c11 = request.POST.get('r1c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c12 = request.POST.get('r1c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c13 = request.POST.get('r1c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c5 = request.POST.get('r2c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c6 = request.POST.get('r2c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c7 = request.POST.get('r2c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c8 = request.POST.get('r2c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c9 = request.POST.get('r2c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c10 = request.POST.get('r2c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c11 = request.POST.get('r2c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c12 = request.POST.get('r2c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c13 = request.POST.get('r2c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c5 = request.POST.get('r3c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c6 = request.POST.get('r3c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c7 = request.POST.get('r3c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c8 = request.POST.get('r3c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c9 = request.POST.get('r3c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c10 = request.POST.get('r3c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c11 = request.POST.get('r3c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c12 = request.POST.get('r3c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c13 = request.POST.get('r3c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c5 = request.POST.get('r4c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c6 = request.POST.get('r4c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c7 = request.POST.get('r4c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c8 = request.POST.get('r4c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c9 = request.POST.get('r4c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c10 = request.POST.get('r4c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c11 = request.POST.get('r4c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c12 = request.POST.get('r4c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c13 = request.POST.get('r4c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c5 = request.POST.get('r5c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c6 = request.POST.get('r5c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c7 = request.POST.get('r5c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c8 = request.POST.get('r5c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c9 = request.POST.get('r5c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c10 = request.POST.get('r5c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c11 = request.POST.get('r5c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c12 = request.POST.get('r5c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c13 = request.POST.get('r5c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c5 = request.POST.get('r6c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c6 = request.POST.get('r6c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c7 = request.POST.get('r6c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c8 = request.POST.get('r6c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c9 = request.POST.get('r6c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c10 = request.POST.get('r6c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c11 = request.POST.get('r6c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c12 = request.POST.get('r6c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c13 = request.POST.get('r6c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c5 = request.POST.get('r7c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c6 = request.POST.get('r7c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c7 = request.POST.get('r7c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c8 = request.POST.get('r7c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c9 = request.POST.get('r7c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c10 = request.POST.get('r7c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c11 = request.POST.get('r7c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c12 = request.POST.get('r7c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c13 = request.POST.get('r7c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c6 = request.POST.get('r8c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c7 = request.POST.get('r8c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c8 = request.POST.get('r8c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c9 = request.POST.get('r8c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c10 = request.POST.get('r8c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c11 = request.POST.get('r8c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c12 = request.POST.get('r8c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c13 = request.POST.get('r8c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c5 = request.POST.get('r9c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c6 = request.POST.get('r9c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c7 = request.POST.get('r9c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c8 = request.POST.get('r9c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c9 = | |
GL/glext.h:693
# ARB_texture_mirrored_repeat (GL/glext.h:696)
GL_MIRRORED_REPEAT_ARB = 33648 # GL/glext.h:697
# ARB_depth_texture (GL/glext.h:700)
GL_DEPTH_COMPONENT16_ARB = 33189 # GL/glext.h:701
GL_DEPTH_COMPONENT24_ARB = 33190 # GL/glext.h:702
GL_DEPTH_COMPONENT32_ARB = 33191 # GL/glext.h:703
GL_TEXTURE_DEPTH_SIZE_ARB = 34890 # GL/glext.h:704
GL_DEPTH_TEXTURE_MODE_ARB = 34891 # GL/glext.h:705
# ARB_shadow (GL/glext.h:708)
GL_TEXTURE_COMPARE_MODE_ARB = 34892 # GL/glext.h:709
GL_TEXTURE_COMPARE_FUNC_ARB = 34893 # GL/glext.h:710
GL_COMPARE_R_TO_TEXTURE_ARB = 34894 # GL/glext.h:711
# ARB_shadow_ambient (GL/glext.h:714)
GL_TEXTURE_COMPARE_FAIL_VALUE_ARB = 32959 # GL/glext.h:715
# ARB_window_pos (GL/glext.h:718)
# ARB_vertex_program (GL/glext.h:721)
GL_COLOR_SUM_ARB = 33880 # GL/glext.h:722
GL_VERTEX_PROGRAM_ARB = 34336 # GL/glext.h:723
GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB = 34338 # GL/glext.h:724
GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB = 34339 # GL/glext.h:725
GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB = 34340 # GL/glext.h:726
GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB = 34341 # GL/glext.h:727
GL_CURRENT_VERTEX_ATTRIB_ARB = 34342 # GL/glext.h:728
GL_PROGRAM_LENGTH_ARB = 34343 # GL/glext.h:729
GL_PROGRAM_STRING_ARB = 34344 # GL/glext.h:730
GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB = 34350 # GL/glext.h:731
GL_MAX_PROGRAM_MATRICES_ARB = 34351 # GL/glext.h:732
GL_CURRENT_MATRIX_STACK_DEPTH_ARB = 34368 # GL/glext.h:733
GL_CURRENT_MATRIX_ARB = 34369 # GL/glext.h:734
GL_VERTEX_PROGRAM_POINT_SIZE_ARB = 34370 # GL/glext.h:735
GL_VERTEX_PROGRAM_TWO_SIDE_ARB = 34371 # GL/glext.h:736
GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB = 34373 # GL/glext.h:737
GL_PROGRAM_ERROR_POSITION_ARB = 34379 # GL/glext.h:738
GL_PROGRAM_BINDING_ARB = 34423 # GL/glext.h:739
GL_MAX_VERTEX_ATTRIBS_ARB = 34921 # GL/glext.h:740
GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB = 34922 # GL/glext.h:741
GL_PROGRAM_ERROR_STRING_ARB = 34932 # GL/glext.h:742
# --- Auto-generated OpenGL extension enumerant values (ARB/EXT/SGIS) ---
# Each constant mirrors a #define from the Khronos GL/glext.h header; the
# trailing comment on every line records the originating header line number.
# Values are the decimal rendering of the canonical hex enums
# (e.g. 34933 == 0x8875).  Do not edit by hand; regenerate from the header.
GL_PROGRAM_FORMAT_ASCII_ARB = 34933 # GL/glext.h:743
GL_PROGRAM_FORMAT_ARB = 34934 # GL/glext.h:744
GL_PROGRAM_INSTRUCTIONS_ARB = 34976 # GL/glext.h:745
GL_MAX_PROGRAM_INSTRUCTIONS_ARB = 34977 # GL/glext.h:746
GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB = 34978 # GL/glext.h:747
GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB = 34979 # GL/glext.h:748
GL_PROGRAM_TEMPORARIES_ARB = 34980 # GL/glext.h:749
GL_MAX_PROGRAM_TEMPORARIES_ARB = 34981 # GL/glext.h:750
GL_PROGRAM_NATIVE_TEMPORARIES_ARB = 34982 # GL/glext.h:751
GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB = 34983 # GL/glext.h:752
GL_PROGRAM_PARAMETERS_ARB = 34984 # GL/glext.h:753
GL_MAX_PROGRAM_PARAMETERS_ARB = 34985 # GL/glext.h:754
GL_PROGRAM_NATIVE_PARAMETERS_ARB = 34986 # GL/glext.h:755
GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB = 34987 # GL/glext.h:756
GL_PROGRAM_ATTRIBS_ARB = 34988 # GL/glext.h:757
GL_MAX_PROGRAM_ATTRIBS_ARB = 34989 # GL/glext.h:758
GL_PROGRAM_NATIVE_ATTRIBS_ARB = 34990 # GL/glext.h:759
GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB = 34991 # GL/glext.h:760
GL_PROGRAM_ADDRESS_REGISTERS_ARB = 34992 # GL/glext.h:761
GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB = 34993 # GL/glext.h:762
GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB = 34994 # GL/glext.h:763
GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB = 34995 # GL/glext.h:764
GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB = 34996 # GL/glext.h:765
GL_MAX_PROGRAM_ENV_PARAMETERS_ARB = 34997 # GL/glext.h:766
GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB = 34998 # GL/glext.h:767
GL_TRANSPOSE_CURRENT_MATRIX_ARB = 34999 # GL/glext.h:768
GL_MATRIX0_ARB = 35008 # GL/glext.h:769
GL_MATRIX1_ARB = 35009 # GL/glext.h:770
GL_MATRIX2_ARB = 35010 # GL/glext.h:771
GL_MATRIX3_ARB = 35011 # GL/glext.h:772
GL_MATRIX4_ARB = 35012 # GL/glext.h:773
GL_MATRIX5_ARB = 35013 # GL/glext.h:774
GL_MATRIX6_ARB = 35014 # GL/glext.h:775
GL_MATRIX7_ARB = 35015 # GL/glext.h:776
GL_MATRIX8_ARB = 35016 # GL/glext.h:777
GL_MATRIX9_ARB = 35017 # GL/glext.h:778
GL_MATRIX10_ARB = 35018 # GL/glext.h:779
GL_MATRIX11_ARB = 35019 # GL/glext.h:780
GL_MATRIX12_ARB = 35020 # GL/glext.h:781
GL_MATRIX13_ARB = 35021 # GL/glext.h:782
GL_MATRIX14_ARB = 35022 # GL/glext.h:783
GL_MATRIX15_ARB = 35023 # GL/glext.h:784
GL_MATRIX16_ARB = 35024 # GL/glext.h:785
GL_MATRIX17_ARB = 35025 # GL/glext.h:786
GL_MATRIX18_ARB = 35026 # GL/glext.h:787
GL_MATRIX19_ARB = 35027 # GL/glext.h:788
GL_MATRIX20_ARB = 35028 # GL/glext.h:789
GL_MATRIX21_ARB = 35029 # GL/glext.h:790
GL_MATRIX22_ARB = 35030 # GL/glext.h:791
GL_MATRIX23_ARB = 35031 # GL/glext.h:792
GL_MATRIX24_ARB = 35032 # GL/glext.h:793
GL_MATRIX25_ARB = 35033 # GL/glext.h:794
GL_MATRIX26_ARB = 35034 # GL/glext.h:795
GL_MATRIX27_ARB = 35035 # GL/glext.h:796
GL_MATRIX28_ARB = 35036 # GL/glext.h:797
GL_MATRIX29_ARB = 35037 # GL/glext.h:798
GL_MATRIX30_ARB = 35038 # GL/glext.h:799
GL_MATRIX31_ARB = 35039 # GL/glext.h:800
# ARB_fragment_program (GL/glext.h:803)
GL_FRAGMENT_PROGRAM_ARB = 34820 # GL/glext.h:804
GL_PROGRAM_ALU_INSTRUCTIONS_ARB = 34821 # GL/glext.h:805
GL_PROGRAM_TEX_INSTRUCTIONS_ARB = 34822 # GL/glext.h:806
GL_PROGRAM_TEX_INDIRECTIONS_ARB = 34823 # GL/glext.h:807
GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB = 34824 # GL/glext.h:808
GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB = 34825 # GL/glext.h:809
GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB = 34826 # GL/glext.h:810
GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB = 34827 # GL/glext.h:811
GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB = 34828 # GL/glext.h:812
GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB = 34829 # GL/glext.h:813
GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB = 34830 # GL/glext.h:814
GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB = 34831 # GL/glext.h:815
GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB = 34832 # GL/glext.h:816
GL_MAX_TEXTURE_COORDS_ARB = 34929 # GL/glext.h:817
GL_MAX_TEXTURE_IMAGE_UNITS_ARB = 34930 # GL/glext.h:818
# ARB_vertex_buffer_object (GL/glext.h:821)
GL_BUFFER_SIZE_ARB = 34660 # GL/glext.h:822
GL_BUFFER_USAGE_ARB = 34661 # GL/glext.h:823
GL_ARRAY_BUFFER_ARB = 34962 # GL/glext.h:824
GL_ELEMENT_ARRAY_BUFFER_ARB = 34963 # GL/glext.h:825
GL_ARRAY_BUFFER_BINDING_ARB = 34964 # GL/glext.h:826
GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB = 34965 # GL/glext.h:827
GL_VERTEX_ARRAY_BUFFER_BINDING_ARB = 34966 # GL/glext.h:828
GL_NORMAL_ARRAY_BUFFER_BINDING_ARB = 34967 # GL/glext.h:829
GL_COLOR_ARRAY_BUFFER_BINDING_ARB = 34968 # GL/glext.h:830
GL_INDEX_ARRAY_BUFFER_BINDING_ARB = 34969 # GL/glext.h:831
GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB = 34970 # GL/glext.h:832
GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB = 34971 # GL/glext.h:833
GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB = 34972 # GL/glext.h:834
GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB = 34973 # GL/glext.h:835
GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB = 34974 # GL/glext.h:836
GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB = 34975 # GL/glext.h:837
GL_READ_ONLY_ARB = 35000 # GL/glext.h:838
GL_WRITE_ONLY_ARB = 35001 # GL/glext.h:839
GL_READ_WRITE_ARB = 35002 # GL/glext.h:840
GL_BUFFER_ACCESS_ARB = 35003 # GL/glext.h:841
GL_BUFFER_MAPPED_ARB = 35004 # GL/glext.h:842
GL_BUFFER_MAP_POINTER_ARB = 35005 # GL/glext.h:843
GL_STREAM_DRAW_ARB = 35040 # GL/glext.h:844
GL_STREAM_READ_ARB = 35041 # GL/glext.h:845
GL_STREAM_COPY_ARB = 35042 # GL/glext.h:846
GL_STATIC_DRAW_ARB = 35044 # GL/glext.h:847
GL_STATIC_READ_ARB = 35045 # GL/glext.h:848
GL_STATIC_COPY_ARB = 35046 # GL/glext.h:849
GL_DYNAMIC_DRAW_ARB = 35048 # GL/glext.h:850
GL_DYNAMIC_READ_ARB = 35049 # GL/glext.h:851
GL_DYNAMIC_COPY_ARB = 35050 # GL/glext.h:852
# ARB_occlusion_query (GL/glext.h:855)
GL_QUERY_COUNTER_BITS_ARB = 34916 # GL/glext.h:856
GL_CURRENT_QUERY_ARB = 34917 # GL/glext.h:857
GL_QUERY_RESULT_ARB = 34918 # GL/glext.h:858
GL_QUERY_RESULT_AVAILABLE_ARB = 34919 # GL/glext.h:859
GL_SAMPLES_PASSED_ARB = 35092 # GL/glext.h:860
# ARB_shader_objects (GL/glext.h:863)
GL_PROGRAM_OBJECT_ARB = 35648 # GL/glext.h:864
GL_SHADER_OBJECT_ARB = 35656 # GL/glext.h:865
GL_OBJECT_TYPE_ARB = 35662 # GL/glext.h:866
GL_OBJECT_SUBTYPE_ARB = 35663 # GL/glext.h:867
GL_FLOAT_VEC2_ARB = 35664 # GL/glext.h:868
GL_FLOAT_VEC3_ARB = 35665 # GL/glext.h:869
GL_FLOAT_VEC4_ARB = 35666 # GL/glext.h:870
GL_INT_VEC2_ARB = 35667 # GL/glext.h:871
GL_INT_VEC3_ARB = 35668 # GL/glext.h:872
GL_INT_VEC4_ARB = 35669 # GL/glext.h:873
GL_BOOL_ARB = 35670 # GL/glext.h:874
GL_BOOL_VEC2_ARB = 35671 # GL/glext.h:875
GL_BOOL_VEC3_ARB = 35672 # GL/glext.h:876
GL_BOOL_VEC4_ARB = 35673 # GL/glext.h:877
GL_FLOAT_MAT2_ARB = 35674 # GL/glext.h:878
GL_FLOAT_MAT3_ARB = 35675 # GL/glext.h:879
GL_FLOAT_MAT4_ARB = 35676 # GL/glext.h:880
GL_SAMPLER_1D_ARB = 35677 # GL/glext.h:881
GL_SAMPLER_2D_ARB = 35678 # GL/glext.h:882
GL_SAMPLER_3D_ARB = 35679 # GL/glext.h:883
GL_SAMPLER_CUBE_ARB = 35680 # GL/glext.h:884
GL_SAMPLER_1D_SHADOW_ARB = 35681 # GL/glext.h:885
GL_SAMPLER_2D_SHADOW_ARB = 35682 # GL/glext.h:886
GL_SAMPLER_2D_RECT_ARB = 35683 # GL/glext.h:887
GL_SAMPLER_2D_RECT_SHADOW_ARB = 35684 # GL/glext.h:888
GL_OBJECT_DELETE_STATUS_ARB = 35712 # GL/glext.h:889
GL_OBJECT_COMPILE_STATUS_ARB = 35713 # GL/glext.h:890
GL_OBJECT_LINK_STATUS_ARB = 35714 # GL/glext.h:891
GL_OBJECT_VALIDATE_STATUS_ARB = 35715 # GL/glext.h:892
GL_OBJECT_INFO_LOG_LENGTH_ARB = 35716 # GL/glext.h:893
GL_OBJECT_ATTACHED_OBJECTS_ARB = 35717 # GL/glext.h:894
GL_OBJECT_ACTIVE_UNIFORMS_ARB = 35718 # GL/glext.h:895
GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB = 35719 # GL/glext.h:896
GL_OBJECT_SHADER_SOURCE_LENGTH_ARB = 35720 # GL/glext.h:897
# ARB_vertex_shader (GL/glext.h:900)
GL_VERTEX_SHADER_ARB = 35633 # GL/glext.h:901
GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB = 35658 # GL/glext.h:902
GL_MAX_VARYING_FLOATS_ARB = 35659 # GL/glext.h:903
GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB = 35660 # GL/glext.h:904
GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB = 35661 # GL/glext.h:905
GL_OBJECT_ACTIVE_ATTRIBUTES_ARB = 35721 # GL/glext.h:906
GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB = 35722 # GL/glext.h:907
# ARB_fragment_shader (GL/glext.h:910)
GL_FRAGMENT_SHADER_ARB = 35632 # GL/glext.h:911
GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB = 35657 # GL/glext.h:912
GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB = 35723 # GL/glext.h:913
# ARB_shading_language_100 (GL/glext.h:916)
GL_SHADING_LANGUAGE_VERSION_ARB = 35724 # GL/glext.h:917
# ARB_texture_non_power_of_two (GL/glext.h:920)
# ARB_point_sprite (GL/glext.h:923)
GL_POINT_SPRITE_ARB = 34913 # GL/glext.h:924
GL_COORD_REPLACE_ARB = 34914 # GL/glext.h:925
# ARB_fragment_program_shadow (GL/glext.h:928)
# ARB_draw_buffers (GL/glext.h:931)
GL_MAX_DRAW_BUFFERS_ARB = 34852 # GL/glext.h:932
GL_DRAW_BUFFER0_ARB = 34853 # GL/glext.h:933
GL_DRAW_BUFFER1_ARB = 34854 # GL/glext.h:934
GL_DRAW_BUFFER2_ARB = 34855 # GL/glext.h:935
GL_DRAW_BUFFER3_ARB = 34856 # GL/glext.h:936
GL_DRAW_BUFFER4_ARB = 34857 # GL/glext.h:937
GL_DRAW_BUFFER5_ARB = 34858 # GL/glext.h:938
GL_DRAW_BUFFER6_ARB = 34859 # GL/glext.h:939
GL_DRAW_BUFFER7_ARB = 34860 # GL/glext.h:940
GL_DRAW_BUFFER8_ARB = 34861 # GL/glext.h:941
GL_DRAW_BUFFER9_ARB = 34862 # GL/glext.h:942
GL_DRAW_BUFFER10_ARB = 34863 # GL/glext.h:943
GL_DRAW_BUFFER11_ARB = 34864 # GL/glext.h:944
GL_DRAW_BUFFER12_ARB = 34865 # GL/glext.h:945
GL_DRAW_BUFFER13_ARB = 34866 # GL/glext.h:946
GL_DRAW_BUFFER14_ARB = 34867 # GL/glext.h:947
GL_DRAW_BUFFER15_ARB = 34868 # GL/glext.h:948
# ARB_texture_rectangle (GL/glext.h:951)
GL_TEXTURE_RECTANGLE_ARB = 34037 # GL/glext.h:952
GL_TEXTURE_BINDING_RECTANGLE_ARB = 34038 # GL/glext.h:953
GL_PROXY_TEXTURE_RECTANGLE_ARB = 34039 # GL/glext.h:954
GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB = 34040 # GL/glext.h:955
# ARB_color_buffer_float (GL/glext.h:958)
GL_RGBA_FLOAT_MODE_ARB = 34848 # GL/glext.h:959
GL_CLAMP_VERTEX_COLOR_ARB = 35098 # GL/glext.h:960
GL_CLAMP_FRAGMENT_COLOR_ARB = 35099 # GL/glext.h:961
GL_CLAMP_READ_COLOR_ARB = 35100 # GL/glext.h:962
GL_FIXED_ONLY_ARB = 35101 # GL/glext.h:963
# ARB_half_float_pixel (GL/glext.h:966)
GL_HALF_FLOAT_ARB = 5131 # GL/glext.h:967
# ARB_texture_float (GL/glext.h:970)
GL_TEXTURE_RED_TYPE_ARB = 35856 # GL/glext.h:971
GL_TEXTURE_GREEN_TYPE_ARB = 35857 # GL/glext.h:972
GL_TEXTURE_BLUE_TYPE_ARB = 35858 # GL/glext.h:973
GL_TEXTURE_ALPHA_TYPE_ARB = 35859 # GL/glext.h:974
GL_TEXTURE_LUMINANCE_TYPE_ARB = 35860 # GL/glext.h:975
GL_TEXTURE_INTENSITY_TYPE_ARB = 35861 # GL/glext.h:976
GL_TEXTURE_DEPTH_TYPE_ARB = 35862 # GL/glext.h:977
GL_UNSIGNED_NORMALIZED_ARB = 35863 # GL/glext.h:978
GL_RGBA32F_ARB = 34836 # GL/glext.h:979
GL_RGB32F_ARB = 34837 # GL/glext.h:980
GL_ALPHA32F_ARB = 34838 # GL/glext.h:981
GL_INTENSITY32F_ARB = 34839 # GL/glext.h:982
GL_LUMINANCE32F_ARB = 34840 # GL/glext.h:983
GL_LUMINANCE_ALPHA32F_ARB = 34841 # GL/glext.h:984
GL_RGBA16F_ARB = 34842 # GL/glext.h:985
GL_RGB16F_ARB = 34843 # GL/glext.h:986
GL_ALPHA16F_ARB = 34844 # GL/glext.h:987
GL_INTENSITY16F_ARB = 34845 # GL/glext.h:988
GL_LUMINANCE16F_ARB = 34846 # GL/glext.h:989
GL_LUMINANCE_ALPHA16F_ARB = 34847 # GL/glext.h:990
# ARB_pixel_buffer_object (GL/glext.h:993)
GL_PIXEL_PACK_BUFFER_ARB = 35051 # GL/glext.h:994
GL_PIXEL_UNPACK_BUFFER_ARB = 35052 # GL/glext.h:995
GL_PIXEL_PACK_BUFFER_BINDING_ARB = 35053 # GL/glext.h:996
GL_PIXEL_UNPACK_BUFFER_BINDING_ARB = 35055 # GL/glext.h:997
# EXT_abgr (GL/glext.h:1000)
GL_ABGR_EXT = 32768 # GL/glext.h:1001
# EXT_blend_color (GL/glext.h:1004)
GL_CONSTANT_COLOR_EXT = 32769 # GL/glext.h:1005
GL_ONE_MINUS_CONSTANT_COLOR_EXT = 32770 # GL/glext.h:1006
GL_CONSTANT_ALPHA_EXT = 32771 # GL/glext.h:1007
GL_ONE_MINUS_CONSTANT_ALPHA_EXT = 32772 # GL/glext.h:1008
GL_BLEND_COLOR_EXT = 32773 # GL/glext.h:1009
# EXT_polygon_offset (GL/glext.h:1012)
GL_POLYGON_OFFSET_EXT = 32823 # GL/glext.h:1013
GL_POLYGON_OFFSET_FACTOR_EXT = 32824 # GL/glext.h:1014
GL_POLYGON_OFFSET_BIAS_EXT = 32825 # GL/glext.h:1015
# EXT_texture (GL/glext.h:1018)
GL_ALPHA4_EXT = 32827 # GL/glext.h:1019
GL_ALPHA8_EXT = 32828 # GL/glext.h:1020
GL_ALPHA12_EXT = 32829 # GL/glext.h:1021
GL_ALPHA16_EXT = 32830 # GL/glext.h:1022
GL_LUMINANCE4_EXT = 32831 # GL/glext.h:1023
GL_LUMINANCE8_EXT = 32832 # GL/glext.h:1024
GL_LUMINANCE12_EXT = 32833 # GL/glext.h:1025
GL_LUMINANCE16_EXT = 32834 # GL/glext.h:1026
GL_LUMINANCE4_ALPHA4_EXT = 32835 # GL/glext.h:1027
GL_LUMINANCE6_ALPHA2_EXT = 32836 # GL/glext.h:1028
GL_LUMINANCE8_ALPHA8_EXT = 32837 # GL/glext.h:1029
GL_LUMINANCE12_ALPHA4_EXT = 32838 # GL/glext.h:1030
GL_LUMINANCE12_ALPHA12_EXT = 32839 # GL/glext.h:1031
GL_LUMINANCE16_ALPHA16_EXT = 32840 # GL/glext.h:1032
GL_INTENSITY_EXT = 32841 # GL/glext.h:1033
GL_INTENSITY4_EXT = 32842 # GL/glext.h:1034
GL_INTENSITY8_EXT = 32843 # GL/glext.h:1035
GL_INTENSITY12_EXT = 32844 # GL/glext.h:1036
GL_INTENSITY16_EXT = 32845 # GL/glext.h:1037
GL_RGB2_EXT = 32846 # GL/glext.h:1038
GL_RGB4_EXT = 32847 # GL/glext.h:1039
GL_RGB5_EXT = 32848 # GL/glext.h:1040
GL_RGB8_EXT = 32849 # GL/glext.h:1041
GL_RGB10_EXT = 32850 # GL/glext.h:1042
GL_RGB12_EXT = 32851 # GL/glext.h:1043
GL_RGB16_EXT = 32852 # GL/glext.h:1044
GL_RGBA2_EXT = 32853 # GL/glext.h:1045
GL_RGBA4_EXT = 32854 # GL/glext.h:1046
GL_RGB5_A1_EXT = 32855 # GL/glext.h:1047
GL_RGBA8_EXT = 32856 # GL/glext.h:1048
GL_RGB10_A2_EXT = 32857 # GL/glext.h:1049
GL_RGBA12_EXT = 32858 # GL/glext.h:1050
GL_RGBA16_EXT = 32859 # GL/glext.h:1051
GL_TEXTURE_RED_SIZE_EXT = 32860 # GL/glext.h:1052
GL_TEXTURE_GREEN_SIZE_EXT = 32861 # GL/glext.h:1053
GL_TEXTURE_BLUE_SIZE_EXT = 32862 # GL/glext.h:1054
GL_TEXTURE_ALPHA_SIZE_EXT = 32863 # GL/glext.h:1055
GL_TEXTURE_LUMINANCE_SIZE_EXT = 32864 # GL/glext.h:1056
GL_TEXTURE_INTENSITY_SIZE_EXT = 32865 # GL/glext.h:1057
GL_REPLACE_EXT = 32866 # GL/glext.h:1058
GL_PROXY_TEXTURE_1D_EXT = 32867 # GL/glext.h:1059
GL_PROXY_TEXTURE_2D_EXT = 32868 # GL/glext.h:1060
GL_TEXTURE_TOO_LARGE_EXT = 32869 # GL/glext.h:1061
# EXT_texture3D (GL/glext.h:1064)
GL_PACK_SKIP_IMAGES_EXT = 32875 # GL/glext.h:1065
GL_PACK_IMAGE_HEIGHT_EXT = 32876 # GL/glext.h:1066
GL_UNPACK_SKIP_IMAGES_EXT = 32877 # GL/glext.h:1067
GL_UNPACK_IMAGE_HEIGHT_EXT = 32878 # GL/glext.h:1068
GL_TEXTURE_3D_EXT = 32879 # GL/glext.h:1069
GL_PROXY_TEXTURE_3D_EXT = 32880 # GL/glext.h:1070
GL_TEXTURE_DEPTH_EXT = 32881 # GL/glext.h:1071
GL_TEXTURE_WRAP_R_EXT = 32882 # GL/glext.h:1072
GL_MAX_3D_TEXTURE_SIZE_EXT = 32883 # GL/glext.h:1073
# SGIS_texture_filter4 (GL/glext.h:1076)
GL_FILTER4_SGIS = 33094 # GL/glext.h:1077
GL_TEXTURE_FILTER4_SIZE_SGIS = 33095 # GL/glext.h:1078
# EXT_subtexture (GL/glext.h:1081)
# EXT_copy_texture (GL/glext.h:1084)
# EXT_histogram (GL/glext.h:1087)
GL_HISTOGRAM_EXT = 32804 # GL/glext.h:1088
GL_PROXY_HISTOGRAM_EXT = 32805 # GL/glext.h:1089
GL_HISTOGRAM_WIDTH_EXT = 32806 # GL/glext.h:1090
GL_HISTOGRAM_FORMAT_EXT = | |
valid values are: `slb.s1.small`, `slb.s2.small`, `slb.s2.medium`,
`slb.s3.small`, `slb.s3.medium`, `slb.s3.large` and `slb.s4.large`.
"""
return pulumi.get(self, "load_balancer_spec")
    @load_balancer_spec.setter
    def load_balancer_spec(self, value: Optional[pulumi.Input[str]]):
        """Set the specification of the SLB instance (see the getter for valid values)."""
        pulumi.set(self, "load_balancer_spec", value)
    @property
    @pulumi.getter(name="masterZoneId")
    def master_zone_id(self) -> Optional[pulumi.Input[str]]:
        """
        The primary zone ID of the SLB instance. If not specified, the system will be randomly assigned. You can query the primary and standby zones in a region by calling the [DescribeZone](https://help.aliyun.com/document_detail/27585.htm) API.
        """
        # Stored under the snake_case key; the decorator maps it to the
        # camelCase wire name "masterZoneId".
        return pulumi.get(self, "master_zone_id")
    @master_zone_id.setter
    def master_zone_id(self, value: Optional[pulumi.Input[str]]):
        """Set the primary zone ID of the SLB instance."""
        pulumi.set(self, "master_zone_id", value)
    @property
    @pulumi.getter(name="modificationProtectionReason")
    def modification_protection_reason(self) -> Optional[pulumi.Input[str]]:
        """
        The reason recorded for modification protection. Only effective when
        `modification_protection_status` is `ConsoleProtection`.
        """
        return pulumi.get(self, "modification_protection_reason")
    @modification_protection_reason.setter
    def modification_protection_reason(self, value: Optional[pulumi.Input[str]]):
        """Set the modification protection reason."""
        pulumi.set(self, "modification_protection_reason", value)
    @property
    @pulumi.getter(name="modificationProtectionStatus")
    def modification_protection_status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of modification protection. Valid values: `ConsoleProtection` and `NonProtection`. Default value is `NonProtection`.
        """
        return pulumi.get(self, "modification_protection_status")
    @modification_protection_status.setter
    def modification_protection_status(self, value: Optional[pulumi.Input[str]]):
        """Set the modification protection status (`ConsoleProtection` or `NonProtection`)."""
        pulumi.set(self, "modification_protection_status", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """The SLB name. Deprecated from provider version 1.123.1; use `load_balancer_name` instead."""
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        """Set the deprecated `name` field (prefer `load_balancer_name`)."""
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="paymentType")
    def payment_type(self) -> Optional[pulumi.Input[str]]:
        """
        The billing method of the load balancer. Valid values are `PayAsYouGo` and `Subscription`. Default to `PayAsYouGo`.
        """
        return pulumi.get(self, "payment_type")
    @payment_type.setter
    def payment_type(self, value: Optional[pulumi.Input[str]]):
        """Set the billing method (`PayAsYouGo` or `Subscription`)."""
        pulumi.set(self, "payment_type", value)
    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """The purchase period. NOTE(review): units and valid range are not documented in this file - confirm against the provider schema."""
        return pulumi.get(self, "period")
    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        """Set the purchase period."""
        pulumi.set(self, "period", value)
    @property
    @pulumi.getter(name="resourceGroupId")
    def resource_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Id of resource group which the SLB belongs.
        """
        return pulumi.get(self, "resource_group_id")
    @resource_group_id.setter
    def resource_group_id(self, value: Optional[pulumi.Input[str]]):
        """Set the resource group id the SLB belongs to."""
        pulumi.set(self, "resource_group_id", value)
    @property
    @pulumi.getter(name="slaveZoneId")
    def slave_zone_id(self) -> Optional[pulumi.Input[str]]:
        """
        The standby zone ID of the SLB instance. If not specified, the system will be randomly assigned. You can query the primary and standby zones in a region by calling the DescribeZone API.
        """
        return pulumi.get(self, "slave_zone_id")
    @slave_zone_id.setter
    def slave_zone_id(self, value: Optional[pulumi.Input[str]]):
        """Set the standby zone ID of the SLB instance."""
        pulumi.set(self, "slave_zone_id", value)
    @property
    @pulumi.getter
    def specification(self) -> Optional[pulumi.Input[str]]:
        """The instance specification. Deprecated from provider version 1.123.1; use `load_balancer_spec` instead."""
        return pulumi.get(self, "specification")
    @specification.setter
    def specification(self, value: Optional[pulumi.Input[str]]):
        """Set the deprecated `specification` field (prefer `load_balancer_spec`)."""
        pulumi.set(self, "specification", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of slb load balancer. Valid values: `active` and `inactive`. The system default value is `active`.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        """Set the load balancer status (`active` or `inactive`)."""
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping of tags to assign to the resource. The `tags` can have a maximum of 10 tags for every load balancer instance.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        """Set the tag mapping for the resource."""
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="vswitchId")
    def vswitch_id(self) -> Optional[pulumi.Input[str]]:
        """
        The VSwitch ID to launch in. If `address_type` is internet, it will be ignored.
        """
        return pulumi.get(self, "vswitch_id")
    @vswitch_id.setter
    def vswitch_id(self, value: Optional[pulumi.Input[str]]):
        """Set the VSwitch ID to launch in."""
        pulumi.set(self, "vswitch_id", value)
@pulumi.input_type
class _ApplicationLoadBalancerState:
    def __init__(__self__, *,
                 address: Optional[pulumi.Input[str]] = None,
                 address_ip_version: Optional[pulumi.Input[str]] = None,
                 address_type: Optional[pulumi.Input[str]] = None,
                 bandwidth: Optional[pulumi.Input[int]] = None,
                 delete_protection: Optional[pulumi.Input[str]] = None,
                 instance_charge_type: Optional[pulumi.Input[str]] = None,
                 internet_charge_type: Optional[pulumi.Input[str]] = None,
                 load_balancer_name: Optional[pulumi.Input[str]] = None,
                 load_balancer_spec: Optional[pulumi.Input[str]] = None,
                 master_zone_id: Optional[pulumi.Input[str]] = None,
                 modification_protection_reason: Optional[pulumi.Input[str]] = None,
                 modification_protection_status: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 payment_type: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None,
                 slave_zone_id: Optional[pulumi.Input[str]] = None,
                 specification: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 vswitch_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ApplicationLoadBalancer resources.
        :param pulumi.Input[str] address: Specify the IP address of the private network for the SLB instance, which must be in the destination CIDR block of the correspond ing switch.
        :param pulumi.Input[str] address_ip_version: The IP version of the SLB instance to be created, which can be set to `ipv4` or `ipv6` . Default to `ipv4`. Now, only internet instance support `ipv6` address.
        :param pulumi.Input[str] address_type: The network type of the SLB instance. Valid values: ["internet", "intranet"]. If load balancer launched in VPC, this value must be `intranet`.
            - internet: After an Internet SLB instance is created, the system allocates a public IP address so that the instance can forward requests from the Internet.
            - intranet: After an intranet SLB instance is created, the system allocates an intranet IP address so that the instance can only forward intranet requests.
        :param pulumi.Input[int] bandwidth: Valid value is between 1 and 1000, If argument `internet_charge_type` is `PayByTraffic`, then this value will be ignore.
        :param pulumi.Input[str] delete_protection: Whether enable the deletion protection or not. on: Enable deletion protection. off: Disable deletion protection. Default to off. Only postpaid instance support this function.
        :param pulumi.Input[str] instance_charge_type: (Deprecated from provider version 1.124) Use `payment_type` instead.
        :param pulumi.Input[str] internet_charge_type: Valid values are `PayByBandwidth`, `PayByTraffic`. If this value is `PayByBandwidth`, then argument `address_type` must be `internet`. Default is `PayByTraffic`. If load balancer launched in VPC, this value must be `PayByTraffic`. Before version 1.10.1, the valid values are `paybybandwidth` and `paybytraffic`.
        :param pulumi.Input[str] load_balancer_name: The name of the SLB instance; replaces the deprecated `name` field.
        :param pulumi.Input[str] load_balancer_spec: The specification of the Server Load Balancer instance. Default to empty string indicating it is "Shared-Performance" instance.
            Launching "[Performance-guaranteed](https://www.alibabacloud.com/help/doc-detail/27657.htm)" instance, it is must be specified and it valid values are: `slb.s1.small`, `slb.s2.small`, `slb.s2.medium`,
            `slb.s3.small`, `slb.s3.medium`, `slb.s3.large` and `slb.s4.large`.
        :param pulumi.Input[str] master_zone_id: The primary zone ID of the SLB instance. If not specified, the system will be randomly assigned. You can query the primary and standby zones in a region by calling the [DescribeZone](https://help.aliyun.com/document_detail/27585.htm) API.
        :param pulumi.Input[str] modification_protection_reason: The resource of modification protection. It's effective when modification protection is `ConsoleProtection`.
        :param pulumi.Input[str] modification_protection_status: The status of modification protection. Valid values: `ConsoleProtection` and `NonProtection`. Default value is `NonProtection`.
        :param pulumi.Input[str] name: (Deprecated from provider version 1.123.1) Use `load_balancer_name` instead.
        :param pulumi.Input[str] payment_type: The billing method of the load balancer. Valid values are `PayAsYouGo` and `Subscription`. Default to `PayAsYouGo`.
        :param pulumi.Input[str] resource_group_id: The Id of resource group which the SLB belongs.
        :param pulumi.Input[str] slave_zone_id: The standby zone ID of the SLB instance. If not specified, the system will be randomly assigned. You can query the primary and standby zones in a region by calling the DescribeZone API.
        :param pulumi.Input[str] specification: (Deprecated from provider version 1.123.1) Use `load_balancer_spec` instead.
        :param pulumi.Input[str] status: The status of slb load balancer. Valid values: `actice` and `inactice`. The system default value is `active`.
        :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource. The `tags` can have a maximum of 10 tag for every load balancer instance.
        :param pulumi.Input[str] vswitch_id: The VSwitch ID to launch in. If `address_type` is internet, it will be ignore.
        """
        # Only store values that were explicitly provided, so that absent
        # inputs stay absent in the Pulumi property bag.
        if address is not None:
            pulumi.set(__self__, "address", address)
        if address_ip_version is not None:
            pulumi.set(__self__, "address_ip_version", address_ip_version)
        if address_type is not None:
            pulumi.set(__self__, "address_type", address_type)
        if bandwidth is not None:
            pulumi.set(__self__, "bandwidth", bandwidth)
        if delete_protection is not None:
            pulumi.set(__self__, "delete_protection", delete_protection)
        # Deprecated field: emit a deprecation warning, then still honour the value.
        if instance_charge_type is not None:
            warnings.warn("""Field 'instance_charge_type' has been deprecated from provider version 1.124. Use 'payment_type' replaces it.""", DeprecationWarning)
            pulumi.log.warn("""instance_charge_type is deprecated: Field 'instance_charge_type' has been deprecated from provider version 1.124. Use 'payment_type' replaces it.""")
        if instance_charge_type is not None:
            pulumi.set(__self__, "instance_charge_type", instance_charge_type)
        if internet_charge_type is not None:
            pulumi.set(__self__, "internet_charge_type", internet_charge_type)
        if load_balancer_name is not None:
            pulumi.set(__self__, "load_balancer_name", load_balancer_name)
        if load_balancer_spec is not None:
            pulumi.set(__self__, "load_balancer_spec", load_balancer_spec)
        if master_zone_id is not None:
            pulumi.set(__self__, "master_zone_id", master_zone_id)
        if modification_protection_reason is not None:
            pulumi.set(__self__, "modification_protection_reason", modification_protection_reason)
        if modification_protection_status is not None:
            pulumi.set(__self__, "modification_protection_status", modification_protection_status)
        # Deprecated field: emit a deprecation warning, then still honour the value.
        if name is not None:
            warnings.warn("""Field 'name' has been deprecated from provider version 1.123.1. New field 'load_balancer_name' instead""", DeprecationWarning)
            pulumi.log.warn("""name is deprecated: Field 'name' has been deprecated from provider version 1.123.1. New field 'load_balancer_name' instead""")
        if name is not None:
            pulumi.set(__self__, "name", name)
        if payment_type is not None:
            pulumi.set(__self__, "payment_type", payment_type)
        if period is not None:
            pulumi.set(__self__, "period", period)
        if resource_group_id is not None:
            pulumi.set(__self__, "resource_group_id", resource_group_id)
        if slave_zone_id is not None:
            pulumi.set(__self__, "slave_zone_id", slave_zone_id)
        # Deprecated field: emit a deprecation warning, then still honour the value.
        if specification is not None:
            warnings.warn("""Field 'specification' has been deprecated from provider version 1.123.1. New field 'load_balancer_spec' instead""", DeprecationWarning)
            pulumi.log.warn("""specification is deprecated: Field 'specification' has been deprecated from provider version 1.123.1. New field 'load_balancer_spec' instead""")
        if specification is not None:
            pulumi.set(__self__, "specification", specification)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if vswitch_id is not None:
            pulumi.set(__self__, "vswitch_id", vswitch_id)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
Specify the IP address of the private network for the SLB instance, which must | |
datetime.timedelta(0, DEFAULT_STUDY_EXPIRATION_TIME)), cache_safe='true' if study.is_cache_safe else 'false')#time zone study.expiration_time.isoformat()
data_request_xml += '</DataRequest>'
logger.debug('data request %s', data_request_xml)
try:
response_from_node = urllib.request.urlopen(request_to_node, data_request_xml.encode(), cafile=CA_CERTIFICATE_FILE, cadefault=CA_DEFAULT)
resp_content = response_from_node.read().decode('utf-8')
logger.debug('response:%s',resp_content)
except urllib.error.HTTPError as http_error:
# Not succeeded
# 400, 401, 403 or 500
# Parse response XML
resp_content = http_error.read().decode('utf-8')
logger.error('error response:%s',resp_content)
try:
root = ElementTree.fromstring(resp_content)
error_code = root.findtext('./ErrorCode')
error_message = root.findtext('./ErrorMessage')
# Log error code and error message
logger.error('error code:%s',error_code)
logger.error('error message:%s',error_message)
#handle waiting approval message
if error_message=='Waiting Approval':
return True
except Exception as e:
logger.exception('')
return False
# Success, update study status
Study.objects.filter(pk=study.id).update(status=1, data_request_id=data_request_id, executed_on=timezone.localtime(timezone.now()))
return True
else:
return False
def import_response_data(data, study):
    """
    Decompress an encoded claim-data payload and import it for a study.

    The payload is a base64-encoded, zlib-compressed string containing one or
    more concatenated XML documents, each preceded by an XML declaration.
    Every recognised claim element is parsed through ``helper`` and persisted
    as a row linked to ``study``.

    Any parsing/persistence error is logged and swallowed so a bad payload
    does not propagate to the caller (deliberate best-effort import).

    :param data: base64-encoded, zlib-compressed response data string
    :param study: the Study model instance the imported claim records belong to
    """
    # NOTE: the previous version created (and later deleted) an unused
    # NamedTemporaryFile; the payload is processed entirely in memory.
    try:
        # Reverse the transport encoding: base64 -> compressed bytes -> XML text.
        decoded_data = zlib.decompress(base64.b64decode(data))
        # Several XML documents are concatenated in one payload; split on the
        # XML declaration (the fragment before the first declaration is empty).
        documents = decoded_data.decode().split('<?xml version="1.0" encoding="UTF-8"?>')
        for document in documents[1:]:
            # Parse each data XML document and import its claims to the database.
            root = ElementTree.fromstring(document)
            for beneficiary_summary in root.findall('.//BeneficiarySummary'):
                properties = helper.parseBeneficiaryClaim({}, beneficiary_summary)
                BeneficiaryClaimData.objects.create(study=study, **properties)
            for carrier_claim in root.findall('.//CarrierClaim'):
                properties = helper.parseCarrierClaimData({}, carrier_claim)
                CarrierClaimData.objects.create(study=study, **properties)
            for inpatient_claim in root.findall('.//InpatientClaim'):
                properties = helper.parseInpatientClaimData({}, inpatient_claim)
                InpatientClaimData.objects.create(study=study, **properties)
            for outpatient_claim in root.findall('.//OutpatientClaim'):
                properties = helper.parseOutpatientClaimData({}, outpatient_claim)
                OutpatientClaimData.objects.create(study=study, **properties)
            for prescription_claim in root.findall('.//PrescriptionEvent'):
                properties = helper.parsePrescriptionClaimData({}, prescription_claim)
                PrescriptionClaimData.objects.create(study=study, **properties)
    except Exception:
        logger.exception("")
# AJAX endpoint: search partners by company name, partner id ('c__<id>') or tag id ('t__<id>').
def search_partner(request):
    """
    Search partners and return a JSON page of results.

    The `company_name` GET parameter is overloaded: `c__<id>` looks up a
    single partner by primary key, `t__<id>` filters by tag id, anything
    else is treated as a case-sensitive substring of the company name
    (empty string returns all partners un-paginated).

    :param request: the Django HTTP request (GET: company_name, page, pageSize)
    :return: HttpResponse with a JSON body: total, page, pageSize,
             totalPages and the partner id/name list for the page
    """
    search_key = request.GET.get('company_name', '')
    partner_id = None
    partner_tag_id = None
    # Fix: previously company_name was only bound in the `else` branch, so a
    # falsy id (e.g. search_key == 'c__') raised NameError below.
    company_name = ''
    if search_key.startswith('c__'):
        partner_id = search_key[len('c__'):]
    elif search_key.startswith('t__'):
        partner_tag_id = search_key[len('t__'):]
    else:
        company_name = search_key
    page = int(request.GET.get('page', 0))
    if request.GET.get('pageSize') == 'All':
        pageSize = MAX_PAGE_SIZE
    else:
        pageSize = int(request.GET.get('pageSize', PARTNER_SEARCH_PAGE_SIZE))
    if partner_id:
        partners = [Partner.objects.get(pk=partner_id)]
    elif partner_tag_id:
        partners = Partner.objects.filter(tags__id=partner_tag_id)
    elif company_name == "":
        # Empty search returns everything on a single (maximal) page.
        pageSize = MAX_PAGE_SIZE
        partners = Partner.objects.all()
    else:
        partners = list(Partner.objects.filter(company_name__contains=company_name))
    total = len(partners)
    # Ceiling division, with at least one (possibly empty) page.
    totalPages = max(1, -(-total // pageSize))
    # Clamp the requested page into the valid range.
    if page >= totalPages:
        page = totalPages - 1
    start = page * pageSize
    end = min(total, start + pageSize)
    partnerData = [{'id': partner.id, 'name': partner.company_name}
                   for partner in partners[start:end]]
    result = {'total': total, 'page': page, 'pageSize': pageSize, 'totalPages': totalPages, 'partners': partnerData}
    return HttpResponse(json.dumps(result), content_type='application/json')
class LoginRequiredMixin():
    '''
    Mixin that forces authentication before any view operation is dispatched.
    Thread Safety:
    Not thread safe by itself, but only ever used in a thread-safe manner.
    @author: caoweiquan322
    @version: 1.0
    '''
    # Logger identity for this mixin.
    CLASS_NAME = 'hfppnetwork.sms.views.LoginRequiredMixin'
    LOGGER = logging.getLogger(CLASS_NAME)
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        '''
        Dispatch the request through the parent view, requiring a login.
        @param self: the object itself
        @param args: positional arguments
        @param kwargs: keyword arguments
        @return: the dispatched result
        '''
        sig = self.CLASS_NAME + '.dispatch'
        helper.log_entrance(self.LOGGER, sig, {'args': args, 'kwargs': kwargs})
        result = super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
        helper.log_exit(self.LOGGER, sig, [result])
        return result
class SuperuserRequiredMixin(object):
    '''
    Mixin that restricts view operations to superusers.
    Thread Safety:
    Not thread safe by itself, but only ever used in a thread-safe manner.
    @author: muzehyun
    @version: 1.0
    '''
    def dispatch(self, request, *args, **kwargs):
        # Superusers continue through the normal dispatch chain; everyone
        # else gets a bare 401 response.
        if request.user.is_superuser:
            return super(SuperuserRequiredMixin, self).dispatch(request, *args, **kwargs)
        return HttpResponse('Unauthorized', status=401)
class JSONResponseMixin(object):
    '''
    Mixin that renders form-view results as JSON for AJAX requests.
    Thread Safety:
    Not thread safe by itself, but only ever used in a thread-safe manner.
    @author: caoweiquan322
    @version: 1.0
    '''
    # Logger identity for this mixin.
    CLASS_NAME = 'hfppnetwork.sms.views.JSONResponseMixin'
    LOGGER = logging.getLogger(CLASS_NAME)
    def render_to_json_response(self, context, **response_kwargs):
        '''
        Serialize *context* to JSON and wrap it in an HttpResponse.
        @param self: the object itself
        @param context: the data to serialize
        @param response_kwargs: extra keyword arguments for HttpResponse
        @return: the JSON http response
        '''
        sig = self.CLASS_NAME + '.render_to_json_response'
        helper.log_entrance(self.LOGGER, sig,
                            {'context': context, 'response_kwargs': response_kwargs})
        response_kwargs['content_type'] = 'application/json'
        response = HttpResponse(json.dumps(context), **response_kwargs)
        helper.log_exit(self.LOGGER, sig, [response])
        return response
    def form_invalid(self, form):
        '''
        Handle an invalid form: JSON errors for AJAX, default behaviour otherwise.
        @param self: the object itself
        @param form: the invalid form
        '''
        sig = self.CLASS_NAME + '.form_invalid'
        helper.log_entrance(self.LOGGER, sig, {'form': form})
        if self.request.is_ajax():
            outcome = self.render_to_json_response({'errors': form.errors}, status=400)
        else:
            outcome = super(JSONResponseMixin, self).form_invalid(form)
        helper.log_exit(self.LOGGER, sig, [outcome])
        return outcome
    def form_valid(self, form):
        '''
        Handle a valid form: record the owner, save via the parent class,
        then answer with the new pk as JSON for AJAX requests or the
        default response otherwise.
        @param self: the object itself
        @param form: the validated form
        '''
        sig = self.CLASS_NAME + '.form_valid'
        helper.log_entrance(self.LOGGER, sig, {'form': form})
        form.instance.owner = self.request.user
        response = super(JSONResponseMixin, self).form_valid(form)
        if self.request.is_ajax():
            outcome = self.render_to_json_response({'pk': self.object.pk})
        else:
            outcome = response
        helper.log_exit(self.LOGGER, sig, [outcome])
        return outcome
class ListStudyView(LoginRequiredMixin, ListView):
    """
    This is the Django view implementation to list studies.
    Staff users see every study; regular users see only their own.
    @author: TCSASSEMBLER
    @version: 1.0
    """
    #Represents the page size. Default to 10.
    paginate_by = 10
    #Represents the template name. This value isn't supposed to change.
    template_name = "studies/list.html"
    #Represents the context object name for studies. This value isn't supposed to change.
    context_object_name = "studies"
    def get_paginate_by(self, queryset):
        """
        Get the page size.
        Parameters:
        - self : the object itself
        - queryset : the query set
        Returns:
        the page size (from the 'page_size' request parameter, or the default)
        """
        return self.request.REQUEST.get('page_size', self.paginate_by)
    def get_search_form(self):
        """
        Return the StudySearchForm on the page, bound to the request data.
        Parameters:
        - self : the object itself
        Returns:
        the StudySearchForm on the page.
        """
        search_form = self.form = StudySearchForm(self.request.REQUEST)
        return search_form
    def _visible_studies(self):
        """Return the base queryset of studies visible to the current user.
        Staff (admin) can view all studies; regular users only their own.
        """
        if self.request.user.is_staff:
            return Study.objects.all()
        return Study.objects.filter(owner__exact=self.request.user)
    def _apply_date_range(self, qs, form, field, from_key, to_key):
        """Apply an inclusive [from, to] date-range filter on *field*.
        Dates from the form are widened to full days (min/max time).
        """
        date_from = form.cleaned_data.get(from_key)
        if date_from:
            qs = qs.filter(**{field + '__gte': datetime.datetime.combine(
                date_from, datetime.datetime.min.time())})
        date_to = form.cleaned_data.get(to_key)
        if date_to:
            qs = qs.filter(**{field + '__lte': datetime.datetime.combine(
                date_to, datetime.datetime.max.time())})
        return qs
    def get_queryset(self):
        """
        Return the query set for the view: role-filtered studies, further
        narrowed by the search form when it validates.
        Parameters:
        - self : the object itself
        Returns:
        the query set
        """
        #Filter by role
        qs = self._visible_studies()
        #Filter by search form
        search_form = self.get_search_form()
        if search_form.is_valid():
            if search_form.cleaned_data.get("id"):
                qs = qs.filter(id__exact=search_form.cleaned_data.get("id"))
            if search_form.cleaned_data.get("description"):
                qs = qs.filter(description__contains=search_form.cleaned_data.get("description"))
            if search_form.cleaned_data.get("status"):
                qs = qs.filter(status__exact=search_form.cleaned_data.get("status"))
            else:
                # Default to draft studies when no status was requested.
                qs = qs.filter(status__exact=0)
            # The four date-range filters share one helper (was duplicated).
            qs = self._apply_date_range(qs, search_form, 'created_on',
                                        'created_on_from', 'created_on_to')
            qs = self._apply_date_range(qs, search_form, 'last_modified_on',
                                        'last_modified_on_from', 'last_modified_on_to')
            qs = self._apply_date_range(qs, search_form, 'executed_on',
                                        'executed_on_from', 'executed_on_to')
            qs = self._apply_date_range(qs, search_form, 'completed_on',
                                        'completed_on_from', 'completed_on_to')
        #Return the query
        return qs
    def get_context_data(self, **kwargs):
        """Add paging variables, the search form and per-status study counts
        to the template context.
        """
        context = super(ListStudyView, self).get_context_data(**kwargs)
        # Process paging data
        get = self.request.GET.copy()
        page = get.pop("page", None)
        extra = '&'+get.urlencode()
        context['page'] = page
        context['extra_vars'] = extra
        context['search_form'] = self.get_search_form()
        # Per-status counts over the role-filtered queryset (was four
        # near-identical statements per role branch).
        visible = self._visible_studies()
        for status, key in ((0, 'count_of_draft_studies'),
                            (1, 'count_of_in_progress_studies'),
                            (2, 'count_of_analysis_studies'),
                            (3, 'count_of_archived_studies')):
            context[key] = visible.filter(status__exact=status).count()
        return context
class ViewStudyTransactionsView(LoginRequiredMixin, ListView):
    """
    This is the Django view implementation to list study transactions.
    @author: TCSASSEMBLER
    @version: 1.0
    """
    #Represents the page size. Default to 10.
    paginate_by = 10
    #Represents the template name. This value isn't supposed to change.
    template_name = "studies/transactions.html"
    #Represents the context object name for transactions. This value isn't supposed to change.
    context_object_name = "transactions"
    def get_paginate_by(self, queryset):
        """
        Get the page size.
        Parameters:
        - self : the object itself
        - queryset : the query set
        Returns:
        the page size (from the 'page_size' request parameter, or the default)
        """
        return self.request.REQUEST.get('page_size', self.paginate_by)
    def get_context_data(self, **kwargs):
        """
        Return the context data: paging variables, the search form, the
        study's response statistics and the study object itself.
        Parameters:
        - self : the object itself
        - kwargs : the key/value arguments
        Returns:
        the context data
        """
        context = super(ViewStudyTransactionsView, self).get_context_data(**kwargs)
        # Process paging data
        get = self.request.GET.copy()
        page = get.pop("page", None)
        extra = '&'+get.urlencode()
        context['page'] = page
        context['extra_vars'] = extra
        # NOTE(review): get_search_form is not defined in this class here —
        # presumably inherited or defined elsewhere; confirm.
        context['search_form'] = self.get_search_form()
        # self.study is assigned in get_queryset, which Django's ListView
        # runs before get_context_data during normal request handling.
        response_rate = float(0)
        count_of_data_responsed = self.study.count_of_data_requests_sent - self.study.count_of_data_requests_pending
        if self.study.count_of_data_requests_sent:
            # Guarded to avoid division by zero when nothing was sent.
            response_rate = float(count_of_data_responsed) / self.study.count_of_data_requests_sent
        context['count_of_data_responsed'] = count_of_data_responsed
        context['response_rate'] = response_rate * 100
        # Study object
        context['study'] = self.study
        return context
def get_queryset(self):
"""
Return the query set for the view.
Parameters:
- self : the object itself
Returns:
the query set
"""
self.study = Study.objects.get(pk=self.kwargs['pk'])
qs = StudyDataRequest.objects.filter(study=self.study)
# Filter by search form
search_form = self.get_search_form()
if search_form.is_valid():
if search_form.cleaned_data.get("company_name") is not None:
qs = qs.filter(partner__company_name__contains=search_form.cleaned_data.get("company_name"))
# Return the query
| |
from libsbml import *
import math
import new
import urllib2
# Indicates whether we should perform the SBML document consistency check.
# Suppressed by default because the MSR SBML library does not produce valid
# SBML.  (The misspelled name is kept: other code in this file refers to it.)
supressConsistencyCheck = True
# get the array index of given species ID:
# get the array index of given species ID:
def getSpecIndex(specIdToArrayIndexDict, specId):
    """Return the array index assigned to *specId*, allocating the next
    free index (== current dict size) on first sight.

    The dict is mutated in place so repeated calls are stable.
    Fix: replaced Python-2-only dict.has_key() with the `in` operator
    (identical behaviour, also valid on Python 3) and len(d.keys())
    with len(d).
    """
    if specId not in specIdToArrayIndexDict:
        specIdToArrayIndexDict[specId] = len(specIdToArrayIndexDict)
    return specIdToArrayIndexDict[specId]
# get python variable name for the ODE of given species ID:
# get python variable name for the ODE of given species ID:
def specOdeNameFromId(specId):
    """Return the python variable name used for the ODE of *specId*."""
    return "d_%s" % specId
# checks the given SBML document for consistency and raises
# an exception if internal inconsistency is detected.
# checks the given SBML document for consistency and raises
# an exception if internal inconsistency is detected.
def checkSBMLConsistency(document):
    """Check *document* for internal consistency.

    Does nothing when the module-level supressConsistencyCheck flag is
    set.  Otherwise prints every failure and raises an Exception whose
    message lists all of them.
    """
    if supressConsistencyCheck:
        return
    numFailures = document.checkInternalConsistency()
    if numFailures > 0:
        failureMsg = ""
        for failureNum in range(numFailures):
            message = "Failure " + str(failureNum) + ": " + document.getError(failureNum).getShortMessage() + "\n"
            print(message)
            # BUG FIX: the original never appended to failureMsg, so the
            # raised Exception always carried an empty message.
            failureMsg += message
        raise Exception(failureMsg)
# load SBML model from given file.
# load SBML model from given file.
def SBMLModelFromSBMLFile(sbmlFile):
    """Read *sbmlFile* with libSBML and return its model (may be None)."""
    document = SBMLReader().readSBML(sbmlFile)
    if document.getNumErrors() > 0:
        print("Errors in reading SBML file...")
    checkSBMLConsistency(document)
    model = document.getModel()
    if not model:
        print("No model!")
    return model
# load SBML model form given string.
# load SBML model form given string.
def SBMLModelFromSBMLStr(sbmlStr):
    """Load an SBML model from the SBML document string *sbmlStr*.

    SBMLReader.readSBMLFromStr does not work through the SWIG bindings
    used here, so the string is round-tripped through a temporary file
    named "tmp.sbml" in the working directory (name kept for backward
    compatibility; the file is left behind, as before).
    """
    # Fixes: use a context manager so the handle is closed even when the
    # write fails, and stop shadowing the builtin name `file`.
    with open("tmp.sbml", 'w') as out:
        out.write(sbmlStr)
    # then read from file:
    reader = SBMLReader()
    document = reader.readSBML("tmp.sbml")
    checkSBMLConsistency(document)
    model = document.getModel()
    return model
# helper function for composing a python string from a binary AST math node.
# mutually recursive with pythonMathFromASTNode.
# helper function for composing a python string from a binary AST math node.
# mutually recursive with pythonMathFromASTNode.
def splitBinary(astNode, operand, kineticLaw, model):
    """Render a binary AST node as "(<left> <operand> <right>)"."""
    left = pythonMathFromASTNode(astNode.getLeftChild(), kineticLaw, model)
    right = pythonMathFromASTNode(astNode.getRightChild(), kineticLaw, model)
    return "".join(["(", left, " ", operand, " ", right, ")"])
# constructs a python string representing the math expresson in an AST node.
# currently only covers basic cases, and not all the ASTNode types; an
# exception is thrown if an unsupported type is encountered.
# more types can easily be added.
# constructs a python string representing the math expresson in an AST node.
# currently only covers basic cases, and not all the ASTNode types; an
# exception is thrown if an unsupported type is encountered.
# more types can easily be added.
def pythonMathFromASTNode(astNode, kineticLaw, model):
    """Render *astNode* as a python expression string.

    Handles the basic binary operators, numeric literals and named
    identifiers (local/global parameters are substituted by value,
    species ids are kept symbolic).  Unknown identifiers default to "1"
    with a warning; unsupported node types raise an Exception.
    """
    nodeType = astNode.getType()
    if nodeType == AST_PLUS:
        return splitBinary(astNode, "+", kineticLaw, model)
    elif nodeType == AST_MINUS:
        return splitBinary(astNode, "-", kineticLaw, model)
    elif nodeType == AST_DIVIDE:
        return splitBinary(astNode, "/", kineticLaw, model)
    elif nodeType == AST_TIMES:
        return splitBinary(astNode, "*", kineticLaw, model)
    elif nodeType == AST_FUNCTION_POWER:
        return splitBinary(astNode, "**", kineticLaw, model)
    elif astNode.isNumber():
        if astNode.isReal():
            return str(astNode.getReal())
        elif astNode.isInteger():
            return str(astNode.getInteger())
    elif astNode.isName():
        # check if this name is a defined parameter. can't see any way of
        # distinguishing between parameters and species identifiers, other
        # than checking for definition this way?
        nodeName = astNode.getName()
        parameterLocal = kineticLaw.getParameter(nodeName)
        parameterGlobal = model.getParameter(nodeName)
        # Idiom fix: identity comparison against None instead of "!= None".
        if parameterLocal is not None:
            return str(parameterLocal.getValue())
        elif parameterGlobal is not None:
            return str(parameterGlobal.getValue())
        # if not parameter, then check if this is a species id:
        elif model.getSpecies(nodeName) is not None:
            return nodeName
        # something weird -- return 1 and issue warning:
        else:
            print("WARNING: unknown identifier " + nodeName + ", defaulting to 1. Globals: " + str(model.getNumParameters()) + ". Locals: " + str(kineticLaw.getNumParameters()))
            return "1"
    else:
        # Diagnostic fix: the original reported isName() (a boolean) as the
        # "node type"; report the actual type code and the node name instead.
        raise Exception("Un-supported AST node type: " + str(astNode.getType()) + ", node: " + str(astNode.getName()))
# constructs a python program string from given SBML model.
# constructs a python program string from given SBML model.
def pythonStrFromSBMLModel(model):
    """Generate the source of a python module from *model*.

    The generated module defines getRates(cells) plus three module-level
    lists: specIdLst, specNameLst and specInitPopLst, all indexed by the
    species array positions assigned via getSpecIndex.
    """
    # initialise dictionaries and python code string:
    specIdToArrayIndexDict = {}
    specIdToExpDict = {}
    # define indentation for python code:
    indent = "    "
    indent2 = indent + indent
    # iterate over reactions:
    for reactionNum in range(model.getNumReactions()):
        reaction = model.getReaction(reactionNum)
        # compute rate expression.
        # UPDATE: first tried to use the formulaToString libSBML function,
        # which should have worked in most cases. but it doesn't substitute
        # in defined parameters. hence we use our own ASTNode parsing function.
        #mathAstNode = reaction.getKineticLaw().getMath()
        #pythonRateExp = "(" + formulaToString(mathAstNode) + ")"
        #pythonRateExp = reaction.getKineticLaw().getFormula()
        pythonRateExp = pythonMathFromASTNode(reaction.getKineticLaw().getMath(), reaction.getKineticLaw(), model)
        """
        # iterate over reactants to create mass-action rate expression:
        # UPDATE: don't assume mass-action, just read the rate expression literally.
        for reactantNum in range(reaction.getNumReactants()):
            reactantSpecRef = reaction.getReactant(reactantNum)
            stoich = reactantSpecRef.getStoichiometry()
            # for some reason stoichiometry is sometimes NaN:
            if math.isnan(stoich):
                stoich = 1
            reactantSpecId = reactantSpecRef.getSpecies()
            if stoich == 1:
                pythonRateExp += "*" + reactantSpecId
            else:
                pythonRateExp += "*" + reactantSpecId + "^" + str(stoich)
        """
        # iterate over reactants and update ODE for each:
        # (each reactant's ODE loses this reaction's rate, scaled by stoichiometry)
        for reactantNum in range(reaction.getNumReactants()):
            reactantSpecRef = reaction.getReactant(reactantNum)
            stoich = reactantSpecRef.getStoichiometry()
            # for some reason stoichiometry is sometimes NaN:
            if math.isnan(stoich):
                stoich = 1
            reactantSpecId = reactantSpecRef.getSpecies()
            # add key to hash table if it doesn't exist:
            if not specIdToExpDict.has_key(reactantSpecId):
                specIdToExpDict[reactantSpecId] = ""
            if stoich == 1:
                specIdToExpDict[reactantSpecId] += " - " + pythonRateExp
            else:
                specIdToExpDict[reactantSpecId] += " - " + str(stoich) + "*" + pythonRateExp
        # do the same for products:
        # (each product's ODE gains this reaction's rate, scaled by stoichiometry)
        for productNum in range(reaction.getNumProducts()):
            productSpecRef = reaction.getProduct(productNum)
            stoich = productSpecRef.getStoichiometry()
            # for some reason stoichiometry is sometimes NaN:
            if math.isnan(stoich):
                stoich = 1
            productSpecId = productSpecRef.getSpecies()
            # add key to hash table if it doesn't exist:
            if not specIdToExpDict.has_key(productSpecId):
                specIdToExpDict[productSpecId] = ""
            if stoich == 1:
                specIdToExpDict[productSpecId] += " + " + pythonRateExp
            else:
                specIdToExpDict[productSpecId] += " + " + str(stoich) + "*" + pythonRateExp
    # create python variable definitions, python updated rate definitions,
    # and a list of species IDs in the relevant order.
    pythonVarDefs = ""
    pythonRateUpdateExps = ""
    specIdLst = [0]*len(specIdToExpDict)
    for specId in specIdToExpDict.keys():
        # get index into species array:
        index = getSpecIndex(specIdToArrayIndexDict, specId)
        # create python variable definition:
        pythonVarDefs += indent + indent + specId + " = cell.species[" + str(index) + "]\n"
        # create python rate update expression:
        exp = specIdToExpDict[specId]
        varname = specOdeNameFromId(specId)
        pythonRateUpdateExps += indent2 + varname + " = " + exp + "\n"
        # insert spec id into correct position in list:
        specIdLst[index] = specId #.insert(index, specId)
    # create a python list with updated rate expressions,
    # and python lists with with species IDs and names in the relevant order.
    pythonRateUpdateLst = ""
    pythonSpecIDLst = ""
    pythonSpecNameLst = ""
    for specId in specIdLst:
        # get species name from model:
        specName = model.getSpecies(specId).getName()
        if specName == None:
            specName = "Undefined"
        # first element opens the list literal; later ones are comma-joined:
        if pythonRateUpdateLst == "":
            pythonRateUpdateLst += "[" + specOdeNameFromId(specId)
            pythonSpecIDLst += "[\"" + specId + "\""
            pythonSpecNameLst += "[\"" + specName + "\""
        else:
            pythonRateUpdateLst += ", " + specOdeNameFromId(specId)
            pythonSpecIDLst += ", \"" + specId + "\""
            pythonSpecNameLst += ", \"" + specName + "\""
    pythonRateUpdateLst += "]"
    pythonSpecIDLst += "]"
    pythonSpecNameLst += "]"
    # build initial population list:
    # prefer initialAmount, fall back to initialConcentration, else 0.0
    initPopLst = [0]*model.getNumSpecies()
    for specId in specIdLst:
        sbmlSpec = model.getSpecies(specId)
        initAmount = sbmlSpec.getInitialAmount()
        initConc = sbmlSpec.getInitialConcentration()
        init = 0.0
        if not math.isnan(initAmount) and initAmount > 0:
            init = initAmount
        elif not math.isnan(initConc) and initConc > 0:
            init = initConc
        index = getSpecIndex(specIdToArrayIndexDict, specId)
        initPopLst[index] = init
    # build initial population python string:
    pythonInitPopLst = ""
    for init in initPopLst:
        if pythonInitPopLst == "":
            pythonInitPopLst += "[" + str(init)
        else:
            pythonInitPopLst += "," + str(init)
    pythonInitPopLst += "]"
    # compose the complete python program string.
    pythonStr = ""
    pythonStr += "def getRates(cells):\n"
    pythonStr += indent + "df = []\n"
    pythonStr += indent + "for cell in cells:\n"
    pythonStr += pythonVarDefs + "\n"
    pythonStr += pythonRateUpdateExps + "\n"
    pythonStr += indent2 + "df.append(" + pythonRateUpdateLst + ")\n"
    pythonStr += indent + "return df"
    pythonStr += "\n\n"
    pythonStr += "specIdLst = " + pythonSpecIDLst + "\n\n"
    pythonStr += "specNameLst = " + pythonSpecNameLst + "\n\n"
    pythonStr += "specInitPopLst = " + pythonInitPopLst + "\n\n"
    return pythonStr
# create python module from code dynamically.
def pythonModuleFromPythonStr(pythonStr):
module = new.module("sbmlPythonEncoding")
exec | |
+ self.sample_array_offset + 4])[0]
self.sample_array_offset += 4
self.decoration += ' offset:%d' % self.data_offset
if self.has_first_sample_flags:
self.first_sample_flags = struct.unpack('>I', \
self.fmap[self.offset+self.sample_array_offset:self.offset+self.sample_array_offset+4])[0]
self.sample_array_offset += 4
self.decoration += ' fs_flags:%d' % self.first_sample_flags
self.sample_row_size = (self.has_sample_duration and 4) + \
(self.has_sample_size and 4) + (self.has_sample_flags and 4) + \
(self.has_sample_composition_time_offset and 4)
if self.has_sample_duration:
self.total_duration = sum([struct.unpack_from('>I', self.fmap, self.offset + self.sample_array_offset + i * self.sample_row_size)[0] for i in range(self.sample_count)])
else:
self.total_duration = self.parent.find('tfhd').default_sample_duration * self.sample_count
self.decoration += ' tdur:%d' % self.total_duration
#@property
#def has_data_offset(self):
# return self.flags & 0x0001
#@property
#def data_offset(self):
# return struct.unpack('>i', \
# self.fmap[self.offset+self.sample_array_offset:self.offset+self.sample_array_offset+4])[0]
@property
def sample_count(self):
return struct.unpack('>I', self.fmap[self.offset+12:self.offset+16])[0]
    def sample_entry(self, i):
        """Parse and return sample *i* of this run as a dict.

        Only the fields enabled by this box's flags are present; keys are
        'duration', 'size', 'flags' (hex string) and 'time_offset'.
        """
        row = {}
        # Each row lives at a fixed stride after the sample array header.
        offset = self.offset + self.sample_array_offset + i * self.sample_row_size
        if self.has_sample_duration:
            row['duration'] = struct.unpack('>I', self.fmap[offset:offset+4])[0]
            offset += 4
        if self.has_sample_size:
            row['size'] = struct.unpack('>I', self.fmap[offset:offset+4])[0]
            offset += 4
        if self.has_sample_flags:
            row['flags'] = '0x%x' % struct.unpack('>I', self.fmap[offset:offset+4])[0]
            offset += 4
        if self.has_sample_composition_time_offset:
            # NOTE(review): read as unsigned ('>I'); trun version 1 defines
            # signed composition offsets — confirm which versions occur here.
            row['time_offset'] = struct.unpack('>I', self.fmap[offset:offset+4])[0]
            offset += 4
        return row
def description(self):
root = self
while root.parent:
root = root.parent
current_track_id = self.parent.find('tfhd').track_id
# track_id, timescale = root.get_video_info()
# if track_id != current_track_id:
# track_id, timescale = root.get_audio_info()
#
# self.decoration += ' tdur:%f' % (self.total_duration / float(timescale))
ret = full_box.description(self)
if VERBOSE > 1:
for i in range(self.sample_count):
row = {}
offset = self.offset + self.sample_array_offset + i * self.sample_row_size
if self.has_sample_duration:
row['duration'] = struct.unpack('>I', self.fmap[offset:offset+4])[0]
offset += 4
if self.has_sample_size:
row['size'] = struct.unpack('>I', self.fmap[offset:offset+4])[0]
offset += 4
if self.has_sample_flags:
row['flags'] = '0x%x' % struct.unpack('>I', self.fmap[offset:offset+4])[0]
offset += 4
if self.has_sample_composition_time_offset:
row['time_offset'] = struct.unpack('>I', self.fmap[offset:offset+4])[0]
offset += 4
ret += ' - ' + ' '.join(['%s:%s' % (k, v) for k, v in row.iteritems()]) + '\n'
return ret
class tfra_box(full_box):
    """Track fragment random access box (ISO 14496-12 'tfra').

    Maps decode times to the file offsets of the moof boxes that contain
    them.  The raw table is parsed lazily into the two random_access_*
    lists by parse_random_access_table().
    """
    def __init__(self, *args):
        full_box.__init__(self, *args)
        # Lazily-filled caches: parallel lists of times and moof offsets.
        self.random_access_time = []
        self.random_access_moof_offset = []
    @property
    def track_id(self):
        # 32-bit track id directly after the full-box header.
        return struct.unpack('>I', self.fmap[self.offset+12:self.offset+16])[0]
    @property
    def length_size_of_traf_num(self):
        # Two bits of the length byte; field sizes are stored as (size - 1).
        # NOTE: single-byte indexing of fmap assumes Python 2 string/mmap
        # semantics (returns a 1-char string).
        return (struct.unpack('>B', self.fmap[self.offset+19])[0] & 0x30) >> 4
    @property
    def length_size_of_trun_num(self):
        return (struct.unpack('>B', self.fmap[self.offset+19])[0] & 0x0C) >> 2
    @property
    def length_size_of_sample_num(self):
        return struct.unpack('>B', self.fmap[self.offset+19])[0] & 0x03
    @property
    def number_of_entry(self):
        # Count of rows in the random access table.
        return struct.unpack('>I', self.fmap[self.offset+20:self.offset+24])[0]
    @property
    def end_time(self):
        """Approximate end time of the track (last keyframe plus one GOP)."""
        if self.number_of_entry == 0:
            return 0
        # This is an approx. Assumes a full GOP.
        # NOTE(review): with exactly one entry, entry(-1) wraps to a negative
        # table offset and unpacks garbage — confirm single-entry tables
        # cannot occur here.
        last_keyframe_time = self.entry(self.number_of_entry - 1)[0]
        prev_keyframe_time = self.entry(self.number_of_entry - 2)[0]
        return last_keyframe_time + (last_keyframe_time - prev_keyframe_time)
    def entry(self, index):
        """Parse table row *index* into (time, moof_offset, traf, trun, sample)."""
        # Version 1 uses 64-bit time/offset fields, version 0 uses 32-bit.
        intro_format, intro_length = self.version and ('>Q', 16) or ('>I', 8)
        row_length = (intro_length +
                      1 + self.length_size_of_traf_num +
                      1 + self.length_size_of_trun_num +
                      1 + self.length_size_of_sample_num)
        row_start = self.offset + 24 + (row_length * index)
        # sys.stderr.write(str(locals())+'\n')
        # sys.stderr.write('start:{row_start} len:{row_length}\n'.format(**locals()))
        p = parse_generator(self.fmap[row_start:row_start+row_length], intro_format)
        time = p.next()[0]
        moof_offset = p.next()[0]
        # Variable-width numbers: pick the unpack format by stored size code.
        traf = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_traf_num])[-1]
        trun = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_trun_num])[-1]
        sample = p.send(['>B', '>H', '>BH', '>I'][self.length_size_of_sample_num])[-1]
        return time, moof_offset, traf, trun, sample
    def parse_random_access_table(self):
        """Fill the random_access_time/_moof_offset caches from the raw table,
        keeping only the first row for each distinct moof offset.
        """
        intro_format, intro_length = self.version and ('>QQ', 16) or ('>II', 8)
        row_length = (intro_length +
                      1 + self.length_size_of_traf_num +
                      1 + self.length_size_of_trun_num +
                      1 + self.length_size_of_sample_num)
        self.random_access_time = []
        self.random_access_moof_offset = []
        for i in range(self.number_of_entry):
            row_start = self.offset + 24 + (row_length * i)
            time, moof_offset = struct.unpack(intro_format, self.fmap[row_start:row_start+intro_length])
            if not self.random_access_moof_offset or self.random_access_moof_offset[-1] != moof_offset:
                self.random_access_time.append(time)
                self.random_access_moof_offset.append(moof_offset)
    def time_for_fragment(self, fragment):
        """Return the start time of 1-based *fragment*, or None if out of range."""
        if not self.random_access_time:
            self.parse_random_access_table()
        if len(self.random_access_time) < fragment:
            return None
        return self.random_access_time[fragment - 1]
    def moof_offset_for_fragment(self, fragment):
        """Return (offset, size) of 1-based *fragment*; size is 0 for the
        last fragment and the result is (None, None) when out of range.
        """
        if not self.random_access_moof_offset:
            self.parse_random_access_table()
        if len(self.random_access_moof_offset) < fragment:
            return None, None
        offset = self.random_access_moof_offset[fragment - 1]
        size = 0
        if len(self.random_access_moof_offset) > fragment:
            size = self.random_access_moof_offset[fragment] - offset
        return offset, size
    def moof_offset_for_time(self, seek_time):
        """Return the moof offset of the fragment containing *seek_time*
        (binary search over the cached time table).
        """
        if not self.random_access_moof_offset:
            self.parse_random_access_table()
        # float_time = seek_time/90000.0
        # print('Searching for {float_time:.2f} ({seek_time})'.format(**locals()))
        index = bisect.bisect_left(self.random_access_time, seek_time)
        # print('  - got index:{0} index_time:{1} ({2})'\
        #    .format(index, self.random_access_time[index]/90000.0, self.random_access_time[index]))
        index = max(index-1, 0)
        # print('  - adjusted index:{0} index_time:{1} ({2})'\
        #    .format(index, self.random_access_time[index]/90000.0, self.random_access_time[index]))
        # print('  - obj: {0!s}'.format(self.parent.parent.find('moof[offset={0}]'\
        #    .format(self.random_access_moof_offset[index]))))
        return self.random_access_moof_offset[index]
    def time_for_moof_offset(self, offset):
        """Return the start time of the fragment whose moof sits at *offset*.
        Raises ValueError if the offset is not in the table.
        """
        if not self.random_access_moof_offset:
            self.parse_random_access_table()
        index = self.random_access_moof_offset.index(offset)
        return self.random_access_time[index]
    @property
    def fragment_count(self):
        # Number of distinct fragments (deduplicated moof offsets).
        if not self.random_access_moof_offset:
            self.parse_random_access_table()
        return len(self.random_access_moof_offset)
    @property
    def decoration(self):
        """Summary line; when VERBOSE > 1 also one line per table entry,
        with times scaled by the matching track's timescale when a moov
        box is available.
        """
        extras = 'track_id:%d #traf:%d #trun:%d #sample:%d #entries:%d end_time:%d' % (self.track_id,
                self.length_size_of_traf_num,
                self.length_size_of_trun_num,
                self.length_size_of_sample_num,
                self.number_of_entry,
                self.end_time)
        entries = ['\n']
        if VERBOSE > 1:
            timescale = 0
            format_time = lambda x: '{0}'.format(x)
            moov = self.root.find('moov')
            if moov:
                track_id, track_timescale = self.root.get_video_info()
                if track_id == self.track_id:
                    timescale = float(track_timescale)
                else:
                    timescale = float(self.root.get_audio_info()[1])
                format_time = lambda x: '{0:.3f}'.format(x/timescale)
            for i in range(self.number_of_entry):
                data = self.entry(i)
                entry_time = format_time(data[0])
                entries.append(' - #{index:03d} time:{time} moof:{1} traf:{2} trun:{3} sample:{4}\n'\
                    .format(*data, index=i, time=entry_time))
        return extras + ''.join(entries)[:-1]
class mfro_box(full_box):
    """Movie fragment random access offset box: its single payload field is
    the size of the enclosing mfra box.
    """
    @property
    def decoration(self):
        mfra_size = struct.unpack_from('>I', self.fmap, self.offset + 12)[0]
        return 'size:%d' % mfra_size
class stbl_box(box):
    """Sample table container box ('stbl'); pure container, no own payload."""
    def __init__(self, *args):
        box.__init__(self, *args)
class stts_box(full_box):
    """Decoding time-to-sample box: run-length table of sample durations."""
    def __init__(self, *args):
        full_box.__init__(self, *args)
        i = parse_generator(self.fmap[self.offset+12:self.offset+self.size])
        i.next() # prime
        self.entry_count = i.send('>I')[0]
        self._entries = []
        for j in range(self.entry_count):
            self._entries.append({'sample_count' : i.send('>I')[0], 'sample_delta' : i.send('>I')[0]})
        self.array = []
        self.unroll()
    def entry(self, index):
        """Return raw table row *index* ({'sample_count', 'sample_delta'})."""
        return self._entries[index]
    def unroll(self):
        """Expand the run-length table into self.array, one
        {'time', 'delta'} dict per sample.
        """
        time = 0
        # A zero sentinel row is prepended, making self.array effectively
        # 1-based for per-sample lookups.
        self.array.append({'time' : 0, 'delta' : 0})
        for entry in self._entries:
            delta = entry['sample_delta']
            for i in range(entry['sample_count']):
                self.array.append({'time' : time, 'delta' : delta})
                time = time + delta
class ctts_box(full_box):
    """Composition time-to-sample box: run-length table of composition
    offsets relative to decode time.
    """
    def __init__(self, *args):
        full_box.__init__(self, *args)
        i = parse_generator(self.fmap[self.offset+12:self.offset+self.size])
        i.next() # prime
        self.entry_count = i.send('>I')[0]
        self._entries = []
        for j in range(self.entry_count):
            # NOTE(review): offsets are read unsigned ('>I'); ctts version 1
            # defines signed offsets — confirm only version 0 occurs here.
            self._entries.append({'sample_count' : i.send('>I')[0], 'sample_offset' : i.send('>I')[0]})
        self.array = []
        self.unroll()
    def entry(self, index):
        """Return raw table row *index* ({'sample_count', 'sample_offset'})."""
        return self._entries[index]
    def unroll(self):
        """Expand the run-length table into self.array, one offset per
        sample, with a zero sentinel prepended (1-based lookups).
        """
        self.array.append(0)
        for entry in self._entries:
            offset = entry['sample_offset']
            for i in range(entry['sample_count']):
                self.array.append(offset)
class stss_box(full_box):
    """Sync sample box: the 1-based sample numbers that are random access
    (key frame) points.
    """
    def __init__(self, *args):
        full_box.__init__(self, *args)
        i = parse_generator(self.fmap[self.offset+12:self.offset+self.size])
        i.next() # prime
        self.entry_count = i.send('>I')[0]
        self._entries = []
        for j in range(self.entry_count):
            self._entries.append({'sample_number' : i.send('>I')[0]})
        # Precomputed set of sync sample numbers so has_index() is O(1)
        # instead of the original linear scan per query.
        self._sync_samples = set(e['sample_number'] for e in self._entries)
    def entry(self, index):
        """Return raw table row *index* ({'sample_number': n})."""
        return self._entries[index]
    def has_index(self, index):
        """Return True when sample number *index* is a sync sample."""
        return index in self._sync_samples
class stsz_box(full_box):
    """Sample size box: either one constant sample_size, or (when that
    field is 0) one explicit size per sample.
    """
    def __init__(self, *args):
        full_box.__init__(self, *args)
        i = parse_generator(self.fmap[self.offset+12:self.offset+self.size])
        i.next() # prime
        self.sample_size = i.send('>I')[0]
        self.sample_count = i.send('>I')[0]
        self.decoration = 'sample_size=' + str(self.sample_size) + ' sample_count=' + str(self.sample_count)
        self._entries = []
        # The per-sample table exists only when sample_size == 0.
        if self.sample_size == 0:
            for j in range(self.sample_count):
                self._entries.append({'entry_size' : i.send('>I')[0]})
    def entry(self, index):
        """Return per-sample row *index*; only valid when sample_size == 0."""
        return self._entries[index]
class stsc_box(full_box):
    """Sample-to-chunk box: run-length table mapping sample runs to chunks,
    unrolled on construction into per-sample [chunk, index-in-chunk,
    samples-per-chunk] rows.
    """
    def __init__(self, *args):
        full_box.__init__(self, *args)
        i = parse_generator(self.fmap[self.offset+12:self.offset+self.size])
        i.next() # prime
        self.entry_count = i.send('>I')[0]
        self.decoration = 'entry_count=' + str(self.entry_count)
        self._entries = []
        for j in range(self.entry_count):
            self._entries.append({'first_chunk' : i.send('>I')[0], \
                                  'samples_per_chunk' : i.send('>I')[0], \
                                  'sample_description_index' : i.send('>I')[0]})
        self.array = []
        self.unroll()
    def entry(self, index):
        """Return raw table row *index*."""
        return self._entries[index]
    def unroll(self):
        """Expand the run-length table into self.array with a zero sentinel
        row (1-based per-sample lookups).

        NOTE(review): for the gap chunks between table rows this repeats the
        previous row's samples_per_chunk, and the inner loop indexes those
        gap rows with the previous count — verify against a multi-run file.
        """
        self.array.append([0, 0, 0])
        last_chunk = 0
        last_num_samples = 0
        for entry in self._entries:
            first_chunk = entry['first_chunk']
            samples_per_chunk = entry['samples_per_chunk']
            sample_description_index = entry['sample_description_index']
            for i in range(last_chunk + 1, first_chunk):
                for j in range(last_num_samples):
                    self.array.append([i, j, last_num_samples])
            for i in range(samples_per_chunk):
                self.array.append([first_chunk, i, samples_per_chunk])
            last_chunk = first_chunk
            last_num_samples = samples_per_chunk
        #print self.array
    def get_unrolled(self, idx):
        """Return (chunk, index-in-chunk) for unrolled sample *idx*, growing
        the table by repeating the last chunk pattern when *idx* is past the
        end (the final stsc run extends to the end of the file).
        """
        if idx < len(self.array):
            return self.array[idx][0], self.array[idx][1]
        else:
            while True:
                #print 'add chunk'
                self.add_chunk()
                #print self.array
                if idx < len(self.array):
                    return self.array[idx][0], self.array[idx][1]
    def add_chunk(self):
        """Append one more chunk's rows, cloning the last row's sample count."""
        last_chunk = self.array[-1][0]
        num_samples = self.array[-1][2]
        for i in range(num_samples):
            self.array.append([last_chunk + 1, i, num_samples])
class stco_box(full_box):
    """Chunk offset box: table of absolute 32-bit file offsets, one per chunk."""
    def __init__(self, *args):
        full_box.__init__(self, *args)
        p = parse_generator(self.fmap[self.offset+12:self.offset+self.size])
        p.next() # prime
        self.entry_count = p.send('>I')[0]
        self.decoration = 'entry_count=' + str(self.entry_count)
        self._entries = [{'chunk_offset' : p.send('>I')[0]}
                         for _ in range(self.entry_count)]
    def entry(self, index):
        """Return table row *index* ({'chunk_offset': value})."""
        return self._entries[index]
class ftyp_box(box):
    """File type box: major brand, minor version and compatible brands."""
    def __init__(self, *args):
        box.__init__(self, *args)
        i = parse_generator(self.fmap[self.offset+8:self.offset+self.size])
        i.next() # prime
        # Brands are four single chars concatenated into a 4CC string.
        self.major_brand = i.send('>c')[0] + i.send('>c')[0] + i.send('>c')[0] + i.send('>c')[0]
        self.minor_version = i.send('>I')[0]
        self.brands = []
        # NOTE: '/' relies on Python 2 integer division; on Python 3 this
        # would produce a float and break range().
        num_brands = (self.size - 16) / 4
        for j in range(num_brands):
            self.brands.append(i.send('>c')[0] + i.send('>c')[0] + i.send('>c')[0] + i.send('>c')[0])
    @property
    def decoration(self):
        # e.g. "isom isom,iso2,avc1"
        ret = self.major_brand + ' ' + ','.join(brand for brand in self.brands)
        return ret
class styp_box(box):
    """Segment type box: same layout as ftyp (major brand, minor version,
    compatible brands) but used for media segments.
    """
    def __init__(self, *args):
        box.__init__(self, *args)
        i = parse_generator(self.fmap[self.offset+8:self.offset+self.size])
        i.next() # prime
        # Four single chars concatenated into a 4CC brand string.
        self.major_brand = i.send('>c')[0] + i.send('>c')[0] + i.send('>c')[0] + i.send('>c')[0]
        self.minor_version = i.send('>I')[0]
        self.brands = []
        # NOTE: '/' relies on Python 2 integer division (see ftyp_box).
        num_brands = (self.size - 16) / 4
        for j in range(num_brands):
            self.brands.append(i.send('>c')[0] + i.send('>c')[0] + i.send('>c')[0] + i.send('>c')[0])
    @property
    def decoration(self):
        # Major brand followed by the comma-joined compatible brand list.
        ret = self.major_brand + ' ' + ','.join(brand for brand in self.brands)
        return ret
class tfma_box(full_box):
def __init__(self, *args):
full_box.__init__(self, *args)
i = parse_generator(self.fmap[self.offset+12:self.offset+self.size])
i.next() # prime
self.entry_count = i.send('>I')[0]
self._entries = []
for j in range(self.entry_count):
segment_duration = i.send(self.version and '>Q' or '>I')[0]
media_time = i.send(self.version and '>q' or '>i')[0]
media_rate_integer = i.send('>H')[0]
media_rate_fraction | |
'_inner'
from pymbolic import var, substitute
from pymbolic.mapper import IdentityMapper
from loopy.symbolic import get_dependencies
names = set(arrays)
class SubstMapper(IdentityMapper):
def map_subscript(self, expr, *args, **kwargs):
if expr.aggregate.name in names:
# get old index
old = var(owner.pre_split + '_outer')
new = var(owner.pre_split + '_outer') * owner.vector_width + \
var(new_var)
expr.index = substitute(expr.index, {old: new})
return super(SubstMapper, self).map_subscript(
expr, *args, **kwargs)
insns = []
mapper = SubstMapper()
for insn in knl.instructions:
try:
if get_dependencies(insn.assignee) & names:
insn = insn.copy(assignee=mapper(insn.assignee),
within_inames=insn.within_inames | set([
new_var]))
if get_dependencies(insn.expression) & names:
insn = insn.copy(expression=mapper(insn.expression),
within_inames=insn.within_inames | set([
new_var]))
except AttributeError:
pass
insns.append(insn)
return knl.copy(instructions=insns)
def split_loopy_arrays(self, kernel, dont_split=[], **kwargs):
"""
Splits the :class:`loopy.GlobalArg`'s that form the given kernel's arguements
to conform to this split pattern
Parameters
----------
kernel : `loopy.LoopKernel`
The kernel to apply the splits to
dont_split: list of str
List of array names that should not be split (typically representing
global inputs / outputs)
Keyword Arguments
-----------------
ignore_split_rename_errors: bool [False]
If True, ignore errors that would result from the vector index not being
the pre-split index, as expected. Used in testing.
Returns
-------
split_kernel : `loopy.LoopKernel`
The kernel with the array splittings applied
"""
if not self._have_split():
return kernel
arrays = [(x.name, x) for x in kernel.args
if isinstance(x, lp.ArrayArg)
and x.name not in dont_split]
to_split, not_to_split = partition(
arrays, lambda x: self._should_split(x[1]))
# add to don't split list for iname access handling
dont_split += [x[0] for x in not_to_split]
if self.pre_split and dont_split:
# we still have to split potential iname accesses in this array
# to maintain correctness
kernel = self.__split_iname_access(kernel, dont_split)
for array_name, arr in to_split:
split_axis, vec_axis = self.split_and_vec_axes(arr)
kernel = self._split_array_axis_inner(
kernel, array_name, split_axis, vec_axis,
self.vector_width, self.data_order, self.is_simd,
**kwargs)
return kernel
def _split_numpy_array(self, input_array):
"""
Spits the supplied numpy array according to desired pattern
Parameters
----------
input_array : :class:`numpy.ndarray`
The input array to split
Returns
-------
output : :class:`numpy.ndarray`
The properly split / resized numpy array
"""
if not self._have_split() or not self._should_split(input_array):
return input_array
def _split_and_pad(arr, axis, width, dest_axis):
# get the last split as the ceiling
end = np.ceil(arr.shape[axis] / width) * width
# create split indicies
indicies = np.arange(width, end + 1, width, dtype=kint_type)
# split array
arr = np.split(arr, indicies, axis=axis)
# filter out empties
arr = [a for a in arr if a.size]
# check for pad
if arr[-1].shape[axis] != width:
pads = [(0, 0) for x in arr[-1].shape]
pads[axis] = (0, width - arr[-1].shape[axis])
arr[-1] = np.pad(arr[-1], pads, 'constant')
# get joined
arr = np.stack(arr, axis=axis)
# and move array axes
# the created axis is at axis + 1, and should be moved to
# the destination
return np.moveaxis(arr, axis + 1, dest_axis).copy(order=self.data_order)
# figure out split
split_axis, vec_axis = self.split_and_vec_axes(input_array)
return _split_and_pad(input_array, split_axis, self.vector_width,
vec_axis)
def split_numpy_arrays(self, arrays):
"""
Splits the provided numpy arrays
See :func:`_split_numpy_array`
Parameters
----------
arrays: list of :class:`numpy.ndarray`
The arrays to split
Returns
-------
out_arrays: list of :class:`numpy.ndarray`
The split arrays
"""
if isinstance(arrays, np.ndarray):
arrays = [arrays]
elif isinstance(arrays, dict):
return {k: self._split_numpy_array(v) for k, v in six.iteritems(arrays)}
return [self._split_numpy_array(a) for a in arrays]
kint_type = np.int32
"""
The integer type to use for kernel indicies, eventually I'd like to make this
user-specifiable
"""
problem_size = lp.ValueArg('problem_size', dtype=kint_type)
"""
The problem size variable for generated kernels, describes the size of
input/output arrays used in drivers
"""
work_size = lp.ValueArg('work_size', dtype=kint_type)
"""
The global work size of the generated kernel.
Roughly speaking, this corresponds to:
- The number of cores to utilize on the CPU
- The number of blocks to launch on the GPU
This may be specified at run-time or during kernel-generation.
"""
local_name_suffix = '_local'
"""
The suffix to append to 'local' versions of arrays (i.e., those in the working buffer
in use in the driver)
"""
global_ind = 'j'
"""str: The global initial condition index
This is the string index for the global condition loop in generated kernels
of :module:`rate_subs`
"""
var_name = 'i'
"""str: The inner loop index
This is the string index for the inner loops in generated kernels of
:module:`rate_subs`
"""
default_inds = (global_ind, var_name)
"""str: The default indicies used in main loops of :module:`rate_subs`
This is the string indicies for the main loops for generated kernels in
:module:`rate_subs`
"""
def initial_condition_dimension_vars(loopy_opts, test_size, is_driver_kernel=False):
    """
    Return the size to use for the initial condition dimension, considering whether
    we're in unit-testing, a driver kernel or simple kernel generation

    Parameters
    ----------
    loopy_opts: :class:`loopy_options`
        The loopy options to be used during kernel creation
    test_size: int or None
        The test size option, indicates whether we're in unit-testing
    is_driver_kernel: bool [False]
        If True, and not-unit testing, use the _full_ `problem_size` for the
        IC dim. size

    Returns
    -------
    size: list of :class:`loopy.ValueArg`
        The initial condition dimension size variables
    """
    # unit-testing (fixed integer size) or unique-pointer mode needs no
    # run-time size variables at all
    if isinstance(test_size, int) or loopy_opts.unique_pointers:
        return []
    # drivers additionally carry the full problem size
    return [work_size, problem_size] if is_driver_kernel else [work_size]
class tree_node(object):
    """
    A node in the :class:`MapStore`'s domain tree.
    Contains a base domain, a list of child domains, and a list of
    variables depending on this domain

    Parameters
    ----------
    owner: :class:`MapStore`
        The owning mapstore, used for iname creation
    parent : :class:`tree_node`
        The parent domain of this tree node
    domain : :class:`creator`
        The domain this :class:`tree_node` represents
    children : list of :class:`tree_node`
        The leaves of this node
    iname : str
        The iname of this :class:`tree_node`
    """
    def __add_to_owner(self, domain):
        # NOTE(review): the argument is only used for the duplicate check;
        # the mapping actually stored is always self.domain -> self
        # (preserved as-is -- confirm intent against MapStore usage).
        assert domain not in self.owner.domain_to_nodes, (
            'Domain {} is already present in the tree!'.format(
                domain.name))
        self.owner.domain_to_nodes[self.domain] = self

    def __init__(self, owner, domain, children=(),
                 parent=None, iname=None):
        # (default changed from the shared mutable list [] to an immutable
        # tuple; behavior is identical since children is copied into a set)
        self.domain = domain
        self.owner = owner
        try:
            self.children = set(children)
        except TypeError:
            # a single, non-iterable child was supplied
            self.children = set([children])
        self.parent = parent
        self.transform = None
        self._iname = iname
        self.insn = None
        self.domain_transform = None
        # book keeping
        self.__add_to_owner(domain)
        for child in self.children:
            self.__add_to_owner(child)

    def is_leaf(self):
        # a leaf has no children and is not the tree root
        return not self.children and self != self.owner.tree

    @property
    def iname(self):
        return self._iname

    @iname.setter
    def iname(self, value):
        if value is None:
            # clearing the iname re-inherits the parent's (untransformed) iname
            assert self.parent is not None, (
                "Can't set empty (untransformed) iname for root!")
            self._iname = self.parent.iname
        else:
            self._iname = value

    @property
    def name(self):
        return self.domain.name

    def set_transform(self, iname, insn, domain_transform):
        """Record the transformed iname, instruction and domain transform."""
        self.iname = iname
        self.insn = insn
        self.domain_transform = domain_transform

    def add_child(self, domain):
        """
        Adds a child domain (if not already present) to this node

        Parameters
        ----------
        domain : :class:`creator`
            The domain to create a the child node with

        Returns
        -------
        child : :class:`tree_node`
            The newly created tree node
        """
        # check for existing child
        child = next((x for x in self.children if x.domain == domain), None)
        if child is None:
            child = tree_node(self.owner, domain, parent=self)
            self.children.add(child)
            self.__add_to_owner(child)
        return child

    def has_children(self, arrays):
        """
        Checks whether an array is present in this :class:`tree_node`'s children

        Parameters
        ----------
        arrays: str, or :class:`creator`, or list of str/:class:`creator`
            The arrays to check for

        Returns
        -------
        present: list of bool
            True if array is present in children
        """
        arrays = [ary.name if isinstance(ary, creator) else ary
                  for ary in listify(arrays)]
        assert all(isinstance(x, str) for x in arrays)
        present = []
        for ary in arrays:
            child = next((x for x in self.children if x.name == ary), None)
            present.append(bool(child))
        return present

    def __repr__(self):
        return ', '.join(['{}'.format(x) for x in
                          (self.domain.name, self.iname, self.insn)])
def search_tree(root, arrays):
"""
Searches the tree from the root supplied to see which tree node's (if any)
the list of arrays are children of
Parameters
----------
root: :class:`tree_node`
The root of the tree to search
arrays: str, or :class:`creator`, or list of str/:class:`creator`
The arrays to search for
Returns
-------
parents: list of :class:`tree_node`
A list of tree node's who are the parents of the supplied arrays
If the array is not found, then the corresponding parent will be None
"""
assert isinstance(root, tree_node), (
"Can't start tree search from type {}, an instance of :class:`tree_node` "
"expected.")
# first, check the root level
parents = root.has_children(arrays)
parents = [root if p else None for p in parents]
# now call recursively for the not-found arrays
missing_inds, missing = list(zip(*enumerate(arrays)))
for child in [c for c in root.children if isinstance(c, tree_node)]:
found = search_tree(child, missing)
for | |
0)
final_output = torch.cat([final_output_forward, sent_output_backward[0]], dim = 1)
return final_output
def sememe_sum(self, input_s):
emb_sememe = self.emb_sememe.weight
input_sememe = []
for i in range(input_s.size()[0]):
input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
input_sememe = torch.stack(input_sememe, dim = 0)
sememe_c, sememe_h = self.sememesumlstm(input_sememe)
return sememe_c, sememe_h
class BILSTM_extra_void(nn.Module):
    """
    Bidirectional LSTM sentence encoder with attention pooling.

    The "void" variant does not feed sememe information into the LSTM cells;
    the raw one-hot sememe tensor is only used as an attention key during
    pooling.  Requires CUDA (tensors are moved with ``.cuda()``).
    """
    def __init__(self, config):
        super(BILSTM_extra_void, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        #self.pool_type = config['pool_type']
        # times 3: the i/o/u gate projections are fused and split() later
        self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        self.ioux_b = nn.Linear(self.in_dim, 3 * self.mem_dim)
        self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # ious handles the c and h passed in from the sememe branch;
        # both are mem_dim-dimensional
        self.ious = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        self.ious_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.fx_b = nn.Linear(self.in_dim, self.mem_dim)
        self.fx_s = nn.Linear(self.in_dim, self.mem_dim)
        self.fx_s_b = nn.Linear(self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        # fs handles the c and h passed in from the sememe branch
        self.fs = nn.Linear(self.mem_dim, self.mem_dim)
        self.fs_b = nn.Linear(self.mem_dim, self.mem_dim)
        self.max_pad = True
        self.W_s = nn.Linear(config['sememe_size'], self.mem_dim)
        self.W = nn.Linear(self.mem_dim, self.mem_dim)
        self.query = nn.Embedding(2*self.mem_dim, 1)
        self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_s_b = nn.Linear(config['sememe_size'], self.mem_dim)
        self.W_b = nn.Linear(self.mem_dim, self.mem_dim)
        self.query_b = nn.Embedding(2*self.mem_dim, 1)
        self.W_p_b = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_x_b = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()
    def reset_parameters(self):
        """Kaiming-initialize the gate projections and zero their biases."""
        layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.ious, self.ious_b, self.fx, self.fx_b, self.fx_s, self.fx_s_b, self.fh, self.fh_b, self.fs, self.fs_b]
        for layer in layers:
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)
    def node_forward(self, inputs, hx):
        """One forward-direction LSTM step; returns (c, h)."""
        child_c = hx[0]
        child_h = hx[1]
        iou = self.ioux(inputs) + self.iouh(child_h)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(
            self.fh(child_h) + self.fx(inputs)
        )
        fc = torch.mul(f, child_c) #part of memory cell induced by word-child
        c = torch.mul(i, u) + fc #sum means sigma
        h = torch.mul(o, torch.tanh(c))
        return (c, h)
    def node_backward(self, inputs, hx):
        """One backward-direction LSTM step; returns (c, h)."""
        child_c = hx[0]
        child_h = hx[1]
        iou = self.ioux_b(inputs) + self.iouh_b(child_h)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(
            self.fh_b(child_h) + self.fx_b(inputs)
        )
        fc = torch.mul(f, child_c) #part of memory cell induced by word-child
        c = torch.mul(i, u) + fc #sum means sigma
        h = torch.mul(o, torch.tanh(c))
        return (c, h)
    def forward(self, sent, sent_len, sememe_data):
        """
        Encode a batch of sentences.

        sent: (time, batch, word_emb_dim) embeddings; sent_len: numpy array
        of per-sentence lengths; sememe_data: one-hot sememe tensor used as
        the attention key.  Returns a (batch, 2*mem_dim) pooled encoding.
        """
        # hx: (child_c, child_h)
        emb_s = sememe_data.float().cuda()
        # sort the batch by decreasing length so each timestep operates on a
        # contiguous prefix of still-active sequences
        sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        sent_len_sorted = sent_len_sorted.copy()
        idx_unsort = np.argsort(idx_sort)
        idx_sort = torch.from_numpy(idx_sort).cuda()
        sent = sent.index_select(1, idx_sort)
        max_time, batch_size, _ = sent.size()
        # pack_length[t] = number of sequences still active at timestep t
        # (np.int was removed in numpy>=1.24; np.int64 is the same on Linux)
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
        time_point = batch_size-1
        last_point = 0
        while(True):
            pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
            last_point = sent_len_sorted[time_point]
            if(sent_len_sorted[time_point] == max_time):
                break
            time_point = time_point-1
        pack_length = torch.from_numpy(pack_length).cuda()
        output_forward = []
        # bug fix: the original referenced the undefined name ``inputs``
        # here (NameError); ``sent`` is the input tensor
        hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
                    sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
        for time in range(max_time):
            next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward)
            output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
            if(time < max_time-1):
                hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
        output_backward = [[] for i in range(max_time)]
        # bug fix: ``inputs`` -> ``sent`` (as above)
        hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
                    sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
        for time in range(max_time):
            next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward)
            output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
            if(time < max_time-1):
                hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
                    torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
        a = torch.stack(output_forward, dim = 0)
        b = torch.stack(output_backward, dim = 0)
        # restore the original batch order
        idx_unsort = torch.from_numpy(idx_unsort).cuda()
        sent_output_forward = a.index_select(1, idx_unsort)
        sent_output_backward = b.index_select(1, idx_unsort)
        new_output_forward = []
        new_output_2_forward = []
        new_output_backward = []
        # attention pooling over forward states, keyed on the raw sememes
        for i in range(len(sent_len)):
            hidden_old_forward = sent_output_forward[0:sent_len[i], i, :]
            new_output_2_forward.append(sent_output_forward[sent_len[i]-1, i])
            hidden = self.W(hidden_old_forward)
            emb_s_sum = emb_s[0:sent_len[i], i, :]
            emb_s_sum = self.W_s(emb_s_sum)
            hidden = torch.cat([hidden, emb_s_sum], dim = 1)
            att = torch.tanh(torch.mm(hidden, self.query.weight))
            new_output_forward.append(torch.mm(att.transpose(1,0), hidden_old_forward))
        new_output_forward = self.W_p(torch.squeeze(torch.stack(new_output_forward, dim = 0))) + self.W_x(torch.squeeze(torch.stack(new_output_2_forward, dim = 0)))
        new_output_forward = torch.tanh(new_output_forward)
        # attention pooling over backward states
        for i in range(len(sent_len)):
            hidden_old_backward = sent_output_backward[0:sent_len[i], i, :]
            hidden = self.W_b(hidden_old_backward)
            emb_s_sum = emb_s[0:sent_len[i], i, :]
            emb_s_sum = self.W_s_b(emb_s_sum)
            hidden = torch.cat([hidden, emb_s_sum], dim = 1)
            att = torch.tanh(torch.mm(hidden, self.query_b.weight))
            new_output_backward.append(torch.mm(att.transpose(1,0), hidden_old_backward))
        new_output_backward = self.W_p_b(torch.squeeze(torch.stack(new_output_backward, dim = 0))) + self.W_x_b(sent_output_backward[0])
        new_output_backward = torch.tanh(new_output_backward)
        final_output = torch.cat([new_output_forward, new_output_backward], dim = 1)
        return final_output
class BILSTM_extra_concat(nn.Module):
    """
    Bidirectional LSTM sentence encoder with attention pooling.

    The "concat" variant concatenates the summed sememe embedding to the
    word embedding at every LSTM step (hence the ``2 * in_dim`` input
    projections).  Requires CUDA (tensors are moved with ``.cuda()``).
    """
    def __init__(self, config):
        super(BILSTM_extra_concat, self).__init__()
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.sememe_dim = config['sememe_dim']
        self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
        self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
        self.sememe_dim = config['sememe_dim']
        self.sememe_size = config['sememe_size']
        self.emb_sememe = nn.Embedding(self.sememe_size, self.sememe_dim)
        self.in_dim = config['word_emb_dim']
        self.mem_dim = config['enc_lstm_dim']
        #self.pool_type = config['pool_type']
        # times 3: the i/o/u gate projections are fused and split() later;
        # times 2 on the input: word embedding concatenated with sememe_h
        self.ioux = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        self.ioux_b = nn.Linear(2 * self.in_dim, 3 * self.mem_dim)
        self.iouh_b = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        self.fx = nn.Linear(2 * self.in_dim, self.mem_dim)
        self.fx_b = nn.Linear(2 * self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
        self.fh_b = nn.Linear(self.mem_dim, self.mem_dim)
        self.max_pad = True
        self.W_s = nn.Linear(self.in_dim, self.mem_dim)
        self.W = nn.Linear(self.mem_dim, self.mem_dim)
        self.query = nn.Embedding(2*self.mem_dim, 1)
        self.W_p = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_x = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_s_b = nn.Linear(self.in_dim, self.mem_dim)
        self.W_b = nn.Linear(self.mem_dim, self.mem_dim)
        self.query_b = nn.Embedding(2*self.mem_dim, 1)
        self.W_p_b = nn.Linear(self.mem_dim, self.mem_dim)
        self.W_x_b = nn.Linear(self.mem_dim, self.mem_dim)
        self.reset_parameters()
    def reset_parameters(self):
        """Kaiming-initialize the gate projections and zero their biases."""
        layers = [self.ioux, self.ioux_b, self.iouh, self.iouh_b, self.fx, self.fx_b, self.fh, self.fh_b]
        for layer in layers:
            init.kaiming_normal_(layer.weight)
            if layer.bias is not None:
                init.constant_(layer.bias, val=0)
    def node_forward(self, inputs, hx, sememe_h):
        """One forward-direction LSTM step on [word ; sememe] input."""
        child_c = hx[0]
        child_h = hx[1]
        inputs = torch.cat([inputs, sememe_h], dim = 1)
        iou = self.ioux(inputs) + self.iouh(child_h)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(
            self.fh(child_h) + self.fx(inputs)
        )
        fc = torch.mul(f, child_c) #part of memory cell induced by word-child
        c = torch.mul(i, u) + fc
        h = torch.mul(o, torch.tanh(c))
        return (c, h)
    def node_backward(self, inputs, hx, sememe_h):
        """One backward-direction LSTM step on [word ; sememe] input."""
        child_c = hx[0]
        child_h = hx[1]
        inputs = torch.cat([inputs, sememe_h], dim = 1)
        iou = self.ioux_b(inputs) + self.iouh_b(child_h)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(
            self.fh_b(child_h) + self.fx_b(inputs)
        )
        fc = torch.mul(f, child_c) #part of memory cell induced by word-child
        c = torch.mul(i, u) + fc
        h = torch.mul(o, torch.tanh(c))
        return (c, h)
    def forward(self, sent, sent_len, sememe_data):
        """
        Encode a batch of sentences; see :class:`BILSTM_extra_void.forward`.
        The sememe embedding sum is concatenated to the input at every step.
        """
        # hx: (child_c, child_h)
        sememe_h = self.sememe_sum(sememe_data)
        # sort the batch by decreasing length so each timestep operates on a
        # contiguous prefix of still-active sequences
        sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        sent_len_sorted = sent_len_sorted.copy()
        idx_unsort = np.argsort(idx_sort)
        idx_sort = torch.from_numpy(idx_sort).cuda()
        sent = sent.index_select(1, idx_sort)
        sememe_h = sememe_h.index_select(1, idx_sort)
        max_time, batch_size, _ = sent.size()
        # pack_length[t] = number of sequences still active at timestep t
        # (np.int was removed in numpy>=1.24; np.int64 is the same on Linux)
        pack_length = np.zeros([max_time, 1], dtype = np.int64)
        time_point = batch_size-1
        last_point = 0
        while(True):
            pack_length[last_point: sent_len_sorted[time_point]] = time_point+1
            last_point = sent_len_sorted[time_point]
            if(sent_len_sorted[time_point] == max_time):
                break
            time_point = time_point-1
        pack_length = torch.from_numpy(pack_length).cuda()
        output_forward = []
        # bug fix: the original referenced the undefined name ``inputs``
        # here (NameError); ``sent`` is the input tensor
        hx_forward = (sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_(),
                    sent[0][0].detach().new(batch_size, self.mem_dim).fill_(0.).requires_grad_())
        for time in range(max_time):
            next_hx = self.node_forward(sent[time, 0:pack_length[time]], hx_forward, sememe_h[time, 0:pack_length[time]])
            output_forward.append(torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0))
            if(time < max_time-1):
                hx_forward = (next_hx[0][0:pack_length[time+1]], next_hx[1][0:pack_length[time+1]])
        output_backward = [[] for i in range(max_time)]
        # bug fix: ``inputs`` -> ``sent`` (as above)
        hx_backward = (sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_(),
                    sent[0][0].detach().new(pack_length[max_time-1], self.mem_dim).fill_(0.).requires_grad_())
        for time in range(max_time):
            next_hx = self.node_backward(sent[max_time-time-1, 0:pack_length[max_time-time-1]], hx_backward, sememe_h[max_time-time-1, 0:pack_length[max_time-time-1]])
            output_backward[max_time-time-1] = torch.cat([next_hx[1], torch.zeros([batch_size-next_hx[1].size()[0], self.mem_dim], device = 'cuda')], dim = 0)
            if(time < max_time-1):
                hx_backward = (torch.cat([next_hx[0], torch.zeros([pack_length[max_time-time-2]-next_hx[0].size()[0], self.mem_dim]).cuda()], dim = 0), \
                    torch.cat([next_hx[1], torch.zeros([pack_length[max_time-time-2]-next_hx[1].size()[0], self.mem_dim]).cuda()], dim = 0))
        a = torch.stack(output_forward, dim = 0)
        b = torch.stack(output_backward, dim = 0)
        # restore the original batch order
        idx_unsort = torch.from_numpy(idx_unsort).cuda()
        sent_output_forward = a.index_select(1, idx_unsort)
        sent_output_backward = b.index_select(1, idx_unsort)
        sememe_h = sememe_h.index_select(1, idx_unsort)
        new_output_forward = []
        new_output_2_forward = []
        new_output_backward = []
        # attention pooling over forward states, keyed on sememe embeddings
        for i in range(len(sent_len)):
            hidden_old_forward = sent_output_forward[0:sent_len[i], i, :]
            new_output_2_forward.append(sent_output_forward[sent_len[i]-1, i])
            hidden = self.W(hidden_old_forward)
            emb_s_sum = sememe_h[0:sent_len[i], i, :]
            emb_s_sum = self.W_s(emb_s_sum)
            hidden = torch.cat([hidden, emb_s_sum], dim = 1)
            att = torch.tanh(torch.mm(hidden, self.query.weight))
            new_output_forward.append(torch.mm(att.transpose(1,0), hidden_old_forward))
        new_output_forward = self.W_p(torch.squeeze(torch.stack(new_output_forward, dim = 0))) + self.W_x(torch.squeeze(torch.stack(new_output_2_forward, dim = 0)))
        new_output_forward = torch.tanh(new_output_forward)
        # attention pooling over backward states
        for i in range(len(sent_len)):
            hidden_old_backward = sent_output_backward[0:sent_len[i], i, :]
            hidden = self.W_b(hidden_old_backward)
            emb_s_sum = sememe_h[0:sent_len[i], i, :]
            emb_s_sum = self.W_s_b(emb_s_sum)
            hidden = torch.cat([hidden, emb_s_sum], dim = 1)
            att = torch.tanh(torch.mm(hidden, self.query_b.weight))
            new_output_backward.append(torch.mm(att.transpose(1,0), hidden_old_backward))
        new_output_backward = self.W_p_b(torch.squeeze(torch.stack(new_output_backward, dim = 0))) + self.W_x_b(sent_output_backward[0])
        new_output_backward = torch.tanh(new_output_backward)
        final_output = torch.cat([new_output_forward, new_output_backward], dim = 1)
        return final_output
    def sememe_sum(self, input_s):
        """Project one-hot sememe vectors through the sememe embedding,
        returning a (time, batch, sememe_dim) tensor."""
        emb_sememe = self.emb_sememe.weight
        input_sememe = []
        for i in range(input_s.size()[0]):
            input_sememe.append(torch.mm(input_s[i].float(), emb_sememe))
        input_sememe = torch.stack(input_sememe, dim = 0)
        return input_sememe
class BILSTM_extra_gate(nn.Module):
def __init__(self, config):
super(BILSTM_extra_gate, self).__init__()
self.enc_lstm_dim = config['enc_lstm_dim']
self.sememe_dim = config['sememe_dim']
self.sememesumlstm = SememeSumLstm(self.sememe_dim, self.enc_lstm_dim)
self.sememesumGRU = SememeSumGRU(self.sememe_dim, self.enc_lstm_dim)
self.sememe_dim = config['sememe_dim']
self.sememe_size = | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 12 17:17:30 2021
@author: <NAME>
"""
import time
import itertools
import os
import sys
import numpy as np
import scipy as sc
from skimage import segmentation, measure, morphology, filters, transform
import stl
from numba import njit, prange
from src.Tools.conductivity_solver import ConductivitySolver
from src.Tools.rev_estudos_numba import maxi_balls
from src.Tools.jit_transport_solver import calculate_transport
from src.Tools.jit_minkowsky import get_minkowsky_functionals, get_minkowsky_functionals_parallel, minkowsky_names
PI = np.pi
SRC_FOLDER = os.path.dirname(os.path.realpath(__file__))
MC_TEMPLATES_FILE = "marching cubes templates.dat"
################
# HELPER FUNCTIONS #
################
def face_orientation(v0, v1, v2):
    '''
    Return the orientation of the face normal along the z axis:
    -1 when the normal points up (+z), 1 when it points down (-z),
    0 when the face is (near) vertical.
    '''
    # bug fix: a stray trailing comma made v0 a 1-tuple; the code only
    # worked because the resulting (1, 3) cross product was re-indexed
    # with vector[0][2]
    v0 = np.array(v0)
    v1 = np.array(v1)
    v2 = np.array(v2)
    normal = np.cross(v1 - v0, v2 - v0)
    z_comp = normal[2]
    if z_comp > 0.1:
        return -1
    elif z_comp < -0.1:
        return 1
    else:
        return 0
def area_of_triangle(p0, p1, p2):
    '''
    Area of the triangle (p0, p1, p2), as per Heron's formula.
    '''
    # pairwise euclidean side lengths
    sides = [sc.spatial.distance.euclidean(a, b)
             for a, b in itertools.combinations((p0, p1, p2), 2)]
    semi = sum(sides) / 2
    # Heron: sqrt(s * (s - a) * (s - b) * (s - c))
    return np.sqrt(semi * np.prod([semi - side for side in sides]))
def mc_templates_generator(override = False):
    '''
    Generates a marching cubes template list file, if one is not available

    Writes one line per cube configuration (0..255) to MC_TEMPLATES_FILE in
    the form "index;[x,y,z][x,y,z]...;[i0,i1,i2]..." (vertices, then faces).

    Parameters
    ----------
    override : bool
        If True, regenerate the file even when it already exists.
    '''
    # nothing to do when the template file is already present
    if MC_TEMPLATES_FILE in os.listdir(SRC_FOLDER) and not override:
        return
    # map each corner's bit value (2**n) to its (x, y, z) coordinate in the
    # 2x2x2 template cube
    summation_to_coordinate = {}
    for i in [(x, y, z) for x in range(2)
              for y in range(2)
              for z in range(2)]:
        summation_to_coordinate[2 ** (i[0] + 2*i[1] + 4*i[2])] = i
    # one (vertices, faces) slot per configuration index
    templates_triangles = []
    for _ in range(256):
        templates_triangles.append( [[],[]] )
    # skip 0 and 255 (fully empty / fully occupied cubes have no surface)
    for i in range(1,255):
        array = np.zeros((2, 2, 2))
        index = i
        # decode the bitmask into occupied corners
        for j in range(7, -1, -1):
            e = 2**j
            if index >= e:
                index -= e
                array[summation_to_coordinate[e]] = 1
        # NOTE(review): marching_cubes_lewiner was removed in newer
        # scikit-image releases (use measure.marching_cubes there)
        verts, faces = measure.marching_cubes_lewiner(array)[0:2]
        templates_triangles[i][0] = verts
        templates_triangles[i][1] = faces
    # serialize all templates to the cache file
    with open(os.path.join(SRC_FOLDER, MC_TEMPLATES_FILE), mode = 'w') as file:
        for i in range(256):
            verts, faces = templates_triangles[i]
            file.write(f'{i};')
            for v in verts:
                file.write(f'[{v[0]},{v[1]},{v[2]}]')
            file.write(';')
            for f in faces:
                file.write(f'[{f[0]},{f[1]},{f[2]}]')
            file.write('\n')
def create_mc_template_list(spacing = (1, 1, 1)):
    '''
    Return area and volume lists for the marching cubes templates
    Reads the templates file
    Input:
        Tuple with three values for x, y, and z lengths of the voxel edges

    Returns two dicts keyed by template index (0..255): surface area and
    enclosed voxel volume for each marching-cubes configuration.
    '''
    areas = {}
    volumes = {}
    triangles = {}
    # bit values of the four corners on the top (z = 1) face
    vertices_on_top = set((16, 32, 64, 128))
    with open(os.path.join(SRC_FOLDER, MC_TEMPLATES_FILE), mode = 'r') as file:
        for line in file:
            # each line: "index;[x,y,z]...;[i0,i1,i2]..."
            index, verts, faces = line.split(';')
            index = int(index)
            if len(verts) > 0:
                verts = verts.strip()[1:-1].split('][')
                verts = [v.split(',') for v in verts]
                verts = [[float(edge) for edge in v] for v in verts]
                faces = faces.strip()[1:-1].split('][')
                faces = [f.split(',') for f in faces]
                faces = [[int(edge) for edge in f] for f in faces]
            else:
                verts = []
                faces = []
            # decode the configuration bitmask into occupied corner values
            occupied_vertices = set()
            sub_index = index
            for i in range(7,-1,-1):
                e = 2 ** i
                if sub_index >= e:
                    occupied_vertices.add(e)
                    sub_index -= e
            # volume of the prism under the top face, by number of occupied
            # top corners (fractions of a unit cube)
            total_vertices_on_top = len(occupied_vertices & vertices_on_top)
            if total_vertices_on_top == 0:
                basic_volume = 0
            elif total_vertices_on_top == 1:
                basic_volume = 1/8
            elif total_vertices_on_top == 2:
                # diagonal pairs enclose less volume than adjacent pairs
                if ((16 in occupied_vertices and 128 in occupied_vertices) or
                    (32 in occupied_vertices and 64 in occupied_vertices)):
                    basic_volume = 1/4
                else:
                    basic_volume = 1/2
            elif total_vertices_on_top == 3:
                basic_volume = 7/8
            elif total_vertices_on_top == 4:
                basic_volume = 1
            # correct the prism volume by the signed volume under each
            # surface triangle (projected area * mean height * orientation)
            for f in faces:
                v0, v1, v2 = [verts[i] for i in f]
                v0_proj, v1_proj, v2_proj = [(i[0], i[1], 0) for i in (v0, v1, v2)]
                mean_z = sum([i[2] for i in (v0, v1, v2)])/3
                proj_area = area_of_triangle(v0_proj, v1_proj, v2_proj)
                direction = face_orientation(v0, v1, v2)
                basic_volume += mean_z * proj_area * direction
            # scale vertex coordinates by the physical voxel edge lengths
            for i in range(len(verts)):
                verts[i] = [j[0] * j[1] for j in zip(verts[i], spacing)]
            triangles[index] = (tuple(verts), tuple(faces), basic_volume)
    voxel_volume = np.prod(np.array(spacing))
    # accumulate per-template surface area and physical volume
    for i in triangles:
        area = 0
        verts, faces, relative_volume = triangles[i]
        for f in faces:
            triangle_area = area_of_triangle(verts[f[0]],
                                             verts[f[1]],
                                             verts[f[2]])
            area += triangle_area
        volume = voxel_volume * relative_volume
        areas[i] = area
        volumes[i] = volume
    return areas, volumes
def cube_generator():
    '''
    Generator yelds (x, y, z) coordinates for hollow cubes centered in (0, 0, 0)
    and edge length increasing by 2 each new cube, starting with edge
    length equal to 3.
    '''
    # start at a corner of the first (edge length 3) shell
    x = -1
    y = -1
    z = -1
    while 1:
        # the coordinate to yield; the branches below compute the *next*
        # surface point (the state machine walks the shell and, once a shell
        # is exhausted, jumps to the corner of the next larger one)
        out = (x, y, z)
        if abs(x) == abs(y) and abs(z) <= abs(x):
            if -abs(x) <= z and z < abs(x):
                z += 1
            elif -abs(x) <= z and z == abs(x):
                if x < 0 and y < 0:
                    z = -z
                    x += 1
                elif x > 0 and y < 0:
                    z = -z
                    x = -x
                    y += 1
                elif x < 0 and y > 0:
                    z = -z
                    x += 1
                elif x > 0 and y > 0:
                    # shell finished: move to the corner of the next shell
                    x = -z - 1
                    y = -z - 1
                    z = -z - 1
        elif abs(x) < abs(y) and z == -abs(y):
            z += 1
        elif abs(x) < abs(y) and z == abs(y):
            z = -z
            x += 1
        elif abs(x) > abs(y) and z == -abs(x):
            z += 1
        elif abs(x) > abs(y) and z == abs(x):
            z = -z
            if x < 0:
                x += 1
            elif x > 0:
                x = -x
                y += 1
        elif z < 0 and abs(x) < abs(z) and abs(y) < abs(z):
            # interior column: jump from bottom face straight to top face
            z = -z
        elif z > 0 and abs(x) < z and abs(y) < z:
            z = -z
            x += 1
        elif abs(x) < abs(y) and abs(z) < abs(y):
            z += 1
        elif abs(y) < abs(x) and abs(z) < abs(x):
            z += 1
        else:
            # should be unreachable for points on a shell surface
            print("Error: ", x, y, z)
        yield out
def check_percolation(img):
    '''
    Returns True if binary image percolates along the z axis
    '''
    # label the connected foreground components
    labeled = sc.ndimage.label(img)[0]
    # labels present on both the first and last z-slice
    shared_labels = np.intersect1d(
        np.unique(labeled[:, :, 0]),
        np.unique(labeled[:, :, -1]),
        assume_unique = True
    )
    # percolation requires at least one shared non-background label
    return (shared_labels > 0).sum() > 0
def remove_non_percolating(img):
    '''
    Return image with non-percolating elements changed to 0

    Components that do not connect the first and last z-slices are zeroed.
    '''
    labeled = sc.ndimage.label(img)[0]
    bottom_labels = np.unique(labeled[:, :, 0])
    top_labels = np.unique(labeled[:, :, -1])
    percolating_labels = np.intersect1d(
        bottom_labels,
        top_labels,
        assume_unique = True
    )
    # drop the background label; boolean-mask form is also safe when the
    # intersection is empty (the old `[0]` indexing raised IndexError)
    percolating_labels = percolating_labels[percolating_labels > 0]
    # bug fix: the mask must be computed on the *labeled* image -- testing
    # the raw binary image (np.isin(img, ...)) kept every foreground voxel
    # whenever label 1 happened to percolate
    return img * np.isin(labeled, percolating_labels)
def wrap_sample(img, label = -1):
    '''
    Assigns *label* (default -1) to elements outside de convex hull of an
    image computed slicewise along de X axis

    Parameters
    ----------
    img : ndarray (x, y, z)
    label : int
        Value assigned to voxels outside the slicewise convex hull.
    '''
    print ('Wraping sample')
    x, y, z = img.shape
    outside = np.zeros((x, y, z), dtype = np.int8)
    if img.max() > 127:
        # halve so the values fit in int8 without overflow
        img = img // 2
    img = img.astype('int8')
    for i in range(x):
        sys.stdout.write(f"\rWraping {(100 * i / x):.2f} %")
        sys.stdout.flush()
        # 1 outside the convex hull of this slice, 0 inside
        outside[i, :, :] = (
            np.int8(1)
            - morphology.convex_hull_image(img[i, :, :])
        )
    print()
    # bug fix: ``label`` was previously ignored; with the default of -1 this
    # is identical to the old ``img - outside``
    return img + label * outside
###########
# OPERATIONS #
###########
def otsu_threshold(img):
    '''Binarize *img* at Otsu's threshold, returning an int8 0/1 volume.'''
    cutoff = filters.threshold_otsu(img)
    binary = img >= cutoff
    return binary.astype('int8')
def watershed(img, compactness, two_d = False):
if np.max(img) > 1:
img = otsu_threshold(img)
img[0, :, :]=0
img[-1, :, :] = 0
img[:, 0, :] = 0
img[:, -1, :] = 0
if img.shape[2] >= 3:
img[:, :, 0] = 0
img[:, :, -1] = 0
else:
x, y, z = img.shape
temp_img = np.zeros((x, y, z+2))
temp_img[:, :, 1:-1] = img
img = temp_img
tempo = time.process_time()
print ('Start', time.process_time() - tempo)
tempo = time.process_time()
if two_d:
sampling = (1, 1, 1000)
else:
sampling = (1, 1, 1)
#Calcular mapa de distância
distance_map = sc.ndimage.morphology.distance_transform_edt(
img,
sampling = sampling
)
h, w, d = img.shape
print ('Finished distance map', time.process_time() - tempo)
tempo = time.process_time()
#Identificar máxmos locais
it = ((i, j, k) for i in range(1, h-1)
for j in range(1, w-1)
for k in range(1, d-1))
mask = np.ones((3, 3, 3))
mask[1, 1, 1] = 0
markers = np.zeros_like(img).astype('uint32')
disp_it = ((i, j, k) for i in range(-1, 2)
for j in range(-1, 2)
for k in range(-1, 2))
x, y, z = markers.shape
for dx, dy, dz in disp_it:
markers[1:-1, 1:-1, 1:-1] = np.maximum(
distance_map[slice(1+dx, (-1+dx if -1+dx !=0 else None)),
slice(1+dy, (-1+dy if -1+dy !=0 else None)),
slice(1+dz, (-1+dz if -1+dz !=0 else None))
],
markers[slice(1, -1),slice(1, -1),slice(1, -1)])
markers = distance_map >= markers
markers = markers.astype('uint32')
print ('Finished local maxima', time.process_time()-tempo)
tempo = time.process_time()
| |
<reponame>sandernaert/brat
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from projectconfig import ProjectConfiguration
from verify_annotations import argparser
from annotation import Annotations,TextAnnotations
from ssplit import regex_sentence_boundary_gen
from ssplit import _sentence_boundary_gen
from os.path import join as path_join
#from pynlpl.formats import folia
import folia
class EntityNotFoundError(Exception):
    """Raised when a brat entity id cannot be resolved in the FoLiA document."""
    def __init__(self, entity):
        Exception.__init__(self)
        # The offending brat entity (annotation object or id).
        self.entity = entity
    def __str__(self):
        # Bug fix: the original referenced the undefined global name
        # 'entity' instead of the stored attribute, so stringifying the
        # exception raised NameError instead of the intended message.
        return u'Entity not found: %s' % (self.entity,)
def build_entities_attr(ann):
    """Map every textbound entity id in *ann* to the set of attribute
    annotations whose target is that entity.

    Returns a dict: entity id -> set of attribute annotation objects.
    """
    # Note: ann.get_attributes() is re-queried per entity, exactly like
    # the behaviour callers rely on.
    return dict(
        (entity.id,
         set(attr for attr in ann.get_attributes() if attr.target == entity.id))
        for entity in ann.get_textbounds()
    )
def _text_by_offsets_gen(text, offsets):
for start, end in offsets:
sentence = text[start:end]
yield start, end, sentence
def build_text_structure(ann,txt_file_path):
    '''
    Will split a text file in paragraphs, sentences and words and return the folia document
    For every word it will check 2 main things:
    1) is the word part of some entities? and if so it will add them to a list of lists of words
    2) is their an entity that ends with this word? if so it will create the entity with the right words out of the list and delete this element after
    it took the words out.
    After every sentence, paragraph all the entities that started and ended within that structure will be added into the EntityLayer
    '''
    from annotation import open_textfile
    from tokenise import gtb_token_boundary_gen
    def add_list_entities(struct, folia_entities):
        #will check if any entities have to be added and add if needed
        if folia_entities:
            layer = struct.append(folia.EntitiesLayer)
            for folia_entity in folia_entities:
                layer.append(folia_entity)
                # Copy the brat attributes of this entity as FoLiA features.
                for attr in attributes[folia_entity.id]:
                    folia_entity.append(folia.Feature(doc,subset=attr.type, cls=str(attr.value)))
    try:
        #Sort entities on offset instead of id
        entities = sorted(ann.get_textbounds(), key=lambda entity: (entity.start, -entity.end))
        index = 0
        doc = folia.Document(id='brat')
        attributes = build_entities_attr(ann)
        folia_text = doc.append(folia.Text)
        paragraph = folia_text.append(folia.Paragraph)
        # Becomes the current folia.Sentence once the first sentence is seen.
        folia_sentence = 0
        par_start = 0
        #fictive sets
        doc.annotationdefaults[folia.AnnotationType.ENTITY] = {"entiteit_set.xml": {} }
        doc.annotations.append( (folia.AnnotationType.ENTITY, "entiteit_set.xml" ) )
        doc.annotationdefaults[folia.AnnotationType.MORPHOLOGICAL] = {"morph_set.xml": {} }
        doc.annotations.append( (folia.AnnotationType.MORPHOLOGICAL, "morph_set.xml" ) )
        entity = entities[index]
        # entities_words[i] collects the folia Word/Morpheme objects that make
        # up entity entities[index+i]; entries are popped once the entity ends.
        entities_words=[]
        inner_index=0
        entities_words.append([])
        # Entities to be flushed into the EntitiesLayer of the enclosing
        # paragraph / sentence / text respectively.
        folia_entitiesLayer_par=[]
        folia_entitiesLayer_sen=[]
        folia_entitiesLayer_txt=[]
        with open_textfile(txt_file_path, 'r') as txt_file:
            text = txt_file.read()
        offsets = [o for o in regex_sentence_boundary_gen(text)]
        for start, end, sentence in _text_by_offsets_gen(text, offsets):
            # An empty span directly after a newline marks a paragraph break.
            if start == end and text[start-1] == '\n':
                add_list_entities(paragraph, folia_entitiesLayer_par)
                folia_entitiesLayer_par = []
                paragraph = folia_text.append(folia.Paragraph)
                par_start = start
            elif sentence != "" :
                add_list_entities(folia_sentence, folia_entitiesLayer_sen)
                folia_entitiesLayer_sen = []
                folia_sentence = paragraph.append(folia.Sentence,sentence)
                offsetsw = [o for o in gtb_token_boundary_gen(sentence)]
                for tok in _text_by_offsets_gen(sentence, offsetsw):
                    entity = entities[index]
                    inner_index=0
                    folia_word = folia_sentence.append(folia.Word, tok[2])
                    morph_layer= ""
                    #check if word is part of the entity and if so remember folia word
                    # Scan every entity that overlaps the current position
                    # (entities are offset-sorted, so the scan stops at the
                    # first one that starts after the current entity's end).
                    while entity.start <= entities[index].end :
                        while( len(entities_words) <= inner_index ):
                            entities_words.append([])
                        for span_start, span_end in entity.spans:
                            # Token lies fully inside this span: the whole
                            # Word belongs to the entity.
                            if ( span_start <= tok[0]+start and tok[1]+start <= span_end):
                                entities_words[inner_index].append(doc[folia_word.id])
                            #entity ends within the word
                            elif (tok[1]+start >= span_end and span_end > tok[0]+start) :
                                offset_start = span_start-(start+tok[0])
                                if offset_start <0 :# entity started before this word
                                    offset_start =0;
                                offset_end = span_end-(start+tok[0])
                                string = tok[2][offset_start:offset_end]
                                # Partial-word matches are represented as
                                # morphemes inside the Word.
                                if not morph_layer:
                                    morph_layer = folia_word.append(folia.MorphologyLayer)
                                morph = morph_layer.append(folia.Morpheme(doc, generate_id_in=folia_word))
                                morph.append(folia.TextContent(doc, value=string, offset=offset_start))
                                entities_words[inner_index].append(doc[morph.id])
                            #entity starts within the word
                            elif (tok[1]+start > span_start and span_start >= tok[0]+start) :
                                offset_start = span_start-(start+tok[0])
                                offset_end = span_end-(start+tok[0])
                                string = tok[2][offset_start:offset_end]
                                if not morph_layer:
                                    morph_layer = folia_word.append(folia.MorphologyLayer)
                                morph = morph_layer.append(folia.Morpheme(doc, generate_id_in=folia_word))
                                morph.append(folia.TextContent(doc, value=string, offset=offset_start))
                                entities_words[inner_index].append(doc[morph.id])
                        inner_index = inner_index + 1
                        if len(entities) > index + inner_index :
                            entity = entities[index+inner_index]
                        else:
                            break
                    entity = entities[index]
                    inner_index = 0
                    #check for end of an entity and append entity to either text, paragraph or sentece depending on start of the entity
                    current_index = index
                    while entity.start <= entities[current_index].end :
                        # Entity is fully contained in text seen so far:
                        # emit it at the narrowest enclosing structure.
                        if entity.end <= start + tok[1] and entity.start <= start + tok[0] :
                            if (entity.start >= start):
                                folia_entitiesLayer = folia_entitiesLayer_sen
                            elif (entity.start >= par_start):
                                folia_entitiesLayer = folia_entitiesLayer_par
                            else:
                                folia_entitiesLayer = folia_entitiesLayer_txt
                            if entities_words[inner_index]:
                                folia_entity = folia.Entity(doc, cls=entity.type, id=entity.id , contents=entities_words[inner_index])
                                folia_entitiesLayer.append(folia_entity)
                            elif not any(x.id == entity.id for x in folia_entitiesLayer):
                                #see if entity is already added
                                try:
                                    doc[entity.id]
                                except KeyError:
                                    raise EntityNotFoundError(entity)
                            # Completed the entity at the head of the queue:
                            # drop it and advance past any already-empty slots.
                            if(inner_index == 0):
                                entities_words.pop(0)
                                if len(entities) > index+1 :
                                    index = index + 1
                                    for i in range(0, len(entities_words)):
                                        if(not entities_words[0]):
                                            entities_words.pop(0)
                                            index = index + 1
                                        else:
                                            break
                            elif(inner_index > 0):
                                entities_words[inner_index]=[]
                                inner_index = inner_index + 1
                        else:
                            inner_index = inner_index + 1
                        if len(entities) > index + inner_index:
                            entity = entities[index+inner_index]
                        else:
                            break
        # Flush entities that span beyond the last sentence/paragraph.
        add_list_entities(paragraph, folia_entitiesLayer_par)
        add_list_entities(folia_sentence, folia_entitiesLayer_sen)
        add_list_entities(folia_text, folia_entitiesLayer_txt)
        return doc
    except IOError:
        # NOTE(review): swallowing IOError makes the function return None on
        # unreadable input files; callers must cope with that.
        pass # Most likely a broken pipe
def build_relations(ann):
    """Index the annotations of *ann* by the entity they hang off.

    Returns a triple (relations, equivs, events):
      relations -- entity id -> list of binary relations whose arg1 is that entity
      equivs    -- entity id -> list of equivalences whose first member is that entity
      events    -- trigger entity id -> event annotation
    """
    from annotation import TextBoundAnnotation, AttributeAnnotation, BinaryRelationAnnotation, EventAnnotation, EquivAnnotation
    relations, equivs, events = {}, {}, {}
    for item in ann:
        # Every textbound gets (possibly empty) buckets up front.
        if isinstance(item, TextBoundAnnotation):
            relations[item.id] = []
            equivs[item.id] = []
        if isinstance(item, BinaryRelationAnnotation):
            relations[item.arg1].append(item)
        elif isinstance(item, EquivAnnotation):
            equivs[item.entities[0]].append(item)
        elif isinstance(item, EventAnnotation):
            events[item.trigger] = item
    return relations, equivs, events
def add_relations_to_layer(folia_structure,layers,relations,doc):
    """Attach brat binary relations as FoLiA Dependency elements.

    *folia_structure* is a sentence/paragraph/text; *layers* its entity
    layers; *relations* maps entity id -> binary relation annotations.
    A DependenciesLayer is only appended when at least one relation exists.
    """
    collected = []
    for entity_layer in layers:
        for folia_entity in entity_layer.data:
            for rel in relations[folia_entity.id]:
                head = folia.Headspan(doc, contents=doc[rel.arg1].wrefs())
                alignment = head.append(folia.Alignment)
                alignment.append(folia.AlignReference, id=rel.arg1, type=folia.Entity)
                dependent = folia.DependencyDependent(doc, contents=doc[rel.arg2].wrefs())
                alignment = dependent.append(folia.Alignment)
                alignment.append(folia.AlignReference, id=rel.arg2, type=folia.Entity)
                folia_dep = folia.Dependency(doc, cls=rel.type, id=rel.id)
                folia_dep.append(head)
                folia_dep.append(dependent)
                collected.append(folia_dep)
    if collected:
        folia_structure.append(folia.DependenciesLayer, contents=collected)
def add_equivs_to_layer(folia_structure,layers,equivs,doc):
    """Attach brat equivalence relations as FoLiA Dependency elements.

    Each member of an equivalence class becomes one Headspan of a single
    Dependency. *folia_structure* is a sentence/paragraph/text.
    """
    collected = []
    for entity_layer in layers:
        for folia_entity in entity_layer.data:
            for equiv in equivs[folia_entity.id]:
                folia_dep = folia.Dependency(doc, cls=equiv.type)
                for member_id in equiv.entities:
                    head = folia.Headspan(doc, contents=doc[member_id].wrefs())
                    alignment = head.append(folia.Alignment)
                    alignment.append(folia.AlignReference, id=member_id, type=folia.Entity)
                    folia_dep.append(head)
                collected.append(folia_dep)
    if collected:
        folia_structure.append(folia.DependenciesLayer, contents=collected)
def add_event_rel_to_layer(folia_structure,layers,event_rel,doc):
    """Attach brat events as FoLiA Dependency elements (class "Event").

    The trigger entity becomes the Headspan; every event argument becomes
    a DependencyDependent carrying its role name as a Feature.
    """
    collected = []
    for entity_layer in layers:
        for folia_entity in entity_layer.data:
            event = event_rel.get(folia_entity.id)
            if not event:
                continue
            folia_dep = folia.Dependency(doc, cls="Event", id=event.id)
            head = folia.Headspan(doc, contents=doc[folia_entity.id].wrefs())
            alignment = head.append(folia.Alignment)
            alignment.append(folia.AlignReference, id=folia_entity.id, type=folia.Entity)
            folia_dep.append(head)
            for role, target in event.args:
                dependent = folia.DependencyDependent(doc, contents=doc[target].wrefs())
                dependent.append(folia.Feature(doc, subset=event.type, cls=str(role)))
                alignment = dependent.append(folia.Alignment)
                alignment.append(folia.AlignReference, id=target, type=folia.Entity)
                folia_dep.append(dependent)
            collected.append(folia_dep)
    if collected:
        folia_structure.append(folia.DependenciesLayer, contents=collected)
def add_relations(doc,ann):
    """Decorate *doc* with the relations/equivs/events found in *ann*.

    Registers a fictive dependency set, then walks every sentence,
    paragraph and text and attaches the matching dependency layers.
    """
    relations, equivs, events = build_relations(ann)
    doc.annotationdefaults[folia.AnnotationType.DEPENDENCY] = {"relation_set.xml": {} }
    doc.annotations.append( (folia.AnnotationType.DEPENDENCY, "relation_set.xml" ) )
    def _decorate(structure):
        # The three helpers share one layers object, just like the
        # original inline code did.
        layers = structure.layers(folia.AnnotationType.ENTITY)
        add_relations_to_layer(structure, layers, relations, doc)
        add_equivs_to_layer(structure, layers, equivs, doc)
        add_event_rel_to_layer(structure, layers, events, doc)
    for folia_text in doc.data:
        # NOTE: doc.paragraphs() iterates all paragraphs of the document,
        # not just those of folia_text (behaviour preserved).
        for par in doc.paragraphs():
            for sentence in par.sentences():
                _decorate(sentence)
            _decorate(par)
        _decorate(folia_text)
def add_comments(doc, ann):
    """Copy brat one-line comments onto their target FoLiA elements as
    Description children."""
    for comment in ann.get_oneline_comments():
        description = folia.Description(doc, value=comment.tail.strip())
        doc[comment.target].append(description)
def convert(path,doc):
    """Convert a brat document (<doc>.ann/.txt under *path*) to FoLiA XML.

    Writes the result next to the input as <doc>.xml.
    """
    # Kept for its side effects / future set generation (the set-definition
    # export below is currently disabled).
    projectconf = ProjectConfiguration(path)
    full_path = path_join(path, doc)
    ann = Annotations(full_path + ".ann")
    folia_doc = build_text_structure(ann, full_path + ".txt")
    add_relations(folia_doc, ann)
    add_comments(folia_doc, ann)
    # Disabled: writing out entiteit_set.xml / relation_set.xml via
    # build_entity_set/build_relations_set and xml().
    folia_doc.save(full_path + ".xml")
def build_entity_set(doc, projectconf):
    """Build a FoLiA set definition covering all entity and event types.

    Attribute types become subsets: closed when they declare explicit
    "Value" arguments, open otherwise. The definition is registered on
    *doc* under "entiteit_set.xml" and returned.
    """
    # dict keyed by type name so duplicates collapse to one definition
    classes = {}
    for label in projectconf.get_entity_types():
        classes[label] = folia.ClassDefinition(label, folia.SetType.CLOSED, label)
    for label in projectconf.get_event_types():
        classes[label] = folia.ClassDefinition(label, folia.SetType.CLOSED, label)
    subsets = []
    for attr_type in projectconf.get_attribute_type_hierarchy():
        subset_classes = {}
        if "Value" in attr_type.arg_list:
            for value in attr_type.arguments["Value"]:
                subset_classes[value] = folia.ClassDefinition(value, folia.SetType.CLOSED, value)
            subsets.append(folia.SubsetDefinition(attr_type.terms[0], folia.SetType.CLOSED, subset_classes))
        else:
            subsets.append(folia.SubsetDefinition(attr_type.terms[0], folia.SetType.OPEN, subset_classes))
    setdef = folia.SetDefinition("entiteit_set", classes, subsets)
    doc.setdefinitions["entiteit_set.xml"] = setdef
    return setdef
def build_relations_set(doc,projectconf):
    """Build a FoLiA set definition for dependencies (relations + events).

    Event argument roles become closed subsets; a role named like its own
    event type gets a "1" suffix to avoid the clash. Registered on *doc*
    under "relation_set.xml" and returned.
    """
    classes = {}
    for label in projectconf.get_relation_types():
        classes[label] = folia.ClassDefinition(label, folia.SetType.CLOSED, label)
    if projectconf.get_event_type_hierarchy():
        classes["Event"] = folia.ClassDefinition("Event", folia.SetType.CLOSED, "Event")
    subsets = []
    for event_type in projectconf.get_event_type_hierarchy():
        subset_classes = {}
        for role in event_type.arg_list:
            if role == event_type.terms[0]:
                role = event_type.terms[0] + "1"
            subset_classes[role] = folia.ClassDefinition(role, folia.SetType.CLOSED, role)
        subsets.append(folia.SubsetDefinition(event_type.terms[0], folia.SetType.CLOSED, subset_classes))
    setdef = folia.SetDefinition("relation_set", classes, subsets)
    doc.setdefinitions["relation_set.xml"] = setdef
    return setdef
def xml(sett):
    """Serialise a folia SetDefinition to a pretty-printed XML string."""
    from lxml import etree
    import string
    xml_id = '{http://www.w3.org/XML/1998/namespace}id'
    NSFOLIA = "http://ilk.uvt.nl/folia"
    root = etree.Element('set', {xml_id: sett.id, "type": "closed"},
                         namespace=NSFOLIA,
                         nsmap={None: NSFOLIA, 'xml': "http://www.w3.org/XML/1998/namespace"})
    for class_id in sett.classes:
        class_def = sett.classes[class_id]
        root.append(etree.Element('class', {xml_id: class_def.id, "label": class_def.label}))
    for subset in sett.subsets:
        if subset.type == folia.SetType.CLOSED:
            subset_elem = etree.Element('subset', {xml_id: subset.id, "class": "closed"})
        if subset.type == folia.SetType.OPEN:
            subset_elem = etree.Element('subset', {xml_id: subset.id, "class": "open"})
        for key in subset.classes:
            class_def = subset.classes[key]
            # Only emit classes whose id starts with "_" or an ASCII letter
            # (string.lowercase/uppercase: this module targets Python 2).
            if class_def.id[0] == "_" or class_def.id[0] in string.lowercase or class_def.id[0] in string.uppercase:
                subset_elem.append(etree.Element('class', {xml_id: class_def.id, "label": class_def.label}))
        root.append(subset_elem)
    return etree.tostring(root, pretty_print=True)
def compare(path,doc):
convert(path,doc)
ann = Annotations(path+doc)
fdoc = folia.Document(file=path+doc+".xml")
#test entities
for ent in ann.get_textbounds():
try:
found=fdoc[ent.id]
text = [str(a) for a in found.wrefs()]
if ent.tail.strip() != " ".join(text):
print "error: not found entity"
print ent
return False
except KeyError:
print "error: not found entity"
print ent
return False
#test relations
for rel in ann.get_relations():
try:
found=fdoc[rel.id]
arefs = | |
EDREAD reads data from the specified ascii output file so that it may
be used during postprocessing. After EDREAD, you must issue the STORE
command to store the data in time history variables. Once stored, the
variables can be viewed as plots of output item versus time.
The number of variables stored depends on the file specified. The
following table shows the items in each file and the order in which
they are stored. If data items were previously stored in variables
NSTART to NSTART+15, they will be overwritten. If more variables are
needed, change NV on the NUMVAR command. (Note that hourglass energy
will not be available if it was not specified for the model
[EDENERGY,1].)
The following items under MATSUM are listed in the MATSUM ASCII file
(in the Mat no. field) for each part number at time intervals specified
by the EDHTIME command. Use EDREAD,,MATSUM,NUM to specify the part
number that corresponds to the mat number in the MATSUM file.
Resultant contact forces and sliding interface energies are available
from the RCFORC and SLEOUT files, respectively. The RCFORC file is
written for surface based contact types that include target and contact
(master and slave) definitions. You should ensure that this file
contains valid force results before issuing EDREAD,,RCFORC. Only the
resultant contact forces on the master surface are available for time-
history postprocessing.
Distributed ANSYS Restriction: This command is not supported in
Distributed ANSYS.
"""
command = f"EDREAD,{nstart},{label},{num},{step1},{step2}"
return self.run(command, **kwargs)
def enersol(self, nvar="", item="", name="", **kwargs):
    """Specifies the total energies to be stored.

    APDL Command: ENERSOL

    Parameters
    ----------
    nvar
        Arbitrary reference number assigned to this variable (2 to NV).
    item
        Label identifying the energy item; passed straight through to
        the ENERSOL command.
    name
        Identifying name for printouts/displays; passed straight through
        to the ENERSOL command.
    """
    command = "ENERSOL,%s,%s,%s" % (nvar, item, name)
    return self.run(command, **kwargs)
def esol(self, nvar: MapdlInt = "", elem: MapdlInt = "",
         node: MapdlInt = "", item: str = "", comp: str = "",
         name: str = "", **kwargs) -> Optional[str]:
    """Specify element data to be stored from the results file.

    /POST26 APDL Command: ESOL

    Parameters
    ----------
    nvar
        Arbitrary reference number assigned to this variable (2 to
        NV [NUMVAR]). Overwrites any existing results for this variable.
    elem
        Element for which data are to be stored.
    node
        Node number on this element for which data are to be stored.
        If blank, store the average element value (except for FMAG
        values, which are summed instead of averaged).
    item
        Label identifying the item. General item labels are shown in
        Table 134: ESOL - General Item and Component Labels. Some items
        also require a component label.
    comp
        Component of the item (if required). General component labels
        are shown in Table 134: ESOL - General Item and Component
        Labels. If Comp is a sequence number (n), the NODE field will
        be ignored.
    name
        Thirty-two character name for identifying the item on the
        printout and displays. Defaults to a label formed by
        concatenating the first four characters of the Item and Comp
        labels.

    Examples
    --------
    Switch to the time-history postprocessor

    >>> mapdl.post26()

    Store the stress in the X direction for element 1 at node 1

    >>> nvar = 2
    >>> mapdl.esol(nvar, 1, 1, 'S', 'X')

    Move the value to an array and access it via mapdl.parameters

    >>> mapdl.dim('ARR', 'ARRAY', 1)
    >>> mapdl.vget('ARR', nvar)
    >>> mapdl.parameters['ARR']
    array(-1991.40234375)

    Notes
    -----
    See Table: 134:: ESOL - General Item and Component Labels for a
    list of valid item and component labels for element (except line
    element) results.

    The ESOL command defines element results data to be stored from a
    results file (FILE). Not all items are valid for all elements. To
    see the available items for a given element, refer to the input
    and output summary tables in the documentation for that element.

    Two methods of data access are available via the ESOL command:
    a generic label (component name method), or a label plus number
    (sequence number method). Use the component name method for
    general element data; the sequence number method is required for
    data that is not averaged (such as pressures at nodes and
    temperatures at integration points), or data not easily described
    generically (such as all derived data for structural/thermal line
    elements and contact elements, and layer data for layered
    elements).

    Element results are in the element coordinate system, except for
    layered elements where results are in the layer coordinate system.
    Element forces and moments are in the nodal coordinate system.
    Results are obtainable for an element at a specified node. Further
    location specifications can be made for some elements via the
    SHELL, LAYERP26, and FORCE commands.

    For more information on the meaning of contact status and its
    possible values, see Reviewing Results in POST1 in the Contact
    Technology Guide.
    """
    fields = (nvar, elem, node, item, comp, name)
    command = "ESOL," + ",".join("%s" % field for field in fields)
    return self.run(command, **kwargs)
def file(self, fname="", ext="", **kwargs):
    """Specifies the data file where results are to be found.

    APDL Command: FILE

    Parameters
    ----------
    fname
        File name and directory path (248 characters maximum, including
        the characters needed for the directory path). An unspecified
        directory path defaults to the working directory; in this case,
        you can use all 248 characters for the file name.
    ext
        Filename extension (eight-character maximum).

    Notes
    -----
    Specifies the ANSYS data file where the results are to be found for
    postprocessing.
    """
    return self.run("FILE,%s,%s" % (fname, ext), **kwargs)
def gapf(self, nvar="", num="", name="", **kwargs):
    """Defines the gap force data to be stored in a variable.

    APDL Command: GAPF

    Parameters
    ----------
    nvar
        Arbitrary reference number assigned to this variable (2 to NV
        [NUMVAR]). Overwrites any existing results for this variable.
    num
        Number identifying gap number for which the gap force is to be
        stored. Issue the GPLIST command to display gap numbers.
    name
        Thirty-two character name for identifying the item on the
        printout and displays (defaults to the name GAPF).

    Notes
    -----
    Defines the gap force data to be stored in a variable. Applicable
    only to the expansion pass of the mode-superposition linear
    transient dynamic (ANTYPE,TRANS) analysis. The data is usually on
    Fname.RDSP.
    """
    command = "GAPF," + ",".join(str(arg) for arg in (nvar, num, name))
    return self.run(command, **kwargs)
def gssol(self, nvar="", item="", comp="", name="", **kwargs):
    """Specifies which results to store from the results file when using
    generalized plane strain.

    APDL Command: GSSOL

    Parameters
    ----------
    nvar
        Arbitrary reference number or name assigned to this variable.
        Variable numbers can be 2 to NV (NUMVAR) while the name can be
        an eight byte character string. Overwrites any existing results
        for this variable.
    item
        Label identifying item to be stored.

        LENGTH - Change of fiber length at the ending point.

        ROT - Rotation of the ending plane during deformation.

        F - Reaction force at the ending point in the fiber direction.

        M - Reaction moment applied on the ending plane.
    comp
        Component of the item, if Item = ROT or M.

        X - The rotation angle or reaction moment of the ending plane about X.

        Y - The rotation angle or reaction moment of the ending plane about Y.
    name
        Thirty-two character name identifying the item on the printout
        and display. Defaults to the label formed by concatenating the
        first four characters of the Item and Comp labels.

    Notes
    -----
    This command stores the results (new position of the ending plane
    after deformation) for generalized plane strain. All outputs are in
    the global Cartesian coordinate system. For more information about
    the generalized plane strain feature, see Generalized Plane Strain
    Option of Current-Technology Solid Elements in the Element
    Reference.
    """
    return self.run("GSSOL,%s,%s,%s,%s" % (nvar, item, comp, name), **kwargs)
def jsol(self, nvar="", elem="", item="", comp="", name="", **kwargs):
"""Specifies result items to be stored for the joint element.
APDL Command: JSOL
Parameters
----------
nvar
Arbitrary reference number or name assigned to this variable.
| |
all headers in a FASTQ file unique by adding a number at the end of the header. Also, splits headers up to the first whitespace.\n');
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s <input_fastq_file> [<out_filtered_fastq_file>]\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
exit(0);
input_fastq_path = sys.argv[2];
out_fastq_path = '';
fp_out = sys.stdout;
if (len(sys.argv) == 4):
out_fastq_path = sys.argv[3];
if (input_fastq_path == out_fastq_path):
sys.stderr.write('ERROR: Output and input files are the same! Exiting.\n');
exit(0);
try:
fp_out = open(out_fastq_path, 'w');
except Exception as e:
sys.stderr.write(str(e));
exit(0);
uniquify_headers(input_fastq_path, out_fastq_path, fp_out)
if (fp_out != sys.stdout):
fp_out.close();
exit(0);
elif (sys.argv[1] == 'qvfilter'):
if (len(sys.argv) < 5 or len(sys.argv) > 6):
sys.stderr.write('Output only reads which have average base qualities above or below certain threshold.\n');
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s lte_gte threshold <input_fastq_file> [<out_filtered_fastq_file>]\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
sys.stderr.write('\n');
sys.stderr.write('\tlte_gte - Select which reads to output. Can be either "lt", "lte", "gt", "gte" or "eq".\n');
sys.stderr.write('\n');
exit(0);
lte_gte = sys.argv[2];
qv_threshold = int(sys.argv[3]);
input_fastq_path = sys.argv[4];
if ((lte_gte in ['lt', 'lte', 'gt', 'gte', 'eq']) == False):
sys.stderr.write('ERROR: Incorrect value of the lte_gte parameter. Should be either "<" or ">".');
exit(1);
out_fastq_path = '';
fp_out = sys.stdout;
if (len(sys.argv) == 6):
out_fastq_path = sys.argv[5];
if (input_fastq_path == out_fastq_path):
sys.stderr.write('ERROR: Output and input files are the same! Exiting.\n');
exit(0);
try:
fp_out = open(out_fastq_path, 'w');
except Exception as e:
sys.stderr.write(str(e));
exit(0);
base_quality_filter(input_fastq_path, lte_gte, qv_threshold, out_fastq_path, fp_out)
if (fp_out != sys.stdout):
fp_out.close();
exit(0);
elif (sys.argv[1] == 'info'):
if (len(sys.argv) < 3 or len(sys.argv) > 4):
sys.stderr.write('Tool for obtaining basic stats from FASTA/FASTQ files, such as number of sequences, total sequence length, average sequence length, etc.\n');
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s <input_fastq_file> [<input_reference_file>]\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
sys.stderr.write('\n');
sys.stderr.write('\t<input_reference_file> - If a reference is given, coverage is caluclated using the Lander-Waterman equation.\n');
sys.stderr.write('\n');
exit(0);
input_fastq_path = sys.argv[2];
if (len(sys.argv) == 4):
reference_path = sys.argv[3];
[ref_ret_string, ref_num_seqs, ref_total_seq_len, ref_average_seq_len, max_seq_len] = fastqparser.count_seq_length(reference_path);
# sys.stdout.write('Reference info:\n');
sys.stdout.write('(reference) Info for "%s".\n' % (reference_path));
sys.stdout.write(ref_ret_string);
sys.stdout.write('\n');
[ret_string, num_seqs, total_seq_len, average_seq_len, max_seq_len] = fastqparser.count_seq_length(input_fastq_path);
# sys.stdout.write('FASTQ info:\n');
if (len(sys.argv) == 4):
sys.stdout.write('(reads) Info for "%s".\n' % (input_fastq_path));
else:
sys.stdout.write('Info for "%s".\n' % (input_fastq_path));
sys.stdout.write(ret_string);
if (len(sys.argv) == 4):
sys.stdout.write('\n');
sys.stdout.write('Coverage: %.2fx\n' % (float(total_seq_len) / float(ref_total_seq_len)));
sys.stdout.write('\n');
exit(0);
elif (sys.argv[1] == 'count1d2d'):
if (len(sys.argv) < 3 or len(sys.argv) > 3):
sys.stderr.write('This is not an actual filter, but counts the number of 1d or 2d reads.\n');
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s <input_fastq_file>\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
sys.stderr.write('\n');
exit(0);
input_fastq_path = sys.argv[2];
count_1d2d(input_fastq_path);
exit(0);
elif (sys.argv[1] == 'subsample'):
if (len(sys.argv) < 5 or len(sys.argv) > 5):
sys.stderr.write('Subsamples a given fasta/fastq file to a given coverage. Prints the result to stdout.\n')
sys.stderr.write('If the initail coverage is too low, prints out the original file.\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s %s <input_fasta(q)_file> <desired_coverage> <ref_genome_size>\n' % (os.path.basename(sys.argv[0]), sys.argv[1]))
sys.stderr.write('\n')
exit(0);
input_fastq_path = sys.argv[2]
desired_coverage = float(sys.argv[3])
ref_genome_size = int(sys.argv[4])
subsample(input_fastq_path, desired_coverage, ref_genome_size)
exit(0);
elif (sys.argv[1] == 'getPairedHeaders'):
if (len(sys.argv) != 4 ):
sys.stderr.write('Outputs all fasta/fastq sequences for an input file, whose headers are present in a given target fasta/fastq file.\n')
sys.stderr.write('Prints results to stdout.\n')
sys.stderr.write('Useful when subsampling paired end reads in separate files.\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s %s <input_fasta(q)_file> <target_fasta(q)_file>\n' % (os.path.basename(sys.argv[0]), sys.argv[1]))
sys.stderr.write('\n')
exit(0);
input_fastq_path = sys.argv[2]
target_fastq_path = sys.argv[3]
getPaired(input_fastq_path, target_fastq_path)
exit(0);
elif (sys.argv[1] == 'fastq2fasta'):
if (len(sys.argv) < 3 or len(sys.argv) > 3):
sys.stderr.write('Outputs a given fastq file in fasta format. Replaces the ":" characters with blank spaces.\n')
sys.stderr.write('Additionally, replaces colons in header with spaces (relevant for running L&S pipeline).\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s %s <input_fastq_file>\n' % (os.path.basename(sys.argv[0]), sys.argv[1]))
sys.stderr.write('\n')
exit(0);
input_fastq_path = sys.argv[2]
fastq2fasta(input_fastq_path, True)
exit(0);
elif (sys.argv[1] == 'fastq2fasta2'):
if (len(sys.argv) < 3 or len(sys.argv) > 3):
sys.stderr.write('Outputs a given fastq file in fasta format. Does not replace the ":" characters with blank spaces (unlike fastq2fasta).\n')
sys.stderr.write('Additionally, replaces colons in header with spaces (relevant for running L&S pipeline).\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s %s <input_fastq_file>\n' % (os.path.basename(sys.argv[0]), sys.argv[1]))
sys.stderr.write('\n')
exit(0);
input_fastq_path = sys.argv[2]
fastq2fasta(input_fastq_path, False)
exit(0);
elif (sys.argv[1] == 'checknanoporepaths'):
if (len(sys.argv) < 4 or len(sys.argv) > 4):
sys.stderr.write('This is not an actual filter, but removes extra spaces from nanopore paths.\n')
sys.stderr.write('So that reads file can be used with nanopolish.\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s %s <input_fastq_file> <fast5_root_path>\n' % (os.path.basename(sys.argv[0]), sys.argv[1]))
sys.stderr.write('\n')
exit(0)
input_fastq_path = sys.argv[2]
fast5_root_path = sys.argv[3]
check_nanopore_paths(input_fastq_path, fast5_root_path)
exit(0)
elif (sys.argv[1] == 'length_distribution'):
if (len(sys.argv) < 3 or len(sys.argv) > 3):
sys.stderr.write('Outputs the distribution of reads in a fasta/fastq file.\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s %s <input_file>\n' % (os.path.basename(sys.argv[0]), sys.argv[1]))
sys.stderr.write('\n')
exit(0)
input_path = sys.argv[2]
length_distribution(input_path)
exit(0);
elif (sys.argv[1] == '1d' or sys.argv[1] == '2d'):
if (len(sys.argv) < 3 or len(sys.argv) > 4):
sys.stderr.write('Extracts only reads with typical %s (nanopore) headers (those containing either "1d", "template" or "complement"; "2d" or "twodir" in their header).\n' % (sys.argv[1]));
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s <input_fastq_file> [<out_filtered_fastq_file>]\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
sys.stderr.write('\n');
exit(0);
# header_patterns_path = sys.argv[2];
input_fastq_path = sys.argv[2];
out_fastq_path = '';
fp_out = sys.stdout;
if (len(sys.argv) == 4):
out_fastq_path = sys.argv[3];
if (input_fastq_path == out_fastq_path):
sys.stderr.write('ERROR: Output and input files are the same! Exiting.\n');
exit(0);
try:
fp_out = open(out_fastq_path, 'w');
except Exception as e:
sys.stderr.write(str(e));
exit(0);
if (sys.argv[1] == '1d'):
filter_seqs_by_header_list(input_fastq_path, ['1d', 'template', 'complement'], out_fastq_path, fp_out);
else:
filter_seqs_by_header_list(input_fastq_path, ['2d', 'twodir'], out_fastq_path, fp_out);
if (fp_out != sys.stdout):
fp_out.close();
exit(0);
elif (sys.argv[1] == 'readid'):
if (len(sys.argv) < 4 or len(sys.argv) > 5):
sys.stderr.write('Takes a FASTA/FASTQ file and a file containing seq IDs (0-offset). Extracts all seqs with given IDs.\n');
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s <input_id_file> <input_fastq_file> [<out_filtered_fastq_file>]\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
sys.stderr.write('\n');
exit(0);
read_id_path = sys.argv[2];
input_fastq_path = sys.argv[3];
out_fastq_path = '';
fp_out = sys.stdout;
if (len(sys.argv) == 5):
out_fastq_path = sys.argv[4];
if (input_fastq_path == out_fastq_path):
sys.stderr.write('ERROR: Output and input files are the same! Exiting.\n');
exit(0);
try:
fp_out = open(out_fastq_path, 'w');
except Exception as e:
sys.stderr.write(str(e));
exit(0);
filter_seqs_by_read_id(input_fastq_path, read_id_path, out_fastq_path, fp_out);
if (fp_out != sys.stdout):
fp_out.close();
exit(0);
elif (sys.argv[1] == 'subseqs'):
if (len(sys.argv) < 5 or len(sys.argv) > 6):
sys.stderr.write('Extracts bases from all sequences in a FASTA file between specified coordinates. End coordinate is not inclusive.\n');
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s <input_fastq_file> start end [<out_filtered_fastq_file>]\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
sys.stderr.write('\n');
exit(0);
input_fastq_path = sys.argv[2];
start_coord = int(sys.argv[3]);
end_coord = int(sys.argv[4]);
out_fastq_path = '';
fp_out = sys.stdout;
if (len(sys.argv) == 6):
out_fastq_path = sys.argv[5];
if (input_fastq_path == out_fastq_path):
sys.stderr.write('ERROR: Output and input files are the same! Exiting.\n');
exit(0);
try:
fp_out = open(out_fastq_path, 'w');
except Exception as e:
sys.stderr.write(str(e));
exit(0);
extract_subseqs(input_fastq_path, start_coord, end_coord, out_fastq_path, fp_out);
if (fp_out != sys.stdout):
fp_out.close();
exit(0);
elif (sys.argv[1] == 'msa2fasta'):
if (len(sys.argv) < 3 or len(sys.argv) > 4):
sys.stderr.write('Takes a multifasta file of multiple sequence alignments, and produces a majority vote consensus. Only [ACTG] bases are considered for output. Gaps in the input file are denoted by either a \'.\' or a \'-\'.\n');
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s %s <input_file> [<out_filtered_fastq_file>]\n' % (os.path.basename(sys.argv[0]), sys.argv[1]))
sys.stderr.write('\n')
exit(0)
input_fastq_path = sys.argv[2];
out_fastq_path = '';
fp_out = sys.stdout;
if (len(sys.argv) == 4):
out_fastq_path = sys.argv[3];
if (input_fastq_path == out_fastq_path):
sys.stderr.write('ERROR: Output and input files are the same! Exiting.\n');
exit(0);
try:
fp_out = open(out_fastq_path, 'w');
except Exception as e:
sys.stderr.write(str(e));
exit(0);
msa2fasta(input_fastq_path, fp_out)
if (fp_out != sys.stdout):
fp_out.close();
exit(0);
elif (sys.argv[1] == 'separate'):
if (len(sys.argv) < 4 or len(sys.argv) > 5 ):
sys.stderr.write('Separate a FASTA/FASTQ file into individual sequence files. File names will be numbers assigned to sequences sequentially.\n');
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s <input_fastq_file> <out_folder> [headers_as_fn]\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
sys.stderr.write('\n');
sys.stderr.write(' headers_as_fn - if "true", output files will be named the same as the header of the corresponding sequence.\n');
sys.stderr.write('\n');
exit(0);
input_fastq_path = sys.argv[2];
out_folder = sys.argv[3];
headers_as_fn = False;
if (len(sys.argv) >= 5):
headers_as_fn = True if (sys.argv[4].lower() in ['true', '1']) else False;
separate_seqs(input_fastq_path, out_folder, headers_as_fn=headers_as_fn);
elif (sys.argv[1] == '2pacbio'):
if (len(sys.argv) < 4 or len(sys.argv) > 4):
sys.stderr.write('Makes all headers in a FASTQ file unique by adding a number at the end of the header. Also, splits headers up to the first whitespace.\n');
sys.stderr.write('Usage:\n');
sys.stderr.write('\t%s %s <input_fastq_file> <out_converted_fastq_file>\n' % (os.path.basename(sys.argv[0]), sys.argv[1]));
exit(0);
input_fastq_path = sys.argv[2];
out_fastq_path = '';
fp_out = sys.stdout;
if (len(sys.argv) == 4):
out_fastq_path = sys.argv[3];
if (input_fastq_path == out_fastq_path):
sys.stderr.write('ERROR: Output and input files | |
% (sPvMode, ', '.join(g_kasParavirtProviders),));
if len(self.asParavirtModes) == 0:
self.asParavirtModes = None;
else:
return iArg;
return iArg + 1;
def getResourceSet(self):
    """
    Implements base.TestDriver.getResourceSet.

    Collects the hard disk and DVD images required by every VM in the set
    that is not marked as skipped.
    """
    asRet = [];
    for oVm in self.aoTestVms:
        if oVm.fSkip:
            continue;
        # A VM contributes its HD image first, then its DVD image.
        for sImage in (oVm.sHd, oVm.sDvdImage):
            if sImage is not None:
                asRet.append(sImage);
    return asRet;
def actionConfig(self, oTestDrv, eNic0AttachType = None, sDvdImage = None):
    """
    For base.TestDriver.actionConfig. Configure the VMs with defaults and
    a few tweaks as per arguments.

    oTestDrv:        Test driver used to create/add the machines.
    eNic0AttachType: (optional) Override for the first NIC attachment type.
    sDvdImage:       (optional) Override for the DVD image of every VM.

    Returns True if successful.
    Returns False if not.
    """
    for oTestVm in self.aoTestVms:
        if oTestVm.fSkip:
            continue;

        if oTestVm.fSnapshotRestoreCurrent:
            # If we want to restore a VM we don't need to create
            # the machine anymore -- so just add it to the test VM list.
            oVM = oTestDrv.addTestMachine(oTestVm.sVmName);
        else:
            ## @todo This could possibly be moved to the TestVM object.
            # Argument overrides take precedence over the per-VM settings.
            if sDvdImage is not None:
                sMyDvdImage = sDvdImage;
            else:
                sMyDvdImage = oTestVm.sDvdImage;

            if eNic0AttachType is not None:
                eMyNic0AttachType = eNic0AttachType;
            elif oTestVm.sNic0AttachType is None:
                eMyNic0AttachType = None;
            elif oTestVm.sNic0AttachType == 'nat':
                eMyNic0AttachType = vboxcon.NetworkAttachmentType_NAT;
            elif oTestVm.sNic0AttachType == 'bridged':
                eMyNic0AttachType = vboxcon.NetworkAttachmentType_Bridged;
            else:
                assert False, oTestVm.sNic0AttachType;

            oVM = oTestDrv.createTestVM(oTestVm.sVmName, 1,
                                        sHd = oTestVm.sHd,
                                        sKind = oTestVm.sKind,
                                        fIoApic = oTestVm.fIoApic,
                                        fPae = oTestVm.fPae,
                                        eNic0AttachType = eMyNic0AttachType,
                                        sDvdImage = sMyDvdImage,
                                        sHddControllerType = oTestVm.sHddControllerType,
                                        sFloppy = oTestVm.sFloppy,
                                        fVmmDevTestingPart = oTestVm.fVmmDevTestingPart,
                                        fVmmDevTestingMmio = oTestVm.fVmmDevTestingMmio); # BUGFIX: previously passed
                                        # oTestVm.fVmmDevTestingPart here (copy & paste error), so the MMIO
                                        # setting was never honoured.
        if oVM is None:
            return False;
    return True;
def _removeUnsupportedVirtModes(self, oTestDrv):
"""
Removes unsupported virtualization modes.
"""
if 'hwvirt' in self.asVirtModes and not oTestDrv.hasHostHwVirt():
reporter.log('Hardware assisted virtualization is not available on the host, skipping it.');
self.asVirtModes.remove('hwvirt');
if 'hwvirt-np' in self.asVirtModes and not oTestDrv.hasHostNestedPaging():
reporter.log('Nested paging not supported by the host, skipping it.');
self.asVirtModes.remove('hwvirt-np');
if 'raw' in self.asVirtModes and not oTestDrv.hasRawModeSupport():
reporter.log('Raw-mode virtualization is not available in this build (or perhaps for this host), skipping it.');
self.asVirtModes.remove('raw');
return True;
def actionExecute(self, oTestDrv, fnCallback): # pylint: disable=R0914
    """
    For base.TestDriver.actionExecute. Calls the callback function for
    each of the VMs and basic configuration variations (virt-mode and cpu
    count).

    Returns True if all fnCallback calls returned True, otherwise False.

    The callback can return True, False or None. The latter is for when the
    test is skipped.  (True is for success, False is for failure.)
    """
    self._removeUnsupportedVirtModes(oTestDrv);
    cMaxCpus = oTestDrv.getHostCpuCount();

    #
    # The test loop.  Structure: per-VM test > per-CPU-count subtest >
    # per-virt-mode subtest > per-paravirt-mode subtest; each level closes
    # its reporter scope with testDone().
    #
    fRc = True;
    for oTestVm in self.aoTestVms:
        if oTestVm.fSkip and self.fIgnoreSkippedVm:
            reporter.log2('Ignoring VM %s (fSkip = True).' % (oTestVm.sVmName,));
            continue;
        reporter.testStart(oTestVm.sVmName);
        if oTestVm.fSkip:
            reporter.testDone(fSkipped = True);
            continue;

        # Intersect the supported modes and the ones being testing.
        asVirtModesSup = [sMode for sMode in oTestVm.asVirtModesSup if sMode in self.asVirtModes];

        # Ditto for CPUs.
        acCpusSup = [cCpus for cCpus in oTestVm.acCpusSup if cCpus in self.acCpus];

        # Ditto for paravirtualization modes, except if not specified we got a less obvious default.
        if self.asParavirtModes is not None and oTestDrv.fpApiVer >= 5.0:
            asParavirtModes = [sPvMode for sPvMode in oTestVm.asParavirtModesSup if sPvMode in self.asParavirtModes];
            assert None not in asParavirtModes;
        elif oTestDrv.fpApiVer >= 5.0:
            # API >= 5.0 but nothing configured: fall back to the VM's first supported mode.
            asParavirtModes = (oTestVm.asParavirtModesSup[0],);
            assert asParavirtModes[0] is not None;
        else:
            # Pre-5.0 API: paravirt selection not available.
            asParavirtModes = (None,);

        for cCpus in acCpusSup:
            if cCpus == 1:
                reporter.testStart('1 cpu');
            else:
                reporter.testStart('%u cpus' % (cCpus));
                if cCpus > cMaxCpus:
                    # More vCPUs than the host has -- skip this configuration.
                    reporter.testDone(fSkipped = True);
                    continue;

            # cTests counts non-skipped callback invocations for this CPU count.
            cTests = 0;
            for sVirtMode in asVirtModesSup:
                if sVirtMode == 'raw' and cCpus > 1:
                    # Raw-mode only supports a single vCPU.
                    continue;
                reporter.testStart('%s' % ( g_dsVirtModeDescs[sVirtMode], ) );
                cStartTests = cTests;
                for sParavirtMode in asParavirtModes:
                    if sParavirtMode is not None:
                        assert oTestDrv.fpApiVer >= 5.0;
                        reporter.testStart('%s' % ( sParavirtMode, ) );

                    # Reconfigure the VM.
                    try:
                        (rc2, oVM) = oTestVm.getReconfiguredVm(oTestDrv, cCpus, sVirtMode, sParavirtMode = sParavirtMode);
                    except KeyboardInterrupt:
                        raise;
                    except:
                        reporter.errorXcpt(cFrames = 9);
                        rc2 = False;
                    if rc2 is True:
                        # Do the testing.
                        try:
                            rc2 = fnCallback(oVM, oTestVm);
                        except KeyboardInterrupt:
                            raise;
                        except:
                            reporter.errorXcpt(cFrames = 9);
                            rc2 = False;
                        if rc2 is False:
                            reporter.maybeErr(reporter.testErrorCount() == 0, 'fnCallback failed');
                    elif rc2 is False:
                        reporter.log('getReconfiguredVm failed');
                    if rc2 is False:
                        fRc = False;

                    # None means "skipped"; only True/False count as executed tests.
                    cTests = cTests + (rc2 is not None);
                    if sParavirtMode is not None:
                        reporter.testDone(fSkipped = (rc2 is None));

                # Virt-mode subtest was skipped if no paravirt variation ran.
                reporter.testDone(fSkipped = cTests == cStartTests);

            # CPU-count subtest was skipped if nothing ran at all.
            reporter.testDone(fSkipped = cTests == 0);

        # Close the per-VM test scope; fold reported errors into the result.
        _, cErrors = reporter.testDone();
        if cErrors > 0:
            fRc = False;
    return fRc;
def enumerateTestVms(self, fnCallback):
    """
    Enumerates all the 'active' VMs.

    Returns True if all fnCallback calls returned True.
    Returns False if any returned False.
    Returns None immediately if fnCallback returned None.
    """
    fAllOk = True;
    for oTestVm in self.aoTestVms:
        if oTestVm.fSkip:
            continue;
        fCur = fnCallback(oTestVm);
        if fCur is None:
            # Abort the enumeration on the first None.
            return None;
        fAllOk = fAllOk and fCur;
    return fAllOk;
class TestVmManager(object):
"""
Test VM manager.
"""
def __init__(self, sResourcePath):
    # Base directory under which the test VM images/resources live.
    self.sResourcePath = sResourcePath;
def getStandardVmSet(self, sTxsTransport):
    """
    Gets the set of standard test VMs.

    This is supposed to do something seriously clever, like searching the
    testrsrc tree for usable VMs, but for the moment it's all hard coded. :-)
    """
    oSet = TestVmSet(oTestVmManager = self);

    # (sVmName, sHd, sKind, acCpusSup, fIoApic) -- fIoApic None means
    # "leave at the TestVm default" (the constructor arg is not passed).
    aatVms = [
        ('tst-nt4sp1',         '4.2/' + sTxsTransport + '/nt4sp1/t-nt4sp1.vdi', 'WindowsNT4',  [1],           None),
        ('tst-xppro',          '4.2/' + sTxsTransport + '/xppro/t-xppro.vdi',   'WindowsXP',   range(1, 33),  None),
        ('tst-nt4sp6',         '4.2/nt4sp6/t-nt4sp6.vdi',                       'WindowsNT4',  range(1, 33),  None),
        ('tst-2ksp4',          '4.2/win2ksp4/t-win2ksp4.vdi',                   'Windows2000', range(1, 33),  None),
        ('tst-xpsp2',          '4.2/xpsp2/t-winxpsp2.vdi',                      'WindowsXP',   range(1, 33),  True),
        ('tst-xpsp2-halaacpi', '4.2/xpsp2/t-winxp-halaacpi.vdi',                'WindowsXP',   range(1, 33),  True),
        ('tst-xpsp2-halacpi',  '4.2/xpsp2/t-winxp-halacpi.vdi',                 'WindowsXP',   range(1, 33),  True),
        ('tst-xpsp2-halapic',  '4.2/xpsp2/t-winxp-halapic.vdi',                 'WindowsXP',   range(1, 33),  True),
        ('tst-xpsp2-halmacpi', '4.2/xpsp2/t-winxp-halmacpi.vdi',                'WindowsXP',   range(2, 33),  True),
        ('tst-xpsp2-halmps',   '4.2/xpsp2/t-winxp-halmps.vdi',                  'WindowsXP',   range(2, 33),  True),
        ('tst-win7',           '4.2/win7-32/t-win7.vdi',                        'Windows7',    range(1, 33),  True),
        ('tst-win8',           '4.2/win8-32/t-win8.vdi',                        'Windows8',    range(1, 33),  True),
    ];
    for sVmName, sHd, sKind, acCpusSup, fIoApic in aatVms:
        if fIoApic is None:
            oTestVm = TestVm(oSet, sVmName, sHd = sHd, sKind = sKind, acCpusSup = acCpusSup);
        else:
            oTestVm = TestVm(oSet, sVmName, sHd = sHd, sKind = sKind, acCpusSup = acCpusSup, fIoApic = fIoApic);
        oSet.aoTestVms.append(oTestVm);
    return oSet;
def getSmokeVmSet(self):
"""
Gets a representative set of VMs for smoke testing.
"""
oSet = TestVmSet(oTestVmManager = self);
oTestVm = TestVm(oSet, 'tst-nt4sp1', sHd = '4.2/nat/nt4sp1/t-nt4sp1.vdi',
sKind = 'WindowsNT4', acCpusSup = [1], sNic0AttachType = 'nat');
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-xppro', sHd = '4.2/nat/xppro/t-xppro.vdi',
sKind = 'WindowsXP', acCpusSup = range(1, 33), sNic0AttachType = 'nat');
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-rhel5', sHd = '3.0/tcp/rhel5.vdi',
sKind = 'RedHat', acCpusSup = range(1, 33), fIoApic = True, sNic0AttachType = 'nat');
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-win2k3ent', sHd = '3.0/tcp/win2k3ent-acpi.vdi',
sKind = 'Windows2003', acCpusSup = range(1, 33), fPae = True, sNic0AttachType = 'bridged');
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-sol10', sHd = '3.0/tcp/solaris10.vdi',
sKind = 'Solaris', acCpusSup = range(1, 33), fPae = True, sNic0AttachType = 'bridged');
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-sol10-64', sHd = '3.0/tcp/solaris10.vdi',
sKind = 'Solaris_64', acCpusSup = range(1, 33), sNic0AttachType = 'bridged');
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-sol11u1', sHd = '4.2/nat/sol11u1/t-sol11u1.vdi',
sKind = 'Solaris11_64', acCpusSup = range(1, 33), sNic0AttachType = 'nat',
fIoApic = True, sHddControllerType = 'SATA Controller');
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-nt4sp6', sHd = '4.2/nt4sp6/t-nt4sp6.vdi',
sKind = 'WindowsNT4', acCpusSup = range(1, 33));
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-2ksp4', sHd = '4.2/win2ksp4/t-win2ksp4.vdi',
sKind = 'Windows2000', acCpusSup = range(1, 33));
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-xpsp2', sHd = '4.2/xpsp2/t-winxpsp2.vdi',
sKind = 'WindowsXP', acCpusSup = range(1, 33), fIoApic = True);
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-xpsp2-halaacpi', sHd = '4.2/xpsp2/t-winxp-halaacpi.vdi',
sKind = 'WindowsXP', acCpusSup = range(1, 33), fIoApic = True);
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-xpsp2-halacpi', sHd = '4.2/xpsp2/t-winxp-halacpi.vdi',
sKind = 'WindowsXP', acCpusSup = range(1, 33), fIoApic = True);
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-xpsp2-halapic', sHd = '4.2/xpsp2/t-winxp-halapic.vdi',
sKind = 'WindowsXP', acCpusSup = range(1, 33), fIoApic = True);
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-xpsp2-halmacpi', sHd = '4.2/xpsp2/t-winxp-halmacpi.vdi',
sKind = 'WindowsXP', acCpusSup = range(2, 33), fIoApic = True);
oSet.aoTestVms.append(oTestVm);
oTestVm = TestVm(oSet, 'tst-xpsp2-halmps', sHd = '4.2/xpsp2/t-winxp-halmps.vdi',
sKind = 'WindowsXP', acCpusSup = | |
<reponame>CenterForOpenScience/SHARE
import json
import logging
import re
import threading
import types
import urllib

from collections import deque
from functools import reduce

import dateutil
import dateutil.parser  # 'import dateutil' alone does not guarantee the .parser submodule is loaded
import pendulum
import xmltodict
from django.conf import settings
from lxml import etree

from share.util import iris, DictHashingDict
from share.util.nameparser import HumanName
from share.transform.chain.exceptions import (
    TransformError,
    InvalidPath,
    InvalidDate,
    NoneOf,
    InvalidIRI,
    ChainError,
)
from share.transform.chain.utils import force_text
logger = logging.getLogger(__name__)
__all__ = ('ParseDate', 'ParseName', 'ParseLanguage', 'Trim', 'Concat', 'Map', 'Delegate', 'Maybe', 'XPath', 'Join', 'RunPython', 'Static', 'Try', 'Subjects', 'OneOf', 'Orcid', 'DOI', 'IRI', 'GuessAgentType', 'Filter', 'Unique', 'Int')
#### Public API ####
def ParseDate(chain):
    """Append a DateParserLink: parse the value as a date, emit ISO-8601 UTC."""
    return chain + DateParserLink()
def ParseName(chain):
    """Append a NameParserLink: parse the value into a HumanName."""
    return chain + NameParserLink()
def ParseLanguage(chain):
    """Append a LanguageParserLink."""
    return chain + LanguageParserLink()
def Trim(chain):
    """Append a TrimLink: strip surrounding whitespace."""
    return chain + TrimLink()
def Concat(*chains, deep=False):
    """Gather the results of several chains into one list (deep flattens nested lists)."""
    return ConcatLink(*chains, deep=deep)
def XPath(chain, path):
    """Append an XPathLink that evaluates *path* against the value."""
    return chain + XPathLink(path)
def Join(chain, joiner='\n'):
    """Append a JoinLink: join a list of values with *joiner*."""
    return chain + JoinLink(joiner=joiner)
def Maybe(chain, segment, default=None):
    """Append a MaybeLink: follow *segment* if present, else yield *default*."""
    return chain + MaybeLink(segment, default=default)
def Try(chain, default=None, exceptions=None):
    """Wrap *chain* in a TryLink: yield *default* when the chain raises."""
    return TryLink(chain, default=default, exceptions=exceptions)
def Map(chain, *chains):
    """Run *chain* over every element produced by concatenating *chains*."""
    return Concat(*chains) + IteratorLink() + chain
def Delegate(parser, chain=None):
    """Append a DelegateLink that hands the value off to *parser*."""
    if chain:
        return chain + DelegateLink(parser)
    return DelegateLink(parser)
def RunPython(function_name, chain=None, *args, **kwargs):
    """Append a RunPythonLink invoking *function_name* on the value."""
    if chain:
        return chain + RunPythonLink(function_name, *args, **kwargs)
    return RunPythonLink(function_name, *args, **kwargs)
def Static(value):
    """A chain yielding the fixed *value* (StaticLink)."""
    return StaticLink(value)
def Subjects(*chains):
    """Map every gathered value through a MapSubjectLink, deep-flattened."""
    return Concat(Map(MapSubjectLink(), *chains), deep=True)
def OneOf(*chains):
    """Wrap the chains in a OneOfLink."""
    return OneOfLink(*chains)
def Orcid(chain=None):
    """Append an OrcidLink and select its IRI."""
    if chain:
        return (chain + OrcidLink()).IRI
    return OrcidLink().IRI
def DOI(chain=None):
    """Append a DOILink and select its IRI."""
    if chain:
        return (chain + DOILink()).IRI
    return DOILink().IRI
def IRI(chain=None, urn_fallback=False):
    """Append an IRILink and select its IRI."""
    if chain:
        return (chain + IRILink(urn_fallback=urn_fallback)).IRI
    return IRILink(urn_fallback=urn_fallback).IRI
def GuessAgentType(chain=None, default=None):
    """Append a GuessAgentTypeLink."""
    if chain:
        return chain + GuessAgentTypeLink(default=default)
    return GuessAgentTypeLink(default=default)
def Filter(func, *chains):
    """Gather values from *chains* and pass them through a FilterLink(func)."""
    return Concat(*chains) + FilterLink(func)
def Unique(chain=None):
    """Append a UniqueLink to the chain."""
    if chain:
        # Deliberately calls AbstractLink.__add__ directly so any overridden
        # __add__ on the chain's own class is bypassed.
        return AbstractLink.__add__(chain, UniqueLink())
    return UniqueLink()
def Int(chain):
    """Append an IntLink."""
    return chain + IntLink()
### /Public API
# BaseClass for all links
# Links are a single step of the parsing process
# Links may not mutate the object passed into them
# A chain is any number of links added together
class AbstractLink:
    """One step in a transform chain.

    Links are doubly linked via __add__; executing a chain runs each link's
    execute() in order.  Links must not mutate the object passed into them.
    """

    def __init__(self, _next=None, _prev=None):
        # next and prev are generally set by the __add__ method
        self._next = _next
        self._prev = _prev

        # Every chain must start with an AnchorLink
        if self._prev is None and not isinstance(self, AnchorLink):
            AnchorLink() + self

    # Build the entire chain this link is a part of
    # NOTE: This results in the entire chain rather than starting from the current link
    def chain(self):
        first = self
        while first._prev:
            first = first._prev
        deq = deque([first])
        while deq[-1]._next:
            deq.append(deq[-1]._next)
        return tuple(deq)

    # Transformation logic goes here
    def execute(self, obj):
        raise NotImplementedError

    # Add a link into an existing chain
    def __add__(self, step):
        self._next = step
        step._prev = self
        return step

    def __radd__(self, other):
        # value + chain prepends a fixed string (see PrependLink)
        return self + PrependLink(other)

    # For handling paths that are not valid python
    # or are already used. IE text, execute, oai:title
    # ctx('oai:title')
    def __getitem__(self, name):
        if isinstance(name, int):
            return self + IndexLink(name)
        if isinstance(name, str):
            return self + PathLink(name)
        raise TypeError(
            '__getitem__ only accepts integers and strings\n'
            'Found {}'.format(name)
        )

    # Reserved for special cases
    # Any other use is an error
    def __call__(self, name):
        if name == '*':
            return self + IteratorLink()
        if name == 'index':
            return self + GetIndexLink()
        raise ValueError(
            '"{}" is not a action that __call__ can resolve\n'
            '__call__ is reserved for special actions\n'
            'If you are trying to access an element use dictionary notation'.format(name)
        )

    # The preferred way of building paths.
    # Can express either json paths or xpaths
    # ctx.root.nextelement[0].first_item_attribute
    def __getattr__(self, name):
        if name[0] == '_':
            raise AttributeError(
                '{} has no attribute {}\n'
                'NOTE: "_"s are reserved for accessing private attributes\n'
                'Use dictionary notation to access elements beginning with "_"s\n'.format(self, name)
            )
        return self + PathLink(name)

    def __repr__(self):
        return '<{}()>'.format(self.__class__.__name__)

    def run(self, obj):
        # Push a stack frame for error reporting; always popped on the way out.
        Context().frames.append({'link': self, 'context': obj, 'parser': Context().parser})
        try:
            return self.execute(obj)
        except ChainError as e:
            # Record this link in the error's trace (base classes excluded).
            if self.__class__ not in (AbstractLink, AnchorLink):
                e.push(repr(self))
            raise e
        finally:
            Context().frames.pop(-1)
# The beginning link for all chains
# Contains logic for executing a chain against an object
# Adding another link to an anchor will result in a copy of the
# original anchor
class AnchorLink(AbstractLink):
    """Head of every chain; executing it runs the remaining links in order."""

    def execute(self, obj):
        acc = obj
        for link in self.chain()[1:]:
            acc = link.run(acc)
        return acc
class Context(AnchorLink):
    """Thread-local transform state.

    All attribute reads/writes are redirected into a per-thread dict, so
    every Context() constructed on the same thread shares the same graph,
    frames, parser stack, and pool.
    """

    __CONTEXT = threading.local()

    @property
    def jsonld(self):
        # The accumulated output graph as a JSON-LD document.
        return {
            '@graph': self.graph,
            '@context': {}
        }

    def __init__(self):
        # First use on this thread: create and reset the shared state dict.
        if not hasattr(Context.__CONTEXT, '_ctxdict'):
            Context.__CONTEXT._ctxdict = {}
            self.clear()
        super().__init__()

    @property
    def parser(self):
        # The innermost parser currently executing, if any.
        return self.parsers[-1] if self.parsers else None

    def clear(self):
        # Reset all per-thread state to a fresh run.
        self.graph = []
        self.frames = []
        self.parsers = []
        self._config = None
        self.pool = DictHashingDict()

    def __add__(self, step):
        # Chains built off the context start from a fresh anchor, so the
        # shared context object is never linked into a chain itself.
        return AnchorLink() + step

    def __radd__(self, other):
        raise NotImplementedError

    def __setattr__(self, name, value):
        if not hasattr(Context.__CONTEXT, '_ctxdict'):
            self.__init__()
        Context.__CONTEXT._ctxdict[name] = value

    def __getattr__(self, name):
        if not hasattr(Context.__CONTEXT, '_ctxdict'):
            self.__init__()
        try:
            return Context.__CONTEXT._ctxdict[name]
        except KeyError:
            # Not a state attribute: fall back to path building
            # (AbstractLink.__getattr__).
            return super().__getattr__(name)
class NameParserLink(AbstractLink):
    """Parses a name string into a HumanName object."""
    def execute(self, obj):
        return HumanName(obj)
class DateParserLink(AbstractLink):
    """Parses a date string, validates its range, and emits ISO-8601 in UTC.

    Raises InvalidDate for empty input, unparseable strings, bad timezone
    offsets, or dates outside [LOWER_BOUND, UPPER_BOUND].
    """

    LOWER_BOUND = pendulum.datetime(1200, 1, 1)
    UPPER_BOUND = pendulum.today().add(years=100)
    DEFAULT = pendulum.datetime(2016, 1, 1)

    def execute(self, obj):
        # Guard clause: falsy input is an immediate failure.
        if not obj:
            raise InvalidDate('{} is not a valid date.'.format(obj))

        try:
            parsed = dateutil.parser.parse(obj, default=self.DEFAULT)
        except dateutil.parser.ParserError as e:
            raise InvalidDate(str(e)) from e

        try:
            parsed.utcoffset()  # Forces tzoffset validation to run
        except ValueError as e:
            raise InvalidDate(*e.args) from e

        if parsed < self.LOWER_BOUND:
            raise InvalidDate('{} is before the lower bound {}.'.format(obj, self.LOWER_BOUND.isoformat()))
        if parsed > self.UPPER_BOUND:
            raise InvalidDate('{} is after the upper bound {}.'.format(obj, self.UPPER_BOUND.isoformat()))

        return parsed.in_tz('UTC').isoformat()
class LanguageParserLink(AbstractLink):
    """Yields a language code, unwrapping xmltodict-style '#text' nodes."""
    def execute(self, maybe_code):
        if isinstance(maybe_code, dict):
            return maybe_code['#text']
        return maybe_code
class ConcatLink(AbstractLink):
    """Gathers the results of several chains into one flat list.

    With deep=True nested lists coming out of the sub-chains are flattened
    recursively; '' and None entries are dropped either way.
    """

    def __init__(self, *chains, deep=False):
        self._chains = chains
        self._deep = deep
        super().__init__()

    def _concat(self, acc, val):
        # None contributes nothing.
        if val is None:
            return acc
        if not isinstance(val, list):
            val = [val]
        elif self._deep:
            # Recursively flatten any nested lists.
            val = reduce(self._concat, val, [])
        return acc + [v for v in val if v != '' and v is not None]

    def execute(self, obj):
        # Run each sub-chain from its anchor against the same input object,
        # folding the results together.
        return reduce(self._concat, [
            chain.chain()[0].run(obj)
            for chain in self._chains
        ], [])
class JoinLink(AbstractLink):
    """Joins a list of values into one string, skipping falsy entries."""

    def __init__(self, joiner='\n'):
        self._joiner = joiner
        super().__init__()

    def execute(self, obj):
        if not obj:
            values = []
        elif isinstance(obj, (list, tuple)):
            values = obj
        else:
            # A single scalar joins as itself.
            values = (obj, )
        return self._joiner.join(v for v in values if v)
class TrimLink(AbstractLink):
    """Strips leading/trailing whitespace from the value."""
    def execute(self, obj):
        return obj.strip()
class IteratorLink(AbstractLink):
    """Runs the links added after it against every element of a list."""

    def __init__(self):
        super().__init__()
        # Private subchain executed once per element.
        self.__anchor = AnchorLink()

    def __add__(self, step):
        # Attach all new links to the "subchain"
        chain = list(step.chain())
        # Drop the step's own anchor(s) so its links hang off our anchor.
        while isinstance(chain[0], AnchorLink):
            chain.pop(0)
        self.__anchor.chain()[-1] + chain[0]
        return self

    def execute(self, obj):
        # A non-list value is treated as a single-element sequence.
        if not isinstance(obj, (list, tuple)):
            obj = (obj, )
        return [self.__anchor.run(sub) for sub in obj]
class MaybeLink(AbstractLink):
    """Follows a key that may be missing, yielding a default when it is."""

    def __init__(self, segment, default=None):
        super().__init__()
        self._segment = segment
        self._default = default
        # Private subchain run only when the segment is present and truthy.
        self.__anchor = AnchorLink()

    def __add__(self, step):
        # Attach all new links to the "subchain"
        self.__anchor.chain()[-1] + step
        return self

    def execute(self, obj):
        if not obj:
            return []
        val = obj.get(self._segment)
        if val:
            return self.__anchor.run(val)
        # Inside a list-building context a missing value vanishes ([]) rather
        # than injecting the default into the output list.
        if len(Context().frames) > 1 and isinstance(Context().frames[-2]['link'], (IndexLink, IteratorLink, ConcatLink, JoinLink)):
            return []
        return self._default
class TryLink(AbstractLink):
    """Runs a chain, substituting a default when it raises.

    InvalidPath is always caught; additional exception types may be passed.
    Links added after the TryLink only run on a successful result.
    """

    def __init__(self, chain, default=None, exceptions=None):
        super().__init__()
        self._chain = chain
        self._default = default
        # Private subchain executed on the successful value.
        self.__anchor = AnchorLink()
        self._exceptions = (InvalidPath, ) + (exceptions or ())

    def __add__(self, step):
        # Attach all new links to the "subchain"
        self.__anchor.chain()[-1] + step
        return self

    def execute(self, obj):
        try:
            val = self._chain.chain()[0].run(obj)
        except self._exceptions:
            return self._default
        return self.__anchor.run(val)
class PathLink(AbstractLink):
    """Indexes one key/segment into the value (dict-style lookup)."""

    def __init__(self, segment):
        self._segment = segment
        super().__init__()

    def execute(self, obj):
        try:
            return obj[self._segment]
        except (KeyError, TypeError) as e:
            # TypeError covers non-subscriptable values along the path.
            raise InvalidPath from e

    def __repr__(self):
        return '<{}({!r})>'.format(self.__class__.__name__, self._segment)
class IndexLink(AbstractLink):
    """Selects a fixed index out of a list value."""

    def __init__(self, index):
        self._index = index
        super().__init__()

    def execute(self, obj):
        if not isinstance(obj, list):
            raise InvalidPath('Tried to find index "{}", got type {} instead of list'.format(self._index, type(obj)))
        try:
            return obj[self._index]
        except IndexError as e:
            raise InvalidPath from e

    def __repr__(self):
        return '<{}([{}])>'.format(self.__class__.__name__, self._index)
class GetIndexLink(AbstractLink):
    """Yields the position of the value within the closest enclosing iterator,
    or -1 when no IteratorLink frame is active."""

    def execute(self, obj):
        for frame in reversed(Context().frames):
            if isinstance(frame['link'], IteratorLink):
                return frame['context'].index(obj)
        return -1
class PrependLink(AbstractLink):
    """Prepends a fixed string to the value (used by AbstractLink.__radd__)."""

    def __init__(self, string):
        self._string = string
        super().__init__()

    def execute(self, obj):
        return self._string + obj
class XPathLink(AbstractLink):
    """Evaluates an XPath expression against an xmltodict-style value."""

    def __init__(self, xpath):
        self._xpath = xpath
        super().__init__()

    def execute(self, obj):
        # Round-trip through real XML so lxml can evaluate the XPath.
        unparsed_obj = xmltodict.unparse(obj)
        xml_obj = etree.XML(unparsed_obj.encode())
        elem = xml_obj.xpath(self._xpath)
        elems = [xmltodict.parse(etree.tostring(x)) for x in elem]
        # A single match is unwrapped unless the next link expects a list.
        if len(elems) == 1 and not isinstance(self._next, (IndexLink, IteratorLink)):
            return elems[0]
        return elems

    def __repr__(self):
        return '<{}({!r})>'.format(self.__class__.__name__, self._xpath)
class DelegateLink(AbstractLink):
    """Hands the value off to another parser, or to a factory picking one."""

    def __init__(self, parser):
        self._parser = parser
        super().__init__()

    def execute(self, obj):
        # callable() would return True for classes as well as functions, so a
        # plain function is detected explicitly: it is treated as a factory
        # that maps obj -> parser class, which is then instantiated with obj.
        if isinstance(self._parser, types.FunctionType):
            parser = self._parser(obj)
            return parser(obj).parse()
        return self._parser(obj).parse()
class RunPythonLink(AbstractLink):
    """Invokes either a bare callable or a named method of the current parser."""

    def __init__(self, function_name, *args, **kwargs):
        self._function_name = function_name
        self._args = args
        self._kwargs = kwargs
        super().__init__()

    def execute(self, obj):
        if callable(self._function_name):
            func = self._function_name
        else:
            # Look the method up on the parser currently on the context stack.
            func = getattr(Context().parser, self._function_name)
        return func(obj, *self._args, **self._kwargs)
class | |
since this was last called.
Data format: numpy array: [(timestamp, pool_id, neuron_index), ...]
Timestamps are in nanoseconds
"""
spk_ids, spk_times = self.driver.RecvXYSpikes(CORE_ID)
pool_ids, nrn_idxs, filtered_spk_times = self.last_mapped_network.translate_spikes(spk_ids, spk_times)
ret_data = np.array([filtered_spk_times, pool_ids, nrn_idxs]).T
return ret_data
def stop_all_inputs(self, time=0, flush=True):
    """Set the rate of every FPGA tag stream generator to zero."""
    if self.last_mapped_core is None:
        return
    num_gens = self.last_mapped_core.FPGASpikeGenerators.gens_used
    # NOTE(review): the <flush> parameter is ignored; True is always passed
    # to the driver below -- confirm whether that is intentional.
    for gen_idx in range(num_gens):
        # it's ok to set tag out to 0, if you turn up the rate later, it'll program the right tag
        self.driver.SetSpikeGeneratorRates(CORE_ID, [gen_idx], [0], [0], time, True)
def set_input_rate(self, inp, dim, rate, time=0, flush=True):
    """Program a single FPGA tag stream generator's rate.

    Convenience wrapper around set_input_rates() for one Input/dimension.
    On startup all rates are 0.

    inp: Input object to target
    dim: int, dimension within the Input object
    rate: int, desired tag rate in Hz
    time: int (default=0), time to send inputs in nanoseconds; 0 = immediately
    flush: bool (default True), whether to flush through the driver now.
        When queueing several calls it can pay to flush only the last one.

    WARNING: If <flush> is True, calling this will block traffic until the
    max <time> provided has passed!  If you're queueing up rates, make sure
    you call this in the order of the times.
    """
    self.set_input_rates([inp], [dim], [rate], time, flush)
def set_input_rates(self, inputs, dims, rates, time=0, flush=True):
    """Program several FPGA tag stream generator rates at once.

    On startup all rates are 0.

    inputs: list of Input objects
    dims: list of ints, dimension within each Input object
    rates: list of ints (or floats, which will be rounded), tag rate in Hz
    time: int (or float, which will be rounded, default=0), time to send
        inputs in nanoseconds; 0 = immediately
    flush: bool (default True), whether to flush through the driver now.

    WARNING: If <flush> is True, calling this will block traffic until the
    max <time> provided has passed!
    """
    if len(inputs) != len(dims) or len(dims) != len(rates):
        raise ValueError("inputs, dims, and rates all have to be the same length")
    if not isinstance(inputs[0], pystorm.hal.neuromorph.graph.Input):
        raise ValueError("inputs have to be of type neuromorph.graph.Input")

    # Translate each (Input, dim) pair into its generator index and out tag.
    gen_idxs = []
    out_tags = []
    for inp, dim in zip(inputs, dims):
        gen_idxs.append(inp.generator_idxs[dim])
        out_tags.append(inp.generator_out_tags[dim])

    self.driver.SetSpikeGeneratorRates(
        CORE_ID, gen_idxs, out_tags, np.round(rates).astype(int), int(time), flush)
##############################################################################
# Mapping functions #
##############################################################################
def remap_weights(self):
    """Reprogram weights that have been modified in the network objects.

    Deprecated: simply calls map() again on the last mapped network while
    keeping the existing pool allocations.
    """
    self.map(self.last_mapped_network, remap=True)
def map(self, network, remap=False, verbose=False):
    """Maps a Network to low-level HAL objects and returns mapping info.

    Parameters
    ----------
    network: pystorm.hal.neuromorph.graph Network object
    remap: reuse as much of the previous mapping as possible (e.g. pools will retain their physical locations on the chip)
    verbose: forwarded to network.map
    """
    logger.info("HAL: doing logical mapping")
    # should eventually get CORE_PARAMETERS from the driver itself (BDPars)
    core = network.map(CORE_PARAMETERS, keep_pool_mapping=remap, verbose=verbose)
    # Remember the mapping so spikes can be translated and weights remapped.
    self.last_mapped_network = network
    self.last_mapped_core = core
    # implement core objects, calling driver
    logger.info("HAL: programming mapping results to hardware")
    self.implement_core()
def dump_core(self):
    """Log the contents of the main datapath memories (PAT in full, the
    rest truncated to their first 10 entries)."""
    mems = (
        ("PAT",  bd.bdpars.BDMemId.PAT,  None),
        ("TAT0", bd.bdpars.BDMemId.TAT0, 10),
        ("TAT1", bd.bdpars.BDMemId.TAT1, 10),
        ("AM",   bd.bdpars.BDMemId.AM,   10),
        ("MM",   bd.bdpars.BDMemId.MM,   10),
    )
    for name, mem_id, head in mems:
        logger.info(name)
        dump = self.driver.DumpMem(CORE_ID, mem_id)
        logger.info(dump if head is None else dump[0:head])
def implement_core(self):
    """Implements a core that resulted from map_network. This is called by map and remap_weights.

    Programs the datapath memories, wires the diffusor around each pool,
    enables the used somas/synapses, sets per-neuron gain/bias twiddle
    bits, and configures the spike filters.
    """
    # start with a clean slate
    self.init_hardware()

    core = self.last_mapped_core

    # datapath memory programming
    self.driver.SetMem(
        CORE_ID, bd.bdpars.BDMemId.PAT, np.array(core.PAT.mem.M).flatten().tolist(), 0)
    self.driver.SetMem(
        CORE_ID, bd.bdpars.BDMemId.TAT0, np.array(core.TAT0.mem.M).flatten().tolist(), 0)
    self.driver.SetMem(
        CORE_ID, bd.bdpars.BDMemId.TAT1, np.array(core.TAT1.mem.M).flatten().tolist(), 0)
    self.driver.SetMem(
        CORE_ID, bd.bdpars.BDMemId.AM, np.array(core.AM.mem.M).flatten().tolist(), 0)
    self.driver.SetMem(
        CORE_ID, bd.bdpars.BDMemId.MM, np.array(core.MM.mem.M).flatten().tolist(), 0)

    # connect diffusor around pools
    for tile_id in range(core.NeuronArray_height_in_tiles * core.NeuronArray_width_in_tiles):
        self.driver.CloseDiffusorAllCuts(CORE_ID, tile_id)

    for pool, pool_allocation in core.neuron_array.pool_allocations.items():
        # convert minimum pool units into tile units
        # a pool consists of 4 (2x2 tiles)
        # XXX this constant of 2 shouldn't be hardcoded
        # XXX this should move to where core.Neurons.diffusor_cuts is set
        # not touching it for now. don't want to break it and in the middle of
        # something else
        x_min = pool_allocation['px']*2
        y_min = pool_allocation['py']*2
        x_max = x_min + pool_allocation['pw']*2
        y_max = y_min + pool_allocation['ph']*2

        logger.debug("pool {}".format(str(pool)))
        logger.debug(" px_min {}".format(x_min))
        logger.debug(" px_max {}".format(x_max))
        logger.debug(" py_min {}".format(y_min))
        logger.debug(" py_max {}".format(y_max))

        # cut top edge
        for x_idx in range(x_min, x_max):
            self.driver.OpenDiffusorCutXY(CORE_ID, x_idx, y_max-1, DIFFUSOR_NORTH_LEFT)
            self.driver.OpenDiffusorCutXY(CORE_ID, x_idx, y_max-1, DIFFUSOR_NORTH_RIGHT)
        # cut left edge
        for y_idx in range(y_min, y_max):
            self.driver.OpenDiffusorCutXY(CORE_ID, x_min, y_idx, DIFFUSOR_WEST_TOP)
            self.driver.OpenDiffusorCutXY(CORE_ID, x_min, y_idx, DIFFUSOR_WEST_BOTTOM)
        # cut bottom edge if not at edge of neuron array
        if y_max < core.NeuronArray_height_in_tiles-1:
            for x_idx in range(x_min, x_max):
                self.driver.OpenDiffusorCutXY(CORE_ID, x_idx, y_max, DIFFUSOR_NORTH_LEFT)
                self.driver.OpenDiffusorCutXY(CORE_ID, x_idx, y_max, DIFFUSOR_NORTH_RIGHT)
        # cut right edge if not at edge of neuron array
        if x_max < core.NeuronArray_width_in_tiles-1:
            for y_idx in range(y_min, y_max):
                self.driver.OpenDiffusorCutXY(CORE_ID, x_max, y_idx, DIFFUSOR_WEST_TOP)
                self.driver.OpenDiffusorCutXY(CORE_ID, x_max, y_idx, DIFFUSOR_WEST_BOTTOM)

    # implement user-controlled diffusor cuts
    # XXX see above XXX comment
    diffusor_cuts = core.neuron_array.diffusor_cuts
    for direction, cuts in diffusor_cuts.items():
        for y in range(cuts.shape[0]):
            for x in range(cuts.shape[1]):
                if cuts[y, x]:
                    self.set_diffusor(y, x, direction, 'broken')

    # enable somas inside pool
    # remember, x_min/x_max are tile units, 16 neurons per tile
    # XXX this heavy lifting should be done in core.assign, too
    assert(core.NeuronArray_width == core.neuron_array.nrns_used.shape[1])
    assert(core.NeuronArray_height == core.neuron_array.nrns_used.shape[0])
    for x in range(core.NeuronArray_width):
        for y in range(core.NeuronArray_height):
            if core.neuron_array.nrns_used[y, x] == 1:
                logger.debug("enabling soma %d, %d (x, y)", x, y)
                self.driver.EnableSomaXY(CORE_ID, x, y)

    # enable used synapses
    for tx, ty in core.neuron_array.syns_used:
        # BUGFIX: this message previously had no format placeholders
        # (logger.debug("enabling synapse", tx, ty)), which made the logging
        # module raise "not all arguments converted" whenever DEBUG was on.
        logger.debug("enabling synapse %d, %d", tx, ty)
        self.driver.EnableSynapseXY(CORE_ID, tx, ty)

    # set gain and bias twiddle bits
    assert(core.NeuronArray_width == core.neuron_array.gain_divisors.shape[1])
    assert(core.NeuronArray_height == core.neuron_array.gain_divisors.shape[0])
    assert(core.NeuronArray_width == core.neuron_array.biases.shape[1])
    assert(core.NeuronArray_height == core.neuron_array.biases.shape[0])

    # gain_ids[d-1] is the handle for divisor d.
    gain_ids = [
        bd.bdpars.SomaGainId.ONE,
        bd.bdpars.SomaGainId.ONE_HALF,
        bd.bdpars.SomaGainId.ONE_THIRD,
        bd.bdpars.SomaGainId.ONE_FOURTH]
    # bias_ids[abs(b)] is the offset multiplier handle for bias magnitude b.
    bias_ids = [
        bd.bdpars.SomaOffsetMultiplierId.ZERO,
        bd.bdpars.SomaOffsetMultiplierId.ONE,
        bd.bdpars.SomaOffsetMultiplierId.TWO,
        bd.bdpars.SomaOffsetMultiplierId.THREE]
    bias_signs = [
        bd.bdpars.SomaOffsetSignId.NEGATIVE,
        bd.bdpars.SomaOffsetSignId.POSITIVE]

    for x in range(core.NeuronArray_width):
        for y in range(core.NeuronArray_height):
            gain_div_val = core.neuron_array.gain_divisors[y, x]
            gain_id = gain_ids[gain_div_val - 1]
            self.driver.SetSomaGainXY(CORE_ID, x, y, gain_id)

            bias_val = core.neuron_array.biases[y, x]
            bias_sign_id = bias_signs[int(bias_val > 0)]
            bias_id = bias_ids[abs(bias_val)]
            self.driver.SetSomaOffsetSignXY(CORE_ID, x, y, bias_sign_id)
            self.driver.SetSomaOffsetMultiplierXY(CORE_ID, x, y, bias_id)

    # set spike filter decay constant
    # the following sets the filters to "count mode"
    # exponential decay is also possible
    self.driver.SetSpikeFilterDecayConst(CORE_ID, 0)
    self.driver.SetSpikeFilterIncrementConst(CORE_ID, 1)

    # remove any evidence of old network in driver queues
    logger.info("HAL: clearing queued-up outputs")
    self.driver.ClearOutputs()

    ## voodoo sleep, (wait for everything to go in)
    sleep(1.0)
def get_driver_state(self):
    """Return the driver's state object for this core (via ``GetState``)."""
    return self.driver.GetState(CORE_ID)
def DAC_name_to_handle(self, dac_name):
    """Translate a DAC name string into its ``BDHornEP`` endpoint handle.

    :param dac_name: one of the 'DAC_*' names listed below
    :returns: the corresponding ``bd.bdpars.BDHornEP`` handle
    :raises ValueError: if *dac_name* is not a recognized DAC name
    """
    # Insertion order is preserved so the error message text is stable.
    name_to_handle = {
        'DAC_SYN_EXC': bd.bdpars.BDHornEP.DAC_SYN_EXC,
        'DAC_SYN_DC': bd.bdpars.BDHornEP.DAC_SYN_DC,
        'DAC_SYN_INH': bd.bdpars.BDHornEP.DAC_SYN_INH,
        'DAC_SYN_LK': bd.bdpars.BDHornEP.DAC_SYN_LK,
        'DAC_SYN_PD': bd.bdpars.BDHornEP.DAC_SYN_PD,
        'DAC_SYN_PU': bd.bdpars.BDHornEP.DAC_SYN_PU,
        'DAC_DIFF_G': bd.bdpars.BDHornEP.DAC_DIFF_G,
        'DAC_DIFF_R': bd.bdpars.BDHornEP.DAC_DIFF_R,
        'DAC_SOMA_REF': bd.bdpars.BDHornEP.DAC_SOMA_REF,
        'DAC_SOMA_OFFSET': bd.bdpars.BDHornEP.DAC_SOMA_OFFSET,
    }
    if dac_name in name_to_handle:
        return name_to_handle[dac_name]
    raise ValueError("supplied bad dac_name: " + dac_name +
                     ". Name must be one of " + str(name_to_handle.keys()))
def set_DAC_value(self, dac_name, value):
    """Program the DAC named *dac_name* to *value* counts."""
    handle = self.DAC_name_to_handle(dac_name)
    self.driver.SetDACCount(CORE_ID, handle, value)
def get_DAC_value(self, dac_name):
    """Read back the current count of the DAC named *dac_name*."""
    handle = self.DAC_name_to_handle(dac_name)
    return self.driver.GetDACCurrentCount(CORE_ID, handle)
def set_diffusor(self, y, x, direction, state):
"""open or close the diffusor near neuron y, x
the diffusor can only be cut at the 4x4 tile boundaries, so the call
rounds the requested x,y to this resolution
Parameters:
==========
y, x (ints) : location of neuron around which to set the diffusor
direction (string, {'up', 'right', 'down', 'left'}) : which side to open/close
state (string, {'broken', 'joined'}) : what to do to the diffusor
"""
NORTH_LEFT = bd.bdpars.DiffusorCutLocationId.NORTH_LEFT
NORTH_RIGHT = bd.bdpars.DiffusorCutLocationId.NORTH_RIGHT
WEST_TOP = bd.bdpars.DiffusorCutLocationId.WEST_TOP
WEST_BOTTOM = bd.bdpars.DiffusorCutLocationId.WEST_BOTTOM
ty = y // 4
tx = x // 4
if direction == 'down':
ty += 1
sides = (NORTH_LEFT, NORTH_RIGHT)
elif direction == 'right':
tx += 1
sides = (WEST_TOP, WEST_BOTTOM)
elif direction == 'up':
sides = (NORTH_LEFT, NORTH_RIGHT)
elif direction == 'left':
sides = (WEST_TOP, WEST_BOTTOM)
else:
raise ValueError("direction must be in {'up', 'right', 'down', 'left'}")
if state == 'broken':
for | |
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from spinup.utils.logx import Logger
import time
# import ray
# if not ray.is_initialized():
# ray.init(num_gpus=1)
def get_vars(scope):
    """Return every TF global variable whose name contains *scope*."""
    matching = []
    for var in tf.global_variables():
        if scope in var.name:
            matching.append(var)
    return matching
class ReplayBuffer:
    """First-in-first-out experience replay buffer for TD3 agents.

    Every stored transition is also appended to an on-disk experience log
    (via a spinup ``Logger``) so the data can be reused later.
    """

    def __init__(self, obs_dim, act_dim, size,
                 logger_fname='experiences_log.txt', **logger_kwargs):
        # Experience logger persists each stored transition to disk.
        logger_kwargs['output_fname'] = logger_fname
        self.experience_logger = Logger(**logger_kwargs)
        # Pre-allocated circular storage for transitions.
        self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
        self.rews_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr, self.size, self.max_size = 0, 0, size

    def store(self, obs, act, rew, next_obs, done,
              step_index, epoch_index, time, **kwargs):
        """Persist one transition to disk and cache it in memory."""
        self.log_experiences(obs, act, rew, next_obs, done,
                             step_index, epoch_index, time, **kwargs)
        slot = self.ptr
        self.obs1_buf[slot] = obs
        self.obs2_buf[slot] = next_obs
        self.acts_buf[slot] = act
        self.rews_buf[slot] = rew
        self.done_buf[slot] = done
        # Advance the write pointer, wrapping around (FIFO overwrite).
        self.ptr = (slot + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size=32):
        """Return a dict holding a uniformly sampled mini-batch."""
        picks = np.random.randint(0, self.size, size=batch_size)
        return dict(obs1=self.obs1_buf[picks],
                    obs2=self.obs2_buf[picks],
                    acts=self.acts_buf[picks],
                    rews=self.rews_buf[picks],
                    done=self.done_buf[picks])

    def log_experiences(self, obs, act, rew, next_obs, done,
                        step_index, epoch_index, time, **kwargs):
        """Append one transition as a tabular row of the experience log."""
        log = self.experience_logger
        log.log_tabular('Epoch', epoch_index)
        log.log_tabular('Step', step_index)
        # Observation components
        for idx, val in enumerate(obs):
            log.log_tabular('o_{}'.format(idx), val)
        # Action components
        for idx, val in enumerate(act):
            log.log_tabular('a_{}'.format(idx), val)
        # Reward
        log.log_tabular('r', rew)
        # Next-observation components
        for idx, val in enumerate(next_obs):
            log.log_tabular('o2_{}'.format(idx), val)
        # Any extra per-step data, flattened in C (row-major) order
        for key, value in kwargs.items():
            for idx, val in enumerate(np.array(value).flatten(order='C')):
                log.log_tabular('{}_{}'.format(key, idx), val)
        # Done flag and wall-clock time close out the row.
        log.log_tabular('d', done)
        log.log_tabular('Time', time)
        log.dump_tabular(print_data=False)
class MLP(tf.keras.Model):
    """Multi-layer perceptron implemented as a Keras model.

    All sizes but the last in ``layer_sizes`` become hidden layers with
    ``hidden_activation``; the last size is the output layer with
    ``output_activation``.
    """

    def __init__(self, layer_sizes=[32],
                 kernel_initializer='glorot_uniform', bias_initializer='zeros',
                 kernel_regularizer=None,
                 hidden_activation=tf.keras.activations.relu, output_activation=tf.keras.activations.linear):
        super(MLP, self).__init__()

        def _dense(units, activation):
            # All layers share initializers and the regularizer.
            return tf.keras.layers.Dense(units, activation=activation,
                                         kernel_initializer=kernel_initializer,
                                         bias_initializer=bias_initializer,
                                         kernel_regularizer=kernel_regularizer)

        self.hidden_layers = [_dense(units, hidden_activation)
                              for units in layer_sizes[:-1]]
        self.out_layer = _dense(layer_sizes[-1], output_activation)

    def call(self, inputs):
        """Forward pass: hidden layers in order, then the output layer."""
        activation = inputs
        for layer in self.hidden_layers:
            activation = layer(activation)
        return self.out_layer(activation)
class ActorCritic():
    """
    Class used for create an Actor-Critic.

    Builds a TF1 computation graph for one DDPG-style actor-critic pair:
    main actor/critic, target actor/critic, deterministic-policy-gradient
    losses, Adam train ops and polyak target updates. Each instance owns
    its own ``tf.Session``.
    """
    def __init__(self, ac_name, obs_dim, act_dim, act_limit, hidden_sizes, gamma, pi_lr, q_lr, polyak):
        # NOTE(review): attribute is named ``act_name`` although it stores
        # the actor-critic's name (``ac_name``) -- confirm before renaming.
        self.act_name = ac_name
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.act_limit = act_limit
        # Placeholders for one training batch: (s, a, r, s', done).
        self.obs_ph = tf.placeholder(dtype=tf.float32, shape=(None, self.obs_dim))
        self.act_ph = tf.placeholder(dtype=tf.float32, shape=(None, self.act_dim))
        self.rew_ph = tf.placeholder(dtype=tf.float32, shape=(None,))
        self.new_obs_ph = tf.placeholder(dtype=tf.float32, shape=(None, self.obs_dim))
        self.done_ph = tf.placeholder(dtype=tf.float32, shape=(None,))
        self.hidden_sizes = hidden_sizes
        # tanh output keeps the raw policy in [-1, 1]; scaled by act_limit below.
        self.actor_hidden_activation = tf.keras.activations.relu
        self.actor_output_activation = tf.keras.activations.tanh
        self.critic_hidden_activation = tf.keras.activations.relu
        self.critic_output_activation = tf.keras.activations.linear
        self.gamma = gamma
        self.pi_lr = pi_lr
        self.q_lr = q_lr
        self.polyak = polyak
        # Actor and Critic
        with tf.variable_scope('{}_main'.format(ac_name)):
            self.actor = MLP(self.hidden_sizes + [self.act_dim],
                             hidden_activation=self.actor_hidden_activation,
                             output_activation=self.actor_output_activation)
            self.critic = MLP(self.hidden_sizes + [1],
                              hidden_activation=self.critic_hidden_activation,
                              output_activation=self.critic_output_activation)
            # pi: policy action; q: Q(s, a); q_pi: Q(s, pi(s)).
            self.pi = self.act_limit * self.actor(self.obs_ph)
            self.q = tf.squeeze(self.critic(tf.concat([self.obs_ph, self.act_ph], axis=-1)), axis=1)
            self.q_pi = tf.squeeze(self.critic(tf.concat([self.obs_ph, self.pi], axis=-1)), axis=1)
        # Target Actor and Target Critic
        with tf.variable_scope('{}_target'.format(ac_name)):
            self.actor_targ = MLP(self.hidden_sizes + [self.act_dim],
                                  hidden_activation=self.actor_hidden_activation,
                                  output_activation=self.actor_output_activation)
            self.critic_targ = MLP(self.hidden_sizes + [1],
                                   hidden_activation=self.critic_hidden_activation,
                                   output_activation=self.critic_output_activation)
            # Targets are evaluated on the *next* observation only.
            self.pi_targ = self.act_limit * self.actor_targ(self.new_obs_ph)
            self.q_pi_targ = tf.squeeze(self.critic_targ(tf.concat([self.new_obs_ph, self.pi_targ], axis=-1)), axis=1)
        # Loss
        # Bellman backup r + gamma * (1 - d) * Q'(s', pi'(s')); stop_gradient
        # keeps the critic from back-propagating into the target networks.
        self.backup = tf.stop_gradient(self.rew_ph + self.gamma * (1 - self.done_ph) * self.q_pi_targ)
        self.pi_loss = -tf.reduce_mean(self.q_pi)
        self.q_loss = tf.reduce_mean((self.q - self.backup) ** 2)
        # Optimization
        self.pi_optimizer = tf.train.AdamOptimizer(learning_rate=self.pi_lr)
        self.q_optimizer = tf.train.AdamOptimizer(learning_rate=self.q_lr)
        self.train_pi_op = self.pi_optimizer.minimize(self.pi_loss, var_list=self.actor.variables)
        self.train_q_op = self.q_optimizer.minimize(self.q_loss, var_list=self.critic.variables)
        # Update Target
        # Polyak averaging: targ <- polyak * targ + (1 - polyak) * main.
        self.target_update = tf.group([tf.assign(v_targ, self.polyak * v_targ + (1 - self.polyak) * v_main)
                                       for v_main, v_targ in zip(self.actor.variables + self.critic.variables,
                                                                 self.actor_targ.variables + self.critic_targ.variables)])
        # Initializing targets to match main variables
        self.target_init = tf.group([tf.assign(v_targ, v_main)
                                     for v_main, v_targ in zip(self.actor.variables + self.critic.variables,
                                                               self.actor_targ.variables + self.critic_targ.variables)])
        # Create tf.Session
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        # Initialize target
        self.sess.run(self.target_init)
    def train(self, batch):
        """Run one critic step then one actor step (plus target update) on *batch*.

        :param batch: dict with keys 'obs1', 'acts', 'rews', 'obs2', 'done'
        :returns: (q_loss, q values, pi_loss) for this batch
        """
        feed_dict = {self.obs_ph: batch['obs1'],
                     self.act_ph: batch['acts'],
                     self.rew_ph: batch['rews'],
                     self.new_obs_ph: batch['obs2'],
                     self.done_ph: batch['done']
                     }
        # Train critic
        train_critic_ops = [self.q_loss, self.q, self.train_q_op]
        critic_outs = self.sess.run(train_critic_ops, feed_dict=feed_dict)
        ac_en_q_loss = critic_outs[0]
        ac_en_q = critic_outs[1]
        # Train actor
        train_actor_ops = [self.pi_loss, self.train_pi_op, self.target_update]
        actor_outs = self.sess.run(train_actor_ops, feed_dict=feed_dict)
        ac_en_pi_loss = actor_outs[0]
        return ac_en_q_loss, ac_en_q, ac_en_pi_loss
    def predict(self, input):
        """Return the deterministic policy action for a single observation.

        NOTE(review): parameter name ``input`` shadows the builtin; kept for
        interface compatibility.
        """
        feed_dict = {self.obs_ph: input.reshape(1, -1)}
        return self.sess.run(self.pi, feed_dict=feed_dict)[0]
    def save_model(self):
        # TODO: model persistence is not implemented yet.
        pass
class BootstrappedActorCriticEnsemble():
    """Bootstrapped ensemble of DDPG-style actor-critics.

    Each member owns its own replay buffer; incoming experiences are added
    to each buffer independently with probability
    ``replay_buf_bootstrap_p``, giving every member a bootstrapped view of
    the data stream.
    """

    def __init__(self, ensemble_size,
                 obs_dim, act_dim, act_limit, hidden_sizes,
                 gamma, pi_lr, q_lr, polyak,
                 replay_size, replay_buf_bootstrap_p, logger_kwargs):
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.act_limit = act_limit

        self.hidden_sizes = hidden_sizes
        self.replay_size = replay_size
        self.replay_buf_bootstrap_p = replay_buf_bootstrap_p
        self.logger_kwargs = logger_kwargs

        self.gamma = gamma
        self.pi_lr = pi_lr
        self.q_lr = q_lr
        self.polyak = polyak

        # Create actor-critic ensemble
        self.ensemble_size = ensemble_size
        self.ensemble = [ActorCritic('act_{}'.format(i), self.obs_dim, self.act_dim, self.act_limit,
                                     self.hidden_sizes, self.gamma, self.pi_lr, self.q_lr, self.polyak)
                         for i in range(self.ensemble_size)]

        # One replay buffer (and experience log file) per ensemble member
        self.ensemble_replay_bufs = [ReplayBuffer(self.obs_dim, self.act_dim, self.replay_size,
                                                  logger_fname='exp_log_ac_{}.txt'.format(ac_i),
                                                  **self.logger_kwargs)
                                     for ac_i in range(self.ensemble_size)]

    def predict(self, input):
        """Return every member's action for one observation, shape (ensemble_size, act_dim)."""
        predictions = [member.predict(input) for member in self.ensemble]
        return np.asarray(predictions)

    def train(self, batch_size=100, raw_batch_size=500, uncertainty_based_minibatch=False):
        """Train each member on its own mini-batch.

        :returns: (q_losses, q values stacked per member, pi_losses)
        """
        # Generate one mini-batch per member
        batches = self._generate_mini_batch(batch_size, raw_batch_size, uncertainty_based_minibatch)
        # Train each member on its mini-batch
        train_outs = [self.ensemble[i].train(batches[i]) for i in range(self.ensemble_size)]
        train_outs = np.asarray(train_outs)
        ac_en_q_loss = train_outs[:, 0]
        ac_en_q = np.stack(train_outs[:, 1], axis=0)
        ac_en_pi_loss = train_outs[:, 2]
        return ac_en_q_loss, ac_en_q, ac_en_pi_loss

    def _generate_mini_batch(self, batch_size=100, raw_batch_size=500, uncertainty_based_minibatch=False):
        """Draw one uniformly sampled mini-batch per ensemble member.

        ``raw_batch_size`` and ``uncertainty_based_minibatch`` are accepted
        for interface compatibility; the uniform path does not use them.
        """
        # Fixed: removed a leftover `import pdb; pdb.set_trace()` debugging
        # breakpoint that halted every training step here.
        # TODO: use multiprocessing to parallelise mini-batch sampling
        start_time = time.time()
        random_mini_batches = [replay_buf.sample_batch(batch_size)
                               for replay_buf in self.ensemble_replay_bufs]
        print('random mini-batch sampling costs: {}s'.format(time.time() - start_time))
        return random_mini_batches

    def add_to_replay_buffer(self, obs, act, rew, next_obs, done,
                             step_index, epoch_index, time, **kwargs):
        """Add experience to each Actor-Critic's replay buffer with probability replay_buf_bootstrap_p"""
        for ac_i in range(self.ensemble_size):
            if np.random.uniform(0, 1, 1) < self.replay_buf_bootstrap_p:
                self.ensemble_replay_bufs[ac_i].store(obs, act, rew, next_obs, done,
                                                      step_index, epoch_index, time, **kwargs)
# @ray.remote
# class ActorCritic():
# """
# Class used for create an Actor-Critic.
# """
# def __init__(self, ac_name, obs_dim, act_dim, act_limit, hidden_sizes, gamma, pi_lr, q_lr, polyak):
# self.act_name = ac_name
# self.obs_dim = obs_dim
# self.act_dim = act_dim
# self.act_limit = act_limit
# self.obs_ph = tf.placeholder(dtype=tf.float32, shape=(None, self.obs_dim))
# self.act_ph = tf.placeholder(dtype=tf.float32, shape=(None, self.act_dim))
# self.rew_ph = tf.placeholder(dtype=tf.float32, shape=(None,))
# self.new_obs_ph = tf.placeholder(dtype=tf.float32, shape=(None, self.obs_dim))
# self.done_ph = tf.placeholder(dtype=tf.float32, shape=(None,))
#
# self.hidden_sizes = hidden_sizes
#
# self.actor_hidden_activation = tf.keras.activations.relu
# self.actor_output_activation = tf.keras.activations.tanh
# self.critic_hidden_activation = tf.keras.activations.relu
# self.critic_output_activation = tf.keras.activations.linear
# self.gamma = gamma
# self.pi_lr = pi_lr
# self.q_lr = q_lr
# self.polyak = polyak
#
# # Actor and Critic
# with tf.variable_scope('{}_main'.format(ac_name)):
# self.actor = MLP(self.hidden_sizes + [self.act_dim],
# hidden_activation=self.actor_hidden_activation,
# output_activation=self.actor_output_activation)
# self.critic = MLP(self.hidden_sizes + [1],
# hidden_activation=self.critic_hidden_activation,
# output_activation=self.critic_output_activation)
# self.pi = self.act_limit * self.actor(self.obs_ph)
# self.q = tf.squeeze(self.critic(tf.concat([self.obs_ph, self.act_ph], axis=-1)), axis=1)
# self.q_pi = tf.squeeze(self.critic(tf.concat([self.obs_ph, self.pi], axis=-1)), axis=1)
#
# # Target Actor and Target Critic
# with tf.variable_scope('{}_target'.format(ac_name)):
# self.actor_targ = MLP(self.hidden_sizes + [self.act_dim],
# hidden_activation=self.actor_hidden_activation,
# output_activation=self.actor_output_activation)
# self.critic_targ = MLP(self.hidden_sizes + [1],
# hidden_activation=self.critic_hidden_activation,
# output_activation=self.critic_output_activation)
# self.pi_targ = self.act_limit * self.actor_targ(self.new_obs_ph)
# self.q_pi_targ = tf.squeeze(self.critic_targ(tf.concat([self.new_obs_ph, self.pi_targ], axis=-1)), axis=1)
#
# # Loss
# self.backup = tf.stop_gradient(self.rew_ph + self.gamma * (1 - self.done_ph) * self.q_pi_targ)
# self.pi_loss = -tf.reduce_mean(self.q_pi)
# self.q_loss = tf.reduce_mean((self.q - self.backup) ** 2)
#
# # Optimization
# self.pi_optimizer = tf.train.AdamOptimizer(learning_rate=self.pi_lr)
# self.q_optimizer = tf.train.AdamOptimizer(learning_rate=self.q_lr)
# self.train_pi_op = self.pi_optimizer.minimize(self.pi_loss, var_list=self.actor.variables)
# self.train_q_op = self.q_optimizer.minimize(self.q_loss, var_list=self.critic.variables)
#
# # Update Target
# self.target_update = tf.group([tf.assign(v_targ, self.polyak * v_targ + (1 - self.polyak) * v_main)
# for v_main, v_targ in zip(self.actor.variables + self.critic.variables,
# self.actor_targ.variables + self.critic_targ.variables)])
# # Initializing targets to match main variables
# self.target_init = tf.group([tf.assign(v_targ, v_main)
# for v_main, v_targ in zip(self.actor.variables + self.critic.variables,
# self.actor_targ.variables + self.critic_targ.variables)])
#
# # Create tf.Session
# self.sess = tf.Session()
# self.sess.run(tf.global_variables_initializer())
#
# # Initialize target
# self.sess.run(self.target_init)
#
# def train(self, batch):
# feed_dict = {self.obs_ph: batch['obs1'],
# self.act_ph: batch['acts'],
# self.rew_ph: batch['rews'],
# self.new_obs_ph: batch['obs2'],
# self.done_ph: batch['done']
# }
#
# # Train critic
# train_critic_ops = [self.q_loss, self.q, self.train_q_op]
# critic_outs = self.sess.run(train_critic_ops, feed_dict=feed_dict)
# ac_en_q_loss = critic_outs[0]
# ac_en_q = critic_outs[1]
# # Train actor
# train_actor_ops = [self.pi_loss, self.train_pi_op, self.target_update]
# actor_outs = self.sess.run(train_actor_ops, feed_dict=feed_dict)
# ac_en_pi_loss = actor_outs[0]
# return ac_en_q_loss, ac_en_q, ac_en_pi_loss
#
# def predict(self, input):
# feed_dict = {self.obs_ph: input.reshape(1, -1)}
# return self.sess.run(self.pi, feed_dict=feed_dict)[0]
#
# def save_model(self):
# # TODO
# pass
#
# class BootstrappedActorCriticEnsemble():
# def __init__(self,ensemble_size,
# obs_dim, act_dim, act_limit, hidden_sizes,
# gamma, pi_lr, q_lr, polyak,
# replay_size, replay_buf_bootstrap_p, logger_kwargs):
# self.obs_dim = obs_dim
# self.act_dim = act_dim
# self.act_limit = act_limit
#
# self.hidden_sizes = hidden_sizes
# self.replay_size = replay_size
# self.replay_buf_bootstrap_p = replay_buf_bootstrap_p
# self.logger_kwargs = logger_kwargs
#
# self.gamma = gamma
# self.pi_lr = pi_lr
# self.q_lr = q_lr
# self.polyak = polyak
#
# # Create actor-critic ensemble
# self.ensemble_size = ensemble_size
# self.ensemble = [ActorCritic.remote('act_{}'.format(i),
# self.obs_dim, self.act_dim, self.act_limit, self.hidden_sizes,
# self.gamma, self.pi_lr, self.q_lr, self.polyak)
# for i in range(self.ensemble_size)]
#
# # Create Replay Buffer
# self.ensemble_replay_bufs = [ReplayBuffer(self.obs_dim, self.act_dim, self.replay_size,
# logger_fname='exp_log_ac_{}.txt'.format(ac_i),
# **self.logger_kwargs)
# for ac_i in range(self.ensemble_size)]
#
# def predict(self, input):
# predics_id, _ = ray.wait([self.ensemble[i].predict.remote(input) for i in range(self.ensemble_size)],
# num_returns=self.ensemble_size)
# predics = ray.get(predics_id)
# return np.asarray(predics)
#
# def train(self, batch_size=100, raw_batch_size=500, uncertainty_based_minibatch=False):
# # Generate mini-batch
# batches = self._generate_mini_batch(batch_size, raw_batch_size, uncertainty_based_minibatch)
#
# # Train each member on its mini-batch
# train_outs_id, _ = ray.wait([self.ensemble[i].train.remote(batches[i]) for i in range(self.ensemble_size)],
# num_returns=self.ensemble_size)
# ac_en_q_loss, ac_en_q, ac_en_pi_loss = [], [], []
# for i in range(self.ensemble_size):
# | |
SEP)
program = cl.Program(self.context, kernelsource).build([options_string])
self.cl_kernel_time_integration = program.TimeIntegration
self.cl_kernel_update_displacement = program.UpdateDisplacement
self.cl_kernel_reduce_damage = program.ReduceDamage
# Set initial values in host memory
# horizons and horizons lengths
self.h_horizons = model.horizons
self.h_horizons_lengths = model.horizons_lengths
# Nodal coordinates
self.h_coords = np.ascontiguousarray(model.coords, dtype=np.float64)
# Displacement boundary conditions types and delta values
self.h_bc_types = model.bc_types
self.h_bc_values = model.bc_values
self.h_tip_types = model.tip_types
# Force boundary conditions types and values
self.h_force_bc_types = model.force_bc_types
self.h_force_bc_values = model.force_bc_values
# Nodal volumes
self.h_vols = model.V
# Bond stiffnesses
self.h_bond_stiffness = np.ascontiguousarray(model.bond_stiffness, dtype=np.float64)
self.h_bond_critical_stretch = np.ascontiguousarray(model.bond_critical_stretch, dtype=np.float64)
# Displacements
self.h_un = np.empty((model.nnodes, model.degrees_freedom), dtype=np.float64)
# Forces
self.h_udn = np.empty((model.nnodes, model.degrees_freedom), dtype=np.float64)
# Bond forces
self.local_mem_x = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
self.local_mem_y = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
self.local_mem_z = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
# Damage vector
self.h_damage = np.empty(model.nnodes).astype(np.float64)
self.local_mem = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
# For applying force in incriments
self.h_force_load_scale = np.float64(0.0)
# For applying displacement in incriments
self.h_displacement_load_scale = np.float64(0.0)
# Build OpenCL data structures
# Read only
self.d_coords = cl.Buffer(self.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_coords)
self.d_bc_types = cl.Buffer(self.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_bc_types)
self.d_bc_values = cl.Buffer(self.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_bc_values)
self.d_force_bc_types = cl.Buffer(self.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_force_bc_types)
self.d_force_bc_values = cl.Buffer(self.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_force_bc_values)
self.d_vols = cl.Buffer(self.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_vols)
self.d_bond_stiffness = cl.Buffer(self.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_bond_stiffness)
self.d_bond_critical_stretch = cl.Buffer(self.context,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_bond_critical_stretch)
self.d_horizons_lengths = cl.Buffer(
self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_horizons_lengths)
# Read and write
self.d_horizons = cl.Buffer(
self.context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR,
hostbuf=self.h_horizons)
self.d_un = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_un.nbytes)
self.d_udn = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn.nbytes)
# Write only
self.d_damage = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY, self.h_damage.nbytes)
# Initialize kernel parameters
self.cl_kernel_time_integration.set_scalar_arg_dtypes(
[None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None
])
# Initialize kernel parameters
self.cl_kernel_update_displacement.set_scalar_arg_dtypes(
[None,
None,
None,
None,
None
])
self.cl_kernel_reduce_damage.set_scalar_arg_dtypes(
[None, None, None, None])
def __call__(self):
    """
    Conduct one iteration of the integrator.

    NOTE(review): this implementation is an empty placeholder -- the
    docstring below describes ``u``/``f`` arguments and a return value,
    but the method takes no arguments and returns None. The actual work
    is done by ``runtime``; confirm whether this stub is intentional.

    :arg u: A (`nnodes`, 3) array containing the displacements of all
        nodes.
    :type u: :class:`numpy.ndarray`
    :arg f: A (`nnodes`, 3) array containing the components of the force
        acting on each node.
    :type f: :class:`numpy.ndarray`
    :returns: The new displacements after integration.
    :rtype: :class:`numpy.ndarray`
    """
def runtime(self, model):
    """Enqueue one explicit time step on the OpenCL device.

    Launches two kernels in order: displacement update (applies the
    displacement boundary conditions, one work-item per degree of
    freedom), then the bond-force/time-integration kernel (one work-group
    of ``max_horizon_length`` work-items per node).

    :arg model: the peridynamics model supplying problem sizes.
    """
    # Update displacements (global size = dof * nnodes, no local size)
    self.cl_kernel_update_displacement(
        self.queue, (model.degrees_freedom * model.nnodes,), None,
        self.d_udn,
        self.d_un,
        self.d_bc_types,
        self.d_bc_values,
        self.h_displacement_load_scale
        )
    # Time integration step (one work-group per node over its horizon)
    self.cl_kernel_time_integration(
        self.queue, (model.nnodes * model.max_horizon_length,), (model.max_horizon_length,),
        self.d_un,
        self.d_udn,
        self.d_vols,
        self.d_horizons,
        self.d_coords,
        self.d_bond_stiffness,
        self.d_bond_critical_stretch,
        self.d_force_bc_types,
        self.d_force_bc_values,
        self.local_mem_x,
        self.local_mem_y,
        self.local_mem_z,
        self.h_force_load_scale,
        self.h_displacement_load_scale
        )
def write(self, model, t, sample):
    """ Write a mesh file for the current timestep

    Reduces per-bond damage on the device, copies damage, displacements
    and forces back to the host, computes tip quantities averaged over
    nodes with ``tip_types == 1``, and writes a VTK output file.

    :returns: (damage array, mean tip z-displacement or None if no tip
        nodes, summed tip z-force)
    """
    # Reduce damage on the device, then copy results back to the host.
    self.cl_kernel_reduce_damage(self.queue, (model.nnodes * model.max_horizon_length,),
                                 (model.max_horizon_length,), self.d_horizons,
                                 self.d_horizons_lengths, self.d_damage, self.local_mem)
    cl.enqueue_copy(self.queue, self.h_damage, self.d_damage)
    cl.enqueue_copy(self.queue, self.h_un, self.d_un)
    cl.enqueue_copy(self.queue, self.h_udn, self.d_udn)
    # TODO define a failure criterion, idea: rate of change of damage goes to 0 after it has started increasing
    tip_displacement = 0
    tip_shear_force = 0
    tmp = 0
    # Accumulate z-components over the tip nodes (tip_types == 1).
    for i in range(model.nnodes):
        if self.h_tip_types[i] == 1:
            tmp +=1
            tip_displacement += self.h_un[i][2]
            tip_shear_force += self.h_udn[i][2]
    # NOTE(review): tip_displacement is averaged over tip nodes but
    # tip_shear_force is returned as a plain sum -- confirm intentional.
    if tmp != 0:
        tip_displacement /= tmp
    else:
        tip_displacement = None
    vtk.write("output/U_"+"sample" + str(sample) +"t"+str(t) + ".vtk", "Solution time step = "+str(t),
              model.coords, self.h_damage, self.h_un)
    #vtk.writeDamage("output/damage_" + str(t)+ "sample" + str(sample) + ".vtk", "Title", self.h_damage)
    return self.h_damage, tip_displacement, tip_shear_force
def incrementLoad(self, model, load_scale):
    """Set the force-load scale used by the kernels to *load_scale*.

    Does nothing when the model has no force-loaded boundary nodes.
    """
    if model.num_force_bc_nodes == 0:
        return
    # update the host-side force load scale
    self.h_force_load_scale = np.float64(load_scale)
def incrementDisplacement(self, model, displacement_scale):
    """Set the displacement-load scale used by the kernels.

    *model* is accepted for interface symmetry with ``incrementLoad``
    but is not used here.
    """
    # update the host-side displacement load scale
    self.h_displacement_load_scale = np.float64(displacement_scale)
class EulerStochasticOptimised(Integrator):
r"""
Stochastic Euler integrator for quasi-static loading, using optimised OpenCL kernels.
The Euler method is a first-order numerical integration method. The
integration is given by,
.. math::
u(t + \delta t) = u(t) + \delta t f(t) d
where :math:`u(t)` is the displacement at time :math:`t`, :math:`f(t)` is
the force at time :math:`t`, :math:`\delta t` is the time step and
:math:`d` is a dampening factor.
"""
def __init__(self, model):
    """ Initialise the integration scheme

    Builds the OpenCL context and queue, compiles the stochastic Euler
    kernels with model-specific compile-time constants, allocates host
    arrays, mirrors them in device buffers, and declares the kernels'
    scalar argument dtypes.

    :arg model: the peridynamics model; supplies geometry, boundary
        conditions, bond properties and solver parameters.
    """
    # Initializing OpenCL
    self.context = cl.create_some_context()
    self.queue = cl.CommandQueue(self.context)
    # Print out device info
    output_device_info(self.context.devices[0])
    # Build the OpenCL program from file
    kernelsource = open(pathlib.Path(__file__).parent.absolute() / "kernels/opencl_euler_stochastic_optimised.cl").read()
    SEP = " "
    # Bake model sizes and the time step into the kernels at compile time.
    options_string = (
        "-cl-fast-relaxed-math" + SEP
        + "-DPD_DPN_NODE_NO=" + str(model.degrees_freedom * model.nnodes) + SEP
        + "-DPD_NODE_NO=" + str(model.nnodes) + SEP
        + "-DMAX_HORIZON_LENGTH=" + str(model.max_horizon_length) + SEP
        + "-DPD_DT=" + str(model.dt) + SEP)
    program = cl.Program(self.context, kernelsource).build([options_string])
    self.cl_kernel_update_displacement = program.UpdateDisplacement
    self.cl_kernel_update_acceleration = program.UpdateAcceleration
    self.cl_kernel_reduce_damage = program.ReduceDamage
    # Three alternative matrix-vector multiply kernels plus a row reduction.
    self.cl_kernel_matrix_vector_mul1 = program.gemv1
    self.cl_kernel_matrix_vector_mul2 = program.gemv2
    self.cl_kernel_matrix_vector_mul3 = program.gemv3
    self.cl_kernel_reduce_rows = program.reduce_rows
    # Set initial values in host memory
    # horizons and horizons lengths
    self.h_horizons = model.horizons
    self.h_horizons_lengths = model.horizons_lengths
    # Nodal coordinates
    self.h_coords = np.ascontiguousarray(model.coords, dtype=np.float64)
    # Displacement boundary conditions types and delta values
    self.h_bc_types = model.bc_types
    self.h_bc_values = model.bc_values
    self.h_tip_types = model.tip_types
    # Force boundary conditions types and values
    self.h_force_bc_types = model.force_bc_types
    self.h_force_bc_values = model.force_bc_values
    # Nodal volumes
    self.h_vols = model.V
    # Bond stiffnesses and stretch factors
    self.h_bond_stiffness = np.ascontiguousarray(model.bond_stiffness, dtype=np.float64)
    self.h_bond_critical_stretch = np.ascontiguousarray(model.bond_critical_stretch, dtype=np.float64)
    self.h_bond_stiffness_const = np.float64(model.bond_stiffness_const)
    self.h_bond_critical_stretch_const = np.float64(model.critical_stretch_const)
    # Displacements
    self.h_un = np.empty((model.nnodes, model.degrees_freedom), dtype=np.float64)
    # Forces (stored per axis)
    self.h_udn_x = np.empty((model.nnodes), dtype=np.float64)
    self.h_udn_y = np.empty((model.nnodes), dtype=np.float64)
    self.h_udn_z = np.empty((model.nnodes), dtype=np.float64)
    # Brownian motion (per axis)
    self.h_bdn_x = np.empty((model.nnodes), dtype=np.float64)
    self.h_bdn_y = np.empty((model.nnodes), dtype=np.float64)
    self.h_bdn_z = np.empty((model.nnodes), dtype=np.float64)
    # Updated forces
    self.h_udn1_x = np.empty((model.nnodes), dtype=np.float64)
    self.h_udn1_y = np.empty((model.nnodes), dtype=np.float64)
    self.h_udn1_z = np.empty((model.nnodes), dtype=np.float64)
    # Updated brownian motion (sampled with a length scale)
    self.h_bdn1_x = np.empty((model.nnodes), dtype=np.float64)
    self.h_bdn1_y = np.empty((model.nnodes), dtype=np.float64)
    self.h_bdn1_z = np.empty((model.nnodes), dtype=np.float64)
    # Damage vector
    self.h_damage = np.empty(model.nnodes).astype(np.float64)
    # For applying force in increments
    self.h_force_load_scale = np.float64(0.0)
    # For applying displacement in increments
    self.h_displacement_load_scale = np.float64(0.0)
    # dimensions for matrix-vector multiplication
    # local (work group) size
    self.h_mdash = np.intc(16) #64
    self.h_p = np.intc(4) #16
    # h_m: nnodes rounded up to the next power of two
    self.h_m = np.intc(
        1<<(model.nnodes-1).bit_length()
    )
    #self.h_n = np.intc(model.nnodes) # mvul1
    # h_n: nnodes rounded up to a multiple of h_p (for the gemv2 kernel)
    self.h_n = np.intc(np.ceil(model.nnodes/self.h_p)*self.h_p) #muvl2
    # Bond forces
    #local_mem_mvmul2 = np.empty((self.h_p * self.h_mdash), dtype=np.float64)
    # Work-group scratch space for the gemv2 kernel (p * mdash doubles each).
    self.local_mem_mvmul2_1 = cl.LocalMemory(np.dtype(np.float64).itemsize * self.h_p * self.h_mdash)
    self.local_mem_mvmul2_2 = cl.LocalMemory(np.dtype(np.float64).itemsize * self.h_p * self.h_mdash)
    self.local_mem_mvmul2_3 = cl.LocalMemory(np.dtype(np.float64).itemsize * self.h_p * self.h_mdash)
    self.local_mem_mvmul2_4 = cl.LocalMemory(np.dtype(np.float64).itemsize * self.h_p * self.h_mdash)
    self.local_mem_mvmul2_5 = cl.LocalMemory(np.dtype(np.float64).itemsize * self.h_p * self.h_mdash)
    self.local_mem_mvmul2_6 = cl.LocalMemory(np.dtype(np.float64).itemsize * self.h_p * self.h_mdash)
    # Work-group scratch space sized by the largest horizon.
    self.local_mem = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
    self.local_mem_x = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
    self.local_mem_y = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
    self.local_mem_z = cl.LocalMemory(np.dtype(np.float64).itemsize * model.max_horizon_length)
    # Build OpenCL data structures
    # Read only
    self.d_coords = cl.Buffer(self.context,
                              cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                              hostbuf=self.h_coords)
    self.d_bc_types = cl.Buffer(self.context,
                                cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                hostbuf=self.h_bc_types)
    self.d_bc_values = cl.Buffer(self.context,
                                 cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                 hostbuf=self.h_bc_values)
    self.d_force_bc_types = cl.Buffer(self.context,
                                      cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                      hostbuf=self.h_force_bc_types)
    self.d_force_bc_values = cl.Buffer(self.context,
                                       cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                       hostbuf=self.h_force_bc_values)
    self.d_vols = cl.Buffer(self.context,
                            cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                            hostbuf=self.h_vols)
    self.d_bond_stiffness = cl.Buffer(self.context,
                                      cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                      hostbuf=self.h_bond_stiffness)
    self.d_bond_critical_stretch = cl.Buffer(self.context,
                                             cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                                             hostbuf=self.h_bond_critical_stretch)
    self.d_horizons_lengths = cl.Buffer(
        self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
        hostbuf=self.h_horizons_lengths)
    # Read and write
    self.d_horizons = cl.Buffer(
        self.context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR,
        hostbuf=self.h_horizons)
    self.d_un = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_un.nbytes)
    self.d_udn_x = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn_x.nbytes)
    self.d_udn_y = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn_y.nbytes)
    self.d_udn_z = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn_z.nbytes)
    self.d_udn1_x = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn1_x.nbytes)
    self.d_udn1_y = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn1_y.nbytes)
    self.d_udn1_z = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_udn1_z.nbytes)
    self.d_bdn_x = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_bdn_x.nbytes)
    self.d_bdn_y = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_bdn_y.nbytes)
    self.d_bdn_z = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_bdn_z.nbytes)
    self.d_bdn1_x = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_bdn1_x.nbytes)
    self.d_bdn1_y = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_bdn1_y.nbytes)
    self.d_bdn1_z = cl.Buffer(self.context, cl.mem_flags.READ_WRITE, self.h_bdn1_z.nbytes)
    # Write only
    self.d_damage = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY, self.h_damage.nbytes)
    # Initialize kernel parameters
    # (None entries mean buffer arguments; only scalars need dtypes.)
    self.cl_kernel_update_displacement.set_scalar_arg_dtypes(
        [None, None, None, None, None, None, None, None, None, None, None, None, None, None, None])
    self.cl_kernel_update_acceleration.set_scalar_arg_dtypes(
        [None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None])
    self.cl_kernel_reduce_damage.set_scalar_arg_dtypes([None, None, None, None])
    self.cl_kernel_matrix_vector_mul1.set_scalar_arg_dtypes(
        [None, None, None, None, None])
    self.cl_kernel_matrix_vector_mul2.set_scalar_arg_dtypes(
        [None, None, None, None, None, None])
    self.cl_kernel_matrix_vector_mul3.set_scalar_arg_dtypes(
        [None, None, None, None, None, None])
    self.cl_kernel_reduce_rows.set_scalar_arg_dtypes(
        [None, None, None])
    def __call__(self):
        """
        Conduct one iteration of the integrator.

        NOTE(review): the body of this method consists only of this docstring,
        so calling it returns None as written. The :arg:/:returns: entries
        below describe the interface that concrete integrator subclasses
        presumably implement -- confirm that subclasses override __call__.

        :arg u: A (`nnodes`, 3) array containing the displacements of all
            nodes.
        :type u: :class:`numpy.ndarray`
        :arg f: A (`nnodes`, 3) array containing the components of the force
            acting on each node.
        :type f: :class:`numpy.ndarray`
        :returns: The new displacements after integration.
        :rtype: :class:`numpy.ndarray`
        """
def noise(self, num_nodes, num_steps, degrees_freedom = 3):
"""Takes sample from multivariate normal distribution
with covariance matrix whith Cholesky factor, L
:arg C: Cholesky factor, C
:arg | |
expr: SizeParam) -> R:
return frozenset([expr])
    def map_stack(self, expr: Stack) -> R:
        # Every method below follows the same pattern: record *expr* itself
        # and combine it with whatever the superclass gathers from the
        # sub-expressions of *expr*.
        return self.combine(frozenset([expr]), super().map_stack(expr))

    def map_roll(self, expr: Roll) -> R:
        return self.combine(frozenset([expr]), super().map_roll(expr))

    def map_axis_permutation(self, expr: AxisPermutation) -> R:
        return self.combine(frozenset([expr]), super().map_axis_permutation(expr))

    def _map_index_base(self, expr: IndexBase) -> R:
        # Shared handler for all indexing node types.
        return self.combine(frozenset([expr]), super()._map_index_base(expr))

    def map_reshape(self, expr: Reshape) -> R:
        return self.combine(frozenset([expr]), super().map_reshape(expr))

    def map_concatenate(self, expr: Concatenate) -> R:
        return self.combine(frozenset([expr]), super().map_concatenate(expr))

    def map_einsum(self, expr: Einsum) -> R:
        return self.combine(frozenset([expr]), super().map_einsum(expr))

    def map_named_array(self, expr: NamedArray) -> R:
        return self.combine(frozenset([expr]), super().map_named_array(expr))

    def map_loopy_call_result(self, expr: LoopyCallResult) -> R:
        return self.combine(frozenset([expr]), super().map_loopy_call_result(expr))
# }}}
# {{{ SubsetDependencyMapper
class SubsetDependencyMapper(DependencyMapper):
    """
    Mapper to combine the dependencies of an expression that are a subset of
    *universe*.
    """
    def __init__(self, universe: FrozenSet[Array]):
        self.universe = universe
        super().__init__()

    def combine(self, *args: FrozenSet[Array]) -> FrozenSet[Array]:
        # Accumulate, keeping only the dependencies that lie in *universe*.
        result: FrozenSet[Array] = frozenset()
        for arg in args:
            result = result | (arg & self.universe)
        return result
# }}}
# {{{ InputGatherer
class InputGatherer(CombineMapper[FrozenSet[InputArgumentBase]]):
    """
    Mapper to combine all instances of :class:`pytato.array.InputArgumentBase` that
    an array expression depends on.
    """
    def combine(self, *args: FrozenSet[InputArgumentBase]
                ) -> FrozenSet[InputArgumentBase]:
        # Union of all the argument sets.
        return frozenset().union(*args)

    def map_placeholder(self, expr: Placeholder) -> FrozenSet[InputArgumentBase]:
        return self.combine(frozenset({expr}), super().map_placeholder(expr))

    def map_data_wrapper(self, expr: DataWrapper) -> FrozenSet[InputArgumentBase]:
        return self.combine(frozenset({expr}), super().map_data_wrapper(expr))

    def map_size_param(self, expr: SizeParam) -> FrozenSet[SizeParam]:
        # A SizeParam has no sub-expressions; it is its own input set.
        return frozenset({expr})
# }}}
# {{{ SizeParamGatherer
class SizeParamGatherer(CombineMapper[FrozenSet[SizeParam]]):
    """
    Mapper to combine all instances of :class:`pytato.array.SizeParam` that
    an array expression depends on.
    """
    def combine(self, *args: FrozenSet[SizeParam]
                ) -> FrozenSet[SizeParam]:
        # Union of all the argument sets.
        return frozenset().union(*args)

    def map_size_param(self, expr: SizeParam) -> FrozenSet[SizeParam]:
        return frozenset({expr})
# }}}
# {{{ WalkMapper
class WalkMapper(Mapper):
    """
    A mapper that walks over all the arrays in a :class:`pytato.Array`.

    Users may override the specific mapper methods in a derived class or
    override :meth:`WalkMapper.visit` and :meth:`WalkMapper.post_visit`.

    .. automethod:: visit
    .. automethod:: post_visit
    """

    def visit(self, expr: Any) -> bool:
        """
        If this method returns *True*, *expr* is traversed during the walk.
        If this method returns *False*, *expr* is not traversed as a part of
        the walk.
        """
        return True

    def post_visit(self, expr: Any) -> None:
        """
        Callback after *expr* has been traversed.
        """

    def rec_idx_or_size_tuple(self, situp: Tuple[IndexOrShapeExpr, ...]) -> None:
        # Non-array entries (e.g. plain integers in a shape) are leaves and
        # are not recursed into.
        for entry in situp:
            if isinstance(entry, Array):
                self.rec(entry)

    def map_index_lambda(self, expr: IndexLambda) -> None:
        if self.visit(expr):
            # Sorting by binding name gives a deterministic traversal order.
            for _name, subexpr in sorted(expr.bindings.items()):
                self.rec(subexpr)
            self.rec_idx_or_size_tuple(expr.shape)
            self.post_visit(expr)

    def map_placeholder(self, expr: Placeholder) -> None:
        if self.visit(expr):
            self.rec_idx_or_size_tuple(expr.shape)
            self.post_visit(expr)

    map_data_wrapper = map_placeholder
    map_size_param = map_placeholder

    def _map_index_remapping_base(self, expr: IndexRemappingBase) -> None:
        if self.visit(expr):
            self.rec(expr.array)
            self.post_visit(expr)

    map_roll = _map_index_remapping_base
    map_axis_permutation = _map_index_remapping_base
    map_reshape = _map_index_remapping_base

    def _map_index_base(self, expr: IndexBase) -> None:
        if self.visit(expr):
            self.rec(expr.array)
            self.rec_idx_or_size_tuple(expr.indices)
            self.post_visit(expr)

    # NOTE: these delegate via self._map_index_base (rather than class-level
    # aliases) so that subclasses overriding _map_index_base affect them too.
    def map_basic_index(self, expr: BasicIndex) -> None:
        return self._map_index_base(expr)

    def map_contiguous_advanced_index(self,
                                      expr: AdvancedIndexInContiguousAxes
                                      ) -> None:
        return self._map_index_base(expr)

    def map_non_contiguous_advanced_index(self,
                                          expr: AdvancedIndexInNoncontiguousAxes
                                          ) -> None:
        return self._map_index_base(expr)

    def map_stack(self, expr: Stack) -> None:
        if self.visit(expr):
            for subexpr in expr.arrays:
                self.rec(subexpr)
            self.post_visit(expr)

    def map_concatenate(self, expr: Concatenate) -> None:
        if self.visit(expr):
            for subexpr in expr.arrays:
                self.rec(subexpr)
            self.post_visit(expr)

    def map_einsum(self, expr: Einsum) -> None:
        if self.visit(expr):
            for operand in expr.args:
                self.rec(operand)
            self.post_visit(expr)

    def map_dict_of_named_arrays(self, expr: DictOfNamedArrays) -> None:
        if self.visit(expr):
            for subexpr in expr._data.values():
                self.rec(subexpr)
            self.post_visit(expr)

    def map_named_array(self, expr: NamedArray) -> None:
        if self.visit(expr):
            self.rec(expr._container)
            self.post_visit(expr)

    def map_loopy_call(self, expr: LoopyCall) -> None:
        if self.visit(expr):
            # Bindings may hold non-array (scalar) values; only walk arrays.
            for _name, subexpr in sorted(expr.bindings.items()):
                if isinstance(subexpr, Array):
                    self.rec(subexpr)
            self.post_visit(expr)
# }}}
# {{{ CachedWalkMapper
class CachedWalkMapper(WalkMapper):
    """
    WalkMapper that visits each node in the DAG exactly once. This loses some
    information compared to :class:`WalkMapper` as a node is visited only from
    one of its predecessors.
    """

    def __init__(self) -> None:
        self._visited_ids: Set[int] = set()

    # type-ignore reason: CachedWalkMapper.rec's type does not match
    # WalkMapper.rec's type
    def rec(self, expr: ArrayOrNames) -> None:  # type: ignore
        # id(x) is used as the cache key (rather than the expression itself)
        # because some downstream users (NamesValidityChecker) rely on
        # structurally equal objects being walked separately (e.g. to detect
        # separate instances of Placeholder with the same name).
        key = id(expr)
        if key in self._visited_ids:
            return

        # type-ignore reason: super().rec expects either 'Array' or
        # 'AbstractResultWithNamedArrays', passed 'ArrayOrNames'
        super().rec(expr)  # type: ignore
        self._visited_ids.add(key)
# }}}
# {{{ TopoSortMapper
class TopoSortMapper(CachedWalkMapper):
    """A mapper that creates a list of nodes in topological order.

    :members: topological_order
    """
    def __init__(self) -> None:
        super().__init__()
        # Filled during the walk: post_visit fires after a node's
        # predecessors have been traversed, and (via CachedWalkMapper)
        # each node is appended exactly once.
        self.topological_order: List[Array] = []

    def post_visit(self, expr: Any) -> None:
        self.topological_order.append(expr)
# }}}
# {{{ MapAndCopyMapper
class CachedMapAndCopyMapper(CopyMapper):
    """
    Mapper that applies *map_fn* to each node and copies it. Results of
    traversals are memoized i.e. each node is mapped via *map_fn* exactly once.
    """

    def __init__(self, map_fn: Callable[[ArrayOrNames], ArrayOrNames]) -> None:
        super().__init__()
        self.map_fn: Callable[[ArrayOrNames], ArrayOrNames] = map_fn

    # type-ignore-reason: incompatible with Mapper.rec()
    def rec(self, expr: ArrayOrNames) -> ArrayOrNames:  # type: ignore[override]
        try:
            return self._cache[expr]  # type: ignore[no-any-return]
        except KeyError:
            # First encounter: apply *map_fn*, then recurse/copy, then memoize.
            remapped = super().rec(self.map_fn(expr))
            self._cache[expr] = remapped
            return remapped  # type: ignore[no-any-return]

    # type-ignore-reason: Mapper.__call__ returns Any
    def __call__(self, expr: ArrayOrNames) -> ArrayOrNames:  # type: ignore[override]
        return self.rec(expr)
# }}}
# {{{ MPMS materializer
@dataclass(frozen=True, eq=True)
class MPMSMaterializerAccumulator:
    """This class serves as the return value of :class:`MPMSMaterializer`. It
    contains the set of materialized predecessors and the rewritten expression
    (i.e. the expression with tags for materialization applied).
    """
    # Materialized arrays that *expr* (transitively) depends on.
    materialized_predecessors: FrozenSet[Array]
    # The rewritten expression.
    expr: Array
def _materialize_if_mpms(expr: Array,
                         nsuccessors: int,
                         predecessors: Iterable[MPMSMaterializerAccumulator]
                         ) -> MPMSMaterializerAccumulator:
    """
    Returns an instance of :class:`MPMSMaterializerAccumulator`, that
    materializes *expr* if it has more than 1 successors and more than 1
    materialized predecessors.
    """
    # Union of the materialized predecessors of all the predecessors.
    materialized: FrozenSet[Array] = frozenset()
    for pred in predecessors:
        materialized = materialized | pred.materialized_predecessors

    if nsuccessors > 1 and len(materialized) > 1:
        # Multiple-predecessor, multiple-successor node: tag for storage.
        stored_expr = expr.tagged(ImplStored())
        return MPMSMaterializerAccumulator(frozenset([stored_expr]), stored_expr)

    return MPMSMaterializerAccumulator(materialized, expr)
class MPMSMaterializer(Mapper):
    """See :func:`materialize_with_mpms` for an explanation.

    Rewrites an expression DAG, deciding per node (via
    :func:`_materialize_if_mpms`) whether to tag it for materialization
    based on the successor counts supplied in *nsuccessors*.
    """
    def __init__(self, nsuccessors: Mapping[Array, int]):
        super().__init__()
        # Number of successors of each node in the DAG being rewritten.
        self.nsuccessors = nsuccessors
        # Memoization table: each node is rewritten exactly once.
        self.cache: Dict[ArrayOrNames, MPMSMaterializerAccumulator] = {}

    # type-ignore reason: return type not compatible with Mapper.rec's type
    def rec(self, expr: ArrayOrNames) -> MPMSMaterializerAccumulator:  # type: ignore
        if expr in self.cache:
            return self.cache[expr]
        # type-ignore reason: type not compatible with super.rec() type
        result: MPMSMaterializerAccumulator = super().rec(expr)  # type: ignore
        self.cache[expr] = result
        return result

    def _map_input_base(self, expr: InputArgumentBase
                        ) -> MPMSMaterializerAccumulator:
        # Inputs are materialized by definition.
        return MPMSMaterializerAccumulator(frozenset([expr]), expr)

    map_placeholder = _map_input_base
    map_data_wrapper = _map_input_base
    map_size_param = _map_input_base

    def map_named_array(self, expr: NamedArray) -> MPMSMaterializerAccumulator:
        raise NotImplementedError("only LoopyCallResult named array"
                                  " supported for now.")

    def map_index_lambda(self, expr: IndexLambda) -> MPMSMaterializerAccumulator:
        # Rewrite each binding, then rebuild the node with the rewritten
        # bindings (argument order of the IndexLambda constructor matters).
        children_rec = {bnd_name: self.rec(bnd)
                        for bnd_name, bnd in expr.bindings.items()}

        new_expr = IndexLambda(expr.expr,
                               expr.shape,
                               expr.dtype,
                               {bnd_name: bnd.expr
                                for bnd_name, bnd in children_rec.items()},
                               axes=expr.axes,
                               tags=expr.tags)
        return _materialize_if_mpms(new_expr, self.nsuccessors[expr],
                                    children_rec.values())

    def map_stack(self, expr: Stack) -> MPMSMaterializerAccumulator:
        rec_arrays = [self.rec(ary) for ary in expr.arrays]
        new_expr = Stack(tuple(ary.expr for ary in rec_arrays),
                         expr.axis, expr.axes, expr.tags)

        return _materialize_if_mpms(new_expr,
                                    self.nsuccessors[expr],
                                    rec_arrays)

    def map_concatenate(self, expr: Concatenate) -> MPMSMaterializerAccumulator:
        rec_arrays = [self.rec(ary) for ary in expr.arrays]
        new_expr = Concatenate(tuple(ary.expr for ary in rec_arrays),
                               expr.axis,
                               expr.axes,
                               expr.tags)
        return _materialize_if_mpms(new_expr,
                                    self.nsuccessors[expr],
                                    rec_arrays)

    def map_roll(self, expr: Roll) -> MPMSMaterializerAccumulator:
        rec_array = self.rec(expr.array)
        new_expr = Roll(rec_array.expr, expr.shift, expr.axis, expr.axes, expr.tags)
        return _materialize_if_mpms(new_expr, self.nsuccessors[expr],
                                    (rec_array,))

    def map_axis_permutation(self, expr: AxisPermutation
                             ) -> MPMSMaterializerAccumulator:
        rec_array = self.rec(expr.array)
        new_expr = AxisPermutation(rec_array.expr, expr.axis_permutation,
                                   expr.axes, expr.tags)
        return _materialize_if_mpms(new_expr,
                                    self.nsuccessors[expr],
                                    (rec_array,))

    def _map_index_base(self, expr: IndexBase) -> MPMSMaterializerAccumulator:
        rec_array = self.rec(expr.array)
        # Only array-valued indices are rewritten; scalar indices are kept.
        rec_indices = {i: self.rec(idx)
                       for i, idx in enumerate(expr.indices)
                       if isinstance(idx, Array)}

        new_expr = type(expr)(rec_array.expr,
                              tuple(rec_indices[i].expr
                                    if i in rec_indices
                                    else expr.indices[i]
                                    for i in range(
                                        len(expr.indices))),
                              expr.axes,
                              expr.tags)

        return _materialize_if_mpms(new_expr,
                                    self.nsuccessors[expr],
                                    (rec_array,) + tuple(rec_indices.values())
                                    )

    map_basic_index = _map_index_base
    map_contiguous_advanced_index = _map_index_base
    map_non_contiguous_advanced_index = _map_index_base

    def map_reshape(self, expr: Reshape) -> MPMSMaterializerAccumulator:
        rec_array = self.rec(expr.array)
        new_expr = Reshape(rec_array.expr, expr.newshape,
                           expr.order, expr.axes, expr.tags)

        return _materialize_if_mpms(new_expr,
                                    self.nsuccessors[expr],
                                    (rec_array,))

    def map_einsum(self, expr: Einsum) -> MPMSMaterializerAccumulator:
        rec_arrays = [self.rec(ary) for ary in expr.args]
        new_expr = Einsum(expr.access_descriptors,
                          tuple(ary.expr for ary in rec_arrays),
                          expr.axes,
                          expr.tags)

        return _materialize_if_mpms(new_expr,
                                    self.nsuccessors[expr],
                                    rec_arrays)

    def map_dict_of_named_arrays(self, expr: DictOfNamedArrays
                                 ) -> MPMSMaterializerAccumulator:
        raise NotImplementedError

    def map_loopy_call_result(self, expr: NamedArray) -> MPMSMaterializerAccumulator:
        # loopy call result is always materialized
        return MPMSMaterializerAccumulator(frozenset([expr]), expr)
# }}}
# {{{ mapper frontends
def copy_dict_of_named_arrays(source_dict: DictOfNamedArrays,
copy_mapper: CopyMapper) -> DictOfNamedArrays:
"""Copy the elements of a :class:`~pytato.DictOfNamedArrays` into a
:class:`~pytato.DictOfNamedArrays`.
:param source_dict: The :class:`~pytato.DictOfNamedArrays` to copy
:param copy_mapper: A mapper that | |
N )
#
return that
# pad each mode to a new_length
    def pad(this,new_length=None, apply=True, extend=True ):
        '''Pad the time series of every (l,m) multipole (all kinds) to
        *new_length* samples.

        :param new_length: target number of samples passed through to each
            mode's pad()
        :param apply: if True, modify this object in place; otherwise operate
            on a copy and return it
        :param extend: passed through to each mode's pad()

        NOTE(review): when apply is False, the per-mode pad() result is not
        reassigned into ans.lm -- this relies on the mode's pad() mutating in
        place even for apply=False; otherwise ans holds unpadded modes and the
        length check below would fire. Confirm against gwf.pad.
        '''
        # Pad each mode
        ans = this if apply else this.copy()
        treset=True
        for z in this.lm:
            for k in this.lm[z]:
                ans.lm[z][k].pad( new_length=new_length, apply=apply, extend=extend )
                # Take the (padded) time array from the first mode processed.
                if treset:
                    ans.t = ans.lm[z][k].t
                    treset = False
        #
        if not apply:
            # Sanity check: the copy's time array must have the requested length.
            if len(ans.t)!=new_length:
                error('!!!')
        return ans
# shift the time series
    def tshift( this, shift=0, method=None, apply=True ):
        '''Time-shift every (l,m) multipole (all kinds) by *shift*.

        :param shift: time shift passed through to each mode's tshift()
        :param method: shift method passed through to each mode's tshift()
        :param apply: if True, modify this object in place (returns None);
            otherwise operate on a copy and return it

        NOTE(review): as in pad(), the per-mode tshift() return value is not
        reassigned into ans.lm when apply is False -- confirm that gwf.tshift
        mutates in place in that case.
        '''
        # shift each mode
        ans = this if apply else this.copy()
        for z in ans.lm:
            for k in ans.lm[z]:
                ans.lm[z][k].tshift( shift=shift, method=method, apply=apply )
        #
        if not apply: return ans
# Recompose the waveforms at a sky position about the source
    def recompose( this,
                   theta,
                   phi,
                   kind = None,
                   domain = None, # only useful if output_array = True
                   select_lm = None,
                   output_array = None, # Faster as the gwf constructor is not called (e.g. related ffts are not taken)
                   verbose = None):
        '''Recompose the stored multipoles into the waveform seen at sky
        position (theta, phi) about the source.

        :param theta: polar angle about the source
        :param phi: azimuthal angle about the source
        :param kind: 'strain', 'psi4' or 'news' (defaults to 'strain')
        :param domain: 'time' or 'freq'; only honored when output_array is
            truthy (the gwf branch below is time-domain recomposition)
        :param select_lm: optional subset of (l,m) multipoles to use
        :param output_array: if truthy, return raw arrays via
            __recompose_array__ instead of gwf objects
        :return: recomposed waveform (array or gwf object)
        '''
        # Validate the inputs
        if kind is None:
            # NOTE(review): msg is assigned but the warning() call is
            # commented out, so this string is currently unused.
            msg = 'no kind specified for recompose calculation. We will proceed assuming that you desire recomposed strain. Please specify the desired kind (e.g. strain, psi4 or news) you wish to be output as a keyword (e.g. kind="news")'
            # warning( msg, 'gwylm.recompose' )
            kind = 'strain'
        if domain is None:
            msg = 'no domain specified for recompose calculation. We will proceed assuming that you desire recomposed time domain data. Please specify the desired domain (e.g. time or freq) you wish to be output as a keyword (e.g. domain="freq")'
            # warning( msg, 'gwylm.recompose' )
            domain = 'time'

        # if it is desired to work with arrays
        if output_array:
            #
            # NOTE(review): unreachable as written -- kind and domain have
            # both been defaulted above, so neither can be None here.
            if (kind is None) or (domain is None):
                error('When recomposing arrays, BOTH domain and kind keyword inputs must be given.')
            ans = this.__recompose_array__( theta, phi, kind, domain, select_lm=select_lm, verbose=verbose )
        else: # if it desired to work with gwf objects (this is time domain recomposition followed by gwf construction)
            #
            ans = this.__recompose_gwf__( theta, phi, kind=kind, select_lm=select_lm, verbose=verbose )

        # Return the answer
        return ans
# Enforce M_RELATIVE_SIGN_CONVENTION
def __enforce_m_relative_phase_orientation__(this,kind=None):
# Import usefuls
from numpy import arange,sign,diff,unwrap,angle,amax,isnan,amin,log,exp,std,median,mod,mean
from scipy.stats.mstats import mode
from scipy.version import version as scipy_version
thisfun=inspect.stack()[0][3]
#
if kind is None:
kind = 'psi4'
# Use the 2,2, multipole just after wstart to determine initial phase direction
mask = arange(this.startindex,this.startindex+50)
dphi = this[2,2][kind].dphi[mask]
m=2
if int(scipy_version.split('.')[1])<16:
# Account for old scipy functionality
external_sign_convention = sign(this.L[-1]) * sign(m) * mode( sign( dphi ) )[0][0]
initially_msign_matches_wsign = sign(m) == mode( sign( dphi ) )[0][0]
else:
# Account for modern scipy functionality
external_sign_convention = sign(this.L[-1]) * sign(m) * mode( sign( dphi ) ).mode[0]
initially_msign_matches_wsign = sign(m) == mode( sign( dphi ) ).mode[0]
# if initially_msign_matches_wsign: alert('## initall, m and td freq have same sign.')
this.external_sign_convention = external_sign_convention
if this.M_RELATIVE_SIGN_CONVENTION != this.external_sign_convention:
# Let the people know what is happening.
msg = yellow('[Verify stage] Re-orienting waveform phase')+' to be consistent with internal sign convention for Psi4, where sign(dPhi/dt)=%i*sign(m)*sign(this.L[-1]).' % this.M_RELATIVE_SIGN_CONVENTION + ' Note that the internal sign convention is defined in ... nrutils/core/__init__.py as "M_RELATIVE_SIGN_CONVENTION". This message has appeared becuase the waveform is determined to obey a sign convention: sign(dPhi/dt)=%i*sign(m)*sign(this.L[-1]). Note the appearance of the initial z angular momentum, this.L[-1].'%(this.external_sign_convention)
thisfun=inspect.stack()[0][3]
warning( msg, verbose=this.verbose )
#
for l,m in this.lm:
for kind in this[l,m]:
y = this[l,m][kind]
wfarr = y.wfarr
wfarr[:,2] *= -1
y.setfields( wfarr )
this[l,m][kind] = y
# for l,m in this.lm:
# for kind in this[l,m]:
# y = this[l,m][kind]
# wfarr = y.wfarr
# wfarr[:,2] *= -this.M_RELATIVE_SIGN_CONVENTION
# y.setfields( wfarr )
# this[l,m][kind] = y
# # Try to determine the sign convention used to define phase. Note that this will be determined only once for the current object based on the l=m=2 multipole.
# if this.external_sign_convention is None:
# msk_ = y_.amp > 0.0001*amax(y_.amp)
# # msk_ = y_.amp > 0.01*amax(y_.amp)
# if int(scipy_version.split('.')[1])<16:
# # Account for old scipy functionality
# external_sign_convention = sign(this.L[-1]) * sign(m) * mode( sign( y_.dphi[msk_] ) )[0][0]
# initially_msign_matches_wsign = sign(m) == mode( sign( y_.dphi[msk_] ) )[0][0]
# else:
# # Account for modern scipy functionality
# external_sign_convention = sign(this.L[-1]) * sign(m) * mode( sign( y_.dphi[msk_] ) ).mode[0]
# initially_msign_matches_wsign = sign(m) == mode( sign( y_.dphi[msk_] ) ).mode[0]
# if initially_msign_matches_wsign: alert('## initall, m and td freq have same sign.')
# this.external_sign_convention = external_sign_convention
#
# if this.M_RELATIVE_SIGN_CONVENTION != this.external_sign_convention:
# wfarr[:,2] = -wfarr[:,2]
# y_ = mkgwf(wfarr)
# # Let the people know what is happening.
# msg = yellow('Re-orienting waveform phase')+' to be consistent with internal sign convention for Psi4, where sign(dPhi/dt)=%i*sign(m)*sign(this.L[-1]).' % this.M_RELATIVE_SIGN_CONVENTION + ' Note that the internal sign convention is defined in ... nrutils/core/__init__.py as "M_RELATIVE_SIGN_CONVENTION". This message has appeared becuase the waveform is determioned to obey and sign convention: sign(dPhi/dt)=%i*sign(m)*sign(this.L[-1]). Note the appearance of the initial z angular momentum, this.L[-1].'%(this.external_sign_convention)
# thisfun=inspect.stack()[0][3]
# warning( msg, verbose=this.verbose )
#
#@
return None
# recompose individual arrays for a select data type (psi4, strain or news)
    def __recompose_array__( this,theta,phi,kind,domain,select_lm=None,verbose=False ):
        '''
        Recompose individual arrays for a select data type (psi4, strain or news)

        :param theta,phi: sky angles about the source
        :param kind: data kind ('psi4', 'strain' or 'news')
        :param domain: 'fd'/'freq'-like values select frequency-domain arrays,
            anything else selects time-domain arrays
        :param select_lm: optional subset of (l,m) multipoles
        '''
        # Set default for select_lm
        select_lm = this.__input_lmlist__ if select_lm is None else select_lm
        # Construct functions which handle options
        fd_wfarr_dict_fun = lambda k: { lm:this.lm[lm][k].fd_wfarr for lm in select_lm }
        td_wfarr_dict_fun = lambda k: { lm:this.lm[lm][k].wfarr for lm in select_lm }
        wfarr_dict_fun = lambda d,k: fd_wfarr_dict_fun(k) if d in ('fd','freq','fequency','f') else td_wfarr_dict_fun(k)
        # Get desired waveform array
        wfarr_dict = wfarr_dict_fun(domain,kind)
        #
        # NOTE(review): this error() call appears to unconditionally flag a
        # known defect; if error() raises, the recomposition below is
        # unreachable -- confirm whether this is an intentional hard stop.
        error('There\'s a bug in this workflow that cases the spectra of h+/x to not obey conjugate symmetry!!')
        # Recompose using low level function in basics.py
        recomposed_wfarr = recompose_wfarrs( wfarr_dict, theta, phi )
        # Return answer
        ans = recomposed_wfarr
        return ans
#
    def __calc_initial_j_frame__(this,use_dynamics=False,verbose=False):
        '''
        Rotate multipoles such that initial J is parallel to z-hat.

        The Euler angles (alpha, beta, gamma) are chosen so that beta/gamma
        align J with z-hat, while alpha places the initial orbital angular
        momentum (L1+L2) in the y-z plane at the initial time step.
        '''
        # Import usefuls
        # NOTE(review): several of these names (array, cos, sin, dot, zeros,
        # ones, IUS) are unused in this method.
        from numpy import arccos,arctan2,array,linalg,cos,sin,dot,zeros,ones
        from scipy.interpolate import InterpolatedUnivariateSpline as IUS
        # Spherical angles of the initial total angular momentum J
        J = this.J.copy()
        J_norm = linalg.norm(this.J)
        thetaJ = arccos(J[2]/J_norm)
        phiJ = arctan2(J[1],J[0])
        # Define gamma and beta accordingly
        beta = -thetaJ
        gamma = -phiJ
        # Define zeta0 (i.e. -alpha) such that L is along the y-z plane at the initial time step
        L_new = rotate3 ( this.L1 + this.L2, 0, beta , gamma )
        zeta0 = arctan2( L_new.T[1], L_new.T[0] )
        alpha = -zeta0
        # Bundle rotation angles
        angles = [ alpha, beta, gamma ]
        # perform rotation
        that = this.__rotate_frame_at_all_times__(angles,verbose=verbose)
        #
        that.frame = 'J-initial('+('dyn' if use_dynamics else 'bbh')+')'
        #
        return that
#
    def __calc_initial_l_frame__(this,verbose=False):
        '''
        Rotate multipoles such that initial L is parallel to z-hat.

        Mirror image of __calc_initial_j_frame__: beta/gamma align L with
        z-hat, while alpha places J in the y-z plane at the initial time step.
        '''
        # Import usefuls
        # NOTE(review): array, cos, sin, dot, zeros, ones and IUS are unused
        # in this method.
        from numpy import arccos,arctan2,array,linalg,cos,sin,dot,zeros,ones
        from scipy.interpolate import InterpolatedUnivariateSpline as IUS
        # Spherical angles of the initial orbital angular momentum L
        L_norm = linalg.norm(this.L)
        thetaL = arccos(this.L[2]/L_norm)
        phiL = arctan2(this.L[1],this.L[0])
        # Define gamma and beta accordingly
        beta = -thetaL
        gamma = -phiL
        # Define zeta0 (i.e. -alpha) such that J is along the y-z plane at the initial time step
        J_new = rotate3 ( this.J, 0, beta , gamma )
        zeta0 = arctan2( J_new.T[1], J_new.T[0] )
        alpha = -zeta0
        # Bundle rotation angles
        angles = [ alpha, beta, gamma ]
        # perform rotation
        that = this.__rotate_frame_at_all_times__(angles,verbose=verbose)
        #
        that.frame = 'L-initial'
        #
        return that
#
def __calc_j_of_t_frame__(this,verbose=None,use_mask_and_preserve_length=False,enforce_initial_J_consistency=True):
#
from numpy.linalg import norm
from numpy import arccos,arctan2,array,cos,sin,dot,zeros,ones,zeros_like, unwrap
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
#
use_mask = use_mask_and_preserve_length
this.__calc_radiated_quantities__(use_mask=use_mask,enforce_initial_J_consistency=enforce_initial_J_consistency)
# get time series for radiated quantities
t = this.remnant['time_used']
#
J = this.remnant['J']
J_norm = norm(J, axis=1)
thetaJ = zeros( len(J_norm) )
phiJ = zeros( len(J_norm) )
for k in range ( len(J_norm) ):
thetaJ[k] = arccos(J[k,2]/J_norm[k])
phiJ[k] = arctan2(J[k,1],J[k,0])
phiJ = unwrap(phiJ)
#
phiJ_spl = IUS(t, phiJ, k=5)
dp_dt_spl = phiJ_spl.derivative()
dp_dt = dp_dt_spl(t)
# calculate zeta according to the minimal rotation condition: Eq. 18 of arxiv::1110.2965
dz_dt = -dp_dt*cos(thetaJ)
# calculate zeta
dz_dt_spl = IUS(t, dz_dt, k=5)
zeta_spl = dz_dt_spl.antiderivative()
zeta = zeta_spl(t)
# Define | |
(eta*ccms) * FDM(sc.q_arr, n)**2. * f2[Ei], axis=-1) # np.sum(_, axis=-1): summing over the q-columns
elif integrate == 'simps':
correction = (meeV**2. / mu_chie(m_chi)**2.) * simps(sc.q_arr**-2. * (eta*ccms) * FDM(sc.q_arr, n)**2. * f2[Ei], sc.q_arr, axis=-1) # simps(_, axis=-1): integrating over the q-columns
else: raise ValueError("Did not understand value of \'integrate\'. Must be either \'sum\' or \'simps\'.")
if wanna_print: print "Ei="+str(Ei)+" [index_option="+bin_option+"],\tcorrection="+str(correction)+" [correction_option="+str(correction_option)+"]"
else: raise AttributeError("WORK IN PROGRESS:\nSo far we can only give the crystal form factor numerically, from a table. Please select a value for \'target_material\' whose attribute \'_crystal_function\' is \'numeric\'.")
return estimate*correction
elif target_type == 'atomic':
raise ValueError("!!!!!!!!!!!!!!!!!!\nMODULE UNDER CONSTRUCTION!\n!!!!!!!!!!!!!!!!!!\n")
else:
raise ValueError("\'target_type\' must be either \'semiconductor\' or \'atomic\'.")
# Differential energy spectrum rate
def dRdE_discrete(m_chi, xsec, n, target_type='semiconductor', target_material='Si_num', MTarget=1., Time=1., dm_comp='SHM_default', variables='E', day=ap.tvernal, tArr=ap.time_arr, EArr=sc.E_arr, qArr=sc.q_arr, correction_option=0, integrate='sum', wanna_print=False):
    """
    DM-e scattering event rate per target mass per unit energy [# events/eV/kg/year], evaluated over an E array or a t-E array.

    Parameters
    ----------
    m_chi : DM mass [eV]
    xsec : DM-e cross section [cm^2]
    n : power of DM form factor FDM(q,n)
    target_type : whether 'semiconductor' or 'atomic' (default: 'semiconductor')
    target_material : the material that makes up the target (default: 'Si_num')
    MTarget : the mass of the target [kg] (default: 1 kg)
    Time : the unit time for the rate [yr] (default: 1 yr)
    dm_comp : key of the DM component whose velocity integral is looked up in fn_dir (default: 'SHM_default')
    variables : whether 'E' (Energy) [eV] or 'Et'/'tE' (Energy [eV]-time [days]) (default: 'E')
    day : reference day [days] (default: ap.tvernal); NOTE(review): unused in the current body -- the time-dependent branch uses tArr instead; confirm.
    tArr : time [days] array (default: ap.time_arr)
    EArr : energy [eV] array (default: sc.E_arr)
    qArr : momentum transfer [eV] array (default: sc.q_arr)
    correction_option : 0 to use the target's _fcrystal2 table, 1 for _fmatrix2 (default: 0)
    integrate : 'sum' for a discrete sum over q, 'simps' for Simpson integration (default: 'sum')
    wanna_print : whether to print debug info (default: False)
    """
    exposure = Time*sec2year*MTarget # the exposure in kg*s
    if target_type == 'semiconductor':
        if target_material not in sc.semiconductor_targets.keys(): raise ValueError("The \'target_material\' passed has not been instantiated in the \'semiconductors\' module. Either limit yourself to "+str(sc.semiconductor_targets.keys())+" or instantiate it in the \'semiconductors\' module and add it to the dictionary \'semiconductor_targets\'.")
        target = sc.semiconductor_targets[target_material] # the class of the semiconductor target material
        estimate = (exposure/target._MCell) * (rho_chi/m_chi) * xsec * (a_em*ccms) # a rough estimate of the logarithmic rate dR/dlnE (~ Time * N_Cell * <n_DM * xsec * v_rel>), which turns out to be also the first factor in the exact expression (see. Eq. 3.13 in Essig et al.)
        correction = 1. # some default value. Will be calculated below.
        if target._crystal_function == 'numeric':
            # NOTE(review): qGr and EGr are not defined in this function --
            # presumably module-level q/E meshgrids; verify.
            vminGr = vmin(qGr, EGr, m_chi)
            if variables == 'E':
                # gvmin_vec = np.vectorize(fn_dir[dm_comp, 'gvmin', 'v']) # vectorizing gvmin [s/km] for the DM component considered
                # eta = gvmin_vec(vminGr)/kms2cms # same as gvmin, but in [s/cm]
                gvmin = fn_dir[dm_comp, 'gvmin', 'v']
                eta = gvmin(vminGr)/kms2cms
                del gvmin
            elif (variables == 'Et' or variables == 'tE'):
                # gvmin_vec = np.vectorize(fn_dir[dm_comp, 'gvmin', 'tv']) # vectorizing gvmin [s/km] for the DM component considered
                # eta = gvmin_vec(day, vminGr)/kms2cms # same as gvmin, but in [s/cm]
                gvmin = fn_dir[dm_comp, 'gvmin', 'vt']
                eta = np.array([gvmin(vminGr[:,i], tArr).T for i in range(len(sc.q_arr))]).T / kms2cms
                del gvmin
            else: raise ValueError("Parameter \'variables\' can only be either \'E\' or \'Et\'.")
            if wanna_print: print 'shape of vminGr:', vminGr.shape
            del vminGr
            # Choose which tabulated crystal form factor to use
            if correction_option == 0:
                f2 = target._fcrystal2
            elif correction_option == 1:
                f2 = target._fmatrix2
            else:
                raise ValueError("Did not understand value of \'correction_option\'. Must be either 0 or 1.")
            if integrate == 'sum':
                correction = (meeV**2. / mu_chie(m_chi)**2.) * np.sum(sc.qunit * sc.q_arr**-2. * (eta*ccms) * FDM(sc.q_arr, n)**2. * f2, axis=-1) # np.sum(_, axis=-1): summing over the q-columns
            elif integrate == 'simps':
                correction = (meeV**2. / mu_chie(m_chi)**2.) * simps(sc.q_arr**-2. * (eta*ccms) * FDM(sc.q_arr, n)**2. * f2, sc.q_arr, axis=-1) # simps(_, axis=-1): integrating over the q-columns
            else: raise ValueError("Did not understand value of \'integrate\'. Must be either \'sum\' or \'simps\'.")
            if wanna_print: print 'shape of correction:', correction.shape
        else: raise AttributeError("WORK IN PROGRESS:\nSo far we can only give the crystal form factor numerically, from a table. Please select a value for \'target_material\' whose attribute \'_crystal_function\' is \'numeric\'.")
        return estimate*correction
    elif target_type == 'atomic':
        raise ValueError("!!!!!!!!!!!!!!!!!!\nMODULE UNDER CONSTRUCTION!\n!!!!!!!!!!!!!!!!!!\n")
    else:
        raise ValueError("\'target_type\' must be either \'semiconductor\' or \'atomic\'.")
def binning(spectrum, Eini=0., dE=1., bin_option='floor', integrate='sum', wanna_print=False):
    """
    Binning of a given spectrum [# events/eV/kg/year].

    Parameters
    ----------
    spectrum : DM-e scattering event rate spectrum [# events/eV/kg/year] as a function of energy [eV].
    Eini : left-most/initial value of the energy [eV] bins (default: 0.)
    dE : energy [eV] bin widths (default: 1.)
    bin_option : passed through to energy_bin() to locate Eini in sc.E_arr (default: 'floor')
    integrate : 'sum' for an average over bin entries, 'simps' for Simpson integration over each bin (default: 'sum')
    wanna_print : whether to print debug info (default: False)

    Returns
    -------
    (bin-center energies [eV], binned rates)
    """
    idx_E0 = energy_bin(Eini, bin_option=bin_option)-1 # the index in sc.E_arr that corresponds to the initial energy Eini
    reduced_Earr = sc.E_arr[idx_E0:] # the reduced array of energies, dropping everything to the left of Eini
    reduced_spectrum = spectrum[idx_E0:] # the reduced spectrum, dropping everything to the left of Eini
    NEs = my_floor(dE/sc.Eunit) # the number of indices from sc.E_arr that are in an energy bin
    ELs = (reduced_Earr[::NEs])[:-1] - sc.Eunit/2. # the energies from reduced_Earr corresponding to the left edges of the energy bins, but the last one (to avoid possible cases where the last bin has a width smaller than NEs)
    ECs = ELs + NEs*sc.Eunit/2. # the energies from reduced_Earr corresponding to the centers of the energy bins
    if wanna_print: print 'idx_E0=', idx_E0, '\n', 'len(reduced_E_arr)=', len(reduced_Earr), '\n', 'len(reduced_spectrum)=', len(reduced_spectrum), '\n', 'NEs=', NEs, '\n', 'ELs=', ELs, '\n', 'ECs=', ECs, '\n',
    if integrate == 'sum':
        binned_rates = np.array([ np.sum(reduced_spectrum[i*NEs:(i+1)*NEs]) / NEs for i in range(len(ECs)) ])
    elif integrate == 'simps':
        binned_rates = np.array([ simps(reduced_spectrum[i*NEs:(i+1)*NEs], reduced_Earr[i*NEs:(i+1)*NEs]) / (NEs*sc.Eunit) for i in range(len(ECs)) ])
    else: raise ValueError("Did not understand value of \'integrate\'. Must be either \'sum\' or \'simps\'.")
    if wanna_print: print 'NEs=', NEs, '\n', 'len(reduced_spectrum[NEs:2*NEs])=', len(reduced_spectrum[NEs:2*NEs])
    return ECs, binned_rates
def Qbinning(spectrum, target_material, bin_option='floor', integrate='sum', wanna_print=False):
    """
    Binning of a given spectrum according to the ionization level of the semiconductor.

    Parameters
    ----------
    spectrum : DM-e scattering event rate spectrum [# events/eV/kg/year] as a function of energy [eV]
    target_material : key into sc.semiconductor_targets selecting the semiconductor target
    bin_option : passed through to binning() (default: 'floor')
    integrate : 'sum' or 'simps', passed through to binning() (default: 'sum')
    wanna_print : whether to print debug info (default: False)

    Returns
    -------
    (ionization levels Q, total event rates per Q-bin)
    """
    target = sc.semiconductor_targets[target_material] # the class of the semiconductor target material
    # Bin starting at the band gap, in steps of the mean energy per e-h pair
    Eini = target._Egap
    dE = target._epsilon
    Es, rts = binning(spectrum, Eini=Eini, dE=dE, bin_option=bin_option, integrate=integrate, wanna_print=wanna_print)
    rts *= dE # we want the total rate in each Q-bin, i.e. the number of events, not the rate spectrum
    # Map each bin-center energy to its ionization level Q
    Qlist = np.vectorize(target.Qlvl)
    Qs = Qlist(Es)
    if wanna_print: print 'Es=', Es, '\n', 'Qs=', Qs, '\n', 'rts=', rts
    return Qs, rts
def Tbinning(tQspectrum, ntbins=12, years=1, rescaled_exposure=True, total_rate=False, integrate=True, Nti=20, wanna_print=False):
"""
Function that takes a Q-spectrum continuous in time bins it in the time axis.
Parameters
----------
tQspectrum : the continuous Q-spectrum for 1 year
ntbins : number of time bins in a single year (default: 12)
years : number of years (default: 1)
rescaled_exposure : whether the events will be rescaled by the time-bin exposure (default: True)
total_rate : whether we want to sum over the Q-bins (default: False)
integrate : whether we want to use integration for the binning (default: True)
Nti : number of time points per time bin for binning via integration (default: 10)
wanna_print : whether we want to print at different checkpoints (default: False)
"""
new_spectrum = [tQspectrum]*years
new_spectrum = np.concatenate(new_spectrum)
new_spectrum = new_spectrum.T# Q-rows and t-columns
ns_shape = new_spectrum.shape# Q-rows and t-columns
full_time_arr = np.linspace(0, 365.25*years, ns_shape[1])# the time array
if wanna_print:
print ns_shape
tbinned = []
if (((ns_shape[1]-1) % ntbins*years) == 0) and (not integrate):# not gonna integrate: gonna sum instead
if wanna_print:
print 'summing!'
Nti = int((ns_shape[1]-1)/(ntbins*years))
for Qi, osc in enumerate(new_spectrum):
fixed_Q = []
for i in xrange(ntbins*years):
fixed_Q.append(sum(osc[i*Nti:(i+1)*Nti])/float(Nti))
tbinned.append(fixed_Q)
else:
if wanna_print:
print 'integrating!'
for Qi, osc in enumerate(new_spectrum):
osc_fn = interp1d(full_time_arr, osc, kind='linear', fill_value='extrapolate')
fixed_Q = []
for i in xrange(ntbins*years):
time_arr = np.linspace(i*365.25/ntbins, (i+1)*365.25/ntbins, Nti)
value = simps(osc_fn(time_arr), time_arr)/(365.25/ntbins)
fixed_Q.append(value)
tbinned.append(fixed_Q)
tbinned = np.array(tbinned)
tbinned | |
'--movie', metavar = 'MOVIE', default = 'movie.json',
help = 'Specify the .json file describing the movie chunks.')
# --- movie / ABR options ---
parser.add_argument('-ml', '--movie-length', metavar='LEN', type=float, default=None,
                    help='Specify the movie length in seconds (use MOVIE length if None).')
parser.add_argument('-a', '--abr', metavar='ABR',
                    choices=abr_list.keys(), default=abr_default,
                    help='Choose ABR algorithm (%s).' % ', '.join(abr_list.keys()))
parser.add_argument('-ab', '--abr-basic', action='store_true',
                    help='Set ABR to BASIC (ABR strategy dependant).')
parser.add_argument('-ao', '--abr-osc', action='store_true',
                    help='Set ABR to minimize oscillations.')
parser.add_argument('-gp', '--gamma-p', metavar='GAMMAP', type=float, default=5,
                    help='Specify the (gamma p) product in seconds.')
parser.add_argument('-noibr', '--no-insufficient-buffer-rule', action='store_true',
                    help='Disable Insufficient Buffer Rule.')
# --- throughput-estimation options ---
parser.add_argument('-ma', '--moving-average', metavar='AVERAGE',
                    choices=average_list.keys(), default=average_default,
                    help='Specify the moving average strategy (%s).' %
                         ', '.join(average_list.keys()))
parser.add_argument('-ws', '--window-size', metavar='WINDOW_SIZE',
                    nargs='+', type=int, default=[3],
                    help='Specify sliding window size.')
parser.add_argument('-hl', '--half-life', metavar='HALF_LIFE',
                    nargs='+', type=float, default=[3, 8],
                    help='Specify EWMA half life.')
# --- playback behaviour options ---
parser.add_argument('-s', '--seek', nargs=2, metavar=('WHEN', 'SEEK'),
                    type=float, default=None,
                    help='Specify when to seek in seconds and where to seek in seconds.')
choices = ['none', 'left', 'right']
parser.add_argument('-r', '--replace', metavar='REPLACEMENT',
                    choices=choices, default='none',
                    help='Set replacement strategy (%s).' % ', '.join(choices))
parser.add_argument('-b', '--max-buffer', metavar='MAXBUFFER', type=float, default=25,
                    help='Specify the maximum buffer size in seconds.')
parser.add_argument('-noa', '--no-abandon', action='store_true',
                    help='Disable abandonment.')
parser.add_argument('-rmp', '--rampup-threshold', metavar='THRESHOLD',
                    type=int, default=None,
                    help='Specify at what quality index we are ramped up (None matches network).')
parser.add_argument('-v', '--verbose', action='store_true',
                    help='Run in verbose mode.')
# --- simulation state (globals shared with the helper functions) ---
args = parser.parse_args()
verbose = args.verbose
buffer_contents = []        # quality index of each segment currently in the buffer
buffer_fcc = 0              # progress into the first buffered chunk — presumably ms; confirm in deplete_buffer()
pending_quality_up = []
reaction_metrics = []
rebuffer_event_count = 0
rebuffer_time = 0
played_utility = 0
played_bitrate = 0
total_play_time = 0
total_bitrate_change = 0
total_log_bitrate_change = 0
total_reaction_time = 0
last_played = None
overestimate_count = 0
overestimate_average = 0
goodestimate_count = 0
goodestimate_average = 0
estimate_average = 0
rampup_origin = 0
rampup_time = None
rampup_threshold = args.rampup_threshold
max_buffer_size = args.max_buffer * 1000  # seconds -> milliseconds
manifest = load_json(args.movie) # Load json file
bitrates = manifest['bitrates_kbps'] # get bitrates
utility_offset = 0 - math.log(bitrates[0]) # so utilities[0] = 0
utilities = [math.log(b) + utility_offset for b in bitrates] # Calculate utility
# Optionally tile/truncate the segment list to the requested movie length.
# (idiom fix: compare with None via `is not None`, per PEP 8)
if args.movie_length is not None:
    l1 = len(manifest['segment_sizes_bits'])
    l2 = math.ceil(args.movie_length * 1000 / manifest['segment_duration_ms'])
    manifest['segment_sizes_bits'] *= math.ceil(l2 / l1)
    manifest['segment_sizes_bits'] = manifest['segment_sizes_bits'][0:l2]
# ManifestInfo is namedTuple having named index
manifest = ManifestInfo(segment_time = manifest['segment_duration_ms'],
                        bitrates = bitrates,
                        utilities = utilities,
                        segments = manifest['segment_sizes_bits'])
# Network Information: bandwidth is scaled by the CLI multiplier
network_trace = load_json(args.network)
network_trace = [NetworkPeriod(time = p['duration_ms'],
                               bandwidth = p['bandwidth_kbps'] * args.network_multiplier,
                               latency = p['latency_ms'])
                 for p in network_trace]
buffer_size = args.max_buffer * 1000 # default value 25 (seconds), stored in ms
gamma_p = args.gamma_p # default value 5
config = {'buffer_size': buffer_size,
          'gp': gamma_p,
          'abr_osc': args.abr_osc, # Set ABR to minimize oscillations
          'abr_basic': args.abr_basic, # Set ABR to BASIC (ABR strategy dependant)
          'no_ibr': args.no_insufficient_buffer_rule} # Disable Insufficient Buffer Rule.
abr_list[args.abr].use_abr_o = args.abr_osc
abr_list[args.abr].use_abr_u = not args.abr_osc
abr = abr_list[args.abr](config) # Create an object of called ABR with para/args of config.
network = NetworkModel(network_trace)
# Segment-replacement strategy: 'left'/'right' select Replace(0)/Replace(1)
# — which direction each index means depends on the Replace class; confirm there.
if args.replace == 'left':
    replacer = Replace(0)
elif args.replace == 'right':
    replacer = Replace(1)
else:
    replacer = NoReplace()
config = {'window_size': args.window_size, 'half_life': args.half_life}
throughput_history = average_list[args.moving_average](config)
# download first segment (outside the main loop: it seeds the throughput history)
quality = abr.get_first_quality()
size = manifest.segments[0][quality]
download_metric = network.download(size, 0, quality, 0)
download_time = download_metric.time - download_metric.time_to_first_bit
startup_time = download_time
buffer_contents.append(download_metric.quality)
t = download_metric.size / download_time # throughput sample (size/time); the previous comment mislabeled this as a time
l = download_metric.time_to_first_bit # latency in receiving first bit
throughput_history.push(download_time, t, l)
#print('%d,%d -> %d,%d' % (t, l, throughput, latency))
total_play_time += download_metric.time
if verbose:
    print('[%d-%d] %d: q=%d s=%d/%d t=%d=%d+%d bl=0->0->%d' %
          (0, round(download_metric.time), 0, download_metric.quality,
           download_metric.downloaded, download_metric.size,
           download_metric.time, download_metric.time_to_first_bit,
           download_metric.time - download_metric.time_to_first_bit,
           get_buffer_level()))
# download rest of segments
next_segment = 1
abandoned_to_quality = None
while next_segment < len(manifest.segments):
    # TODO: BEGIN TODO: reimplement seeking - currently only proof-of-concept hack
    if args.seek is not None:
        if next_segment * manifest.segment_time >= 1000 * args.seek[0]:
            next_segment = math.floor(1000 * args.seek[1] / manifest.segment_time)
            buffer_contents = []
            buffer_fcc = 0
            abr.report_seek(1000 * args.seek[1])
            args.seek = None
            rampup_origin = total_play_time
            rampup_time = None
    # TODO: END TODO: reimplement seeking - currently only proof-of-concept hack

    # do we have space for a new segment on the buffer?
    full_delay = get_buffer_level() + manifest.segment_time - buffer_size
    if full_delay > 0:
        deplete_buffer(full_delay)
        network.delay(full_delay)
        abr.report_delay(full_delay)
        if verbose:
            print('full buffer delay %d bl=%d' % (full_delay, get_buffer_level()))

    # BUG FIX: the original reset/assigned a *different* variable
    # (abandon_to_quality) at the two write sites, so this branch could
    # never observe an abandoned download; both writes now target
    # abandoned_to_quality, matching the initialization above the loop.
    if abandoned_to_quality is None:
        (quality, delay) = abr.get_quality_delay(next_segment)
        replace = replacer.check_replace(quality)
    else:
        (quality, delay) = (abandoned_to_quality, 0)
        replace = None
        abandoned_to_quality = None

    if replace is not None:
        delay = 0
        current_segment = next_segment + replace
        check_abandon = replacer.check_abandon
    else:
        current_segment = next_segment
        check_abandon = abr.check_abandon
    if args.no_abandon:
        check_abandon = None

    size = manifest.segments[current_segment][quality]

    if delay > 0:
        deplete_buffer(delay)
        network.delay(delay)
        if verbose:
            print('abr delay %d bl=%d' % (delay, get_buffer_level()))

    download_metric = network.download(size, current_segment, quality,
                                       get_buffer_level(), check_abandon)

    if verbose:
        print('[%d-%d] %d: q=%d s=%d/%d t=%d=%d+%d ' %
              (round(total_play_time), round(total_play_time + download_metric.time),
               current_segment, download_metric.quality,
               download_metric.downloaded, download_metric.size,
               download_metric.time, download_metric.time_to_first_bit,
               download_metric.time - download_metric.time_to_first_bit),
              end = '')
        if replace is None:
            if download_metric.abandon_to_quality is None:
                print('bl=%d' % get_buffer_level(), end = '')
            else:
                print(' ABANDONED to %d - %d/%d bits in %d=%d+%d ttfb+ttdl bl=%d' %
                      (download_metric.abandon_to_quality,
                       download_metric.downloaded, download_metric.size,
                       download_metric.time, download_metric.time_to_first_bit,
                       download_metric.time - download_metric.time_to_first_bit,
                       get_buffer_level()),
                      end = '')
        else:
            if download_metric.abandon_to_quality is None:
                print(' REPLACEMENT bl=%d' % get_buffer_level(), end = '')
            else:
                # (typo fix in the log message: 'REPLACMENT' -> 'REPLACEMENT')
                print(' REPLACEMENT ABANDONED after %d=%d+%d ttfb+ttdl bl=%d' %
                      (download_metric.time, download_metric.time_to_first_bit,
                       download_metric.time - download_metric.time_to_first_bit,
                       get_buffer_level()),
                      end = '')

    # play the buffer down for the wall-clock duration of the download
    deplete_buffer(download_metric.time)
    if verbose:
        print('->%d' % get_buffer_level(), end='')

    # update buffer with new download
    if replace is None:
        if download_metric.abandon_to_quality is None:
            buffer_contents += [quality]
            next_segment += 1
        else:
            # remember the forced quality for the retry on the next iteration
            abandoned_to_quality = download_metric.abandon_to_quality
    else:
        if download_metric.abandon_to_quality is None:
            if get_buffer_level() + manifest.segment_time * replace >= 0:
                buffer_contents[replace] = quality
            else:
                print('WARNING: too late to replace')
        # else: do nothing because segment abandonment does not suggest new download

    #if rampup_time == None and download_metric.abandon_to_quality == None:
    #    if rampup_threshold == None:
    #        if download_metric.quality >= sustainable_quality:
    #            rampup_time = download_metric.index * manifest.segment_time
    #    else:
    #        if download_metric.quality >= rampup_threshold:
    #            rampup_time = download_metric.index * manifest.segment_time

    if verbose:
        print('->%d' % get_buffer_level())

    abr.report_download(download_metric, replace is not None)

    # calculate throughput and latency
    download_time = download_metric.time - download_metric.time_to_first_bit
    t = download_metric.downloaded / download_time
    l = download_metric.time_to_first_bit

    # check accuracy of throughput estimate (`throughput` is presumably the
    # global maintained by the moving-average push() calls — confirm)
    if throughput > t:
        overestimate_count += 1
        overestimate_average += (throughput - t - overestimate_average) / overestimate_count
    else:
        goodestimate_count += 1
        goodestimate_average += (t - throughput - goodestimate_average) / goodestimate_count
    estimate_average += ((throughput - t - estimate_average) /
                         (overestimate_count + goodestimate_count))

    # update throughput estimate (abandoned downloads are not representative)
    if download_metric.abandon_to_quality is None:
        throughput_history.push(download_time, t, l)
# loop while next_segment < len(manifest.segments)
# All segments downloaded: drain whatever is left in the buffer.
playout_buffer()
# multiply by to_time_average to get per/chunk average
to_time_average = 1 / (total_play_time / manifest.segment_time)
count = len(manifest.segments)
# NOTE(review): `time` shadows any `time` module import in this file — confirm none is used after this point.
time = count * manifest.segment_time + rebuffer_time + startup_time
print('buffer size: %d' % buffer_size)
print('total played utility: %f' % played_utility)
print('time average played utility: %f' % (played_utility * to_time_average))
print('total played bitrate: %f' % played_bitrate)
print('time average played bitrate: %f' % (played_bitrate * to_time_average))
print('total play time: %f' % (total_play_time / 1000))  # ms -> seconds
print('total play time chunks: %f' % (total_play_time / manifest.segment_time))
print('total rebuffer: %f' % (rebuffer_time / 1000))  # ms -> seconds
print('rebuffer ratio: %f' % (rebuffer_time / total_play_time))
| |
from __future__ import absolute_import
import weakref
import unicodedata
from fontTools.agl import AGL2UV
from fontTools.misc.py23 import basestring, range
from defcon.tools import unicodeTools
from defcon.objects.base import BaseDictObject
class UnicodeData(BaseDictObject):
"""
This object serves Unicode data for the font.
**This object posts the following notifications:**
===================
Name
===================
UnicodeData.Changed
===================
This object behaves like a dict. The keys are Unicode values and the
values are lists of glyph names associated with that unicode value::
{
65 : ["A"],
66 : ["B"],
}
To get the list of glyph names associated with a particular Unicode
value, do this::
glyphList = unicodeData[65]
The object defines many more convenient ways of interacting
with this data.
.. warning::
Setting data into this object manually is *highly* discouraged.
The object automatically keeps itself in sync with the font and the
glyphs contained in the font. No manual intervention is required.
"""
changeNotificationName = "UnicodeData.Changed"
representationFactories = {}
def __init__(self, layer=None):
    """Initialize the Unicode data store, optionally bound to *layer*."""
    # Hold the layer weakly so this object never keeps it alive.
    self._layer = weakref.ref(layer) if layer is not None else None
    super(UnicodeData, self).__init__()
    self.beginSelfNotificationObservation()
    # bidirectional bookkeeping for forced (Private Use Area) values
    self._glyphNameToForcedUnicode = {}
    self._forcedUnicodeToGlyphName = {}
# --------------
# Parent Objects
# --------------
def getParent(self):
    # Legacy-style accessor; equivalent to the ``layer`` property.
    return self.layer
def _get_font(self):
    # Reach the font through the layer set, tolerating a detached object.
    layerSet = self.layerSet
    return None if layerSet is None else layerSet.font
font = property(_get_font, doc="The :class:`Font` that this object belongs to.")
def _get_layerSet(self):
    # The layer set is reached through the owning layer, if any.
    layer = self.layer
    return None if layer is None else layer.layerSet
layerSet = property(_get_layerSet, doc="The :class:`LayerSet` that this object belongs to.")
def _get_layer(self):
    # Dereference the weakref set in __init__ (may be None when unbound).
    return self._layer() if self._layer is not None else None
layer = property(_get_layer, doc="The :class:`Layer` that this object belongs to.")
# -----------
# set and get
# -----------
def removeGlyphData(self, glyphName, values):
    """
    Remove **glyphName** from the mapping of each Unicode value
    in **values**, dropping values whose glyph list becomes empty.
    This should never be called directly.
    """
    for value in values:
        glyphList = self.get(value)
        if glyphList is None:
            continue
        if glyphName in glyphList:
            glyphList.remove(glyphName)
        if not glyphList:
            super(UnicodeData, self).__delitem__(value)
    # drop any forced (PUA) value assigned to this glyph
    forcedValue = self._glyphNameToForcedUnicode.pop(glyphName, None)
    if forcedValue is not None:
        del self._forcedUnicodeToGlyphName[forcedValue]
    self.postNotification(notification=self.changeNotificationName)
def addGlyphData(self, glyphName, values):
    """
    Add the data for the glyph with **glyphName** and
    the Unicode values **values**.
    This should never be called directly.
    """
    for value in values:
        # update unicode to glyph name
        glyphList = self.get(value, [])
        if glyphName not in glyphList:
            glyphList.append(glyphName)
        # write through the base class to avoid re-entering this object's
        # own __setitem__ bookkeeping
        super(UnicodeData, self).__setitem__(value, glyphList)
    self.postNotification(notification=self.changeNotificationName)
def __delitem__(self, value):
    """Delete *value* and clean up forced references for its glyphs."""
    glyphList = self.get(value)
    if glyphList is None:
        # nothing stored for this value; silently ignore
        return
    for glyphName in glyphList:
        # remove forced (PUA) references in both directions
        forcedValue = self._glyphNameToForcedUnicode.pop(glyphName, None)
        if forcedValue is not None:
            del self._forcedUnicodeToGlyphName[forcedValue]
    super(UnicodeData, self).__delitem__(value)
    self.postNotification(notification=self.changeNotificationName)
def __setitem__(self, value, glyphList):
    # Map *value* to the names in *glyphList*, appending to any existing
    # mapping and invalidating stale forced (PUA) references.
    if value not in self:
        super(UnicodeData, self).__setitem__(value, [])
    for glyphName in glyphList:
        self[value].append(glyphName)
        # remove now out dated forced references
        if glyphName in self._glyphNameToForcedUnicode:
            forcedValue = self._glyphNameToForcedUnicode[glyphName]
            del self._forcedUnicodeToGlyphName[forcedValue]
            del self._glyphNameToForcedUnicode[glyphName]
    self.postNotification(notification=self.changeNotificationName)
def clear(self):
    """
    Completely empty the mapping and the forced-Unicode bookkeeping.
    This should never be called directly.
    """
    self._forcedUnicodeToGlyphName.clear()
    self._glyphNameToForcedUnicode.clear()
    super(UnicodeData, self).clear()
def update(self, other):
    """
    Update the data in this object with the data from **other**.
    This should never be called directly.
    """
    for value, glyphList in other.items():
        for glyphName in glyphList:
            # a freshly mapped glyph invalidates any forced (PUA) value
            forcedValue = self._glyphNameToForcedUnicode.pop(glyphName, None)
            if forcedValue is not None:
                del self._forcedUnicodeToGlyphName[forcedValue]
        # store a copy so later mutation of *other* cannot leak in
        super(UnicodeData, self).__setitem__(value, list(glyphList))
    self.postNotification(notification=self.changeNotificationName)
# -------
# Loaders
# -------
def _setupForcedValueDict(self):
    """Rebuild the forced-Unicode (PUA) lookup from the stored mapping.

    Bug fix: the original iterated ``self.values()`` while unpacking
    ``(value, glyphList)`` pairs; ``self.items()`` is required to get
    the key/value pairs.
    """
    for value, glyphList in self.items():
        glyphName = glyphList[0] if glyphList else None
        # only values inside the three Private Use Areas are "forced"
        if _privateUse1Min <= value <= _privateUse1Max:
            self._forcedUnicodeToGlyphName[value] = glyphName
        elif _privateUse2Min <= value <= _privateUse2Max:
            self._forcedUnicodeToGlyphName[value] = glyphName
        elif _privateUse3Min <= value <= _privateUse3Max:
            self._forcedUnicodeToGlyphName[value] = glyphName
def _loadForcedUnicodeValue(self, glyphName):
    """Assign a forced (PUA) Unicode value to *glyphName* if it needs one."""
    # already loaded
    if glyphName in self._glyphNameToForcedUnicode:
        return
    # glyph has a real unicode
    if self.unicodeForGlyphName(glyphName) is not None:
        return
    # find the next free PUA code point and store it in both directions
    # (the old ``startPoint`` computation was unused dead code and was removed)
    value = _findAvailablePUACode(self._forcedUnicodeToGlyphName)
    self._forcedUnicodeToGlyphName[value] = glyphName
    self._glyphNameToForcedUnicode[glyphName] = value
# ---------------
# Value Retrieval
# ---------------
def unicodeForGlyphName(self, glyphName):
    """
    Get the Unicode value for **glyphName**. Returns *None*
    if no value is found.
    """
    font = self.font
    if glyphName not in font:
        return None
    unicodes = font[glyphName].unicodes
    # the first assigned value is considered *the* value for the glyph
    return unicodes[0] if unicodes else None
def glyphNameForUnicode(self, value):
    """
    Get the first glyph assigned to the Unicode specified
    as **value**. This will return *None* if no glyph is found.
    """
    glyphList = self.get(value)
    return glyphList[0] if glyphList else None
def pseudoUnicodeForGlyphName(self, glyphName):
    """
    Get the pseudo-Unicode value for **glyphName**: the real value when
    one exists, otherwise the value of the glyph's base name (suffix and
    ligature parts stripped). Returns *None* if nothing is found.
    """
    realValue = self.unicodeForGlyphName(glyphName)
    if realValue is not None:
        return realValue
    # names beginning with a separator have no usable base
    if glyphName.startswith((".", "_")):
        return None
    # without a suffix or ligature separator there is no base to try
    if "." not in glyphName and "_" not in glyphName:
        return None
    # strip the suffix, then take the first element of a ligature
    base = glyphName.split(".", 1)[0].split("_", 1)[0]
    return self.unicodeForGlyphName(base)
def forcedUnicodeForGlyphName(self, glyphName):
    """
    Get the forced-Unicode value for **glyphName**, allocating a PUA
    code point on demand when the glyph has no real value.
    """
    realValue = self.unicodeForGlyphName(glyphName)
    if realValue is None:
        if glyphName not in self._glyphNameToForcedUnicode:
            self._loadForcedUnicodeValue(glyphName)
        realValue = self._glyphNameToForcedUnicode[glyphName]
    return realValue
def glyphNameForForcedUnicode(self, value):
    """
    Get the glyph name assigned to the forced-Unicode
    specified by **value**.
    """
    if value in self:
        glyphName = self[value]
        # the stored value is normally a list of names; use the first
        return glyphName[0] if isinstance(glyphName, list) else glyphName
    # A value is only considered valid once it has been mapped to a
    # glyph name, so unknown values yield None.
    return self._forcedUnicodeToGlyphName.get(value)
# ---------------------
# Description Retrieval
# ---------------------
def scriptForGlyphName(self, glyphName, allowPseudoUnicode=True):
    """
    Get the script for **glyphName**. If **allowPseudoUnicode** is
    True, a pseudo-Unicode value will be used if needed. Returns
    ``"Unknown"`` when no value can be found.
    """
    lookup = self.pseudoUnicodeForGlyphName if allowPseudoUnicode else self.unicodeForGlyphName
    value = lookup(glyphName)
    return "Unknown" if value is None else unicodeTools.script(value)
def blockForGlyphName(self, glyphName, allowPseudoUnicode=True):
    """
    Get the Unicode block for **glyphName**. If **allowPseudoUnicode**
    is True, a pseudo-Unicode value will be used if needed. Returns
    ``"No_Block"`` when no value can be found.
    """
    lookup = self.pseudoUnicodeForGlyphName if allowPseudoUnicode else self.unicodeForGlyphName
    value = lookup(glyphName)
    return "No_Block" if value is None else unicodeTools.block(value)
def categoryForGlyphName(self, glyphName, allowPseudoUnicode=True):
    """
    Get the Unicode category for **glyphName**. If **allowPseudoUnicode**
    is True, a pseudo-Unicode value will be used if needed. Returns
    ``"Cn"`` (unassigned) when no value can be found.
    """
    lookup = self.pseudoUnicodeForGlyphName if allowPseudoUnicode else self.unicodeForGlyphName
    value = lookup(glyphName)
    return "Cn" if value is None else unicodeTools.category(value)
def decompositionBaseForGlyphName(self, glyphName, allowPseudoUnicode=True):
    """
    Get the decomposition base for **glyphName**. If **allowPseudoUnicode**
    is True, a pseudo-Unicode value will be used if needed. This will
    return *glyphName* if nothing can be found.
    """
    if allowPseudoUnicode:
        uniValue = self.pseudoUnicodeForGlyphName(glyphName)
    else:
        uniValue = self.unicodeForGlyphName(glyphName)
    if uniValue is None:
        return glyphName
    # (the original re-tested ``uniValue is not None`` here, which is
    # always true after the early return above; the redundant guard and
    # its extra nesting have been removed)
    font = self.font
    decomposition = unicodeTools.decompositionBase(uniValue)
    if decomposition == -1:
        return glyphName
    if decomposition not in font.unicodeData:
        return glyphName
    baseGlyphName = font.unicodeData[decomposition][0]
    if "." in glyphName:
        # carry the original suffix over when the suffixed base exists
        suffix = glyphName.split(".", 1)[1]
        baseWithSuffix = baseGlyphName + "." + suffix
        if baseWithSuffix in font:
            baseGlyphName = baseWithSuffix
    return baseGlyphName
def closeRelativeForGlyphName(self, glyphName, allowPseudoUnicode=True):
    """
    Return the glyph name of the closing counterpart of **glyphName**
    (e.g. the name for ")" given the name for "("), or *None* when the
    font contains no such glyph. If **allowPseudoUnicode** is True, a
    pseudo-Unicode value will be used if needed.
    """
    return self._openCloseSearch(
        glyphName, allowPseudoUnicode, unicodeTools.closeRelative)
def openRelativeForGlyphName(self, glyphName, allowPseudoUnicode=True):
    """
    Return the glyph name of the opening counterpart of **glyphName**
    (e.g. the name for "(" given the name for ")"), or *None* when the
    font contains no such glyph. If **allowPseudoUnicode** is True, a
    pseudo-Unicode value will be used if needed.
    """
    return self._openCloseSearch(
        glyphName, allowPseudoUnicode, unicodeTools.openRelative)
def _openCloseSearch(self, glyphName, allowPseudoUnicode, lookup):
if allowPseudoUnicode:
uniValue = self.pseudoUnicodeForGlyphName(glyphName)
else:
uniValue = self.unicodeForGlyphName(glyphName)
if uniValue | |
#
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import dask_cudf
import numpy as np
from dask import delayed
from dask_cuml.core import new_ipc_thread, parse_host_port
from dask_cuml.core import device_of_devicendarray, build_host_dict
from dask.distributed import wait, default_client
from math import ceil
from numba import cuda
from toolz import first
from tornado import gen
class LinearRegression(object):
    """
    Model-Parallel Multi-GPU Linear Regression Model. Single Process Multi GPU
    supported currently
    """

    def __init__(self, fit_intercept=True, normalize=False):
        """
        Initializes the linear regression class.
        Parameters
        ----------
        fit_intercept: boolean. For more information, see `scikitlearn's OLS
        <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html>`_.
        normalize: boolean. For more information, see `scikitlearn's OLS
        <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html>`_.
        """
        self.coef_ = None       # dask_cudf of fitted coefficients, set by fit()
        # NOTE(review): fit() stores the intercept in self.intercept, not
        # intercept_ — confirm which attribute is the intended public one.
        self.intercept_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self._model_fit = False  # predict() refuses to run until this is True
        self._consec_call = 0

    def _build_params_map(self):
        # Hyperparameters forwarded to the worker-side MG fit/predict calls.
        return {"fit_intercept": self.fit_intercept,
                "normalize": self.normalize}

    def fit(self, X, y):
        """
        Fits a multi-gpu linear regression model such that each of the
        resulting coefficients are also distributed across the GPUs.

        :param X: dask-cudf DataFrame of features (one partition per worker)
        :param y: dask-cudf labels co-located with X
        :return: None; sets coef_, intercept, _locations, _ncols and dtype
        """
        client = default_client()
        # dtype of the first column is taken as representative of the frame
        self.dtype = X[X.columns[0]].compute().dtype
        coef, intercept, locations = client.sync(self._do_fit, X, y,
                                                 self.dtype)
        self.intercept = intercept
        self._locations = locations
        self._model_fit = True
        self._ncols = X.shape[1]
        self.coef_ = dask_cudf.from_delayed(coef)

    @gen.coroutine
    def _do_fit(self, X_df, y_df, dtype):
        """Coroutine: scatter column blocks of X to the workers holding the
        matching parts of y, allocate per-worker coefficient arrays, and run
        the MG OLS fit via IPC handles on the last worker."""
        client = default_client()

        # Finding location of parts of y_df to distribute columns of X_df
        loc_dict = {}
        yield wait(y_df)
        tt = yield client.who_has(y_df)
        location = tuple(tt.values())
        for i in range(X_df.npartitions):
            # NOTE(review): eval() of a dask key string is fragile/unsafe for
            # untrusted input; ast.literal_eval parses the same "(name, i)"
            # tuples — confirm keys are always produced locally by dask.
            part_number = eval(list(tt.keys())[i])[1]
            loc_dict[part_number] = parse_host_port(str(location[i])[:-3])

        # Lets divide the columns evenly, matching the order of the labels
        part_size = ceil(X_df.shape[1] / X_df.npartitions)

        # We scatter delayed operations to gather columns on the workers
        scattered = []
        coefs = []
        for i in range(X_df.npartitions):
            up_limit = min((i+1)*part_size, X_df.shape[1])
            cols = X_df.columns.values[i*part_size:up_limit]
            loc_cudf = X_df[cols]
            yield wait(loc_cudf)
            scattered.append(client.submit(preprocess_on_worker,
                                           loc_cudf,
                                           workers=[loc_dict[i]]))
            yield wait(scattered)
            coefs.append(client.submit(dev_array_on_worker,
                                       up_limit - i*part_size,
                                       dtype=dtype,
                                       unique=np.random.randint(0, 1e6),
                                       workers=[loc_dict[i]]))
            yield wait(coefs)
            del(loc_cudf)

        # Break apart Dask.array/dataframe into chunks/parts
        data_parts = scattered
        label_parts = y_df.to_delayed()
        coef_parts = coefs

        # Arrange parts into pairs. This enforces co-locality
        parts = list(map(delayed, zip(data_parts, label_parts, coef_parts)))
        parts = client.compute(parts)  # Start computation in the background
        yield wait(parts)
        for part in parts:
            if part.status == 'error':
                yield part  # trigger error locally

        # A dict in the form of { part_key: part }
        key_to_part_dict = dict([(str(part.key), part) for part in parts])
        who_has = yield client.who_has(parts)
        worker_parts = {}
        for key, workers in who_has.items():
            worker = parse_host_port(first(workers))
            if worker not in worker_parts:
                worker_parts[worker] = []
            worker_parts[worker].append(key_to_part_dict[key])

        # Create IPC handles on each worker hosting input data.
        # Format of input_devarrays = ([(X, y)..], dev)
        input_devarrays = [(worker, client.submit(fit_to_device_arrays,
                                                  part, workers=[worker]))
                           for worker, part in worker_parts.items()]
        yield wait(input_devarrays)

        # Gather IPC handles for each worker and call _fit() on the one
        # worker that executes the MG fit.
        # Last worker is the only one that can have less items.
        exec_node = loc_dict[X_df.npartitions-1]

        # Need to fetch parts on worker
        on_worker = list(filter(lambda x: x[0] == exec_node, input_devarrays))
        not_on_worker = list(filter(lambda x: x[0] != exec_node,
                                    input_devarrays))
        ipc_handles = [client.submit(get_input_ipc_handles, future,
                                     workers=[a_worker])
                       for a_worker, future in not_on_worker]
        raw_arrays = [future for a_worker, future in on_worker]

        # IPC Handles are loaded in separate threads on worker so they can be
        # used to make calls through cython. Calls _fit_on_worker (below).
        intercept = client.submit(_fit_on_worker, (ipc_handles, raw_arrays),
                                  self._build_params_map(),
                                  workers=[exec_node])
        yield wait(intercept)

        coef_series = [client.submit(coef_on_worker, coefs[i], i,
                                     X_df.shape[1],
                                     X_df.npartitions, loc_dict[i],
                                     workers=[loc_dict[i]])
                       for i in range(len(loc_dict))]
        raise gen.Return((coef_series, intercept, loc_dict))

    def predict(self, X):
        """
        Predict values for the multi-gpu linear regression model by making
        calls to the predict function with dask-cudf objects.

        :param X: a dask-cudf with data distributed one worker per GPU
        :return: a dask-cudf containing outputs of the linear regression
        :raises ValueError: when called before fit()
        """
        if self._model_fit:
            client = default_client()
            ret = client.sync(self._do_predict, X, self.coef_,
                              self._locations, self.intercept, self.dtype)
            ret = dask_cudf.from_delayed(ret)
            return ret
        else:
            raise ValueError('Model coefficients have not been fit. You need '
                             'to run the fit() method first. ')

    @gen.coroutine
    def _do_predict(self, X_df, coefs, loc_dict, intercept, dtype):
        """Coroutine: scatter column blocks, pair them with coefficient
        parts, and run the MG predict via IPC handles on the last worker.

        NOTE(review): the ``intercept`` parameter is never used — the submit
        below reads ``self.intercept`` instead; confirm which is intended.
        """
        client = default_client()
        part_size = ceil(X_df.shape[1] / X_df.npartitions)

        # We scatter delayed operations to gather columns on the workers
        scattered = []
        for i in range(X_df.npartitions):
            up_limit = min((i+1)*part_size, X_df.shape[1])
            cols = X_df.columns.values[i*part_size:up_limit]
            loc_cudf = X_df[cols]
            yield wait(loc_cudf)
            scattered.append(client.submit(preprocess_predict,
                                           loc_cudf,
                                           workers=[loc_dict[i]]))
            yield wait(scattered)
            del(loc_cudf)

        # Break apart Dask.array/dataframe into chunks/parts
        data_parts = scattered
        coef_parts = coefs.to_delayed()

        # Arrange parts into pairs. This enforces co-locality
        parts = list(map(delayed, zip(data_parts, coef_parts)))
        parts = client.compute(parts)  # Start computation in the background
        yield wait(parts)
        for part in parts:
            if part.status == 'error':
                yield part  # trigger error locally

        # A dict in the form of { part_key: part }
        key_to_part_dict = dict([(str(part.key), part) for part in parts])
        who_has = yield client.who_has(parts)
        worker_parts = {}
        for key, workers in who_has.items():
            worker = parse_host_port(first(workers))
            if worker not in worker_parts:
                worker_parts[worker] = []
            worker_parts[worker].append(key_to_part_dict[key])

        # Create IPC handles on each worker hosting input data.
        # Format of input_devarrays = ([(X, y)..], dev)
        input_devarrays = [(worker, client.submit(predict_to_device_arrays,
                                                  part, worker, loc_dict,
                                                  X_df.npartitions,
                                                  dtype=dtype,
                                                  workers=[worker]))
                           for worker, part in worker_parts.items()]
        yield wait(input_devarrays)

        # Gather IPC handles and run the predict on the executing worker.
        exec_node = loc_dict[X_df.npartitions-1]

        # Need to fetch parts on worker
        on_worker = list(filter(lambda x: x[0] == exec_node, input_devarrays))
        not_on_worker = list(filter(lambda x: x[0] != exec_node,
                                    input_devarrays))
        ipc_handles = [client.submit(get_input_ipc_handles, future,
                                     unique=np.random.randint(0, 1e6),
                                     workers=[a_worker])
                       for a_worker, future in not_on_worker]
        raw_arrays = [future for a_worker, future in on_worker]

        # IPC Handles are loaded in separate threads on worker so they can be
        # used to make calls through cython. Calls _predict_on_worker (below).
        ret = client.submit(_predict_on_worker, (ipc_handles, raw_arrays),
                            self.intercept, self._build_params_map(),
                            workers=[exec_node])
        yield wait(ret)

        dfs = [client.submit(series_on_worker, f, worker, loc_dict,
                             X_df.npartitions, X_df, workers=[worker])
               for worker, f in input_devarrays]
        return dfs

    def _build_host_dict(self, gpu_futures, client):
        """Build the host dict from the workers holding *gpu_futures*.

        The original also computed ``key_to_host_dict`` and
        ``hosts_to_key_dict``; both were unused dead code and have been
        removed — the result depends only on the worker addresses.
        """
        who_has = client.who_has(gpu_futures)
        workers = [key[0] for key in list(who_has.values())]
        return build_host_dict(workers)
def _fit_on_worker(data, params):
    """Run the multi-GPU OLS fit on the worker holding all IPC handles.

    :param data: tuple of (ipc_dev_list, devarrs_dev_list) — remote parts
                 reachable via IPC handles and parts local to this worker
    :param params: parameter map forwarded to cuML's ``_fit_mg``
    :return: the fitted intercept, or None when the fit failed
    """
    ipc_dev_list, devarrs_dev_list = data
    # Open 1 ipc thread per device so remote device arrays become accessible.
    open_ipcs = []
    for part, dev in ipc_dev_list:
        arrs = []
        for x, y, coef in part:
            arrs.append(x)
            arrs.append(y)
            arrs.append(coef)
        open_ipcs.append(new_ipc_thread(arrs, dev))
    # Collect (X, y, coef) allocation-info triplets from each IPC thread.
    alloc_info = []
    for ipc_thread in open_ipcs:
        remote_info = ipc_thread.info()
        triplet = []
        for i in range(0, len(remote_info), 3):
            triplet.append(remote_info[i])
            triplet.append(remote_info[i + 1])
            triplet.append(remote_info[i + 2])
        alloc_info.append(triplet)
    # Append allocation info for the device arrays local to this worker.
    for part, dev in devarrs_dev_list:
        local_info = []  # fix: was named `locals`, shadowing the builtin
        for X, coef, pred in part:
            local_info.append(build_alloc_info(X)[0])
            local_info.append(build_alloc_info(coef)[0])
            local_info.append(build_alloc_info(pred)[0])
        alloc_info.append(local_info)
    # fix: initialize so a failed fit returns None instead of raising a
    # NameError at the return below, which masked the real error.
    intercept = None
    try:
        from cuml.linear_model.linear_regression_mg import LinearRegressionMG as cuOLS  # NOQA
        ols = cuOLS()
        intercept = ols._fit_mg(alloc_info, params)
    except Exception as e:
        print("FAILURE in FIT: " + str(e))
    # Close the IPC threads regardless of success.
    for ipc_thread in open_ipcs:
        ipc_thread.close()
    return intercept
def _predict_on_worker(data, intercept, params):
    """Run the multi-GPU OLS predict on the worker holding all IPC handles.

    :param data: tuple of (ipc_dev_list, devarrs_dev_list) — remote parts
                 reachable via IPC handles and parts local to this worker
    :param intercept: intercept produced by the fit step
    :param params: parameter map forwarded to cuML's ``_predict_mg``
    """
    ipc_dev_list, devarrs_dev_list = data
    # Open 1 ipc thread per device so remote device arrays become accessible.
    open_ipcs = []
    for part, dev in ipc_dev_list:
        arrs = []
        for mat, coef, pred in part:
            arrs.append(mat)
            arrs.append(coef)
            arrs.append(pred)
        open_ipcs.append(new_ipc_thread(arrs, dev))
    # Collect (mat, coef, pred) allocation-info triplets per IPC thread.
    alloc_info = []
    for ipc_thread in open_ipcs:
        remote_info = ipc_thread.info()
        triplet = []
        for i in range(0, len(remote_info), 3):
            triplet.append(remote_info[i])
            triplet.append(remote_info[i + 1])
            triplet.append(remote_info[i + 2])
        alloc_info.append(triplet)
    # Append allocation info for the device arrays local to this worker.
    for part, dev in devarrs_dev_list:
        local_info = []  # fix: was named `locals`, shadowing the builtin
        for X, y, coef in part:
            local_info.append(
                build_alloc_info(X, unique=np.random.randint(0, 1e6))[0])
            local_info.append(
                build_alloc_info(y, unique=np.random.randint(0, 1e6))[0])
            local_info.append(
                build_alloc_info(coef, unique=np.random.randint(0, 1e6))[0])
        alloc_info.append(local_info)
    try:
        from cuml.linear_model.linear_regression_mg import LinearRegressionMG as cuOLS  # NOQA
        ols = cuOLS()
        ols._predict_mg(alloc_info, intercept, params)
    except Exception as e:
        print("Failure in predict(): " + str(e))
    # Close the IPC threads regardless of success.
    for ipc_thread in open_ipcs:
        ipc_thread.close()
def group(lst, n):
for i in | |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import aiohttp
from settings import WOW_CLIENT_ID, WOW_CLIENT_SECRET, LOCALE
from constants import *
async def get_data(region, access_token, **kwargs):
    """Helper function that grabs data from the World of Warcraft API.

    :param region: region code, e.g. "eu", "us" or "cn"
    :param access_token: OAuth token, or the sentinel "credential_error"
    :param kwargs: field (API section, or "wow_token"), realm, name
    :return: parsed JSON dict on success, or one of the sentinel strings
             "credential_error", "gold_error", "not_found", "connection_error"
    """
    # Propagate a failed auth straight through to the caller.
    if access_token == "credential_error":
        return access_token
    # China uses a dedicated gateway host.
    if region == "cn":
        base_api_path = "https://gateway.battlenet.com.cn"
    else:
        base_api_path = "https://%s.api.blizzard.com" % (region)
    try:
        async with aiohttp.ClientSession() as client:
            # Fires off a different API call depending on the requested content.
            if kwargs.get("field") == "wow_token":
                api_path = (
                    "%s/data/wow/token/?namespace=dynamic-%s&access_token=%s"
                    % (base_api_path, region, access_token)
                )
            else:
                api_path = (
                    "%s/wow/character/%s/%s?fields=%s&locale=%s&access_token=%s"
                    % (
                        base_api_path,
                        kwargs.get("realm"),
                        kwargs.get("name"),
                        kwargs.get("field"),
                        LOCALE,
                        access_token,
                    )
                )
            async with client.get(
                api_path, headers={"Authorization": "Bearer %s" % (access_token)}
            ) as api_response:
                if api_response.status == 200:
                    return await api_response.json()
                elif api_response.status == 404:
                    print("Error: Character not found")
                    if kwargs.get("field") == "wow_token":
                        return "gold_error"
                    return "not_found"
                else:
                    # fix: a bare `raise` with no active exception produced an
                    # opaque RuntimeError; raise a descriptive error instead.
                    # It is still handled by the except below, preserving the
                    # "connection_error" return for unexpected statuses.
                    raise RuntimeError(
                        "Unexpected HTTP status %s" % api_response.status
                    )
    except Exception as error:
        # Error receiving game data:
        print(error)
        print("Error: Connection error occurred when retrieving game data.")
        return "connection_error"
async def get_access_token(region):
    """Fetch an OAuth client-credentials access token for the given region.

    :param region: region code used to pick the battle.net OAuth host
    :return: the access token string, or "credential_error" on any failure
    """
    auth_path = "https://%s.battle.net/oauth/token" % (region)
    # fix: the redacted `<PASSWORD>` placeholder was invalid syntax; use the
    # client secret imported from settings at the top of this module.
    auth_credentials = aiohttp.BasicAuth(
        login=WOW_CLIENT_ID, password=WOW_CLIENT_SECRET
    )
    try:
        async with aiohttp.ClientSession(auth=auth_credentials) as client:
            async with client.get(
                auth_path, params={"grant_type": "client_credentials"}
            ) as auth_response:
                # fix: `assert` is stripped under -O; raise explicitly so a
                # bad status still routes to the except-handler below.
                if auth_response.status != 200:
                    raise RuntimeError(
                        "Auth failed with status %s" % auth_response.status
                    )
                auth_json = await auth_response.json()
                return auth_json["access_token"]
    except Exception:
        # Error receiving token:
        print("Error: Unable to retrieve auth token")
        return "credential_error"
def character_achievements(achievement_data, faction):
    """Accepts achievement data json and a faction string,
    and returns notable achievement progress.

    :param achievement_data: JSON dict with achievements.achievementsCompleted
    :param faction: "Alliance", "Horde", or anything else (neutral)
    :return: dict of progress strings keyed by achievement slug
    """
    # Use a set for O(1) membership tests instead of repeated list scans.
    completed = set(achievement_data["achievements"]["achievementsCompleted"])

    def status(achievement_id):
        # PvE/PvP milestones read "Completed" once earned, else "In Progress".
        return "Completed" if achievement_id in completed else "In Progress"

    def feat(aotc_id, ce_id):
        # Cutting Edge (tier 2) supersedes Ahead of the Curve; empty if neither.
        if ce_id in completed:
            return "Cutting Edge"
        if aotc_id in completed:
            return "Ahead of the Curve"
        return ""

    # RBG achievements have a different id/name based on faction.
    # fix: default the names so an unknown/neutral faction no longer raises
    # NameError when building the result dict below.
    rbg_2400_name = rbg_2000_name = rbg_1500_name = ""
    rbg_2400 = rbg_2000 = rbg_1500 = "In Progress"
    if faction == "Alliance":
        rbg_2400_name = AC_GRAND_MARSHALL_NAME
        rbg_2000_name = AC_LIEAUTENANT_COMMANDER_NAME
        rbg_1500_name = AC_SERGEANT_MAJOR_NAME
        rbg_2400 = status(AC_GRAND_MARSHALL)
        rbg_2000 = status(AC_LIEUTENANT_COMMANDER)
        rbg_1500 = status(AC_SERGEANT_MAJOR)
    if faction == "Horde":
        rbg_2400_name = AC_HIGH_WARLORD_NAME
        rbg_2000_name = AC_CHAMPION_NAME
        rbg_1500_name = AC_FIRST_SERGEANT_NAME
        rbg_2400 = status(AC_HIGH_WARLORD)
        rbg_2000 = status(AC_CHAMPION)
        rbg_1500 = status(AC_FIRST_SERGEANT)

    achievement_list = {
        "keystone_season_master": status(AC_SEASON_KEYSTONE_MASTER),
        "keystone_season_conqueror": status(AC_SEASON_KEYSTONE_CONQUEROR),
        "arena_challenger": status(AC_ARENA_CHALLENGER),
        "arena_rival": status(AC_ARENA_RIVAL),
        "arena_duelist": status(AC_ARENA_DUELIST),
        "arena_gladiator": status(AC_ARENA_GLADIATOR),
        "rbg_2400_name": rbg_2400_name,
        "rbg_2000_name": rbg_2000_name,
        "rbg_1500_name": rbg_1500_name,
        "rbg_2400": rbg_2400,
        "rbg_2000": rbg_2000,
        "rbg_1500": rbg_1500,
        "ud_feat": feat(AC_AOTC_UD, AC_CE_UD),
        "bod_feat": feat(AC_AOTC_BOD, AC_CE_BOD),
        "cos_feat": feat(AC_AOTC_COS, AC_CE_COS),
        "tep_feat": feat(AC_AOTC_TEP, AC_CE_TEP),
    }
    return achievement_list
def calculate_boss_kills(raid):
    """Accepts character raid data and figures out how many bosses
    the player has killed and at what difficulty.

    :param raid: dict with a "bosses" list of per-boss kill counters
    :return: dict of per-difficulty boss-kill counts plus the boss total
    """
    boss_list = raid["bosses"]
    # Count, per difficulty, the bosses with at least one kill.
    raid_data = {
        label: sum(1 for boss in boss_list if boss[json_key] > 0)
        for label, json_key in (
            ("lfr", "lfrKills"),
            ("normal", "normalKills"),
            ("heroic", "heroicKills"),
            ("mythic", "mythicKills"),
        )
    }
    # Determines how many bosses are actually part of this raid.
    raid_data["bosses"] = len(boss_list)
    return raid_data
def character_progression(progression_data):
    """Accepts a JSON object containing raid data
    and returns the players current progression.

    :param progression_data: JSON dict with progression.raids
    :return: dict mapping raid slugs to per-difficulty kill counts
    """
    raids = progression_data["progression"]["raids"]
    # fix: default each raid to zeroed stats so a raid missing from the API
    # response no longer raises NameError when building the result below.
    empty_stats = {"lfr": 0, "normal": 0, "heroic": 0, "mythic": 0, "bosses": 0}
    uldir = empty_stats
    battle_of_dazaralor = empty_stats
    crucible_of_storms = empty_stats
    the_eternal_palace = empty_stats
    for raid in raids:
        # Loop over the raids and filter the most recent.
        if raid["id"] == RAID_UD:
            uldir = calculate_boss_kills(raid)
        elif raid["id"] == RAID_BOD:
            battle_of_dazaralor = calculate_boss_kills(raid)
        elif raid["id"] == RAID_COS:
            crucible_of_storms = calculate_boss_kills(raid)
        elif raid["id"] == RAID_TEP:
            the_eternal_palace = calculate_boss_kills(raid)
    raid_stats = {
        "uldir": uldir,
        "battle_of_dazaralor": battle_of_dazaralor,
        "crucible_of_storms": crucible_of_storms,
        "the_eternal_palace": the_eternal_palace,
    }
    return raid_stats
def character_arena_progress(pvp_data):
    """Accepts a JSON object containing pvp data
    and returns the players current arena/bg progression.

    :param pvp_data: JSON dict with pvp.brackets and totalHonorableKills
    :return: dict of bracket ratings and total honorable kills
    """
    brackets = pvp_data["pvp"]["brackets"]

    def rating(bracket_name):
        # Every bracket entry carries its current rating.
        return brackets[bracket_name]["rating"]

    return {
        "2v2": rating("ARENA_BRACKET_2v2"),
        "2v2s": rating("ARENA_BRACKET_2v2_SKIRMISH"),
        "3v3": rating("ARENA_BRACKET_3v3"),
        "rbg": rating("ARENA_BRACKET_RBG"),
        "kills": pvp_data["totalHonorableKills"],
    }
def character_talents(talent_data):
    """Accepts a JSON object containing a players talents
    and returns the players current active specalization.

    :param talent_data: JSON dict with a "talents" list
    :return: dict with key "active_spec" ("" when no spec is selected)
    """
    # Starts empty just in case the player hasn't got a spec selected.
    active_spec = ""
    for talent in talent_data["talents"]:
        # The API includes the "selected" key only on the active spec, so
        # .get() replaces the membership test plus `== True` comparison.
        if talent.get("selected"):
            active_spec = talent["spec"]["name"]
    return {"active_spec": active_spec}
def faction_details(faction_id):
    """Accepts a faction id and returns the name.

    :param faction_id: numeric faction id from the API
    :return: faction name, or "" for unknown ids (fix: previously any id
             other than Horde/Alliance raised NameError at the return)
    """
    faction_name = ""
    if faction_id == FACTION_HORDE:
        faction_name = FACTION_HORDE_NAME
    if faction_id == FACTION_ALLIANCE:
        faction_name = FACTION_ALLIANCE_NAME
    return faction_name
def class_details(class_type):
    """Accepts a class index and then determines the
    colour code and name for that class.

    :param class_type: numeric class id from the API
    :return: dict with "colour" and "name" ("" for both when unknown)
    """
    # Lookup table replaces the long if-chain. Unknown ids fall back to
    # empty strings, matching the original initial values.
    palette = {
        CLASS_WARRIOR: (CLASS_WARRIOR_COLOUR, CLASS_WARRIOR_NAME),
        CLASS_PALADIN: (CLASS_PALADIN_COLOUR, CLASS_PALADIN_NAME),
        CLASS_HUNTER: (CLASS_HUNTER_COLOUR, CLASS_HUNTER_NAME),
        CLASS_ROGUE: (CLASS_ROGUE_COLOUR, CLASS_ROGUE_NAME),
        CLASS_PRIEST: (CLASS_PRIEST_COLOUR, CLASS_PRIEST_NAME),
        CLASS_DEATH_KNIGHT: (CLASS_DEATH_KNIGHT_COLOUR, CLASS_DEATH_KNIGHT_NAME),
        CLASS_SHAMAN: (CLASS_SHAMAN_COLOUR, CLASS_SHAMAN_NAME),
        CLASS_MAGE: (CLASS_MAGE_COLOUR, CLASS_MAGE_NAME),
        CLASS_WARLOCK: (CLASS_WARLOCK_COLOUR, CLASS_WARLOCK_NAME),
        CLASS_MONK: (CLASS_MONK_COLOUR, CLASS_MONK_NAME),
        CLASS_DRUID: (CLASS_DRUID_COLOUR, CLASS_DRUID_NAME),
        CLASS_DEMON_HUNTER: (CLASS_DEMON_HUNTER_COLOUR, CLASS_DEMON_HUNTER_NAME),
    }
    class_colour, class_name = palette.get(class_type, ("", ""))
    return {"colour": class_colour, "name": class_name}
async def character_info(name, realm, query, region):
"""Main function which accepts a name/realm/query (pvp or pve).
Builds a character sheet out of their name, realm,
armory link, player thumbnail, ilvl, achievement and raid progress and more."""
# Grabs overall character data including their ilvl.
access_token = await get_access_token(region)
info = await get_data(region, access_token, name=name, realm=realm, field="items")
if info == "not_found" or info == "connection_error" or info == "credential_error":
return info
# If the data returned isn't an error string assume it found a character.
else:
try:
class_data = class_details(info["class"])
faction_name = faction_details(info["faction"])
# Gathers achievement data from the achievements API.
achievement_data = await get_data(
region, access_token, name=name, realm=realm, field="achievements"
)
achievements = character_achievements(achievement_data, faction_name)
# Gathers talent data
talent_data = await get_data(
region, access_token, name=name, realm=realm, field="talents"
)
talents = character_talents(talent_data)
# Builds a character sheet depending on the function argument.
if query == "pve":
progression_data = await get_data(
region, access_token, name=name, realm=realm, field="progression"
)
progression = character_progression(progression_data)
pve_character_sheet = {
"name": info["name"],
"level": info["level"],
"realm": info["realm"],
"faction": faction_name,
"spec": talents["active_spec"],
"battlegroup": info["battlegroup"],
"class_colour": class_data["colour"],
"class_type": class_data["name"],
"armory": "http://%s.battle.net/wow/en/character/%s/%s"
% (region, realm, name),
"thumb": info["thumbnail"],
"ilvl": info["items"]["averageItemLevelEquipped"],
"keystone_season_master": achievements["keystone_season_master"],
"keystone_season_conqueror": achievements[
"keystone_season_conqueror"
],
"ud_feat": achievements["ud_feat"],
"uldir": progression["uldir"],
"bod_feat": achievements["bod_feat"],
"battle_of_dazaralor": progression["battle_of_dazaralor"],
"cos_feat": achievements["cos_feat"],
"crucible_of_storms": progression["crucible_of_storms"],
"tep_feat": achievements["tep_feat"],
"the_eternal_palace": progression["the_eternal_palace"],
}
return pve_character_sheet
if query == "pvp":
pvp_data = await get_data(
region, access_token, name=name, realm=realm, field="pvp"
)
| |
<filename>backend/generate_z3.py<gh_stars>10-100
import logging
from stencil_ir import *
from verify import *
from assertion_to_z3 import *
import generate_sketch
import asp.codegen.ast_tools as ast_tools
def loop_key(node):
    """Return a short, stable identifier (10 hex chars) for a loop node,
    derived from the SHA-224 digest of its stringified body."""
    import hashlib
    digest = hashlib.sha224(tree_to_str(node)).hexdigest()
    return digest[:10]
class Z3Generator(object):
"""
Generates a Z3 script, with the parsed postcondition from the
output of Sketch. The output of this class is a script ready to
send to Z3 for verification.
"""
def __init__(self, program, inputs, loopvars, invariant):
"""
program is the AST of the loop nest to process.
inputs is a dict mapping names to (Sketch) types (most importantly for arrays).
invariant is a dict mapping generated function names from sketch to strings that can be parsed by parse_ir
"""
self.program = program
self.inputs = inputs
self.loopvars = loopvars
self.loopvar_mins = {}
self.loopvar_maxs = {}
self.set_maxs_and_mins()
logging.debug("Preprocessing, invariat is %s", invariant)
self.synthesized_invariant = self.process_invariants(invariant)
logging.debug("Synthesized invariant: %s", self.synthesized_invariant)
self.out_array = generate_sketch.OutputArrayFinder().get_output_arrays(program)
self.containing_loop_invs = {}
def process_invariants(self, invariant):
"""
Take strings in the invariant dict and convert into Z3 syntax.
"""
from backend_halide import ToHalide
import parse_ir
ret = {}
for inv_key in invariant.keys():
ir = parse_ir.parse_expression(invariant[inv_key])
logging.debug("loopvars are %s", self.loopvars)
if "gen" in inv_key:
converted_invariant = ToZ3(ir,self.loopvars,None,False,invariant,self.inputs).to_str()
ret[inv_key] = converted_invariant
else:
ret[inv_key] = tree_to_str(ir)
logging.debug("Processed invariants: ", ret)
return ret
    def generate(self):
        """
        Top-level. Generates an entire Z3 script for the given program and inputs.

        Builds the postcondition call and per-loop invariant signatures,
        computes weakest-precondition verification conditions, converts them
        to Z3 syntax, and concatenates the full script: invariant and
        postcondition definitions, constant declarations, assumptions, the
        main goal, and the final check-sat/get-model commands.
        """
        # first, we generate the invariant & postcondition call
        # The postcondition is applied to every parameter plus each loop
        # variable and its primed ("_p", post-state) counterpart.
        postcondition = CallExp(VarNode("postcondition"),
                                [VarNode(x) for x in self.get_params_without_types()]
                                + map(lambda x: VarNode(x), self.get_loopvars())
                                + map(lambda x: VarNode(x+"_p"), self.get_loopvars()))
        new_invariant_signatures = self.generate_invariant_func_signatures()
        for x in new_invariant_signatures.keys():
            logging.debug("inv: %s", tree_to_str(new_invariant_signatures[x]))
        # get verification conditions
        logging.debug("invariant signatures: %s", [tree_to_str(new_invariant_signatures[x]) for x in new_invariant_signatures.keys()])
        wpc = WeakestPrecondition(self.program, postcondition, [], invariant_call=new_invariant_signatures)
        conds = wpc.get()
        additional_conds = wpc.additional_conditions
        from generate_sketch import RHSInvariantReplacer
        # Rewrite invariant references on right-hand sides into their primed forms.
        conds = RHSInvariantReplacer(self.get_loopvars()).visit(conds)
        additional_conds = map(RHSInvariantReplacer(self.get_loopvars()).visit, additional_conds)
        # translate verification conditions to Z3
        logging.debug("Translating the following VCs: %s %s", tree_to_str(conds), '\n\n'.join([tree_to_str(x) for x in additional_conds]))
        vc = ToZ3(conds, self.get_loopvars(), additional_conds, True, self.synthesized_invariant, self.inputs).to_str()
        # put it all together
        ret = self.generate_invariant_funcs()
        ret += self.generate_postcon_func()
        ret += self.generate_constants() + "\n\n"
        ret += self.generate_assumptions()
        ret += self.generate_signature() + vc + "))\n\n"
        # Ask Z3 to find a counterexample to `main`; unsat means verified.
        ret += "(assert (not main))\n(check-sat)\n(get-model)\n"
        return ret
    def generate_invariant_func_signatures(self):
        """
        Generates signatures for each invariant function into a dict keyed by a hash of the loop
        body.

        Also records a name->loop mapping on self.invariant_names_to_loops
        as a side effect.
        """
        class InvGenLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
            # Walks the loop nest; for every while loop records a call
            # expression I_<loopvar>_<hash>(params..., loopvars...).
            def __init__(self, inputs, loopvars, params_without_types):
                super(InvGenLoopVisitor, self).__init__()
                self.invariants = {}
                self.invariant_names_to_loops = {} # dict from names to loops
                self.inputs = inputs
                self.loopvars = loopvars
                self.params_without_types = params_without_types
            def visit_Block(self, node):
                map(self.visit, node.body)
            def visit_WhileLoop(self, node):
                # Key the invariant by a stable hash of the loop body.
                key = loop_key(node)
                invariant_name = "I_%s_%s" % (node.iter_var.name, key)
                self.invariants[key] = CallExp(VarNode(invariant_name),
                                               [VarNode(x) for x in self.params_without_types] + map(lambda x: VarNode(x), self.loopvars))
                self.invariant_names_to_loops[invariant_name] = node
                # Recurse to pick up nested loops.
                self.visit(node.body)
        visitor = InvGenLoopVisitor(self.inputs, self.get_loopvars(), self.get_params_without_types())
        visitor.visit(self.program)
        self.invariant_names_to_loops = visitor.invariant_names_to_loops
        return visitor.invariants
def generate_invariant_funcs(self):
"""
Generates the Z3 function for the invariant.
"""
self.find_dependent_loopvars()
self.find_loopvar_nesting()
self.find_output_nesting()
from mako.template import Template
inv_template = Template(filename="templates/z3/invariant.2.z3.mako", format_exceptions=True)
ret = ""
#for looplevel in range(len(self.get_loopvars())):
#var = self.get_loopvars()[looplevel]
#ret += inv_template.render(name="I_"+var,
#looplevel=looplevel,
#loopvar_maxs=self.loopvar_maxs,
#loopvar_mins=self.loopvar_mins,
#parameters=self.get_params(),
#call_params=self.get_params_without_types(),
#outarray=self.get_out_array(),
#synthesized_invariant=self.get_synthesized_invariant_rhs(),
#loopvar=self.get_loopvars(),
#dependent_loopvars=self.dependent_loopvars,
#loopvar_nesting=self.loopvar_nesting,
#output_nesting=self.output_nesting)
for invariant in self.invariant_names_to_loops.keys():
#FIXME
looplevel = 0
node = self.invariant_names_to_loops[invariant]
thiskey = loop_key(node)
var = node.iter_var.name
containing_loop_invs = self.get_containing_loop_invs(node)
# we need to also know which loops this loop contains
thisloopcontains = self.get_loops_contained_by(node)
ret += inv_template.render(name=invariant,
synthesized_invariant=self.get_synthesized_invariant_rhs(),
looplevel=looplevel,
output_nesting=self.output_nesting,
containing_loop_invs=containing_loop_invs,
parameters=self.get_params(),
int_params=[x[0] for x in self.inputs if x[1]=="int"] + self.get_loopvars(),
call_params=self.get_params_without_types(),
outarray=self.get_out_array(),
thisloopvar=var,
thiskey=thiskey,
thisloopcontains=thisloopcontains,
loopvar=self.get_loopvars(),
mins=self.loopvar_mins,
maxs=self.loopvar_maxs,
loopvar_nesting=self.loopvar_nesting,
dependent_loopvars=self.dependent_loopvars)
return ret
def generate_postcon_func(self):
"""
Generate the Z3 function for the postcondition.
"""
from mako.template import Template
pcon_template = Template(filename="templates/z3/postcondition.z3.mako")
return pcon_template.render(parameters=self.get_params(),
call_params=self.get_params_without_types(),
loopvar_maxs=self.loopvar_maxs,
loopvar_mins=self.loopvar_mins,
outarray=self.get_out_array(),
synthesized_invariant=self.get_synthesized_invariant_rhs(),
loopvar=self.get_loopvars())
def generate_constants(self):
"""
Generates declarations for constants at the top-level of the script.
"""
all_params = [(x, "Int") for x in self.get_loopvars()] #+ [(x+"_to_check", "Int") for x in self.get_loopvars()]
all_params += [(x+"_p", "Int") for x in self.get_loopvars()] + self.get_params()
ret = "\n".join(["(declare-const %s %s)" % (x[0], x[1]) for x in all_params])
return ret
def get_params(self):
"""
Returns a list of tuples of (name, type) for each input.
"""
def is_arr(tp):
return "[" in tp[1]
def convert_type(tp):
translation_dict = {"double":"Real", "int":"Int"}
return translation_dict[tp.split()[0]]
def convert_type_array(tp):
scalar_tp = convert_type(tp.split("[")[0] + " ")
ret = ""
dim = len(tp.split("*"))
for x in range(dim):
ret += "(Array Int "
ret += scalar_tp
for x in range(dim):
ret += ")"
return ret
def is_float(tp):
return tp[1] == "double" or tp[1] == "float"
arrs = filter(is_arr, self.inputs)
non_arrs = filter(lambda x: not is_arr(x) and not is_float(x), self.inputs)
floats = filter(is_float, self.inputs)
return [(x[0], convert_type(x[1])) for x in floats] + [(x[0], "%s" % convert_type_array(x[1])) for x in arrs] + [(x[0], convert_type(x[1])) for x in non_arrs]
def generate_signature(self):
"""
Generate the signature for the main Z3 function.
"""
return "(define-fun main () Bool\n(and \n"
def generate_assumptions(self):
"""
Generates the necessary assumptions.
Right now, it generates, for a loopvar `i`, lower and upper bounds for `i` and `i_valp`.
For arrays of the type `T[N]` it generates bounds for `N` such that it is greater than 3.
"""
import asp.codegen.ast_tools
import re
ret = ""
for x in self.get_loopvars():
ret += "(assert (> (- %s %s) 1))" % (self.loopvar_maxs[x], self.loopvar_mins[x]) + "\n"
return ret
def get_params_without_types(self):
#return ', '.join(["%s" % (x[0]) for x in self.inputs])
return [x[0] for x in self.get_params()]
    def get_out_array(self):
        """Return the output arrays written by the loop nest (found at init)."""
        return self.out_array
    def get_loopvars(self):
        """Return the list of loop variable names for the nest."""
        return self.loopvars
    def get_synthesized_invariant_rhs(self):
        """Return the processed (Z3-syntax) synthesized invariant dict."""
        #return "(select b (+ i_to_check 1))"
        #return "(select b (+ (- i_to_check 1) (* j_to_check N)))"
        return self.synthesized_invariant
    def set_maxs_and_mins(self):
        """Populate loopvar_mins/loopvar_maxs with Z3 expressions for each
        loop variable's initial value and upper bound, discovered by
        scanning the program AST."""
        for x in self.get_loopvars():
            maxfinder = generate_sketch.MaxFinder(x)
            maxfinder.visit(self.program)
            initfinder = generate_sketch.InitFinder(x)
            initfinder.visit(self.program)
            # Convert the discovered bound expressions into Z3 syntax.
            self.loopvar_mins[x] = ToZ3(initfinder.init,None,None).to_str()
            self.loopvar_maxs[x] = ToZ3(maxfinder.maximum,None,None).to_str()
    def replace_idx_vars(self, tree):
        """
        Given an expression, replace the loopvariables `x` with `x_to_check`.

        Operates on (and returns) a deep copy; the input tree is unchanged.
        """
        import asp.codegen.ast_tools as ast_tools
        import grammar
        import copy
        tree_copy = copy.deepcopy(tree)
        class IdxReplacer(ast_tools.NodeTransformer):
            # Renames every VarNode whose name is one of the loop variables.
            def __init__(self, loopvars):
                self.loopvars = loopvars
            def visit_VarNode(self, node):
                if node.name in self.loopvars:
                    return grammar.VarNode(node.name+"_to_check")
                else:
                    return node
        return IdxReplacer(self.get_loopvars()).visit(tree_copy)
    def find_dependent_loopvars(self):
        """
        For each output array, find which loopvars it depends on.

        Stores the result in self.dependent_loopvars as a dict mapping
        output array name -> list of loop variables appearing in its
        left-hand-side index expressions.
        """
        class DependenceFinder(ast_tools.NodeVisitor):
            def __init__(self, outputs, loopvars):
                super(DependenceFinder, self).__init__()
                self.outputs = outputs
                self.loopvars = loopvars
                self.dependences = {}
                for x in self.outputs:
                    self.dependences[x] = []
                # State flags: only loop variables seen inside an array
                # index on the LHS of an assignment count as dependences.
                self.in_lhs = False
                self.in_arr_access = None
            def visit_Block(self, node):
                map(self.visit, node.body)
            def visit_AssignExp(self, node):
                self.in_lhs = True
                self.visit(node.lval)
                self.in_lhs = False
                self.visit(node.rval)
            def visit_ArrExp(self, node):
                if self.in_lhs:
                    self.in_arr_access = node.name.name
                self.visit(node.loc)
                self.in_arr_access = None
            def visit_VarNode(self, node):
                if self.in_lhs and self.in_arr_access and node.name in self.loopvars:
                    self.dependences[self.in_arr_access].append(node.name)
        df = DependenceFinder(self.get_out_array(), self.loopvars)
        df.visit(self.program)
        logging.debug("Dependent loop vars: %s", df.dependences)
        self.dependent_loopvars = df.dependences
    def find_loopvar_nesting(self):
        """
        Find the nesting structure for the loops.
        Stores a loopvar->[enclosing loopvars] dict on self.loopvar_nesting.
        """
        self.loopvar_nesting = {}
        for lv in self.get_loopvars():
            self.loopvar_nesting[lv] = []
        for inv in self.invariant_names_to_loops.keys():
            node = self.invariant_names_to_loops[inv]
            thisnodevar = node.iter_var.name
            # Every loop enclosing `node` contains this loop's variable.
            for x in self.get_containing_loop_invs(node):
                logging.debug("%s contained by %s", thisnodevar, x[1].iter_var.name)
                self.loopvar_nesting[thisnodevar].append(x[1].iter_var.name)
    def find_output_nesting(self):
        """
        Creates a structure to map from output->innermost loop.
        Stores the result on self.output_nesting.
        """
        class OutputNestFinder(ast_tools.NodeVisitor):
            # Tracks the loop variable of the loop currently being walked;
            # an array assignment records that variable for its output.
            def __init__(self, outputs):
                self.outputs = outputs
                self.cur_loopvar = None
                self.output_nesting = {}
            def visit_Block(self, node):
                map(self.visit, node.body)
            def visit_WhileLoop(self, node):
                # Save/restore the enclosing loop variable around the body
                # so sibling loops do not see a stale value.
                old_loopvar = self.cur_loopvar
                self.cur_loopvar = node.iter_var.name
                self.visit(node.body)
                self.cur_loopvar = old_loopvar
            def visit_AssignExp(self, node):
                if self.cur_loopvar and isinstance(node.lval, ArrExp):
                    self.output_nesting[node.lval.name.name] = self.cur_loopvar
        onf = OutputNestFinder(self.get_out_array())
        onf.visit(self.program)
        logging.debug("Output nesting: %s", onf.output_nesting)
        self.output_nesting = onf.output_nesting
    def get_containing_loop_invs(self, node):
        """
        Return a list of (invariant function name, node) that correspond to the loops
        outside a given loop.

        The full map is computed once on first use and cached in
        self.containing_loop_invs.
        """
        class ContainingLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
            # Records, for each while loop, a snapshot of the stack of
            # loops enclosing it at visit time.
            def __init__(self):
                super(ContainingLoopVisitor, self).__init__()
                self.containing_loops = {}
                self.current_outerloops = []
            def visit_Block(self, node):
                # need to do this sequentially
                for n in node.body:
                    self.visit(n)
            def visit_WhileLoop(self, node):
                key = loop_key(node)
                invariant_name = "I_%s_%s" % (node.iter_var.name, key)
                # Copy the stack so later pushes/pops don't mutate it.
                self.containing_loops[invariant_name] = self.current_outerloops[:]
                self.current_outerloops.append((invariant_name, node))
                self.visit(node.body)
                self.current_outerloops.pop()
        if not self.containing_loop_invs:
            visitor = ContainingLoopVisitor()
            visitor.visit(self.program)
            self.containing_loop_invs = visitor.containing_loops
            logging.debug("Containing loops: %s", visitor.containing_loops)
        key = loop_key(node)
        invariant_name = "I_%s_%s" % (node.iter_var.name, key)
        return self.containing_loop_invs[invariant_name]
def get_loops_contained_by(self, node):
"""
Return a list of (invariant function name, node) that correspond to the
loops contained by node.
"""
class ContainedLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
def __init__(self):
super(ContainedLoopVisitor, self).__init__()
self.contained_loops = []
def visit_Block(self, node):
map(self.visit, node.body)
def visit_WhileLoop(self, node):
key = loop_key(node)
invariant_name = "I_%s_%s" % | |
from abc import ABC, abstractmethod
from itertools import chain
from typing import List, Tuple, Iterator
from pysat.solvers import Solver
from ..structures import APTA, InconsistencyGraph
from ..variables import VarPool
# Type aliases: a clause is a tuple of non-zero literals (negative int =
# negated variable); CLAUSES is a lazy stream of such clauses.
CLAUSE = Tuple[int, ...]
CLAUSES = Iterator[CLAUSE]
def _implication_to_clauses(lhs: int, rhs: int) -> CLAUSES:
"""
generates CNF formula of an expression /lhs => rhs/
:type lhs: int
:type rhs: int
"""
yield (-lhs, rhs)
def _conjunction_implies_to_clauses(lhs: CLAUSE, rhs: int) -> CLAUSES:
"""
generates CNF formula of an expression /lhs_1 and lhs_2 and ... and lhs_n => rhs/
:type lhs: list(int)
:type rhs: int
"""
yield tuple(-lit for lit in lhs) + (rhs,)
def _iff_to_clauses(lhs: int, rhs: int) -> CLAUSES:
"""
generates CNF formula of an expression /lhs <=> rhs/
:type lhs: int
:type rhs: int
"""
yield from _implication_to_clauses(lhs, rhs)
yield from _implication_to_clauses(rhs, lhs)
def _iff_disjunction_to_clauses(lhs: int, rhs: CLAUSE) -> CLAUSES:
"""
generates CNF formula of an expression /lhs <=> rhs_1 or rhs_2 or ... or rhs_n/
:type lhs: int
:type rhs: list(int)
"""
yield (-lhs,) + rhs
yield from ((lhs, -lit) for lit in rhs)
def _iff_conjunction_to_clauses(lhs: int, rhs: CLAUSE) -> CLAUSES:
"""
generates CNF formula of an expression /lhs <=> rhs_1 and rhs_2 and ... and rhs_n/
:type lhs: int
:type rhs: list(int)
"""
yield (lhs,) + tuple(-lit for lit in rhs)
yield from ((-lhs, lit) for lit in rhs)
class BaseClausesGenerator(ABC):
    """Shared state and assumption-building logic for clause generators."""

    def __init__(self, apta: APTA, ig: InconsistencyGraph, var_pool: VarPool, assumptions_mode: str) -> None:
        self._apta = apta
        self._ig = ig
        self._vars = var_pool
        self._assumptions_mode = assumptions_mode
        self._alphabet = self._apta.alphabet
        self._alphabet_size = len(self._alphabet)

    @abstractmethod
    def generate(self, solver: Solver, size: int) -> None:
        pass

    @abstractmethod
    def generate_with_new_counterexamples(self, solver: Solver, size: int, new_from: int,
                                          changed_statuses: List[int]) -> None:
        pass

    @abstractmethod
    def generate_with_new_size(self, solver: Solver, old_size: int, new_size: int) -> None:
        pass

    def build_assumptions(self, cur_size: int, solver: Solver) -> List[int]:
        """Build the solver assumptions for `cur_size` according to the
        configured assumptions mode ('chain' uses positive alo_* literals,
        'switch' uses negated sw_* literals; any other mode yields none)."""
        if self._assumptions_mode == 'chain':
            node_lits = [self._vars.var('alo_x', cur_size, v)
                         for v in range(self._apta.size)]
            edge_lits = [self._vars.var('alo_y', cur_size, from_, l_id)
                         for from_ in range(cur_size)
                         for l_id in range(self._apta.alphabet_size)]
            return node_lits + edge_lits
        if self._assumptions_mode == 'switch':
            node_lits = [-self._vars.var('sw_x', cur_size, v)
                         for v in range(self._apta.size)]
            edge_lits = [-self._vars.var('sw_y', cur_size, from_, l_id)
                         for from_ in range(cur_size)
                         for l_id in range(self._apta.alphabet_size)]
            return node_lits + edge_lits
        return []
class ClauseGenerator(BaseClausesGenerator):
    """Facade combining the core min-DFA encoding with a selectable
    symmetry-breaking strategy; every call is delegated to both."""

    def __init__(self, apta: APTA, ig: InconsistencyGraph, var_pool: VarPool, assumptions_mode: str, sb: str) -> None:
        super().__init__(apta, ig, var_pool, assumptions_mode)
        self._mindfa_generator = MinDFAToSATClausesGenerator(apta, ig, var_pool, assumptions_mode)
        # Pick the symmetry-breaking implementation; anything other than the
        # two known strategies falls back to no symmetry breaking.
        if sb == 'BFS':
            sb_cls = BFSBasedSymBreakingClausesGenerator
        elif sb == 'TIGHTBFS':
            sb_cls = TightBFSBasedSymBreakingClausesGenerator
        else:
            sb_cls = NoSymBreakingClausesGenerator
        self._sb_generator = sb_cls(apta, ig, var_pool, assumptions_mode)

    def generate(self, solver: Solver, size: int) -> None:
        self._mindfa_generator.generate(solver, size)
        self._sb_generator.generate(solver, size)

    def generate_with_new_counterexamples(self, solver: Solver, size: int, new_from: int,
                                          changed_statuses: List[int]) -> None:
        self._mindfa_generator.generate_with_new_counterexamples(solver, size, new_from, changed_statuses)
        self._sb_generator.generate_with_new_counterexamples(solver, size, new_from, changed_statuses)

    def generate_with_new_size(self, solver: Solver, old_size: int, new_size: int) -> None:
        self._mindfa_generator.generate_with_new_size(solver, old_size, new_size)
        self._sb_generator.generate_with_new_size(solver, old_size, new_size)
class MinDFAToSATClausesGenerator(BaseClausesGenerator):
    def generate(self, solver: Solver, size: int) -> None:
        """Emit the complete min-DFA-identification encoding for `size` states:
        root mapping, node-to-state mapping constraints, DFA completeness and
        determinism, accept/reject compatibility, transition consistency, and
        inconsistency-graph constraints."""
        self._fix_start_state(solver)
        self._one_node_maps_to_alo_state(solver, size)
        self._one_node_maps_to_at_most_one_state(solver, size)
        self._dfa_is_complete(solver, size)
        self._dfa_is_deterministic(solver, size)
        self._state_status_compatible_with_node_status(solver, size)
        self._mapped_adjacent_nodes_force_transition(solver, size)
        self._mapped_node_and_transition_force_mapping(solver, size)
        self._inconsistency_graph_constraints(solver, size)
    def generate_with_new_counterexamples(self, solver: Solver, size: int, new_from: int,
                                          changed_statuses: List[int]) -> None:
        """Emit only the clauses involving APTA nodes added from `new_from` on
        (plus nodes in `changed_statuses`); DFA-global constraints such as
        completeness/determinism are already present and need no update."""
        self._one_node_maps_to_alo_state(solver, size, new_node_from=new_from)
        self._one_node_maps_to_at_most_one_state(solver, size, new_node_from=new_from)
        self._state_status_compatible_with_node_status(solver,
                                                       size,
                                                       new_node_from=new_from,
                                                       changed_statuses=changed_statuses)
        self._mapped_adjacent_nodes_force_transition(solver, size, new_node_from=new_from)
        self._mapped_node_and_transition_force_mapping(solver, size, new_node_from=new_from)
        self._inconsistency_graph_constraints(solver, size, new_node_from=new_from)
    def generate_with_new_size(self, solver: Solver, old_size: int, new_size: int) -> None:
        """Extend an encoding built for `old_size` states to `new_size` states,
        emitting only clauses that mention at least one new state.
        NOTE(review): unlike generate(), this does not re-emit
        _inconsistency_graph_constraints — confirm that is intentional."""
        self._one_node_maps_to_alo_state(solver, new_size, old_size=old_size)
        self._one_node_maps_to_at_most_one_state(solver, new_size, old_size=old_size)
        self._dfa_is_complete(solver, new_size, old_size=old_size)
        self._dfa_is_deterministic(solver, new_size, old_size=old_size)
        self._state_status_compatible_with_node_status(solver, new_size, old_size=old_size)
        self._mapped_adjacent_nodes_force_transition(solver, new_size, old_size=old_size)
        self._mapped_node_and_transition_force_mapping(solver, new_size, old_size=old_size)
def _fix_start_state(self, solver: Solver) -> None:
solver.add_clause((self._vars.var('x', 0, 0),))
def _one_node_maps_to_alo_state(self,
solver: Solver,
size: int,
new_node_from: int = 0,
old_size: int = 0) -> None:
if self._assumptions_mode == 'none':
self._one_node_maps_to_alo_state_classic(solver, size, new_node_from)
elif self._assumptions_mode == 'chain':
self._one_node_maps_to_alo_state_chain(solver, size, new_node_from, old_size)
elif self._assumptions_mode == 'switch':
self._one_node_maps_to_alo_state_switch(solver, size, new_node_from, old_size)
def _one_node_maps_to_alo_state_classic(self, solver: Solver, size: int, new_node_from: int = 0) -> None:
for i in range(new_node_from, self._apta.size):
solver.add_clause(tuple(self._vars.var('x', i, j) for j in range(size)))
def _one_node_maps_to_alo_state_chain(self,
solver: Solver,
size: int,
new_node_from: int = 0,
old_size: int = 0) -> None:
if old_size == 0:
for i in range(new_node_from, self._apta.size):
solver.add_clause(
tuple(self._vars.var('x', i, j) for j in range(old_size, size)) + (
-self._vars.var('alo_x', size, i),)
)
else:
for i in range(new_node_from, self._apta.size):
solver.add_clause(
tuple(self._vars.var('x', i, j) for j in range(old_size, size)) +
(-self._vars.var('alo_x', size, i), self._vars.var('alo_x', old_size, i))
)
def _one_node_maps_to_alo_state_switch(self,
solver: Solver,
size: int,
new_node_from: int = 0,
old_size: int = 0) -> None:
for i in range(new_node_from, self._apta.size):
solver.add_clause(
tuple(self._vars.var('x', i, j) for j in range(size)) + (self._vars.var('sw_x', size, i),)
)
if old_size > 0:
for v in range(self._apta.size):
solver.add_clause((self._vars.var('sw_x', old_size, v),))
def _one_node_maps_to_at_most_one_state(self, solver: Solver, size: int, new_node_from: int = 0,
old_size: int = 0) -> None:
for v in range(new_node_from, self._apta.size):
for i in range(old_size, size):
for j in range(0, i):
solver.add_clause(
(-self._vars.var('x', v, i), -self._vars.var('x', v, j))
)
def _dfa_is_complete(self, solver: Solver, size: int, old_size: int = 0):
if self._assumptions_mode == 'none':
self._dfa_is_complete_classic(solver, size)
elif self._assumptions_mode == 'chain':
self._dfa_is_complete_chain(solver, size, old_size)
elif self._assumptions_mode == 'switch':
self._dfa_is_complete_switch(solver, size, old_size)
def _dfa_is_complete_classic(self, solver: Solver, size: int) -> None:
for i in range(size):
for l_id in range(self._alphabet_size):
solver.add_clause(
tuple(self._vars.var('y', i, l_id, j) for j in range(size))
)
    def _dfa_is_complete_chain(self, solver: Solver, size: int, old_size: int = 0) -> None:
        """Chain-assumption completeness: clauses are guarded by -alo_y(size, ...)
        and, when extending, chained to alo_y(old_size, ...) for old states."""
        if old_size == 0:
            # NOTE(review): with old_size == 0 the inner range(old_size) is
            # empty, so this branch emits nothing; all states are then covered
            # by the unconditional loop at the bottom — confirm intended.
            for l_id in range(self._alphabet_size):
                for i in range(old_size):
                    solver.add_clause(
                        tuple(self._vars.var('y', i, l_id, j) for j in range(old_size, size)) +
                        (-self._vars.var('alo_y', size, i, l_id),)
                    )
        else:
            # Old states: only successors among the new states are added, and the
            # clause is chained to the previous size's guard variable.
            for l_id in range(self._alphabet_size):
                for i in range(old_size):
                    solver.add_clause(
                        tuple(self._vars.var('y', i, l_id, j) for j in range(old_size, size)) +
                        (-self._vars.var('alo_y', size, i, l_id), self._vars.var('alo_y', old_size, i, l_id))
                    )
        # New states (or, at old_size == 0, all states): full successor range.
        for l_id in range(self._alphabet_size):
            for i in range(old_size, size):
                solver.add_clause(
                    tuple(self._vars.var('y', i, l_id, j) for j in range(size)) +
                    (-self._vars.var('alo_y', size, i, l_id),)
                )
def _dfa_is_complete_switch(self, solver: Solver, size: int, old_size: int = 0) -> None:
for i in range(size):
for l_id in range(self._alphabet_size):
solver.add_clause(
tuple(self._vars.var('y', i, l_id, j) for j in range(size)) + (
self._vars.var('sw_y', size, i, l_id),
)
)
if old_size > 0:
for from_ in range(old_size):
for l_id in range(self._alphabet_size):
solver.add_clause((self._vars.var('sw_y', old_size, from_, l_id),))
def _dfa_is_deterministic(self, solver: Solver, size: int, old_size: int = 0) -> None:
for l_id in range(self._alphabet_size):
for i in range(old_size):
for j in range(old_size, size):
for k in range(j):
solver.add_clause(
(-self._vars.var('y', i, l_id, j), -self._vars.var('y', i, l_id, k))
)
for i in range(old_size, size):
for j in range(size):
for k in range(j):
solver.add_clause(
(-self._vars.var('y', i, l_id, j), -self._vars.var('y', i, l_id, k))
)
    def _state_status_compatible_with_node_status(self,
                                                  solver: Solver,
                                                  size: int,
                                                  new_node_from: int = 0,
                                                  old_size: int = 0,
                                                  changed_statuses=None) -> None:
        """If an accepting (rejecting) APTA node maps to a state, that state
        must be accepting (rejecting): x(i, j) -> z(j), resp. x(i, j) -> -z(j).
        Nodes listed in `changed_statuses` are re-encoded as well."""
        if changed_statuses is None:
            changed_statuses = []
        for i in chain(range(new_node_from, self._apta.size), changed_statuses):
            if self._apta.get_node(i).is_accepting():
                for j in range(old_size, size):
                    solver.append_formula(
                        _implication_to_clauses(self._vars.var('x', i, j), self._vars.var('z', j)))
            elif self._apta.get_node(i).is_rejecting():
                # Nodes that are neither accepting nor rejecting impose nothing.
                for j in range(old_size, size):
                    solver.append_formula(
                        _implication_to_clauses(self._vars.var('x', i, j), -self._vars.var('z', j)))
def _mapped_adjacent_nodes_force_transition(self, solver: Solver, size: int, new_node_from: int = 0,
old_size: int = 0) -> None:
for parent in self._apta.nodes:
for label, child in parent.children.items():
if parent.id_ >= new_node_from or child.id_ >= new_node_from:
for from_ in range(old_size, size):
for to in range(old_size, size):
solver.append_formula(
_conjunction_implies_to_clauses(
(
self._vars.var('x', parent.id_, from_),
self._vars.var('x', child.id_, to),
),
self._vars.var('y', from_, label, to)
)
)
if old_size > 0:
for from_ in range(old_size):
for to in range(old_size, size):
solver.append_formula(
_conjunction_implies_to_clauses(
(
self._vars.var('x', parent.id_, from_),
self._vars.var('x', child.id_, to),
),
self._vars.var('y', from_, label, to)
)
)
for from_ in range(old_size, size):
for to in range(old_size):
solver.append_formula(
_conjunction_implies_to_clauses(
(
self._vars.var('x', parent.id_, from_),
self._vars.var('x', child.id_, to),
),
self._vars.var('y', from_, label, to)
)
)
def _mapped_node_and_transition_force_mapping(self, solver: Solver, size: int, new_node_from: int = 0,
old_size: int = 0) -> None:
for parent in self._apta.nodes:
for label, child in parent.children.items():
if parent.id_ >= new_node_from or child.id_ >= new_node_from:
for from_ in range(old_size, size):
for to in range(old_size, size):
solver.append_formula(
_conjunction_implies_to_clauses(
(
self._vars.var('x', parent.id_, from_),
self._vars.var('y', from_, label, to),
),
self._vars.var('x', child.id_, to)
)
)
if old_size > 0:
for from_ in range(old_size):
for to in range(old_size, size):
solver.append_formula(
_conjunction_implies_to_clauses(
(
self._vars.var('x', parent.id_, from_),
self._vars.var('y', from_, label, to),
),
self._vars.var('x', child.id_, to)
)
)
for from_ in range(old_size, size):
for to in range(old_size):
solver.append_formula(
_conjunction_implies_to_clauses(
(
self._vars.var('x', parent.id_, from_),
self._vars.var('y', from_, label, | |
import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import copy
from model_search import Network
from genotypes import PRIMITIVES
from genotypes import Genotype
# ---- command-line interface for the architecture search run ----
parser = argparse.ArgumentParser("text classification")
parser.add_argument('--data_dir', type=str, default='data', help='data dir')
parser.add_argument('--workers', type=int, default=2, help='number of workers to load dataset')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--embed_size', type=int, default=300, help='embed size')
parser.add_argument('--max_features', type=int, default=120000, help='max words in dict')
parser.add_argument('--mini_data', type=int, default=50000, help='mini data')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.0, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='GPU device id')
parser.add_argument('--epochs', type=int, default=25, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=2, help='total number of layers')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='checkpoints', help='experiment path')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--arch_learning_rate', type=float, default=6e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--tmp_data_dir', type=str, default='/tmp/cache/', help='temp data dir')
parser.add_argument('--note', type=str, default='try', help='note for this run')
# action='append' flags: one value per search stage may be supplied repeatedly
parser.add_argument('--dropout_rate', action='append', default=[], help='dropout rate of skip connect')
parser.add_argument('--add_width', action='append', default=['0'], help='add channels')
parser.add_argument('--add_layers', action='append', default=['0'], help='add layers')
args = parser.parse_args()
# Each run gets a unique, timestamped experiment directory.
args.save = '{}-search-{}-{}'.format(args.save, args.note, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save)
# Log both to stdout and to <save>/log.txt with the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def load_data(args):
    """Load the training arrays from ``args.data_dir`` as a TensorDataset.

    Expects ``x_train.npy`` and ``y_train.npy`` inside ``args.data_dir``.
    If ``args.mini_data > 0``, a random subsample of that many rows is drawn
    without replacement (non-deterministic unless numpy is seeded first).

    :param args: parsed argparse namespace with ``data_dir`` and ``mini_data``
    :return: ``torch.utils.data.TensorDataset`` of (x, y) pairs
    """
    # os.path.join instead of string concatenation handles trailing slashes.
    x_train = np.load(os.path.join(args.data_dir, "x_train.npy"))
    y_train = np.load(os.path.join(args.data_dir, "y_train.npy"))
    if args.mini_data > 0:
        indices = np.random.choice(x_train.shape[0], args.mini_data, replace=False)
        x_train = x_train[indices]
        y_train = y_train[indices]
        logging.info(f'from raw data choose {args.mini_data}')
    x_train = torch.from_numpy(x_train)
    y_train = torch.from_numpy(y_train)
    return torch.utils.data.TensorDataset(x_train, y_train)
def main():
    """Run the staged (P-DARTS-style) architecture search.

    Builds the over-parameterized search network, alternates weight and
    architecture training per epoch, progressively drops low-weight
    operations via the switch matrices, and finally derives a Genotype,
    restricting the number of skip-connects in the normal cell.
    Requires a CUDA device; exits otherwise.
    """
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    # Seed every RNG involved for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    logging.info('GPU device = %d' % args.gpu)
    logging.info("args = %s", args)
    # prepare dataset
    train_data = load_data(args)
    num_train = len(train_data)
    indices = list(range(num_train))
    # First `train_portion` of indices train the weights, the rest train the
    # architecture parameters.
    split = int(np.floor(args.train_portion * num_train))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=args.workers)
    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=args.workers)
    # build Network
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # 14 edges per cell; every primitive starts enabled on every edge.
    switches = []
    for i in range(14):
        switches.append([True for j in range(len(PRIMITIVES))])
    switches_normal = copy.deepcopy(switches)
    switches_reduce = copy.deepcopy(switches)
    # To be moved to args
    num_to_keep = [3]
    num_to_drop = [3]
    if len(args.add_width) == len(num_to_keep):
        add_width = args.add_width
    else:
        add_width = [0, 0, 0]
    if len(args.add_layers) == len(num_to_keep):
        add_layers = args.add_layers
    else:
        add_layers = [0, 6, 12]
    if len(args.dropout_rate) ==len(num_to_keep):
        drop_rate = args.dropout_rate
    else:
        drop_rate = [0.0, 0.0, 0.0]
    # Number of warm-up epochs (weights only) per stage.
    eps_no_archs = [10, 10, 10]
    for sp in range(len(num_to_keep)):
        model = Network(args.embed_size,args.max_features,args.init_channels + int(add_width[sp]), 2, args.layers + int(add_layers[sp]), criterion, switches_normal=switches_normal, switches_reduce=switches_reduce, p=float(drop_rate[sp]))
        model = model.cuda()
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
        # Everything except the architecture alphas is a network weight.
        network_params = []
        for k, v in model.named_parameters():
            if not (k.endswith('alphas_normal') or k.endswith('alphas_reduce')):
                network_params.append(v)
        optimizer = torch.optim.SGD(
            network_params,
            args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay)
        optimizer_a = torch.optim.Adam(model.arch_parameters(),
            lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs), eta_min=args.learning_rate_min)
        sm_dim = -1
        epochs = args.epochs
        eps_no_arch = eps_no_archs[sp]
        scale_factor = 0.2
        for epoch in range(epochs):
            # NOTE(review): scheduler.step() before the optimizer step follows
            # the pre-1.1 PyTorch convention — confirm against the torch
            # version this repo pins.
            scheduler.step()
            lr = scheduler.get_lr()[0]
            logging.info('Epoch: %d lr: %e', epoch, lr)
            epoch_start = time.time()
            # training
            if epoch < eps_no_arch:
                # Warm-up: anneal skip-connect dropout, train weights only.
                model.p = float(drop_rate[sp]) * (epochs - epoch - 1) / epochs
                model.update_p()
                train_acc, train_obj = train(train_queue, valid_queue, model, network_params, criterion, optimizer, optimizer_a, lr, train_arch=False)
            else:
                # Joint phase: decay dropout exponentially, also train alphas.
                model.p = float(drop_rate[sp]) * np.exp(-(epoch - eps_no_arch) * scale_factor)
                model.update_p()
                train_acc, train_obj = train(train_queue, valid_queue, model, network_params, criterion, optimizer, optimizer_a, lr, train_arch=True)
            logging.info('Train_acc %f', train_acc)
            epoch_duration = time.time() - epoch_start
            logging.info('Epoch time: %ds', epoch_duration)
            # validation
            if epochs - epoch < 5:
                valid_acc, valid_obj = infer(valid_queue, model, criterion)
                logging.info('Valid_acc %f', valid_acc)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
        print('------Dropping %d paths------' % num_to_drop[sp])
        # Save switches info for s-c refinement.
        if sp == len(num_to_keep) - 1:
            switches_normal_2 = copy.deepcopy(switches_normal)
            switches_reduce_2 = copy.deepcopy(switches_reduce)
        # drop operations with low architecture weights
        arch_param = model.arch_parameters()
        normal_prob = F.softmax(arch_param[0], dim=sm_dim).data.cpu().numpy()
        for i in range(14):
            idxs = []
            for j in range(len(PRIMITIVES)):
                if switches_normal[i][j]:
                    idxs.append(j)
            if sp == len(num_to_keep) - 1:
                # for the last stage, drop all Zero operations
                drop = get_min_k_no_zero(normal_prob[i, :], idxs, num_to_drop[sp])
            else:
                drop = get_min_k(normal_prob[i, :], num_to_drop[sp])
            for idx in drop:
                switches_normal[i][idxs[idx]] = False
        reduce_prob = F.softmax(arch_param[1], dim=-1).data.cpu().numpy()
        for i in range(14):
            idxs = []
            for j in range(len(PRIMITIVES)):
                if switches_reduce[i][j]:
                    idxs.append(j)
            if sp == len(num_to_keep) - 1:
                drop = get_min_k_no_zero(reduce_prob[i, :], idxs, num_to_drop[sp])
            else:
                drop = get_min_k(reduce_prob[i, :], num_to_drop[sp])
            for idx in drop:
                switches_reduce[i][idxs[idx]] = False
        logging.info('switches_normal = %s', switches_normal)
        logging_switches(switches_normal)
        logging.info('switches_reduce = %s', switches_reduce)
        logging_switches(switches_reduce)
        if sp == len(num_to_keep) - 1:
            arch_param = model.arch_parameters()
            normal_prob = F.softmax(arch_param[0], dim=sm_dim).data.cpu().numpy()
            reduce_prob = F.softmax(arch_param[1], dim=sm_dim).data.cpu().numpy()
            normal_final = [0 for idx in range(14)]
            reduce_final = [0 for idx in range(14)]
            # remove all Zero operations
            for i in range(14):
                if switches_normal_2[i][0] == True:
                    normal_prob[i][0] = 0
                normal_final[i] = max(normal_prob[i])
                if switches_reduce_2[i][0] == True:
                    reduce_prob[i][0] = 0
                reduce_final[i] = max(reduce_prob[i])
            # Generate Architecture, similar to DARTS
            keep_normal = [0, 1]
            keep_reduce = [0, 1]
            n = 3
            start = 2
            # For each intermediate node, keep the two strongest incoming edges.
            for i in range(3):
                end = start + n
                tbsn = normal_final[start:end]
                tbsr = reduce_final[start:end]
                edge_n = sorted(range(n), key=lambda x: tbsn[x])
                keep_normal.append(edge_n[-1] + start)
                keep_normal.append(edge_n[-2] + start)
                edge_r = sorted(range(n), key=lambda x: tbsr[x])
                keep_reduce.append(edge_r[-1] + start)
                keep_reduce.append(edge_r[-2] + start)
                start = end
                n = n + 1
            # set switches according the ranking of arch parameters
            for i in range(14):
                if not i in keep_normal:
                    for j in range(len(PRIMITIVES)):
                        switches_normal[i][j] = False
                if not i in keep_reduce:
                    for j in range(len(PRIMITIVES)):
                        switches_reduce[i][j] = False
            # translate switches into genotype
            genotype = parse_network(switches_normal, switches_reduce)
            logging.info(genotype)
            ## restrict skipconnect (normal cell only)
            logging.info('Restricting skipconnect...')
            # generating genotypes with different numbers of skip-connect operations
            for sks in range(0, 9):
                max_sk = 8 - sks
                num_sk = check_sk_number(switches_normal)
                if not num_sk > max_sk:
                    continue
                while num_sk > max_sk:
                    normal_prob = delete_min_sk_prob(switches_normal, switches_normal_2, normal_prob)
                    switches_normal = keep_1_on(switches_normal_2, normal_prob)
                    switches_normal = keep_2_branches(switches_normal, normal_prob)
                    num_sk = check_sk_number(switches_normal)
                logging.info('Number of skip-connect: %d', max_sk)
                genotype = parse_network(switches_normal, switches_reduce)
                logging.info(genotype)
def train(train_queue, valid_queue, model, network_params, criterion, optimizer, optimizer_a, lr, train_arch=True):
    """Run one training epoch over `train_queue`.

    When `train_arch` is True, each step first updates the architecture
    parameters on a batch drawn from `valid_queue`, then updates the network
    weights on the training batch.

    :return: (average F1 score, average loss) over the epoch
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    for step, (input, target) in enumerate(train_queue):
        model.train()
        n = input.size(0)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        if train_arch:
            # Keep one persistent iterator over valid_queue: re-creating
            # iter(valid_queue) per step is slow on PyTorch >= 0.4.
            try:
                input_search, target_search = next(valid_queue_iter)
            except (NameError, StopIteration):
                # NameError: first arch step of the epoch, iterator not yet
                # created; StopIteration: validation stream exhausted — restart.
                # (The original bare `except:` also swallowed unrelated errors
                # such as KeyboardInterrupt.)
                valid_queue_iter = iter(valid_queue)
                input_search, target_search = next(valid_queue_iter)
            input_search = input_search.cuda()
            target_search = target_search.cuda(non_blocking=True)
            optimizer_a.zero_grad()
            logits = model(input_search)
            loss_a = criterion(logits, target_search)
            loss_a.backward()
            nn.utils.clip_grad_norm_(model.arch_parameters(), args.grad_clip)
            optimizer_a.step()
        # Weight update on the training batch.
        optimizer.zero_grad()
        logits = model(input)
        loss = criterion(logits, target)
        loss.backward()
        nn.utils.clip_grad_norm_(network_params, args.grad_clip)
        optimizer.step()
        f1 = utils.f1_scores(logits.detach(), target)
        objs.update(loss.data.item(), n)
        top1.update(f1, n)
        if step % args.report_freq == 0:
            logging.info('TRAIN Step: %03d Objs: %e R1: %f', step, objs.avg, top1.avg)
    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate `model` on `valid_queue` without gradient tracking.

    :return: (average F1 score, average loss) over the validation set
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(valid_queue):
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        # Forward pass and loss both inside no_grad: evaluation only.
        with torch.no_grad():
            logits = model(input)
            loss = criterion(logits, target)
        f1 = utils.f1_scores(logits, target)
        n = input.size(0)
        objs.update(loss.data.item(), n)
        top1.update(f1, n)
        if step % args.report_freq == 0:
            logging.info('valid %03d %e %f', step, objs.avg, top1.avg)
    return top1.avg, objs.avg
def parse_network(switches_normal, switches_reduce):
    """Translate the normal/reduce switch matrices into a Genotype.

    Each still-enabled (edge, primitive) pair becomes a gene entry
    ``(primitive_name, input_offset)``; nodes 2..5 feed the cell output.
    """
    def _gene_from(switches):
        gene = []
        start = 0
        edges_per_node = 2
        # Four intermediate nodes; node k has (2 + k) incoming edges.
        for _ in range(4):
            end = start + edges_per_node
            for edge in range(start, end):
                gene.extend((PRIMITIVES[op], edge - start)
                            for op, enabled in enumerate(switches[edge]) if enabled)
            start = end
            edges_per_node += 1
        return gene
    concat = range(2, 6)
    return Genotype(
        normal=_gene_from(switches_normal), normal_concat=concat,
        reduce=_gene_from(switches_reduce), reduce_concat=concat
    )
def get_min_k(input_in, k):
    """Return the indices of the `k` smallest entries of `input_in`.

    Ties are broken by position (lowest index first), matching repeated
    argmin selection. The original implementation marked selected entries
    with the sentinel 1, which silently re-selects indices whenever values
    can reach 1 or more, and it shadowed the builtin `input`; a stable
    argsort avoids both problems and does not mutate the input.

    :param input_in: 1-D array-like of scores (not modified)
    :param k: number of indices to return
    :return: list of `k` integer indices, smallest value first
    """
    order = np.argsort(np.asarray(input_in), kind='stable')
    return [int(i) for i in order[:k]]
def get_min_k_no_zero(w_in, idxs, k):
    """Return `k` drop positions, always dropping the Zero op first.

    If position 0 (the Zero operation) is still active (``0 in idxs``), it is
    selected unconditionally and the remaining ``k - 1`` positions are the
    smallest entries of ``w_in[1:]`` (indices reported relative to `w_in`).
    Otherwise this reduces to plain smallest-k selection. Uses a stable
    argsort instead of the original set-to-1 sentinel, which could re-select
    positions when weights reach 1 or more; `w_in` is not mutated.

    :param w_in: 1-D array-like of architecture weights for the active ops
    :param idxs: primitive indices corresponding to positions of `w_in`
    :param k: total number of positions to return
    :return: list of `k` integer positions into `w_in`
    """
    w = np.asarray(w_in)
    if 0 in idxs:
        # Zero op present: force-drop it, then pick the rest from w[1:].
        rest = np.argsort(w[1:], kind='stable')[:k - 1]
        return [0] + [int(i) + 1 for i in rest]
    order = np.argsort(w, kind='stable')
    return [int(i) for i in order[:k]]
def logging_switches(switches):
for i in range(len(switches)):
ops = []
for j in range(len(switches[i])):
if | |
"""Various sift utilities
Licensed under the 3-clause BSD License:
Copyright (c) 2011-2014, <NAME> (<EMAIL>)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, sys, time, math, os.path, random
from PIL import Image, ImageDraw, ImageColor
from utils import log, getListAsStr
try:
import simplejson as json
except:
import json
#TODO add nice command line options
# Palette used for drawing; NOTE(review): this module targets Python 2 —
# under Python 3, colormap.values() is a view, so indexing COLORS would fail.
COLORS = ImageColor.colormap.values()
def existsnonzero(fname):
    """Return 1 if `fname` exists and has size > 0, else 0 (never raises)."""
    try:
        size = os.stat(fname).st_size
    except Exception:
        # missing file, permission error, bad path — all count as "no"
        return 0
    return 1 if size > 0 else 0
def siftfname(imfname, ext=None, dir=None):
    """Returns the sift filename for the given image filename.
    Assumes it's in the same directory, unless you specify a dir.
    Tries all formats, in this order:
        .projected.gz - gzipped projected output
        .projected - projected output
        .sift.gz - gzipped vlfeat output
        .sift - vlfeat output
        .key - Lowe's binary output
    Or you can specify the extension yourself, either as a string, or a list of strings to try.
    Returns a filename, or the empty string if no suitable file found.
    Note that we're not actually checking that the file is actually in the right format.
    NOTE: `dir` shadows the builtin of the same name; `basestring` makes this
    Python-2-only code.
    """
    siftdir = dir if dir else os.path.dirname(imfname)
    # strip the image extension to get the common basename
    base = os.path.join(siftdir, os.path.basename(imfname).rsplit('.', 1)[0])
    # if they didn't specify an extension, check them all from most projected to least
    if not ext:
        ext = '.projected.gz .projected .sift.gz .sift .key'.split()
    # make list of extensions to check
    exts = [ext] if isinstance(ext, basestring) else ext
    # check each extension
    for ext in exts:
        if ext[0] != '.':
            ext = '.'+ext
        fname = base+ext
        if existsnonzero(fname):
            return fname
    # if we're here, then no valid file was found
    return ''
def grouper(n, iterable, padvalue=None):
    """Taken from Python's itertools recipes.
    Groups `iterable` into n-tuples, padding the last group with `padvalue`.
    NOTE: uses itertools.izip, so this is Python-2-only.
    >>> list(grouper(3, 'abcdefg', 'x'))
    [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]"""
    from itertools import izip, chain, repeat
    return izip(*[chain(iterable, repeat(padvalue, n-1))]*n)
class SiftFeat:
"""Keeps track of a single sift descriptor"""
    def __init__(self, x, y, scale, orientation, data):
        """Creates a new sift descriptor from all the relevant information.

        :param x: sub-pixel x location of the keypoint
        :param y: sub-pixel y location of the keypoint
        :param scale: sub-pixel keypoint scale
        :param orientation: keypoint orientation in radians, -PI to +PI
        :param data: the 128-element descriptor; values should be bytes (0-255)
        """
        # x, y, and scale are all to sub-pixel accuracy
        self.x, self.y, self.scale = x, y, scale
        # orientation is in radians from -PI to +PI
        self.orientation = orientation
        # the actual descriptor should all be bytes (0-255)
        self.data = data
@classmethod
def fakefeat(cls, val, cache={}, **kw):
"""Makes a fake sift feature for the given val.
This function is memoized in cache, so you'll always get the same feat for the same input val.
You can optionally pass in any of x, y, scale, orientation, data.
Otherwise, they are initialized to:
x: uniform(0, 100)
y: uniform(0, 100)
scale: uniform(0, 10)
orientation: uniform(0, pi)
data: randint(0,256)*128
"""
from random import uniform, randint
from math import pi
if val not in cache:
for varname, maxval in zip('x y scale orientation'.split(), [100, 100, 10, math.pi]):
if varname not in kw:
kw[varname] = uniform(0, maxval)
if 'data' not in kw:
kw['data'] = [randint(0,256) for i in range(128)]
ret = cache[val] = cls(**kw)
return cache[val]
    @classmethod
    def siftFromFile(cls, f, fmt=None):
        """Creates a list of sift features from a given file or filename or vectors.
        This tries to do the appropriate format detection.
        If f is a string, then we assume it's a filename. We handle:
            '.key' files, as dumped by Lowe's binary
            '.sift' files, as dumped by VLFeat's sift binary
            '.sift.gz' files, compressed versions of .sift files.
            '.projected' files, as dumped by projectindivmain()
            '.projected.gz' files, compressed versions of .projected files
        You can optionally specify the fmt if you know it:
            'lowe'
            'vlfeat'
            'projected'
        If f is a file, assumes it's in Lowe's format.
        Else, assumes it's a pair of (locs, fvecs).
        Returns an empty string or list on error.
        NOTE: uses the Python-2-only builtins `basestring` and `file`.
        """
        # explicit format wins over filename-based detection
        if fmt == 'lowe': return cls.siftFromLowe(f)
        if fmt == 'vlfeat': return cls.siftFromVLFeat(f)
        if fmt == 'projected': return cls.siftFromProjected(f)
        if isinstance(f, basestring):
            # it's a filename
            if f.endswith('.key'): return cls.siftFromLowe(open(f))
            if f.endswith('.sift') or f.endswith('.sift.gz'): return cls.siftFromVLFeat(f)
            if f.endswith('.projected') or f.endswith('.projected.gz'): return cls.siftFromProjected(f)
        elif isinstance(f, file):
            # it's a file itself, so assume it's in lowe's format
            return cls.siftFromLowe(f)
        else:
            # it's a list
            try:
                # see if it's a pair of (locs, fvecs)
                ret = [cls(x,y,s,o,fvec) for (x,y,s,o), fvec in zip(*f)]
            except Exception:
                # fall back: collect any elements that are already SiftFeats
                ret = []
                for el in f:
                    if isinstance(el, cls):
                        # check if it's already a siftfeat
                        ret.append(el)
            return ret
        # a filename with an unrecognized extension falls through to here
        return []
    @classmethod
    def siftFromLowe(cls, f):
        """Creates a list of sift features from text output from Lowe's original sift binary.
        `f` may be a filename or an open file. The format is a header line
        "<num> <length>" followed by whitespace-separated numbers, 4 metadata
        values (y, x, scale, orientation) then `length` descriptor bytes per
        feature. NOTE: `basestring` makes this Python-2-only.
        """
        if isinstance(f, basestring):
            f = open(f)
        # read the number of points and the length of each descriptor
        num, length = [int(i) for i in f.readline().split()]
        # read the rest of it and transform it appropriately
        all = ''.join(f.readlines()).replace('\n', ' ').split()
        items = grouper(length+4, all)
        # now read each feature
        feats = []
        for item in items:
            # the first four correspond to the metadata for that feature
            # (note the y-before-x order in Lowe's format)
            y, x, scale, orientation = [float(i) for i in item[:4]]
            # the rest of it corresponds to the actual data for the descriptor
            data = [int(i) for i in item[4:]]
            feats.append(cls(x, y, scale, orientation, data))
        return feats
    @classmethod
    def siftFromVLFeat(cls, f):
        """Creates a list of sift features from text output from VLFeat's sift binary.
        `f` may be a filename (optionally .gz-compressed) or an open file;
        each line is "x y scale orientation d0 d1 ... d127".
        NOTE: `basestring` and np.array(map(...)) make this Python-2-only.
        """
        import gzip
        import numpy as np
        # if the file is actually a filename, open it first
        if isinstance(f, basestring):
            # check for gzipped files (since we often gzip the sift files)
            f = gzip.open(f) if f.endswith('.gz') else open(f)
        # descrips are one-per-line, making it easy to parse
        feats = []
        for l in f:
            els = l.rstrip().split()
            x, y, scale, ori = map(float, els[:4])
            data = np.array(map(int, els[4:]), dtype=np.float32)
            feats.append(cls(x, y, scale, ori, data))
        return feats
    @classmethod
    def siftFromProjected(cls, f):
        """Creates a list of sift features from projected features output.
        This is for "extended mode" output, which includes the locations as well.
        Each line is "x y scale orientation fnum"; the single projected id
        `fnum` is stored as a one-element data list.
        NOTE: `basestring` makes this Python-2-only.
        """
        import gzip
        import numpy as np
        # if the file is actually a filename, open it first
        if isinstance(f, basestring):
            # check for gzipped files (since we often gzip the sift files)
            f = gzip.open(f) if f.endswith('.gz') else open(f)
        # descrips are one-per-line, making it easy to parse
        feats = []
        for l in f:
            els = l.rstrip().split()
            x, y, scale, ori = map(float, els[:4])
            fnum = int(els[4])
            #data = np.array(map(int, els[4:]), dtype=np.float32)
            feats.append(cls(x, y, scale, ori, [fnum]))
        return feats
@classmethod
def siftsFromVocab(cls, f, fnames=None):
"""Loads projected sift (or other) features from the given file.
The file can be a filename (and optionally compressed).
If 'fnames' is None (default), loads all lines of the file.
Else, fnames can contain a list of:
strings - assumes each is a filename (first 1st col of file)
ints - line numbers to read (prunes out 1st col of file)
Returns a dict mapping filenames to lists | |
<reponame>lintondf/MorrisonPolynomialFiltering
'''
Created on Feb 15, 2019
@author: NOOK
'''
import time
from typing import Tuple;
from netCDF4 import Dataset
from math import sin, cos, exp
import numpy as np
from numpy import array, array2string, diag, eye, ones, transpose, zeros, sqrt, mean, std, var,\
isscalar, arange, flip, polyder, poly1d, concatenate
from numpy import array as vector
from numpy.linalg.linalg import det, inv
from numpy.random import randn
from scipy.linalg.matfuncs import expm
from scipy.stats import chi2
from scipy.stats._continuous_distns import norm
from random import uniform
from numpy.polynomial.polynomial import Polynomial
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.mlab as mlab
import numpy as np
from polynomialfiltering.Main import AbstractFilter
from scipy.integrate.odepack import odeint
from scipy.integrate._bvp import solve_bvp
from polynomialfiltering.filters.FixedMemoryFilter import FixedMemoryFilter;
def assert_clear() -> None:
    """Reset any per-test assertion bookkeeping (no-op placeholder)."""
    return None
def assert_report(source : str, iStep : int) -> float:
    """Report accumulated assertion statistics (placeholder; always 0.0)."""
    result = 0.0
    return result
def assert_allclose( A : array, B : array, rtol=1e-07, atol=0):
    """Suite-local shim over numpy.testing.assert_allclose with fixed defaults."""
    np.testing.assert_allclose(A, B, rtol=rtol, atol=atol)
def assert_almost_equal( A : array, B : array, decimal=7):
    """Suite-local shim over numpy.testing.assert_almost_equal."""
    np.testing.assert_almost_equal(A, B, decimal=decimal)
def assert_array_less( A : array, B : array ):
    """Suite-local shim over numpy.testing.assert_array_less (A < B elementwise)."""
    np.testing.assert_array_less(A, B)
class RollingStatistics(object):
    """Welford-style running mean/variance that also supports replacing
    (retiring) old samples so a fixed-size window can be maintained.

    ``n`` counts every value ever offered; ``m`` counts values retired
    by ``replace``; ``n - m`` is the size of the active population.

    BUG FIX: the original never incremented ``self.n`` in ``append`` (so
    every call took the empty-population branch and the statistics never
    accumulated) and ``replace`` advanced only ``m`` (shrinking the
    apparent population on every replacement).
    """

    def __init__(self, N : int):
        self.N = N  # nominal window size (used by WindowedStatistics)
        self.n = 0  # total samples offered
        self.m = 0  # samples retired via replace()
        self.M = 0  # running mean
        self.S = 0  # running sum of squared deviations from the mean

    def append(self, X : float) -> None:
        """Add sample X to the active population (Welford update)."""
        self.n += 1
        k = self.n - self.m
        if k == 1:
            self.M = X
            self.S = 0
        else:
            s = (X - self.M)
            self.M += s / k
            self.S += s * (X - self.M)

    def replace(self, X : float, Y : float) -> None:
        """Add sample X while retiring previous sample Y; the active
        population size is unchanged."""
        if self.n == 0:
            # nothing to retire yet -- degenerate to a plain append
            self.append(X)
            return
        k = self.n - self.m  # active population size during the update
        s = (X - self.M)
        t = (Y - self.M)
        self.M += (X - self.M) / k - (Y - self.M) / k
        self.S += s * (X - self.M) - t * (Y - self.M)
        # X was offered and Y retired: advance both so k stays constant
        self.n += 1
        self.m += 1

    def getMean(self) -> float:
        """Mean of the active population."""
        return self.M

    def getVariance(self) -> float:
        """Sample variance (divisor k-1) of the active population."""
        k = self.n - self.m
        return self.S / (k - 1)


class WindowedStatistics(RollingStatistics):
    """Rolling mean/variance over the most recent N samples.

    BUG FIX: the original subclassed ``object`` (so ``self.n`` and the
    statistics never existed) and called the nonexistent
    ``super().add``/``super().replace``; it now derives from
    RollingStatistics and dispatches to ``append``/``replace``.
    """

    def __init__(self, N : int):
        super().__init__(N)
        self.W = zeros([N])  # circular buffer holding the last N samples

    def add(self, X : float) -> None:
        """Add X, evicting the oldest sample once the window is full."""
        slot = self.n % self.N  # capture before the counters advance
        if self.n < self.N:
            self.append(X)
        else:
            self.replace(X, self.W[slot])
        self.W[slot] = X
def createTestGroup(cdf : Dataset, name : str ) -> Dataset:
    """Create and return a new named group in the given netCDF dataset."""
    group = cdf.createGroup(name)
    return group
def readTestVariable( group : Dataset, name : str) -> array:
    """Look up a variable by name in the given netCDF group."""
    variable = group.variables[name]
    return variable
def writeTestVariable(group : Dataset, name : str, data : array) -> None:
    """Store `data` as a 2-D variable of doubles in the netCDF group.

    Scalars are promoted to shape (1, 1) and vectors to (n, 1); the two
    dimensions are registered as '<name>_N' and '<name>_M'.
    """
    shape = data.shape
    if len(shape) == 0:
        shape = (1, 1)
    elif len(shape) == 1:
        shape = (shape[0], 1)
    rowDim = '%s_N' % name
    colDim = '%s_M' % name
    group.createDimension(rowDim, shape[0])
    group.createDimension(colDim, shape[1])
    variable = group.createVariable(name, 'd', (rowDim, colDim))
    variable[:] = data
def hellingerDistance( u1, P1, u2, P2 ):
    """Hellinger distance between two Gaussian distributions.

    https://en.wikipedia.org/wiki/Hellinger_distance

    :param u1: mean of the first distribution (scalar or vector)
    :param P1: variance (scalar) or covariance matrix of the first
    :param u2: mean of the second distribution
    :param P2: variance or covariance matrix of the second
    :return: distance in [0, 1]; 0 when the distributions coincide

    BUG FIX: the scalar branch used to return the *squared* Hellinger
    distance while the matrix branch returned the distance itself; both
    branches now return the distance.
    """
    if (isscalar(u1) or len(u1) == 1) :
        e = exp(-0.25 * (u1 - u2)**2 / (P1 + P2))
        return sqrt(1.0 - sqrt((2.0 * sqrt(P1 * P2)) / (P1 + P2)) * e)
    else :
        P12 = 0.5 * (P1 + P2)
        # Bhattacharyya-coefficient form for multivariate normals
        a = det(P1)**0.25 * det(P2)**0.25 / det(P12)**0.5
        b = -(1/8) * transpose(u1 - u2) @ inv(P12) @ (u1 - u2)
        return sqrt(1 - a * exp(b))
def covarianceIntersection( P1, P2 ):
    """Fuse two covariances by fast (determinant-weighted) covariance
    intersection.

    :param P1: covariance matrix of the first estimate
    :param P2: covariance matrix of the second estimate
    :return: fused covariance inv(w1*inv(P1) + w2*inv(P2)) with w1+w2 == 1

    FIX: removed a stray debug ``print('w1 = ', w1)`` that polluted
    stdout on every call.
    """
    I1 = inv(P1)
    I2 = inv(P2)
    dI1 = det(I1)
    dI2 = det(I2)
    dI12 = det(I1 + I2)
    # closed-form weight from the fast determinant criterion
    w1 = (dI12 - dI2 + dI1) / (2 * dI12)
    w2 = 1 - w1
    return inv(w1 * I1 + w2 * I2)
# def stateTransitionMatrix(N : int, dt : float) -> array:
# '''
# Return a Pade' expanded state transition matrix of order m [RMKdR(7)]
# P(d)_i,j = (d^(j-i))/(j-i)! where 0 <= i <= j <= m elsewhere zero
#
# :param N: return matrix is (N,N)
# :param dt: time step
# '''
# B = (diag(ones([N-1]),k=1))
# return expm(dt*B)
def stateTransitionMatrix(N : int, dt : float) -> array:
    """Pade'-expanded state transition matrix of order N [RMKdR(7)].

    P(dt)[i, j] = dt**(j-i) / (j-i)! for 0 <= i <= j < N, zero elsewhere.

    :param N: the returned matrix is (N, N)
    :param dt: time step
    """
    # nilpotent superdiagonal generator; expm of it is the exact Pade form
    shift = diag(ones([N - 1]), k=1)
    return expm(dt * shift)
def generateTestData(order, N, t0, Y0, dt, bias=0.0, sigma=1.0):
    '''Generate a noisy test trajectory for filter testing.

    Returns (times, truth, observations, noise):
        times        N by 1        sample times starting at t0, step dt
        truth        N by order+1  true state (position and derivatives)
        observations N by 1        truth[:, 0] plus noise
        noise        N by 1        Gaussian noise, mean `bias`, std `sigma`

    order >= 0 propagates a polynomial state using the Pade state
    transition matrix; order < 0 generates a slow sinusoid with
    |order|+1 state elements instead.

    BUG FIX: the sinusoidal branch built observations as
    ``zeros([N]) + noise``, which broadcast (N,) + (N,1) into an N by N
    array; it now uses ``zeros([N,1])`` to match the polynomial branch.
    '''
    if not isscalar(sigma):
        # a covariance matrix was supplied; use its (0,0) deviation entry
        sigma = sigma[0, 0]
    if (order >= 0):
        truth = zeros([N, order + 1])
        noise = bias + sigma * randn(N, 1)
        observations = zeros([N, 1])
        times = zeros([N, 1])
        S = stateTransitionMatrix(order + 1, dt)
        t = t0
        Y = zeros([order + 1])
        # seed as much of the state as Y0 provides
        m = min(order + 1, len(Y0))
        Y[0:m] = Y0[0:m]
        for i in range(0, N):
            times[i] = t
            m = min(truth.shape[1], len(Y))
            truth[i, 0:m] = Y[0:m]
            observations[i] = Y[0] + noise[i]
            Y = S @ Y
            t = t + dt
    else:
        order = -order
        truth = zeros([N, order + 1])
        noise = bias + sigma * randn(N, 1)
        observations = zeros([N, 1]) + noise
        times = zeros([N, 1])
        t = t0
        # NOTE(review): Y aliases Y0, so Y0[0] is overwritten on each
        # iteration and the sinusoid compounds -- confirm this is intended.
        Y = Y0
        for i in range(0, N):
            Y[0] = Y0[0] + Y0[1] * sin(0.01 * t)
            observations[i] += Y[0]
            times[i] = t
            truth[i, :] = Y[:]
            t = t + dt
    return (times, truth, observations, noise)
def isChi2Valid( varSample, varPopulation, N, p=0.05 ) :
    """Two-sided chi-squared check that a sample variance is consistent
    with a hypothesized population variance.

    :param varSample: sample variance computed from N observations
    :param varPopulation: hypothesized population variance
    :param N: sample size
    :param p: tail probability used on each side
    :return: True when (N-1)*varSample/varPopulation lies within
        [chi2.ppf(p, N), chi2.ppf(1-p, N)]

    NOTE(review): the statistic (N-1)*s^2/sigma^2 follows a chi-squared
    distribution with N-1 degrees of freedom, but the ppf calls use N --
    confirm which degrees of freedom are intended.
    """
    y = (N-1) * varSample/varPopulation;
    yl = chi2.ppf(p, N)
    yu = chi2.ppf(1.0-p, N)
    return y >= yl and y <= yu
def A2S( A : array, format="%10.3g" ) -> str:
    """Render array A on a single wide line, formatting floats with `format`."""
    formatter = {'float_kind': lambda value: format % value}
    return array2string(A, formatter=formatter, max_line_width=256)
def correlationToCovariance( R : array, d : vector ) -> array:
    """Expand correlation matrix R into a covariance using deviations d."""
    scale = diag(d)
    return scale @ R @ scale
def covarianceToCorrelation( C : array ) -> Tuple[array, vector]:
    """Factor covariance C into (correlation matrix, deviation vector)."""
    deviations = sqrt(diag(C))
    normalizer = diag(1. / deviations)
    return (normalizer @ C @ normalizer, deviations)
def nearPSD(A,epsilon=0):
    """Approximate the nearest positive semi-definite matrix to A by
    clipping its eigenvalues at `epsilon` and rescaling columns so the
    diagonal is preserved.

    :param A: symmetric (n, n) matrix
    :param epsilon: eigenvalue floor (default 0)
    :return: (n, n) positive semi-definite np.matrix

    NOTE(review): relies on np.matrix, which is deprecated in modern
    NumPy -- consider porting to plain ndarrays.
    """
    n = A.shape[0]
    eigval, eigvec = np.linalg.eig(A)
    # clip eigenvalues from below
    val = np.matrix(np.maximum(eigval,epsilon))
    vec = np.matrix(eigvec)
    # per-row scaling that restores the original diagonal after clipping
    T = 1/(np.multiply(vec,vec) * val.T)
    T = np.matrix(np.sqrt(np.diag(np.array(T).reshape((n)) )))
    B = T * vec * np.diag(np.array(np.sqrt(val)).reshape((n)))
    out = B*B.T
    return(out)
def _getAplus(A):
eigval, eigvec = np.linalg.eig(A)
Q = np.matrix(eigvec)
xdiag = np.matrix(np.diag(np.maximum(eigval, 0)))
return Q*xdiag*Q.T
def _getPs(A, W=None):
    """W-weighted PSD projection step of the alternating algorithm."""
    half = np.matrix(W ** .5)
    return half.I * _getAplus(half * A * half) * half.I
def _getPu(A, W=None):
Aret = np.array(A.copy())
Aret[W > 0] = np.array(W)[W > 0]
return np.matrix(Aret)
def nearPD(A, nit=10):
    """Approximate the nearest positive definite matrix to A by
    alternating projections (PSD cone via _getPs, fixed entries via
    _getPu) with Dykstra's correction.

    :param A: symmetric (n, n) array
    :param nit: fixed number of iterations (no convergence test)
    :return: np.matrix approximation of the nearest PD matrix
    """
    n = A.shape[0]
    W = np.identity(n)
    # W is the matrix used for the norm (assumed to be Identity matrix here)
    # the algorithm should work for any diagonal W
    deltaS = 0
    Yk = A.copy()
    for k in range(nit):
        # Dykstra's correction keeps the alternating projections converging
        Rk = Yk - deltaS
        Xk = _getPs(Rk, W=W)
        deltaS = Xk - Rk
        Yk = _getPu(Xk, W=W)
    return Yk
def box_m(n_1,C0,n_2,C1):
    """Box's M test for the equality of two covariance matrices.

    Prints the statistic M, the scaling constants c and c2, the degrees
    of freedom and the F approximation; returns None (results are only
    printed).  The pooled covariance is stored in the global ``Xp``.

    :param n_1: sample size behind covariance C0
    :param C0: first covariance matrix (k x k)
    :param n_2: sample size behind covariance C1
    :param C1: second covariance matrix (k x k)

    NOTE(review): m (group count) and k (dimension) are hard-coded to 2,
    so only the two-group bivariate case is supported; verify the F
    approximation constants against a published reference before reuse.
    """
    global Xp
    m = 2
    k = 2 # len(np.cov(X0))
    # n_1 = len(X0[0])
    # n_2 = len(X1[0])
    n = n_1+n_2 # len(X0[0])+len(X1[0])
    print(m,k,n_1,n_2,n)
    # pooled covariance estimate
    Xp = ( ((n_1-1)*C0) + ((n_2-1)*C1) ) / (n-m)
    # Box's M statistic: pooled log-determinant minus weighted group terms
    M = ((n-m)*np.log(np.linalg.det(Xp))) \
        - (n_1-1)*(np.log(np.linalg.det(C0))) - (n_2-1)*(np.log(np.linalg.det(C1)))
    c = ( ( 2*(k**2) + (3*k) - 1 ) / ( (6*(k+1)*(m-1)) ) ) \
        * ( (1/(n_1-1)) + (1/(n_2-1)) - (1/(n-m)) )
    df = (k*(k+1)*(m-1))/2
    c2 = ( ((k-1)*(k+2)) / (6*(m-1)) ) \
        * ( (1/((n_1-1)**2)) + (1/((n_2-1)**2)) - (1/((n-m)**2)) )
    df2 = (df+2) / (np.abs(c2-c**2))
    # choose the F approximation branch based on the sign of c2 - c^2
    if (c2>c**2):
        a_plus = df / (1-c-(df/df2))
        F = M / a_plus
    else:
        a_minus = df2 / (1-c+(2/df2))
        F = (df2*M) / (df*(a_minus-M))
    print('M = {}'.format(M))
    print('c = {}'.format(c))
    print('c2 = {}'.format(c2))
    print('-------------------')
    print('df = {}'.format(df))
    print('df2 = {}'.format(df2))
    print('-------------------')
    print('F = {}'.format(F))
def scaleVRFFMP( V : array, u : float, theta : float ) -> array:
    """Scale a fading-memory (FMP) VRF matrix elementwise.

    Element (i, j) of V is multiplied by (1-theta)**(i+j+1) / u**(i+j),
    built up row/column by recurrence.
    """
    t = 1 - theta
    S = zeros(V.shape)
    S[0, 0] = t
    rows, cols = S.shape
    for i in range(1, rows):
        S[i, 0] = S[i - 1, 0] * t / u
    for i in range(rows):
        for j in range(1, cols):
            S[i, j] = S[i, j - 1] * t / u
    return S * V
def scaleVRFEMP( V : array, t : float, n : float ) -> array:
    """Scale an expanding-memory (EMP) VRF matrix elementwise.

    Element (i, j) of V is multiplied by 1 / (n * (t*n)**(i+j)), built
    up row/column by recurrence.
    """
    S = zeros([V.shape[0], V.shape[1]])
    S[0, 0] = 1.0 / n
    rows, cols = S.shape
    for i in range(1, rows):
        S[i, 0] = S[i - 1, 0] / (t * n)
    for i in range(rows):
        for j in range(1, cols):
            S[i, j] = S[i, j - 1] / (t * n)
    return S * V
def generateConsistentPolynomial( tau : float, V : array) -> array:
    """Placeholder: generate a polynomial consistent with VRF matrix V.

    Currently unimplemented and returns None.
    """
    pass
def generateTestPolynomial( order : int, N : int, t0 : float, tau : float, minY : float | |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This test checks simple acceptance of bigger blocks via p2p.
It is derived from the much more complex p2p-fullblocktest.
The intention is that small tests can be derived from this one, or
this one can be extended, to cover the checks done for bigger blocks
(e.g. sigops limits).
"""
from collections import deque
import random
import time
from test_framework.blocktools import (
create_block,
create_coinbase,
create_transaction,
make_conform_to_ctor,
)
from test_framework.comptool import TestInstance, TestManager
from test_framework.cdefs import ONE_MEGABYTE
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxOut,
HeaderAndShortIDs,
msg_cmpctblock,
msg_sendcmpct,
ser_compact_size,
)
from test_framework.mininode import (
mininode_lock,
network_thread_start,
network_thread_join,
P2PInterface,
)
from test_framework.script import CScript, OP_RETURN, OP_TRUE
from test_framework.test_framework import ComparisonTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import assert_equal, wait_until
class PreviousSpendableOutput():
    """Reference to a spendable output: a transaction plus an output index."""

    def __init__(self, tx=None, n=-1):
        # BUG FIX: the original signature used `tx=CTransaction()`, a
        # mutable default evaluated once at definition time and therefore
        # shared by every instance created without an explicit tx.
        # Create a fresh transaction per instance instead.
        self.tx = CTransaction() if tx is None else tx
        self.n = n  # the output we're spending
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(P2PInterface):
    """P2P peer that records the most recent messages of interest.

    Used to observe compact-block and header traffic from bitcoind.
    """

    def __init__(self):
        # Message slots are initialized before the base class starts
        # dispatching, so handlers never see missing attributes.
        self.last_sendcmpct = None
        self.last_cmpctblock = None
        self.last_getheaders = None
        self.last_headers = None
        super().__init__()

    def on_sendcmpct(self, message):
        self.last_sendcmpct = message

    def on_cmpctblock(self, message):
        self.last_cmpctblock = message
        self.last_cmpctblock.header_and_shortids.header.calc_sha256()

    def on_getheaders(self, message):
        self.last_getheaders = message

    def on_headers(self, message):
        self.last_headers = message
        for header in self.last_headers.headers:
            header.calc_sha256()

    def clear_block_data(self):
        """Forget recorded compact-block state (thread-safe)."""
        with mininode_lock:
            self.last_sendcmpct = None
            self.last_cmpctblock = None
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do
# the comparison.
def set_test_params(self):
    """Configure one fresh node with very permissive relay/mempool limits."""
    self.num_nodes = 1
    self.setup_clean_chain = True
    self.block_heights = {}
    self.tip = None
    self.blocks = {}
    self.excessive_block_size = 16 * ONE_MEGABYTE
    node_args = [
        '-norelaypriority',
        '-whitelist=127.0.0.1',
        '-limitancestorcount=999999',
        '-limitancestorsize=999999',
        '-limitdescendantcount=999999',
        '-limitdescendantsize=999999',
        '-maxmempool=99999',
        "-excessiveblocksize={}".format(self.excessive_block_size),
    ]
    self.extra_args = [node_args]
def add_options(self, parser):
    """Register test-specific command line options."""
    super().add_options(parser)
    parser.add_argument("--runbarelyexpensive",
                        dest="runbarelyexpensive",
                        default=True)
def run_test(self):
    """Wire up the comparison TestManager, set the initial excessive
    block size on the node, and run all generated test instances."""
    self.test = TestManager(self, self.options.tmpdir)
    self.test.add_all_connections(self.nodes)
    network_thread_start()
    # Set the blocksize to 2MB as initial condition
    # NOTE(review): the comment above says 2MB but
    # self.excessive_block_size is 16 * ONE_MEGABYTE per
    # set_test_params -- confirm which is intended.
    self.nodes[0].setexcessiveblock(self.excessive_block_size)
    self.test.run()
def add_transactions_to_block(self, block, tx_list):
    """Rehash every transaction in tx_list and append them to block.vtx."""
    # FIX: use a plain loop instead of a list comprehension executed
    # only for its side effects (which allocated a throwaway list).
    for tx in tx_list:
        tx.rehash()
    block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
    """Build a transaction spending output `n` of `spend_tx` to `script`."""
    return create_transaction(spend_tx, n, b"", value, script)
def next_block(self, number, spend=None, script=CScript([OP_TRUE]), block_size=0, extra_txns=0):
    """Build, pad, solve and register a new block on top of the current tip.

    :param number: key under which the block is stored in self.blocks
    :param spend: PreviousSpendableOutput funding the block's transactions
    :param script: optional extra scriptPubKey appended to the first tx
    :param block_size: if > 0, pad the serialized block to exactly this size
    :param extra_txns: number of extra base transactions to include
    :return: the solved block (also becomes self.tip)

    FIX: None comparisons now use `is None` / `is not None` (PEP 8)
    instead of `== None` / `!= None`.
    """
    if self.tip is None:
        base_block_hash = self.genesis_hash
        block_time = int(time.time()) + 1
    else:
        base_block_hash = self.tip.sha256
        block_time = self.tip.nTime + 1
    # First create the coinbase
    height = self.block_heights[base_block_hash] + 1
    coinbase = create_coinbase(height)
    coinbase.rehash()
    if spend is None:
        # We need to have something to spend to fill the block.
        assert_equal(block_size, 0)
        block = create_block(base_block_hash, coinbase, block_time)
    else:
        # all but one satoshi to fees
        coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, block_time)
        # Make sure we have plenty enough to spend going forward.
        spendable_outputs = deque([spend])

        def get_base_transaction():
            # Create the new transaction
            tx = CTransaction()
            # Spend from one of the spendable outputs
            spend = spendable_outputs.popleft()
            tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n)))
            # Add spendable outputs
            for i in range(4):
                tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
                spendable_outputs.append(PreviousSpendableOutput(tx, i))
            pad_tx(tx)
            return tx

        tx = get_base_transaction()
        # Make it the same format as transaction added for padding and save the size.
        # It's missing the padding output, so we add a constant to account for it.
        tx.rehash()
        base_tx_size = len(tx.serialize()) + 18
        # If a specific script is required, add it.
        if script is not None:
            tx.vout.append(CTxOut(1, script))
        # Put some random data into the first transaction of the chain to randomize ids.
        # NOTE(review): randint(0, 256) spans 257 values; 256 does not fit
        # in one byte -- confirm whether 255 was intended.
        tx.vout.append(
            CTxOut(0, CScript([random.randint(0, 256), OP_RETURN])))
        # Add the transaction to the block
        self.add_transactions_to_block(block, [tx])
        # Add transaction until we reach the expected transaction count
        for _ in range(extra_txns):
            self.add_transactions_to_block(block, [get_base_transaction()])
        # If we have a block size requirement, just fill
        # the block until we get there
        current_block_size = len(block.serialize())
        overage_bytes = 0
        while current_block_size < block_size:
            # We will add a new transaction. That means the size of
            # the field enumerating how many transaction go in the block
            # may change.
            current_block_size -= len(ser_compact_size(len(block.vtx)))
            current_block_size += len(ser_compact_size(len(block.vtx) + 1))
            # Add padding to fill the block.
            left_to_fill = block_size - current_block_size
            # Don't go over the 1 mb limit for a txn
            if left_to_fill > 500000:
                # Make sure we eat up non-divisible by 100 amounts quickly
                # Also keep transaction less than 1 MB
                left_to_fill = 500000 + left_to_fill % 100
            # Create the new transaction
            tx = get_base_transaction()
            pad_tx(tx, left_to_fill - overage_bytes)
            if len(tx.serialize()) + current_block_size > block_size:
                # Our padding was too big try again
                overage_bytes += 1
                continue
            # Add the tx to the list of transactions to be included
            # in the block.
            self.add_transactions_to_block(block, [tx])
            current_block_size += len(tx.serialize())
        # Now that we added a bunch of transaction, we need to recompute
        # the merkle root.
        make_conform_to_ctor(block)
        block.hashMerkleRoot = block.calc_merkle_root()
    # Check that the block size is what's expected
    if block_size > 0:
        assert_equal(len(block.serialize()), block_size)
    # Do PoW, which is cheap on regnet
    block.solve()
    self.tip = block
    self.block_heights[block.sha256] = height
    assert number not in self.blocks
    self.blocks[number] = block
    return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject=None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# shorthand for functions
block = self.next_block
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
# Get to one block of the May 15, 2018 HF activation
for i in range(6):
block(5100 + i)
test.blocks_and_transactions.append([self.tip, True])
# Send it all to the node at once.
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(100):
out.append(get_spendable_output())
# There can be only one network thread running at a time.
# Adding a new P2P connection here will try to start the network thread
# at init, which will throw an assertion because it's already running.
# This requires a few steps to avoid this:
# 1/ Disconnect all the TestManager nodes
# 2/ Terminate the network thread
# 3/ Add the new P2P connection
# 4/ Reconnect all the TestManager nodes
# 5/ Restart the network thread
# Disconnect all the TestManager nodes
[n.disconnect_node() for n in self.test.p2p_connections]
self.test.wait_for_disconnections()
self.test.clear_all_connections()
# Wait for the network thread to terminate
network_thread_join()
# Add the new connection
node = self.nodes[0]
node.add_p2p_connection(TestNode())
# Reconnect TestManager nodes
self.test.add_all_connections(self.nodes)
# Restart the network thread
network_thread_start()
# Wait for connection to be established
peer = node.p2p
peer.wait_for_verack()
# Check that compact block also work for big blocks
# Wait for SENDCMPCT
def received_sendcmpct():
return (peer.last_sendcmpct != None)
wait_until(received_sendcmpct, timeout=30)
sendcmpct = msg_sendcmpct()
sendcmpct.version = 1
sendcmpct.announce = True
peer.send_and_ping(sendcmpct)
# Exchange headers
def received_getheaders():
return (peer.last_getheaders != None)
wait_until(received_getheaders, timeout=30)
# Return the favor
peer.send_message(peer.last_getheaders)
# Wait for the header list
def received_headers():
return (peer.last_headers != None)
wait_until(received_headers, timeout=30)
# It's like we know about the same headers !
peer.send_message(peer.last_headers)
# Send a block
b1 = block(1, spend=out[0], block_size=ONE_MEGABYTE + 1)
yield accepted()
# Checks the node to forward it via compact block
def received_block():
return (peer.last_cmpctblock != None)
wait_until(received_block, timeout=30)
# Was it our block ?
cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header
cmpctblk_header.calc_sha256()
assert(cmpctblk_header.sha256 == b1.sha256)
# Send a large block with numerous transactions.
peer.clear_block_data()
b2 = block(2, spend=out[1], extra_txns=70000,
block_size=self.excessive_block_size - 1000)
yield accepted()
# Checks the node forwards it via compact block
wait_until(received_block, timeout=30)
# Was it our | |
null, 0.95],
[0.95, null, 0.33, 0.11],
[1.00, 0.95, 0.11, 1.00],
]
)
expected = array(
[
[0.63, 0.75, null, 1.00],
[0.75, 0.10, null, 0.95],
[null, null, null, null],
[1.00, 0.95, null, 1.00],
]
)
actual = filter_threshold_based_multiple_interdependency(
None, m, 0.33, 1, less_equal
)
assert_allclose(actual, expected)
def test_probs_from_dict(self):
    """probs_from_dict: dict of probs -> list of probs in alphabet's order"""
    probs = {"A": 0.25, "D": 0.52, "C": 0.23}
    # values come back ordered by the supplied alphabet
    for alphabet in (list("ACD"), list("ADC"), list("DCA"), CharAlphabet("DCA")):
        assert_allclose(
            probs_from_dict(probs, alphabet), [probs[c] for c in alphabet]
        )
    # protein natural probs
    ordered = probs_from_dict(protein_dict, AAGapless)
    for i in range(20):
        assert_allclose(ordered[i], protein_dict[AAGapless[i]], 0.001)
def test_freqs_from_aln(self):
    """freqs_from_aln: freqs of alphabet chars in aln is calc'ed correctly"""
    alignment = ArrayAlignment(
        data=list(zip(list(range(4)), ["ACGT", "AGCT", "ACCC", "TAGG"])),
        moltype=PROTEIN,
    )
    # non-default scaled_aln_size: raw counts, following alphabet order
    for alphabet, want in [("ACGT", [4, 5, 4, 3]), ("TGCA", [3, 4, 5, 4])]:
        assert_equal(freqs_from_aln(alignment, alphabet, 16), want)
    # default scaled_aln_size: frequencies sum to 100
    assert_allclose(freqs_from_aln(alignment, "ACGT"), [25.0, 31.25, 25, 18.75])
    # alphabet char which doesn't show up gets zero freq
    assert_allclose(
        freqs_from_aln(alignment, "ACGTW"), [25.0, 31.25, 25, 18.75, 0]
    )
    # alignment char which doesn't show up is silently ignored
    alignment = ArrayAlignment(
        data=list(zip(list(range(4)), ["ACGT", "AGCT", "ACCC", "TWGG"])),
        moltype=PROTEIN,
    )
    assert_allclose(freqs_from_aln(alignment, "ACGT"), [18.75, 31.25, 25, 18.75])
def test_freqs_to_array(self):
    """freqs_to_array: should convert CategoryCounter object to array"""
    convert = freqs_to_array
    # an empty counter maps to the zero vector
    assert_allclose(convert(CategoryCounter(), AAGapless), zeros(20))
    # full counter: keys outside the alphabet (X) are silently dropped
    counts = CategoryCounter({"A": 20, "Q": 30, "X": 20})
    want = zeros(20)
    want[AAGapless.index("A")] = 20
    want[AAGapless.index("Q")] = 30
    assert_allclose(convert(counts, AAGapless), want)
    # plain dicts and arbitrary alphabets work too
    assert_allclose(
        convert({"A": 3, "D": 1, "C": 5, "E": 2}, "ABCD"), array([3, 0, 5, 1])
    )
def test_get_allowed_perturbations(self):
    """get_allowed_perturbations: should work for different cutoff values"""
    counts = [50, 40, 10, 0]
    alphabet = list("ACGT")
    # (cutoff, expected perturbations) across the cutoff boundaries
    cases = [
        (1.0, []),
        (0.51, []),
        (0.5, ["A"]),
        (0.49, ["A"]),
        (0.401, ["A"]),
        (0.40, ["A", "C"]),
        (0.399, ["A", "C"]),
        (0.10, ["A", "C", "G"]),
        (0.0, alphabet),
    ]
    for cutoff, want in cases:
        self.assertEqual(get_allowed_perturbations(counts, cutoff, alphabet), want)
def test_get_subalignments(self):
    """get_subalignments: works with different alignment sizes and cutoffs"""
    full = ArrayAlignment(
        data={1: "AAAA", 2: "AAAC", 3: "AACG", 4: "ACCT", 5: "ACG-"},
        moltype=PROTEIN,
    )
    # selecting 'A' at position 0 keeps every sequence
    all_seqs = ArrayAlignment(
        data={1: "AAAA", 2: "AAAC", 3: "AACG", 4: "ACCT", 5: "ACG-"},
        moltype=PROTEIN,
    )
    none_match = {}
    first_three = ArrayAlignment(
        data={1: "AAAA", 2: "AAAC", 3: "AACG"}, moltype=PROTEIN
    )
    last_two = ArrayAlignment(data={4: "ACCT", 5: "ACG-"}, moltype=PROTEIN)
    only_last = ArrayAlignment(data={5: "ACG-"}, moltype=PROTEIN)
    self.assertEqual(get_subalignments(full, 0, ["A"]), [all_seqs])
    self.assertEqual(get_subalignments(full, 0, ["C"]), [none_match])
    self.assertEqual(get_subalignments(full, 1, ["A"]), [first_three])
    self.assertEqual(get_subalignments(full, 1, ["C"]), [last_two])
    self.assertEqual(
        get_subalignments(full, 1, ["A", "C"]), [first_three, last_two]
    )
    self.assertEqual(get_subalignments(full, 2, ["G"]), [only_last])
    self.assertEqual(get_subalignments(full, 3, ["-"]), [only_last])
def test_get_positional_frequencies_w_scale(self):
    """get_positional_frequencies: works with default scaled_aln_size"""
    aln = ArrayAlignment(
        data={1: "ACDE", 2: "ADDE", 3: "AEED", 4: "AFEF"}, moltype=PROTEIN
    )
    wanted = [
        array([100.0, 0.0, 0.0, 0.0, 0.0]),
        array([0.0, 25.0, 25.0, 25.0, 25.0]),
        array([0.0, 0.0, 50.0, 50.0, 0.0]),
        array([0.0, 0.0, 25.0, 50.0, 25.0]),
    ]
    for position, want in enumerate(wanted):
        assert_allclose(get_positional_frequencies(aln, position, "ACDEF"), want)
    # extra characters (W) are silently ignored -- is this the desired
    # behavior?
    aln = ArrayAlignment(
        data={1: "WCDE", 2: "ADDE", 3: "AEED", 4: "AFEF"}, moltype=PROTEIN
    )
    assert_allclose(
        get_positional_frequencies(aln, 0, "ACDEF"),
        array([75.0, 0.0, 0.0, 0.0, 0.0]),
    )
    # 20 residue amino acid alphabet
    aln = ArrayAlignment(
        data={1: "ACDE", 2: "ADDE", 3: "AEED", 4: "AFEF"}, moltype=PROTEIN
    )
    assert_allclose(
        get_positional_frequencies(aln, 0, AAGapless), array([100.0] + [0.0] * 19)
    )
def test_get_positional_frequencies(self):
    """get_positional_frequencies: works w/ non-default scaled_aln_size"""
    aln = ArrayAlignment(
        data={1: "ACDE", 2: "ADDE", 3: "AEED", 4: "AFEF"}, moltype=PROTEIN
    )
    wanted = [
        array([4.0, 0.0, 0.0, 0.0, 0.0]),
        array([0.0, 1.0, 1.0, 1.0, 1.0]),
        array([0.0, 0.0, 2.0, 2.0, 0.0]),
        array([0.0, 0.0, 1.0, 2.0, 1.0]),
    ]
    for position, want in enumerate(wanted):
        assert_allclose(get_positional_frequencies(aln, position, "ACDEF", 4), want)
    # extra characters (W) are silently ignored -- is this the desired
    # behavior?
    aln = ArrayAlignment(
        data={1: "WCDE", 2: "ADDE", 3: "AEED", 4: "AFEF"}, moltype=PROTEIN
    )
    assert_allclose(
        get_positional_frequencies(aln, 0, "ACDEF", 4),
        array([3.0, 0.0, 0.0, 0.0, 0.0]),
    )
    # 20 residue amino acid alphabet
    aln = ArrayAlignment(
        data={1: "ACDE", 2: "ADDE", 3: "AEED", 4: "AFEF"}, moltype=PROTEIN
    )
    assert_allclose(
        get_positional_frequencies(aln, 0, AAGapless, 4), array([4.0] + [0.0] * 19)
    )
def test_validate_alphabet_invalid(self):
    """validate_alphabet: raises error on incompatible alphabet and freqs"""
    freqs = {"A": 0.5, "B": 0.5}
    bad_alphabets = [
        "ABC",   # len(alpha) > len(freqs)
        "ABCD",
        "AC",    # len(alpha) == len(freqs), different chars
        "A",     # len(alpha) < len(freqs)
        "",
    ]
    for alphabet in bad_alphabets:
        self.assertRaises(ValueError, validate_alphabet, alphabet, freqs)
    # non-string alphabets and CharAlphabet behave the same way
    self.assertRaises(ValueError, validate_alphabet, [1, 42, 3], {42: 0.5, 1: 0.5})
    self.assertRaises(
        ValueError, validate_alphabet, CharAlphabet("ABC"), {"A": 0.5, "C": 0.5}
    )
def test_validate_alphabet_valid(self):
    """validate_alphabet: does nothing on compatible alphabet and freqs"""
    compatible = [
        ("AB", {"A": 0.5, "B": 0.5}),
        (CharAlphabet("AB"), {"A": 0.5, "B": 0.5}),
        ([1, 42, 8], {1: 0.5, 42: 0.25, 8: 0.25}),
    ]
    for alphabet, freqs in compatible:
        validate_alphabet(alphabet, freqs)
def test_validate_position_invalid(self):
    """validate_position: raises error on invalid position"""
    # out of range on either side of a length-4 alignment
    for position in (4, 42, -1, -199):
        self.assertRaises(ValueError, validate_position, self.dna_aln, position)
def test_validate_position_valid(self):
    """validate_position: does nothing on valid position"""
    for position in range(4):
        validate_position(self.dna_aln, position)
def test_validate_alignment(self):
    """validate_alignment: ValueError on bad alignment characters"""
    # alignments containing ambiguous characters are rejected
    ambiguous = [
        ({0: "BA", 1: "AC", 2: "CG", 3: "CT", 4: "TA"}, PROTEIN),
        ({0: "NA", 1: "AC", 2: "CG", 3: "CT", 4: "TA"}, DNA),
        ({0: "YA", 1: "AC", 2: "CG", 3: "CU", 4: "UA"}, RNA),
    ]
    for data, moltype in ambiguous:
        aln = ArrayAlignment(data=data, moltype=moltype)
        self.assertRaises(ValueError, validate_alignment, aln)
    # unambiguous alignments pass silently
    unambiguous = [
        ({0: "AA", 1: "AC", 2: "CG", 3: "CT", 4: "TA"}, PROTEIN),
        ({0: "AA", 1: "AC", 2: "CG", 3: "CT", 4: "TA"}, DNA),
        ({0: "AA", 1: "AC", 2: "CG", 3: "CU", 4: "UA"}, RNA),
    ]
    for data, moltype in unambiguous:
        validate_alignment(ArrayAlignment(data=data, moltype=moltype))
def test_coevolve_functions_validate_alignment(self):
    """coevolve_*: functions run validate alignment"""
    bad_aln = ArrayAlignment(
        data={"0": "BA", "1": "AC", "2": "CG", "3": "CT", "4": "TA"},
        moltype=PROTEIN,
    )
    # every entry point must reject the ambiguous alignment
    self.assertRaises(ValueError, coevolve_pair, mi_pair, bad_aln, 0, 1)
    self.assertRaises(ValueError, coevolve_position, mi_position, bad_aln, 0)
    self.assertRaises(ValueError, coevolve_alignment, mi_alignment, bad_aln)
    self.assertRaises(
        ValueError, coevolve_alignments, mi_alignment, bad_aln, bad_aln
    )
def test_get_positional_probabilities_w_non_def_num_seqs(self):
    """get_positional_probabilities: works w/ non-def num_seqs"""
    observed = get_positional_probabilities(
        [1.0, 2.0, 0.0], [0.33, 0.33, 0.33], 3
    )
    assert_allclose(observed, array([0.444411, 0.218889, 0.300763]))
def test_get_dg(self):
    """get_dg: returns delta_g vector"""
    position_probs = [0.1, 0.2, 0.3]
    aln_probs = [0.5, 0.6, 0.7]
    want = [log(p / a) for p, a in zip(position_probs, aln_probs)]
    assert_allclose(get_dg(position_probs, aln_probs), want)
def test_get_dgg(self):
    """get_dgg: returns delta_delta_g value given two delta_g vectors"""
    v1 = array([0.05, 0.5, 0.1])
    v2 = array([0.03, 0.05, 0.1])
    diff = v1 - v2
    want = sqrt((diff * diff).sum()) / 100 * e
    assert_allclose(get_dgg(v1, v2), want)
def test_get_positional_probabilities_w_def_num_seqs(self):
    """get_positional_probabilities: works w/ num_seqs scaled to 100 (def)"""
    observed = get_positional_probabilities(
        [15.0, 33.0, 52.0], [0.33, 0.33, 0.33]
    )
    assert_allclose(observed, array([2.4990e-5, 0.0846, 3.8350e-5]), 0.001)
def test_get_positional_probs_handles_rounding_error_in_freqs(self):
"""get_positional_probabilities: works w/ rounding error in freqs"""
# Since freqs are scaled to scaled_aln_size, rounding error can cause
# errors for positions that are perfectly controled. Testing here that
# that value error is handled.
# default scaled_aln_size
freqs = [100.0000000001, 0.0, 0.0]
probs = [0.33, 0.33, 0.33]
expected = array([7.102218e-49, 4.05024e-18, 4.05024e-18])
assert_allclose(get_positional_probabilities(freqs, probs), expected, rtol=1e-5)
# value that is truely over raises an error
freqs = [101.0000000001, 0.0, 0.0]
probs = [0.33, 0.33, 0.33]
self.assertRaises(ValueError, get_positional_probabilities, freqs, probs)
# non-default scaled_aln_size
freqs = [50.0000000001, 0.0, 0.0]
probs = [0.33, 0.33, 0.33]
expected = array([8.42747e-25, | |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with action and observation specifications.
These specifications can be nested lists and dicts of `Array` and its
subclass `BoundedArray`.
"""
from typing import Any, Mapping, Optional, Sequence, Tuple, Type, TypeVar
from absl import flags
from absl import logging
import dm_env
from dm_env import specs
import numpy as np
# Internal profiling
FLAGS = flags.FLAGS
# Defaulting to True, to prefer failing fast and closer to the bug.
flags.DEFINE_boolean('debug_specs', True,
'Debugging switch for checking values match specs.')
flags.DEFINE_integer('max_validations', 1000,
'Stop validating after this many calls.')
_validation_count = 0
ObservationSpec = Mapping[str, specs.Array]
ObservationValue = Mapping[str, np.ndarray]
ScalarOrArray = TypeVar('ScalarOrArray', np.floating, np.ndarray)
def debugging_flag() -> bool:
  """Returns whether spec/value validation checks are enabled (--debug_specs)."""
  return FLAGS.debug_specs
class TimeStepSpec(object):
"""Type specification for a TimeStep."""
def __init__(self, observation_spec: ObservationSpec,
             reward_spec: specs.Array, discount_spec: specs.Array):
  """Initializes the spec.

  Args:
    observation_spec: mapping from observation name to its spec.
    reward_spec: spec for the reward field.
    discount_spec: spec for the discount field.
  """
  self._observation_spec = observation_spec
  self._reward_spec = reward_spec
  self._discount_spec = discount_spec
@property
def observation_spec(self) -> Mapping[str, specs.Array]:
  """A shallow copy of the observation spec mapping."""
  return {name: spec for name, spec in self._observation_spec.items()}
@property
def reward_spec(self) -> specs.Array:
  # Spec describing the reward returned each step.
  return self._reward_spec
@property
def discount_spec(self) -> specs.Array:
  # Spec describing the discount returned each step.
  return self._discount_spec
def validate(self, timestep: dm_env.TimeStep):
validate_observation(self.observation_spec, timestep.observation)
validate(self.reward_spec, timestep.reward)
validate(self.discount_spec, timestep.discount)
def minimum(self) -> dm_env.TimeStep:
"""Return a valid timestep with all minimum values."""
reward = minimum(self._reward_spec)
discount = minimum(self._discount_spec)
observation = {k: minimum(v) for k, v in self._observation_spec.items()}
return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
observation=observation,
discount=discount,
reward=reward)
def maximum(self) -> dm_env.TimeStep:
"""Return a valid timestep with all minimum values."""
reward = maximum(self._reward_spec)
discount = maximum(self._discount_spec)
observation = {k: maximum(v) for k, v in self._observation_spec.items()}
return dm_env.TimeStep(
step_type=dm_env.StepType.MID,
observation=observation,
discount=discount,
reward=reward)
def replace(self,
observation_spec: Optional[Mapping[str, specs.Array]] = None,
reward_spec: Optional[specs.Array] = None,
discount_spec: Optional[specs.Array] = None) -> 'TimeStepSpec':
"""Return a new TimeStepSpec with specified fields replaced."""
if observation_spec is None:
observation_spec = self._observation_spec
if reward_spec is None:
reward_spec = self._reward_spec
if discount_spec is None:
discount_spec = self._discount_spec
return TimeStepSpec(
observation_spec=observation_spec,
reward_spec=reward_spec,
discount_spec=discount_spec)
def __eq__(self, other):
if not isinstance(other, TimeStepSpec):
return False
# All the properties of the spec must be equal.
if self.reward_spec != other.reward_spec:
return False
if self.discount_spec != other.discount_spec:
return False
if len(self.observation_spec) != len(other.observation_spec):
return False
for key in self.observation_spec:
if (key not in other.observation_spec or
self.observation_spec[key] != other.observation_spec[key]):
return False
return True
def minimum(spec: specs.Array):
  """Return the smallest valid value(s) for `spec`, cast to `spec.dtype`.

  Bounded specs yield their (clipped) `minimum`; unbounded specs yield the
  dtype's representable minimum.
  """
  if hasattr(spec, 'minimum'):
    return clip(np.asarray(spec.minimum, dtype=spec.dtype), spec)
  elif np.issubdtype(spec.dtype, np.integer):
    # Bug fix: pass dtype explicitly — np.full otherwise returns the
    # platform-default integer dtype rather than the spec's dtype.
    return np.full(spec.shape, np.iinfo(spec.dtype).min, dtype=spec.dtype)
  else:
    return np.full(spec.shape, np.finfo(spec.dtype).min, dtype=spec.dtype)
def maximum(spec: specs.Array):
  """Return the largest valid value(s) for `spec`, cast to `spec.dtype`.

  Bounded specs yield their (clipped) `maximum`; unbounded specs yield the
  dtype's representable maximum.
  """
  if hasattr(spec, 'maximum'):
    return clip(np.asarray(spec.maximum, dtype=spec.dtype), spec)
  elif np.issubdtype(spec.dtype, np.integer):
    # Bug fix: pass dtype explicitly — np.full otherwise returns the
    # platform-default integer dtype rather than the spec's dtype.
    return np.full(spec.shape, np.iinfo(spec.dtype).max, dtype=spec.dtype)
  else:
    return np.full(spec.shape, np.finfo(spec.dtype).max, dtype=spec.dtype)
def zeros(action_spec: specs.Array) -> np.ndarray:
  """Create a zero value for this Spec."""
  shape, dtype = action_spec.shape, action_spec.dtype
  return np.zeros(shape=shape, dtype=dtype)
def cast(spec: specs.Array, value: ScalarOrArray) -> ScalarOrArray:
  """Cast a value to conform to a spec."""
  # Scalars go through the dtype's scalar constructor; arrays via astype.
  value_is_scalar = np.isscalar(value)
  return spec.dtype.type(value) if value_is_scalar else value.astype(spec.dtype)
def clip(value: np.ndarray, spec: specs.BoundedArray) -> np.ndarray:
  """Clips the given value according to the spec.

  Floating-point results are kept a small epsilon strictly inside the
  bounds so later validation never sees values marginally out of range.
  """
  if value is None:
    raise ValueError('no value')
  # Bug fix: spec.dtype is a np.dtype *instance*, so the original
  # isinstance(spec.dtype, np.inexact) was always False and the epsilon
  # margin was never applied. np.issubdtype performs the intended check.
  if np.issubdtype(spec.dtype, np.inexact):
    eps = np.finfo(spec.dtype).eps * 5.0
  else:
    eps = 0
  min_bound = np.array(spec.minimum, dtype=spec.dtype)
  max_bound = np.array(spec.maximum, dtype=spec.dtype)
  return np.clip(value, min_bound + eps, max_bound - eps)
def shrink_to_fit(
    value: np.ndarray,
    spec: specs.BoundedArray,
    ignore_nan: Optional[bool] = None,
) -> np.ndarray:
  """Scales the value towards zero to fit within spec min and max values.

  Clipping is done after scaling to ensure there are no values that are very
  slightly (say 10e-8) out of range.

  This, by nature, assumes that min <= 0 <= max for the spec.

  Args:
    value: np.ndarray to scale towards zero.
    spec: Specification for value to scale and clip.
    ignore_nan: If True, NaN values will not fail validation. If None, this is
      determined by the size of `value`, so that large values are not checked.

  Returns:
    Scaled and clipped value.

  Raises:
    ValueError: On missing values or high-dimensional values.
  """
  if value is None:
    raise ValueError('no value')
  if spec is None:
    raise ValueError('no spec')
  if not isinstance(value, np.ndarray):
    raise ValueError('value not numpy array ({})'.format(type(value)))
  if len(value.shape) > 1:
    raise ValueError('2d values not yet handled')
  if not isinstance(spec, specs.BoundedArray):
    raise ValueError('Cannot scale to spec: {})'.format(spec))
  # Scaling towards zero only works when zero is inside the bounds.
  if np.any(spec.minimum > 0) or np.any(spec.maximum < 0):
    raise ValueError('Cannot scale to spec, due to bounds: {})'.format(spec))
  # Find the strongest (smallest positive) shrink factor required by any
  # out-of-bounds element, so the whole vector is scaled uniformly and the
  # direction of `value` is preserved.
  factor = 1.0
  for val, min_val, max_val in zip(value, spec.minimum, spec.maximum):
    if val < min_val:
      new_factor = min_val / val
      if new_factor < factor and new_factor > 0:
        factor = new_factor
    if val > max_val:
      new_factor = max_val / val
      if new_factor < factor and new_factor > 0:
        factor = new_factor
  scaled = (value * factor).astype(spec.dtype)
  # Clip afterwards to guard against tiny floating-point overshoot.
  clipped = clip(scaled, spec)
  try:
    validate(spec, clipped, ignore_nan)
  except ValueError:
    # Best effort: log the failure but still return the clipped value.
    logging.error('Failed to scale %s to %s. Got: %s', value, spec, clipped)
  return clipped
def merge_specs(spec_list: Sequence[specs.BoundedArray]):
  """Merges a list of BoundedArray into one flat BoundedArray.

  The result concatenates the bounds of all input specs; its dtype is the
  common promoted dtype and its name joins the input names with tabs.
  """
  # Check all specs are flat.
  for spec in spec_list:
    if len(spec.shape) > 1:
      raise ValueError('Not merging multi-dimensional spec: {}'.format(spec))
  # Filter out no-op specs with no actuators.
  spec_list = [spec for spec in spec_list if spec.shape and spec.shape[0]]
  # Fix: np.find_common_type is deprecated (removed in NumPy 2.0);
  # np.result_type computes the same dtype promotion over a list of dtypes.
  dtype = np.result_type(*[spec.dtype for spec in spec_list])
  num_actions = 0
  name = ''
  mins = np.array([], dtype=dtype)
  maxs = np.array([], dtype=dtype)
  for i, spec in enumerate(spec_list):
    num_actions += spec.shape[0]
    if name:
      name += '\t'
    name += spec.name or f'spec_{i}'
    mins = np.concatenate([mins, spec.minimum])
    maxs = np.concatenate([maxs, spec.maximum])
  return specs.BoundedArray(
      shape=(num_actions,), dtype=dtype, minimum=mins, maximum=maxs, name=name)
def merge_primitives(values: Sequence[np.ndarray],
                     default_value: Optional[float] = None) -> np.ndarray:
  """Merge the given values (arrays) where NaNs are considered missing.

  Args:
    values: The values to merge.
    default_value: A default value to replace NaNs with, after merging.

  Returns:
    A merged value.

  Raises:
    ValueError: On ambiguity, shape/dtype mismatch, or no values.
    An ambiguity means >1 arrays have a non-nan value in the same index.
  """
  if not values:
    raise ValueError('No values to merge')
  shape, dtype = values[0].shape, values[0].dtype
  # Start from an all-NaN result; entries are claimed at most once.
  merged = np.full(shape, np.nan, dtype=dtype)
  if len(shape) != 1:
    raise ValueError('Not implemented for multi-dimensional arrays')
  for value in values:
    if value.shape != shape:
      raise ValueError('Shape mismatch, expect {} got {}. All: {}'.format(
          shape, value.shape, [v.shape for v in values]))
    if value.dtype != dtype:
      raise ValueError('dtype mismatch, expect {} got {}. All: {}'.format(
          dtype, value.dtype, [v.dtype for v in values]))
    for index in range(shape[0]):
      if np.isnan(value[index]):
        continue
      if not np.isnan(merged[index]):
        raise ValueError('Ambiguous merge at index {} with values: {}'.format(
            index, values))
      merged[index] = value[index]
  if default_value is not None:
    merged[np.isnan(merged)] = default_value
  return merged
def merge_in_default(value, default_value):
  """Fill in the given value with the parts of the default_value.

  Dicts are merged key-by-key, lists element-by-element (extending with the
  default's tail); any other non-None value wins over the default.
  """
  if value is None:
    return default_value
  if isinstance(default_value, dict):
    for key, default_item in default_value.items():
      value[key] = merge_in_default(value.get(key), default_item)
    return value
  if isinstance(value, list):
    for index, default_item in enumerate(default_value):
      if index < len(value):
        value[index] = merge_in_default(value[index], default_item)
      else:
        value.append(default_item)
    return value
  return value
def validate_timestep(spec: TimeStepSpec, timestep: dm_env.TimeStep):
  """Raises if any field of `timestep` violates the corresponding spec."""
  validate_observation(spec.observation_spec, timestep.observation)
  validate(spec.reward_spec, timestep.reward)
  validate(spec.discount_spec, timestep.discount)
def ensure_spec_compatibility(sub_specs: TimeStepSpec,
                              full_specs: TimeStepSpec):
  """Validates compatibility of 2 timestep specs.

  For the observations we only check inclusion of sub_specs in full_specs.

  Args:
    sub_specs:
    full_specs:

  Raises:
    ValueError: If the discount_spec, the reward_spec or one of the observation
      spec do not match.
    KeyError: If an observation in sub_specs is not in full_specs.
  """
  if sub_specs.discount_spec != full_specs.discount_spec:
    raise ValueError('Non matching discount specs.\nDiscount_sub_spec : {} \n'
                     'Discount_full_specs: {}\n'.format(
                         sub_specs.discount_spec, full_specs.discount_spec))
  if sub_specs.reward_spec != full_specs.reward_spec:
    raise ValueError('Non matching reward specs.\nReward_sub_spec : {} \n'
                     'Reward_spec: {}\n'.format(sub_specs.reward_spec,
                                                full_specs.reward_spec))
  # Every observation in the sub-spec must exist and match in the full spec.
  full_observations = full_specs.observation_spec
  for key, sub_obs_spec in sub_specs.observation_spec.items():
    if key not in full_observations:
      raise KeyError('Missing observation key {} in spec.'.format(key))
    if sub_obs_spec != full_observations[key]:
      raise ValueError('Non matching observation specs for key {}. \n'
                       'sub_spec = {} \n spec = {}'.format(
                           key, sub_obs_spec, full_observations[key]))
def verify_specs_equal_unbounded(expected: specs.Array, actual: specs.Array):
"""Assert that two specs are equal."""
if expected.shape != actual.shape:
raise ValueError(f'invalid shape for spec {expected.name}: '
f'{expected.shape}, actual shape: {actual.shape}')
if expected.dtype != actual.dtype:
raise ValueError(f'invalid dtype for spec {expected.name}: '
f'{expected.dtype}, actual dtype: {actual.dtype}')
if expected.name != actual.name:
raise ValueError(f'invalid | |
import tempfile
from collections import defaultdict
import pandas as pd
import six
from lazy_property import LazyProperty
from .. import numeric_datatypes, _pretty_print
from ..column.base import Column
from ..connection import Connection
from ..exception import TableDoesNotExistError, NoSuchColumnError
from ..util import process_schema_and_conn, seaborn_required
class Table(object):
"""
This class is used for representing table metadata.
:ivar pg_utils.connection.Connection conn: A connection to be used by this table.
:ivar str name: The fully-qualified name of this table.
:ivar tuple[str] column_names: A list of column names for the table, as found in the database.
:ivar tuple[Column] columns: A tuple of :class:`Column` objects.
:ivar tuple[str] numeric_columns: A list of column names corresponding to the column_names in the table that have some kind of number datatype (``int``, ``float8``, ``numeric``, etc).
"""
    @process_schema_and_conn
    def __init__(self, table_name, schema=None, conn=None, columns=None, check_existence=True, debug=False):
        """
        :param str table_name: The name of the table in the database. If it's qualified with a schema, then leave the ``schema`` argument alone.
        :param None|str schema: The name of the schema in which this table lies. If unspecified and the value of ``table_name`` doesn't include a schema, then the (OS-specified) username of the given user is taken to be the schema.
        :param None|pg_utils.connection.Connection conn: A connection object that's used to fetch data and metadata. If not specified, a new connection is made with default arguments provided for username, password, etc.
        :param str|list[str]|tuple[str] columns: An iterable of specified column names. It's used by the ``__getitem__`` magic method, so you shouldn't need to fiddle with this.
        :param bool check_existence: If enabled, an extra check is made to ensure that the table referenced by this object actually exists in the database.
        :param bool debug: Enable to get some extra logging that's useful for debugging stuff.
        """
        self._table_name = table_name
        self._schema = schema
        self.conn = conn
        self.column_names = columns
        # Metadata caches; presumably filled lazily by _process_columns
        # (defined outside this excerpt) — confirm.
        self._all_column_names = None
        self._all_column_data_types = None
        self.check_existence = check_existence
        self.debug = debug
        self._validate()
        self._process_columns()
        # Expose each column as an attribute, e.g. table.some_column.
        for col in self.columns:
            setattr(self, col.name, col)
    def _validate(self):
        # Existence check is optional so that Table.create can build the
        # object before the backing table exists in the database.
        if self.check_existence and not Table.exists(self.table_name, conn=self.conn, schema=self.schema):
            raise TableDoesNotExistError("Table {} does not exist".format(self))
    @classmethod
    @process_schema_and_conn
    def create(cls, table_name, create_stmt, conn=None, schema=None,
               *args, **kwargs):
        """
        This is the constructor that's easiest to use when creating a new table.
        :param str table_name: As mentioned above.
        :param str create_stmt: A string of SQL (presumably including a "CREATE TABLE" statement for the corresponding database table) that will be executed before ``__init__`` is run.
        .. note::
            The statement ``drop table if exists schema.table_name;`` is
            executed **before** the SQL in ``create_stmt`` is executed.
        :param None|pg_utils.connection.Connection conn: A ``Connection`` object to use for creating the table. If not specified, a new connection will be created with no arguments. Look at the docs for the Connection object for more information.
        :param None|str schema: A specified schema (optional).
        :param args: Other positional arguments to pass to the initializer.
        :param kwargs: Other keyword arguments to pass to the initializer.
        :return: The corresponding ``Table`` object *after* the ``create_stmt`` is executed.
        """
        # Skip the existence check in __init__: this table is created below.
        update_kwargs = {"check_existence": False, "conn": conn, "schema": schema}
        cur = conn.cursor()
        # Drop any previous incarnation first (cascade removes dependents).
        drop_stmt = "drop table if exists {} cascade;".format(table_name)
        if schema is not None:
            drop_stmt = "drop table if exists {}.{} cascade;".format(schema, table_name)
        cur.execute(drop_stmt)
        conn.commit()
        cur.execute(create_stmt)
        conn.commit()
        # Applied last so these values win over caller-supplied kwargs.
        kwargs.update(update_kwargs)
        return cls(table_name, *args, **kwargs)
    @classmethod
    def from_table(cls, table, *args, **kwargs):
        """
        This class method constructs a table from a given table. Used to give a fresh ``Table`` object with different columns, but all other parameters the same as the given table.
        If the ``columns`` attribute only specifies one column, then a :class:`Column` object will be returned.
        :param Table table: The table object from which the output will be created.
        :param list args: Any positional arguments (if any).
        :param dict kwargs: Any keyword arguments to pass along (if any).
        :return: Either a fresh ``Table`` or :class:`Column`, depending on whether the ``columns`` parameter is restricted to just a single column.
        :rtype: Column|Table
        """
        # `table` already passed validation, so never re-check existence.
        kwargs["check_existence"] = False
        # Carry over the connection, schema and debug flag from the source.
        kwargs.update({attr: getattr(table, attr)
                       for attr in ["conn", "schema", "debug"]})
        kwargs.setdefault("columns", table.column_names)
        # A single column name (a plain string) yields a Column object bound
        # to a fresh parent Table built over the source's columns.
        if "columns" in kwargs and isinstance(kwargs["columns"], six.string_types):
            col = kwargs["columns"]
            del kwargs["columns"]
            parent = Table.from_table(table, *args, **kwargs)
            result = Column(col, parent)
        else:
            result = cls(table.table_name, *args, **kwargs)
        return result
def select_all_query(self):
return "select {} from {}".format(",".join(self.column_names), self)
    @LazyProperty
    def count(self):
        """Returns the number of rows in the corresponding database table.

        Computed via ``select count(1)`` and cached after the first access
        (LazyProperty).
        """
        cur = self.conn.cursor()
        cur.execute("select count(1) from {}".format(self))
        return cur.fetchone()[0]
def head(self, num_rows=10, **read_sql_kwargs):
"""
Returns some of the rows, returning a corresponding Pandas DataFrame.
:param int|str num_rows: The number of rows to fetch, or ``"all"`` to fetch all of the rows.
:param dict read_sql_kwargs: Any other keyword arguments that you'd like to pass into ``pandas.read_sql`` (as documented `here <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql.html>`_).
:return: The resulting data frame.
:rtype: pandas.core.frame.DataFrame
"""
if (not isinstance(num_rows, six.integer_types) or num_rows <= 0) and \
num_rows != "all":
raise ValueError(
"'num_rows': Expected a positive integer or 'all'")
query = self.select_all_query()
if num_rows != "all":
query += " limit {}".format(num_rows)
result = pd.read_sql(query, self.conn, **read_sql_kwargs)
if len(self.column_names) == 1:
result = result[self.column_names[0]]
return result
    @LazyProperty
    def shape(self):
        """
        As in the property of Pandas DataFrames by the same name, this gives a tuple showing the dimensions of the table: ``(number of rows, number of columns)``
        """
        # `count` issues a SQL query on first access; result is then cached.
        return self.count, len(self.column_names)
    @LazyProperty
    def dtypes(self):
        """
        Mimics the `pandas.DataFrame.dtypes` property, giving a Series of dtypes (given as strings) corresponding to each column.
        :return: The Series of dtypes.
        :rtype: pd.Series
        """
        # One entry per column, indexed by column name.
        return pd.Series([c.dtype for c in self.columns], index=self.column_names)
def get_dtype_counts(self):
counts = defaultdict(int)
dtypes = self.dtypes
for dt in dtypes:
counts[dt] += 1
return pd.Series(
[counts[dt] for dt in sorted(list(counts.keys()))],
index=sorted(list(counts.keys()))
)
    def insert(self, row, columns=None):
        """
        Inserts a single tuple into the table.
        :param list|pandas.Series row: A list or Series of items to insert. If a list, its length must match up with the number of columns that we'll insert. If it's a series, the column names must be contained within the index.
        :param None|list[str]|tuple[str] columns: An iterable of column names to use, that must be contained within this table. If not specified, all of the columns are taken.
        :return: Returns a boolean indicating success.
        :rtype: bool
        """
        if columns is None:
            columns = self.column_names
        elif any([c for c in columns if c not in self.column_names]):
            raise ValueError("The following columns are not in table {}: {}".format(
                self, ",".join([str(c) for c in columns if c not in self.column_names])
            ))
        columns = list(columns)
        if isinstance(row, pd.Series):
            if any([x for x in row.index if x not in columns]):
                raise ValueError(
                    "The following index elements are not specified columns: {}".format(
                        ",".join([str(x) for x in row.index if x not in columns])
                    ))
            # Restrict to the columns actually present in the Series' index.
            columns = [c for c in columns if c in row.index]
        if len(columns) != len(row):
            raise ValueError(
                "Length of row to be inserted is not the same as the number of columns selected ({} vs {})".format(
                    len(row), len(columns)))
        # %s placeholders: the driver performs quoting/escaping of values.
        stmt = """insert into {} ({}) values ({});""".format(
            self, ", ".join(columns), ", ".join(["%s"] * len(columns))
        )
        cur = self.conn.cursor()
        cur.execute(stmt, tuple(row))
        # NOTE(review): no commit here — caller appears responsible for
        # committing the transaction; confirm.
        return bool(cur.rowcount)
def insert_csv(self, file_name, columns=None, header=True, sep=",", null="", size=8192):
"""
A wrapper around the `copy_expert <http://initd.org/psycopg/docs/cursor.html#cursor.copy_expert>`_ method of the psycopg2 cursor class to do a bulk insert into the table.
:param str file_name: The name of the CSV file.
:param None|list[str]|tuple[str] columns: An iterable of column names to use, that must be contained within this table. If not specified, all of the columns are taken.
:param bool header: Indicates whether or not the file has a header.
:param str sep: The separator character.
:param str null: The string used to indicate null values.
:param int size: The size of the buffer that ``psycopg2.cursor.copy_expert`` uses.
"""
column_str = "" if columns is None else " ({})".format(",".join([str(x) for x in columns]))
cmd = "copy {}{} from stdin delimiter '{}' null '{}' csv".format(self,
column_str,
sep, null)
if header:
cmd += " header"
else:
cmd += " no header"
with open(file_name) as f:
cur = self.conn.cursor()
cur.copy_expert(sql=cmd, file=f, size=size)
self.conn.commit()
cur.close()
def insert_dataframe(self, data_frame, encoding="utf8", **csv_kwargs):
"""
| |
<reponame>kyper999/OCRmyPDF
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# © 2013-16: jbarlow83 from Github (https://github.com/jbarlow83)
#
# Python FFI wrapper for Leptonica library
import argparse
import sys
import os
import logging
from tempfile import TemporaryFile
from ctypes.util import find_library
from .lib._leptonica import ffi
from functools import lru_cache
from enum import Enum
# Load the Leptonica shared library via the platform's library search rules.
# NOTE(review): find_library returns None when Leptonica is not installed,
# in which case ffi.dlopen will fail at import time — confirm intended.
lept = ffi.dlopen(find_library('lept'))
logger = logging.getLogger(__name__)
def stderr(*objs):
    """Python 2/3 compatible print to stderr.
    """
    prefixed = ("leptonica.py:",) + objs
    print(*prefixed, file=sys.stderr)
class LeptonicaErrorTrap(object):
    """Context manager to trap errors reported by Leptonica.
    Leptonica's error return codes are unreliable to the point of being
    almost useless. It does, however, write errors to stderr provided that is
    not disabled at its compile time. Fortunately this is done using error
    macros so it is very self-consistent.
    This context manager redirects stderr to a temporary file which is then
    read and parsed for error messages. As a side benefit, debug messages
    from Leptonica are also suppressed.
    """
    def __enter__(self):
        from io import UnsupportedOperation
        self.tmpfile = TemporaryFile()
        # Save the old stderr, and redirect stderr to temporary file
        sys.stderr.flush()
        try:
            # Duplicate the OS-level fd so Leptonica's C writes are captured.
            self.copy_of_stderr = os.dup(sys.stderr.fileno())
            os.dup2(self.tmpfile.fileno(), sys.stderr.fileno(),
                    inheritable=False)
        except UnsupportedOperation:
            # sys.stderr has no real fd (e.g. replaced by a capture object);
            # nothing to redirect in that case.
            self.copy_of_stderr = None
        return
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore old stderr
        sys.stderr.flush()
        if self.copy_of_stderr is not None:
            os.dup2(self.copy_of_stderr, sys.stderr.fileno())
            os.close(self.copy_of_stderr)
        # Get data from tmpfile (in with block to ensure it is closed)
        with self.tmpfile as tmpfile:
            tmpfile.seek(0)  # Cursor will be at end, so move back to beginning
            leptonica_output = tmpfile.read().decode(errors='replace')
        assert self.tmpfile.closed
        assert not sys.stderr.closed
        # If there are Python errors, let them bubble up
        if exc_type:
            logger.warning(leptonica_output)
            return False
        # If there are Leptonica errors, wrap them in Python exceptions
        if 'Error' in leptonica_output:
            if 'image file not found' in leptonica_output:
                raise FileNotFoundError()
            if 'pixWrite: stream not opened' in leptonica_output:
                raise LeptonicaIOError()
            raise LeptonicaError(leptonica_output)
        return False
class LeptonicaError(Exception):
    """Base exception for errors reported by the Leptonica library."""
class LeptonicaIOError(LeptonicaError):
    """Raised when Leptonica reports an I/O failure (read/write streams)."""
# Colormap-removal strategies, mirroring Leptonica's REMOVE_CMAP_* constants.
RemoveColormap = Enum('RemoveColormap', [
    ('to_binary', 0),
    ('to_grayscale', 1),
    ('to_full_color', 2),
    ('based_on_src', 3),
])
class Pix:
"""Wrapper around leptonica's PIX object.
Leptonica uses referencing counting on PIX objects. Also, many Leptonica
functions return the original object with an increased reference count
if the operation had no effect (for example, image skew was found to be 0).
This has complications for memory management in Python. Whenever Leptonica
returns a PIX object (new or old), we wrap it in this class, which
registers it with the FFI garbage collector. pixDestroy() decrements the
reference count and only destroys when the last reference is removed.
Leptonica's reference counting is not threadsafe. This class can be used
in a threadsafe manner if a Python threading.Lock protects the data.
This class treats Pix objects as immutable. All methods return new
modified objects. This allows convenient chaining:
>>> Pix.read('filename.jpg').scale((0.5, 0.5)).deskew().show()
"""
    def __init__(self, pix):
        # Register with the cffi garbage collector: when the last Python
        # reference disappears, Pix._pix_destroy (defined elsewhere in this
        # class) decrements Leptonica's reference count.
        self._pix = ffi.gc(pix, Pix._pix_destroy)
def __repr__(self):
if self._pix:
s = "<leptonica.Pix image size={0}x{1} depth={2} at 0x{3:x}>"
return s.format(self._pix.w, self._pix.h, self._pix.d,
int(ffi.cast("intptr_t", self._pix)))
else:
return "<leptonica.Pix image NULL>"
    def _repr_png_(self):
        """iPython display hook
        returns png version of image
        """
        # Out-parameters: Leptonica allocates the PNG buffer and writes its
        # address/size into these.
        data = ffi.new('l_uint8 **')
        size = ffi.new('size_t *')
        err = lept.pixWriteMemPng(data, size, self._pix, 0)
        if err != 0:
            raise LeptonicaIOError("pixWriteMemPng")
        # Copy the C buffer into Python bytes.
        # NOTE(review): the C buffer does not appear to be freed here
        # (cf. __getstate__, which calls lept_free) — possible leak; confirm.
        char_data = ffi.cast('char *', data[0])
        return ffi.buffer(char_data, size[0])[:]
    def __getstate__(self):
        """Pickle support: serialize the image via Leptonica's own format."""
        data = ffi.new('l_uint32 **')
        size = ffi.new('size_t *')
        err = lept.pixSerializeToMemory(self._pix, data, size)
        if err != 0:
            raise LeptonicaIOError("pixSerializeToMemory")
        char_data = ffi.cast('char *', data[0])
        # Copy from C bytes to python bytes()
        data_bytes = ffi.buffer(char_data, size[0])[:]
        # Can now free C bytes
        lept.lept_free(char_data)
        return dict(data=data_bytes)
    def __setstate__(self, state):
        """Pickle support: rebuild the PIX from the serialized bytes."""
        cdata_bytes = ffi.new('char[]', state['data'])
        cdata_uint32 = ffi.cast('l_uint32 *', cdata_bytes)
        pix = lept.pixDeserializeFromMemory(
            cdata_uint32, len(state['data']))
        # Re-run __init__ so the new PIX is registered with the GC hook.
        Pix.__init__(self, pix)
    def __eq__(self, other):
        # Equality by serialized image bytes.
        # NOTE(review): no isinstance guard — comparing against a non-Pix
        # without __getstate__ raises AttributeError instead of returning
        # NotImplemented; confirm callers never do that.
        return self.__getstate__() == other.__getstate__()
    @property
    def width(self):
        """Image width in pixels."""
        return self._pix.w
    @property
    def height(self):
        """Image height in pixels."""
        return self._pix.h
    @property
    def depth(self):
        """Bits per pixel."""
        return self._pix.d
    @property
    def size(self):
        """(width, height) tuple, like PIL.Image.size."""
        return (self._pix.w, self._pix.h)
    @property
    def info(self):
        """Metadata dict; exposes the x/y resolution as 'dpi'."""
        return {'dpi': (self._pix.xres, self._pix.yres)}
@property
def mode(self):
"Return mode like PIL.Image"
if self.depth == 1:
return '1'
elif self.depth >= 16:
return 'RGB'
elif not self._pix.colormap:
return 'L'
else:
return 'P'
    @classmethod
    def read(cls, filename):
        """Load an image file into a PIX object.
        Leptonica can load TIFF, PNM (PBM, PGM, PPM), PNG, and JPEG. If
        loading fails then the object will wrap a C null pointer.
        """
        with LeptonicaErrorTrap():
            # Leptonica takes a C string path; encode with the filesystem
            # encoding so non-ASCII paths round-trip.
            return cls(lept.pixRead(
                filename.encode(sys.getfilesystemencoding())))
    def write_implied_format(
            self, filename, jpeg_quality=0, jpeg_progressive=0):
        """Write pix to the filename, with the extension indicating format.
        jpeg_quality -- quality (iff JPEG; 1 - 100, 0 for default)
        jpeg_progressive -- (iff JPEG; 0 for baseline seq., 1 for progressive)
        """
        with LeptonicaErrorTrap():
            lept.pixWriteImpliedFormat(
                filename.encode(sys.getfilesystemencoding()),
                self._pix, jpeg_quality, jpeg_progressive)
    def topil(self):
        "Returns a PIL.Image version of this Pix"
        from PIL import Image
        # Leptonica manages data in words, so it implicitly does an endian
        # swap. Tell Pillow about this when it reads the data.
        pix = self
        if sys.byteorder == 'little':
            if self.mode == 'RGB':
                raw_mode = 'XBGR'
            elif self.mode == 'RGBA':
                # NOTE(review): the visible `mode` property never returns
                # 'RGBA', so this branch looks unreachable — confirm.
                raw_mode = 'ABGR'
            elif self.mode == '1':
                raw_mode = '1;I'
                pix = Pix(lept.pixEndianByteSwapNew(pix._pix))
            else:
                raw_mode = self.mode
                pix = Pix(lept.pixEndianByteSwapNew(pix._pix))
        else:
            raw_mode = self.mode  # no endian swap needed
        size = (pix._pix.w, pix._pix.h)
        # wpl = words per line; each word is 4 bytes.
        bytecount = pix._pix.wpl * 4 * pix._pix.h
        buf = ffi.buffer(pix._pix.data, bytecount)
        stride = pix._pix.wpl * 4
        im = Image.frombytes(self.mode, size, buf, 'raw', raw_mode, stride)
        return im
def show(self):
return self.topil().show()
    def deskew(self, reduction_factor=0):
        """Returns the deskewed pix object.
        A clone of the original is returned when the algorithm cannot find a
        skew angle with sufficient confidence.
        reduction_factor -- amount to downsample (0 for default) when searching
        for skew angle
        """
        with LeptonicaErrorTrap():
            return Pix(lept.pixDeskew(self._pix, reduction_factor))
def scale(self, scale_xy):
"Returns the pix object rescaled according to the proportions given."
with LeptonicaErrorTrap():
return Pix(lept.pixScale(self._pix, scale_xy[0], scale_xy[1]))
    def rotate180(self):
        """Return a new Pix rotated by 180 degrees."""
        with LeptonicaErrorTrap():
            return Pix(lept.pixRotate180(ffi.NULL, self._pix))
    def rotate_orth(self, quads):
        "Orthographic rotation, quads: 0-3, number of clockwise rotations"
        with LeptonicaErrorTrap():
            return Pix(lept.pixRotateOrth(self._pix, quads))
    def find_skew(self):
        """Returns a tuple (deskew angle in degrees, confidence value).
        Returns (None, None) if no angle is available.
        """
        with LeptonicaErrorTrap():
            # Out-parameters for the C call.
            angle = ffi.new('float *', 0.0)
            confidence = ffi.new('float *', 0.0)
            result = lept.pixFindSkew(self._pix, angle, confidence)
            # Leptonica convention: 0 indicates success.
            if result == 0:
                return (angle[0], confidence[0])
            else:
                return (None, None)
def convert_rgb_to_luminance(self):
with LeptonicaErrorTrap():
gray_pix = lept.pixConvertRGBToLuminance(self._pix)
if gray_pix:
return Pix(gray_pix)
return None
    def remove_colormap(self, removal_type):
        """Remove a palette
        removal_type - RemoveColormap()
        """
        # NOTE(review): removal_type is passed straight to the C call;
        # presumably callers pass the enum's integer value (or cffi accepts
        # the member) — confirm.
        with LeptonicaErrorTrap():
            return Pix(lept.pixRemoveColormap(self._pix, removal_type))
    def otsu_adaptive_threshold(
            self, tile_size=(300, 300), kernel_size=(4, 4), scorefract=0.1):
        """Binarize using tile-based Otsu thresholding.

        tile_size -- (sx, sy) tile dimensions
        kernel_size -- (smoothx, smoothy) threshold-smoothing kernel
        scorefract -- score fraction parameter forwarded to Leptonica
        Returns the thresholded Pix, or None on failure.
        """
        with LeptonicaErrorTrap():
            sx, sy = tile_size
            smoothx, smoothy = kernel_size
            # Out-parameter: the result PIX is written into p_pix.
            p_pix = ffi.new('PIX **')
            result = lept.pixOtsuAdaptiveThreshold(
                self._pix,
                sx, sy,
                smoothx, smoothy,
                scorefract,
                ffi.NULL,
                p_pix)
            if result == 0:
                return Pix(p_pix[0])
            else:
                return None
    def otsu_threshold_on_background_norm(
            self, mask=None, tile_size=(10, 15), thresh=100, mincount=50,
            bgval=255, kernel_size=(2, 2), scorefract=0.1):
        """Background-normalize, then binarize with Otsu thresholding.

        Parameters are forwarded to Leptonica's
        pixOtsuThreshOnBackgroundNorm; returns the result Pix or None.
        """
        with LeptonicaErrorTrap():
            sx, sy = tile_size
            smoothx, smoothy = kernel_size
            if mask is None:
                mask = ffi.NULL
            # Unwrap a Pix wrapper to the raw C pointer.
            if isinstance(mask, Pix):
                mask = mask._pix
            thresh_pix = lept.pixOtsuThreshOnBackgroundNorm(
                self._pix,
                mask,
                sx, sy,
                thresh, mincount, bgval,
                smoothx, smoothy,
                scorefract,
                ffi.NULL
            )
            if thresh_pix == ffi.NULL:
                return None
            return Pix(thresh_pix)
def crop_to_foreground(
self, threshold=128, mindist=70, erasedist=30, pagenum=0,
showmorph=0, display=0, pdfdir=ffi.NULL):
with LeptonicaErrorTrap():
cropbox = Box(lept.pixFindPageForeground(
self._pix,
threshold,
mindist,
erasedist,
pagenum,
showmorph,
display,
pdfdir))
print(repr(cropbox))
cropped_pix = lept.pixClipRectangle(
self._pix,
cropbox._box,
ffi.NULL)
return Pix(cropped_pix)
    def clean_background_to_white(
            self, mask=None, grayscale=None, gamma=1.0, black=0, white=255):
        """Normalize the background to white via pixCleanBackgroundToWhite.

        NOTE(review): `mask or ffi.NULL` passes `mask` through unchanged, so
        callers presumably supply raw PIX pointers (not Pix wrappers) —
        confirm, cf. otsu_threshold_on_background_norm which unwraps.
        """
        with LeptonicaErrorTrap():
            return Pix(lept.pixCleanBackgroundToWhite(
                self._pix,
                mask or ffi.NULL,
                grayscale or ffi.NULL,
                gamma,
                black,
                white))
    def gamma_trc(self, gamma=1.0, minval=0, maxval=255):
        """Apply gamma correction (tone reproduction curve) and return a
        new Pix; min/max set the output dynamic range."""
        with LeptonicaErrorTrap():
            return Pix(lept.pixGammaTRC(
                ffi.NULL,
                self._pix,
                gamma,
                minval,
                maxval
            ))
    def background_norm(
            self, mask=None, grayscale=None, tile_size=(10, 15), fg_threshold=60,
            min_count=40, bg_val=200, smooth_kernel=(2, 1)):
        """Normalize the image background via pixBackgroundNorm.

        NOTE(review): like clean_background_to_white, `mask or ffi.NULL`
        expects a raw PIX pointer rather than a Pix wrapper — confirm.
        """
        with LeptonicaErrorTrap():
            return Pix(lept.pixBackgroundNorm(
                self._pix,
                mask or ffi.NULL,
                grayscale or ffi.NULL,
                tile_size[0],
                tile_size[1],
                fg_threshold,
                min_count,
                bg_val,
                smooth_kernel[0],
                smooth_kernel[1]
            ))
    @staticmethod
    @lru_cache(maxsize=1)
    def make_pixel_sum_tab8():
        # The table is constant, so cache the single instance for reuse.
        return lept.makePixelSumTab8()
@staticmethod
def correlation_binary(pix1, pix2):
if get_leptonica_version() < 'leptonica-1.72':
# Older versions of Leptonica (pre-1.72) have a buggy
# implementation of pixCorrelationBinary that overflows on larger
# images. Ubuntu trusty has 1.70. Ubuntu PPA
# ppa:rebuntu16/avidemux+unofficial has "leptonlib" 1.73.
pix1_count = ffi.new('l_int32 *')
pix2_count = ffi.new('l_int32 *')
pixn_count = ffi.new('l_int32 *')
tab8 = Pix.make_pixel_sum_tab8()
lept.pixCountPixels(pix1._pix, pix1_count, tab8)
lept.pixCountPixels(pix2._pix, pix2_count, tab8)
pixn = Pix(lept.pixAnd(ffi.NULL, pix1._pix, pix2._pix))
lept.pixCountPixels(pixn._pix, pixn_count, tab8)
# Python converts these int32s to larger units as needed
# to avoid overflow. Overflow happens easily here.
correlation = (
(pixn_count[0] * pixn_count[0]) | |
<reponame>taDachs/jacoco-badge-generator<filename>JacocoBadgeGenerator.py<gh_stars>0
#!/usr/bin/env python3
#
# jacoco-badge-generator: Github action for generating a jacoco coverage
# percentage badge.
#
# Copyright (c) 2020-2021 <NAME>
# https://www.cicirello.org/
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import csv
import sys
import math
import pathlib
import os
import os.path
import requests
badgeTemplate = '<svg xmlns="http://www.w3.org/2000/svg" width="{4}" \
height="20" role="img" aria-label="{3}: {0}">\
<linearGradient id="s" x2="0" y2="100%">\
<stop offset="0" stop-color="#bbb" stop-opacity=".1"/>\
<stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="r">\
<rect width="{4}" height="20" rx="3" fill="#fff"/></clipPath>\
<g clip-path="url(#r)"><rect width="{5}" height="20" fill="#555"/>\
<rect x="{5}" width="43" height="20" fill="{1}"/>\
<rect width="{4}" height="20" fill="url(#s)"/></g>\
<g fill="#fff" text-anchor="middle" \
font-family="Verdana,Geneva,DejaVu Sans,sans-serif" \
text-rendering="geometricPrecision" font-size="110">\
<text aria-hidden="true" x="315" y="150" fill="#010101" \
fill-opacity=".3" transform="scale(.1)" textLength="510">{3}</text>\
<text x="{5}" y="140" transform="scale(.1)" fill="#fff" \
textLength="510">{3}</text>\
<text aria-hidden="true" x="815" y="150" \
fill="#010101" fill-opacity=".3" transform="scale(.1)" \
textLength="{2}">{0}</text><text x="815" y="140" \
transform="scale(.1)" fill="#fff" textLength="{2}">{0}</text>\
</g></svg>'
defaultColors = [ "#4c1", "#97ca00", "#a4a61d", "#dfb317", "#fe7d37", "#e05d44" ]
def generateBadge(covStr, color, badgeType="coverage", message=None) :
    """Builds the badge for a coverage value and returns it as a string.

    Keyword arguments:
    covStr - The coverage as a string.
    color - The color for the badge.
    badgeType - The text string for a label on the badge.
    message - The text on the left of the string; defaults to badgeType.
    """
    label = badgeType if message is None else message
    return _generateBadge(covStr, color, label)
def _generateBadge(covStr, color, message):
    """Fetches the badge SVG from img.shields.io and returns its text.

    The label is escaped per the shields.io URL rules: literal dashes and
    underscores are doubled first, then spaces become single underscores
    (the order of the replacements matters).
    NOTE(review): covStr appears to end with '%', so the appended '25'
    completes the '%25' URL escape — confirm callers always pass a
    percent-terminated string.
    """
    escaped = message.replace('-', '--').replace('_', '__').replace(' ', '_')
    badge_url = f"https://img.shields.io/badge/{escaped}-{covStr}25-{color[1:]}"
    response = requests.get(badge_url, allow_redirects=True)
    return response.text
def computeCoverage(fileList) :
    """Parses one or more jacoco.csv files and computes code coverage
    percentages. Returns: coverage, branchCoverage. The coverage
    is instruction coverage.

    Keyword arguments:
    fileList - A list (or any iterable) of the filenames, including path, of the jacoco.csv files.
    """
    instructionsMissed = 0
    instructionsCovered = 0
    branchesMissed = 0
    branchesCovered = 0
    for reportPath in fileList :
        with open(reportPath, newline='') as report :
            rows = csv.reader(report)
            next(rows, None)  # the first row is the column header
            for row in rows :
                instructionsMissed += int(row[3])
                instructionsCovered += int(row[4])
                branchesMissed += int(row[5])
                branchesCovered += int(row[6])
    return (calculatePercentage(instructionsCovered, instructionsMissed),
            calculatePercentage(branchesCovered, branchesMissed))
def calculatePercentage(covered, missed) :
    """Computes a coverage ratio from covered and missed counts.

    Returns 1 when missed is 0, which also handles the degenerate cases
    of an empty class (no instructions) or code with no if, switch, or
    loops (no branches).

    Keyword arguments:
    covered - The number of X covered (where X is the metric).
    missed - The number of X missed (where X is the metric).
    """
    return 1 if missed == 0 else covered / (covered + missed)
def coverageTruncatedToString(coverage) :
    """Formats a coverage fraction as a percentage string.

    Returns: coveragePercentageAsString, coverageTruncatedToOneDecimalPlace

    Truncates (never rounds) at one decimal place so a value just below a
    passing threshold is never displayed as passing (e.g. with a 70%
    threshold, 69.99999...% must not display as 70%).

    Keyword arguments:
    coverage - The coverage percentage.
    """
    truncated = int(1000 * coverage) / 10
    if truncated == int(truncated) :
        return "{0:d}%".format(int(truncated)), truncated
    return "{0:.1f}%".format(truncated), truncated
def badgeCoverageStringColorPair(coverage, cutoffs=None, colors=None) :
    """Converts the coverage percentage to a formatted string,
    and determines the badge color.
    Returns: coveragePercentageAsString, colorAsString

    Keyword arguments:
    coverage - The coverage percentage.
    cutoffs - List of percentages that begin each color interval
    (defaults to [100, 90, 80, 70, 60]).
    colors - List of badge colors in decreasing order of coverage
    percentages (defaults to defaultColors).
    """
    # Avoid mutable default arguments: resolve the defaults per call.
    # An explicitly passed empty colors list still falls back to the
    # module defaults, matching the previous behavior.
    if cutoffs is None :
        cutoffs = [100, 90, 80, 70, 60]
    if colors is None or len(colors) == 0 :
        colors = defaultColors
    cov, coverage = coverageTruncatedToString(coverage)
    c = computeColorIndex(coverage, cutoffs, len(colors))
    return cov, colors[c]
def computeColorIndex(coverage, cutoffs, numColors) :
    """Maps a coverage percentage onto an index into the color list.

    Keyword arguments:
    coverage - The coverage percentage.
    cutoffs - The thresholds for each color.
    numColors - The number of available colors.
    """
    lastInterval = min(numColors, len(cutoffs) + 1) - 1
    for index, threshold in enumerate(cutoffs[:lastInterval]) :
        if coverage >= threshold :
            return index
    # below every cutoff we can distinguish: use the last interval
    return lastInterval
def createOutputDirectories(badgesDirectory) :
    """Creates the output directory if it doesn't already exist.

    Keyword arguments:
    badgesDirectory - The badges directory
    """
    if not os.path.exists(badgesDirectory) :
        p = pathlib.Path(badgesDirectory)
        # Clear the umask so mode 0o777 is honored, but restore the
        # caller's umask afterwards instead of leaking the change to the
        # whole process for the remainder of the run.
        oldMask = os.umask(0)
        try :
            p.mkdir(mode=0o777, parents=True, exist_ok=True)
        finally :
            os.umask(oldMask)
def splitPath(filenameWithPath) :
    """Breaks a filename including path into containing directory and filename.

    A leading "./" and a leading "/" are stripped first; a path with no
    remaining "/" is reported as living in ".".

    Keyword arguments:
    filenameWithPath - The filename including path.
    """
    normalized = filenameWithPath
    if normalized.startswith("./") :
        normalized = normalized[2:]
    if normalized[0] == "/" :
        normalized = normalized[1:]
    directory, slash, name = normalized.rpartition("/")
    if slash :
        return directory, name
    return ".", name
def formFullPathToFile(directory, filename) :
    """Generates the path string for a badge file.

    Leading "./" and "/" are stripped from both parts; an empty or "."
    directory yields the bare filename.

    Keyword arguments:
    directory - The directory for the badges
    filename - The filename for the badge.
    """
    if filename[0:2] == "./" and len(filename) > 1 :
        filename = filename[2:]
    if filename[0] == "/" :
        filename = filename[1:]
    if directory[0:2] == "./" and len(directory) > 1 :
        directory = directory[2:]
    if directory[0:1] == "/" :
        directory = directory[1:]
    if directory in ("", ".") :
        return filename
    separator = "" if directory.endswith("/") else "/"
    return directory + separator + filename
def filterMissingReports(jacocoFileList, failIfMissing=False) :
    """Validates report file existence, and returns a list
    containing a subset of the report files that exist. Logs
    files that don't exist to the console as warnings.

    Keyword arguments:
    jacocoFileList - A list of jacoco.csv files.
    failIfMissing - If true and if any of the jacoco.csv files
    don't exist, then it will exit with a non-zero exit code causing
    workflow to fail.
    """
    existing = []
    for report in jacocoFileList :
        if not os.path.exists(report) :
            print("WARNING: Report file", report, "does not exist.")
        else :
            existing.append(report)
    if not existing :
        print("WARNING: No JaCoCo csv reports found.")
        if failIfMissing :
            sys.exit(1)
    if failIfMissing and len(existing) != len(jacocoFileList) :
        sys.exit(1)
    return existing
def stringToPercentage(s) :
    """Converts a string describing a percentage to a float
    in the interval [0.0, 1.0].

    Accepts any of the forms 60.2%, 60.2, or 0.602, all treated the
    same: a trailing percent sign, or any value greater than 1, causes
    division by 100. This gracefully handles user misinterpretation of
    the action input specification. Empty or malformed input yields 0.

    Keyword arguments:
    s - the string to convert.
    """
    if not s :
        return 0
    explicitPercent = s[-1] == "%"
    if explicitPercent :
        s = s[:-1].strip()
        if not s :
            return 0
    try :
        value = float(s)
    except ValueError :
        return 0
    if explicitPercent or value > 1 :
        return value / 100
    return value
def coverageIsFailing(coverage, branches, minCoverage, minBranches) :
    """Checks whether instruction coverage, branch coverage, or both are
    below the minimums required to pass the workflow run, logging a
    message for each failing metric. Actual failing behavior should be
    handled by caller.

    Keyword arguments:
    coverage - instructions coverage in interval 0.0 to 1.0.
    branches - branches coverage in interval 0.0 to 1.0.
    minCoverage - minimum instructions coverage to pass in interval 0.0 to 1.0.
    minBranches - minimum branches coverage to pass in interval 0.0 to 1.0.
    """
    belowInstructions = coverage < minCoverage
    if belowInstructions :
        print("Coverage of", coverage, "is below passing threshold of", minCoverage)
    belowBranches = branches < minBranches
    if belowBranches :
        print("Branches of", branches, "is below passing threshold of", minBranches)
    return belowInstructions or belowBranches
def getPriorCoverage(badgeFilename, whichBadge) :
"""Parses an existing badge (if one exists) returning
| |
#!/usr/bin/env python
# coding: utf-8
# systems tools
import os
import shutil
import sys
import time
import sys
import signal
import random
# multiprocess
import threading
import psutil
#format
import string
import json
#sqlite
import sqlite3
#args
import argparse
#maths
import numpy as np
#strings
import re #regular expressions
#FIFO
from collections import deque
#pandas
import pandas as pd
from IPython.display import display
#Constants
DEBUG = True
DB_FILENAME = "openv_events.db"
JSON_FILENAME = "stats.json"
TABLES_NAME = ['application', 'config', 'frameInterrupt', 'pkt', 'queue', 'rpl', 'schedule', 'sixtop', 'sixtopStates' ]
#initialization: args + sqlite connection
def init():
    """Parses the command line arguments and returns the namespace.

    Recognized options:
    --dir                      directory to parse (one directory per experiment)
    --rewrite / --no-rewrite   whether to regenerate stats when a json file
                               already exists in a subdirectory (default: no)
    """
    parser = argparse.ArgumentParser(description='data processing from a sqlite openwsn db.')
    parser.set_defaults(rewrite=False)
    parser.add_argument('--dir',
                        default="./results",
                        help='directory to parse (one directory per experiment)')
    parser.add_argument('--rewrite', dest='rewrite', action='store_true',
                        help='rewrite the stats even if a json file exists in a subdirectory')
    parser.add_argument('--no-rewrite', dest='rewrite', action='store_false',
                        help='keep the stats if a json file exists in a subdirectory')
    parsed = parser.parse_args()
    print("DIR:{}".format(parsed.dir))
    print("REWRITE:{}".format(parsed.rewrite))
    return(parsed)
#retrieves the table names
def loadTables(file_out, file_in):
    """Opens both sqlite files and lists the tables present in each.

    Returns (db_out, db_in, tables_out, tables_in): the two open
    connections and the table names found in each database.
    """
    def tableNames(conn):
        # sqlite_master has one row per schema object; keep tables only
        cur = conn.cursor()
        return [name for (name,) in
                cur.execute("SELECT name FROM sqlite_master WHERE type='table';")]
    db_out = sqlite3.connect(file_out)
    db_in = sqlite3.connect(file_in)
    tables_in = tableNames(db_in)
    tables_out = tableNames(db_out)
    return db_out, db_in, tables_out, tables_in
#merge 'in' into 'out'
def merge(db_out, db_in, tables_out, tables_in):
    """Merges every table of db_in into db_out.

    For tables already present in db_out the rows are combined in a
    temporary table which then replaces the original; otherwise the table
    schema is copied from db_in before the rows are inserted. Commits
    db_out and closes both connections when done.

    Keyword arguments:
    db_out - destination sqlite3 connection (modified in place).
    db_in - source sqlite3 connection (only read).
    tables_out - table names already present in db_out.
    tables_in - table names present in db_in.
    """
    cursor_in = db_in.cursor()
    cursor_out = db_out.cursor()
    for id_, name, filename in db_in.execute('PRAGMA database_list'):
        if name == 'main' and filename is not None:
            print("Merging tables for db {}: ".format(os.path.basename(filename)), end = '')
            break
    def copy_rows(source_table, target_table):
        # Parameterized INSERTs: the previous str(row) serialization
        # produced invalid SQL for text containing quotes and for NULLs.
        for row in cursor_in.execute("SELECT * FROM " + source_table):
            placeholders = ",".join("?" * len(row))
            cursor_out.execute(
                "INSERT INTO " + target_table + " VALUES (" + placeholders + ");", row)
    #for each table
    for table_name in tables_in:
        #one exists -> copy of in + out in a tmp table + removal + renaming
        if table_name in tables_out:
            new_table_name = table_name + "_tmp"
            try:
                #copy of the existing out table into the tmp table
                cursor_out.execute("CREATE TABLE IF NOT EXISTS " + new_table_name + " AS SELECT * FROM " + table_name)
                #copy of in into out
                copy_rows(table_name, new_table_name)
                #swap the tmp table in under the original name
                cursor_out.execute("DROP TABLE IF EXISTS " + table_name)
                cursor_out.execute("ALTER TABLE " + new_table_name + " RENAME TO " + table_name)
            except sqlite3.OperationalError:
                print("ERROR!: Merge Failed")
                cursor_out.execute("DROP TABLE IF EXISTS " + new_table_name)
        #no table exists -> creation of the schema AND copy
        else:
            for sql in cursor_in.execute("SELECT sql FROM sqlite_master WHERE type='table' AND name='"+ table_name +"'"):
                cursor_out.execute(sql[0])
            copy_rows(table_name, table_name)
    db_out.commit()
    print(".. ok")
    db_in.close()
    db_out.close()
    return
#returns a connection to the global sqlite db
def dbconn_aggregate_get(directory):
    """Returns a fresh connection to the aggregate sqlite db in directory.

    Any previous aggregate db file is deleted first, so the returned
    connection always points to an empty database.
    """
    aggregate_path = os.path.join(directory, DB_FILENAME)
    #flush a stale db file from a previous run before reconnecting
    if os.path.isfile(aggregate_path):
        os.remove(aggregate_path)
    return sqlite3.connect(aggregate_path)
#merge the individual sqlite dbs into a single one
def merge_sqllite_db(directory):
    """Merges the individual per-mote sqlite dbs of an experiment into one.

    Walks directory for files named like emulated<N>.db or m3-<N>*.db and
    merges each of them into the aggregate db. The aggregate file is
    (re)created lazily when the first matching db is found.

    NOTE(review): matching files found in subdirectories are still opened
    relative to directory itself — confirm the dbs live at the top level.
    """
    mote_db_pattern = re.compile(r"((emulated[0-9]+)|(m3-[0-9]+.*)).db$")
    dbconn = None
    aggregate_path = os.path.join(directory, DB_FILENAME)
    for dirpath, dirnames, filenames in os.walk(directory):
        for candidate in filenames:
            #db extension + emulated/m3 portname before a digit
            if mote_db_pattern.search(candidate) is None:
                continue
            if dbconn is None:
                #first match: reset/recreate the aggregate db
                dbconn = dbconn_aggregate_get(directory)
            db_out, db_in, tables_out, tables_in = loadTables(
                aggregate_path, os.path.join(directory, candidate))
            merge(db_out, db_in, tables_out, tables_in)
    if dbconn is not None:
        dbconn.close()
#multithreading is safe here because we NEVER modify the db, we just read it
def db_create_connection(db_filename):
    """Opens the sqlite db for shared use across threads.

    check_same_thread=False lets several threads share the connection;
    this is safe here because the db is only ever read, never modified.
    """
    connection = sqlite3.connect(db_filename, check_same_thread=False)
    return connection
#returns the list of motes
def motes_get(con):
    """Returns the distinct mote ids found in the sixtopStates table."""
    cur = con.cursor()
    return [moteid for (moteid,) in
            cur.execute('SELECT DISTINCT moteid FROM sixtopStates')]
#returns the list of dagroot ids
def dagroot_ids_get(con):
    """Returns the ids of the motes configured as RPL dagroot."""
    cur = con.cursor()
    return [moteid for (moteid,) in
            cur.execute('SELECT DISTINCT moteid FROM config WHERE rpl_dagroot="1"')]
#returns the configuration
def configuration_get(con):
config = {}
cur = con.cursor()
for row in cur.execute('SELECT DISTINCT sixtop_anycast, sixtop_lowest, msf_numcells, msf_maxcells, msf_mincells, neigh_maxrssi, neigh_minrssi, cexample_period FROM config'):
config['sixtop_anycast'] = row[0]
config['sixtop_lowest'] = row[1]
config['msf_numcells'] = row[2]
config['msf_maxcells'] = row[3]
config['msf_mincells'] = row[4]
config['neigh_maxrssi'] = row[5]
config['neigh_minrssi'] = row[6]
config['cexample_period'] = row[7]
return(config)
return(None)
#returns the largest ASN in the experiment
def asn_end_get(con):
    """Returns the largest ASN recorded in the queue table."""
    cur = con.cursor()
    # MAX() yields exactly one row; its value is None for an empty table
    for (max_asn,) in cur.execute('SELECT MAX(asn) as max FROM queue'):
        return(max_asn)
    return(0)
#returns the list of links (anycast = yes / no) + the tx/rx
def links_get(con):
    """Returns the list of dedicated TX cells installed during the run.

    Each link records source, neighbor(s), anycast flag, cell coordinates
    and its ASN lifetime; 'end' is None when the cell was never removed.
    """
    links = []
    cur = con.cursor()
    for row in cur.execute('SELECT DISTINCT moteid, neighbor, neighbor2, anycast, asn, slotOffset, channelOffset FROM schedule WHERE type="TX" AND shared="0" AND event="ADD"'):
        src, neigh, neigh2, anycast, asn_add, slot, channel = row
        #search for the corresponding DEL asn: the smallest one after the ADD
        asn_end = None
        cur2 = con.cursor()
        for (del_asn,) in cur2.execute('SELECT asn FROM schedule WHERE type="TX" AND shared="0" AND event="DEL" AND moteid="{0}" AND neighbor="{1}" AND slotOffset="{2}" '.format(src, neigh, slot)):
            if del_asn > asn_add and (asn_end is None or del_asn < asn_end):
                asn_end = del_asn
        links.append({'src':src, 'neigh':neigh, 'neigh2':neigh2, 'anycast':anycast, 'slot':slot, 'channel':channel, 'start':asn_add, 'end':asn_end})
    return(links)
#all l2 data tx
def l2tx_get(con):
    """Returns every L2 DATA transmission joined with its outcome.

    One dict per DATA TX event in the pkt table, left-joined (matching
    on ASN) with: the DATA RX at the receiver, the ACK TX at the
    receiver, the ACK RX back at the sender, and any CCA / start-of-frame
    interrupt logged by the receiver. Fields of a missing leg are None;
    'ack_tx' is a boolean derived from whether the ACK TX leg matched.

    Keyword arguments:
    con - an open sqlite3 connection to the merged event db.
    """
    l2tx = []
    cur = con.cursor()
    # NOTE(review): the joins pair legs purely on ASN (plus mote/l2src),
    # which assumes at most one DATA frame per mote per ASN — confirm.
    sql_request = 'SELECT dataTX.asn, dataTX.moteid, dataTX.l2dest, dataTX.slotOffset, dataTX.channelOffset, dataTX.shared, dataTX.autoCell, dataTX.buffer_pos, \
dataRX.moteid, dataRX.priority, dataRX.crc, dataRx.rssi, dataRx.buffer_pos, \
ackTX.moteid, \
ackRX.crc, \
INTRPT.intrpt\
FROM pkt AS dataTX\
LEFT JOIN(\
SELECT *\
FROM pkt\
WHERE type="DATA" AND event="RX"\
) dataRX\
ON dataTX.moteid=dataRX.l2src AND dataTX.asn=dataRX.asn\
LEFT JOIN (\
SELECT *\
FROM pkt \
WHERE type="ACK" AND event="TX"\
) ackTX\
ON ackTX.moteid = dataRX.moteid AND ackTX.asn=dataRX.asn\
LEFT JOIN(\
SELECT *\
FROM pkt \
WHERE type="ACK" AND event="RX"\
) ackRX\
ON ackTX.moteid = ackRX.l2src AND ackTX.asn=ackRX.asn\
LEFT JOIN(\
SELECT *\
FROM frameInterrupt\
WHERE (intrpt="STARTOFFRAME" AND state="S_CCATRIGGER") OR (intrpt="CCA_IDLE" AND state="S_CCATRIGGERED")\
)INTRPT\
ON INTRPT.asn=dataRX.asn AND INTRPT.moteid=dataRX.moteid\
WHERE dataTX.type="DATA" AND dataTX.event="TX" '
    #print(sql_request)
    for row in cur.execute(sql_request):
        l2tx.append({'asn': row[0], 'moteid_tx':row[1], 'moteid_dest':row[2], 'slotOffset':row[3], 'channelOffset':row[4], 'shared':row[5], 'autoCell':row[6], 'tx_buffer_pos':row[7], 'moteid_rx':row[8], 'priority_rx':row[9], 'crc_data':row[10], 'rssi':row[11], 'rx_buffer_pos':row[12], 'ack_tx':(row[13] is not None), 'crc_ack':row[14], 'intrpt':row[15]})
    return(l2tx)
##returns the list of receivers for this l2 transmission
def cex_l2receivers_get(con, l2src, asn):
    """Returns the list of receivers for one l2 transmission.

    For each mote that logged a DATA RX from l2src at this asn, records
    its rx buffer slot, crc, rssi, queue priority, and whether that mote
    transmitted an ACK in the same slot (ack_txed, 0/1). With anycast
    cells several receivers may appear for a single transmission.

    Keyword arguments:
    con - an open sqlite3 connection (shared across threads, read-only).
    l2src - moteid of the transmitting mote.
    asn - ASN of the slot in which the frame was sent.
    """
    receivers = []
    cur_rx = con.cursor()
    for rx in cur_rx.execute('SELECT moteid, buffer_pos, crc, rssi, priority FROM pkt WHERE event="RX" AND type="DATA" AND asn="{0}" AND l2src="{1}"'.format(asn, l2src)):
        if DEBUG:
            print(" rx(anycast, {1}): {0}".format(rx, l2src))
        moteid = rx[0]
        buffer_pos = rx[1]
        crc = rx[2]
        rssi = rx[3]
        priority = rx[4]
        #if the packet has been received correctly, track the corresponding ack tx
        ack_txed = 0
        if (crc == 1):
            cur_acktx = con.cursor()
            cur_acktx.execute('SELECT moteid FROM pkt WHERE event="TX" AND type="ACK" AND asn="{0}" AND l2src="{1}"'.format(asn, rx[0]))
            results = cur_acktx.fetchall()
            #an ack has been txed -> it will try to forward the packet
            if (len(results) == 0): # probably second receiver in anycast
                ack_txed = 0
            elif (len(results) == 1): #an ack has been txed
                ack_txed = 1
            elif DEBUG:
                # more than one ACK from the same mote at one ASN should not happen
                print("Hum, several acks from the same moteid? sounds strange....")
                print(results)
                print('SELECT moteid FROM pkt WHERE event="TX" AND type="ACK" AND asn="{0}" AND l2src="{1}"'.format(asn, rx[0]))
        else:
            print("BAD CRC, not POPPED")
        #insert this receiver for this hop
        receivers.append({'moteid':moteid, 'crc':crc, 'rssi':rssi, 'buffer_pos':buffer_pos, 'priority':priority, 'ack_txed':ack_txed})
    return(receivers)
#list all the l2 transmissions for a given mote (packet in the queue of a mote)
#be careful: a mote may receive the same cex_packet several times, each will constitute a different "hop"
def cex_l2transmissions_for_hop(con, l2tx_list, processingQueue, elem):
#for each TX *and* RETX in this two-ASN interval
cur_tx = con.cursor()
for tx in cur_tx.execute('SELECT asn, slotOffset, channelOffset, l2dest FROM pkt WHERE moteid="{0}" AND event="TX" AND type="DATA" AND buffer_pos="{1}" AND asn<="{2}" AND asn>="{3}" '.format(elem['l2src'], elem['buffer_pos'], elem['asn_del'], elem['asn_add'])):
asn = tx[0]
slotOffset = tx[1]
channelOffset = tx[2]
l2dest = tx[3]
#print("txdata: src={1} & asn={0}".format(asn, elem['l2src']))
#an ack has been correctly received? (crc ok)
cur_rxack = con.cursor()
ack_rcvd = 0
cur_rxack.execute('SELECT moteid, buffer_pos FROM pkt WHERE event="RX" AND type="ACK" AND asn="{0}" AND crc="1" '.format(asn))
results = cur_rxack.fetchall()
if (len(results) > 0):
ack_rcvd = 1
#list the l2 receivers for this l2 transmission
receivers = cex_l2receivers_get(con, elem['l2src'], asn)
#add the receivers to the processing list (if it sent an ack, it means it will forward the packet)
for rcvr in receivers:
if rcvr['ack_txed'] == 1:
if (DEBUG):
print("TO PROCESS: id={0}, asn={1}, buffer_pos={2}".format(rcvr['moteid'], asn, rcvr['buffer_pos']))
processingQueue.append({'l2src':rcvr['moteid'], 'buffer_pos':rcvr['buffer_pos'], 'asn_add':asn})
#we have listed everything for this hop
l2tx_list.append({'asn':tx[0], 'l2src':elem['l2src'], 'buffer_pos':elem['buffer_pos'], 'slotOffset':slotOffset, | |
<reponame>kalxas/eoxserver
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-06 19:09
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
initial = True
dependencies = [
('backends', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AcquisitionStation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AcquisitionSubType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AllowedValueRange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.FloatField()),
('end', models.FloatField()),
],
),
migrations.CreateModel(
name='ArchivingCenter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ArrayDataItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(max_length=1024)),
('format', models.CharField(blank=True, max_length=64, null=True)),
('field_index', models.PositiveSmallIntegerField(default=0)),
('band_count', models.PositiveSmallIntegerField(default=1)),
('subdataset_type', models.CharField(blank=True, max_length=64, null=True)),
('subdataset_locator', models.CharField(blank=True, max_length=1024, null=True)),
('bands_interpretation', models.PositiveSmallIntegerField(choices=[(0, b'fields'), (1, b'dimension')], default=0)),
],
),
migrations.CreateModel(
name='Browse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(max_length=1024)),
('format', models.CharField(blank=True, max_length=64, null=True)),
('style', models.CharField(blank=True, max_length=256, null=True)),
('coordinate_reference_system', models.TextField()),
('min_x', models.FloatField()),
('min_y', models.FloatField()),
('max_x', models.FloatField()),
('max_y', models.FloatField()),
('width', models.PositiveIntegerField()),
('height', models.PositiveIntegerField()),
],
),
migrations.CreateModel(
name='BrowseType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_]*$'), message=b'This field must contain a valid Name.')])),
('red_or_grey_expression', models.CharField(blank=True, max_length=512, null=True)),
('green_expression', models.CharField(blank=True, max_length=512, null=True)),
('blue_expression', models.CharField(blank=True, max_length=512, null=True)),
('alpha_expression', models.CharField(blank=True, max_length=512, null=True)),
],
),
migrations.CreateModel(
name='CollectionMetadata',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_type', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('doi', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('platform', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('platform_serial_identifier', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('instrument', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('sensor_type', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('composite_type', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('processing_level', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('orbit_type', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('spectral_range', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('wavelength', models.IntegerField(blank=True, db_index=True, null=True)),
('product_metadata_summary', models.TextField(blank=True, null=True)),
('coverage_metadata_summary', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='CollectionType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512, unique=True, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_]*$'), message=b'This field must contain a valid Name.')])),
],
),
migrations.CreateModel(
name='CoverageMetadata',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='CoverageType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512, unique=True, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_]*$'), message=b'This field must contain a valid Name.')])),
],
),
migrations.CreateModel(
name='EOObject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.CharField(max_length=256, unique=True, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_.-]*$'), message=b'This field must contain a valid NCName.')])),
('begin_time', models.DateTimeField(blank=True, null=True)),
('end_time', models.DateTimeField(blank=True, null=True)),
('footprint', django.contrib.gis.db.models.fields.GeometryField(blank=True, null=True, srid=4326)),
('inserted', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='FieldType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('index', models.PositiveSmallIntegerField()),
('identifier', models.CharField(max_length=512, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_.-]*$'), message=b'This field must contain a valid NCName.')])),
('description', models.TextField(blank=True, null=True)),
('definition', models.CharField(blank=True, max_length=512, null=True)),
('unit_of_measure', models.CharField(blank=True, max_length=64, null=True)),
('wavelength', models.FloatField(blank=True, null=True)),
('significant_figures', models.PositiveSmallIntegerField(blank=True, null=True)),
('numbits', models.PositiveSmallIntegerField(blank=True, null=True)),
('signed', models.BooleanField(default=True)),
('is_float', models.BooleanField(default=False)),
('coverage_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='field_types', to='coverages.CoverageType')),
],
options={
'ordering': ('index',),
},
),
migrations.CreateModel(
name='Frame',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Grid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, null=True, unique=True, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_]*$'), message=b'This field must contain a valid Name.')])),
('coordinate_reference_system', models.TextField()),
('axis_1_name', models.CharField(max_length=256)),
('axis_2_name', models.CharField(blank=True, max_length=256, null=True)),
('axis_3_name', models.CharField(blank=True, max_length=256, null=True)),
('axis_4_name', models.CharField(blank=True, max_length=256, null=True)),
('axis_1_type', models.SmallIntegerField(choices=[(0, b'spatial'), (1, b'elevation'), (2, b'temporal'), (3, b'other')])),
('axis_2_type', models.SmallIntegerField(blank=True, choices=[(0, b'spatial'), (1, b'elevation'), (2, b'temporal'), (3, b'other')], null=True)),
('axis_3_type', models.SmallIntegerField(blank=True, choices=[(0, b'spatial'), (1, b'elevation'), (2, b'temporal'), (3, b'other')], null=True)),
('axis_4_type', models.SmallIntegerField(blank=True, choices=[(0, b'spatial'), (1, b'elevation'), (2, b'temporal'), (3, b'other')], null=True)),
('axis_1_offset', models.CharField(blank=True, max_length=256, null=True)),
('axis_2_offset', models.CharField(blank=True, max_length=256, null=True)),
('axis_3_offset', models.CharField(blank=True, max_length=256, null=True)),
('axis_4_offset', models.CharField(blank=True, max_length=256, null=True)),
('resolution', models.PositiveIntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Mask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(max_length=1024)),
('format', models.CharField(blank=True, max_length=64, null=True)),
('geometry', django.contrib.gis.db.models.fields.GeometryField(blank=True, null=True, srid=4326)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MaskType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_]*$'), message=b'This field must contain a valid Name.')])),
],
),
migrations.CreateModel(
name='MetaDataItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(max_length=1024)),
('format', models.CharField(blank=True, max_length=64, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='NilValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=512)),
('reason', models.CharField(choices=[(b'http://www.opengis.net/def/nil/OGC/0/inapplicable', b'Inapplicable (There is no value)'), (b'http://www.opengis.net/def/nil/OGC/0/missing', b'Missing'), (b'http://www.opengis.net/def/nil/OGC/0/template', b'Template (The value will be available later)'), (b'http://www.opengis.net/def/nil/OGC/0/unknown', b'Unknown'), (b'http://www.opengis.net/def/nil/OGC/0/withheld', b'Withheld (The value is not divulged)'), (b'http://www.opengis.net/def/nil/OGC/0/AboveDetectionRange', b'Above detection range'), (b'http://www.opengis.net/def/nil/OGC/0/BelowDetectionRange', b'Below detection range')], max_length=512)),
('field_types', models.ManyToManyField(blank=True, related_name='nil_values', to='coverages.FieldType')),
],
),
migrations.CreateModel(
name='OrbitNumber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProcessingCenter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProcessingMode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProcessorName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductMetadata',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parent_identifier', models.CharField(blank=True, db_index=True, max_length=256, null=True)),
('production_status', models.PositiveSmallIntegerField(blank=True, choices=[(0, b'ARCHIVED'), (1, b'ACQUIRED'), (2, b'CANCELLED')], db_index=True, null=True)),
('acquisition_type', models.PositiveSmallIntegerField(blank=True, choices=[(0, b'NOMINAL'), (1, b'CALIBRATION'), (2, b'OTHER')], db_index=True, null=True)),
('orbit_direction', models.PositiveSmallIntegerField(blank=True, choices=[(0, b'ASCENDING'), (1, b'DESCENDING')], db_index=True, null=True)),
('product_quality_status', models.PositiveSmallIntegerField(blank=True, choices=[(0, b'NOMINAL'), (1, b'DEGRAGED')], db_index=True, null=True)),
('creation_date', models.DateTimeField(blank=True, db_index=True, null=True)),
('modification_date', models.DateTimeField(blank=True, db_index=True, null=True)),
('processing_date', models.DateTimeField(blank=True, db_index=True, null=True)),
('availability_time', models.DateTimeField(blank=True, db_index=True, null=True)),
('start_time_from_ascending_node', models.IntegerField(blank=True, db_index=True, null=True)),
('completion_time_from_ascending_node', models.IntegerField(blank=True, db_index=True, null=True)),
('illumination_azimuth_angle', models.FloatField(blank=True, db_index=True, null=True)),
('illumination_zenith_angle', models.FloatField(blank=True, db_index=True, null=True)),
('illumination_elevation_angle', models.FloatField(blank=True, db_index=True, null=True)),
('polarisation_mode', models.PositiveSmallIntegerField(blank=True, choices=[(0, b'single'), (1, b'dual'), (2, b'twin'), (3, b'quad'), (4, b'UNDEFINED')], db_index=True, null=True)),
('polarization_channels', models.PositiveSmallIntegerField(blank=True, choices=[(0, b'HV'), (1, b'HV, VH'), (2, b'VH'), (3, b'VV'), (4, b'HH, VV'), (5, b'HH, VH'), (6, b'HH, HV'), (7, b'VH, VV'), (8, b'VH, HV'), (9, b'VV, HV'), (10, b'VV, VH'), (11, b'HH'), (12, b'HH, HV, VH, VV'), (13, b'UNDEFINED')], db_index=True, null=True)),
('antenna_look_direction', models.PositiveSmallIntegerField(blank=True, choices=[(0, b'LEFT'), (1, b'RIGHT')], db_index=True, null=True)),
('minimum_incidence_angle', models.FloatField(blank=True, db_index=True, null=True)),
('maximum_incidence_angle', models.FloatField(blank=True, db_index=True, null=True)),
('doppler_frequency', models.FloatField(blank=True, db_index=True, null=True)),
('incidence_angle_variation', models.FloatField(blank=True, db_index=True, null=True)),
('cloud_cover', models.FloatField(blank=True, db_index=True, null=True)),
('snow_cover', models.FloatField(blank=True, db_index=True, null=True)),
('lowest_location', models.FloatField(blank=True, db_index=True, null=True)),
('highest_location', models.FloatField(blank=True, db_index=True, null=True)),
('acquisition_station', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.AcquisitionStation')),
('acquisition_sub_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.AcquisitionSubType')),
('archiving_center', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.ArchivingCenter')),
('frame', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.Frame')),
('orbit_number', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.OrbitNumber')),
('processing_center', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.ProcessingCenter')),
('processing_mode', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.ProcessingMode')),
('processor_name', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.ProcessorName')),
],
),
migrations.CreateModel(
name='ProductQualityDegredationTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512, unique=True, validators=[django.core.validators.RegexValidator(re.compile(b'^[a-zA-z_][a-zA-Z0-9_]*$'), message=b'This field must contain a valid Name.')])),
('allowed_coverage_types', models.ManyToManyField(blank=True, related_name='allowed_product_types', to='coverages.CoverageType')),
],
),
migrations.CreateModel(
name='ProductVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SensorMode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SwathIdentifier',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(db_index=True, max_length=256, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Collection',
fields=[
('eoobject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='coverages.EOObject')),
],
bases=('coverages.eoobject',),
),
migrations.CreateModel(
name='Coverage',
fields=[
('eoobject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='coverages.EOObject')),
('axis_1_origin', models.CharField(blank=True, max_length=256, null=True)),
('axis_2_origin', models.CharField(blank=True, max_length=256, null=True)),
('axis_3_origin', models.CharField(blank=True, max_length=256, null=True)),
('axis_4_origin', models.CharField(blank=True, max_length=256, null=True)),
('axis_1_size', models.PositiveIntegerField()),
('axis_2_size', models.PositiveIntegerField(blank=True, null=True)),
('axis_3_size', models.PositiveIntegerField(blank=True, null=True)),
('axis_4_size', models.PositiveIntegerField(blank=True, null=True)),
('collections', models.ManyToManyField(blank=True, related_name='coverages', to='coverages.Collection')),
('coverage_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='coverages', to='coverages.CoverageType')),
('grid', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='coverages.Grid')),
],
options={
'abstract': False,
},
bases=('coverages.eoobject', models.Model),
),
migrations.CreateModel(
name='Mosaic',
fields=[
('eoobject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='coverages.EOObject')),
('axis_1_origin', models.CharField(blank=True, max_length=256, null=True)),
('axis_2_origin', models.CharField(blank=True, max_length=256, null=True)),
('axis_3_origin', models.CharField(blank=True, max_length=256, null=True)),
('axis_4_origin', models.CharField(blank=True, max_length=256, null=True)),
('axis_1_size', models.PositiveIntegerField()),
('axis_2_size', models.PositiveIntegerField(blank=True, null=True)),
('axis_3_size', models.PositiveIntegerField(blank=True, null=True)),
('axis_4_size', models.PositiveIntegerField(blank=True, null=True)),
('collections', models.ManyToManyField(blank=True, related_name='mosaics', to='coverages.Collection')),
('coverage_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='mosaics', to='coverages.CoverageType')),
('grid', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='coverages.Grid')),
],
options={
'abstract': False,
},
bases=('coverages.eoobject', models.Model),
),
migrations.CreateModel(
name='Product',
fields=[
('eoobject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='coverages.EOObject')),
('collections', models.ManyToManyField(blank=True, related_name='products', to='coverages.Collection')),
('package', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='backends.Storage')),
('product_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='products', to='coverages.ProductType')),
],
bases=('coverages.eoobject',),
),
migrations.CreateModel(
name='ReservedID',
fields=[
('eoobject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='coverages.EOObject')),
('until', models.DateTimeField(blank=True, null=True)),
('request_id', models.CharField(blank=True, max_length=256, null=True)),
],
bases=('coverages.eoobject',),
),
migrations.AddField(
model_name='productmetadata',
name='product_quality_degradation_tag',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.ProductQualityDegredationTag'),
),
migrations.AddField(
model_name='productmetadata',
name='product_version',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.ProductVersion'),
),
migrations.AddField(
model_name='productmetadata',
name='sensor_mode',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='metadatas', to='coverages.SensorMode'),
),
migrations.AddField(
model_name='productmetadata',
name='swath_identifier',
field=models.ForeignKey(blank=True, | |
#
# Name: updateCache.py.
# Author: <NAME> <<EMAIL>>
#
# This project is released under the terms of the MIT license,
# which you should find included in the LICENSE file.
#
# The purpose of this script is to keep a local backup of the
# complete contents of your gmail account. These are the
# elements it backs up:
#
# - All original messages (attachments included I assume).
# - Message / thread relationships.
# - Thread / label relationships.
# - List of labels.
#
# This script will create a 'data' directory, in the current
# directory, which is where it also expects to save the
# backed up data, and to be run from.
#
# ** What format is the data backed up in:
#
# - As pickled files, using the Python pickling format.
#
# To make use of this data, you will need to write your
# own scripts to unpickle it and process it as needed.
# But the key point is that it is there, backed up, and
# accessible.
#
# ** What this script does not handle:
#
# - Removal of labels. You will need to write code to
# handle this case yourself.
#
# - Renaming of labels. Labels do not appear to have IDs
# and as such there is no easy way to relate an old
# label name to a new one. If you want the mails in
# the backed up data under the renamed label, you will
# have to move it there yourself.
#
# ** Notes about gmail and the development of this script:
#
# - Where this script receives an unexpected result back from
# gmail, it exits immediately saving any data it has
# collected that is worth saving.
#
# DO NOT RE-RUN IT UNTIL YOU HAVE USED YOUR EMAIL ACCOUNT
# IN A BROWSER AND ENSURED THAT YOU ARE NOT LOCKED OUT OF
# YOUR ACCOUNT BECAUSE OF "unusual usage detected".
#
# I am convinced that the potential case of being locked
# out of your account for up to 24 hours is more likely
# the more you use something like this script when gmail
#    has already locked you out, or just after they let you
# back into your account.
#
# - Being locked out of your account seems pretty harmless.
# I was locked out of mine about five times during the
# development of this script. Filling in the form they
# provide to get your account unlocked is.. well, all
# it seemed to get me was a confused sounding email
# telling me to reply providing the same information I
# entered into the form if I thought I shouldn't have
# been locked out. The same thing filling out the form
# was supposed to accomplish. No fricking idea what is
# going on there.
#
# - In order to avoid being locked out of your account it
# is worthwhile, when modifying this script, to enable
# the option "backupSearchResults" and to understand how
# it can help you hammer the gmail servers less.
#
# - Are the delays enough? Throughout this script, wherever
# the gmail servers are hit with requests, this is followed
# with a 2 second delay to avoid hammering them. I just
# did it as a token effort.
#
import sys, os, time
import cPickle
from getpass import getpass
from libgmail import libgmail
# Default account name offered at the login prompt; edit to taste.
defaultUsername = "no default" # "<EMAIL>"
# If you are planning to run this over and over while tweaking the script,
# enable this option so search results are pickled to disk and reused on
# later runs instead of being re-fetched -- it hammers the gmail servers
# far less (see FetchSearchResult below).
backupSearchResults = True
# Pickled message bodies, chunked into numbered dump files; each dump file
# has a companion index file (see GetMessageFileNames / GetNextMessageDumpFileName).
messageDumpFileNameTemplate = "data/messages%05d.bin"
messageIndexFileNameTemplate = "data/messages%05d.idx"
# Pickled thread id -> {message id: None} mapping.
threadMessagesFileName = "data/threadMessages.bin"
# Pickled label -> {thread id: None} mapping.
threadLabelsFileName = "data/threadLabels.bin"
# Pickled list of label names.
labelsFileName = "data/labels.bin"
def GetNextMessageDumpFileName():
    """Return the name of the next unused message dump file.

    The next file number is one past the highest existing dump file
    (or 1 when no dump files exist yet).
    """
    existing = list(GetMessageFileNames())
    nextNumber = existing[-1][0] + 1 if existing else 1
    fileName = messageDumpFileNameTemplate % nextNumber
    # The computed name must not already exist; anything else means the
    # data directory is in an inconsistent state.
    if os.path.exists(fileName):
        raise RuntimeError("Found unexpected file")
    return fileName
def GetMessageFileNames():
    """Yield (fileNumber, indexFileName, dumpFileName) for each existing
    message dump file, in ascending file-number order.

    Iteration stops at the first missing dump file; the index file is
    reported whether or not it exists on disk.
    """
    fileNumber = 0
    while True:
        fileNumber += 1
        dumpFileName = messageDumpFileNameTemplate % fileNumber
        if not os.path.exists(dumpFileName):
            return
        yield fileNumber, messageIndexFileNameTemplate % fileNumber, dumpFileName
def ValidMessage(s):
    """Return True if *s* looks like real message source.

    A message is invalid when it was never fetched (None) or when gmail
    returned an error page instead of the message (starts with "<script>").
    """
    if s is None:
        return False
    return not s.startswith("<script>")
# Temporary caches of search results (see FetchSearchResult): "-1" holds a
# first-page-only search, "-2" a complete (allPages=True) search.
tempSearchLimitedFileName = "data/tmp.SEARCH-1.bin"
tempSearchCompleteFileName = "data/tmp.SEARCH-2.bin"
def FetchSearchResult(ga, allPages=False):
    # Fetch the "all mail" folder listing from gmail, or reuse a pickled
    # copy from an earlier run when backupSearchResults is enabled (this
    # keeps repeated development runs from hammering the gmail servers).
    #   ga:       a logged-in libgmail account object.
    #   allPages: fetch every result page rather than just the first.
    if allPages:
        tempSearchFileName = tempSearchCompleteFileName
    else:
        tempSearchFileName = tempSearchLimitedFileName
    if backupSearchResults and os.path.exists(tempSearchFileName):
        print " Loading the cached search result."
        searchResult = cPickle.load(open(tempSearchFileName, "rb"))
        # The pickled result holds a stale account reference; point it at
        # the live, logged-in account before it is used.
        searchResult._account = ga
    else:
        print " Fetching the latest search result from the server...",
        searchResult = ga.getMessagesByFolder("all", allPages=allPages)
        if backupSearchResults:
            # NOTE(review): the dump file handle is never explicitly closed;
            # CPython closes it on garbage collection, but a named handle
            # with try/finally would be safer.
            cPickle.dump(searchResult, open(tempSearchFileName, "wb"))
        print "done."
    return searchResult
def PurgeCachedSearchResults():
    """Delete any pickled search-result caches left by FetchSearchResult."""
    for cacheFileName in (tempSearchLimitedFileName, tempSearchCompleteFileName):
        if os.path.exists(cacheFileName):
            os.remove(cacheFileName)
class Data:
    """Shared mutable state accumulated over one backup run.

    Further attributes (wantedMessages, cachedMessages, threadsByLabel, ...)
    are attached by the driver code at runtime.
    """
    threadTally = 0    # number of threads seen so far
    threadID = None    # id of the thread at which processing stopped early, if any
def ProcessSearchResult(result, data, minimumToCheck=None):
    # Walk the threads in a search result, recording which messages still
    # need fetching (data.wantedMessages / data.wantedThreadMessages) and
    # the thread-per-label mappings (data.threadsByLabel).
    #
    # Returns True as soon as a thread is met whose messages are all known
    # already -- i.e. previously processed territory has been reached and
    # the caller may stop.  Returns False if the whole result was walked
    # without hitting such a point.
    #
    #   minimumToCheck: when set, intended to force a minimum number of
    #                   threads to be examined before stopping early
    #                   (but see the NOTE at the guard below).
    cnt = 1
    for thread in result:
        print " %05d" % cnt,
        data.threadTally += 1
        # Threads already queued this run need no re-examination.
        if data.wantedThreadMessages.has_key(thread.id):
            print "Skipping previously processed thread", thread.id
            continue
        usedCache = True
        if len(thread._messages) == 0:
            print "Fetching messages for thread", thread.id, "from server...",
            usedCache = False
        else:
            print "Using cached messages for thread", thread.id
        # Slicing the thread triggers the (possibly remote) message fetch.
        messages = thread[:]
        if not usedCache:
            print "done."
        if not len(messages):
            # An empty thread is an unexpected server reply; bail out so any
            # collected data can be saved (see header notes on lockouts).
            print
            print "FATAL ERROR: No messages returned for thread", thread.id
            sys.exit(1)
        messageDataByID = data.messageIDsByThreadID.get(thread.id, None)
        if messageDataByID is not None:
            # Thread known from a previous run: queue only messages missing
            # from the earlier index / message cache.
            foundMissing = False
            for msg in messages:
                if not messageDataByID.has_key(msg.id):
                    foundMissing = True
                    if not data.wantedThreadMessages.has_key(thread.id):
                        data.wantedThreadMessages[thread.id] = {}
                    data.wantedThreadMessages[thread.id][msg.id] = None
                    if not data.cachedMessages.has_key(msg.id):
                        data.wantedMessages[msg.id] = None
            if minimumToCheck is not None and minimumToCheck < cnt:
                # If we are forcibly checking a minimum number of posts..
                # NOTE(review): this guard suppresses the early return once
                # cnt has already PASSED the minimum, which looks inverted
                # relative to "check at least N, then allow the early stop"
                # -- confirm the intended direction of the comparison.
                pass
            elif not foundMissing:
                # We have all the messages already.
                data.threadID = thread.id
                return True
        else:
            # Brand-new thread: index every message and queue the uncached ones.
            d = data.wantedThreadMessages[thread.id] = {}
            for msg in messages:
                # Index it by thread.
                d[msg.id] = None
                # Note to get the full message if it is not cached already.
                if not data.cachedMessages.has_key(msg.id):
                    data.wantedMessages[msg.id] = None
            # Store the labels for the thread.
            for label in thread.getLabels():
                if not data.threadsByLabel.has_key(label):
                    data.threadsByLabel[label] = {}
                data.threadsByLabel[label][thread.id] = None
        # Token attempt not to hammer the gmail server.
        if not usedCache:
            time.sleep(2.0)
        cnt += 1
    # Did not identify that we had reached a preprocessed point.
    return False
def UpdateCachedLabels(ga, data):
    # Load the locally cached label list into data.labels, reconcile it
    # against the labels currently on the gmail account, and persist any
    # additions.  Exits the process if a cached label has disappeared
    # server-side -- label removal/renaming is explicitly out of scope
    # (see the header notes at the top of this file).
    data.labels = []
    if os.path.exists(labelsFileName):
        data.labels = cPickle.load(open(labelsFileName, "rb"))
    currentLabels = ga.getLabelNames()
    # Check that no labels have gone missing.
    labelsMissing = False
    for label in data.labels:
        if label not in currentLabels:
            print "ERROR: Label '%s' no longer exists." % label
            labelsMissing = True
    if labelsMissing:
        print "You've removed one or more labels, this is your problem to sort out!"
        sys.exit(1)
    # Check for labels that need to be added.
    labelsAdded = False
    for label in currentLabels:
        if label not in data.labels:
            print "New label '%s' detected." % label
            data.labels.append(label)
            labelsAdded = True
    if labelsAdded:
        # NOTE(review): the dump file handle relies on GC for closing.
        cPickle.dump(data.labels, open(labelsFileName, "wb"))
    else:
        print "Loaded %d labels." % len(data.labels)
if __name__ == "__main__":
if not os.path.exists("data"):
os.mkdir("data")
data = Data()
# Process the cached messages we already have.
messagesByID = {}
data.cachedMessages = {}
data.wantedMessages = {}
for fileNumber, indexFileName, dumpFileName in GetMessageFileNames():
if os.path.exists(indexFileName):
wanted, cached = cPickle.load(open(indexFileName, "rb"))
else:
wanted = {}
cached = {}
for k, v in cPickle.load(open(dumpFileName, "rb")).iteritems():
if not ValidMessage(v):
wanted[k] = None
else:
cached[k] = len(v)
cPickle.dump((wanted, cached), open(indexFileName, "wb"))
data.wantedMessages.update(wanted)
data.cachedMessages.update(cached)
# May have one big dump, which would otherwise be kept around.
wanted = cached = None
# Delete wanted messages that were in later caches.
for messageID in data.wantedMessages.keys():
if data.cachedMessages.has_key(messageID):
del data.wantedMessages[messageID]
invalidCount = len(data.wantedMessages)
print "Loaded", len(data.cachedMessages), "cached messages (%d were invalid)." % invalidCount
# Load in the thread / message index.
data.messageIDsByThreadID = {}
if os.path.exists(threadMessagesFileName):
data.messageIDsByThreadID = cPickle.load(open(threadMessagesFileName, "rb"))
# Process it, correcting the entry format, and noting missing
# messages from the cache of "original message" data.
for threadID, messageDataByID in data.messageIDsByThreadID.iteritems():
for messageID in messageDataByID.keys():
messageDataByID[messageID] = None
if not data.cachedMessages.has_key(messageID):
data.wantedMessages[messageID] = None
s = ""
delta = len(data.wantedMessages) - invalidCount
if delta > 0:
s = " (%d messages are not cached)" % delta
print "Loaded", len(data.messageIDsByThreadID), "thread message mappings"+ s +"."
data.threadsByLabel = {}
if os.path.exists(threadLabelsFileName):
data.threadsByLabel = cPickle.load(open(threadLabelsFileName, "rb"))
# TODO: Check we have the same threads as in the thread messages dictionary.
print "Loaded", len(data.threadsByLabel), "thread label mappings."
print
# Log into gmail.
username = raw_input("Username [%s]: " % defaultUsername).strip()
if not len(username):
username | |
Run "make" to regenerate code after modifying this file
"""
return pulumi.get(self, "location")
    @location.setter
    def location(self, value: pulumi.Input[str]):
        """Set the required ``location`` input."""
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> pulumi.Input[str]:
        """Required ``resource_group`` input (serialized as ``resourceGroup``)."""
        return pulumi.get(self, "resource_group")
    @resource_group.setter
    def resource_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group", value)
    @property
    @pulumi.getter(name="secondaryServer")
    def secondary_server(self) -> pulumi.Input[str]:
        """Required ``secondary_server`` input (serialized as ``secondaryServer``)."""
        return pulumi.get(self, "secondary_server")
    @secondary_server.setter
    def secondary_server(self, value: pulumi.Input[str]):
        pulumi.set(self, "secondary_server", value)
    @property
    @pulumi.getter(name="secondaryServerResourceGroup")
    def secondary_server_resource_group(self) -> pulumi.Input[str]:
        """Required ``secondary_server_resource_group`` input (serialized as ``secondaryServerResourceGroup``)."""
        return pulumi.get(self, "secondary_server_resource_group")
    @secondary_server_resource_group.setter
    def secondary_server_resource_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "secondary_server_resource_group", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """Required ``server`` input."""
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter(name="keyVaultToStoreSecrets")
    def key_vault_to_store_secrets(self) -> Optional[pulumi.Input[str]]:
        """Optional ``key_vault_to_store_secrets`` input (serialized as ``keyVaultToStoreSecrets``)."""
        return pulumi.get(self, "key_vault_to_store_secrets")
    @key_vault_to_store_secrets.setter
    def key_vault_to_store_secrets(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_vault_to_store_secrets", value)
@pulumi.input_type
class AzureSqlFailoverGroupStatusArgs:
    def __init__(__self__, *,
                 completed: Optional[pulumi.Input[str]] = None,
                 contains_update: Optional[pulumi.Input[bool]] = None,
                 failed_provisioning: Optional[pulumi.Input[bool]] = None,
                 flattened_secrets: Optional[pulumi.Input[bool]] = None,
                 message: Optional[pulumi.Input[str]] = None,
                 output: Optional[pulumi.Input[str]] = None,
                 polling_url: Optional[pulumi.Input[str]] = None,
                 provisioned: Optional[pulumi.Input[bool]] = None,
                 provisioning: Optional[pulumi.Input[bool]] = None,
                 requested: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 spec_hash: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None):
        """
        ASOStatus (AzureServiceOperatorsStatus) defines the observed state of resource actions.

        Every field is optional; a field is recorded (via ``pulumi.set``)
        only when a non-None value is supplied.
        """
        if completed is not None:
            pulumi.set(__self__, "completed", completed)
        if contains_update is not None:
            pulumi.set(__self__, "contains_update", contains_update)
        if failed_provisioning is not None:
            pulumi.set(__self__, "failed_provisioning", failed_provisioning)
        if flattened_secrets is not None:
            pulumi.set(__self__, "flattened_secrets", flattened_secrets)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if output is not None:
            pulumi.set(__self__, "output", output)
        if polling_url is not None:
            pulumi.set(__self__, "polling_url", polling_url)
        if provisioned is not None:
            pulumi.set(__self__, "provisioned", provisioned)
        if provisioning is not None:
            pulumi.set(__self__, "provisioning", provisioning)
        if requested is not None:
            pulumi.set(__self__, "requested", requested)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
        if spec_hash is not None:
            pulumi.set(__self__, "spec_hash", spec_hash)
        if state is not None:
            pulumi.set(__self__, "state", state)
    # Generated accessor boilerplate: each property reads/writes the pulumi
    # input bag under its snake_case key; camelCase wire names are declared
    # via @pulumi.getter(name=...).
    @property
    @pulumi.getter
    def completed(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "completed")
    @completed.setter
    def completed(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "completed", value)
    @property
    @pulumi.getter(name="containsUpdate")
    def contains_update(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "contains_update")
    @contains_update.setter
    def contains_update(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "contains_update", value)
    @property
    @pulumi.getter(name="failedProvisioning")
    def failed_provisioning(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "failed_provisioning")
    @failed_provisioning.setter
    def failed_provisioning(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "failed_provisioning", value)
    @property
    @pulumi.getter(name="flattenedSecrets")
    def flattened_secrets(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "flattened_secrets")
    @flattened_secrets.setter
    def flattened_secrets(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "flattened_secrets", value)
    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "message")
    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)
    @property
    @pulumi.getter
    def output(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "output")
    @output.setter
    def output(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "output", value)
    @property
    @pulumi.getter(name="pollingUrl")
    def polling_url(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "polling_url")
    @polling_url.setter
    def polling_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "polling_url", value)
    @property
    @pulumi.getter
    def provisioned(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "provisioned")
    @provisioned.setter
    def provisioned(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "provisioned", value)
    @property
    @pulumi.getter
    def provisioning(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "provisioning")
    @provisioning.setter
    def provisioning(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "provisioning", value)
    @property
    @pulumi.getter
    def requested(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "requested")
    @requested.setter
    def requested(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "requested", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
    @property
    @pulumi.getter(name="specHash")
    def spec_hash(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "spec_hash")
    @spec_hash.setter
    def spec_hash(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "spec_hash", value)
    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "state")
    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)
@pulumi.input_type
class AzureSqlFirewallRuleSpecArgs:
    def __init__(__self__, *,
                 resource_group: pulumi.Input[str],
                 server: pulumi.Input[str],
                 end_ip_address: Optional[pulumi.Input[str]] = None,
                 start_ip_address: Optional[pulumi.Input[str]] = None):
        """
        AzureSqlFirewallRuleSpec defines the desired state of AzureSqlFirewallRule.

        :param pulumi.Input[str] resource_group: INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file
        :param pulumi.Input[str] server: required; serialized as-is
        :param pulumi.Input[str] end_ip_address: optional; serialized as ``endIpAddress``
        :param pulumi.Input[str] start_ip_address: optional; serialized as ``startIpAddress``
        """
        pulumi.set(__self__, "resource_group", resource_group)
        pulumi.set(__self__, "server", server)
        # Optional fields are recorded only when supplied.
        if end_ip_address is not None:
            pulumi.set(__self__, "end_ip_address", end_ip_address)
        if start_ip_address is not None:
            pulumi.set(__self__, "start_ip_address", start_ip_address)
    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> pulumi.Input[str]:
        """
        INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file
        """
        return pulumi.get(self, "resource_group")
    @resource_group.setter
    def resource_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group", value)
    @property
    @pulumi.getter
    def server(self) -> pulumi.Input[str]:
        """Required ``server`` input."""
        return pulumi.get(self, "server")
    @server.setter
    def server(self, value: pulumi.Input[str]):
        pulumi.set(self, "server", value)
    @property
    @pulumi.getter(name="endIpAddress")
    def end_ip_address(self) -> Optional[pulumi.Input[str]]:
        """Optional ``end_ip_address`` input (serialized as ``endIpAddress``)."""
        return pulumi.get(self, "end_ip_address")
    @end_ip_address.setter
    def end_ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "end_ip_address", value)
    @property
    @pulumi.getter(name="startIpAddress")
    def start_ip_address(self) -> Optional[pulumi.Input[str]]:
        """Optional ``start_ip_address`` input (serialized as ``startIpAddress``)."""
        return pulumi.get(self, "start_ip_address")
    @start_ip_address.setter
    def start_ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_ip_address", value)
@pulumi.input_type
class AzureSqlFirewallRuleStatusArgs:
    def __init__(__self__, *,
                 completed: Optional[pulumi.Input[str]] = None,
                 contains_update: Optional[pulumi.Input[bool]] = None,
                 failed_provisioning: Optional[pulumi.Input[bool]] = None,
                 flattened_secrets: Optional[pulumi.Input[bool]] = None,
                 message: Optional[pulumi.Input[str]] = None,
                 output: Optional[pulumi.Input[str]] = None,
                 polling_url: Optional[pulumi.Input[str]] = None,
                 provisioned: Optional[pulumi.Input[bool]] = None,
                 provisioning: Optional[pulumi.Input[bool]] = None,
                 requested: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 spec_hash: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None):
        """
        ASOStatus (AzureServiceOperatorsStatus) defines the observed state of resource actions.

        Every field is optional; a field is recorded (via ``pulumi.set``)
        only when a non-None value is supplied.
        """
        if completed is not None:
            pulumi.set(__self__, "completed", completed)
        if contains_update is not None:
            pulumi.set(__self__, "contains_update", contains_update)
        if failed_provisioning is not None:
            pulumi.set(__self__, "failed_provisioning", failed_provisioning)
        if flattened_secrets is not None:
            pulumi.set(__self__, "flattened_secrets", flattened_secrets)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if output is not None:
            pulumi.set(__self__, "output", output)
        if polling_url is not None:
            pulumi.set(__self__, "polling_url", polling_url)
        if provisioned is not None:
            pulumi.set(__self__, "provisioned", provisioned)
        if provisioning is not None:
            pulumi.set(__self__, "provisioning", provisioning)
        if requested is not None:
            pulumi.set(__self__, "requested", requested)
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
        if spec_hash is not None:
            pulumi.set(__self__, "spec_hash", spec_hash)
        if state is not None:
            pulumi.set(__self__, "state", state)
    # Generated accessor boilerplate: each property reads/writes the pulumi
    # input bag under its snake_case key; camelCase wire names are declared
    # via @pulumi.getter(name=...).
    @property
    @pulumi.getter
    def completed(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "completed")
    @completed.setter
    def completed(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "completed", value)
    @property
    @pulumi.getter(name="containsUpdate")
    def contains_update(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "contains_update")
    @contains_update.setter
    def contains_update(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "contains_update", value)
    @property
    @pulumi.getter(name="failedProvisioning")
    def failed_provisioning(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "failed_provisioning")
    @failed_provisioning.setter
    def failed_provisioning(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "failed_provisioning", value)
    @property
    @pulumi.getter(name="flattenedSecrets")
    def flattened_secrets(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "flattened_secrets")
    @flattened_secrets.setter
    def flattened_secrets(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "flattened_secrets", value)
    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "message")
    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)
    @property
    @pulumi.getter
    def output(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "output")
    @output.setter
    def output(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "output", value)
    @property
    @pulumi.getter(name="pollingUrl")
    def polling_url(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "polling_url")
    @polling_url.setter
    def polling_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "polling_url", value)
    @property
    @pulumi.getter
    def provisioned(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "provisioned")
    @provisioned.setter
    def provisioned(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "provisioned", value)
    @property
    @pulumi.getter
    def provisioning(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "provisioning")
    @provisioning.setter
    def provisioning(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "provisioning", value)
    @property
    @pulumi.getter
    def requested(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "requested")
    @requested.setter
    def requested(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "requested", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
    @property
    @pulumi.getter(name="specHash")
    def spec_hash(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "spec_hash")
    @spec_hash.setter
    def spec_hash(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "spec_hash", value)
    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "state")
    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)
@pulumi.input_type
class AzureSqlServerSpecArgs:
    def __init__(__self__, *,
                 location: pulumi.Input[str],
                 resource_group: pulumi.Input[str],
                 key_vault_to_store_secrets: Optional[pulumi.Input[str]] = None):
        """
        AzureSqlServerSpec defines the desired state of AzureSqlServer.

        :param pulumi.Input[str] location: required; NOTE(review): the
               upstream CRD comment is a codegen placeholder ("INSERT
               ADDITIONAL SPEC FIELDS ..."); presumably the Azure region --
               confirm against the operator's CRD.
        :param pulumi.Input[str] resource_group: required; name of the
               resource group this server belongs to (stored under the
               manifest key "resourceGroup").
        :param pulumi.Input[str] key_vault_to_store_secrets: optional;
               stored under "keyVaultToStoreSecrets" only when not None.
        """
        pulumi.set(__self__, "location", location)
        pulumi.set(__self__, "resource_group", resource_group)
        if key_vault_to_store_secrets is not None:
            pulumi.set(__self__, "key_vault_to_store_secrets", key_vault_to_store_secrets)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Input[str]:
        """
        NOTE(review): upstream comment is a codegen placeholder; presumably
        the Azure region of the server -- confirm.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: pulumi.Input[str]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> pulumi.Input[str]:
        return pulumi.get(self, "resource_group")

    @resource_group.setter
    def resource_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group", value)

    @property
    @pulumi.getter(name="keyVaultToStoreSecrets")
    def key_vault_to_store_secrets(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key_vault_to_store_secrets")

    @key_vault_to_store_secrets.setter
    def key_vault_to_store_secrets(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_vault_to_store_secrets", value)
@pulumi.input_type
class AzureSqlServerStatusArgs:
def __init__(__self__, *,
completed: Optional[pulumi.Input[str]] = None,
contains_update: Optional[pulumi.Input[bool]] = None,
failed_provisioning: Optional[pulumi.Input[bool]] = None,
flattened_secrets: Optional[pulumi.Input[bool]] = None,
message: Optional[pulumi.Input[str]] = None,
output: Optional[pulumi.Input[str]] = None,
polling_url: Optional[pulumi.Input[str]] = None,
provisioned: Optional[pulumi.Input[bool]] = None,
provisioning: Optional[pulumi.Input[bool]] = None,
requested: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
spec_hash: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None):
"""
ASOStatus (AzureServiceOperatorsStatus) defines the observed state of resource actions
"""
if completed is not None:
pulumi.set(__self__, "completed", completed)
if contains_update is not None:
pulumi.set(__self__, "contains_update", contains_update)
if failed_provisioning is not None:
pulumi.set(__self__, "failed_provisioning", failed_provisioning)
if flattened_secrets is not None:
pulumi.set(__self__, "flattened_secrets", flattened_secrets)
if message is not | |
#!/usr/bin/env python2
#
# TODO:
# - Task colors:
# - User-defined using config file.
# - Automagically chosen from color space.
# - Advanced algorithm (contact <NAME>).
# - Koos' specs:
# - Resources and tasks sorted in read-in order (default)
# or alphabetically (flag).
# - Have proper gnuplot behavior on windows/x11, eps/pdf, latex terminals.
# - Create and implement algorithm for critical path analysis.
# - Split generic stuff into a Gantt class, and specific stuff into the main.
#
# gantt.py ganttfile | gnuplot
import itertools, sys, getopt
from ConfigParser import ConfigParser
rectangleHeight = 0.8 #: Height of an activity rectangle, in resource-row units (rows are 1 apart).
class Activity(object):
    """
    Record of one scheduled activity (one bar in the Gantt chart).

    @ivar resource: Resource name.
    @type resource: C{str}
    @ivar start: Start time of the activity.
    @type start: C{float}
    @ivar stop: End time of the activity.
    @type stop: C{float}
    @ivar task: Name of the task/activity being performed.
    @type task: C{str}
    """
    def __init__(self, resource, start, stop, task):
        # Plain record: store the four fields exactly as given.
        self.resource, self.task = resource, task
        self.start, self.stop = start, stop
class Rectangle(object):
    """
    Drawable rectangle: two corner coordinates plus fixed gnuplot styling.
    """
    def __init__(self, bottomleft, topright, fillcolor):
        # Corner coordinates as (x, y) pairs.
        self.bottomleft, self.topright = bottomleft, topright
        self.fillcolor = fillcolor
        # Styling shared by every activity rectangle.
        self.fillstyle, self.linewidth = 'solid 0.8', 2
class ColorBook(object):
    """
    Class managing task colors.

    @ivar colors: Mapping of task name to a color value: an RGB color name
                  when loaded from a config file, a palette-fraction string
                  (e.g. '0.50') otherwise.
    @ivar palette: Gnuplot palette definition string.
    @ivar prefix: Gnuplot color-spec prefix ('rgb' or 'palette frac').
    """
    def __init__(self, colorfname, tasks):
        """
        Construct a ColorBook object.

        @param colorfname: Name of the color config file (if specified).
        @type  colorfname: C{str} or C{None}
        @param tasks: Existing task types.
        @type  tasks: C{list} of C{str}
        """
        if colorfname:
            values = self.load_config(colorfname, tasks)
        else:
            values = self.fixed(tasks)
        self.colors, self.palette, self.prefix = values

    def load_config(self, colorfname, tasks):
        """
        Read task colors from a configuration file.

        @raise KeyError: if any task has no color assigned in the file.
        """
        palettedef = 'model RGB'
        colorprefix = 'rgb'
        # Read in task colors from configuration file
        config = ConfigParser()
        config.optionxform = str # makes option names case sensitive
        # FIX: the file handle used to be leaked (bare open() passed to
        # readfp); close it deterministically.
        fp = open(colorfname, 'r')
        try:
            config.readfp(fp)
        finally:
            fp.close()
        # Colors are RGB colornames
        colors = dict(config.items('Colors'))
        # Raise KeyError if no color is specified for a task.
        # FIX: 'in' replaces dict.has_key(), which is Python 2 only.
        nocolors = [t for t in tasks if t not in colors]
        if nocolors:
            msg = 'Could not find task color for ' + ', '.join(nocolors)
            raise KeyError(msg)
        return colors, palettedef, colorprefix

    def fixed(self, tasks):
        """
        Pick colors from a pre-defined palette.
        """
        # Set task colors
        # SE colors
        # (see http://w3.wtb.tue.nl/nl/organisatie/systems_engineering/\
        #      info_for_se_students/how2make_a_poster/pictures/)
        # Decrease the 0.8 values for less transparent colors.
        se_palette_pieter = {
            "se_black": (0.0, 0.0, 0.0),
            "se_red": (1.0, 0.0, 0.0),
            "se_purple": (1.0, 0.0, 1.0),
            "se_blue": (0.0, 0.0, 1.0),
            "se_magenta": (0.0, 1.0, 1.0),
            "se_green": (0.0, 1.0, 0.0),
            "se_yellow": (1.0, 1.0, 0.0),
            "se_white": (1.0, 1.0, 1.0),
            "se_grey": (0.5, 0.5, 0.5),
            "se_d_red": (0.5, 0.0, 0.0),
            "se_d_purple": (0.5, 0.0, 0.5),
            "se_d_blue": (0.0, 0.0, 0.5),
            "se_d_magenta": (0.0, 0.5, 0.5),
            "se_d_green": (0.0, 0.5, 0.0),
            "se_d_yellow": (0.5, 0.5, 0.0)}
        se_gradient_pieter = ["se_black", "se_red", "se_purple", "se_blue",
                              "se_magenta", "se_green", "se_yellow", "se_white",
                              "se_red", "se_purple", "se_blue",
                              "se_magenta", "se_green", "se_yellow"]
        # palette generated by http://tools.medialab.sciences-po.fr/iwanthue/
        se_palette_iwanthue = {
            "color_00": (0.761719, 0.617188, 0.765625),
            "color_01": (0.453125, 0.832031, 0.285156),
            "color_02": (0.761719, 0.507813, 0.218750),
            "color_03": (0.738281, 0.339844, 0.785156),
            "color_04": (0.312500, 0.222656, 0.144531),
            "color_05": (0.347656, 0.488281, 0.218750),
            "color_06": (0.777344, 0.296875, 0.496094),
            "color_07": (0.531250, 0.796875, 0.816406),
            "color_08": (0.804688, 0.789063, 0.292969),
            "color_09": (0.785156, 0.660156, 0.542969),
            "color_10": (0.335938, 0.199219, 0.335938),
            "color_11": (0.523438, 0.828125, 0.589844),
            "color_12": (0.765625, 0.273438, 0.226563),
            "color_13": (0.312500, 0.437500, 0.441406),
            "color_14": (0.402344, 0.453125, 0.765625)}
        se_gradient_iwanthue = ["color_00", "color_01", "color_02", "color_03",
            "color_04", "color_05", "color_06", "color_07", "color_08",
            "color_09", "color_10", "color_11", "color_12", "color_13",
            "color_14"]
        se_palette = se_palette_iwanthue
        se_gradient = se_gradient_iwanthue
        se_palettedef = '( ' + \
                        ', '.join(('%d ' % n +
                                   ' '.join((str(x) for x in se_palette[c]))
                                   for n, c in enumerate(se_gradient))) + \
                        ' )'
        palettedef = 'model RGB defined %s' % se_palettedef
        colorprefix = 'palette frac'
        # Colors are fractions from the palette defined.
        # FIX: guard the denominator -- a single task used to raise
        # ZeroDivisionError (len(tasks) - 1 == 0).
        denom = max(len(tasks) - 1, 1)
        colors = dict((t, '%0.2f' % (float(n) / denom))
                      for n, t in enumerate(tasks))
        return colors, palettedef, colorprefix
class DummyClass(object):
    """
    Dummy class for storing option values in.

    Instances act as plain attribute bags; attributes are assigned
    externally (see make_default_options / process_options).
    """
def make_rectangles(activities, resource_map, colors):
    """
    Construct a collection of L{Rectangle} for all activities.

    @param activities: Activities being performed.
    @type  activities: C{iterable} of L{Activity}
    @param resource_map: Indices of all resources.
    @type  resource_map: C{dict} of C{str} to C{int}
    @param colors: Colors for all tasks.
    @type  colors: C{dict} of C{str} to C{str}
    @return: Collection of rectangles to draw.
    @rtype: C{list} of L{Rectangle}
    """
    # Each activity becomes a rectangle centred on its resource's row.
    half = 0.5 * rectangleHeight
    return [Rectangle((act.start, resource_map[act.resource] - half),
                      (act.stop, resource_map[act.resource] + half),
                      colors[act.task])
            for act in activities]
def load_ganttfile(ganttfile):
    """
    Load the resource/task file.

    Each non-blank line holds four tab-separated fields:
    resource, start time, stop time, task name. Consecutive lines are
    buffered one step so the final line is appended after the loop.

    @param ganttfile: Name of the gantt file.
    @type  ganttfile: C{str}
    @return: Activities loaded from the file, collection of
             (resource, start, end, task) activities.
    @rtype: C{list} of L{Activity}
    """
    activities = []
    oldresource = ""
    oldstop = -10.0
    oldstart = -10.0
    oldtask = ""
    first = 1
    # FIX: use a with-block so the file handle is always closed.
    with open(ganttfile, 'r') as handle:
        for rawline in handle:
            fields = rawline.strip().split('\t')
            # FIX: a blank line splits to [''] (length 1), so the old
            # "len == 0" test never fired and blank/short lines crashed
            # with IndexError below; skip any line lacking the 4 fields.
            if len(fields) < 4:
                continue
            resource = fields[0]
            start = float(fields[1])
            stop = float(fields[2])
            task = fields[3]
            # Append the previously read activity (one-step delay keeps
            # room for a future merge of adjacent activities).
            if first == 0:
                activities.append(Activity(oldresource, oldstart, oldstop, oldtask))
            oldtask = task
            oldstop = stop
            oldstart = start
            oldresource = resource
            first = 0
    # Flush the last buffered activity, if any line was read at all.
    if first == 0:
        activities.append(Activity(oldresource, oldstart, oldstop, oldtask))
    return activities
def make_unique_tasks_resources(alphasort, activities):
    """
    Construct collections of unique task names and resource names.

    @param alphasort: Sort resources and tasks alphabetically.
    @type  alphasort: C{bool}
    @param activities: Activities to draw.
    @type  activities: C{list} of L{Activity}
    @return: Collections of task-types and resources.
    @rtype: C{list} of C{str}, C{list} of C{str}
    """
    # First occurrence wins: collect names in read-in order.
    seen_resources = []
    seen_tasks = []
    for act in activities:
        if act.resource not in seen_resources:
            seen_resources.append(act.resource)
        if act.task not in seen_tasks:
            seen_tasks.append(act.task)
    # Optionally switch to alphabetical order instead.
    if alphasort:
        seen_resources.sort()
        seen_tasks.sort()
    # Resources are read from top (y=max) to bottom (y=1).
    seen_resources.reverse()
    return seen_tasks, seen_resources
def generate_plotdata(activities, resources, tasks, rectangles, options,
                      resource_map, color_book):
    """
    Generate Gnuplot lines.

    @param activities: All activities (used for the x range).
    @param resources: Unique resource names (used for the y range).
    @param tasks: Unique task names (used for the legend).
    @param rectangles: L{Rectangle}s to draw, one per activity.
    @param options: Option bag; only C{plottitle} is read here.
    @param resource_map: Mapping of resource name to y row index.
    @param color_book: L{ColorBook} providing palette/prefix/colors.
    @return: Tuple (plot_dimensions, plot_rectangles, plot_lines) of
             gnuplot command strings (the middle one is a generator).

    NOTE: uses dict.iteritems() and itertools.izip(), i.e. Python 2 only
    (consistent with the file's python2 shebang).
    """
    xmin = 0
    xmax = max(act.stop for act in activities)
    # Keep half a rectangle of margin below row 1 and above the top row.
    ymin = 0 + (rectangleHeight / 2)
    ymax = len(resources) + 1 - (rectangleHeight / 2)
    xlabel = 'time'
    ylabel = ''
    title = options.plottitle
    # One labelled y tic per resource row, e.g. ("res1" 1, "res2" 2).
    ytics = ''.join(['(',
                     ', '.join(('"%s" %d' % item)
                               for item in resource_map.iteritems()),
                     ')'])
    # outside and 2 characters from the graph
    key_position = 'outside width +2'
    grid_tics = 'xtics'
    # Set plot dimensions
    plot_dimensions = ['set terminal \'wxt\'',
                       'set xrange [%f:%f]' % (xmin, xmax),
                       'set yrange [%f:%f]' % (ymin, ymax),
                       'set autoscale x', # extends x axis to next tic mark
                       'set xlabel "%s"' % xlabel,
                       'set ylabel "%s"' % ylabel,
                       'set title "%s"' % title,
                       'set ytics %s' % ytics,
                       'set key %s' % key_position,
                       'set grid %s' % grid_tics,
                       'set palette %s' % color_book.palette,
                       'unset colorbox']
    # Generate gnuplot rectangle objects, numbered from 1.
    plot_rectangles = (' '.join(['set object %d rectangle' % n,
                                 'from %f, %0.1f' % r.bottomleft,
                                 'to %f, %0.1f' % r.topright,
                                 'fillcolor %s %s' % (color_book.prefix,
                                                      r.fillcolor),
                                 'fillstyle solid 0.8'])
                       for n, r in itertools.izip(itertools.count(1), rectangles))
    # Generate gnuplot lines: one off-screen line (-1) per task purely to
    # obtain a legend entry in that task's colour.
    plot_lines = ['plot ' +
                  ', \\\n\t'.join(' '.join(['-1',
                                            'title "%s"' % t,
                                            'with lines',
                                            'linecolor %s %s ' % (color_book.prefix,
                                                                  color_book.colors[t]),
                                            'linewidth 6'])
                                  for t in tasks)]
    #plot_lines += ["set terminal \'svg\' size 1500,1000", "set output \'gantt.svg\'",
    #               "replot\n" ]
    return plot_dimensions, plot_rectangles, plot_lines
def write_data(plot_dimensions, plot_rectangles, plot_lines, fname):
    """
    Write plot data out to file or screen.

    @param fname: Name of the output file; falsy writes to stdout instead,
                  followed by a gnuplot pause command (the script is meant
                  to be piped into gnuplot).
    @type  fname: C{str} or C{None}
    """
    if fname:
        g = open(fname, 'w')
        g.write('\n'.join(itertools.chain(plot_dimensions, plot_rectangles,
                                          plot_lines)))
        g.close()
    else:
        # Python 2 print statements (file targets python2).
        print '\n'.join(itertools.chain(plot_dimensions, plot_rectangles,
                                        plot_lines))
        print '\npause mouse close; exit\n'
def fmt_opt(short, long, arg, text):
    """
    Format one usage line for a command-line option.

    @param short: Short option letter, with a trailing getopt ':' when it
                  takes an argument (the ':' is stripped for display).
    @param long: Long option name.
    @param arg: Argument placeholder, or '' for flag options.
    @param text: Help text for the option.
    @return: Formatted usage line.
    """
    if arg:
        return '-{0} {1}, --{2}{1}\t{3}'.format(short[:-1], arg, long, text)
    return '-{0}, --{1}\t{2}'.format(short, long, text)
def make_default_options():
    """Return a fresh option bag pre-filled with the default settings."""
    opts = DummyClass()
    opts.outputfile = ''   # empty: write to stdout
    opts.colorfile = ''    # empty: use the built-in palette
    opts.alphasort = False # default: keep read-in order
    opts.plottitle = ''
    return opts
def process_options():
"""
Handle option and command-line argument processing.
@return: Options and gantt input | |
the console handler must be polled
self.consoleHandlerIsPolling = True
else:
# on UNIX the console handler use the normal file handler API
self.consoleHandlerIsPolling = False
self.createFileHandler(sys.stdin, consoleHandler.receiveCallback)
    # ---------------------------------------------------------------------------
    # External Event processing support
    # ---------------------------------------------------------------------------
    def pushEvent(self, event):
        """puts an event in the event buffer for execution by the poll loop"""
        # self.eventBuffer is consumed elsewhere by the task's poll loop.
        self.eventBuffer.append(event)
    # ---------------------------------------------------------------------------
    def registerView(self, view):
        """registers a view for status updates"""
        # self.views is a dict used as a set: key and value are the view.
        self.views[view] = view
    # ---------------------------------------------------------------------------
    def unregisterView(self, view):
        """unregisters a view for status updates"""
        # Raises KeyError if the view was never registered.
        del self.views[view]
    # ---------------------------------------------------------------------------
    def notifyViews(self, status):
        """notifies the views with status updates"""
        for view in self.views.keys():
            view.notifyStatus(status)
    # ---------------------------------------------------------------------------
    def notifyCommand(self, argv, extraData):
        """notifies with a command (string list) and extra data; default no-op"""
        pass
# =============================================================================
class ProcessingTask(Task):
    """A task that performs the processing of the application."""
    # ---------------------------------------------------------------------------
    def __init__(self, isParent):
        """initialises whether the task is a parent or a child"""
        Task.__init__(self, isParent=isParent, isProcessing=True)
    # ---------------------------------------------------------------------------
    def getAppMnemo(self):
        """Application Mnemonic (from the global system configuration)"""
        return UTIL.SYS.s_configuration.SYS_APP_MNEMO
    # ---------------------------------------------------------------------------
    def getAppName(self):
        """Application Name (from the global system configuration)"""
        return UTIL.SYS.s_configuration.SYS_APP_NAME
    # ---------------------------------------------------------------------------
    def getVersion(self):
        """Application Version, should be in line with the User Manual"""
        return UTIL.SYS.s_configuration.SYS_APP_VERSION
    # ---------------------------------------------------------------------------
    def logMethod(self, methodName, subsystem=None):
        """Logs a method name, prefixed with the application mnemonic"""
        LOG_INFO(self.getAppMnemo() + "." + methodName, subsystem)
    # ---------------------------------------------------------------------------
    def notifyCommand(self, argv, extraData):
        """Callback for processing the input arguments.

        Dispatches h/help and q/quit (case-insensitive); any other input
        logs a warning and prints the help text. Always returns 0
        (success status expected by processBuffer).
        """
        if len(argv) > 0:
            # decode the command
            cmd = argv[0].upper()
            if cmd == "H" or cmd == "HELP":
                self.helpCmd(argv)
            elif cmd == "Q" or cmd == "QUIT":
                self.quitCmd(argv)
            else:
                LOG_WARNING("Invalid command " + argv[0])
                self.helpCmd([])
        return 0
    # ---------------------------------------------------------------------------
    def helpCmd(self, argv):
        """Decoded help command: prints the available commands"""
        LOG_INFO("Available commands:")
        LOG("")
        LOG("h | help ........provides this information")
        LOG("q | quit ........terminates the application")
        LOG("")
    # ---------------------------------------------------------------------------
    def quitCmd(self, argv):
        """Decoded quit command: stops the module-level parent task"""
        global s_parentTask
        s_parentTask.stop()
# =============================================================================
class ConsoleHandler(object):
    """generic keyboard handler that can be registered in the ModelTask"""
    # ---------------------------------------------------------------------------
    def __init__(self):
        self.inputLine = "" # partial-line buffer; not used on UNIX
    # ---------------------------------------------------------------------------
    def receiveCallback(self, socket, stateMask):
        """Callback when data are received on sys.stdin (UNIX only)"""
        # due to the line buffering of the UNIX shell
        # it is possible to read a full line
        inputLine = sys.stdin.readline()
        # skip the last character in the string, which is "\n"
        inputLine = inputLine[:-1]
        self.processBuffer(inputLine)
    # ---------------------------------------------------------------------------
    def poll(self):
        """Poll for data on msvcrt (Windows only)"""
        # Accumulate echoed characters until "\r" completes a line.
        # NOTE(review): msvcrt.getche() returns bytes on Python 3; the "\r"
        # comparison assumes Python 2 -- confirm the target interpreter.
        completeLineRead = False
        while msvcrt.kbhit():
            nextChar = msvcrt.getche()
            if nextChar == "\r":
                completeLineRead = True
                print("")
                break
            self.inputLine += nextChar
        if completeLineRead:
            self.processBuffer(self.inputLine)
            self.inputLine = ""
    # ---------------------------------------------------------------------------
    def processBuffer(self, buffer):
        """Callback when a line is read from the console"""
        # split the buffer into tokens
        argv = buffer.split()
        # delegate the processing to the processing task
        return UTIL.TASK.s_processingTask.notifyCommand(argv, None)
# =============================================================================
class RequestHandler(ConsoleHandler):
"""
Handles the requests invoked by the ART framework
Goes into background if the commandline switch
'-bg' or '-background' is used.
Opens a TCP/IP port if the commandline switch
'-p <portNr>' or '-port <portNr>' is used.
"""
# ---------------------------------------------------------------------------
    def __init__(self, argv):
        """Initialise the test driver and fork on demand.

        Recognised command line switches:
          -bg / -background   run in background (fork; parent exits)
          -h  / -help         request help (evaluated in the main program)
          -l  / -logfile F    enable file logging to F
          -p  / -port N       TCP/IP server port number
        """
        ConsoleHandler.__init__(self)
        self.foreground = True
        self.helpRequested = False
        self.portNr = 0
        self.connectSocket = None
        self.clientSocket = None
        self.tcpLineBuffer = ""
        argc = len(argv)
        i = 0;
        # echo all arguments to the log
        for arg in argv:
            LOG("argv[" + str(i) + "] = " + arg)
            i += 1
        # parse command line arguments
        logFileName = None
        i = 0
        while i < argc:
            cmdSwitch = argv[i]
            if (cmdSwitch == "-bg") or (cmdSwitch == "-background"):
                # shall be evaluated in the main program
                self.foreground = False
            elif (cmdSwitch == "-h") or (cmdSwitch == "-help"):
                # shall be evaluated in the main program
                self.helpRequested = True
            elif (cmdSwitch == "-l") or (cmdSwitch == "-logfile"):
                # logfile switch ---> next argument is the logfile name
                i += 1
                if i < argc:
                    logFileName = argv[i]
                else:
                    LOG_ERROR("no logfile name specified for switch " + cmdSwitch)
                    sys.exit(-1)
            elif (cmdSwitch == "-p") or (cmdSwitch == "-port"):
                # port switch ---> next argument is the port number
                i += 1
                if i < argc:
                    self.portNr = int(argv[i]);
                else:
                    LOG_ERROR("no port number specified for switch " + cmdSwitch)
                    sys.exit(-1)
            i += 1
        # checks if the process shall go into background
        if not self.foreground:
            # bring the process into background via fork
            # ignore the SIGCHLD signal before forking,
            # otherwise it is inherited by the parent
            signal.signal(signal.SIGCHLD, signal.SIG_IGN)
            # start the process
            process_ID = os.fork()
            if process_ID != 0:
                # this is the parent ---> terminate
                sys.exit(0);
        # enable the log file only if the process is in foreground or the child
        if logFileName != None:
            UTIL.SYS.s_logger.enableFileLogging(logFileName)
# ---------------------------------------------------------------------------
def openConnectPort(self, hostName=None):
"""Open the test driver TCP/IP connect port (TECO connect port)"""
# check if the port is already open
if self.connectSocket != None:
LOG_ERROR("connect port already open!")
return False
# create the server socket
try:
connectSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except:
LOG_ERROR("can't create server socket!")
return False
# set the socket linger
try:
connectSocket.setsockopt(socket.SOL_SOCKET,
socket.SO_LINGER,
struct.pack('ii', 1, 10))
except:
LOG_ERROR("can't set socket linger!")
connectSocket.close()
return False
# set the socket reuse address
try:
connectSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except:
LOG_ERROR("can't set socket reuse address!")
connectSocket.close()
return False
# bind the server socket
if hostName == None:
hostName = socket.gethostname()
try:
connectSocket.bind((hostName, self.portNr))
except:
LOG_ERROR("bind the server socket!")
connectSocket.close()
return False
# listen on the server socket
try:
connectSocket.listen(5)
except:
LOG_ERROR("listen on the server socket!")
connectSocket.close()
return False
self.connectSocket = connectSocket
return True
# ---------------------------------------------------------------------------
def closeConnectPort(self):
"""Close the test driver TCP/IP connect port"""
LOG_INFO("RequestHandler.closeConnectPort")
# check if the port is already open
if self.connectSocket == None:
LOG_ERROR("connect port not open!")
return False
try:
self.connectSocket.close()
except:
LOG_ERROR("close of connect port failed!")
self.connectSocket = None
return False
self.connectSocket = None
return True
# ---------------------------------------------------------------------------
def closeClientPort(self):
"""Close the test driver TCP/IP client port"""
LOG_INFO("RequestHandler.closeClientPort")
# check if the port is already open
if self.clientSocket == None:
LOG_ERROR("data port not open!")
return False
try:
self.clientSocket.close()
except:
LOG_ERROR("close of data port failed!")
self.clientSocket = None
return False
self.clientSocket = None
return True
# ---------------------------------------------------------------------------
def tcpConnectCallback(self, socket, stateMask):
"""Callback when a TCP/IP client (e.g. TECO) has connected"""
# accept the client connection
try:
clientSocket,clientHost = self.connectSocket.accept()
except:
LOG_ERROR("accept of the client connection failed!")
return
self.clientSocket = clientSocket;
# delegate the remaing processing
self.connected()
# ---------------------------------------------------------------------------
def tcpDataCallback(self, socket, stateMask):
"""Callback when a TCP/IP client (e.g. TECO) has send a command"""
# read the next set of byte from stdin
tcpLineBuffer = self.tcpLineBuffer
try:
tcpLineBuffer += self.clientSocket.recv(LINEBUFFERLEN).decode("ascii")
LOG("tcpLineBuffer: " + tcpLineBuffer)
except:
# read failed
self.disconnected()
return
# handle the input: extract the lines from the line buffer
lines = tcpLineBuffer.split("\n")
# the last line has to be handled in a special way and can not be
# processed directly
lastLine = lines[-1]
lines = lines[:-1]
if lastLine == "":
# read of the data was complete (incl. "\n")
pass
else:
# last line was cutt off and the rest should come with the next read
self.tcpLineBuffer = lastLine
for line in lines:
# remove a terminating "\r" for clients like telnet
if line[-1] == "\r":
line = line[:-1]
# terminate the client connection if exit has been entered (case insensitive)
upperLine = line.upper()
if (upperLine == "X") or (upperLine == "EXIT"):
LOG("exit requested")
# set the OK response back to the TECO
retString = "OK 0\n"
try:
self.clientSocket.send(retString.encode())
except:
LOG_ERROR("send of OK response failed!")
# terminate the client connection
self.disconnected();
return
# delegate the input
pstatus = self.processBuffer(line);
if pstatus == 0:
# send the OK response back to the TECO
retString = "OK 0\n";
try:
self.clientSocket.send(retString.encode())
except:
LOG_ERROR("send of OK response failed!")
else:
LOG_WARNING("return status = " + str(pstatus))
# set the Error response back to the TECO:
retString = "Error: execution failed (see log)!\n"
try:
self.clientSocket.send(retString.encode())
except:
LOG_ERROR("send of Error response failed!")
# ---------------------------------------------------------------------------
    def connected(self):
        """Client (TECO) has connected: register/unregister file descriptors"""
        # unregister the connect socket: only one client is served at a time
        UTIL.TASK.s_processingTask.deleteFileHandler(self.connectSocket)
        # register the client socket so tcpDataCallback receives its data
        UTIL.TASK.s_processingTask.createFileHandler(self.clientSocket,
                                                     self.tcpDataCallback)
# ---------------------------------------------------------------------------
def disconnected(self):
"""Client (TECO) has disconnected: register/unregister | |
# Source file: bigchaindb/backend/query.py
"""Query interfaces for backends."""
from functools import singledispatch
from bigchaindb.backend.exceptions import OperationError
# Fixed document ids under which backends store singleton records.
# NOTE(review): both share the same placeholder value -- presumably stored
# in different tables/collections; confirm against the backend schemas.
VALIDATOR_UPDATE_ID = 'a_unique_id_string'
PRE_COMMIT_ID = 'a_unique_id_string'
@singledispatch
def write_transaction(connection, signed_transaction):
    """Write a transaction to the backlog table.

    Args:
        signed_transaction (dict): a signed transaction.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError


@singledispatch
def store_asset(connection, asset):
    """Write an asset to the asset table.

    Args:
        asset (dict): the asset.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError


@singledispatch
def store_assets(connection, assets):
    """Write a list of assets to the assets table.

    Args:
        assets (list): a list of assets to write.

    Returns:
        The database response.
    """
    raise NotImplementedError


@singledispatch
def store_metadatas(connection, metadata):
    """Write a list of metadata to the metadata table.

    Args:
        metadata (list): list of metadata.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError


@singledispatch
def store_transaction(connection, signed_transaction):
    """Same as write_transaction."""
    raise NotImplementedError


@singledispatch
def store_transactions(connection, signed_transactions):
    """Store a list of transactions."""
    raise NotImplementedError
@singledispatch
def get_transaction(connection, transaction_id):
    """Get a transaction from the transactions table.

    Args:
        transaction_id (str): the id of the transaction.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError


@singledispatch
def get_transactions(connection, transaction_ids):
    """Get transactions from the transactions table.

    Args:
        transaction_ids (list): list of transaction ids to fetch.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError


@singledispatch
def get_asset(connection, asset_id):
    """Get an asset from the assets table.

    (Docstring fixed: it previously claimed to fetch a transaction.)

    Args:
        asset_id (str): the id of the asset.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError


@singledispatch
def update_transaction(connection, transaction_id, doc):
    """Update a transaction in the backlog table.

    Args:
        transaction_id (str): the id of the transaction.
        doc (dict): the values to update.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError


@singledispatch
def delete_transaction(connection, *transaction_id):
    """Delete one or more transactions from the backlog.

    Args:
        *transaction_id (str): the transaction(s) to delete.

    Returns:
        The database response.
    """
    raise NotImplementedError
@singledispatch
def get_stale_transactions(connection, reassign_delay):
    """Get a cursor of stale transactions.

    Transactions are considered stale if they have been assigned a node,
    but are still in the backlog after some amount of time specified in
    the configuration.

    Args:
        reassign_delay (int): threshold (in seconds) to mark a
            transaction stale.

    Returns:
        A cursor of transactions.
    """
    raise NotImplementedError


@singledispatch
def get_transaction_from_block(connection, transaction_id, block_id):
    """Get a transaction from a specific block.

    Args:
        transaction_id (str): the id of the transaction.
        block_id (str): the id of the block.

    Returns:
        The matching transaction.
    """
    raise NotImplementedError


@singledispatch
def get_transaction_from_backlog(connection, transaction_id):
    """Get a transaction from the backlog.

    Args:
        transaction_id (str): the id of the transaction.

    Returns:
        The matching transaction.
    """
    raise NotImplementedError


@singledispatch
def get_blocks_status_from_transaction(connection, transaction_id):
    """Retrieve election information for blocks containing a transaction.

    (Docstring fixed: it previously documented ``value``/``index``
    parameters that do not exist in the signature.)

    Args:
        transaction_id (str): the id of the transaction to look up.

    Returns:
        :obj:`list` of :obj:`dict`: A list of blocks with only election
        information.
    """
    raise NotImplementedError
@singledispatch
def get_asset_by_id(connection, asset_id):
    """Return the asset associated with an ``asset_id``.

    Note: the first parameter was previously misspelled ``conneciton``;
    it is renamed ``connection`` for consistency with every other query.
    singledispatch dispatches on the first *positional* argument, so
    registered implementations and callers are unaffected.

    Args:
        asset_id (str): The asset id.

    Returns:
        A backend cursor over the matching asset records.
    """
    raise NotImplementedError
@singledispatch
def get_spent(connection, transaction_id, condition_id):
    """Check if a `txid` was already used as an input.

    A transaction can be used as an input for another transaction.
    Bigchain needs to make sure that a given `txid` is only used once.

    Args:
        transaction_id (str): The id of the transaction.
        condition_id (int): The index of the condition in the respective
            transaction.

    Returns:
        The transaction that used the `txid` as an input else `None`.
    """
    raise NotImplementedError


@singledispatch
def get_spending_transactions(connection, inputs):
    """Return transactions which spend given inputs.

    Args:
        inputs (list): list of {txid, output}

    Returns:
        Iterator of (block_ids, transaction) for transactions that
        spend given inputs.
    """
    raise NotImplementedError


@singledispatch
def get_owned_ids(connection, owner):
    """Retrieve a list of `txids` that can be used as inputs.

    (Docstring fixed: "can we used has inputs" typo.)

    Args:
        owner (str): base58 encoded public key.

    Returns:
        Iterator of (block_id, transaction) for transactions
        that list given owner in conditions.
    """
    raise NotImplementedError


@singledispatch
def get_votes_by_block_id(connection, block_id):
    """Get all the votes cast for a specific block.

    Args:
        block_id (str): the block id to use.

    Returns:
        A cursor for the matching votes.
    """
    raise NotImplementedError


@singledispatch
def get_votes_by_block_id_and_voter(connection, block_id, node_pubkey):
    """Get all the votes cast for a specific block by a specific voter.

    Args:
        block_id (str): the block id to use.
        node_pubkey (str): base58 encoded public key.

    Returns:
        A cursor for the matching votes.
    """
    raise NotImplementedError


@singledispatch
def get_votes_for_blocks_by_voter(connection, block_ids, pubkey):
    """Return votes for many block_ids.

    Args:
        block_ids (set): block_ids.
        pubkey (str): public key of voting node.

    Returns:
        A cursor of votes matching given block_ids and public key.
    """
    raise NotImplementedError
@singledispatch
def write_block(connection, block):
    """Write a block to the bigchain table.

    Args:
        connection: the backend connection instance (dispatch argument).
        block (dict): the block to write.

    Returns:
        The database response.
    """
    raise NotImplementedError
@singledispatch
def get_block(connection, block_id):
    """Get a block from the bigchain table.

    Args:
        connection: the backend connection instance (dispatch argument).
        block_id (str): block id of the block to get.

    Returns:
        block (dict): the block or `None`.
    """
    raise NotImplementedError
@singledispatch
def get_block_with_transaction(connection, txid):
    """Get a block containing transaction id `txid`.

    Args:
        connection: the backend connection instance (dispatch argument).
        txid (str): id of transaction to be searched.

    Returns:
        block_id (int): the block id or `None`.
    """
    raise NotImplementedError
@singledispatch
def write_assets(connection, assets):
    """Write a list of assets to the assets table.

    Args:
        connection: the backend connection instance (dispatch argument).
        assets (list): a list of assets to write.

    Returns:
        The database response.
    """
    raise NotImplementedError
@singledispatch
def write_metadata(connection, metadata):
    """Write a list of metadata to the metadata table.

    Args:
        connection: the backend connection instance (dispatch argument).
        metadata (list): a list of metadata to write.

    Returns:
        The database response.
    """
    raise NotImplementedError
@singledispatch
def get_assets(connection, asset_ids):
    """Get a list of assets from the assets table.

    Args:
        connection: the backend connection instance (dispatch argument).
        asset_ids (list): a list of ids for the assets to be retrieved from
            the database.

    Returns:
        assets (list): the list of returned assets.
    """
    raise NotImplementedError
@singledispatch
def get_metadata(connection, transaction_ids):
    """Get a list of metadata from the metadata table.

    Args:
        connection: the backend connection instance (dispatch argument).
        transaction_ids (list): a list of ids for the metadata to be retrieved from
            the database.

    Returns:
        metadata (list): the list of returned metadata.
    """
    raise NotImplementedError
@singledispatch
def count_blocks(connection):
    """Count the number of blocks in the bigchain table.

    Args:
        connection: the backend connection instance (dispatch argument).

    Returns:
        The number of blocks.
    """
    raise NotImplementedError
@singledispatch
def count_backlog(connection):
    """Count the number of transactions in the backlog table.

    Args:
        connection: the backend connection instance (dispatch argument).

    Returns:
        The number of transactions in the backlog.
    """
    raise NotImplementedError
@singledispatch
def write_vote(connection, vote):
    """Write a vote to the votes table.

    Args:
        connection: the backend connection instance (dispatch argument).
        vote (dict): the vote to write.

    Returns:
        The database response.
    """
    raise NotImplementedError
@singledispatch
def get_genesis_block(connection):
    """Get the genesis block.

    Args:
        connection: the backend connection instance (dispatch argument).

    Returns:
        The genesis block.
    """
    raise NotImplementedError
@singledispatch
def get_last_voted_block_id(connection, node_pubkey):
    """Get the last voted block for a specific node.

    Args:
        connection: the backend connection instance (dispatch argument).
        node_pubkey (str): base58 encoded public key.

    Returns:
        The id of the last block the node has voted on. If the node didn't cast
        any vote then the genesis block id is returned.
    """
    raise NotImplementedError
@singledispatch
def get_txids_filtered(connection, asset_id, operation=None):
    """Return all transactions for a particular asset id and optional operation.

    Args:
        connection: the backend connection instance (dispatch argument).
        asset_id (str): ID of transaction that defined the asset.
        operation (str) (optional): Operation to filter on.
    """
    # NOTE(review): no Returns section in the original; presumably an
    # iterator/cursor of txids -- confirm against a backend implementation.
    raise NotImplementedError
@singledispatch
def get_new_blocks_feed(connection, start_block_id):
    """Return a generator that yields change events of the blocks feed.

    Args:
        connection: the backend connection instance (dispatch argument).
        start_block_id (str): ID of block to resume from.

    Returns:
        Generator of change events.
    """
    raise NotImplementedError
@singledispatch
def text_search(conn, search, *, language='english', case_sensitive=False,
                diacritic_sensitive=False, text_score=False, limit=0, table=None):
    """Return all the assets that match the text search.

    The results are sorted by text score.
    For more information about the behavior of text search on MongoDB see
    https://docs.mongodb.com/manual/reference/operator/query/text/#behavior

    Args:
        conn: the backend connection instance (dispatch argument).
        search (str): Text search string to query the text index
        language (str, optional): The language for the search and the rules for
            stemmer and tokenizer. If the language is ``None`` text search uses
            simple tokenization and no stemming.
        case_sensitive (bool, optional): Enable or disable case sensitive
            search.
        diacritic_sensitive (bool, optional): Enable or disable case sensitive
            diacritic search.
        text_score (bool, optional): If ``True`` returns the text score with
            each document.
        limit (int, optional): Limit the number of returned documents.
        table (str, optional): name of the table to search
            -- presumably 'assets' or 'metadata'; confirm against the
            MongoDB backend implementation.

    Returns:
        :obj:`list` of :obj:`dict`: a list of assets

    Raises:
        OperationError: If the backend does not support text search
    """
    # Default (non-MongoDB) behavior: text search is unsupported.
    raise OperationError('This query is only supported when running '
                         'BigchainDB with MongoDB as the backend.')
@singledispatch
def get_latest_block(conn):
    """Get the latest committed block, i.e. the block with the largest height.

    Args:
        conn: the backend connection instance (dispatch argument).
    """
    raise NotImplementedError
@singledispatch
def store_block(conn, block):
    """Write a new block to the `blocks` table.

    Args:
        conn: the backend connection instance (dispatch argument).
        block (dict): block with current height and block hash.

    Returns:
        The result of the operation.
    """
    raise NotImplementedError
@singledispatch
def store_unspent_outputs(connection, unspent_outputs):
    """Store unspent outputs in the ``utxo_set`` table.

    Args:
        connection: the backend connection instance (dispatch argument).
        unspent_outputs: the unspent outputs to store.
    """
    raise NotImplementedError
@singledispatch
def delete_unspent_outputs(connection, | |
k.name()):
if progress:
sys.stderr.write(".")
out.write('%s objects\n' % (klass.name()))
attrs = [attr for attr in klass.allAttrs() if attr.hasSQLColumn()]
colNames = [attr.name() for attr in attrs]
colNames.insert(0, klass.sqlSerialColumnName())
out.write(CSVJoiner.joinCSVFields(colNames) + "\n")
# write out a line for each object in this class
objlist = self.fetchObjectsOfClass(klass.name(), isDeep=False)
for obj in objlist:
fields = []
fields.append(str(obj.serialNum()))
for attr in attrs:
# jdh 2003-03-07: if the attribute is a dangling object reference, the value
# will be None. This means that dangling references will _not_ be remembered
# across dump/generate/create/insert procedures.
method = getattr(obj, attr.pyGetName())
value = method()
if value is None:
fields.append('')
elif isinstance(value, MiddleObject):
fields.append('%s.%d' % (value.klass().name(),
value.serialNum()))
else:
fields.append(str(value))
out.write(CSVJoiner.joinCSVFields(fields).replace('\r', '\\r'))
out.write('\n')
out.write('\n')
out.write('\n')
if progress:
sys.stderr.write("\n")
class Model(object):
    """SQL-generation additions mixed into the MiddleKit Model class."""
    def sqlDatabaseName(self):
        """Return the database name.

        Uses the 'Database' setting when present; otherwise falls back to
        self.name().
        """
        configured = self.setting('Database', None)
        return self.name() if configured is None else configured
class MiddleObjectMixIn(object):
    """SQL statement generation for MiddleObject (installed via MixIn() below).

    NOTE(review): this is legacy Python 2 code -- it references `basestring`.
    """
    def sqlObjRef(self):
        """Return the 64-bit integer value that refers to self in a SQL database.
        This only makes sense if the UseBigIntObjRefColumns setting is True.
        """
        return objRefJoin(self.klass().id(), self.serialNum())
    def sqlInsertStmt(self, unknowns, id=None):
        """Return SQL insert statements.
        Returns the SQL insert statements for MySQL (as a tuple) in the form:
            insert into table (name, ...) values (value, ...);
        May add an info object to the `unknowns` list for obj references that
        are not yet resolved.
        :param unknowns: list collecting UnknownSerialNumInfo records for
            dangling object references, to be patched later.
        :param id: explicit serial number; when not None it is emitted as the
            first column value.  NOTE(review): `id` is also passed where a
            boolean flag is expected (includeSerialColumn) and is only
            evaluated for truthiness there.
        """
        klass = self.klass()
        insertSQLStart, sqlAttrs = klass.insertSQLStart(includeSerialColumn=id)
        values = []
        append = values.append
        extend = values.extend
        if id is not None:
            append(str(id))
        for attr in sqlAttrs:
            try:
                value = attr.sqlValue(self.valueForAttr(attr))
            except UnknownSerialNumberError as exc:
                # Remember the unresolved reference; emit NULL(s) for now.
                exc.info.sourceObject = self
                unknowns.append(exc.info)
                if self.store().model().setting('UseBigIntObjRefColumns', False):
                    value = 'NULL'
                else:
                    value = ('NULL', 'NULL')
            if isinstance(value, basestring):
                append(value)
            else:
                # value could be sequence for attrs requiring multiple SQL columns
                extend(value)
        if not values:
            values = ['0']
        values = ','.join(values)
        return insertSQLStart + values + ');'
    def sqlUpdateStmt(self):
        """Return SQL update statement.
        Returns the SQL update statement of the form:
            update table set name=value, ... where idName=idValue
        Only the attributes recorded in self._mk_changedAttrs are written.
        Installed as a method of MiddleObject.
        """
        assert self._mk_changedAttrs
        klass = self.klass()
        res = []
        for attr in self._mk_changedAttrs.values():
            res.append(attr.sqlUpdateExpr(self.valueForAttr(attr)))
        res = ','.join(res)
        res = ('update ', klass.sqlTableName(), ' set ', res, ' where ',
               klass.sqlSerialColumnName(), '=', str(self.serialNum()))
        return ''.join(res)
    def sqlDeleteStmt(self):
        """Return SQL delete statement.
        Returns the SQL delete statement for MySQL of the form:
            delete from table where idName=idValue;
        Or if deletion is being marked with a timestamp:
            update table set deleted=Now();
        Installed as a method of MiddleObject.
        """
        klass = self.klass()
        assert klass is not None
        if self.store().model().setting('DeleteBehavior', 'delete') == 'mark':
            return 'update %s set deleted=%s where %s=%d;' % (
                klass.sqlTableName(), self.store().sqlNowCall(),
                klass.sqlSerialColumnName(), self.serialNum())
        else:
            return 'delete from %s where %s=%d;' % (klass.sqlTableName(),
                klass.sqlSerialColumnName(), self.serialNum())
    def referencingObjectsAndAttrsFetchKeywordArgs(self, backObjRefAttr):
        """Return fetch keyword args selecting objects whose backObjRefAttr
        points at self (one clause per obj-ref column layout)."""
        if self.store().setting('UseBigIntObjRefColumns'):
            return dict(refreshAttrs=True, clauses='WHERE %s=%s'
                % (backObjRefAttr.sqlColumnName(), self.sqlObjRef()))
        else:
            classIdName, objIdName = backObjRefAttr.sqlColumnNames()
            return dict(refreshAttrs=True, clauses='WHERE (%s=%s AND %s=%s)'
                % (classIdName, self.klass().id(), objIdName, self.serialNum()))
# Install the SQL helpers above onto MiddleObject.
MixIn(MiddleObject, MiddleObjectMixIn)
# Normally we don't have to invoke MixIn()--it's done automatically.
# However, that only works when augmenting MiddleKit.Core classes
# (MiddleObject belongs to MiddleKit.Run).
# NOTE(review): the next import appears to be for its side effects only
# (presumably it installs sqlSerialColumnName on Klass) -- confirm; the
# module name is never referenced here.
import MiddleKit.Design.KlassSQLSerialColumnName
class Klass(object):
    """SQL-generation additions mixed into the MiddleKit Klass class."""

    _fetchSQLStart = None   # cached result of fetchSQLStart()
    _insertSQLStart = None  # cache for insertSQLStart(); dict keyed by flag

    def sqlTableName(self):
        """Return the name of the SQL table for this class.

        Returns self.name().  Subclasses may wish to override to provide
        special quoting that prevents name collisions between table names
        and reserved words.
        """
        return self.name()

    def fetchSQLStart(self):
        """Return (and cache) the 'select <cols> from <table> ' SQL prefix."""
        if self._fetchSQLStart is None:
            attrs = [attr for attr in self.allDataAttrs() if attr.hasSQLColumn()]
            colNames = [self.sqlSerialColumnName()]
            colNames.extend(attr.sqlColumnName() for attr in attrs)
            self._fetchSQLStart = 'select %s from %s ' % (
                ','.join(colNames), self.sqlTableName())
        return self._fetchSQLStart

    def insertSQLStart(self, includeSerialColumn=False):
        """Return a tuple of insertSQLStart (a string) and sqlAttrs (a list).

        Fix: the cache is now keyed by includeSerialColumn.  Previously the
        first call's flag was frozen into the single cached string, so a
        later call with the opposite flag silently returned the wrong SQL
        prefix (e.g. inserting with an explicit serial after inserting
        without one).
        """
        if self._insertSQLStart is None:
            self._insertSQLStart = {}
        key = bool(includeSerialColumn)
        if key not in self._insertSQLStart:
            attrs = [attr for attr in self.allDataAttrs() if attr.hasSQLColumn()]
            fieldNames = [attr.sqlColumnName() for attr in attrs]
            # The serial column is always included when there are no data
            # columns, so the values clause is never empty.
            if key or not fieldNames:
                fieldNames.insert(0, self.sqlSerialColumnName())
            self._insertSQLStart[key] = 'insert into %s (%s) values (' % (
                self.sqlTableName(), ','.join(fieldNames))
            self._sqlAttrs = attrs
        return self._insertSQLStart[key], self._sqlAttrs
class Attr(object):
    """SQL-generation additions mixed into the MiddleKit Attr class."""
    def shouldRegisterChanges(self):
        """Return self.hasSQLColumn().

        There is no point in registering changes for an attribute that has
        no corresponding SQL column (the standard example being "list").
        """
        return self.hasSQLColumn()
    def hasSQLColumn(self):
        """Return whether a direct SQL column backs this attribute.

        Most attributes have one; derived attributes (and lists) do not.
        """
        return not self.get('isDerived', False)
    def sqlColumnName(self):
        """Return (and lazily cache) the SQL column name, which is self.name()."""
        if not self._sqlColumnName:
            self._sqlColumnName = self.name()
        return self._sqlColumnName
    def sqlValue(self, value):
        """Return the SQL rendering of a Python value.

        Subclasses typically override sqlForNonNone() (and rarely
        sqlForNone()) instead of this dispatcher.
        """
        return self.sqlForNone() if value is None else self.sqlForNonNone(value)
    def sqlForNone(self):
        """SQL rendering of a missing value."""
        return 'NULL'
    def sqlForNonNone(self, value):
        """SQL rendering of a present value; defaults to repr()."""
        return repr(value)
    def sqlUpdateExpr(self, value):
        """Return the assignment portion of an UPDATE statement, e.g. "foo='bar'".

        Built from sqlColumnName() and sqlValue().  Subclasses with special
        needs (such as multiple columns, see ObjRefAttr) override this.
        """
        return '%s=%s' % (self.sqlColumnName(), self.sqlValue(value))
    def readStoreDataRow(self, obj, row, i):
        """Read one value from the row into obj; return the next row index."""
        obj.setValueForAttr(self, row[i])
        return i + 1
class BasicTypeAttr(object):
    """Marker mix-in for basic (scalar) attribute types; no SQL overrides needed."""
    pass
class IntAttr(object):
    def sqlForNonNone(self, value):
        """Render an int for SQL."""
        return str(value)
        # it's important to use str() since an int might point
        # to a long (whose repr() would be suffixed with an 'L')
class LongAttr(object):
    def sqlForNonNone(self, value):
        """Render a long for SQL; str() avoids repr()'s trailing 'L' (Python 2)."""
        return str(value)
class DecimalAttr(object):
    def sqlForNonNone(self, value):
        """Render a Decimal for SQL as a bare number."""
        return str(value)  # repr() will give Decimal("3.4")
class BoolAttr(object):
    """Booleans are rendered as 1/0, which MySQL and MS SQL both accept."""
    def sqlForNonNone(self, value):
        """Map any truthy value to '1' and any falsy value to '0'."""
        return str(int(bool(value)))
class ObjRefAttr(object):
    """SQL generation for object-reference attributes.

    Two storage layouts exist: the old single 64-bit column
    (UseBigIntObjRefColumns=True) and the new pair of int columns for
    class id and object id.
    """
    def sqlColumnName(self):
        """Return (and cache) the column name(s) as a single string."""
        if not self._sqlColumnName:
            if self.setting('UseBigIntObjRefColumns', False):
                self._sqlColumnName = self.name() + 'Id'  # old way: one 64 bit column
            else:
                # new way: 2 int columns for class id and obj id
                self._sqlColumnName = '%s,%s' % self.sqlColumnNames()
        return self._sqlColumnName
    def sqlColumnNames(self):
        """Return (classIdName, objIdName) -- new two-column layout only."""
        if not self._sqlColumnNames:
            assert not self.setting('UseBigIntObjRefColumns', False)
            name = self.name()
            classIdName, objIdName = self.setting('ObjRefSuffixes')
            classIdName = name + classIdName
            objIdName = name + objIdName
            self._sqlColumnNames = (classIdName, objIdName)
        return self._sqlColumnNames
    def sqlForNone(self):
        """NULL for the old layout; a NULL per column for the new layout."""
        if self.setting('UseBigIntObjRefColumns', False):
            return 'NULL'
        else:
            return 'NULL,NULL'
    def sqlForNonNone(self, value):
        """Render a reference to `value`.

        Raises UnknownSerialNumberError when the target has not yet been
        assigned a serial number (serialNum() == 0), so the caller can
        record it and patch the reference later.

        NOTE(review): in the new layout this returns a *tuple* of strings
        (one per column); callers such as sqlInsertStmt extend their value
        list with it rather than appending.
        """
        assert isinstance(value, MiddleObject)
        if value.serialNum() == 0:
            info = UnknownSerialNumInfo()
            info.sourceAttr = self
            info.targetObject = value
            raise UnknownSerialNumberError(info)
        else:
            if self.setting('UseBigIntObjRefColumns', False):
                return str(value.sqlObjRef())
            else:
                return str(value.klass().id()), str(value.serialNum())
    def sqlUpdateExpr(self, value):
        """Return the assignment portion of an UPDATE statement.

        Overrides Attr.sqlUpdateExpr because the new layout needs two
        assignments (class id and obj id).
        """
        if self.setting('UseBigIntObjRefColumns', False):
            colName = self.sqlColumnName()
            return colName + '=' + self.sqlValue(value)
        else:
            classIdName, objIdName = self.sqlColumnNames()
            if value is None:
                classId = objId = 'NULL'
            else:
                classId = value.klass().id()
                objId = value.serialNum()
            return '%s=%s,%s=%s' % (classIdName, classId, objIdName, objId)
    def readStoreDataRow(self, obj, row, i):
        """Read the (classId, objId) pair from the row; return next index."""
        # This does *not* get called under the old approach of single obj ref columns.
        # See MiddleObject.readStoreData.
        classId, objId = row[i], row[i+1]
        if objId is None:
            value = None
        else:
            value = objRefJoin(classId, objId)
        # @@ 2004-20-02 ce ^ that's wasteful to join them just so they can be split later,
        # but it works well with the legacy code
        obj.setValueForAttr(self, value)
        return i + 2
class ListAttr(object):
    """List attributes are realized via back references, not a column."""
    def hasSQLColumn(self):
        # Lists have no direct SQL column in the owning table.
        return False
    def readStoreDataRow(self, obj, row, i):
        # Consumes nothing from the row.
        return i
class AnyDateTimeAttr(object):
    def sqlForNonNone(self, value):
        """Quote the value for SQL, dropping any fractional seconds.

        SQL databases seem to dislike the milliseconds part, so everything
        from the first '.' onward is chopped off.
        """
        return "'{}'".format(str(value).partition('.')[0])
class DateAttr(object):
def sqlForNonNone(self, value):
# We often get "YYYY-MM-DD HH:MM:SS" from datetime, | |
# py4ami/text_lib.py
import unicodedata
import nltk
import json
import re
import xml.etree.ElementTree as ET
from collections import Counter
from pathlib import Path
from bs4 import BeautifulSoup
import os
import glob
import logging
logging.debug("loading text_lib")
from .file_lib import AmiPath, FileLib
# Unicode normalization form used to strip diacritics.
NFKD = "NFKD"
"""tags
b i em strong
table
fig
"""
# Literal tag strings to delete during XML flattening.
TAGS = {
    "\n": "",
    "</sup>": "",
    "</sub>": "",
    "</xref>": "",
}
# Regex replacements marking xref/sup/sub with single-character sigils.
TAG_REGEXES = {
    " +<": "<",
    "<xref[^>]*>": "@",
    " *<sup>": "^",
    " *<sub>": "_",
}
# Punctuation tokens removed by Sentence.remove_punct.
PUNCT = "!@#$%^&*+{}[]:;'|<>,.?/~`\"\\"
# NOTE(review): developer-machine-specific paths below; they resolve
# relative to the current working directory at import time.
LIION_PROJ = os.path.abspath(os.path.normpath(os.path.join("../liion")))
PY_DIAG = "../../python/diagrams"
CCT_PROJ = os.path.abspath(os.path.normpath(
    os.path.join(PY_DIAG, "satish/cct")))
# NOTE(review): requires the NLTK stopwords corpus to be downloaded;
# raises at import time otherwise.
STOPWORDS_EN = nltk.corpus.stopwords.words("english")
# Domain stopwords common in scientific publishing boilerplate.
STOPWORDS_PUB = {
    'figure', 'permission', 'reproduced', 'copyright', 'authors', 'society', "university", 'table',
    "manuscript", "published", "declare", "conflict", "research", "diagram", "images", "version",
    "data", "Fig", "different", "time", "min", "experiments", "group", "analysis",
    "study", "activity", "treated", "Extraction", "using", "mean", "work", "path",
    "samples", "performed", "analyzed", "support", "values", "approved", "significant",
    "thank", "interest", "supported",
}
OIL186 = "/Users/pm286/projects/CEVOpen/searches/oil186"  # pmr only
class ProjectCorpus:
    """manages an AMI CProject, not yet fully incorporated"""

    # Fix: no logger was ever defined, so every self.logger/cls.logger
    # call below raised AttributeError.
    logger = logging.getLogger("project_corpus")

    def __init__(self, cwd, tree_glob="./*/"):
        """:param cwd: project root directory
        :param tree_glob: glob (relative to cwd) matching the child trees
        """
        self.cwd = cwd
        self.tree_glob = tree_glob
        self.words = []

    # NEEDS REFACTORING
    def read_analyze_child_documents(self):
        """Read each child tree, extract filtered words and report counts.

        Fix: logging calls previously passed extra positional args with no
        %-placeholders, which logging reports as formatting errors.
        """
        self.logger.warning("WARNING NYI FULLY")
        self.files = glob.glob(os.path.join(self.cwd, self.tree_glob))
        self.logger.warning("glob %s %s %s %s", self.cwd, self.tree_glob,
                            len(self.files), self.files[:5])
        for file in self.files:
            section = AmiSection()
            section.read_file_get_text_filtered_words(file)
            # NOTE(review): re-reads the file just to build a per-file
            # Counter that is never used beyond this loop iteration.
            c = Counter(AmiSection.get_section_with_words(file).words)
        wordz = TextUtil.get_aggregate_words_from_files(self.files)
        self.logger.warning("%s", wordz)
        cc = Counter(wordz)
        self.words = wordz
        self.logger.warning("Common %s", cc.most_common(50))

    def glob_corpus_files(self, glob_path, recurse=True):
        """Return globbed files via AmiPath.

        NOTE(review): glob_path is accepted but never passed to AmiPath --
        confirm intended usage before relying on it.
        """
        ami_path = AmiPath()
        ami_path.recurse = recurse
        files = ami_path.get_globbed_files()
        return files

    @classmethod
    def test(cls, project):
        """Smoke test: analyze the given project directory."""
        cls.logger.warning("start test %s", project)
        assert (os.path.exists(project))
        project = ProjectCorpus(project)
        project.read_analyze_child_documents()
        cls.logger.warning("end test")

    @classmethod
    def test_oil(cls):
        """Smoke test against the (developer-local) OIL186 corpus."""
        cls.logger.warning("start test %s", OIL186)
        assert (os.path.exists(OIL186))
        project = ProjectCorpus(OIL186)
        project.read_analyze_child_documents()
        cls.logger.warning("end test")

    def __str__(self):
        # Fix: previously joined self.sentences, which is never set on this
        # class (AttributeError); the aggregated words are what we hold.
        return " ".join(map(str, self.words))
class Document:
    """ a standalone hierarchical document
    level of Tree or below
    may contain a subset of the conventional document"""
    def __init__(self, file="f"):
        self.sections = None  # populated later; list of section files
        self.file = file      # the CTree directory for this document
        self.words = []       # aggregated filtered words
        # if path is not None and os.path.isfile(path):
        #     self.words = self.get_words_from_terminal_file(path)
    def create_analyze_sections(self):
        """Collect words from every sections/**/*.xml under self.file.

        NOTE(review): the "fulltext.xml" existence checks below are relative
        to the process CWD, not to self.file -- confirm intent.
        NOTE(review): JatsParser is not defined/imported in this module.
        """
        sections_file = os.path.abspath(os.path.join(self.file, "sections"))
        if not os.path.exists(sections_file):
            if not os.path.exists("fulltext.xml"):
                logging.error("No fulltext.xml, so no sections")
            else:
                logging.error(
                    "PLEASE CREATE sections with ami sections, will add pyami later")
                jats_parser = JatsParser()
                jats_parser.create_sections_from_xml("fulltext.xml")
            return
        files = glob.glob(os.path.join(sections_file, "**/*.xml"))
        for terminal_file in files:
            # REFACTOR
            # NOTE(review): TextUtil.get_words_from_terminal_file is not
            # visible in this module -- confirm it exists.
            terminal_page = TextUtil.get_words_from_terminal_file(
                terminal_file)
            self.words.extend(terminal_page.get_words_from_sentences())
    # REFACTOR
    @staticmethod
    def get_words_from_file(terminal_file):
        """Read one terminal section file and return its filtered words.

        NOTE(review): this duplicates much of
        AmiSection.read_file_get_text_filtered_words and reads attributes
        (ami_section.txt, ami_section.txt_file) that that call does not
        obviously set -- treat as work in progress.
        """
        ami_section = AmiSection()
        ami_section.read_file_get_text_filtered_words(terminal_file)
        ami_section.sentences = [Sentence(s) for s in (
            nltk.sent_tokenize(ami_section.txt))]
        ami_section.sentences = ami_section.sentences
        if os.path.exists(ami_section.txt_file):
            logging.info("skipping existing text")
        if ami_section.xml_file is not None:
            """read a path as an ami-section of larger document """
            with open(ami_section.xml_file, "r", encoding="utf-8") as f:
                ami_section.xml = f.read()
            # assumes this has been chunked to sections
            # logging.info("t", len(self.text), self.text[:50])
            ami_section.txt = ami_section.flatten_xml_to_text(ami_section.xml)
            # self.sentences = Sentence.merge_false_sentence_breaks(self.sentences)
            sentence_file = AmiSection.create_txt_filename_from_xml(
                ami_section.xml_file)
            if not os.path.exists(sentence_file):
                # logging.info("wrote sentence path", sentence_file)
                AmiSection.write_numbered_sentence_file(
                    sentence_file, ami_section.sentences)
        ami_section.get_words_from_sentences()
        return ami_section.words
class AmiSection:
    """the xml sub-document with text
    Currently either <title> or <p>
    ≈ Will often get annotated with sentence markers

    NOTE(review): the class body performs file I/O at import time (reads
    section_templates.json) and references self.logger, which is never
    defined -- any logger call below raises AttributeError.
    """
    SECTION_LIST = None
    XML_SUFF = ".xml"
    TXT_SUFF = ".txt"
    # sections in template path
    ABSTRACT = "ABSTRACT"
    ACKNOW = "ACKNOW"
    AFFIL = "AFFIL"
    AUTHOR = "AUTHOR"
    BACKGROUND = "BACKGROUND"
    DISCUSS = "DISCUSS"
    EMPTY = "EMPTY"
    ETHICS = "ETHICS"
    FIG_CAPTION = "FIG_CAPTION"
    FRONT = "FRONT"
    INTRO = "INTRO"
    JRNL = "JRNL"
    KWD = "KEYWORD"
    METHOD = "METHOD"
    MATERIAL = "MATERIAL"
    OCTREE = "OCTREE"
    PDFIMAGE = "PDFIMAGE"
    PUB_DATE = "PUB_DATE"
    PUBLISHER = "PUBLISHER"
    REFERENCE = "REFERENCE"
    # RESULTS = "results_discuss"
    RESULTS = "RESULTS"
    SECTIONS = "SECTIONS"
    SVG = "SVG"
    TABLE = "TABLE"
    TITLE = "TITLE"
    WORD = "WORD"
    SECTION_LIST0 = [
        ABSTRACT,
        ACKNOW,
        AFFIL,
        AUTHOR,
        BACKGROUND,
        DISCUSS,
        EMPTY,
        ETHICS,
        FIG_CAPTION,
        FRONT,
        INTRO,
        JRNL,
        KWD,
        METHOD,
        MATERIAL,
        OCTREE,
        PDFIMAGE,
        PUB_DATE,
        PUBLISHER,
        REFERENCE,
        RESULTS,
        # NOTE(review): RESULTS appears twice -- probably unintended.
        RESULTS,
        SECTIONS,
        SVG,
        TABLE,
        TITLE,
        WORD,
    ]
    SECTION_TEMPLATES_JSON = "section_templates.json"
    TEMPLATES = None
    def read_section_dict(file):
        """reads the dictionary of sections (helper run at class-creation time)"""
        dictf = os.path.join(FileLib.get_py4ami(), file)
        dikt = FileLib.read_pydictionary(dictf)
        logging.info(f"dict_keys: {dikt.keys()}")
        return dikt
    templates_json = Path(FileLib.get_pyami_resources(),
                          SECTION_TEMPLATES_JSON)
    SECTION_LIST1 = read_section_dict(templates_json)
    SECTION_LIST = SECTION_LIST1
    logging.debug("text_lib: reading section_templates")
    # NOTE(review): extra positional arg without a %-placeholder -- logging
    # will report a formatting error when DEBUG is enabled.
    logging.debug("SECTION LIST", SECTION_LIST1)
    logging.warning("loading templates.json")
    with open(templates_json, 'r') as json_file:
        TEMPLATES = json.load(json_file)
    def __init__(self):
        self.words = []          # filtered words extracted from sentences
        self.xml_file = None     # source XML path, if any
        self.xml = None          # raw XML content
        self.txt_file = None     # numbered-sentences sidecar path
        self.text = None         # flattened plain text
        self.write_text = True   # write the sidecar file when absent
        self.sentences = None    # list of Sentence
        self.name = None         # short name relative to the CTree
        # self.read_section()
    @classmethod
    def get_section_with_words(cls, file, filter=True):
        """Read `file` into an AmiSection and (optionally) filter its words.

        NOTE(review): the `filter` parameter shadows the builtin, and
        TextUtil.filter_words is not visible in this module -- confirm.
        """
        section = AmiSection()
        section.read_file_get_text_filtered_words(file)
        if filter:
            section.words = TextUtil.filter_words(section.words)
        return section
    def add_name(self, file):
        """creates name (within a sections/) dirx from path
        e.g. /Users/pm286/projects/openDiagram/physchem/resources/oil26/PMC5485486/sections/0_front/1
        _article-meta/13_abstract.xml
        yields 0_front/1_article-meta/13_abstract.xml """
        if file is None:
            self.logger.warning("null path")
            return None
        file_components = file.split("/")[::-1]
        components = []
        # include name up to CTree
        for i, c in enumerate(file_components):
            # read back to "sections" and then read the CTree name
            components.append(c)
            if c == "sections":
                components.append(file_components[i+1])
                break
        self.name = "/".join(components[::-1])
    def read_file_get_text_filtered_words(self, file):
        """reads xml or txt path
        reads path, flattens xml to text, removes stopwords and filters texts
        creates instance vars:
            self.xml_file
            self.txt_file (written when self.write_text is set)
            self.sentences tokenized by nltk
            self.words
        NOTE(review): despite the original docstring, this returns None --
        results are left on the instance.
        """
        self.text = None
        if file is None:
            raise Exception("path is None")
        if file.endswith(AmiSection.XML_SUFF):
            self.xml_file = file
            self.txt_file = AmiSection.create_txt_filename_from_xml(
                self.xml_file)
            if os.path.exists(self.txt_file):
                self.add_name(self.txt_file)
                self.sentences = AmiSection.read_numbered_sentences_file(
                    self.txt_file)
            if os.path.exists(self.xml_file):
                self.add_name(self.xml_file)
                """read a path as an ami-section of larger document """
                with open(self.xml_file, "r", encoding="utf-8") as f:
                    try:
                        self.xml = f.read()
                    except Exception as ex:
                        self.logger.error("error reading: ", file, ex)
                        raise ex
                self.text = self.flatten_xml_to_text(self.xml)
                self.sentences = [Sentence(s) for s in (
                    nltk.sent_tokenize(self.text))]
                # self.sentences = Sentence.merge_false_sentence_breaks(self.sentences)
                if self.write_text and not os.path.exists(self.txt_file):
                    self.logger.warning("wrote sentence path", self.txt_file)
                    AmiSection.write_numbered_sentence_file(
                        self.txt_file, self.sentences)
        self.words = self.get_words_from_sentences()
    def __str__(self):
        # NOTE(review): s is built but discarded; this returns self.name
        # (which may be None) -- confirm whether s was meant to be returned.
        s = f"xml: {self.xml_file}\n"
        s += f"txt: {self.txt_file}"
        return self.name
    # static utilities
    @staticmethod
    def check_sections(sections):
        """Raise for any section name not in SECTION_LIST."""
        for section in sections:
            if section not in AmiSection.SECTION_LIST:
                print("\n===========allowed sections=========\n",
                      AmiSection.SECTION_LIST,
                      "\n====================================")
                raise Exception("unknown section: ", section)
    @staticmethod
    def create_txt_filename_from_xml(xml_file):
        """Replace the trailing '.xml' with '.txt'."""
        sentence_file = xml_file[:-
                                 len(AmiSection.XML_SUFF)] + AmiSection.TXT_SUFF
        return sentence_file
    @staticmethod
    def flatten_xml_to_text(xml):
        """removes xml tags, diacritics, and non-ascii characters"""
        text = TextUtil.strip_xml_tags(xml)
        # NOTE(review): remove_para_tags / flatten_non_ascii are not visible
        # in this chunk of TextUtil -- confirm they exist.
        text = TextUtil.remove_para_tags(text)
        text = unicodedata.normalize(NFKD, text)
        text = TextUtil.flatten_non_ascii(text)
        return text
    @classmethod
    def write_numbered_sentence_file(cls, file, sentences):
        """writes numbered sentences, one per line as '<i>: <text>'"""
        with open(file, "w", encoding="utf-8") as f:
            for i, sentence in enumerate(sentences):
                f.write(str(i) + Sentence.NUMBER_SPLIT +
                        sentence.string + "\n")
    @classmethod
    def read_numbered_sentences_file(cls, file):
        """ read path with lines of form line_no<sep>text where line_no starts at 0"""
        sentences = None
        if file is not None and os.path.exists(file):
            with open(file, "r", encoding="utf-8") as f:
                lines = f.readlines()
                if len(lines) == 0:
                    cls.logger.warning("warning empty path", file)
                    pass
                try:
                    sentences = Sentence.read_number_sentences(lines)
                except Exception as ex:
                    print(ex, file, "in read numbered sentences")
        return sentences
    def get_words_from_sentences(self) -> list:
        """Extend self.words with the words of every sentence; return it."""
        for sentence in self.sentences:
            words = sentence.words
            self.words.extend(words)
        return self.words
class Sentence:
    """A single sentence plus its (punctuation-filtered) word list."""

    NUMBER_SPLIT = ": "  # separator between the line number and the text

    def __init__(self, string):
        self.string = string
        # Brute split at spaces (much faster than nltk.word_tokenize),
        # then drop pure-punctuation tokens.
        self.words = Sentence.remove_punct(string.split(" "))

    @staticmethod
    def tokenize_to_words(string):
        """ may be quite slow compared to brute splitting at spaces
        returns: list of words"""
        return nltk.word_tokenize(string)

    @staticmethod
    def remove_punct(tokens):
        """removes tokens consisting of punctuation present in `PUNCT`
        tokens: list of words
        returns: words diminished by deleted punctuation
        """
        return [token for token in tokens if token not in PUNCT]

    @staticmethod
    def read_numbered_line(text):
        """Parse a '<number>: <text>' line; return (number, text).

        Fixes: split is now bounded to the first separator, so sentence
        text containing ': ' is no longer truncated; and the whole number
        field is validated as digits (previously only the first character
        was checked, so '1a: x' crashed in int()).
        """
        number, sep, rest = text.partition(Sentence.NUMBER_SPLIT)
        if not sep or not number.isdigit():
            raise Exception("Not a numbered sentence", text)
        return int(number), rest

    @staticmethod
    def read_number_sentences(lines):
        """reads lines of form line_no<sep>text where line_no starts at 0"""
        sentences = []
        lasti = -1
        for i, line in enumerate(lines):
            line_no, text = Sentence.read_numbered_line(line)
            if i != lasti + 1 or i != line_no:
                raise Exception(
                    "failed to read lines in order", i, line_no, line)
            lasti = i
            sentences.append(Sentence(text))
        return sentences

    def __str__(self):
        return " ".join(map(str, self.words))
class TextUtil:
logger = logging.getLogger("text_util")
    @staticmethod
    def strip_xml_tags(text):
        """Return the concatenated text content of an XML string, tags removed.

        NOTE(review): BeautifulSoup's "xml" parser requires lxml to be
        installed -- confirm it is a declared dependency.
        """
        soup = BeautifulSoup(text, "xml")
        stripped_text = soup.get_text()
        return stripped_text
@staticmethod
def clean_line_ends(text):
"""change line ends such as \r, \r\n to \n
"""
return re.sub[r'[\r|\n|\r\n]+', '\n', text]
@staticmethod
def join_xml_texts(xml_string):
"""remove all tags in XML
replace all tags by spaces. We may later wish to exclude some names tags (e.g. <sup>)
:param xml_string: XML in serialized form
:returns: flattened string with spaces | |
"""Python utilities to load and wrap SEGGER JLink library."""
# The MIT License (MIT)
# Copyright (c) 2019 ezflash
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import ctypes
import logging
import os
import sys
from enum import IntEnum
class JLINKARM_HOSTIF(IntEnum):
    """Host interface used to reach the J-Link probe (USB or TCP/IP)."""
    JLINKARM_HOSTIF_USB = 1
    JLINKARM_HOSTIF_IP = 2
class JLINKARM_TIF(IntEnum):
    """Target interface type (JTAG, SWD, etc.) as passed to the J-Link DLL."""
    JLINKARM_TIF_JTAG = 0
    JLINKARM_TIF_SWD = 1
    JLINKARM_TIF_BDM3 = 2
    JLINKARM_TIF_FINE = 3
    JLINKARM_TIF_2_WIRE_JTAG_PIC32 = 4
    JLINKARM_TIF_SPI = 5
    JLINKARM_TIF_C2 = 6
class JLINKARM_SPEED(IntEnum):
    """Special interface-speed values; ordinary speeds are given in kHz."""
    JLINKARM_SPEED_AUTO = 0
    JLINKARM_SPEED_INVALID = 0xFFFE
    JLINKARM_SPEED_ADAPTIVE = 0xFFFF
class JLINKARM_EMU_CONNECT_INFO(ctypes.Structure):
    """JLINK device connection information structure (264 bytes).

    Mirrors the C struct filled in by the J-Link DLL's emulator discovery.
    NOTE(review): acProduct is declared c_byte*32 while acNickName/acFWString
    use c_char -- presumably matching the DLL header; confirm before use.
    """
    _fields_ = [
        (
            "SerialNumber",
            ctypes.c_uint,
        ),  # This is the serial number reported in the discovery process, which is the "true serial number" for newer J-Links and 123456 for older J-Links.
        (
            "Connection",
            ctypes.c_uint,
        ),  # Either JLINKARM_HOSTIF_USB = 1 or JLINKARM_HOSTIF_IP = 2
        (
            "USBAddr",
            ctypes.c_uint,
        ),  # USB Addr. Default is 0, values of 0..3 are permitted (Only filled if for J-Links connected via USB)
        (
            "aIPAddr",
            ctypes.c_ubyte * 16,
        ),  # IP Addr. in case emulator is connected via IP. For IP4 (current version), only the first 4 bytes are used.
        (
            "Time",
            ctypes.c_int,
        ),  # J-Link via IP only: Time period [ms] after which we have received the UDP discover answer from emulator (-1 if emulator is connected over USB)
        (
            "Time_us",
            ctypes.c_ulonglong,
        ),  # J-Link via IP only: Time period [us] after which we have received the UDP discover answer from emulator (-1 if emulator is connected over USB)
        ("HWVersion", ctypes.c_uint),  # J-Link via IP only: Hardware version of J-Link
        ("abMACAddr", ctypes.c_ubyte * 6),  # J-Link via IP only: MAC Addr
        ("acProduct", ctypes.c_byte * 32),  # Product name
        ("acNickName", ctypes.c_char * 32),  # J-Link via IP only: Nickname of J-Link
        (
            "acFWString",
            ctypes.c_char * 112,
        ),  # J-Link via IP only: Firmware string of J-Link
        (
            "IsDHCPAssignedIP",
            ctypes.c_char,
        ),  # J-Link via IP only: Is J-Link configured for IP address reception via DHCP?
        ("IsDHCPAssignedIPIsValid", ctypes.c_char),  # J-Link via IP only
        ("NumIPConnectionsIsValid", ctypes.c_char),  # J-Link via IP only
        ("aPadding", ctypes.c_ubyte * 34),
    ]  # Pad struct size to 264 bytes
class JLINKARM_ERROR_CODES(IntEnum):
    """JLink error codes enumeration.

    Negative return values from the J-Link DLL calls; pyjlink.connect()
    maps a negative JLINKARM_Connect() result back to one of these names
    when raising pyJLinkException.
    """
    JLINK_ERR_EMU_NO_CONNECTION = (
        -256
    )  # (0xFFFFFF00) No connection to emulator / Connection to emulator lost
    JLINK_ERR_EMU_COMM_ERROR = (
        -257
    )  # (0xFFFFFEFF) Emulator communication error (host-interface module reported error)
    JLINK_ERR_DLL_NOT_OPEN = (
        -258
    )  # (0xFFFFFEFE) DLL has not been opened but needs to be (JLINKARM_Open() needs to be called first)
    JLINK_ERR_VCC_FAILURE = (
        -259
    )  # (0xFFFFFEFD) Target system has no power (Measured VTref < 1V)
    JLINK_ERR_INVALID_HANDLE = (
        -260
    )  # (0xFFFFFEFC) File handle / memory area handle needed for operation, but given handle is not valid
    JLINK_ERR_NO_CPU_FOUND = -261  # (0xFFFFFEFB) Could not find supported CPU
    JLINK_ERR_EMU_FEATURE_NOT_SUPPORTED = (
        -262
    )  # (0xFFFFFEFA) Emulator does not support the selected feature (Usually returned by functions which need specific emulator capabilities)
    JLINK_ERR_EMU_NO_MEMORY = (
        -263
    )  # (0xFFFFFEF9) Emulator does not have enough memory to perform the requested operation
    JLINK_ERR_TIF_STATUS_ERROR = (
        -264
    )  # (0xFFFFFEF8) Things such as "TCK is low but should be high"
    JLINK_ERR_FLASH_PROG_COMPARE_FAILED = -265
    JLINK_ERR_FLASH_PROG_PROGRAM_FAILED = -266
    JLINK_ERR_FLASH_PROG_VERIFY_FAILED = -267
    JLINK_ERR_OPEN_FILE_FAILED = -268
    JLINK_ERR_UNKNOWN_FILE_FORMAT = -269
    JLINK_ERR_WRITE_TARGET_MEMORY_FAILED = -270
class pyJLinkException(Exception):
    """Error raised by the pyjlink wrapper when a J-Link operation fails."""
class pyjlink(object):
"""Provides an API to a SEGGER J-Link debug probe.
The J-Link can be connected to the host PC via USB or ethernet. Specific
J-Link debug probe can be selected if multiple J-Links are connected.
    The pyjlink class allows functionality such as:
* Initiate (open) a connection to a core on a target system
* Close a connection to a target
* Accessing the memory on a target system for reading and writing
* Reset the core on the target system
* Restart the core on the target system
* Download a binary file (.hex format) to the memory on the target system
Attributes:
key: Windows Registry path for the SEGGER software
library: Windows path for SEGGER SDK J-Link DLL
serialno: Serial number of the J-Link which shall be selected
iphost: Host name or an IP address for a connection to the J-Link via TCP/IP
speed: Speed of the JTAG connection in kHz
Device: Name of the device connected to the J-Link
jl: Handle for the J-Link DLL
logger: Handle for the class logger
"""
version = "1.01"
def __init__(self):
"""Set the initial state of the pyjlink object."""
self.serialno = None
self.iphost = None
# Speed of JTAG connection in kHz.
self.speed = 2000
self.Device = b"Cortex-M33" # select M33 by default to issue exit dormant state
self.jl = None
self.logger = logging.getLogger(self.__class__.__name__)
self.library = None
def __del__(self):
"""Close the link on class termination."""
self.close()
def init(self):
"""Initialize the connection to the target system.
Raises:
TODO: TODO
"""
self.logger.debug("init")
if self.library is None:
import platform
if platform.system() in "Darwin":
dll = "libjlinkarm.dylib"
elif platform.system() in "Linux":
if platform.machine().startswith("arm"):
dll = "libjlinkarm_arm.so.6.82.2"
else:
if platform.architecture()[0] == "64bit":
dll = "libjlinkarm.so.6.82.2"
else:
dll = "libjlinkarm_x86.so.6.82.2"
else:
if platform.architecture()[0] == "64bit":
dll = "JLink_x64.dll"
else:
dll = "JLinkARM.dll"
if getattr(sys, "frozen", False):
self.library = dll
else:
self.library = os.path.join(
os.path.dirname(__file__), "..", "third-party", "segger", dll
)
try:
self.jl = ctypes.CDLL(self.library)
except Exception as ex:
logging.error("Error loading J-Link Library: {}".format(ex))
sys.exit(1)
self.logger.debug("J-Link library loaded")
return
    def browse(self):
        """Browse JLink devices attached to the host.

        Queries the DLL for up to 20 USB-connected probes.

        Returns:
            A ctypes array of JLINKARM_EMU_CONNECT_INFO when
            JLINKARM_EMU_GetList returns non-zero; otherwise falls off the
            end and implicitly returns ``None``.
            NOTE(review): a host with zero attached probes therefore yields
            ``None`` rather than an empty list — confirm callers handle that.
        """
        maxDevice = 20
        interfaces = (JLINKARM_EMU_CONNECT_INFO * maxDevice)()
        if self.jl.JLINKARM_EMU_GetList(
            JLINKARM_HOSTIF.JLINKARM_HOSTIF_USB,
            ctypes.byref(interfaces),
            ctypes.c_int(maxDevice),
        ):
            self.logger.debug("Get device List")
            return interfaces
def connect(self, serialno):
"""Initialize the connection to the target system."""
if type(serialno) != type(int):
try:
serialno = int(serialno)
except Exception as ex:
self.logger.debug(
"Failed to interpret JLink id: {}, will use default interface\nErr: {}".format(
serialno, ex
)
)
# return
if serialno:
self.logger.debug(
"Selecting J-Link with the serial number: " + str(serialno)
)
c_serialno = ctypes.c_uint32(serialno)
r = self.jl.JLINKARM_EMU_SelectByUSBSN(c_serialno)
if r < 0:
raise pyJLinkException("Error: Specific serial number not found on USB")
self.logger.debug("Opens the connection to J-Link")
self.jl.JLINKARM_Open.restype = ctypes.c_char_p
sError = self.jl.JLINKARM_Open()
if sError is not None:
raise pyJLinkException(sError)
self.logger.debug("Select device or core")
c_acIn = ctypes.c_char_p(b"Device = " + self.Device)
acOut = b" " * 80
c_acOut = ctypes.c_char_p(acOut)
c_buffersize = ctypes.c_int(80)
self.jl.JLINKARM_ExecCommand(c_acIn, c_acOut, c_buffersize)
if not acOut[0] == 0:
raise pyJLinkException(acOut)
self.logger.debug("Selects the SWD interface")
ctypes.c_interface = ctypes.c_int(JLINKARM_TIF.JLINKARM_TIF_SWD.value)
self.jl.JLINKARM_TIF_Select(ctypes.c_interface)
self.logger.debug("Set the speed for J-Link communication with the core")
c_speed = ctypes.c_uint32(self.speed)
self.jl.JLINKARM_SetSpeed(c_speed)
self.logger.debug("Establish a connection to the target system")
if self.jl.JLINKARM_IsConnected():
self.logger.debug("Closing existing connection")
self.close()
r = self.jl.JLINKARM_Connect()
# self.wr_mem(32, 0x50040300, 0x8)
if r == -1:
raise pyJLinkException("Unspecified error")
elif r < 0:
raise pyJLinkException(JLINKARM_ERROR_CODES(r).name)
try:
self.logger.debug("Read 69x identifier")
id = self.rd_mem(32, 0x50040200, 4)
except Exception:
self.logger.debug("Failed to read 69x identifier")
self.logger.debug("Read 58x/68x identifier")
id = self.rd_mem(8, 0x50003200, 5)
pass
c_acIn = ctypes.c_char_p(b"DisableInfoWinFlashDL")
acOut = b" " * 80
c_acOut = ctypes.c_char_p(acOut)
c_buffersize = ctypes.c_int(80)
self.jl.JLINKARM_ExecCommand(c_acIn, c_acOut, c_buffersize)
return str(id)
def close(self):
"""Close the connection to the target system."""
self.jl.JLINKARM_Close()
def rd_mem(self, widthBits, addr, | |
ind_ptr | pdarray
CSR format array of the ROW pointers for the matrix
gb_rep | ak.GroupBy
Multi-level GroupBy representation of the matrix.
Organized by ROW, then COLUMN. Used internally.
primary_gb | ak.GroupBy
Single-level GroupBy on ROW indices. Used internally.
off_gb | ak.GroupBy
Single-level GroupBy on COLUMN indices. Used internally.
Notes:
------
- [Default return format for all matrix operations] -
Effectively immutable - adding or subtracting specific
points from the matrix is impossible, please use COO
for those shenanigans.
- Notable exception being that you're encouraged to
scale your matrix's values directly (rather than
creating a new, scaled object with the * operator)
if you anticipate running low on memory.
Duplicate entries in the matrix are summed together upon
instantiation.
Houses the _dense_vector_mul function. Recommended format for
a matrix if you plan to be doing a number of multiplications by
a dense vector, as it will not need to convert to another format.
Recommended format for the right-hand matrix in a matrix
multiplication.
'''
def __init__(self, row_inds, col_inds, values, shape=None):
try:
assert(len(row_inds) == len(col_inds) == len(values))
except AssertionError:
error_msg = "Size mismatch in input arrays: "
error_msg1 = f"row_inds: {row_inds}, col_inds: {col_inds}"
error_msg2 = f"values: {values}"
raise AttributeError(error_msg+error_msg1+error_msg2)
# GroupBy object representing the CSR format sparse matrix
self.gb_rep = ak.GroupBy([row_inds, col_inds])
# GroupBy on primary indices (row)
self.primary_gb = ak.GroupBy(self.gb_rep.unique_keys[0])
self.off_gb = ak.GroupBy(self.gb_rep.unique_keys[1])
_sparse_matrix.__init__(self, row_inds, col_inds, values, shape)
return
def to_csr(self):
'''
Returns 'self' to keep it consistent with the other to_*
calls that return a matrix object.
'''
return self
def to_csc(self):
'''
converts matrix to CSC format.
'''
row_inds = self.gb_rep.unique_keys[0]
col_inds = self.gb_rep.unique_keys[1]
return ak_csc(row_inds=row_inds, col_inds=col_inds,
values=self.data, shape=self.shape)
def to_coo(self):
'''
converts matrix to COO format.
'''
row_inds = self.gb_rep.unique_keys[0]
col_inds = self.gb_rep.unique_keys[1]
return ak_coo(row_inds=row_inds, col_inds=col_inds,
values=self.data, shape=self.shape)
    def _dense_vector_mul(self, other):
        '''
        Matrix @ dense-vector product.

        other: ak.pdarray
            other is treated as a dense vector in this implementation;
            its length must equal self.shape[1].

        Returns a dense pdarray of length self.shape[0]; rows with no
        stored entries come back as zeros.
        '''
        try:
            assert len(other) == self.shape[1]
        except AssertionError:
            print(f'size mismatch b/n vector, matrix: {len(other)} {self.shape[1]}')
            raise
        # Scale each stored value by the vector entry of its column index...
        dot_products = (self.data * other[self.gb_rep.unique_keys[1]])
        # ...then sum the partial products within each row ([1] drops the
        # keys returned by GroupBy.sum, keeping only the sums).
        sum_results = self.primary_gb.sum(dot_products)[1]
        # Scatter the per-row sums into a dense zero-initialised result so
        # empty rows contribute 0.
        complete_result = ak.zeros(self.shape[0], other.dtype.name)
        complete_result[self.primary_gb.unique_keys] = sum_results
        return complete_result
def to_scipy_sparse(self, sparse_format='csr'):
'''
Supported formats: 'csr', 'csc', 'coo'
child function to normalize values before passing
them to _sparse_matrix's to_scipy_sparse for
conversion.
'''
values = self.data
row = self.gb_rep.unique_keys[0]
col = self.gb_rep.unique_keys[1]
shape=self.shape
return _sparse_matrix.to_scipy_sparse(
self,
values=values,
row=row,
col=col,
shape=shape,
sparse_format=sparse_format
)
class ak_csc(_sparse_matrix):
    '''
    Sparse matrix in Compressed Sparse Column format:
    Instantiated with the following arguments:
        row_inds | pdarray
            required argument, row indices of the nonzero values
            of the matrix.
        col_inds | pdarray
            required argument, col indices of the nonzero values
            of the matrix.
        values | pdarray
            required argument, nonzero values of the matrix.
        shape | 2-tuple
            optional argument, tuple of ints representing the
            dimensions of the matrix.
    Attributes:
    -----------
    shape | 2-tuple
        shape/dimensions of the matrix.
    nnz | int
        number of stored values, including explicit zeros.
    data | pdarray
        CSC format data array of the matrix, stores values
    indices | pdarray
        CSC format array of the ROW indices of nonzero values
    ind_ptr | pdarray
        CSC format array of the COLUMN pointers for the matrix
    gb_rep | ak.GroupBy
        Multi-level GroupBy representation of the matrix.
        Organized by COLUMN, then ROW. Used internally.
    primary_gb | ak.GroupBy
        Single-level GroupBy on COLUMN indices. Used internally.
    off_gb | ak.GroupBy
        Single-level GroupBy on ROW indices. Used internally.
    Notes:
    ------
    Duplicate entries in the matrix are summed together upon
    instantiation.
    Effectively immutable - adding or subtracting specific
    points from the matrix is impossible, please use COO
    for those shenanigans.
        - Notable exception being that you're encouraged to
          scale your matrix's values directly (rather than
          creating a new, scaled object with the * operator)
          if you anticipate running low on memory.
    Houses the _spm_mul function. Recommended format for the
    left-hand matrix in a matrix multiplication.
    Will house the operational _dense_vector_rmul function
    when it actually works/becomes relevant, and as such
    is the recommended format for performing a vector-matrix
    multiplication (as opposed to a matrix-vector multiplication)
    '''
    def __init__(self, row_inds, col_inds, values, shape=None):
        # The three parallel arrays must describe the same set of entries.
        try:
            assert(len(row_inds) == len(col_inds) == len(values))
        except AssertionError:
            error_msg = "Size mismatch in input arrays: "
            error_msg1 = f"row_inds: {row_inds}, col_inds: {col_inds}"
            error_msg2 = f"values: {values}"
            raise AttributeError(error_msg+error_msg1+error_msg2)
        # GroupBy object representing the CSC format sparse matrix
        # (column-major: column is the primary key, row the secondary).
        self.gb_rep = ak.GroupBy([col_inds, row_inds])
        # Column-only grouping drives column-wise reductions.
        self.primary_gb = ak.GroupBy(self.gb_rep.unique_keys[0])
        # Row-only grouping, used by conversions.
        self.off_gb = ak.GroupBy(self.gb_rep.unique_keys[1])
        _sparse_matrix.__init__(self, row_inds, col_inds, values, shape)
        return
    def to_csr(self):
        '''
        converts matrix to CSR format.
        Note: unique_keys is (col, row) here, hence the swapped indices.
        '''
        row_inds = self.gb_rep.unique_keys[1]
        col_inds = self.gb_rep.unique_keys[0]
        return ak_csr(row_inds=row_inds, col_inds=col_inds,
                      values=self.data, shape=self.shape)
    def to_csc(self):
        '''
        returns self for consistency w/other to_* functions
        '''
        return self
    def to_coo(self):
        '''
        converts matrix to a COO format.
        '''
        row_inds = self.gb_rep.unique_keys[1]
        col_inds = self.gb_rep.unique_keys[0]
        return ak_coo(row_inds=row_inds, col_inds=col_inds,
                      values=self.data, shape=self.shape)
    def to_scipy_sparse(self, sparse_format='csr'):
        '''
        Supported formats: 'csr', 'csc', 'coo'
        child function to normalize values before passing
        them to _sparse_matrix's to_scipy_sparse for
        conversion.
        '''
        values = self.data
        row = self.gb_rep.unique_keys[1]
        col = self.gb_rep.unique_keys[0]
        shape=self.shape
        return _sparse_matrix.to_scipy_sparse(
            self,
            values=values,
            row=row,
            col=col,
            shape=shape,
            sparse_format=sparse_format
        )
    def _spm_mul(self, other, verbose=False):
        '''
        sparse matrix-matrix multiplication (self @ other).

        NOTE(review): appears to assume `other` is CSR — its ind_ptr is
        indexed as ROW pointers and its gb_rep keys as (row, col); confirm
        callers always pass a CSR right-hand side. Returns an ak_csr of
        shape (self.shape[0], other.shape[1]).
        '''
        # Check to make sure sizes line up before we do anything expensive
        try:
            assert(self.shape[1] == other.shape[0])
        except AssertionError:
            error_msg = f'array size mismatch: {self.shape[1]} {other.shape[0]}'
            raise AttributeError(error_msg)
        # For each stored entry of self with column c, slice other's row c
        # out of its CSR row-pointer array.
        starts = other.ind_ptr[self.gb_rep.unique_keys[0]]
        ends = other.ind_ptr[self.gb_rep.unique_keys[0]+1]
        fullsize = (ends-starts).sum()
        # prints the number of multiplies, for debugging purposes.
        if verbose:
            print(fullsize)
        # NOTE(review): gen_ranges/expand are module-level helpers defined
        # elsewhere in this file; presumably gen_ranges flattens the
        # per-entry slices into one index vector and expand replicates
        # per-entry data to match — confirm against their definitions.
        fullsegs, zfilter, ranges = gen_ranges(starts, ends)
        fullBdom = other.gb_rep.unique_keys[1][ranges]
        fullAdom = expand(self.gb_rep.unique_keys[1], fullsegs, fullsize, zfilter)
        fullBval = other.data[ranges]
        fullAval = expand(self.data, fullsegs, fullsize, zfilter)
        fullprod = fullAval * fullBval
        # Sum partial products that land on the same (row, col) cell.
        proddomGB = ak.GroupBy([fullAdom, fullBdom])
        result = proddomGB.sum(fullprod)
        return ak_csr(
            result[0][0],
            result[0][1],
            shape=(self.shape[0], other.shape[1]),
            values=result[1]
        )
class ak_coo(_sparse_matrix):
'''
    Sparse matrix in COOrdinate (COO) format:
Instantiated with the following arguments:
row_inds | pdarray
required argument, row indices of the nonzero values
of the matrix.
col_inds | pdarray
required argument, col indices of the nonzero values
of the matrix.
values | pdarray
required argument, nonzero values of the matrix.
shape | 2-tuple
optional argument, tuple of ints representing the
dimensions of the matrix.
Attributes:
-----------
shape | 2-tuple
shape/dimensions of the matrix.
nnz | int
number of stored values, including explicit zeros.
data | pdarray
COO format data array of the matrix, stores values
row_inds | pdarray
COO format array of row indices of nonzero values
col_inds | pdarray
COO format array of col indices of nonzero values
Notes:
------
Duplicate entries are ALLOWED in this format. They will
be summed together upon conversion to any other supported
format, however.
Format most suited to adding/subtracting entries to the
matrix. No inbuilt-method currently exists (you'd have
to edit the row_inds, col_inds, data arrays directly)
but could (and should) be added in the future.
Isn't the optimal format for doing just about any arithmetic
operations - I'm far from an expert, but I'd primarily use
COO if I were interested in adding and subtracting
entries to/from the matrix.
'''
def __init__(self, row_inds, col_inds, values, shape=None):
'''
Doesn't use the _sparse_matrix __init__ function, as
it doesn't share the same structure as CSC, CSR.
'''
try:
assert(len(row_inds) == len(col_inds) == len(values))
except AssertionError:
error_msg = "Size mismatch in input arrays: "
error_msg1 = f"row_inds: {row_inds}, col_inds: {col_inds}"
error_msg2 = f"values: {values}"
raise AttributeError(error_msg+error_msg1+error_msg2)
self.row_inds = row_inds
self.col_inds = col_inds
self.data = values
self.nnz = len(self.data)
if shape != None:
self.shapetype = 'defined'
self.shape = shape
else:
self.shapetype = 'implicit'
dim_row = row_inds.max() + 1
dim_col = col_inds.max() + 1
self.shape = (dim_row, dim_col)
return
def __add__(self, other):
'''
Returns a csr matrix for now, can change later.
Addition | |
<gh_stars>10-100
import os
import json
import argparse
import numpy as np
import pprint
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from copy import deepcopy
from models.vae.parallelly_reparameterized_vae import ParallellyReparameterizedVAE
from models.vae.sequentially_reparameterized_vae import SequentiallyReparameterizedVAE
from models.student_teacher import StudentTeacher
from helpers.layers import EarlyStopping, init_weights
from datasets.loader import get_split_data_loaders, get_loader
from optimizers.adamnormgrad import AdamNormGrad
from helpers.grapher import Grapher
from helpers.fid import train_fid_model
from helpers.metrics import calculate_consistency, calculate_fid, estimate_fisher
from helpers.utils import float_type, ones_like, \
append_to_csv, num_samples_in_loader, check_or_create_dir, \
dummy_context, number_of_parameters
parser = argparse.ArgumentParser(description='LifeLong VAE Pytorch')
# Task parameters
parser.add_argument('--uid', type=str, default="",
                    help="add a custom task-specific unique id; appended to name (default: None)")
parser.add_argument('--task', type=str, default="mnist",
                    help="""task to work on (can specify multiple) [mnist / cifar10 /
                    fashion / svhn_centered / svhn / clutter / permuted] (default: mnist)""")
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='minimum number of epochs to train (default: 10)')
parser.add_argument('--continuous-size', type=int, default=32, metavar='L',
                    help='latent size of continuous variable when using mixture or gaussian (default: 32)')
parser.add_argument('--discrete-size', type=int, default=1,
                    help='initial dim of discrete variable when using mixture or gumbel (default: 1)')
parser.add_argument('--download', type=int, default=1,
                    help='download dataset from s3 (default: 1)')
parser.add_argument('--data-dir', type=str, default='./.datasets', metavar='DD',
                    help='directory which contains input data')
parser.add_argument('--output-dir', type=str, default='./experiments', metavar='OD',
                    help='directory which contains csv results')
parser.add_argument('--model-dir', type=str, default='.models', metavar='MD',
                    help='directory which contains trained models')
parser.add_argument('--fid-model-dir', type=str, default='.models',
                    help='directory which contains trained FID models')
parser.add_argument('--calculate-fid-with', type=str, default=None,
                    help='enables FID calc & uses model conv/inceptionv3 (default: None)')
parser.add_argument('--disable-augmentation', action='store_true',
                    help='disables student-teacher data augmentation')
# train / eval or resume modes
parser.add_argument('--resume-training-with', type=int, default=None,
                    help='tries to load the model from model_dir and resume training [use int] (default: None)')
parser.add_argument('--eval-with', type=int, default=None,
                    help='tries to load the model from model_dir and evaluate the test dataset [use int] (default: None)')
parser.add_argument('--eval-with-loader', type=int, default=None,
                    help='if there are many loaders use ONLY this loader [use int] (default: None)')
# Model parameters
parser.add_argument('--filter-depth', type=int, default=32,
                    help='number of initial conv filter maps (default: 32)')
parser.add_argument('--reparam-type', type=str, default='isotropic_gaussian',
                    help='isotropic_gaussian, discrete or mixture [default: isotropic_gaussian]')
parser.add_argument('--layer-type', type=str, default='conv',
                    help='dense or conv (default: conv)')
parser.add_argument('--nll-type', type=str, default='bernoulli',
                    help='bernoulli or gaussian (default: bernoulli)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--vae-type', type=str, default='parallel',
                    help='vae type [sequential or parallel] (default: parallel)')
parser.add_argument('--normalization', type=str, default='groupnorm',
                    help='normalization type: batchnorm/groupnorm/instancenorm/none (default: groupnorm)')
parser.add_argument('--activation', type=str, default='elu',
                    help='activation function (default: elu)')
parser.add_argument('--disable-sequential', action='store_true',
                    help='enables standard batch training')
parser.add_argument('--shuffle-minibatches', action='store_true',
                    help='shuffles the student\'s minibatch (default: False)')
parser.add_argument('--use-relational-encoder', action='store_true',
                    help='uses a relational network as the encoder projection layer')
parser.add_argument('--use-pixel-cnn-decoder', action='store_true',
                    help='uses a pixel CNN decoder (default: False)')
parser.add_argument('--disable-gated-conv', action='store_true',
                    help='disables gated convolutional structure (default: False)')
parser.add_argument('--disable-student-teacher', action='store_true',
                    help='uses a standard VAE without Student-Teacher architecture')
# Optimization related
# BUGFIX: the help text previously claimed "(default: rmsprop)" while the
# actual default is "adamnorm"; the text now matches the default.
parser.add_argument('--optimizer', type=str, default="adamnorm",
                    help="specify optimizer (default: adamnorm)")
parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
                    help='learning rate (default: 1e-3)')
parser.add_argument('--early-stop', action='store_true',
                    help='enable early stopping (default: False)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
# Regularizer related
parser.add_argument('--disable-regularizers', action='store_true',
                    help='disables mutual info and consistency regularizers')
parser.add_argument('--monte-carlo-infogain', action='store_true',
                    help='use the MC version of mutual information gain / false is analytic (default: False)')
parser.add_argument('--continuous-mut-info', type=float, default=0.0,
                    help='-continuous_mut_info * I(z_c; x) is applied (opposite dir of disc)(default: 0.0)')
parser.add_argument('--discrete-mut-info', type=float, default=0.0,
                    help='+discrete_mut_info * I(z_d; x) is applied (default: 0.0)')
parser.add_argument('--kl-reg', type=float, default=1.0,
                    help='hyperparameter to scale KL term in ELBO')
parser.add_argument('--generative-scale-var', type=float, default=1.0,
                    help='scale variance of prior in order to capture outliers')
parser.add_argument('--consistency-gamma', type=float, default=1.0,
                    help='consistency_gamma * KL(Q_student | Q_teacher) (default: 1.0)')
parser.add_argument('--likelihood-gamma', type=float, default=0.0,
                    help='log-likelihood regularizer between teacher and student, 0 is disabled (default: 0.0)')
parser.add_argument('--mut-clamp-strategy', type=str, default="clamp",
                    help='clamp mut info by norm / clamp / none (default: clamp)')
parser.add_argument('--mut-clamp-value', type=float, default=100.0,
                    help='max / min clamp value if above strategy is clamp (default: 100.0)')
parser.add_argument('--ewc-gamma', type=float, default=0,
                    help='any value greater than 0 enables EWC with this hyper-parameter (default: 0)')
# Visdom parameters
parser.add_argument('--visdom-url', type=str, default="http://localhost",
                    help='visdom URL for graphs (default: http://localhost)')
# Cleanup: the default was the string "8097"; use the int directly.
parser.add_argument('--visdom-port', type=int, default=8097,
                    help='visdom port for graphs (default: 8097)')
# Device parameters
parser.add_argument('--seed', type=int, default=None,
                    help='seed for numpy and pytorch (default: None)')
parser.add_argument('--ngpu', type=int, default=1,
                    help='number of gpus available (default: 1)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

# handle randomness / non-randomness
if args.seed is not None:
    print("setting seed %d" % args.seed)
    # BUGFIX: numpy is imported as ``np`` in this module, so the original
    # ``numpy.random.seed`` raised NameError; likewise ``torch.manual_seed_all``
    # does not exist — seed the CPU RNG with torch.manual_seed and all CUDA
    # devices with torch.cuda.manual_seed_all when CUDA is in use.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed_all(args.seed)
def build_optimizer(model):
    """Construct the optimizer named by args.optimizer over model's parameters."""
    chosen = args.optimizer.lower().strip()
    # An unknown name raises KeyError, matching the original lookup semantics.
    factories = {
        "rmsprop": optim.RMSprop,
        "adam": optim.Adam,
        "adamnorm": AdamNormGrad,
        "adadelta": optim.Adadelta,
        "sgd": optim.SGD,
        "lbfgs": optim.LBFGS,
    }
    return factories[chosen](model.parameters(), lr=args.lr)
def register_plots(loss, grapher, epoch, prefix='train'):
    """Register scalar entries of a (possibly nested) loss dict as line plots.

    Keys containing 'mean' or 'scalar' are plotted under
    '<prefix>_<stem>', where stem is the key's first '_'-separated token;
    nested dicts are walked recursively with the same prefix.
    """
    for k, v in loss.items():
        # BUGFIX: the original tested ``isinstance(v, map)`` — the builtin
        # map iterator type — so nested dicts were never recursed into.
        if isinstance(v, dict):
            register_plots(v, grapher, epoch, prefix=prefix)
        elif 'mean' in k or 'scalar' in k:
            key_name = k.split('_')[0]
            # Tensors expose .item(); plain floats pass through untouched.
            value = v.item() if not isinstance(v, (float, np.float32, np.float64)) else v
            grapher.register_single({'%s_%s' % (prefix, key_name): [[epoch], [value]]},
                                    plot_type='line')
def register_images(images, names, grapher, prefix="train"):
    ''' helper to register a list of images (or a single image tensor) '''
    if isinstance(images, list):
        assert len(images) == len(names)
        for im, name in zip(images, names):
            register_images(im, name, grapher, prefix=prefix)
        return
    # Clamp pixel values to at most 1 before handing off to the grapher.
    clamped = torch.min(images.detach(), ones_like(images))
    grapher.register_single({'{}_{}'.format(prefix, names): clamped},
                            plot_type='imgs')
def _add_loss_map(loss_tm1, loss_t):
if not loss_tm1: # base case: empty dict
resultant = {'count': 1}
for k, v in loss_t.items():
if 'mean' in k or 'scalar' in k:
resultant[k] = v.detach()
return resultant
resultant = {}
for (k, v) in loss_t.items():
if 'mean' in k or 'scalar' in k:
resultant[k] = loss_tm1[k] + v.detach()
# increment total count
resultant['count'] = loss_tm1['count'] + 1
return resultant
def _mean_map(loss_map):
for k in loss_map.keys():
loss_map[k] /= loss_map['count']
return loss_map
def train(epoch, model, fisher, optimizer, loader, grapher, prefix='train'):
    ''' train loop helper.

    BUGFIX: forward the caller-supplied prefix instead of the hard-coded
    'train' so custom prefixes reach execute_graph (plots and mode
    selection). The default is unchanged.
    '''
    return execute_graph(epoch=epoch, model=model, fisher=fisher,
                         data_loader=loader, grapher=grapher,
                         optimizer=optimizer, prefix=prefix)
def test(epoch, model, fisher, loader, grapher, prefix='test'):
    ''' test loop helper.

    BUGFIX: forward the caller-supplied prefix instead of the hard-coded
    'test' so custom prefixes reach execute_graph. The default is
    unchanged.
    '''
    return execute_graph(epoch, model=model, fisher=fisher,
                         data_loader=loader, grapher=grapher,
                         optimizer=None, prefix=prefix)
def execute_graph(epoch, model, fisher, data_loader, grapher, optimizer=None, prefix='test'):
    ''' execute the graph; when 'train' is in the name the model runs the optimizer.

    Runs one full pass over data_loader, accumulating the per-batch loss
    maps, then plots means/images via grapher (if given) and returns a
    dict with detached 'loss_mean' and 'elbo_mean' floats (used for early
    stopping). An optimizer must be supplied iff prefix contains 'train'.
    '''
    model.eval() if not 'train' in prefix else model.train()
    assert optimizer is not None if 'train' in prefix else optimizer is None
    loss_map, params, num_samples = {}, {}, 0

    for data, _ in data_loader:
        data = Variable(data).cuda() if args.cuda else Variable(data)
        if 'train' in prefix:
            # zero gradients on optimizer
            # before forward pass
            optimizer.zero_grad()

        with torch.no_grad() if 'train' not in prefix else dummy_context():
            # run the VAE and extract loss
            output_map = model(data)
            loss_t = model.loss_function(output_map, fisher)

        if 'train' in prefix:
            # compute bp and optimize
            loss_t['loss_mean'].backward()
            # NOTE(review): despite the label, this is the norm of the
            # parameter vector, not of the gradients — confirm intent.
            loss_t['grad_norm_mean'] = torch.norm(  # add norm of vectorized grads to plot
                nn.utils.parameters_to_vector(model.parameters())
            )
            optimizer.step()

        with torch.no_grad() if 'train' not in prefix else dummy_context():
            loss_map = _add_loss_map(loss_map, loss_t)
            num_samples += data.size(0)

    loss_map = _mean_map(loss_map)  # reduce the map to get actual means
    print('{}[Epoch {}][{} samples]: Average loss: {:.4f}\tELBO: {:.4f}\tKLD: {:.4f}\tNLL: {:.4f}\tMut: {:.4f}'.format(
        prefix, epoch, num_samples,
        loss_map['loss_mean'].item(),
        loss_map['elbo_mean'].item(),
        loss_map['kld_mean'].item(),
        loss_map['nll_mean'].item(),
        loss_map['mut_info_mean'].item()))

    # gather scalar values of reparameterizers (if they exist)
    reparam_scalars = model.student.get_reparameterizer_scalars()

    # plot the test accuracy, loss and images
    if grapher:  # only if grapher is not None
        register_plots({**loss_map, **reparam_scalars}, grapher, epoch=epoch, prefix=prefix)
        # output_map here is the last batch's forward pass from the loop above
        images = [output_map['augmented']['data'], output_map['student']['x_reconstr']]
        img_names = ['original_imgs', 'vae_reconstructions']
        register_images(images, img_names, grapher, prefix=prefix)
        grapher.show()

    # return this for early stopping
    loss_val = {'loss_mean': loss_map['loss_mean'].detach().item(),
                'elbo_mean': loss_map['elbo_mean'].detach().item()}
    loss_map.clear()
    params.clear()
    return loss_val
def generate(student_teacher, grapher, name='teacher'):
    """Sample images from the named member ('teacher' or 'student') and plot them."""
    members = {
        'teacher': student_teacher.teacher,
        'student': student_teacher.student
    }
    chosen = members[name]
    if chosen is None:  # handle base case (e.g. no teacher yet)
        return
    chosen.eval()
    # random generation
    gen = student_teacher.generate_synthetic_samples(chosen, args.batch_size)
    gen = torch.min(gen, ones_like(gen))
    grapher.register_single({'generated_%s' % name: gen}, plot_type='imgs')
    # sequential generation for discrete and mixture reparameterizations
    if args.reparam_type in ('mixture', 'discrete'):
        gen = student_teacher.generate_synthetic_sequential_samples(chosen).detach()
        gen = torch.min(gen, ones_like(gen))
        grapher.register_single({'sequential_generated_%s' % name: gen}, plot_type='imgs')
def get_model_and_loader():
''' helper to return the model and the loader '''
if args.disable_sequential: # vanilla batch training
loaders = get_loader(args)
loaders = [loaders] if not isinstance(loaders, list) else loaders
else: # classes split
loaders = get_split_data_loaders(args, num_classes=10)
for l in loaders:
print("train = ", num_samples_in_loader(l.train_loader),
" | test = ", num_samples_in_loader(l.test_loader))
# append the image shape to the config & build the VAE
args.img_shp = loaders[0].img_shp,
if args.vae_type == 'sequential':
# Sequential : P(y|x) --> P(z|y, x) --> P(x|z)
# Keep a separate VAE spawn here in case we want
# to parameterize the sequence of reparameterizers
vae = SequentiallyReparameterizedVAE(loaders[0].img_shp,
kwargs=vars(args))
elif args.vae_type == 'parallel':
# Ours: [P(y|x), P(z|x)] --> P(x | z)
vae = ParallellyReparameterizedVAE(loaders[0].img_shp,
kwargs=vars(args))
else:
raise Exception("unknown VAE type requested")
# build the combiner which takes in the VAE as a parameter
# and projects the latent representation to the output space
student_teacher = StudentTeacher(vae, kwargs=vars(args))
#student_teacher = init_weights(student_teacher)
# build the grapher object
grapher = Grapher(env=student_teacher.get_name(),
server=args.visdom_url,
port=args.visdom_port)
| |
<filename>manage_rancher.py
import requests
import os
import logging
import re
import random
import flask
from datetime import datetime
from typing import Dict, List, Optional
# to do: move latest_narr_version() so we don't need to import app
import app
# Module wide logger
logger: Optional[logging.Logger] = None
# Setup default configuration values, overriden by values from os.environ later
cfg = {"hostname": u"localhost",
       "auth2": u"https://ci.kbase.us/services/auth/api/V2/token",
       "image_name": u"kbase/narrative",
       "image_tag": None,
       "es_type": "narrative-traefiker",
       "session_cookie": u"narrative_session",
       "container_name": u"narrative-{}",
       "container_name_prespawn": u"narrative_pre-{}",
       "reload_secs": 5,
       "log_level": logging.DEBUG,
       "log_dest": None,
       "log_name": u"traefiker",
       "rancher_user": None,
       # BUGFIX: the checked-in value was a redaction placeholder
       # (<PASSWORD>), which is not valid Python. Default to None; the
       # real credential must come from the environment (see setup()).
       "rancher_password": None,
       "rancher_url": None,
       "rancher_meta": "http://rancher-metadata/",
       "rancher_env_url": None,
       "rancher_stack_id": None,
       "rancher_stack_name": None,
       "mode": None,
       "narrenv": dict()}
def setup(main_cfg: dict, main_logger: logging.Logger) -> None:
global cfg
global logger
if main_logger is None:
logger = logging.getLogger()
else:
logger = main_logger
if main_cfg is not None:
cfg = main_cfg
else:
# We pull any environment variable that matches a config key into the config dictionary
for cfg_item in cfg.keys():
if cfg_item in os.environ:
cfg[cfg_item] = os.environ[cfg_item]
# To support injecting arbitrary environment variables into the narrative container, we
# look for any environment variable with the prefix "NARRENV_" and add it into a narrenv
# dictionary in the the config hash, using the env variable name stripped of "NARRENV_"
# prefix as the key
for k in os.environ.keys():
match = re.match(r"^NARRENV_(\w+)", k)
if match:
cfg['narrenv'][match.group(1)] = os.environ[k]
logger.debug({"message": "Setting narrenv from environment",
"key": match.group(1), "value": os.environ[k]})
def check_session(userid: str) -> str:
    """Return the session id of an existing narrative container for userid.

    Queries the rancher API for a service whose name matches the user's
    container name. Returns None when no such container exists; any error
    encountered is logged and re-raised.
    """
    try:
        svc_name = cfg['container_name'].format(userid)
        query_url = "{}/service?name={}".format(cfg["rancher_env_url"], svc_name)
        resp = requests.get(query_url, auth=(cfg["rancher_user"], cfg["rancher_password"]))
        if not resp.ok:
            msg = "Error response code from rancher API while searching for container name {} : {}".format(svc_name, resp.status_code)
            logger.error({"message": msg, "status_code": resp.status_code, "service_name": svc_name, "response_body": resp.text})
            raise(Exception(msg))
        matches = resp.json()['data']
        if not matches:
            logger.debug({"message": "No previous session found", "service_name": svc_name, "userid": userid})
            session_id = None
        else:
            session_id = matches[0]['launchConfig']['labels']['session_id']
            logger.debug({"message": "Found existing session", "session_id": session_id, "userid": userid})
            if len(matches) > 1:
                # More than one service matched the name; log all of them
                uuids = [svc['uuid'] for svc in matches]
                logger.warning({"message": "Found multiple session matches against container name", "userid": userid,
                                "service_name": svc_name, "rancher_uuids": uuids})
    except Exception as ex:
        logger.debug({"message": "Error trying to find existing session", "exception": format(str(ex)), "userid": userid})
        raise
    return(session_id)
def start(session: str, userid: str, prespawn: Optional[bool] = False) -> Dict[str, str]:
    """
    wrapper around the start_new function that checks to see if there are waiting narratives that
    can be assigned. Note that this method is subject to race conditions by competing workers, so we
    have 5 retries, and try to select a random waiting narrative before just spawning a new one. Someday maybe
    we can implement something to serialize selecting narratives for assignment, but that's a ToDo item.

    :param session: session id for the new/assigned container
    :param userid: user the container is for
    :param prespawn: when True, only create an unassigned pre-spawned container
    :return: dict with the session id (and "prespawned" flag when a cached
             container was assigned)
    NOTE(review): the prespawn=True branch returns None implicitly, which does
    not match the declared Dict[str, str] return type -- confirm callers
    ignore the return value in that case.
    """
    if prespawn is True:
        start_new(session, userid, True)
    else:
        # find_prespawned() is defined elsewhere in this module; presumably it
        # returns the names of waiting, unassigned narrative containers.
        prespawned = find_prespawned()
        # The number of prespawned should be pretty stable around cfg['num_prespawn'], but during a
        # usage there might be spike that exhausts the pool of ready containers before replacements
        # are available.
        if len(prespawned) > 0:
            # if we're not already over the num_prespawn setting then
            # spawn a replacement and immediately rename an existing container to match the
            # userid. We are replicating the prespawn container name code here, maybe cause
            # issues later on if the naming scheme is changed!
            # NOTE(review): cfg['num_prespawn'] is not in the module default
            # cfg dict; it is assumed to be provided via setup() -- confirm.
            if len(prespawned) <= cfg['num_prespawn']:
                start_new(session, session[0:6], True)
            narr_name = cfg['container_name'].format(userid)
            # Start from a random candidate to reduce collisions between workers
            offset = random.randint(0, len(prespawned)-1)
            session = None
            # Try max(5, # of prespawned) times to use an existing narrative, on success assign the session and break
            for attempt in range(max(5, len(prespawned))):
                candidate = prespawned[(offset+attempt) % len(prespawned)]
                try:
                    rename_narrative(candidate, narr_name)
                    container = find_service(narr_name)
                    session = container['launchConfig']['labels']['session_id']
                    logger.info({"message": "assigned_container", "userid": userid, "service_name": narr_name, "session_id": session,
                                 "client_ip": "127.0.0.1", "attempt": attempt, "status": "success"})
                    break
                except Exception as ex:
                    # Another worker may have grabbed this candidate first; log and retry
                    logger.info({"message": "assigned_container_fail", "userid": userid, "service_name": narr_name, "session_id": session,
                                 "client_ip": "127.0.0.1", "attempt": attempt, "status": "fail", "error": str(ex)})
            if session:
                return({"session": session, "prespawned": True})
            else:
                # Well, that was a bust, just spin up one explicitly for this user. Maybe we hit a race condition where all of the
                # cached containers have been assigned between when we queried and when we tried to rename it.
                # ToDo: need to write a pool watcher thread that wakes up periodically to make sure the number of prespawned
                # narratives are still at the desired level. Shouldn't be needed since there should be a 1:1 between assigning
                # and spawning replacements, but errors happen
                logger.debug({"message": "could not assign prespawned container, calling start_new", "userid": userid, "session_id": session})
                return({"session": start_new(session, userid, False)})
        else:
            return({"session": start_new(session, userid, False)})
def start_new(session: str, userid: str, prespawn: Optional[bool] = False):
    """
    Attempts to start a new container using the rancher API. Signature is identical to the start_docker
    method, with the equivalent rancher exceptions.

    :param session: session id; written into the container labels and the
                    traefik routing cookie rule
    :param userid: user the container is for (for prespawned containers the
                   caller passes a session prefix instead)
    :param prespawn: when True, create an unassigned pre-spawned container
    :return: the session id that was passed in
    :raises Exception: if the rancher API rejects the service-create request
    """
    # Crazy long config needed for rancher container startup. Based on observing the traffic from rancher
    # GUI to rancher REST APIs. Might be able to prune it down with some research
    # The 'labels' placeholder below is replaced outright before the request is
    # sent; 'environment' is merged with cfg['narrenv'] (note the env1/env2
    # placeholder entries survive the merge).
    container_config = {u'assignServiceIpAddress': False,
                        u'createIndex': None,
                        u'created': None,
                        u'description': None,
                        u'externalId': None,
                        u'fqdn': None,
                        u'healthState': None,
                        u'kind': None,
                        u'launchConfig': {
                            u'blkioWeight': None,
                            u'capAdd': [],
                            u'capDrop': ["MKNOD", "NET_RAW", "SYS_CHROOT", "SETUID", "SETGID", "CHOWN", "SYS_ADMIN",
                                         "DAC_OVERRIDE", "FOWNER", "FSETID", "SETPCAP", "AUDIT_WRITE", "SETFCAP"],
                            u'cgroupParent': None,
                            u'count': None,
                            u'cpuCount': None,
                            u'cpuPercent': None,
                            u'cpuPeriod': None,
                            u'cpuQuota': None,
                            u'cpuRealtimePeriod': None,
                            u'cpuRealtimeRuntime': None,
                            u'cpuSet': None,
                            u'cpuSetMems': None,
                            u'cpuShares': None,
                            u'createIndex': None,
                            u'created': None,
                            u'dataVolumes': [],
                            u'dataVolumesFrom': [],
                            u'dataVolumesFromLaunchConfigs': [],
                            u'deploymentUnitUuid': None,
                            u'description': None,
                            u'devices': [],
                            u'diskQuota': None,
                            u'dns': [],
                            u'dnsSearch': [],
                            u'domainName': None,
                            u'drainTimeoutMs': 0,
                            u'environment': {
                                u'env1': u'val1',
                                u'env2': u'val2'},
                            u'externalId': None,
                            u'firstRunning': None,
                            u'healthInterval': None,
                            u'healthRetries': None,
                            u'healthState': None,
                            u'healthTimeout': None,
                            u'hostname': None,
                            u'imageUuid': u'docker:kbase/narrative:latest',
                            u'instanceTriggeredStop': u'stop',
                            u'ioMaximumBandwidth': None,
                            u'ioMaximumIOps': None,
                            u'ip': None,
                            u'ip6': None,
                            u'ipcMode': None,
                            u'isolation': None,
                            u'kernelMemory': None,
                            u'kind': u'container',
                            u'labels': {
                                u'io.rancher.container.pull_image': u'always',
                                u'session_id': None,
                                u'traefik.enable': u'True'},
                            u'logConfig': {u'config': {}, u'driver': u''},
                            u'memory': None,
                            u'memoryMb': None,
                            u'memoryReservation': None,
                            u'memorySwap': None,
                            u'memorySwappiness': None,
                            u'milliCpuReservation': None,
                            u'networkLaunchConfig': None,
                            u'networkMode': u'managed',
                            u'oomScoreAdj': None,
                            u'pidMode': None,
                            u'pidsLimit': None,
                            u'ports': [u'8888/tcp'],
                            u'privileged': False,
                            u'publishAllPorts': False,
                            u'readOnly': False,
                            u'removed': None,
                            u'requestedIpAddress': None,
                            u'restartPolicy': {u'name': u'always'},
                            u'runInit': False,
                            u'secrets': [],
                            u'shmSize': None,
                            u'startCount': None,
                            u'startOnCreate': True,
                            u'stdinOpen': True,
                            u'stopSignal': None,
                            u'stopTimeout': None,
                            u'tty': True,
                            u'type': u'launchConfig',
                            u'user': None,
                            u'userdata': None,
                            u'usernsMode': None,
                            u'uts': None,
                            u'uuid': None,
                            u'vcpu': 1,
                            u'volumeDriver': None,
                            u'workingDir': None},
                        u'name': None,
                        u'removed': None,
                        u'scale': 1,
                        u'secondaryLaunchConfigs': [],
                        u'selectorContainer': None,
                        u'selectorLink': None,
                        u'stackId': None,
                        u'startOnCreate': True,
                        u'system': False,
                        u'type': u'service',
                        u'uuid': None,
                        u'vip': None}
    if prespawn is False:
        name = cfg['container_name'].format(userid)
        # Record the originating client IP (if the reverse proxy passed one)
        client_ip = flask.request.headers.get("X-Real-Ip", flask.request.headers.get("X-Forwarded-For", None))
        try: # Set client ip from request object if available
            container_config['description'] = 'client-ip:{} timestamp:{}'.format(client_ip,
                                                                                 datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
        except Exception:
            logger.error({"message": "Error checking flask.request.headers for X-Real-Ip or X-Forwarded-For"})
    else:
        # Prespawned containers have no requesting client and use a distinct name template
        name = cfg['container_name_prespawn'].format(userid)
        client_ip = None
    cookie = u'{}'.format(session)
    labels = dict()
    labels["io.rancher.container.pull_image"] = u"always"
    labels["io.rancher.container.start_once"] = u"true"
    labels["traefik.enable"] = u"True"
    labels["session_id"] = session
    # create a rule for list of hostnames that should match from cfg['hostname']
    # NOTE(review): cfg['hostname'] is iterated here, but the module default is
    # a single string, which would yield one Host() rule per character --
    # presumably setup() replaces it with a list of hostnames; confirm.
    host_rules = " || ".join([u"Host(\"{}\")".format(hostname) for hostname in cfg['hostname']])
    # Route only /narrative/ paths whose Cookie header carries this session id
    remaining_rule = u" && PathPrefix(\"{}\") && HeadersRegexp(\"Cookie\",`{}`)"
    labels["traefik.http.routers." + userid + ".rule"] = host_rules + remaining_rule.format("/narrative/", cookie)
    labels["traefik.http.routers." + userid + ".entrypoints"] = u"web"
    container_config['launchConfig']['labels'] = labels
    container_config['launchConfig']['name'] = name
    if (cfg['image_tag'] is not None and cfg['image_tag'] != ''):
        imageUuid = "{}:{}".format(cfg['image_name'], cfg['image_tag'])
    else:
        # to do: fix calling latest_narr_version() so we don't need to call the `app` method like this
        imageUuid = "{}:{}".format(cfg['image_name'], app.latest_narr_version())
    container_config['launchConfig']['imageUuid'] = "docker:{}".format(imageUuid)
    container_config['launchConfig']['environment'].update(cfg['narrenv'])
    container_config['name'] = name
    container_config['stackId'] = cfg['rancher_stack_id']
    # Attempt to bring up a container, if there is an unrecoverable error, clear the session variable to flag
    # an error state, and overwrite the response with an error response
    try:
        r = requests.post(cfg["rancher_env_url"]+"/service", json=container_config, auth=(cfg["rancher_user"], cfg["rancher_password"]))
        logger.info({"message": "new_container", "image": imageUuid, "userid": userid, "service_name": name, "session_id": session,
                     "client_ip": client_ip}) # request.remote_addr)
        if not r.ok:
            msg = "Error - response code {} while creating new narrative rancher service: {}".format(r.status_code, r.text)
            logger.error({"message": msg})
            raise(Exception(msg))
    except Exception as ex:
        raise(ex)
    return(session)
def find_stack() -> Dict[str, str]:
"""
Query the | |
"""
Text search query language.
[ Grammar ]
Phrases:
the ducks find all instances of the "the ducks" (a bigram)
Query Expansion: [] brackets
[the ducks] "the ducks" or "the duck".
Equivalent to ("the duck" | "the ducks").
Expressions:
An expression can be:
- a phrase (e.g., "the ducks")
- a phrase with query expansion (e.g., [the ducks])
- composition of expressions with the operators below
The result of evaluating an expression is always a list of locations where
phrases that match the expression appear. There is no merging of phrases
in the text query language.
Or: |
e1 | e2 | e3 | ... expr1 or expr2 or expr3
And: &
e1 & e2 & e3 & ... expr1, expr2, and expr3 where they are nearby
I.e., instances of e1, e2, e3 that are contained in
a window that contains an instance of e1, e2, and e3
e1 & e2 & e3 :: t same as above, but with t seconds as the window
threshold
e1 & e2 & e3 // w same as above, but with w tokens as the threshold
Not near: \
e1 \ e2 \ e3 ... expr1, not near expr2 and not near expr3
I.e., instances of e1 not in any window containing
e1 or e2.
This is equivalent to (e1 \ (e2 | e3)) and
((e1 \ e2) \ e3).
e1 \ e2 \ e3 :: t same as above, but with t seconds as the window
e1 \ e2 \ e3 // w same as above, but with w tokens as the threshold
Groups: ()
(expr) evaluate expr as a group
[ Group caveats ]
&, |, and \ cannot be combined in the same group. For instance,
"(a & b | c)" is invalid and should be written as "(a & (b | c))" or
"((a & b) | c)".
[ Query Examples ]
united states
All instances of the "united states".
united | states
All instances of "united" and "states".
united & states
All instances of "united" and "states" where each "united" is near a
"states" and each "states" is near a "united".
united \ states
All instances of "united", without "states" nearby.
united \ (states | kingdom) ==equiv== united \ states \ kingdom
All instances of "united", without "states" and without "kingdom" nearby.
"""
import heapq
from abc import ABC, abstractmethod, abstractproperty
from collections import deque
from typing import Dict, List, Iterable, NamedTuple, Optional
from parsimonious.grammar import Grammar, NodeVisitor
from .index import Lexicon, CaptionIndex
from .tokenize import default_tokenizer
from .util import PostingUtil, group_results_by_document
GRAMMAR = Grammar(r"""
expr_root = sp? expr_group sp?
expr_group = and / or / not / expr
expr = expr_paren / tokens_root
expr_paren = sp? "(" sp? expr_group sp? ")" sp?
and = expr more_and threshold?
more_and = (sp? "&" sp? expr)+
or = expr more_or
more_or = (sp? "|" sp? expr)+
not = expr more_not threshold?
more_not = (sp? "\\" sp? expr)+
threshold = sp? threshold_type sp? integer
threshold_type = "::" / "//"
integer = ~r"\d+"
tokens_root = tokens_list more_tokens_root
more_tokens_root = (sp tokens_list)*
tokens_list = tokens / tokens_exp
tokens_exp = "[" sp? tokens sp? "]"
tokens = token more_tokens
more_tokens = (sp token)*
token = ~r"[^\s()&|\\\[\]:/]+"
sp = ~r"\s+"
""")
class _Expr(ABC):
    """Abstract base class for all query expression AST nodes."""

    class Context(NamedTuple):
        """Evaluation context shared by every node in a query tree."""
        lexicon: Lexicon
        index: CaptionIndex
        # Restrict evaluation to these documents; None means all documents.
        documents: Optional[Iterable[CaptionIndex.DocIdOrDocument]]
        # Silently skip tokens missing from the lexicon instead of raising.
        ignore_word_not_found: bool
        case_insensitive: bool

    @abstractmethod
    def eval(self, context: '_Expr.Context') -> Iterable[CaptionIndex.Document]:
        """Yield documents (with matching postings) for this expression."""
        raise NotImplementedError()

    @abstractmethod
    def estimate_cost(self, lexicon: Lexicon) -> float:
        """Estimate the relative cost of evaluating this expression."""
        raise NotImplementedError()

    # abc.abstractproperty is deprecated since Python 3.3; the equivalent
    # stacked @property/@abstractmethod form is used instead.
    @property
    @abstractmethod
    def _pprint_data(self):
        """Dict representation of this node, used by __repr__."""
        raise NotImplementedError()

    def __repr__(self):
        return repr(self._pprint_data)
class _JoinExpr(_Expr):
    """Base class for n-ary operators (And, Or, Not) joining sub-expressions.

    threshold is the window size used by distance-sensitive operators;
    threshold_type is 't' for seconds, otherwise a token count (see _And/_Not).
    """
    def __init__(self, children, threshold, threshold_type):
        assert all(isinstance(c, _Expr) for c in children)
        self.children = children
        self.threshold = threshold
        self.threshold_type = threshold_type
    def estimate_cost(self, lexicon):
        # Cost of a join is the combined cost of evaluating every child.
        return sum(c.estimate_cost(lexicon) for c in self.children)
class _Phrase(_Expr):
    """Leaf expression: an n-gram of tokens, optionally with query expansion.

    A token marked expand=True (written [token] in the query language) is
    replaced by all lexicon words similar to it.
    """
    class Token(NamedTuple):
        # Raw token text as written in the query
        text: str
        # True when the token should be expanded to similar words
        expand: bool
    def __init__(self, tokens):
        assert all(isinstance(t, _Phrase.Token) for t in tokens)
        self.tokens = tokens
    @property
    def _pprint_data(self):
        return {
            '1. op': 'Phrase',
            '2. tokens': ' '.join([
                '[{}]'.format(t.text) if t.expand else t.text
                for t in self.tokens])
        }
    def eval(self, context):
        """Yield documents whose text contains this n-gram.

        Builds one candidate word list per position, then delegates the
        actual search to the index's ngram_search.
        """
        kwargs = {}
        if context.documents is not None:
            kwargs['documents'] = context.documents
        ngram_tokens = []
        for t in self.tokens:
            if t.expand:
                tokens = [context.lexicon[x] for x in
                          context.lexicon.similar(t.text)]
                if len(tokens) == 0:
                    # No similar words at all -> the phrase cannot match
                    return
                ngram_tokens.append(tokens)
            else:
                try:
                    tokens = [context.lexicon[t.text]]
                    if context.case_insensitive:
                        matches = context.lexicon.case_insensitive(tokens[0])
                        if matches is not None:
                            # Some other words exist
                            assert len(matches) > 0
                            tokens = [context.lexicon[x] for x in matches]
                except Lexicon.WordDoesNotExist:
                    if context.ignore_word_not_found:
                        # Unknown word: treat the whole phrase as unmatched
                        return
                    else:
                        raise
                ngram_tokens.append(tokens)
        for d in context.index.ngram_search(*ngram_tokens, **kwargs):
            yield d
    def estimate_cost(self, lexicon):
        # The cost to search is the frequency of the least frequent token
        # in the ngram since this is the number of locations that need to
        # be checked.
        min_token_count = lexicon.word_count
        for t in self.tokens:
            token_count = 0
            if t.expand:
                tokens = [lexicon[x] for x in
                          lexicon.similar(t.text)]
                token_count += sum(x.count for x in tokens)
            else:
                # FIXME: Case insensitivity not considered here
                try:
                    token = lexicon[t.text]
                    token_count += token.count
                except Lexicon.WordDoesNotExist:
                    pass
            min_token_count = min(token_count, min_token_count)
        # Normalize by total corpus size to get a relative cost
        return min_token_count / lexicon.word_count
def _dist_time_posting(p1, p2):
return (
max(p2.start - p1.end, 0)
if p1.start <= p2.start else _dist_time_posting(p2, p1))
def _dist_idx_posting(p1, p2):
return (
max(p2.idx - (p1.idx + p1.len), 0)
if p1.idx <= p2.idx else _dist_idx_posting(p2, p1))
class _And(_JoinExpr):
    """Conjunction: keep postings of each child that are within the window
    threshold of a posting from every other child."""
    @property
    def _pprint_data(self):
        return {
            '1. op': 'And',
            '2. thresh': '{} {}'.format(
                self.threshold,
                'seconds' if self.threshold_type == 't' else 'tokens'),
            '3. children': [c._pprint_data for c in self.children]
        }
    def eval(self, context):
        results = []
        # Evaluate the children sequentially, narrowing the candidate document
        # set after each one: later children only search documents that all
        # earlier children matched.
        for c in self.children:
            child_results = deque(c.eval(context))
            if len(child_results) == 0:
                return
            doc_ids = [d.id for d in child_results]
            context = context._replace(documents=doc_ids)
            results.append({d.id: d.postings for d in child_results})
        dist_fn = (
            _dist_time_posting if self.threshold_type == 't' else
            _dist_idx_posting)
        n = len(results)
        # doc_ids now holds the ids that survived every child
        for doc_id in sorted(doc_ids):
            # k-way merge of the children's posting lists, ordered by start
            # time via a heap of (start, child_index, posting, iterator)
            pq = []
            for i, r in enumerate(results):
                assert doc_id in r
                ps_iter = iter(r[doc_id])
                ps_head = next(ps_iter)
                pq.append((ps_head.start, i, ps_head, ps_iter))
            heapq.heapify(pq)
            merged_postings = []
            # Most recently consumed posting of each child (for backward checks)
            ps_prev = [None] * n
            while len(pq) > 0:
                # Consider first element
                _, i, ps_head, ps_iter = heapq.heappop(pq)
                # Check conditions: which other children have a pending posting
                # within the window of ps_head?
                near_i = set()
                for elem in pq:
                    ps_cmp = elem[2]
                    j = elem[1]
                    if dist_fn(ps_head, ps_cmp) < self.threshold:
                        near_i.add(j)
                if len(near_i) < n - 1:
                    # Not all children matched ahead; also check the postings
                    # already consumed from the missing children
                    for j in range(n):
                        if j != i and j not in near_i:
                            ps_cmp = ps_prev[j]
                            if ps_cmp is not None:
                                if dist_fn(ps_head, ps_cmp) < self.threshold:
                                    near_i.add(j)
                            else:
                                # No solution
                                break
                if len(near_i) == n - 1:
                    # ps_head is near a posting of every other child: keep it
                    merged_postings.append(ps_head)
                # Advance postings
                ps_prev[i] = ps_head
                try:
                    ps_head = next(ps_iter)
                    heapq.heappush(pq, (ps_head.start, i, ps_head, ps_iter))
                except StopIteration:
                    pass
            merged_postings.sort(key=lambda x: x.start)
            if len(merged_postings) > 0:
                yield CaptionIndex.Document(
                    id=doc_id, postings=merged_postings)
class _Or(_JoinExpr):
    """Union operator: postings matching any child expression."""

    @property
    def _pprint_data(self):
        return {
            '1. op': 'Or',
            '2. children': [c._pprint_data for c in self.children]
        }

    def eval(self, context):
        # Evaluate every child and merge their postings per document.
        child_results = [child.eval(context) for child in self.children]
        for doc_id, postings_lists in group_results_by_document(child_results):
            merged = PostingUtil.union(postings_lists)
            yield CaptionIndex.Document(id=doc_id, postings=merged)
class _Not(_JoinExpr):
    """'Not near': postings of the first child that have no posting of any
    other child within the window threshold."""
    @property
    def _pprint_data(self):
        return {
            '1. op': 'Not',
            '2. thresh': '{} {}'.format(
                self.threshold,
                'seconds' if self.threshold_type == 't' else 'tokens'),
            '3. children': [c._pprint_data for c in self.children]
        }
    def eval(self, context):
        # Candidate postings come from the first child only
        child0_results = list(self.children[0].eval(context))
        # The excluding children only need to search documents the first
        # child actually matched
        other_context = context._replace(
            documents=[d.id for d in child0_results])
        other_results = [c.eval(other_context) for c in self.children[1:]]
        # Per document, union all postings that would disqualify a candidate
        other_postings = {
            doc_id: PostingUtil.union(ps_lists)
            for doc_id, ps_lists in group_results_by_document(other_results)
        }
        dist_fn = (
            _dist_time_posting if self.threshold_type == 't' else
            _dist_idx_posting)
        key_fn = (
            (lambda x: x.start) if self.threshold_type == 't' else
            (lambda x: x.idx))
        for d in child0_results:
            postings = []
            doc_ops = other_postings.get(d.id, [])
            doc_op_i = 0
            prev_op = None
            # Sweep candidate and excluded postings in lockstep (both lists
            # are assumed sorted by key_fn -- presumably guaranteed by the
            # index/PostingUtil; confirm). For each candidate, check the
            # nearest excluded posting before and after it.
            for p in d.postings:
                p_key = key_fn(p)
                while (
                    doc_op_i < len(doc_ops)
                    and key_fn(doc_ops[doc_op_i]) <= p_key
                ):
                    prev_op = doc_ops[doc_op_i]
                    doc_op_i += 1
                # Excluded posting at or before the candidate is too close
                if prev_op and dist_fn(p, prev_op) < self.threshold:
                    continue
                # Excluded posting after the candidate is too close
                if (
                    doc_op_i < len(doc_ops)
                    and dist_fn(p, doc_ops[doc_op_i]) < self.threshold
                ):
                    continue
                postings.append(p)
            if len(postings) > 0:
                yield CaptionIndex.Document(id=d.id, postings=postings)
# Default window thresholds for the And (&) and Not-near (\) operators,
# presumably applied when a query omits an explicit "::" or "//" threshold.
DEFAULT_AND_THRESH = 15
DEFAULT_NOT_THRESH = 15
class _QueryParser(NodeVisitor):
def __init__(self, constants={}):
self.grammar = GRAMMAR
self._constants = constants
visit_more_and = visit_more_or = visit_more_not = visit_more_tokens = \
lambda a, b, c: c
def visit_expr_root(self, node, children):
assert len(children) == 3
return children[1]
def visit_expr_group(self, node, children):
assert | |
import asyncio
import math
import re
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, TypedDict

import pytz
from blossom_wrapper import BlossomAPI
from dateutil import parser
from discord import Embed, Forbidden, Reaction, User
from discord.ext import commands
from discord.ext.commands import Cog
from discord_slash import SlashContext, cog_ext
from discord_slash.model import SlashMessage
from discord_slash.utils.manage_commands import create_option

from buttercup.bot import ButtercupBot
from buttercup.cogs.helpers import (
    BlossomException,
    BlossomUser,
    get_discord_time_str,
    get_duration_str,
    get_initial_username,
    get_user,
    get_username,
    parse_time_constraints,
)
from buttercup.strings import translation
# Shared translation/string table for user-facing messages
i18n = translation()
# Unicode characters for control emojis
first_page_emoji = "\u23EE\uFE0F"  # Previous track button
previous_page_emoji = "\u25C0\uFE0F"  # Left triangle button
next_page_emoji = "\u25B6\uFE0F"  # Right triangle button
last_page_emoji = "\u23ED\uFE0F"  # Next track button
# Matches transcription headers such as "*Image Transcription: Meme*",
# capturing the format (e.g. "Image") and the optional type (e.g. "Meme")
header_regex = re.compile(
    r"^\s*\*(?P<format>\w+)\s*Transcription:?(?:\s*(?P<type>[\w ]+))?\*", re.IGNORECASE
)
def get_transcription_type(transcription: Dict[str, Any]) -> str:
    """Determine the type of a transcription from its header section.

    Falls back to "Post" when the header does not match the expected
    "*<format> Transcription: <type>*" shape.
    """
    header_section = transcription["text"].split("---")[0]
    if (match := header_regex.search(header_section)) is None:
        return "Post"
    fmt = match.group("format")
    if fmt:
        fmt = fmt.strip()
    kind = match.group("type")
    if kind:
        kind = kind.strip()
    # Prefer the specific type; fall back to the format
    return kind or fmt
def get_transcription_source(transcription: Dict[str, Any]) -> str:
    """Determine the source subreddit ("r/<name>") of the transcription.

    The subreddit is the fifth path component of the Reddit permalink, e.g.
    https://reddit.com/r/thatHappened/comments/qzhtyb/.../hlmkuau/
    """
    permalink_parts = transcription["url"].split("/")
    return "r/{}".format(permalink_parts[4])
def format_query_occurrence(line: str, line_num: int, pos: int, query: str) -> str:
    """Format one occurrence of the query with context and an underline.

    :param line: full line of the transcription containing the occurrence
    :param line_num: 1-based line number (rendered as "L<n>: ")
    :param pos: character offset of the occurrence within the line
    :param query: the searched-for text
    """
    # The maximum amount of characters that fit in a single line
    # within an embedded code block
    width = 56
    prefix = f"L{line_num}: "
    # Characters left over for showing context around the match
    context_budget = width - len(query) - len(prefix)
    before = line[:pos]
    after = line[pos + len(query) :]
    budget_left = math.ceil(context_budget / 2)
    budget_right = math.floor(context_budget / 2)
    # Give each side as much context as possible: donate unused budget
    if len(before) < budget_left:
        budget_right += budget_left - len(before)
    elif len(after) < budget_right:
        budget_left += budget_right - len(after)
    # Truncate with ellipses where the context still overflows
    if len(before) > budget_left:
        before = "..." + before[-budget_left + 3 :]
    if len(after) > budget_right:
        after = after[: budget_right - 3] + "..."
    # Column where the occurrence starts in the rendered line
    marker_offset = len(prefix) + len(before)
    # Extract the actual occurrence (may differ in capitalization)
    matched = line[pos : pos + len(query)]
    context_line = f"{prefix}{before}{matched}{after}\n"
    underline = " " * marker_offset + "-" * len(query) + "\n"
    return context_line + underline
def create_result_description(result: Dict[str, Any], num: int, query: str) -> str:
    """Create a description for the given result.

    Renders one numbered search hit: meta info (type, subreddit, link, time)
    followed by a code block showing up to four occurrences of the query
    with context, plus a "more occurrences" note when truncated.
    """
    transcription: str = result["text"]
    # casefold() gives case-insensitive counting/matching
    total_occurrences = transcription.casefold().count(query.casefold())
    # Determine meta info about the post/transcription
    tr_type = get_transcription_type(result)
    tr_source = get_transcription_source(result)
    time = parser.parse(result["create_time"])
    description = (
        i18n["search"]["description"]["item"].format(
            num=num,
            tr_type=tr_type,
            tr_source=tr_source,
            url=result["url"],
            timestamp=get_discord_time_str(time, "R"),
        )
        # Start code block for occurrences
        + "\n```\n"
    )
    # The maximum number of occurrences to show
    max_occurrences = 4
    cur_count = 0
    for i, line in enumerate(transcription.splitlines()):
        start = 0
        pos = line.casefold().find(query.casefold())
        # Walk through every occurrence within this line
        while pos >= 0 and cur_count < max_occurrences:
            # Add the line where the word occurs
            description += format_query_occurrence(line, i + 1, pos, query)
            # Move to the next occurrence in the line
            cur_count += 1
            start = pos + len(query)
            pos = line.casefold().find(query.casefold(), start)
        if cur_count >= max_occurrences:
            break
    description += "```\n"
    if cur_count < total_occurrences:
        # Tell the user how many occurrences were cut off
        description += (
            i18n["search"]["description"]["more_occurrences"].format(
                count=total_occurrences - cur_count
            )
            + "\n\n"
        )
    return description
async def clear_reactions(msg: SlashMessage) -> None:
    """Clear previously set control emojis from the given message."""
    if len(msg.reactions) > 0:
        try:
            await msg.clear_reactions()
        except Forbidden:
            # The bot is not allowed to clear reactions
            # This can happen when the command is executed in a DM
            # We need to clear the reactions manually
            # NOTE(review): iterating msg.reactions yields Reaction objects,
            # which are passed as the emoji argument here, and msg.author is
            # assumed to be the bot (bot-sent message) -- confirm both against
            # the discord.py remove_reaction API.
            for emoji in msg.reactions:
                await msg.remove_reaction(emoji, msg.author)
class SearchCacheItem(TypedDict):
    """Search state cached per result message so reactions can page through."""
    # The query that the user searched for
    query: str
    # The user that the search is restricted to (None means no restriction)
    user: Optional[BlossomUser]
    # The time restriction for the search (either bound may be None)
    after_time: Optional[datetime]
    before_time: Optional[datetime]
    # Human-readable form of the time restriction, for display
    time_str: str
    # The current Discord page for the query
    cur_page: int
    # The id of the user who executed the query
    discord_user_id: str
    # The cached response data from previous requests
    response_data: Optional[Dict[str, Any]]
    # The page of the cached response data
    request_page: int
class SearchCacheEntry(TypedDict):
    """Internal wrapper stored in SearchCache: item plus its LRU timestamp."""
    # When the entry was last written; used to evict the oldest entry
    last_modified: datetime
    # The cached search state itself
    element: SearchCacheItem
class SearchCache:
    """A bounded LRU-style cache mapping Discord message IDs to search state."""

    def __init__(self, capacity: int) -> None:
        """Initialize a new cache holding at most `capacity` entries."""
        self.capacity = capacity
        self.cache = {}

    def _clean(self) -> None:
        """Ensure that the cache capacity isn't exceeded."""
        if len(self.cache) > self.capacity:
            # Delete the entry that was modified least recently
            sorted_entries = sorted(
                self.cache.items(), key=lambda x: x[1]["last_modified"]
            )
            self.cache.pop(sorted_entries[0][0])

    def set(
        self,
        msg_id: str,
        entry: "SearchCacheItem",
        time: Optional[datetime] = None,
    ) -> None:
        """Set an entry of the cache.

        :param msg_id: The ID of the message where the search results are displayed.
        :param entry: The cache item for the corresponding message.
        :param time: The time when the message was last interacted with.
            Defaults to the current UTC time; only pass it directly in tests.
        """
        # NOTE: a previous version used `datetime.now(...)` as the parameter
        # default, which is evaluated once at import time -- every call then
        # shared the same stale timestamp and LRU eviction was broken. The
        # None sentinel defers the clock read to call time.
        if time is None:
            time = datetime.now(tz=timezone.utc)
        self.cache[msg_id] = {
            "last_modified": time,
            "element": entry,
        }
        # Make sure the capacity is not exceeded
        self._clean()

    def get(self, msg_id: str) -> Optional["SearchCacheItem"]:
        """Get the cache entry for the corresponding message.

        Note that this might return no entry, even if it has been added at
        some point: when the capacity of the cache is exceeded, old items
        get deleted.
        """
        item = self.cache.get(msg_id)
        return item["element"] if item is not None else None
class Search(Cog):
    def __init__(self, bot: ButtercupBot, blossom_api: BlossomAPI) -> None:
        """Initialize the Search cog."""
        self.bot = bot
        self.blossom_api = blossom_api
        # Recent searches keyed by result-message ID, for reaction paging
        self.cache = SearchCache(10)
        # Size of a search result page on Discord
        self.discord_page_size = 5
        # Size of the fetched result pages from Blossom
        # (one Blossom request covers five Discord pages, cutting API calls)
        self.request_page_size = self.discord_page_size * 5
    async def _search_from_cache(
        self,
        msg: SlashMessage,
        start: datetime,
        cache_item: SearchCacheItem,
        page_mod: int,
    ) -> None:
        """Execute the search with the given cache.

        :param msg: the Discord message displaying the search results
        :param start: when command processing began (for duration display)
        :param cache_item: cached search state for this message
        :param page_mod: page delta relative to the cached page (e.g. +1/-1)
        """
        # Clear previous control emojis
        await clear_reactions(msg)
        discord_page = cache_item["cur_page"] + page_mod
        query = cache_item["query"]
        user = cache_item["user"]
        user_id = user["id"] if user else None
        after_time = cache_item["after_time"]
        before_time = cache_item["before_time"]
        time_str = cache_item["time_str"]
        from_str = after_time.isoformat() if after_time else None
        until_str = before_time.isoformat() if before_time else None
        # Blossom pages are larger than Discord pages; find the Blossom page
        # that contains the requested Discord page
        request_page = (discord_page * self.discord_page_size) // self.request_page_size
        if (
            not cache_item["response_data"]
            or request_page != cache_item["request_page"]
        ):
            # A new request has to be made
            data = {
                "text__icontains": cache_item["query"],
                "author": user_id,
                "create_time__gte": from_str,
                "create_time__lte": until_str,
                "url__isnull": False,
                "ordering": "-create_time",
                "page_size": self.request_page_size,
                "page": request_page + 1,
            }
            response = self.blossom_api.get(path="transcription", params=data)
            if response.status_code != 200:
                raise BlossomException(response)
            response_data = response.json()
        else:
            # The cached Blossom page already covers the requested Discord page
            response_data = cache_item["response_data"]
        if response_data["count"] == 0:
            # No hits at all: replace the placeholder message and stop
            await msg.edit(
                content=i18n["search"]["no_results"].format(
                    query=query,
                    user=get_username(user),
                    time_str=time_str,
                    duration_str=get_duration_str(start),
                )
            )
            return
        # Only cache the result if the user can change pages
        if response_data["count"] > self.discord_page_size:
            # Update the cache
            self.cache.set(
                msg.id,
                {
                    "query": query,
                    "user": cache_item["user"],
                    "after_time": after_time,
                    "before_time": before_time,
                    "time_str": time_str,
                    "cur_page": discord_page,
                    "discord_user_id": cache_item["discord_user_id"],
                    "response_data": response_data,
                    "request_page": request_page,
                },
            )
        # Calculate the offset within the response
        # The requested pages are larger than the pages displayed on Discord
        request_offset = request_page * self.request_page_size
        discord_offset = discord_page * self.discord_page_size
        result_offset = discord_offset - request_offset
        page_results: List[Dict[str, Any]] = response_data["results"][
            result_offset : result_offset + self.discord_page_size
        ]
        # Build the embed body: one numbered description per result
        description = ""
        for i, res in enumerate(page_results):
            description += create_result_description(res, discord_offset + i + 1, query)
        total_discord_pages = math.ceil(response_data["count"] / self.discord_page_size)
        await msg.edit(
            content=i18n["search"]["embed_message"].format(
                query=query,
                user=get_username(user),
                time_str=time_str,
                duration_str=get_duration_str(start),
            ),
            embed=Embed(
                title=i18n["search"]["embed_title"].format(
                    query=query, user=get_username(user)
                ),
                description=description,
            ).set_footer(
                text=i18n["search"]["embed_footer"].format(
                    cur_page=discord_page + 1,
                    total_pages=total_discord_pages,
                    total_results=response_data["count"],
                ),
            ),
        )
        emoji_controls = []
        # Determine which controls are appropriate
        if discord_page > 0:
            emoji_controls.append(first_page_emoji)
            emoji_controls.append(previous_page_emoji)
        if discord_page < total_discord_pages - 1:
            emoji_controls.append(next_page_emoji)
            emoji_controls.append(last_page_emoji)
        # Add control emojis to message
        await asyncio.gather(*[msg.add_reaction(emoji) for emoji in emoji_controls])
@cog_ext.cog_slash(
name="search",
description="Searches for transcriptions that contain the given text.",
options=[
create_option(
name="query",
description="The text to search for (case-insensitive).",
option_type=3,
required=True,
),
create_option(
name="username",
description="The user to restrict the search to. "
"Defaults to the user executing the command.",
option_type=3,
required=False,
),
create_option(
name="after",
description="Only show transcriptions after this date.",
option_type=3,
required=False,
),
create_option(
name="before",
description="Only show transcriptions before this date.",
option_type=3,
required=False,
),
],
)
async def search(
| |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# ./create_JSON_file_of_sections_in_your_courses.py
#
# with the option "-v" or "--verbose" you get lots of output - showing in detail the operations of the program
#
# Can also be called with an alternative configuration file:
# ./create_JSON_file_of_sections_in_your_courses --config config-test.json
#
# Purpose:
# Create a JSON file with information about courses where user enrolled as a 'TeacherEnrollment', 'Examiner', or 'TaEnrollment'
#
# The JSON file contains a course_info dict
# courses_to_ignore=course_info['courses_to_ignore'] - courses that the user wants to ignore
# courses_without_specific_sections=course_info['courses_without_specific_sections'] - courses where the user is responsible for all the students
# courses_with_sections=course_info['courses_with_sections'] - courses where the user has a specific section
# the specific section's name may be the user's name or some other unique string (such as "Chip's section")
# Because the name of the relevant section can be arbitrary, this file is necessary to know which section belongs to a given user
#
# Examples:
# create file for only exjobb courses:
# ./create_JSON_file_of_sections_in_your_courses.py -s fee.json -X
#
# update an existing file (possibly adding new courses)
# ./create_JSON_file_of_sections_in_your_courses.py -s foo.json -U
#
# <NAME>.
#
# 2020.02.04
# based on earlier list_your_courses_JSON.py
#
import requests, time
import pprint
import optparse
import sys
import json
#############################
###### EDIT THIS STUFF ######
#############################
global baseUrl # the base URL used for access to Canvas
global header # the header for all HTML requests
global payload # place to store additionally payload when needed for options to HTML requests
# Based upon the options to the program, initialize the variables used to access Canvas via HTTP requests
def initialize(options):
    """Read the Canvas configuration file and set up the globals used for API access.

    Sets the module globals baseUrl, header and payload from the JSON
    configuration file (options.config_filename, or config.json by default).
    Prints a message and exits the program if the file cannot be read,
    is not valid JSON, or lacks the expected "canvas" keys.
    """
    global baseUrl, header, payload
    # styled based upon https://martin-thoma.com/configuration-files-in-python/
    if options.config_filename:
        config_file = options.config_filename
    else:
        config_file = 'config.json'
    try:
        with open(config_file) as json_data_file:
            configuration = json.load(json_data_file)
        access_token = configuration["canvas"]["access_token"]
        baseUrl = "https://" + configuration["canvas"]["host"] + "/api/v1"
        header = {'Authorization': 'Bearer ' + access_token}
        payload = {}
    # narrowed from a bare `except:` so programming errors and
    # KeyboardInterrupt are no longer silently swallowed
    except (OSError, KeyError, json.JSONDecodeError):
        print("Unable to open configuration file named {}".format(config_file))
        print("Please create a suitable configuration file, the default name is config.json")
        sys.exit()
def list_your_courses():
    """Return a list of all courses the authenticated user belongs to.

    Uses GET /api/v1/courses and follows the Canvas pagination links so the
    full list is returned even when the response is split into pages.
    """
    url = "{0}/courses".format(baseUrl)
    if Verbose_Flag:
        print("url: {}".format(url))
    r = requests.get(url, headers=header)
    if Verbose_Flag:
        print("result of getting courses: {}".format(r.text))
    collected = []
    if r.status_code != requests.codes.ok:
        return collected
    collected.extend(r.json())
    # Canvas paginates long responses; keep following the 'next' link
    # until the 'current' page is also the 'last' page.
    # See "Handling Pagination", https://community.canvaslms.com/thread/1500
    while r.links['current']['url'] != r.links['last']['url']:
        r = requests.get(r.links['next']['url'], headers=header)
        if Verbose_Flag:
            print("result of getting courses for a paginated response: {}".format(r.text))
        collected.extend(r.json())
    return collected
def your_user_info():
    """Return the authenticated user's own profile (GET /api/v1/users/self).

    Returns the parsed JSON dict on success, or False on any HTTP error.
    """
    url = "{0}/users/self".format(baseUrl)
    if Verbose_Flag:
        print("url: {}".format(url))
    r = requests.get(url, headers=header)
    if Verbose_Flag:
        print("result of getting your own user information: {}".format(r.text))
    if r.status_code != requests.codes.ok:
        return False
    return r.json()
def sections_in_course(course_id):
    """Return every section of the given course.

    Uses GET /api/v1/courses/:course_id/sections and follows the Canvas
    pagination links until the whole list has been fetched.
    """
    url = "{0}/courses/{1}/sections".format(baseUrl, course_id)
    if Verbose_Flag:
        print("url: {}".format(url))
    r = requests.get(url, headers=header)
    if Verbose_Flag:
        print("result of getting sections: {}".format(r.text))
    sections = []
    if r.status_code == requests.codes.ok:
        sections.extend(r.json())
        # follow the 'next' pagination link until the last page is reached
        # see "Handling Pagination", https://community.canvaslms.com/thread/1500
        while r.links['current']['url'] != r.links['last']['url']:
            r = requests.get(r.links['next']['url'], headers=header)
            sections.extend(r.json())
    return sections
def students_in_course(course_id):
    """Return the StudentEnrollment records of the given course.

    Uses GET /api/v1/courses/:course_id/enrollments filtered to student
    enrollments, following the Canvas pagination links as needed.
    """
    url = "{0}/courses/{1}/enrollments".format(baseUrl, course_id)
    if Verbose_Flag:
        print("url: {}".format(url))
    # ask for large pages and only student enrollments
    extra_parameters = {'per_page': '100',
                        'type': ['StudentEnrollment']}
    r = requests.get(url, params=extra_parameters, headers=header)
    if Verbose_Flag:
        print("result of getting enrollments: {}".format(r.text))
    enrollments = []
    if r.status_code == requests.codes.ok:
        enrollments.extend(r.json())
        # follow the 'next' pagination link until the last page is reached
        # see "Handling Pagination", https://community.canvaslms.com/thread/1500
        while r.links['current']['url'] != r.links['last']['url']:
            r = requests.get(r.links['next']['url'], headers=header)
            enrollments.extend(r.json())
    return enrollments
def list_assignments(course_id):
    """Return every assignment of the given course.

    Uses GET /api/v1/courses/:course_id/assignments and follows the Canvas
    pagination links until the whole list has been fetched.
    """
    url = "{0}/courses/{1}/assignments".format(baseUrl, course_id)
    if Verbose_Flag:
        print("url: {}".format(url))
    r = requests.get(url, headers=header)
    if Verbose_Flag:
        print("result of getting assignments: {}".format(r.text))
    assignments = []
    if r.status_code == requests.codes.ok:
        assignments.extend(r.json())
        # follow the 'next' pagination link until the last page is reached
        # see "Handling Pagination", https://community.canvaslms.com/thread/1500
        while r.links['current']['url'] != r.links['last']['url']:
            r = requests.get(r.links['next']['url'], headers=header)
            if Verbose_Flag:
                print("result of getting assignments for a paginated response: {}".format(r.text))
            assignments.extend(r.json())
    return assignments
def submission_for_assignment_by_user(course_id, assignment_id, user_id):
    """Return one user's submission for one assignment of a course as a dict.

    Uses GET /api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id.
    Returns an empty dict on any HTTP error.
    """
    url = "{0}/courses/{1}/assignments/{2}/submissions/{3}".format(baseUrl, course_id, assignment_id, user_id)
    if Verbose_Flag:
        print("url: {}".format(url))
    r = requests.get(url, headers=header)
    if Verbose_Flag:
        print("result of getting submissions: {}".format(r.text))
    if r.status_code != requests.codes.ok:
        return dict()
    submission = r.json()
    if Verbose_Flag:
        print("page_response: " + str(submission))
    return submission
def cleanup_sections(users_name, courses_with_sections):
    """Keep only the user's own section in each course that has one.

    For every course, if any section's name equals users_name, replace the
    course's section mapping with just that one section; courses without a
    matching section are left untouched.  Mutates and returns the dict.
    """
    for course_id in courses_with_sections:
        course = courses_with_sections[course_id]
        users_section = False
        for section_id in course.get('sections', []):
            if course['sections'][section_id] == users_name:
                users_section = section_id
        if users_section:
            course['sections'] = {users_section: users_name}
    return courses_with_sections
def remove_courses_to_be_ignored(course_list, courses_to_ignore):
    """Return a new list with the courses listed in courses_to_ignore removed.

    course['id'] is an int in course_list but the keys of courses_to_ignore
    are strings, hence the str() conversion before the lookup.
    """
    kept_courses = []
    for course in course_list:
        if Verbose_Flag:
            print("course['id']={}".format(course['id']))
        if courses_to_ignore.get(str(course['id']), False):
            print("ignoring course['id']={}".format(course['id']))
        else:
            kept_courses.append(course)
    return kept_courses
def remove_courses_to_be_ignored_dict(course_dict, courses_to_ignore):
    """Return a new dict with the courses listed in courses_to_ignore removed.

    Unlike remove_courses_to_be_ignored(), the input here is a dict keyed by
    course id *strings*, so the key is compared directly against
    courses_to_ignore.
    """
    new_course_dict = dict()
    for course in course_dict:
        if Verbose_Flag:
            # BUG FIX: 'course' is the string id key itself, not a course
            # record; the old code did course['id'], which raises
            # "TypeError: string indices must be integers" under -v.
            print("course id={}".format(course))
        if courses_to_ignore.get(course, False):
            print("ignoring course with id={}".format(course))
        else:
            new_course_dict[course] = course_dict[course]
    return new_course_dict
def main():
global Verbose_Flag
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose',
dest="verbose",
default=False,
action="store_true",
help="Print lots of output to stdout"
)
parser.add_option('-s', '--sectionnames',
dest="course_info_file",
help="use JSON FILE giving section names for a user in each course",
metavar="FILE"
)
parser.add_option('-U', '--update',
dest="update",
default=False,
action="store_true",
help="update existing JSON file"
)
parser.add_option('-X', '--exjobs',
dest="exjobs",
default=False,
action="store_true",
help="only include degree project courses"
)
parser.add_option("--config", dest="config_filename",
help="read configuration from FILE", metavar="FILE")
options, remainder = parser.parse_args()
Verbose_Flag=options.verbose
if Verbose_Flag:
print("ARGV : {}".format(sys.argv[1:]))
print("VERBOSE : {}".format(options.verbose))
print("REMAINING : {}".format(remainder))
print("Configuration file : {}".format(options.config_filename))
initialize(options)
user_info=your_user_info()
if user_info:
if Verbose_Flag:
pprint.pprint(user_info, indent=4)
user_id=user_info['id']
users_name=user_info['name']
else:
print("No user information")
sys.exit()
course_info=dict()
if options.course_info_file:
course_info_file=options.course_info_file
else:
course_info_file="sections_in_courses_for_{0}.json".format(users_name)
if Verbose_Flag:
print("course_info_file={}".format(course_info_file))
if options.update:
try:
with open(course_info_file) as json_data_file:
try:
course_info = json.load(json_data_file)
if Verbose_Flag:
print("course_info={}".format(course_info))
courses_to_ignore=course_info.get('courses_to_ignore',{})
courses_without_specific_sections=course_info.get('courses_without_specific_sections', {})
courses_with_sections=course_info.get('courses_with_sections', {})
except json.JSONDecodeError as e:
print("Unable to load JSON file named {}".format(course_info_file))
sys.exit()
except OSError as e:
print(e.message)
print("Unable to open JSON file named {}".format(course_info_file))
sys.exit()
else: # otherwise create empty dictionaries
courses_to_ignore=dict()
courses_without_specific_sections=dict()
courses_with_sections=dict()
course_list=list_your_courses()
if len(course_list) == 0:
print("User is not in any courses")
sys.exit()
if Verbose_Flag:
pprint.pprint(course_list, indent=4)
# remove courses that are to be ignored
if len(courses_to_ignore) > 0:
if Verbose_Flag:
print("courses_to_ignore={}".format(courses_to_ignore))
# remove the courses to be ignored from the list of the user's courses
course_list=remove_courses_to_be_ignored(course_list, courses_to_ignore)
# also remove courses to be ignored from the courses_with_sections dict
courses_without_specific_sections=remove_courses_to_be_ignored_dict(courses_without_specific_sections, courses_to_ignore)
#Note: We do not need removes from courses_with_sections - as they will recomputed from the reduced course_list
courses_with_sections=remove_courses_to_be_ignored_dict(courses_with_sections, courses_to_ignore)
# if only including degree project courses (course code of the form cc1ddX* or cc2ddX), then skip other courses
if options.exjobs:
exjobb_courses=[]
for course in course_list:
if (len(course['course_code']) > 6) and |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.