text
stringlengths 29
850k
|
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import logging
try:
from collections import OrderedDict # Python 2.7+ only
except:
pass
from django import forms
from django.forms.widgets import Textarea
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from omeroweb.connector import Server
from omeroweb.custom_forms import NonASCIIForm
from custom_forms import ServerModelChoiceField, GroupModelChoiceField
from custom_forms import GroupModelMultipleChoiceField, OmeNameField
from custom_forms import ExperimenterModelMultipleChoiceField, MultiEmailField
logger = logging.getLogger(__name__)
#################################################################
# Non-model Form
class LoginForm(NonASCIIForm):
    """Login form: server choice plus username/password credentials."""

    username = forms.CharField(
        max_length=50, widget=forms.TextInput(attrs={
            'size': 22, 'autofocus': 'autofocus'}))
    password = forms.CharField(
        max_length=50,
        widget=forms.PasswordInput(attrs={'size': 22, 'autocomplete': 'off'}))

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        # The server choices are only known at runtime, so this field is
        # added here rather than declaratively at class level.
        self.fields['server'] = ServerModelChoiceField(
            Server, empty_label=None)
        self.fields.keyOrder = ['server', 'username', 'password']

    def clean_username(self):
        """Reject logins with the reserved 'guest' account."""
        username = self.cleaned_data['username']
        if username == 'guest':
            raise forms.ValidationError("Guest account is not supported.")
        return username
class ForgottonPasswordForm(NonASCIIForm):
    """Form to request a password-reset e-mail for a server/username pair."""
    # NOTE: the misspelling 'Forgotton' is kept; renaming the class would
    # break existing imports and callers.

    server = ServerModelChoiceField(Server, empty_label=None)
    username = forms.CharField(
        max_length=50,
        widget=forms.TextInput(attrs={'size': 28, 'autocomplete': 'off'}))
    email = forms.EmailField(
        widget=forms.TextInput(attrs={'size': 28, 'autocomplete': 'off'}))
# (value, label) pairs for the experimenter 'role' radio buttons
# rendered by RoleRenderer / ExperimenterForm.
ROLE_CHOICES = (
    ('user', 'User'),
    ('administrator', 'Administrator'),
    ('restricted_administrator', 'Administrator with restricted privileges')
)
class RoleRenderer(forms.RadioSelect.renderer):
    """Allows disabling of 'administrator' Radio button."""

    def render(self):
        """Render the role choices as a <ul>, one <li> per radio button."""
        items = []
        for x, wid in enumerate(self):
            disabled = self.attrs.get('disabled')
            # The 'administrator' option may additionally be disabled via
            # the disable_admin attribute set by ExperimenterForm.
            if ROLE_CHOICES[x][0] == 'administrator':
                if hasattr(self, 'disable_admin'):
                    disabled = getattr(self, 'disable_admin')
            if disabled:
                wid.attrs['disabled'] = True
            # Bug fix: each widget was wrapped in <li> twice (once when
            # collected and again when joined), producing invalid nested
            # <li><li>...</li></li> markup. Wrap exactly once here.
            items.append(u'<li>%s</li>' % force_unicode(wid))
        return mark_safe(u'<ul id="id_role">\n%s\n</ul>' % u'\n'.join(items))
class ExperimenterForm(NonASCIIForm):
    """Form for creating or editing an Experimenter (user).

    Group membership, default group, role radio buttons and the
    admin-privilege checkboxes are built dynamically from the 'initial'
    data supplied by the caller.
    """

    def __init__(self, name_check=False, email_check=False,
                 experimenter_is_me_or_system=False,
                 experimenter_me=False,
                 can_modify_user=True,
                 user_privileges=None,
                 experimenter_root=False,
                 *args, **kwargs):
        super(ExperimenterForm, self).__init__(*args, **kwargs)
        # Bug fix: a mutable default argument ([]) is shared across calls;
        # use None and create a fresh list per instance instead.
        if user_privileges is None:
            user_privileges = []
        self.name_check = name_check
        self.email_check = email_check
        self.user_privileges = user_privileges
        # 'initial' is always supplied by the callers of this form.
        initial = kwargs['initial']
        # The 'other_groups' initial selection is optional; previously a
        # bare except: probed for the key, hiding unrelated errors.
        self.fields['other_groups'] = GroupModelMultipleChoiceField(
            queryset=initial['groups'],
            initial=initial.get('other_groups'), required=False,
            label="Groups")
        try:
            my_groups = initial['my_groups']
        except KeyError:
            # No groups known yet (e.g. creating a new user).
            self.fields['default_group'] = GroupModelChoiceField(
                queryset=list(), empty_label=u"", required=False)
        else:
            self.fields['default_group'] = GroupModelChoiceField(
                queryset=my_groups,
                initial=initial.get('default_group'),
                empty_label=u"", required=False)
        # 'Role' is disabled if experimenter is 'admin' or self,
        # so required=False to avoid validation error.
        self.fields['role'] = forms.ChoiceField(
            choices=ROLE_CHOICES,
            widget=forms.RadioSelect(renderer=RoleRenderer),
            required=False,
            initial='user')
        # If current user is restricted Admin, can't create full Admin
        restricted_admin = "ReadSession" not in self.user_privileges
        self.fields['role'].widget.renderer.disable_admin = restricted_admin
        if initial.get('with_password'):
            self.fields['password'] = forms.CharField(
                max_length=50,
                widget=forms.PasswordInput(attrs={'size': 30,
                                                  'autocomplete': 'off'}))
            self.fields['confirmation'] = forms.CharField(
                max_length=50,
                widget=forms.PasswordInput(attrs={'size': 30,
                                                  'autocomplete': 'off'}))
            fields_key_order = [
                'omename', 'password', 'confirmation', 'first_name',
                'middle_name', 'last_name', 'email', 'institution',
                'role', 'active', 'other_groups', 'default_group']
        else:
            fields_key_order = [
                'omename', 'first_name', 'middle_name', 'last_name',
                'email', 'institution', 'role', 'active',
                'other_groups', 'default_group']
        ordered_fields = [(k, self.fields[k]) for k in fields_key_order]

        roles = [('Sudo', 'Sudo'),
                 # combine WriteFile/ManagedRepo/Owned roles into 'Write'
                 ('Write', 'Write Data'),
                 # combine DeleteFile/ManagedRepo/Owned roles into 'Delete'
                 ('Delete', 'Delete Data'),
                 ('Chgrp', 'Chgrp'),
                 ('Chown', 'Chown'),
                 ('ModifyGroup', 'Create and Edit Groups'),
                 ('ModifyUser', 'Create and Edit Users'),
                 ('ModifyGroupMembership', 'Add Users to Groups'),
                 ('Script', 'Upload Scripts')]
        for role in roles:
            # If current user is light-admin, ignore privileges they don't
            # have so they can't add/remove these from experimenter.
            # We don't disable them - (not in form data and will be removed)
            ordered_fields.append(
                (role[0], forms.BooleanField(
                    required=False,
                    label=role[1],
                    widget=forms.CheckboxInput(
                        attrs={'class': 'privilege',
                               'disabled': role[0] not in user_privileges})
                ))
            )
        # Django 1.8: Form.fields uses OrderedDict from the collections module.
        self.fields = OrderedDict(ordered_fields)

        if experimenter_me or experimenter_root:
            self.fields['omename'].widget.attrs['readonly'] = True
            name = "yourself"
            if experimenter_root:
                name = "'root' user"
            self.fields['omename'].widget.attrs['title'] = \
                "You can't edit Username of %s" % name
            self.fields['role'].widget.attrs['disabled'] = True
            self.fields['active'].widget.attrs['disabled'] = True
            self.fields['active'].widget.attrs['title'] = \
                "You cannot disable %s" % name
        # If we can't modify user, ALL fields are disabled
        if not can_modify_user:
            for field in self.fields.values():
                field.widget.attrs['disabled'] = True

    omename = OmeNameField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}),
        label="Username")
    first_name = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}))
    middle_name = forms.CharField(max_length=250, widget=forms.TextInput(
        attrs={'size': 30, 'autocomplete': 'off'}), required=False)
    last_name = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}))
    email = forms.EmailField(
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}),
        required=False)
    institution = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}),
        required=False)
    active = forms.BooleanField(required=False)

    def clean_confirmation(self):
        """Check password length and that both password fields match."""
        password = self.cleaned_data.get('password')
        confirmation = self.cleaned_data.get('confirmation')
        if password:
            if len(password) < 3:
                raise forms.ValidationError(
                    'Password must be at least 3 characters long.')
        if password or confirmation:
            if password != confirmation:
                raise forms.ValidationError('Passwords do not match')
        else:
            return password
        # NOTE(review): when both fields are set and match, None is
        # returned (original behavior, preserved) -- callers read
        # cleaned_data['password'], not 'confirmation'.

    def clean_omename(self):
        """Reject a username that already exists server-side."""
        if self.name_check:
            raise forms.ValidationError('This username already exists.')
        return self.cleaned_data.get('omename')

    def clean_email(self):
        """Reject an e-mail address that already exists server-side."""
        if self.email_check:
            raise forms.ValidationError('This email already exists.')
        return self.cleaned_data.get('email')

    def clean_default_group(self):
        """A default group must always be chosen."""
        if (self.cleaned_data.get('default_group') is None or
                len(self.cleaned_data.get('default_group')) <= 0):
            raise forms.ValidationError('No default group selected.')
        else:
            return self.cleaned_data.get('default_group')

    def clean_other_groups(self):
        """A user must belong to at least one group."""
        if (self.cleaned_data.get('other_groups') is None or
                len(self.cleaned_data.get('other_groups')) <= 0):
            raise forms.ValidationError(
                'User must be a member of at least one group.')
        else:
            return self.cleaned_data.get('other_groups')
# (value, label) pairs for the group 'permissions' radio buttons used by
# GroupForm and GroupOwnerForm.
PERMISSION_CHOICES = (
    ('0', 'Private'),
    ('1', 'Read-Only'),
    ('2', 'Read-Annotate'),
    ('3', 'Read-Write'),
)
class GroupForm(NonASCIIForm):
    """Form for creating or editing a group.

    Covers name, description, owners, members and permissions; which
    fields are present depends on the caller's capabilities.
    """

    def __init__(self, name_check=False, group_is_system=False,
                 can_modify_group=True, can_add_member=True, *args, **kwargs):
        super(GroupForm, self).__init__(*args, **kwargs)
        self.name_check = name_check
        if can_modify_group:
            self.fields['name'] = forms.CharField(
                max_length=100,
                widget=forms.TextInput(attrs={'size': 25,
                                              'autocomplete': 'off'}))
            self.fields['description'] = forms.CharField(
                max_length=250, required=False,
                widget=forms.TextInput(attrs={'size': 25,
                                              'autocomplete': 'off'}))
        if can_add_member:
            experimenters = kwargs['initial']['experimenters']
            # The 'owners'/'members' initial selections are optional;
            # previously a bare except: probed for the keys, which also
            # hid unrelated errors. .get() returns None (== no initial).
            self.fields['owners'] = ExperimenterModelMultipleChoiceField(
                queryset=experimenters,
                initial=kwargs['initial'].get('owners'), required=False)
            self.fields['members'] = ExperimenterModelMultipleChoiceField(
                queryset=experimenters,
                initial=kwargs['initial'].get('members'), required=False)
        if can_modify_group:
            self.fields['permissions'] = forms.ChoiceField(
                choices=PERMISSION_CHOICES, widget=forms.RadioSelect(),
                required=True, label="Permissions")
            if group_is_system:
                self.fields['name'].widget.attrs['readonly'] = True
                self.fields['name'].widget.attrs['title'] = \
                    "Changing of system group name would be un-doable"
            self.fields.keyOrder = [
                'name', 'description', 'owners', 'members', 'permissions']
        # If we can't modify group, disable fields
        if not can_modify_group:
            for name, field in self.fields.items():
                if name not in ('owners', 'members'):
                    field.widget.attrs['disabled'] = True

    def clean_name(self):
        """Reject a group name that already exists server-side."""
        if self.name_check:
            raise forms.ValidationError('This name already exists.')
        return self.cleaned_data.get('name')
class GroupOwnerForm(forms.Form):
    """Form allowing a group owner to edit owners, members and permissions."""

    permissions = forms.ChoiceField(
        choices=PERMISSION_CHOICES, widget=forms.RadioSelect(), required=True,
        label="Permissions")

    def __init__(self, *args, **kwargs):
        super(GroupOwnerForm, self).__init__(*args, **kwargs)
        experimenters = kwargs['initial']['experimenters']
        # The 'owners'/'members' initial selections are optional;
        # previously a bare except: probed for the keys, hiding errors.
        self.fields['owners'] = ExperimenterModelMultipleChoiceField(
            queryset=experimenters,
            initial=kwargs['initial'].get('owners'), required=False)
        self.fields['members'] = ExperimenterModelMultipleChoiceField(
            queryset=experimenters,
            initial=kwargs['initial'].get('members'), required=False)
        self.fields.keyOrder = ['owners', 'members', 'permissions']
class MyAccountForm(NonASCIIForm):
    """Form for the current user to edit their own account details."""

    def __init__(self, email_check=False, *args, **kwargs):
        super(MyAccountForm, self).__init__(*args, **kwargs)
        self.email_check = email_check
        # The 'default_group' initial value is optional; previously a bare
        # except: probed for the key, hiding unrelated errors.
        self.fields['default_group'] = GroupModelChoiceField(
            queryset=kwargs['initial']['groups'],
            initial=kwargs['initial'].get('default_group'),
            empty_label=None)
        self.fields.keyOrder = [
            'omename', 'first_name', 'middle_name', 'last_name', 'email',
            'institution', 'default_group']

    omename = forms.CharField(
        max_length=50,
        widget=forms.TextInput(attrs={'onfocus': 'this.blur()', 'size': 30,
                                      'autocomplete': 'off'}),
        label="Username")
    first_name = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}))
    middle_name = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}),
        required=False)
    last_name = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}))
    email = forms.EmailField(
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}),
        required=False)
    institution = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30, 'autocomplete': 'off'}),
        required=False)

    def clean_email(self):
        """Reject an e-mail address that already exists server-side."""
        if self.email_check:
            raise forms.ValidationError('This email already exists.')
        return self.cleaned_data.get('email')
class ContainedExperimentersForm(NonASCIIForm):
    """Form listing the experimenters contained in a group."""

    def __init__(self, *args, **kwargs):
        super(ContainedExperimentersForm, self).__init__(*args, **kwargs)
        # The 'members' initial selection is optional; previously a bare
        # except: probed for the key, hiding unrelated errors.
        self.fields['members'] = ExperimenterModelMultipleChoiceField(
            queryset=kwargs['initial']['experimenters'],
            initial=kwargs['initial'].get('members'),
            required=False)
        self.fields.keyOrder = ['members']
class UploadPhotoForm(forms.Form):
    """Single-field form for uploading a user avatar photo."""

    photo = forms.FileField(required=False)

    def clean_photo(self):
        """Validate that a photo was supplied, is an image, and is <= 200KB."""
        photo = self.cleaned_data.get('photo')
        if photo is None:
            raise forms.ValidationError(
                'No image selected. Supported image formats (file extensions'
                ' allowed): jpeg, jpg, gif, png. The maximum image size'
                ' allowed is 200KB.')
        if not photo.content_type.startswith("image"):
            raise forms.ValidationError(
                'Supported image formats (file extensions allowed):'
                ' jpeg, jpg, gif, png.')
        if photo.size > 204800:
            raise forms.ValidationError(
                'The maximum image size allowed is 200KB.')
        return photo
class ChangePassword(NonASCIIForm):
    """Form for changing the current user's password."""

    old_password = forms.CharField(
        max_length=50,
        widget=forms.PasswordInput(attrs={'size': 30, 'autocomplete': 'off'}),
        label="Current password")
    password = forms.CharField(
        max_length=50,
        widget=forms.PasswordInput(attrs={'size': 30, 'autocomplete': 'off'}),
        label="New password")
    confirmation = forms.CharField(
        max_length=50,
        widget=forms.PasswordInput(attrs={'size': 30, 'autocomplete': 'off'}),
        label="Confirm password")

    def clean_confirmation(self):
        """Check new-password length and that both password fields match."""
        password = self.cleaned_data.get('password')
        confirmation = self.cleaned_data.get('confirmation')
        if password or confirmation:
            # Bug fix: 'password' is absent from cleaned_data when its own
            # field validation failed, so len(None) raised a TypeError here;
            # fall back to '' so a ValidationError is raised instead.
            if len(password or '') < 3:
                raise forms.ValidationError('Password must be at least 3'
                                            ' characters long.')
            if password != confirmation:
                raise forms.ValidationError('Passwords do not match')
        else:
            return password
        # NOTE(review): when both fields are set and match, None is
        # returned (original behavior, preserved) -- callers read
        # cleaned_data['password'], not 'confirmation'.
class EnumerationEntry(NonASCIIForm):
    """Form with a single text field for adding a new enumeration entry."""

    new_entry = forms.CharField(
        max_length=250,
        widget=forms.TextInput(attrs={'size': 30}))
class EnumerationEntries(NonASCIIForm):
    """Form with one text field per existing enumeration entry.

    Each field is keyed by the entry id; labels are the 1-based position.
    """

    def __init__(self, entries, *args, **kwargs):
        super(EnumerationEntries, self).__init__(*args, **kwargs)
        # Previously a bare except: probed for initial entries, hiding
        # unrelated errors; only a missing key should select the fallback.
        try:
            initialized = bool(kwargs['initial']['entries'])
        except KeyError:
            initialized = False
        for i, e in enumerate(entries):
            if initialized:
                self.fields[str(e.id)] = forms.CharField(
                    max_length=250,
                    initial=e.value,
                    widget=forms.TextInput(attrs={'size': 30}),
                    label=i+1)
            else:
                self.fields[str(e.id)] = forms.CharField(
                    max_length=250,
                    widget=forms.TextInput(attrs={'size': 30}),
                    label=i+1)
        self.fields.keyOrder = [str(k) for k in self.fields.keys()]
class EmailForm(forms.Form):
    """
    Form to gather recipients, subject and message for sending email
    announcements
    """

    error_css_class = 'field-error'
    required_css_class = 'field-required'

    everyone = forms.BooleanField(required=False, label='All Users')
    experimenters = forms.TypedMultipleChoiceField(
        required=False,
        coerce=int,
        label='Users'
    )
    groups = forms.TypedMultipleChoiceField(
        required=False,
        coerce=int
    )
    # TODO CC isn't really CC. Maybe change label or change functionality
    cc = MultiEmailField(required=False)
    subject = forms.CharField(max_length=100, required=True)
    message = forms.CharField(widget=Textarea, required=True)
    # Include/Exclude inactive users
    inactive = forms.BooleanField(label='Include inactive users',
                                  required=False)

    def __init__(self, experimenters, groups, conn, request, *args, **kwargs):
        super(EmailForm, self).__init__(*args, **kwargs)
        # Build the choice lists for users and groups at runtime.
        user_choices = []
        for e in experimenters:
            label = e.firstName + ' ' + e.lastName + ' (' + e.omeName + ')'
            if not e.isActive():
                label += ' - Inactive'
            user_choices.append((e.id, label))
        self.fields['experimenters'].choices = user_choices
        self.fields['groups'].choices = [(g.id, g.name) for g in groups]
        self.conn = conn
        self.request = request

    def clean(self):
        """Require at least one recipient across all recipient fields."""
        cleaned_data = super(EmailForm, self).clean()
        recipients = (cleaned_data.get("cc"),
                      cleaned_data.get("everyone"),
                      cleaned_data.get("experimenters"),
                      cleaned_data.get("groups"))
        if not any(recipients):
            raise forms.ValidationError("At least one addressee must be "
                                        "specified in one or more of 'all',"
                                        " 'user', 'group' or 'cc'")
        return cleaned_data
|
The head of the euro zone finance ministers, Jeroen Dijsselbloem, has told CNBC that he will use the annual meetings of the International Monetary Fund (IMF) and the World Bank to discuss how U.S. authorities impose fines on the European banking sector.
Dijsselbloem - who is in charge of the Eurogroup of informal meetings of the finance ministers of the euro zone - said that banks in the region have been trying hard to raise capital and tackle legacy issues, but are continuously being confronted by U.S. regulators.
"Here comes the American authorities that says 'Oh, you collected new capital, we'll take that out as a fine'," he told CNBC on the sidelines of the event taking place in Washington D.C. this weekend.
Jeroen Dijsselbloem, Dutch finance minister and head of the group of euro-area finance ministers.
"Don't get me wrong. Deutsche (Bank) has been involved in all kinds of scandals. They need to take their sanctions, but they need to be fair and effective and the last thing we want is for fines to threaten financial stability of the European banking sector," he said.
Dijsselbloem - not speaking specifically on Deutsche Bank - said that the penalties take away a lot of the new capital that European banks have created, adding that he thought it was becoming a risk.
"I think really we should discuss it ... I will bring it up here at the IMF meetings," he said.
"There is a limit to what you should and could do in relation to financial stability and as soon as you come close to that you're overstating things."
The perception is that Deutsche Bank needs to raise cash after the U.S. Justice Department (DOJ) suggested it pay $14 billion to settle a number of investigations related to mortgage securities.
Deutsche Bank's stock has slid over 46 percent so far this year and the cost of insuring exposure to its debt has risen sharply. It has come under pressure from aggressive short-selling, notably from some large hedge funds.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper routines to read in data files.
The terms horizontal and vertical below refer to the horizontal
(fixed-target) and vertical (fixed-budget) views. When considering
convergence graphs of function values over times, we can view it as:
* costs for different fixed targets represented by horizontal cuts.
* function values for different fixed budgets represented by vertical
cuts.
COCO collects experimental data with respect to these two complementary
views. This module provides data structures and methods for dealing with
the experimental data.
"""
from __future__ import absolute_import
import os, sys
import numpy
import warnings
from pdb import set_trace
# GLOBAL VARIABLES

# Column layout of the raw data files.
idxEvals = 0  # index of the column where to find the evaluations

# Single objective case
idxFSingle = 2  # index of the column where to find the function values
nbPtsFSingle = 5  # nb of target function values for each decade.

# Bi-objective case
idxFBi = 1  # index of the column where to find the function values
nbPtsFBi = 10  # nb of target function values for each decade.
#CLASS DEFINITIONS
class MultiReader(list):
    """List of data arrays to be aligned.

    The main purpose of this class is to be used as a single container
    of the data arrays to be aligned by :py:meth:`alignData()` in the
    parent module.

    A data array is defined as an array where rows correspond to
    recordings at different moments of an experiment. Elements of these
    rows correspond to different measures.

    These data arrays can be aligned along the time or the function
    values for instance.

    This class is part abstract. Some methods have to be defined by
    inheriting classes depending on wanted alignment:

    * :py:meth:`isFinished()`, True when all the data is read.
    * :py:meth:`getInitialValue()`, returns the initial alignment value.
    * :py:meth:`newCurrentValue()`, returns the next alignment value.
    * :py:meth:`align()`, process all the elements of self to make
      them aligned.

    Some attributes have to be defined as well :py:attr:`idx`,
    the index of the column with alignment values in the data array,
    :py:attr:`idxData`, the index of the column with the actual data.
    """
    # TODO: this class and all inheriting class may have to be redesigned for
    # other kind of problems to work.

    # idx: index of the column in the data array of the alignment value.
    # idxData: index of the column in the data array for the data of concern.

    def __init__(self, data, isHArray=False):
        # Keep one SingleReader per non-empty data array.
        for i in data:
            if len(i) > 0:  # ie. if the data array is not empty.
                self.append(self.SingleReader(i, isHArray))

    def currentLine(self):
        """Aggregates currentLines information."""
        return numpy.array(list(i.currentLine[self.idxData] for i in self))

    def currentValues(self):
        """Gets the list of the current alignment values."""
        return list(i.currentLine[self.idx] for i in self)

    def nextValues(self):
        """Gets the list of the next alignment values."""
        return list(i.nextLine[self.idx] for i in self if not i.isFinished)

    # Abstract interface, implemented by inheriting classes (previously
    # left as commented-out defs with stray bare-string statements):
    # def isFinished(self): when all the data is read.
    # def getInitialValue(self): returns the initial alignment value.
    # def newCurrentValue(self): returns the next alignment value.
    # def align(self, currentValue): process all the elements of self to
    #     make them aligned.

    class SingleReader:
        """Single data array reader class."""

        def __init__(self, data, isHArray=False):
            if len(data) == 0:
                # Bug fix: 'raise ValueError, msg' is Python-2-only syntax;
                # the call form works on both Python 2 and 3.
                raise ValueError('Empty data array.')
            self.data = numpy.array(data)
            self.it = self.data.__iter__()
            self.isNearlyFinished = False
            self.isFinished = False
            self.currentLine = None
            # next() builtin (Py2.6+/Py3) instead of Py2-only it.next().
            self.nextLine = next(self.it)
            if isHArray:
                self.idxEvals = range(1, numpy.shape(data)[1])
            else:
                self.idxEvals = idxEvals

        def next(self):
            """Returns the next (last if undefined) line of the array data."""
            if not self.isFinished:
                if not self.isNearlyFinished:  # the next line is still defined
                    self.currentLine = self.nextLine.copy()
                    # Update nextLine
                    try:
                        self.nextLine = next(self.it)
                    except StopIteration:
                        self.isNearlyFinished = True
                else:
                    self.isFinished = True
                    self.currentLine[self.idxEvals] = numpy.nan
                    # TODO: the line above was not valid for the
                    # MultiArrayReader
            return self.currentLine
class VMultiReader(MultiReader):
    """List of data arrays to be aligned vertically.

    Aligned vertically means, all number of function evaluations are the
    closest from below or equal to the alignment number of function
    evaluations.
    """

    # The alignment value is the number of function evaluations.
    idx = idxEvals

    def __init__(self, data, isBiobjective):
        super(VMultiReader, self).__init__(data)
        # The data of concern are the function values.
        self.idxData = idxFBi if isBiobjective else idxFSingle

    def isFinished(self):
        """True once every underlying reader is exhausted."""
        return all(i.isFinished for i in self)

    def getInitialValue(self):
        """Advance each reader once and return the smallest evaluation count."""
        for i in self:
            i.next()
        res = self.currentValues()
        return min(res)

    def newCurrentValue(self):
        """Return the smallest next evaluation count, or None when done."""
        res = self.nextValues()
        # Bug fix: self.nextValues() was recomputed a second time inside
        # min(); reuse the already-computed list instead.
        return min(res) if res else None

    def align(self, currentValue):
        """Advance all readers up to currentValue; return the aligned row."""
        for i in self:
            while not i.isFinished:
                if i.nextLine[self.idx] > currentValue:
                    break
                i.next()
        return numpy.insert(self.currentLine(), 0, currentValue)
class VMultiReaderNew(MultiReader):
    """List of data arrays to be aligned vertically.

    Aligned vertically means, all number of function evaluations are the
    closest from below or equal to the alignment number of function
    evaluations.
    """

    # The alignment value is the number of function evaluations.
    idx = idxEvals

    def __init__(self, data, isBiobjective):
        super(VMultiReaderNew, self).__init__(data)
        # The data of concern are the function values.
        self.idxData = idxFBi if isBiobjective else idxFSingle

    def isFinished(self):
        # Done once every underlying reader is exhausted.
        return all(i.isFinished for i in self)

    def getAlignedValues(self, selectedValues):
        """Return an alignment value shared by *all* readers, or None.

        Repeatedly advances the reader holding the smallest value until
        every reader reports the same value, or one of them runs out of
        data (isFinished drops it from nextValues, shrinking len(res)).
        """
        res = selectedValues()
        # iterate until you find the same evaluation number in all functions
        while res and min(res) < max(res) and len(res) == len(self):
            index = res.index(min(res))
            self[index].next()
            res = selectedValues()
            # Stop if the reader just advanced has exhausted its data.
            if self[index].isFinished:
                break
        if res and min(res) == max(res) and len(res) == len(self):
            return min(res)
        else:
            return None

    def getInitialValue(self):
        # Prime every reader with its first line, then align.
        for i in self:
            i.next()
        return self.getAlignedValues(self.currentValues)

    def newCurrentValue(self):
        # Align on the readers' next lines.
        return self.getAlignedValues(self.nextValues)

    def align(self, currentValue):
        # Advance all readers up to currentValue; return the aligned row.
        for i in self:
            while not i.isFinished:
                if i.nextLine[self.idx] > currentValue:
                    break
                i.next()
        return numpy.insert(self.currentLine(), 0, currentValue)
class HMultiReader(MultiReader):
    """List of data arrays to be aligned horizontally.

    Aligned horizontally means all the function values are lesser than
    (or equal to) the current alignment function value.
    """

    # The data of concern are the number of function evals.
    idxData = idxEvals

    def __init__(self, data, isBiobjective):
        super(HMultiReader, self).__init__(data)
        # the alignment value is the function value.
        self.idx = idxFBi if isBiobjective else idxFSingle
        self.nbPtsF = nbPtsFBi if isBiobjective else nbPtsFSingle
        self.idxCurrentF = numpy.inf  # Minimization
        # idxCurrentF is a float for the extreme case where it is infinite.
        # else it is an integer and then is the 'i' in 10**(i/nbPtsF)

    def isFinished(self):
        """Is finished when we found the last alignment value reached."""
        currentValue = numpy.power(10, self.idxCurrentF / self.nbPtsF)
        if currentValue == 0:
            return True
        # It can be more than one line for the previous alignment value.
        # We iterate until we find a better value or to the end of the lines.
        for i in self:
            while i.nextLine[self.idx] > currentValue and not i.isFinished:
                i.next()
        return not any(i.nextLine[self.idx] <= currentValue for i in self)

    def getInitialValue(self):
        """Prime the readers and return the first (largest) target value."""
        for i in self:
            i.next()
        fvalues = self.currentValues()
        self.idxCurrentF = numpy.ceil(numpy.log10(
            max(fvalues) if max(fvalues) > 0 else 1e-19) * self.nbPtsF)
        # Returns the smallest 10^i/nbPtsF value larger than max(Fvalues)
        return numpy.power(10, self.idxCurrentF / self.nbPtsF)

    def newCurrentValue(self):
        """Step one target index down and return the new target value."""
        self.idxCurrentF -= 1
        return numpy.power(10, self.idxCurrentF / self.nbPtsF)

    def align(self, currentValue):
        """Advance readers until their f-value reaches currentValue."""
        fvalues = []
        for i in self:
            while not i.isFinished:
                if i.currentLine[self.idx] <= currentValue:
                    break
                i.next()
            if i.currentLine[self.idx] <= currentValue:
                fvalues.append(i.currentLine[self.idx])
        # This should not happen
        if not fvalues:
            # Bug fix: the message contained a %g placeholder that was never
            # filled in; also use Py3-compatible raise syntax.
            raise ValueError('Value %g is not reached.' % currentValue)
        if max(fvalues) <= 0.:
            self.idxCurrentF = -numpy.inf
            currentValue = 0.
        else:
            self.idxCurrentF = min(self.idxCurrentF,
                                   numpy.ceil(numpy.log10(max(fvalues))
                                              * self.nbPtsF))
            # Above line may return: Warning: divide by zero encountered in
            # log10 in the case of negative fvalues.
            # In the case of negative values for fvalues, self.idxCurrentF
            # should be -numpy.inf at the condition that
            # numpy.power(10, -inf) == 0 is true
            # The update of idxCurrentF is done so all the intermediate
            # function value trigger reached are not written, only the smallest
            currentValue = numpy.power(10, self.idxCurrentF / self.nbPtsF)
        return numpy.insert(self.currentLine(), 0, currentValue)
class ArrayMultiReader(MultiReader):
    """Class of *aligned* data arrays to be aligned together.

    This class is used for dealing with the output of
    :py:class:`MultiReader`:

    * From *raw* data arrays, :py:class:`MultiReader` generates aligned
      data arrays (first column is the alignment value, subsequent
      columns are aligned data).
    * This class also generates aligned data arrays but from other
      aligned data arrays.
    """

    idx = 0  # We expect the alignment value to be the 1st column.

    def __init__(self, data, isHArray=False):
        MultiReader.__init__(self, data, isHArray)

    def currentLine(self):
        """Aggregates currentLines information."""
        # Drop each reader's alignment column (column 0) and concatenate
        # the remaining data columns into a single flat row.
        chunks = [reader.currentLine[1:] for reader in self]
        return numpy.hstack(chunks)
class VArrayMultiReader(ArrayMultiReader, VMultiReader):
    """Wrapper class of *aligned* data arrays to be aligned vertically."""

    def __init__(self, data):
        # Aggregation behavior comes from ArrayMultiReader; vertical
        # alignment methods are inherited from VMultiReader.
        ArrayMultiReader.__init__(self, data)
        # TODO: Should this use super?
class VArrayMultiReaderNew(ArrayMultiReader, VMultiReader):
    """Wrapper class of *aligned* data arrays to be aligned vertically."""
    # NOTE(review): despite the 'New' suffix this inherits from
    # VMultiReader, not VMultiReaderNew -- confirm whether that is
    # intentional before relying on the 'New' alignment behavior.

    def __init__(self, data):
        ArrayMultiReader.__init__(self, data)
        # TODO: Should this use super?
class HArrayMultiReader(ArrayMultiReader, HMultiReader):
    """Wrapper class of *aligned* data arrays to be aligned horizontally."""

    def __init__(self, data, isBiobjective):
        ArrayMultiReader.__init__(self, data, isHArray=True)
        # TODO: Should this use super?
        # Number of targets per f-value decade depends on the suite.
        self.nbPtsF = nbPtsFBi if isBiobjective else nbPtsFSingle
        self.idxCurrentF = numpy.inf  # Minimization
#FUNCTION DEFINITIONS
def alignData(data, isBiobjective):
    """Aligns the data from a list of data arrays.

    This method returns an array for which the alignment value is the
    first column and the aligned values are in subsequent columns.
    """
    # TODO: is template dependent.
    idxF = idxFBi if isBiobjective else idxFSingle
    rows = []
    currentValue = data.getInitialValue()
    # If already finished, still record one aligned row.
    if data.isFinished():
        rows.append(data.align(currentValue))
    while not data.isFinished() and currentValue:
        rows.append(data.align(currentValue))
        currentValue = data.newCurrentValue()
    # Hack: at this point nextLine contains all information on the last
    # line of the data.
    lastEvals = numpy.array(list(i.nextLine[idxEvals] for i in data))
    lastF = numpy.array(list(i.nextLine[idxF] for i in data))
    return (numpy.vstack(rows), lastEvals, lastF)
def alignArrayData(data):
    """Aligns the data from a list of aligned arrays.

    This method returns an array for which the alignment value is the first
    column and the aligned values are in subsequent columns.
    """
    # TODO: is template dependent.
    rows = []
    currentValue = data.getInitialValue()
    # If already finished, still record one aligned row.
    if data.isFinished():
        rows.append(data.align(currentValue))
    while not data.isFinished():
        rows.append(data.align(currentValue))
        currentValue = data.newCurrentValue()
    # Hack: at this point nextLine contains all information on the last
    # line of the data.
    return numpy.vstack(rows)
def openfile(filePath):
    """Open *filePath* for reading, raising a descriptive IOError when the
    file cannot be found (distinguishing the Windows long-path case)."""
    if os.path.isfile(filePath):
        return open(filePath, 'r')
    # Windows cannot address paths longer than 259 characters, which makes
    # the file appear to be missing; report that case explicitly.
    if ('win32' in sys.platform) and len(filePath) > 259:
        raise IOError(2, 'The path is too long for the file "%s".' % filePath)
    raise IOError(2, 'The file "%s" does not exist.' % filePath)
def split(dataFiles, isBiobjective, dim=None):
    """Split a list of data files into arrays corresponding to data sets.

    A line starting with '%' (comment) closes the current data set. When
    *dim* is given, data lines whose field count is not ``dim + 5`` are
    skipped with a warning. Returns a list of numpy arrays.
    """
    dataSets = []
    for fil in dataFiles:
        with openfile(fil) as f:
            # This doesnt work with windows.
            # content = numpy.loadtxt(fil, comments='%')
            lines = f.readlines()

        content = []

        # Save values in array content. Check for nan and inf.
        for line in lines:
            # skip if comment
            if line.startswith('%'):
                # A comment line closes the data set accumulated so far.
                if content:
                    dataSets.append(numpy.vstack(content))
                    content = []
                # For bi-objective data only the first 5 data sets are kept.
                if isBiobjective and len(dataSets) >= 5:
                    break
                continue

            # else remove end-of-line sign
            # and split into single strings
            data = line.strip('\n').split()
            if dim and len(data) != dim + 5:
                warnings.warn('Incomplete line %s in ' % (line) +
                              'data file %s: ' % (fil))
                continue
            # Convert textual Inf/NaN spellings into numpy sentinels,
            # everything else into floats.
            for id in xrange(len(data)):
                if data[id] in ('Inf', 'inf'):
                    data[id] = numpy.inf
                elif data[id] in ('-Inf', '-inf'):
                    data[id] = -numpy.inf
                elif data[id] in ('NaN', 'nan'):
                    data[id] = numpy.nan
                else:
                    data[id] = float(data[id])
            content.append(numpy.array(data))

        #Check that it always have the same length?
        # Flush the trailing data set (files need not end with a comment).
        if content:
            dataSets.append(numpy.vstack(content))
    return dataSets
|
I would like to know if there was any superstition about nightmares. Was there any spirit similar to the Nightmare? Were the Egyptians afraid of evil dreams?
I don't know if they were necessarily afraid.
They did have people who would interpret their dreams.
The library of Scribe Kenhirkhopeshef also contained a Dream Book papyrus. This Dream Book was written long before Kenhirkhopeshef’s time – even then the text was considered ancient – but the content remained relevant.
The Dream Book contains approximately 108 ancient dreams, within which it describes about 78 activities and emotions.
You can see the actual papyrus as well as some other objects that belonged to this scribe, who lived in the workers’ village of Deir el-Medina.
I hope others may know more about possible deities associated with dreams?
|
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Consul-based coordinator services """
from consul import ConsulException
from consul.twisted import Consul
from requests import ConnectionError
from structlog import get_logger
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
from twisted.internet.error import DNSLookupError
from zope.interface import implementer
from leader import Leader
from common.utils.asleep import asleep
from common.utils.message_queue import MessageQueue
from voltha.registry import IComponent
from worker import Worker
from simplejson import dumps, loads
from common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
log = get_logger()
class StaleMembershipEntryException(Exception):
    """Signals that a (possibly stale) membership k/v record is in the way;
    handled with backoff in Coordinator._retry()."""
    pass
@implementer(IComponent)
class Coordinator(object):
    """
    An app shall instantiate only one Coordinator (singleton).
    A single instance of this object shall take care of all external
    interactions with consul, and via consul, all coordination activities
    with its clustered peers. Roles include:

    - registering an ephemeral membership entry (k/v record) in consul
    - participating in a symmetric leader election, and potentially assuming
      the leader's role. What leadership entails is not a concern for the
      coordination, it simply instantiates (and shuts down) a leader class
      when it gains (or loses) leadership.
    """

    CONNECT_RETRY_INTERVAL_SEC = 1
    # Progressive retry delays (seconds) used by _backoff()/_clear_backoff().
    RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]

    # Public methods:

    def __init__(self,
                 internal_host_address,
                 external_host_address,
                 instance_id,
                 rest_port,
                 config,
                 consul='localhost:8500',
                 container_name_regex='^.*\.([0-9]+)\..*$'):
        log.info('initializing-coordinator')
        self.config = config['coordinator']
        self.worker_config = config['worker']
        self.leader_config = config['leader']
        # NOTE(review): read from the *top-level* config, unlike the
        # settings below which come from the 'coordinator' section --
        # confirm this asymmetry is intentional.
        self.membership_watch_relatch_delay = config.get(
            'membership_watch_relatch_delay', 0.1)
        self.tracking_loop_delay = self.config.get(
            'tracking_loop_delay', 1)
        self.session_renewal_timeout = self.config.get(
            'session_renewal_timeout', 5)
        self.session_renewal_loop_delay = self.config.get(
            'session_renewal_loop_delay', 3)
        self.membership_maintenance_loop_delay = self.config.get(
            'membership_maintenance_loop_delay', 5)
        self.session_time_to_live = self.config.get(
            'session_time_to_live', 10)
        # K/V key prefixes under which all coordination state is stored.
        # NOTE(review): self.config.get(self.config['leader_key'], 'leader')
        # uses the *value* stored under config['leader_key'] as the lookup
        # key; the intent was probably self.config.get('leader_key',
        # 'leader'). The same pattern repeats for the four prefixes below --
        # confirm before changing, as deployed configs may rely on it.
        self.prefix = self.config.get('voltha_kv_prefix', 'service/voltha')
        self.leader_prefix = '/'.join((self.prefix, self.config.get(
            self.config['leader_key'], 'leader')))
        self.membership_prefix = '/'.join((self.prefix, self.config.get(
            self.config['membership_key'], 'members'), ''))
        self.assignment_prefix = '/'.join((self.prefix, self.config.get(
            self.config['assignment_key'], 'assignments'), ''))
        self.workload_prefix = '/'.join((self.prefix, self.config.get(
            self.config['workload_key'], 'work'), ''))
        self.core_store_prefix = '/'.join((self.prefix, self.config.get(
            self.config['core_store_key'], 'data/core')))
        self.core_store_assignment_key = self.core_store_prefix + \
                                         '/assignment'
        self.core_storage_suffix = 'core_store'

        self.retries = 0
        self.instance_id = instance_id
        self.internal_host_address = internal_host_address
        self.external_host_address = external_host_address
        self.rest_port = rest_port
        self.membership_record_key = self.membership_prefix + self.instance_id

        self.session_id = None
        self.i_am_leader = False
        self.leader_id = None  # will be the instance id of the current leader
        self.shutting_down = False
        self.leader = None
        self.membership_callback = None

        self.worker = Worker(self.instance_id, self)

        # 'consul' argument is a "host:port" string.
        self.host = consul.split(':')[0].strip()
        self.port = int(consul.split(':')[1].strip())

        # TODO need to handle reconnect events properly
        self.consul = Consul(host=self.host, port=self.port)

        self.container_name_regex = container_name_regex

        # Deferreds handed out by wait_for_a_leader(), fired by
        # _set_leader_id().
        self.wait_for_leader_deferreds = []

        self.peers_mapping_queue = MessageQueue()

    def start(self):
        """Start the coordinator; initialization is scheduled on the
        reactor, so this returns immediately."""
        log.debug('starting')
        reactor.callLater(0, self._async_init)
        log.info('started')
        return self

    @inlineCallbacks
    def stop(self):
        """Tear down: destroy the consul session (which releases the leader
        lock), stop the worker and, when held, the leader role."""
        log.debug('stopping')
        self.shutting_down = True
        yield self._delete_session()  # this will delete the leader lock too
        yield self.worker.stop()
        if self.leader is not None:
            yield self.leader.stop()
            self.leader = None
        log.info('stopped')

    def wait_for_a_leader(self):
        """
        Async wait till a leader is detected/elected. The deferred will be
        called with the leader's instance_id
        :return: Deferred.
        """
        d = Deferred()
        if self.leader_id is not None:
            # Leader already known: fire immediately.
            d.callback(self.leader_id)
            return d
        else:
            self.wait_for_leader_deferreds.append(d)
            return d

    # Wait for a core data id to be assigned to this voltha instance
    @inlineCallbacks
    def get_core_store_id_and_prefix(self):
        core_store_id = yield self.worker.get_core_store_id()
        returnValue((core_store_id, self.core_store_prefix))

    def recv_peers_map(self):
        # Returns (async) the next published peers mapping message.
        return self.peers_mapping_queue.get()

    def publish_peers_map_change(self, msg):
        self.peers_mapping_queue.put(msg)

    # Proxy methods for consul with retry support

    def kv_get(self, *args, **kw):
        return self._retry('GET', *args, **kw)

    def kv_put(self, *args, **kw):
        return self._retry('PUT', *args, **kw)

    def kv_delete(self, *args, **kw):
        return self._retry('DELETE', *args, **kw)

    # Methods exposing key membership information

    @inlineCallbacks
    def get_members(self):
        """Return list of all members"""
        _, members = yield self.kv_get(self.membership_prefix, recurse=True)
        # Strip the common prefix so only the bare instance ids remain.
        returnValue([member['Key'][len(self.membership_prefix):]
                     for member in members])

    # Private (internal) methods:

    @inlineCallbacks
    def _async_init(self):
        # Ordering matters: the session must exist before the membership
        # record can be acquired against it.
        yield self._create_session()
        yield self._create_membership_record()
        yield self._start_leader_tracking()
        yield self.worker.start()

    def _backoff(self, msg):
        """Async-sleep for the next progressive retry delay (capped at the
        last RETRY_BACKOFF entry)."""
        wait_time = self.RETRY_BACKOFF[min(self.retries,
                                           len(self.RETRY_BACKOFF) - 1)]
        self.retries += 1
        log.info(msg, retry_in=wait_time)
        return asleep(wait_time)

    def _clear_backoff(self):
        # Reset the retry counter after a successful consul interaction.
        if self.retries:
            log.info('reconnected-to-consul', after_retries=self.retries)
            self.retries = 0

    @inlineCallbacks
    def _create_session(self):

        @inlineCallbacks
        def _create_session():
            consul = yield self.get_consul()
            # create consul session
            self.session_id = yield consul.session.create(
                behavior='release', ttl=self.session_time_to_live,
                lock_delay=1)
            log.info('created-consul-session', session_id=self.session_id)
            self._start_session_tracking()

        # Routed through _retry so session creation survives consul outages.
        yield self._retry(_create_session)

    @inlineCallbacks
    def _delete_session(self):
        try:
            yield self.consul.session.destroy(self.session_id)
        except Exception as e:
            # Best effort only: an undeleted session expires by TTL anyway.
            log.exception('failed-to-delete-session',
                          session_id=self.session_id)

    @inlineCallbacks
    def _create_membership_record(self):
        yield self._do_create_membership_record_with_retries()
        reactor.callLater(0, self._maintain_membership_record)

    @inlineCallbacks
    def _maintain_membership_record(self):
        """Loop: verify our membership record is still bound to our session,
        recreating it when it is not."""
        try:
            while 1:
                valid_membership = yield self._assert_membership_record_valid()
                if not valid_membership:
                    log.info('recreating-membership-before',
                             session=self.session_id)
                    yield self._do_create_membership_record_with_retries()
                    log.info('recreating-membership-after',
                             session=self.session_id)
                else:
                    log.debug('valid-membership', session=self.session_id)
                # Async sleep before checking the membership record again
                yield asleep(self.membership_maintenance_loop_delay)
        except Exception, e:
            log.exception('unexpected-error-leader-trackin', e=e)
        finally:
            # except in shutdown, the loop must continue (after a short delay)
            if not self.shutting_down:
                reactor.callLater(self.membership_watch_relatch_delay,
                                  self._maintain_membership_record)

    def _create_membership_record_data(self):
        # Payload stored (JSON-encoded) under our membership key.
        member_record = dict()
        member_record['status'] = 'alive'
        member_record['host_address'] = self.external_host_address
        return member_record

    @inlineCallbacks
    def _assert_membership_record_valid(self):
        """Return True iff our membership record exists and its 'Session'
        field matches the current session id; False on timeout/mismatch."""
        try:
            log.info('membership-record-before')
            is_timeout, (_, record) = yield \
                self.coordinator_get_with_timeout(
                    key=self.membership_record_key,
                    index=0,
                    timeout=5)
            if is_timeout:
                returnValue(False)

            log.info('membership-record-after', record=record)
            if record is None or \
                    'Session' not in record or \
                    record['Session'] != self.session_id:
                log.info('membership-record-change-detected',
                         old_session=self.session_id,
                         record=record)
                returnValue(False)
            else:
                returnValue(True)
        except Exception as e:
            log.exception('membership-validation-exception', e=e)
            returnValue(False)

    @inlineCallbacks
    def _do_create_membership_record_with_retries(self):
        # PUT with acquire= binds the record to our session; a False result
        # means another (stale) session still holds it -- back off and retry.
        while 1:
            log.info('recreating-membership', session=self.session_id)
            result = yield self._retry(
                'PUT',
                self.membership_record_key,
                dumps(self._create_membership_record_data()),
                acquire=self.session_id)
            if result:
                log.info('new-membership-record-created',
                         session=self.session_id)
                break
            else:
                log.warn('cannot-create-membership-record')
                yield self._backoff('stale-membership-record')

    def _start_session_tracking(self):
        reactor.callLater(0, self._session_tracking_loop)

    @inlineCallbacks
    def _session_tracking_loop(self):
        """Renew the consul session ahead of its TTL; on renewal timeout
        (or repeated failure) destroy it and create a brand new session."""

        @inlineCallbacks
        def _redo_session():
            log.info('_redo_session-before')
            yield self._delete_session()

            # Create a new consul connection/session with the configured TTL
            try:
                self.consul = Consul(host=self.host, port=self.port)
                self.session_id = yield self.consul.session.create(
                    behavior='release',
                    ttl=self.session_time_to_live,
                    lock_delay=1)
                log.info('new-consul-session', session=self.session_id)
            except Exception as e:
                log.exception('could-not-create-a-consul-session', e=e)

        @inlineCallbacks
        def _renew_session(m_callback):
            try:
                log.debug('_renew_session-before')
                # Capture the consul handle: self.consul may be swapped by
                # _redo_session while the renew is in flight.
                consul_ref = self.consul
                result = yield consul_ref.session.renew(
                    session_id=self.session_id)
                log.info('just-renewed-session', result=result)
                if not m_callback.called:
                    # Triggering callback will cancel the timeout timer
                    log.info('trigger-callback-to-cancel-timout-timer')
                    m_callback.callback(result)
                else:
                    # Timeout event has already been called. Just ignore
                    # this event
                    log.info('renew-called-after-timout',
                             new_consul_ref=self.consul,
                             old_consul_ref=consul_ref)
            except Exception, e:
                # Let the invoking method receive a timeout
                log.exception('could-not-renew-session', e=e)

        try:
            while 1:
                log.debug('session-tracking-start')
                rcvd = DeferredWithTimeout(
                    timeout=self.session_renewal_timeout)
                _renew_session(rcvd)
                try:
                    _ = yield rcvd
                except TimeOutError as e:
                    log.info('session-renew-timeout', e=e)
                    # Redo the session
                    yield _redo_session()
                except Exception as e:
                    log.exception('session-renew-exception', e=e)
                else:
                    log.debug('successfully-renewed-session')

                # Async sleep before the next session tracking
                yield asleep(self.session_renewal_loop_delay)
        except Exception as e:
            log.exception('renew-exception', e=e)
        finally:
            reactor.callLater(self.session_renewal_loop_delay,
                              self._session_tracking_loop)

    def _start_leader_tracking(self):
        reactor.callLater(0, self._leadership_tracking_loop)

    @inlineCallbacks
    def _leadership_tracking_loop(self):
        """One round of leader election: try to acquire the leader key,
        assert the resulting role, then watch the key until it changes."""
        try:
            # Attempt to acquire leadership lock. True indicates success;
            # False indicates there is already a leader. It's instance id
            # is then the value under the leader key service/voltha/leader.

            # attempt acquire leader lock
            log.info('leadership-attempt-before')
            result = yield self._retry('PUT',
                                       self.leader_prefix,
                                       self.instance_id,
                                       acquire=self.session_id)
            log.info('leadership-attempt-after')

            # read it back before being too happy; seeing our session id is a
            # proof and now we have the change id that we can use to reliably
            # track any changes. In an unlikely scenario where the leadership
            # key gets wiped out administratively since the previous line,
            # the returned record can be None. Handle it.
            (index, record) = yield self._retry('GET',
                                                self.leader_prefix)
            log.info('leader-prefix',
                     i_am_leader=result, index=index, record=record)

            if record is not None:
                if result is True:
                    if record['Session'] == self.session_id:
                        yield self._assert_leadership()
                    else:
                        pass  # confusion; need to retry leadership
                else:
                    leader_id = record['Value']
                    yield self._assert_nonleadership(leader_id)

            # if record was none, we shall try leadership again
            last = record
            while last is not None:
                # this shall return only when update is made to leader key
                # or expires after 5 seconds wait
                is_timeout, (tmp_index, updated) = yield \
                    self.coordinator_get_with_timeout(
                        key=self.leader_prefix,
                        index=index,
                        timeout=5)
                # Timeout means either there is a lost connectivity to
                # consul or there are no change to that key. Do nothing.
                if is_timeout:
                    continue

                # After timeout event the index returned from
                # coordinator_get_with_timeout is None. If we are here it's
                # not a timeout, therefore the index is a valid one.
                index = tmp_index

                if updated is None or updated != last:
                    log.info('leader-key-change',
                             index=index, updated=updated, last=last)
                    # leadership has changed or vacated (or forcefully
                    # removed), apply now
                    # If I was previously the leader then assert a non
                    # leadership role before going for election
                    if self.i_am_leader:
                        log.info('leaving-leadership',
                                 leader=self.instance_id)
                        yield self._assert_nonleadership(self.instance_id)

                    # Fall out to the finally clause, which re-enters the
                    # election loop.
                    break
                last = updated
        except Exception, e:
            log.exception('unexpected-error-leader-trackin', e=e)
        finally:
            # except in shutdown, the loop must continue (after a short delay)
            if not self.shutting_down:
                reactor.callLater(self.tracking_loop_delay,
                                  self._leadership_tracking_loop)

    @inlineCallbacks
    def _assert_leadership(self):
        """(Re-)assert leadership"""
        if not self.i_am_leader:
            self.i_am_leader = True
            self._set_leader_id(self.instance_id)
            yield self._just_gained_leadership()

    @inlineCallbacks
    def _assert_nonleadership(self, leader_id):
        """(Re-)assert non-leader role"""

        # update leader_id anyway
        self._set_leader_id(leader_id)

        if self.i_am_leader:
            self.i_am_leader = False
            yield self._just_lost_leadership()

    def _set_leader_id(self, leader_id):
        # Record the leader and fire every deferred handed out by
        # wait_for_a_leader().
        self.leader_id = leader_id
        deferreds, self.wait_for_leader_deferreds = \
            self.wait_for_leader_deferreds, []
        for d in deferreds:
            d.callback(leader_id)

    def _just_gained_leadership(self):
        log.info('became-leader')
        self.leader = Leader(self)
        return self.leader.start()

    def _just_lost_leadership(self):
        log.info('lost-leadership')
        return self._halt_leader()

    def _halt_leader(self):
        # NOTE(review): 'd' is unbound when self.leader is falsy, which
        # would raise UnboundLocalError on return; the visible callers only
        # reach this after gaining leadership -- confirm.
        if self.leader:
            d = self.leader.stop()
            self.leader = None
        return d

    def get_consul(self):
        return self.consul

    @inlineCallbacks
    def _retry(self, operation, *args, **kw):
        """Perform a consul k/v operation ('GET'/'PUT'/'DELETE') or invoke
        *operation* as a callable, retrying with progressive backoff until
        it succeeds."""
        while 1:
            try:
                consul = yield self.get_consul()
                log.info('start', operation=operation, args=args)
                if operation == 'GET':
                    result = yield consul.kv.get(*args, **kw)
                elif operation == 'PUT':
                    # Replace a stale session id in the acquire= argument
                    # with the current one before issuing the PUT.
                    for name, value in kw.items():
                        if name == 'acquire':
                            if value != self.session_id:
                                log.info('updating-session-in-put-operation',
                                         old_session=value,
                                         new_session=self.session_id)
                                kw['acquire'] = self.session_id
                            break
                    result = yield consul.kv.put(*args, **kw)
                elif operation == 'DELETE':
                    result = yield consul.kv.delete(*args, **kw)
                else:
                    # Default case - consider operation as a function call
                    result = yield operation(*args, **kw)
                self._clear_backoff()
                break
            except ConsulException, e:
                log.exception('consul-not-up',
                              operation=operation,
                              args=args,
                              session=self.consul.Session,
                              e=e)
                yield self._backoff('consul-not-up')
            except ConnectionError, e:
                log.exception('cannot-connect-to-consul',
                              operation=operation,
                              args=args,
                              session=self.consul.Session,
                              e=e)
                yield self._backoff('cannot-connect-to-consul')
            except DNSLookupError, e:
                log.info('dns-lookup-failed', operation=operation, args=args,
                         host=self.host)
                yield self._backoff('dns-lookup-failed')
            except StaleMembershipEntryException, e:
                log.exception('stale-membership-record-in-the-way',
                              operation=operation,
                              args=args,
                              session=self.consul.Session,
                              e=e)
                yield self._backoff('stale-membership-record-in-the-way')
            except Exception, e:
                if not self.shutting_down:
                    log.exception(e)
                yield self._backoff('unknown-error')

        log.info('end', operation=operation, args=args)
        returnValue(result)

    @inlineCallbacks
    def coordinator_get_with_timeout(self, key, timeout, **kw):
        """
        Query consul with a timeout
        :param key: Key to query
        :param timeout: timeout value
        :param kw: additional key-value params
        :return: (is_timeout, (index, result)).
        """

        @inlineCallbacks
        def _get(key, m_callback):
            try:
                (index, result) = yield self._retry('GET', key, **kw)
                if not m_callback.called:
                    log.debug('got-result-cancelling-timer')
                    m_callback.callback((index, result))
            except Exception as e:
                log.exception('got-exception', e=e)

        try:
            rcvd = DeferredWithTimeout(timeout=timeout)
            _get(key, rcvd)
            try:
                result = yield rcvd
                log.debug('result-received', result=result)
                # NOTE(review): returnValue raises to exit the generator; on
                # Twisted versions where that control exception subclasses
                # Exception, the handlers below could swallow it -- confirm
                # against the pinned Twisted version.
                returnValue((False, result))
            except TimeOutError as e:
                log.debug('timeout-or-no-data-change', consul_key=key)
            except Exception as e:
                log.exception('exception', e=e)
        except Exception as e:
            log.exception('exception', e=e)

        # Reached on timeout or error: no index, no record.
        returnValue((True, (None, None)))
|
← Bajrangi Bhaijaan And The Ant-Man.
I wear photochromic lenses because I have sensitive eyes and I develop an ache in the eyes in bright sunlight. I have had this condition since the late sixties and except when I am at home and / or in the nights, I inevitably wear glasses with photochromic lenses.
Some of my readers have commented on this whenever one of my photographs has come up in a blog post, and a couple of them have wondered why I need to hide my eyes. Now you know.
This entry was posted in Blogging, Humor and tagged Pickles. Bookmark the permalink.
9 Responses to Photochromic Lenses.
I have the same issue – along with cataracts – sigh. More benefits of aging.
Let us simply enjoy while we can!
I, too, always wear sunglasses when I’m outside. I have ten gazillion pairs of wraparound ones, so they’re easy to pick up and put on.
I need mine to be prescription numbered and so I have just a few.
my eyes are blue. and have always been sensitive to the sun also.
actually what I would call overly sensitive. I always wear sun glasses.
I thought it was just a blue-eyed irritant trait … but I see it’s not.
and… you just gotta love pickels! it made me laugh right out loud.
I suddenly see that many of my friends also wear photochromic glasses, and it is very comforting!
I have the same blue-eye problem. I am curious to see if it eventually develops into something more. Did you notice your eyes changed color with age?
Thank you Kaitlin. You have just made my day!
|
# This file implements different thread routines which spawn on agent startup and continue to run until the agent is up.
# These threads continuously query stats from the Linux system and write to the designated log file for a particular test
# if it is currently running on the agent
#!/usr/bin/env python
import os
import common
import subprocess
import re
import action
import logging
import time
# Global

# Logger object for this module; initialised via loggersetup() at startup.
sys_logger = None
# Per-device file-name suffixes for the iostat / network output files.
iostat_file_ext = "_iostat_block_devices.plt"
network_io_file_ext = "_network_devices.plt"
# Sampling interval in seconds (kept as a string so it can be spliced
# directly into the command argv lists below).
system_metrics_interval = '5'
# Header row written at the top of each docker_stat.txt file.
docker_stat_header = "NAME CONTAINER CPU % MEM %"

# Bash Commands
# UTC timestamp in ISO-8601 form, prepended to each output sequence.
date_cmd = ['date', '-u', '+%Y-%m-%dT%H:%M:%SZ']
# Continuous batch-mode commands (stream one sample per interval).
top_cmd = ['top', '-b', '-i', '-d', system_metrics_interval]
top_get_header = ['top', '-b', '-n', '1', '-i']
iostat_cmd = ['iostat', '-dtx', system_metrics_interval]
# One-shot variants used only to discover the column headers.
iostat_get_header = ['iostat', '-dtx']
sar_get_header = {'cpu': ['sar', '-u', '1', '1'],
                  'task': ['sar', '-w', '1', '1'],
                  'nfs': ['sar', '-n', 'NFS', '1', '1'],
                  'mem': ['sar', '-r', '1', '1'],
                  'network_io': ['sar', '-n', 'DEV', '1', '1']
                  }
docker_version = ['docker', '-v']
# Shell pipeline: timestamp plus one formatted `docker stats` snapshot.
docker_command = "( date -u +'%Y-%m-%dT%H:%M:%SZ' && docker stats -a --format " \
                 "'table {{.Name}}\t{{.Container}}\t{{.CPUPerc}}\t{{.MemPerc}}\t' --no-stream )"
# Combined continuous sar collection (network, NFS, cpu, memory, tasks).
sar_cmd = ['sar', '-n', 'DEV', '-n', 'NFS', '-u', '-r', '-w', system_metrics_interval]
# Helper command fragments; presumably composed into a pipeline elsewhere
# in this module -- not used in the code visible here.
get_pid = ["ps", "-eo", "pid,cmd,%cpu", "--sort=-%cpu"]
grep2 = ["grep", "-v", "grep"]
awk = ["awk", "FNR == 1 {print $1}"]
def loggersetup(filename):
    """
    Build the "system_metrics" logger used by the gather threads: DEBUG
    records go to *filename*, ERROR records are echoed to the console.
    Any stale copy of the debug file is removed first.
    """
    if os.path.isfile(filename):
        os.remove(filename)

    logger = logging.getLogger("system_metrics")
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter(
        '%(asctime)s %(levelname)-6s {%(filename)s %(lineno)d} %(message)-100s',
        '%Y-%m-%d %H:%M:%S')

    file_handler = logging.FileHandler(filename)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.ERROR)
    console_handler.setFormatter(formatter)

    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    # Keep records out of the root logger's handlers.
    logger.propagate = False
    return logger
def top_gather(self):
    """
    Thread routine querying TOP output at a fixed interval. If any test is
    in running state on this agent, this routine appends top_output.txt for
    that test with the new stat values.
    """
    # Snapshot of running tests; refreshed at the start of each top cycle.
    running_queue = {}
    # execute top batch command
    p1 = subprocess.Popen(top_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while True:
        output = p1.stdout.readline()
        # top exited and its pipe is drained -> end the thread routine.
        if output == '' and p1.poll() is not None:
            break
        if output:
            # Read new output
            output = output.rstrip()
            # A line starting with "top" marks a new output sequence: dump
            # the current timestamp and re-snapshot the running-test list.
            # The running list is not re-checked again until the next
            # sequence starts.
            if output.startswith('top'):
                p2 = subprocess.Popen(date_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                timestamp = p2.communicate()[0].strip()
                # Take the snapshot under the action lock.
                action.action_lock.acquire()
                running_queue = action.running_tests
                action.action_lock.release()
                for testid, test in running_queue.iteritems():
                    if test.status == "RUNNING":
                        top_file = test.statsdir + "top_output.txt"
                        if os.path.isfile(top_file):
                            # File exists: append the new output sequence
                            # with the current timestamp.
                            with open(top_file, 'a') as fh:
                                fh.write("\n" + timestamp + "\n")
                                fh.write(output + "\n")
                                sys_logger.debug("Generating top output for test : " + str(testid))
                        else:
                            # New test just started on the agent: create
                            # top_output.txt and start the sequence.
                            with open(top_file, 'w') as fh:
                                fh.write(timestamp + "\n")
                                fh.write(output + "\n")
                                sys_logger.debug("Starting top output for test : " + str(testid))
                continue
            # Continue writing the current output sequence into the files of
            # the tests captured at the start of the sequence.
            for testid, test in running_queue.iteritems():
                if test.status == "RUNNING":
                    top_file = test.statsdir + "top_output.txt"
                    if os.path.isfile(top_file):
                        with open(top_file, 'a') as fh:
                            fh.write(output + "\n")
def iostat_gather(self):
    """
    Thread routine querying IOSTAT output at a fixed interval. If any test
    is in running state on this agent, this routine appends a separate file
    per IO device to that test's stats directory.
    """
    iostat_header = None
    device_header = 0   # becomes 1 once the "Device:" header row was seen
    device_list = []    # device names parsed from the one-shot iostat run
    p1 = subprocess.Popen(iostat_get_header, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = p1.communicate()[0].strip()
    output = output.split("\n")
    # Check header and device list from iostat output
    for header in output:
        header = header.strip()
        if header.startswith("Device"):
            # Normalise whitespace into a CSV header; the "Device:" column
            # is replaced by "Time" since rows are written per device.
            header = re.sub(' +', ' ', header)
            header = header.replace(' ', ',')
            header = header.replace("Device:", "Time")
            iostat_header = header
            device_header = 1
            continue
        if device_header:
            # Rows after the header: first field is the device name.
            header = re.sub(' +', ' ', header)
            header = header.split(' ')
            device_list.append(header[0])
    # Start IOSTAT batch command for continued output
    p2 = subprocess.Popen(iostat_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    running_queue = {}
    timestamp = 0
    try:
        while True:
            output = p2.stdout.readline()
            # iostat exited and its pipe is drained -> stop.
            if output == '' and p2.poll() is not None:
                break
            if output:
                output = output.strip()
                output = re.sub(' +', ' ', output)
                output = output.replace(' ', ',')
                # A "Device" line starts a new sample: record the timestamp
                # and re-snapshot the running-test list. The list is not
                # re-checked until the next sample starts.
                if output.startswith("Device"):
                    p3 = subprocess.Popen(date_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    timestamp = p3.communicate()[0].strip()
                    action.action_lock.acquire()
                    running_queue = action.running_tests
                    action.action_lock.release()
                    continue
                # Replace the leading device-name column with the sample
                # timestamp; the device name selects the output file instead.
                output = output.split(",")
                output_device = output[0]
                output[0] = str(timestamp)
                output = ",".join(output)
                if output_device in device_list:
                    for testid, test in running_queue.iteritems():
                        if test.status == "RUNNING":
                            iostat_file_name = output_device + iostat_file_ext
                            iostat_file = test.statsdir + iostat_file_name
                            if os.path.isfile(iostat_file):
                                # Existing file: append the new sample row.
                                sys_logger.debug("Generating iostat output in " + iostat_file_name + " for test : "
                                                 + str(testid))
                                with open(iostat_file, 'a') as fh:
                                    fh.write(output + "\n")
                            else:
                                # New test: create the file and write the
                                # CSV header before the first sample row.
                                with open(iostat_file, 'w') as fh:
                                    sys_logger.debug("Starting " + iostat_file_name + " for test : " + str(testid))
                                    fh.write(iostat_header + "\n")
                                    fh.write(output + "\n")
    except Exception as e:
        sys_logger.error(e)
# In SAR output the header is in the 2nd row; modify accordingly.
_SAR_HEADER_ROW = 2


def _sar_section_header(section, n_drop):
    """Run the one-shot `sar` command for *section* (a key of
    sar_get_header) and return its column header as a comma-separated
    string, with the first *n_drop* columns (timestamp / label columns)
    removed."""
    p = subprocess.Popen(sar_get_header[section],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.wait()
    output = p.communicate()[0].strip()
    row = output.split("\n")[_SAR_HEADER_ROW]
    row = re.sub(' +', ' ', row)
    fields = row.split(" ")
    del fields[:n_drop]
    return ",".join(fields)


def _append_plt_row(running_queue, file_name, header, timestamp, fields):
    """Append one CSV row (timestamp + fields) to *file_name* inside the
    stats directory of every currently RUNNING test, creating the file with
    a "Time,<header>" line on first use."""
    plt_row = ",".join([timestamp] + fields)
    for testid, test in running_queue.iteritems():
        if test.status != "RUNNING":
            continue
        plt_file = test.statsdir + file_name
        if os.path.isfile(plt_file):
            sys_logger.debug("Generating " + file_name + " for test : " + str(testid))
            with open(plt_file, 'a') as fh:
                fh.write(plt_row + "\n")
        else:
            sys_logger.debug("Starting " + file_name + " for test : " + str(testid))
            with open(plt_file, 'w') as fh:
                fh.write("Time," + header + "\n")
                fh.write(plt_row + "\n")


def sar_gather(self):
    """
    Thread routine parsing continuous `sar` output and fanning the samples
    out into cpu.plt / task.plt / mem.plt / nfs.plt and one per-interface
    network file, for every test currently running on this agent.
    """
    # One-shot sar runs provide the column header of each section; the
    # leading timestamp/label columns are dropped (3 for cpu/network, 2 for
    # the others), matching the columns dropped from the data rows below.
    cpu_plt_header = _sar_section_header('cpu', 3)
    task_plt_header = _sar_section_header('task', 2)
    mem_plt_header = _sar_section_header('mem', 2)
    nfs_plt_header = _sar_section_header('nfs', 2)
    net_io_plt_header = _sar_section_header('network_io', 3)

    # starting SAR gather
    p = subprocess.Popen(sar_cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

    # Which section the current data lines belong to. Using a single
    # exclusive state (instead of one flag per section) also prevents two
    # sections from being dispatched for the same line if sar ever omits
    # the blank separator line between sections.
    section = None
    timestamp = ""  # refreshed when a CPU header starts a new sample
    while True:
        output = p.stdout.readline()
        # sar exited and its pipe is drained -> stop the thread routine.
        if output == '' and p.poll() is not None:
            break
        if not output:
            continue
        output = output.strip()
        output = re.sub(' +', ' ', output)
        output = output.replace(' ', ',')
        if cpu_plt_header in output:
            # The CPU header starts a new sample: record its timestamp.
            section = 'cpu'
            p3 = subprocess.Popen(date_cmd, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            timestamp = p3.communicate()[0].strip()
            continue
        elif task_plt_header in output:
            section = 'task'
            continue
        elif nfs_plt_header in output:
            section = 'nfs'
            continue
        elif mem_plt_header in output:
            section = 'mem'
            continue
        elif net_io_plt_header in output:
            section = 'network_io'
            continue
        elif output == "":
            # A blank line marks the end of the current section.
            section = None
            continue
        if section is None:
            # Data line outside any recognised section: ignore.
            continue

        # Snapshot of the tests currently running on this agent.
        action.action_lock.acquire()
        running_queue = action.running_tests
        action.action_lock.release()

        # Drop the leading timestamp/label columns and route the row to the
        # plt file(s) of the active section.
        fields = output.split(",")
        if section == 'cpu':
            del fields[:3]
            _append_plt_row(running_queue, "cpu.plt", cpu_plt_header,
                            timestamp, fields)
        elif section == 'task':
            del fields[:2]
            _append_plt_row(running_queue, "task.plt", task_plt_header,
                            timestamp, fields)
        elif section == 'mem':
            del fields[:2]
            _append_plt_row(running_queue, "mem.plt", mem_plt_header,
                            timestamp, fields)
        elif section == 'nfs':
            del fields[:2]
            _append_plt_row(running_queue, "nfs.plt", nfs_plt_header,
                            timestamp, fields)
        elif section == 'network_io':
            del fields[:2]
            # The remaining first field is the interface name; it selects
            # the per-device output file.
            device = fields.pop(0)
            _append_plt_row(running_queue, device + network_io_file_ext,
                            net_io_plt_header, timestamp, fields)
def docker_stat_gather(self):
    """
    Thread entry point: periodically collect `docker stats` for all running tests.

    First verifies that docker is installed and its version is >= 10; otherwise
    the thread aborts via quit(). Collection itself is delegated to a fresh
    collect_docker_stats worker thread every system_metrics_interval seconds,
    because gathering the stats itself takes noticeable time.
    """
    # Checking docker version
    try:
        p1 = subprocess.Popen(docker_version, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        version = p1.communicate()[0].strip()
        # Take the first "<major>.<minor>" number found in the version string.
        version = re.findall("\d+\.\d+", version)[0]
        version = float(version)
        if version < 10.0:
            # Docker version less than 10 is not supported
            sys_logger.error("Docker version less than 10, not supported !! ")
            sys_logger.error("Aborting docker stat gather thread !! ")
            quit()
    except Exception:
        # Any failure above (no docker binary, unparsable version) lands here:
        # Docker is not installed, abort this thread
        sys_logger.error("Docker not installed !! ")
        sys_logger.error("Aborting docker stat gather thread !! ")
        quit()
    # Starting docker stats
    # Spawning different thread for collecting docker stat as it takes some time to collect the stats
    while True:
        thread = common.FuncThread(collect_docker_stats, True)
        thread.start()
        time.sleep(float(system_metrics_interval))
def collect_docker_stats(self):
    """
    Collect one snapshot of `docker stats` output and append it to
    docker_stat.txt in every RUNNING test's stats directory.

    The file is created with docker_stat_header on first use; afterwards the
    snapshot lines (minus docker's own "NAME..." header line) are appended,
    followed by a blank separator line. Aborts the thread via quit() when the
    docker command reports an error.
    """
    p1 = subprocess.Popen(docker_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    (output, err) = p1.communicate()
    # Snapshot the running-tests queue under the shared lock.
    action.action_lock.acquire()
    running_queue = action.running_tests
    action.action_lock.release()
    if err:
        sys_logger.error("Not able to collect docker stats")
        sys_logger.error(str(err.strip()))
        quit()
    output = output.strip()
    output = output.split("\n")
    for testid, test in running_queue.iteritems():
        if test.status != "RUNNING":
            continue
        docker_stat_file = test.statsdir + "docker_stat.txt"
        is_new = not os.path.isfile(docker_stat_file)
        if is_new:
            sys_logger.debug("Starting docker_stat.txt for test : " + str(testid))
        else:
            sys_logger.debug("Generating docker_stat.txt for test : " + str(testid))
        # Single write path for both the create and append cases (the two
        # branches previously duplicated the whole loop).
        with open(docker_stat_file, 'w' if is_new else 'a') as fh:
            if is_new:
                fh.write(docker_stat_header + "\n")
            for line in output:
                # Skip docker's own column-header line.
                if line.startswith("NAME"):
                    continue
                fh.write(line.strip() + "\n")
            # Blank line separates snapshots.
            fh.write("\n")
def strace_gather(self, testid, strace_config):
    """
    STRACE profiler collector based on configuration provided in strace_config for a given testid

    strace_config keys used: 'delay' (seconds to wait before starting),
    'duration' (seconds, handed to `timeout`), 'process' (name used to pick
    the PID). Output is written to <test statsdir>/strace_output.txt.
    """
    delay = float(strace_config['delay'])
    duration = strace_config['duration']
    process = strace_config['process']
    sys_logger.debug("Starting STRACE for Test " + str(testid) + " in " + str(delay) + " secs")
    # Start STRACE collection after delay time provided by user
    time.sleep(delay)
    test = action.get_test(testid)
    strace_output_file = test.statsdir + "strace_output.txt"
    # PID selection based on process name provided by user, if there are multiple PIDs for same process then it
    # chooses the most active process in terms of cpu usage
    sys_logger.debug("Setting up STRACE for process : " + process)
    # Pipeline: get_pid | grep <process> | grep2 | awk. get_pid, grep2 and awk
    # are module-level command lists -- TODO confirm their exact definitions.
    grep1 = ["grep", process]
    p1 = subprocess.Popen(get_pid, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p2 = subprocess.Popen(grep1, stdin=p1.stdout, stdout=subprocess.PIPE)
    p3 = subprocess.Popen(grep2, stdin=p2.stdout, stdout=subprocess.PIPE)
    p4 = subprocess.Popen(awk, stdin=p3.stdout, stdout=subprocess.PIPE)
    pid = p4.communicate()[0].strip()
    if not pid:
        msg = "No active PID found for given process : " + process
        sys_logger.debug(msg)
        # Record the failure in the output file so the user sees why no
        # strace data exists for this test.
        if test.status == "RUNNING":
            with open(strace_output_file, 'w') as fh:
                fh.write(msg + "\n")
    else:
        sys_logger.debug("PID selected for process " + process + " : " + pid)
        # `timeout <duration>` bounds the run; -c collects a per-syscall
        # summary, sorted by time (-S time), written to -o file.
        strace_cmd = ["timeout", duration, "strace", "-p", pid, "-c", "-S", "time", "-o", strace_output_file]
        sys_logger.debug("Executing Strace for test " + str(testid))
        sys_logger.debug("Strace command : " + str(strace_cmd))
        # NOTE(review): wait() with both pipes attached can deadlock if strace
        # produces a lot of output; communicate() would be safer -- confirm.
        p5 = subprocess.Popen(strace_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p5.wait()
        sys_logger.debug("Appending PID information in output file")
        # Prepend a "Strace Process ... | PID ..." header line to the output
        # file (perl -pi edits in place; the print fires only on line 1).
        perl_cmd = ['perl', '-pi', '-e',
                    'print "Strace Process : ' + process + ' | PID : ' + pid + ' \\n\\n" if $. == 1',
                    strace_output_file]
        subprocess.Popen(perl_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sys_logger.debug("Strace complete for test : " + str(testid))
def perf_gather(self, testid, perf_config):
    """
    PERF profiler collector based on configuration provided in perf_config for a given testid

    perf_config keys used: 'delay' (seconds to wait before starting),
    'duration' (seconds, handed to `sleep`), optional 'process' (name used to
    pick a PID for an additional per-process collection). Results go to
    <test statsdir>/perf_output.txt.
    """
    delay = float(perf_config['delay'])
    duration = perf_config['duration']
    sys_logger.debug("Starting PERF for Test " + str(testid) + " in " + str(delay) + " secs")
    time.sleep(delay)
    test = action.get_test(testid)
    perf_output_file = test.statsdir + "perf_output.txt"
    # Starting system wide perf data collection
    perf_system_wide_cmd = ['perf', 'stat', '-e',
                            'cycles,instructions,LLC-load-misses,LLC-prefetch-misses,LLC-store-misses', '-a', '-o',
                            perf_output_file, "sleep", duration]
    if test.status == "RUNNING":
        sys_logger.debug("Executing system-wide PERF")
        sys_logger.debug("PERF command : " + str(perf_system_wide_cmd))
        # NOTE(review): wait() before communicate() with pipes attached can
        # deadlock on large stderr output -- confirm.
        p = subprocess.Popen(perf_system_wide_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.wait()
        sys_logger.debug("Finished system-wide PERF")
        # perf reports problems on stderr; if any, record them and stop here.
        error = p.communicate()[1].strip()
        if error:
            sys_logger.debug(error)
            with open(perf_output_file, 'w') as fh:
                fh.write(error + "\n")
            return
        # Configure perf for process level data collection, if process name is provided
        if "process" in perf_config:
            process = perf_config['process']
            sys_logger.debug("Setting up PERF for process : " + process)
            # Pipeline: get_pid | grep <process> | grep2 | awk picks the PID
            # (get_pid, grep2 and awk are module-level command lists --
            # TODO confirm their exact definitions).
            grep1 = ["grep", process]
            p1 = subprocess.Popen(get_pid, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p2 = subprocess.Popen(grep1, stdin=p1.stdout, stdout=subprocess.PIPE)
            p3 = subprocess.Popen(grep2, stdin=p2.stdout, stdout=subprocess.PIPE)
            p4 = subprocess.Popen(awk, stdin=p3.stdout, stdout=subprocess.PIPE)
            pid = p4.communicate()[0].strip()
            if not pid:
                msg = "No active PID found for given process : " + process
                sys_logger.debug(msg)
                # Append the failure note to the existing output file.
                if os.path.isfile(perf_output_file):
                    with open(perf_output_file, 'a') as fh:
                        fh.write(msg + "\n")
            else:
                msg = "PID selected for process " + process + " : " + pid
                sys_logger.debug(msg)
                # Per-process user-space counters, appended to the same file.
                perf_process_cmd = ['perf', 'stat', '-e', 'cycles:u,instructions:u', '-a', '-p', pid, '-o',
                                    perf_output_file, '--append', 'sleep', duration]
                sys_logger.debug("Executing PERF for process " + process)
                sys_logger.debug("PERF command : " + str(perf_process_cmd))
                p5 = subprocess.Popen(perf_process_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                p5.wait()
                error = p5.communicate()[1].strip()
                if error:
                    sys_logger.debug(error)
                sys_logger.debug("Finished PERF on process")
    sys_logger.debug("PERF complete for test : " + str(testid))
def init_sar_iostat_top():
    """
    Start-up hook for the agent process: set up the metrics debug logger and
    launch the four system-metrics collector threads:

    1. top_gather - TOP output collection
    2. iostat_gather - iostat output collection
    3. sar_gather - SAR data collection
    4. docker_stat_gather - docker stats of all active containers
    """
    global sys_logger
    logger_file = os.getcwd() + "/system_metrics_gather_debug.out"
    sys_logger = loggersetup(logger_file)
    sys_logger.debug("Starting system metrics gather threads")
    # Same threads as before, started in the same order with the same log
    # messages -- just driven from a table instead of copy-pasted blocks.
    collectors = (
        ("top gather", top_gather),
        ("iostat gather", iostat_gather),
        ("SAR gather", sar_gather),
        ("docker stat gather", docker_stat_gather),
    )
    for label, collector in collectors:
        sys_logger.debug("Starting " + label)
        worker = common.FuncThread(collector, True)
        worker.start()
def perf_strace_gather(testid, perf_config=None, strace_config=None):
    """
    Agent invokes this procedure on test startup to configure the profilers
    described in the test details.

    :param testid: identifier of the test being started.
    :param perf_config: dict with 'delay', 'duration' and optional 'process';
        None disables perf collection.
    :param strace_config: dict with 'delay', 'duration' and 'process';
        None disables strace collection.
    """
    sys_logger.debug("Starting Profilers setup for test ID : " + str(testid))
    # BUGFIX: perf_config defaults to None but was dereferenced
    # unconditionally, raising TypeError whenever it was omitted. Guard it
    # the same way strace_config is guarded.
    if perf_config is not None:
        sys_logger.debug("Perf configuration details")
        if "process" in perf_config:
            sys_logger.debug(
                "Delay - " + perf_config['delay'] + " Duration - " + perf_config['duration'] + " Process - " + perf_config[
                    'process'])
        else:
            sys_logger.debug("Delay - " + perf_config['delay'] + " Duration - " + perf_config['duration'])
        t1 = common.FuncThread(perf_gather, True, testid, perf_config)
        t1.start()
    else:
        sys_logger.debug("Perf not configured ")
    if strace_config is not None:
        sys_logger.debug("Strace configuration details")
        sys_logger.debug(
            "Delay - " + strace_config['delay'] + " Duration - " + strace_config['duration'] + " Process - " +
            strace_config['process'])
        t2 = common.FuncThread(strace_gather, True, testid, strace_config)
        t2.start()
    else:
        sys_logger.debug("Strace not configured ")
|
Sport BOP Profile | Introducing Pam!
I work in the community health section of Sport Bay of Plenty as a Green Prescription advisor and help people improve their lifestyles through sustainable physical activity and nutritional changes. I also work as a Keep on Your Feet strength and balance programme co-ordinator to help improve access for older adults to approved community strength and balance classes.
Helping people gain confidence and enjoy increasing their physical activity to experience an enhanced quality of life – regain their “zing”.
Sharing the positive feeling of “get up and go” which comes from living in a healthier body.
Extended family (especially one month old granddaughter), and friend time is very precious.
Off road walking tracks; mountain biking; landscape photography; gardening and indoors I enjoy instructing RPM indoor bike sessions & gym workouts. Also, relaxing at the movies, reading a good book, watching Coronation Street (no laughing!).
Leap off tall buildings and survive, I am “height-phobic” and the mere thought turns my legs to jelly.
“Too old” does not feature in my vocabulary. “Make It Happen” does, which was the name of our team successfully completing the Taupo 100km Oxfam walk. And more importantly, not that I watch much TV but coming to terms with the catch-up episodes of Coronation Street (so many new characters) – traumatised!
|
import numpy as np
import pylab as plt
def integrate(x, y, weight, xmin, xmax, vary=None):
    """
    Return the weighted mean of per-pixel fluxes y over [xmin, xmax].

    y holds the flux already integrated within each pixel; x gives the pixel
    positions and weight the per-pixel weights. Pixels straddling the
    interval edges contribute fractionally. If the variance array vary is
    given, the variance of the integral is returned as well (otherwise None).

    Returns the tuple (integral, var_integral).
    """
    pixel_width = np.diff(x)
    # Drop the first sample so x lines up element-wise with pixel_width.
    x = x[1:]
    # Select pixels whose centers lie within half a pixel of the interval.
    inside = (x > xmin - pixel_width / 2.0) & (x <= xmax + pixel_width / 2.0)
    # Fraction of each selected pixel that falls inside [xmin, xmax],
    # clipped to 1 for fully interior pixels.
    frac_lo = 0.5 - (xmin - x[inside]) / pixel_width[inside]
    frac_hi = 0.5 - (x[inside] - xmax) / pixel_width[inside]
    frac_lo = np.where(frac_lo > 1, 1.0, frac_lo)
    frac_hi = np.where(frac_hi > 1, 1.0, frac_hi)
    pixel_frac = np.minimum(frac_lo, frac_hi)
    # Weighted mean over the (fractionally counted) selected pixels.
    effective_weight = weight[1:][inside] * pixel_frac
    total_flux = np.sum(y[1:][inside] * effective_weight)
    total_weight = np.sum(effective_weight)
    integral = np.divide(total_flux, total_weight)
    if vary is None:
        var_integral = None
    else:
        # Propagate the per-pixel variances through the weighted mean.
        total_var = np.sum(vary[1:][inside] * effective_weight ** 2)
        var_integral = np.divide(total_var, total_weight ** 2)
    return integral, var_integral
def plot_windows(ww, y, windows):
    """
    Plot the spectra and highlight the configured wavelength windows in red.

    ww, y : presumably 2-D arrays indexed as [order, pixel] -- TODO confirm
        (the np.where below produces a (row, col) index tuple).
    windows : dict mapping window name -> {'wwmin': ..., 'wwmax': ...}.
    """
    ax = plt.subplots(1, 1)[1]
    for win in windows:
        indices = np.where(np.logical_and(ww > windows[win]['wwmin'],
                                          ww < windows[win]['wwmax']))
        # Plot (in black) every order that has at least one pixel inside
        # the current window.
        for order in np.unique(indices[0]):
            ax.plot(ww[order], y[order], 'k')
        # NOTE(review): enumerate(indices) yields (0, row_idx) and
        # (1, col_idx) tuples, so ww[ind] indexes with e.g. (0, row_idx).
        # This looks like it should be ax.plot(ww[indices], y[indices], ...)
        # to highlight the in-window pixels -- verify the original intent.
        for ind in enumerate(indices):
            ax.plot(ww[ind], y[ind], 'r', lw=2)
    return
def load_rdbfile(rdb_file, sepchar='\t'):
    """
    Load data from an rdb file into a dict of numpy arrays.

    The first line is the whitespace-separated header, the second line (the
    rdb dash ruler) is skipped, and data lines starting with '#' are treated
    as comments. Fields that parse as floats are converted; everything else
    is kept as a string (including any trailing newline on the last column,
    matching the previous behavior).

    :param rdb_file: path of the rdb file to read.
    :param sepchar: column separator for data lines (tab by default).
    :return: dict mapping column name -> numpy array of that column's values.
    """
    # `with` guarantees the file handle is closed (it previously leaked on
    # a read error).
    with open(rdb_file, 'r') as f:
        lines = f.readlines()
    header = lines[0].split()
    columns = dict((name, []) for name in header)
    for line in lines[2:]:
        if line.startswith('#'):
            continue
        # Split the line once instead of once per column.
        fields = line.split(sepchar)
        for index, name in enumerate(header):
            elem = fields[index]
            try:
                elem = float(elem)
            except ValueError:
                pass
            columns[name].append(elem)
    return dict((name, np.array(values)) for name, values in columns.items())
__author__ = 'Rodrigo F. Diaz'
|
“I’m taking pictures for my Ba-WoG (blog)” my 3-year old told me as she walked around the house snapping shots with an old camera.
It’s hard to watch yourself in the mirror when the reflection isn’t pretty. But it is also a chance for making a ChAnGe. One that is admirable, honorable, and FiT for a child to imitate.
|
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class binutils(test.test):
    """
    Autotest module for testing basic functionality
    of binutils

    @author Xu Zheng zhengxu@cn.ibm.com
    """
    version = 1
    nfail = 0   # overall failure counter, reset per test run
    path = ''

    def initialize(self, test_path=''):
        """
        Reset the failure counter and build the binutils test programs.

        :param test_path: directory that contains the 'binutils' sources.
        """
        self.nfail = 0
        ret_val = subprocess.Popen(['make', 'all'], cwd="%s/binutils" % (test_path))
        ret_val.communicate()
        if ret_val.returncode != 0:
            self.nfail += 1
            # BUGFIX: the success message used to be logged unconditionally,
            # even when make failed.
            logging.error('\n Test initialize failed')
        else:
            logging.info('\n Test initialize successfully')

    def run_once(self, test_path=''):
        """
        Trigger a single test run: execute gprof.sh with LTPBIN pointing at
        the shared helper directory.
        """
        try:
            os.environ["LTPBIN"] = "%s/shared" % (test_path)
            ret_val = subprocess.Popen(['./gprof.sh'], cwd="%s/binutils" % (test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1
        except error.CmdError as e:
            # 'as' syntax works on Python 2.6+ and 3 (was py2-only ', e').
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        """Report the overall result; raise TestError if any step failed."""
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
|
All New Posts from the Strength Blog | How To Get Stronger For Life!
Read all the new Strength Blog Posts! Learn everything you need to know about how to get stronger in training and life!
All posts cover subjects such as strength training, diet and nutrition, health and wellness, motivation and inspiration and so much more!
Every day we are presented with choices. Choices that we may not even know about, yet they are still present. We have a choice every day to continue along our path to greatness or wander off the path to do something easier.
We all have goals that we want to achieve, yet we may not always “feel like” working towards them. The fact is, achieving any goal is hard. That is why there is such a sense of accomplishment when you are done, and the feeling is great.
Still, it is much easier to go do the things you “feel like” doing instead of working towards your goal.
However, the longer you wait to work towards your goal, the harder it is to start. So you just have to START! It is not going to get easier and there is not going to be a better time.
There is no time like the present, to start changing your world!
Whatever it is that you wish to achieve in your life, start right now!
Right now is the time to make the right choice and start working towards your goals! Even if you have just 5 minutes right now, and everyday, start working towards your goals!
If you just do one thing to work towards your goal each and every day, you can accomplish anything. The time is going to pass anyway, so you might as well use it to your advantage.
By simply doing one thing every day, you will soon reach the mountain top!
That is how I started my business! In my “FREE time” when I wasn’t working, going to school, and preparing for my powerlifting competitions, I would make sure I forced myself to do at least one thing every day to build my business.
In that time, I created my websites (MathiasMethod.com and StrengthWorld.store), I improved my health with the Daily 30, I wrote this, and all my other books! Yeah, I gave up sleep and things were hard at first.
For the first few months, I did most of my work between 10:00p.m. and 1:00a.m. almost every night. That was my only “FREE time”, but eventually, as I got into the groove of working towards my goal every day, things got easier.
Sometimes I would work for a few minutes while other times I would work for hours, but no matter what, I did something every day and started to make my dreams come true. And so can YOU!
Do what you must to succeed, whether you feel like it or not.
Time is the one thing that we are always losing and can never get back. Knowing this, we have to organize our time and prioritize what we do with it.
Realize that your time is very valuable, even in small quantities. We all need some free time to relax, but too many people waste their own time by doing so many things for others, so then they have no time or energy for themselves.
Without time to yourself, how can you move forward in life? Eventually, we end up making the same mistakes over and over until we wake up 20 years later finally realizing how important all that time was.
If you want to do something with your life, you need to do it right now! Start planning and taking action, because no one is going to hand you the time to do it. You have to fight for your time and take advantage of every free moment to improve yourself. There is no time to waste!
You can start by setting aside, a few minutes each day to work on a project, take 1 day per week, or even use a vacation as your work time to start creating your success.
100% of your time is yours to give.
We need to give some time to our jobs, our families, and our friends, but no matter what, at least 10% of your time needs to be kept to yourself, for yourself! That 10% is 2.5 hours per day, 17 hours per week, 3 days per month, and 36 days per year!
Just think of how much you can get done with just that 10%! Use it to the fullest! Many people use that time just to relax and watch TV, while successful people use that time to advance themselves, even if they don’t feel like it. Which are you?
You are in control of your time, so make sure you are using that time to your advantage. Don’t just give your time away to others who don’t return the favor, and don’t waste it being lazy yourself.
Most of the time spent to yourself should be towards your own self-improvement. Success is a habit, so make it a habit to prioritize your time and you will be successful.
You can unwind when needed, but also use your time to create your own success! Success takes time and time is not on our side. Be strong and make your time/success a priority!
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
from common import agent_logger
cmd_list = {}
def command_add(cmd_name, data_keys=None):
    """
    Decorator factory that registers a command handler in ``cmd_list``.

    :param cmd_name: name under which the handler is registered.
    :param data_keys: keys the handler needs at run time; a whitespace-
        separated string is split into a list. Defaults to no keys.
        (Previously a mutable default ``[]``; ``None`` avoids the shared-
        default pitfall while keeping the same behavior for callers.)
    """
    def wrap(_func):
        if cmd_name in cmd_list:
            agent_logger.log(logging.error,
                             "%s already exists in feature list. Duplicacy." %
                             cmd_name)
        else:
            # Normalize: None -> [], "a b" -> ["a", "b"], list stays as-is.
            _data_keys = data_keys if data_keys is not None else []
            if isinstance(_data_keys, str):
                _data_keys = _data_keys.split()
            cmd_list[cmd_name] = {"func": _func,
                                  "data_keys": _data_keys}
            agent_logger.log(logging.info,
                             "%s added to feature list with keys: %s." %
                             (cmd_name, _data_keys))
        # Return the function unchanged so the decorator is transparent.
        return _func
    return wrap
def _dict_value_or_none(dictionary, key):
if isinstance(dictionary, dict):
if dictionary.has_key(key):
return dictionary[key]
return None
def run(cmd, get_value_for):
    """
    Execute the registered command ``cmd``.

    Each of the command's declared data keys is resolved through
    ``get_value_for`` and the resulting dict is passed to the handler.

    :param cmd: command name previously registered via command_add.
    :param get_value_for: callable mapping a data key to its value.
    :return: the handler's result, or None on any failure (logged).
    """
    try:
        data_keys = cmd_list[cmd]["data_keys"]
        data_values = {}
        for _data_key in data_keys:
            data_values[_data_key] = get_value_for(_data_key)
        config_result = cmd_list[cmd]["func"](data_values)
        agent_logger.log(logging.info, "Running '%s'" % cmd,
                         data=data_values, result=config_result)
        return config_result
    except Exception as e:
        # 'as' syntax works on Python 2.6+ and 3; e.message is deprecated
        # (removed in Python 3), so log str(e) instead.
        agent_logger.log(logging.error, str(e))
        return None
|
This seminar will provide an overview of carbon capture technologies and focus on the challenges associated with producing research outputs that are valuable in industry. Bill will give a narrative of his PhD journey and provide some advice and lessons learned from the experience and then explain the future plans for commercialisation of his technology.
Dr. Bill Buschle is a post-doctoral researcher within the Institute of Energy Systems interested in industrial instrumentation, process control strategies, and novel plant configurations for post-combustion capture technologies used in electricity generation and industrial processes. He received his PhD in Chemical Engineering from the University of Edinburgh in 2015 and also holds BSc and MSc degrees in Materials Engineering from the University of Cincinnati.
Prior to completing his PhD, Bill served in scientific research and manufacturing product development roles in both the public and private sector as well as gained experience at a technology based venture capital fund.
He is currently developing novel chemical analysis instruments and methods for use at industrial scale post-combustion capture plants through an EPSRC Impact Acceleration project. Bill is also involved in several collaborative and multi-disciplinary RCUK funded projects.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file is part of MSM.
MSM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MSM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MSM. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
logger = logging.getLogger( __name__ )
import datetime
import threading
import dateutil.parser
from gi.repository import Gtk, GObject, GLib
from core import paths
import core.database
from core.errors import InvoiceError
import msmgui.widgets.invoicetable
from msmgui.widgets.base import ScopedDatabaseObject
class InvoicingAssistant( GObject.GObject, ScopedDatabaseObject ):
    """
    Gtk.Assistant wizard that generates invoices for contracts in a worker
    thread and then persists them to the database.

    Emits the 'saved' signal (int: number of invoices) after the save step
    has committed.
    """
    __gsignals__ = { 'saved': ( GObject.SIGNAL_RUN_FIRST, None, ( int, ) ) }
    def __init__( self ):
        ScopedDatabaseObject.__init__( self )
        GObject.GObject.__init__( self )
        # Build GUI
        self.builder = Gtk.Builder()
        self.builder.add_from_file( paths.data("ui","assistants","invoicing.glade" ))
        self._assistant = self.builder.get_object( "content" )
        self._assistant.set_modal( True )
        self._invoicetable = msmgui.widgets.invoicetable.InvoiceTable()
        # Show all invoices in the table, not only those of active contracts.
        self._invoicetable.active_only = False
        self.builder.get_object( "invoicetablebox" ).add( self._invoicetable )
        self._assistant.set_forward_page_func( self.page_forward_func )
        # Connect Signals
        self.builder.connect_signals( self )
    def set_parent( self, parent ):
        """Make the assistant window transient for the given parent window."""
        self._assistant.set_transient_for( parent )
    def show( self ):
        """Prefill the date entries (invoice date today, maturity +14 days) and show the window."""
        invoice_date = datetime.date.today()
        maturity_date = invoice_date + datetime.timedelta( days=14 )
        self.builder.get_object( "invoice_date_entry" ).set_text( invoice_date.strftime("%x") )
        self.builder.get_object( "invoice_maturitydate_entry" ).set_text( maturity_date.strftime("%x") )
        self.builder.get_object( "invoice_accountingenddate_entry" ).set_text( invoice_date.strftime("%x") )
        self.builder.get_object( "content" ).show_all()
    class Page:
        """ Page Enum """
        Intro, Select, Details, Generate, Confirm, Save, Summary = range( 7 )
    def page_forward_func( self, page ):
        """
        Function called when the forward button is pressed,
        Arguments:
            page:
                integer index of the current page
        returns:
            integer index of the next page to display
        """
        # Skip the contract-selection page when "all contracts" is chosen.
        if page == InvoicingAssistant.Page.Intro and self.builder.get_object( "contracts_all_radiobutton" ).get_active():
            return InvoicingAssistant.Page.Details
        # Jump straight to the summary when no invoices were generated.
        elif page == InvoicingAssistant.Page.Generate and len( self._invoicetable.get_contents() ) == 0:
            return InvoicingAssistant.Page.Summary
        else:
            return page + 1
    """
    Page prepare funcs
    """
    def page_generate_prepare_func( self, assistant, page ):
        """
        Prepare the 'Generate' page: spawn a worker thread that creates an
        invoice for every contract and streams progress back into the GUI
        via GLib.idle_add.
        """
        class ThreadObject( GObject.GObject, threading.Thread ):
            # Worker that generates invoices outside the GTK main loop; all
            # _gui_* methods are scheduled back onto the main loop.
            __gsignals__ = {
                'start': ( GObject.SIGNAL_RUN_FIRST, None, () ),
                'stop': ( GObject.SIGNAL_RUN_FIRST, None, ( int, int ) )
            }
            def __init__( self, contracts, invoice_options, gui_objects ):
                GObject.GObject.__init__( self )
                threading.Thread.__init__( self )
                self.contracts = contracts
                self.invoice_options = invoice_options
                self.gui_objects = gui_objects
                self.invoices = []
            def run( self ):
                GLib.idle_add( self._gui_start )
                # Thread-local DB session: ORM objects must not be shared
                # across threads.
                local_session = core.database.Database.get_scoped_session()
                i = 1
                num_contracts = len( self.contracts )
                for unmerged_contract in self.contracts:
                    contract = local_session.merge( unmerged_contract ) # add them to the local session
                    try:
                        invoice = contract.add_invoice( **self.invoice_options )
                    except InvoiceError as err:
                        logger.critical( "Error adding invoice: %r", err )
                        invoice = None
                    if invoice is not None:
                        self.invoices.append( invoice )
                    i += 1
                    GLib.idle_add( self._gui_update, i, num_contracts )
                local_session.expunge_all() # expunge everything afterwards
                local_session.remove()
                GLib.idle_add( self._gui_stop, len( self.invoices ), num_contracts )
            def _gui_start( self ):
                # Runs in the GTK main loop: show busy state.
                invoicingassistant, spinner, label, assistant, page, invoicetable = self.gui_objects
                label.set_text( "Generiere Rechnungen..." )
                spinner.start()
            def _gui_update( self, contract_current, contract_total ):
                # Runs in the GTK main loop: progress feedback per contract.
                invoicingassistant, spinner, label, assistant, page, invoicetable = self.gui_objects
                label.set_text( "Generiere Rechnungen... (Vertrag {}/{})".format( contract_current, contract_total ) )
            def _gui_stop( self, num_invoices, num_contracts ):
                # Runs in the GTK main loop: re-attach the contracts to the
                # assistant's session and fill the invoice table lazily.
                invoicingassistant, spinner, label, assistant, page, invoicetable = self.gui_objects
                merged_contracts = []
                for unmerged_contract in self.contracts:
                    contract = invoicingassistant.session.merge( unmerged_contract ) # Readd the object to the main thread session
                    merged_contracts.append( contract )
                self.contracts = merged_contracts
                invoicetable.clear()
                def gen( invoicetable, invoices, session, step=10 ):
                    # Generator that inserts invoices into the table in
                    # chunks of *step*, yielding True between chunks so the
                    # GUI stays responsive (driven via GLib.idle_add below).
                    treeview = invoicetable.builder.get_object( "invoices_treeview" )
                    model = invoicetable.builder.get_object( "invoices_liststore" )
                    treeview.freeze_child_notify()
                    # Disable sorting while inserting; restored afterwards.
                    sort_settings = model.get_sort_column_id()
                    model.set_default_sort_func( lambda *unused: 0 )
                    model.set_sort_column_id( -1, Gtk.SortType.ASCENDING )
                    i = 0
                    for unmerged_invoice in invoices:
                        invoice = session.merge( unmerged_invoice )
                        invoicetable.add_invoice( invoice )
                        i += 1
                        # change something
                        if i % step == 0:
                            # freeze/thaw not really necessary here as sorting is wrong because of the
                            # default sort function
                            yield True
                    if sort_settings != ( None, None ):
                        model.set_sort_column_id( *sort_settings )
                    treeview.thaw_child_notify()
                    yield False
                g = gen( invoicetable, self.invoices, invoicingassistant.session )
                if next( g ): # run once now, remaining iterations when idle
                    GLib.idle_add( next, g )
                label.set_text( "Fertig! {} Rechnungen aus {} Verträgen generiert.".format( num_invoices, num_contracts ) )
                spinner.stop()
                assistant.set_page_complete( page, True )
        def parse_date( text ):
            # Parse a user-entered, day-first date string; returns a
            # datetime.date, or None when the text is empty or invalid.
            new_date = None
            if text:
                try:
                    new_date = dateutil.parser.parse( text, dayfirst=True )
                except Exception as error:
                    logger.warning( 'Invalid date entered: %s (%r)', text, error )
                else:
                    return new_date.date()
        assistant.set_page_complete( page, False )
        spinner = self.builder.get_object( "generate_spinner" )
        label = self.builder.get_object( "generate_label" )
        gui_objects = ( self, spinner, label, assistant, page, self._invoicetable )
        self._session.close()
        contracts = core.database.Contract.get_all( session=self.session ) # We expunge everything, use it inside the thread and readd it later
        self._session.expunge_all()
        # Fall back to sensible defaults when an entry is empty or invalid.
        invoice_date = parse_date( self.builder.get_object( "invoice_date_entry" ).get_text().strip() )
        if not invoice_date:
            invoice_date = datetime.date.today()
        maturity_date = parse_date( self.builder.get_object( "invoice_maturitydate_entry" ).get_text().strip() )
        if not maturity_date:
            maturity_date = invoice_date + datetime.timedelta( days=14 )
        accounting_enddate = parse_date( self.builder.get_object( "invoice_accountingenddate_entry" ).get_text().strip() )
        if not accounting_enddate:
            accounting_enddate = invoice_date
        self.invoice_generator_threadobj = ThreadObject( contracts, {"date":invoice_date, "maturity_date":maturity_date, "accounting_enddate": accounting_enddate}, gui_objects )
        self.invoice_generator_threadobj.start()
    def page_save_prepare_func( self, assistant, page ):
        """
        Prepare the 'Save' page: spawn a worker thread that persists the
        generated invoices and notifies the GUI when done.
        """
        class ThreadObject( GObject.GObject, threading.Thread ):
            # Worker that commits the generated invoices to the database.
            __gsignals__ = {
                'start': ( GObject.SIGNAL_RUN_FIRST, None, () ),
                'stop': ( GObject.SIGNAL_RUN_FIRST, None, ( int, int ) )
            }
            def __init__( self, invoices, gui_objects ):
                GObject.GObject.__init__( self )
                threading.Thread.__init__( self )
                self.gui_objects = gui_objects
                self.invoices = invoices
            def run( self ):
                GLib.idle_add( lambda: self._gui_start() )
                local_session = core.database.Database.get_scoped_session()
                for invoice in self.invoices:
                    local_session.add( invoice ) # add them to the local session
                local_session.commit()
                local_session.remove() # expunge everything afterwards
                GLib.idle_add( lambda: self._gui_stop( len( self.invoices ) ) )
            def _gui_start( self ):
                # Runs in the GTK main loop: show busy state.
                spinner, label, assistant, page, window = self.gui_objects
                label.set_text( "Speichere Rechnungen..." )
                spinner.start()
            def _gui_stop( self, num_invoices ):
                # Runs in the GTK main loop: finish up and emit 'saved'.
                spinner, label, assistant, page, window = self.gui_objects
                assistant.commit()
                label.set_text( "Fertig! {} Rechnungen gespeichert.".format( num_invoices ) )
                spinner.stop()
                assistant.set_page_complete( page, True )
                window.emit( "saved", num_invoices )
        assistant.set_page_complete( page, False )
        spinner = self.builder.get_object( "save_spinner" )
        label = self.builder.get_object( "save_label" )
        gui_objects = ( spinner, label, assistant, page, self )
        invoices = self.invoice_generator_threadobj.invoices
        self._session.expunge_all()
        self._session.close()
        threadobj = ThreadObject( invoices, gui_objects )
        threadobj.start()
    """
    Callbacks
    """
    def hide_cb( self, assistant ):
        # Discard any uncommitted changes when the window is hidden.
        self._session.rollback()
        self._session.close()
    def close_cb( self, assistant ):
        assistant.hide()
    def cancel_cb( self, assistant ):
        assistant.hide()
    def apply_cb( self, assistant ):
        pass
    def prepare_cb( self, assistant, page ):
        """Dispatch per-page preparation when the assistant switches pages."""
        if page == assistant.get_nth_page( InvoicingAssistant.Page.Intro ):
            assistant.set_page_complete( page, True )
        elif page == assistant.get_nth_page( InvoicingAssistant.Page.Details ):
            assistant.set_page_complete( page, True )
        elif page == assistant.get_nth_page( InvoicingAssistant.Page.Generate ):
            self.page_generate_prepare_func( assistant, page )
        elif page == assistant.get_nth_page( InvoicingAssistant.Page.Save ):
            self.page_save_prepare_func( assistant, page )
|
The Gibson Island Yacht Squadron is open to members of the Gibson Island Club. Facilities include a boathouse with restroom and showers, moorings, slips, and a full service yacht yard. The Gibson Island Yacht Squadron has a formidable history of sailing achievements and a long list of sailing legacies! Throughout its history, the Gibson Island Yacht Squadron has been the sponsor of nationally-recognized regattas, most notably the Gibson Island to New London race, and many prominent sailors started their career at the Yacht Squadron. Members enjoy the camaraderie of fellow sailors in a beautiful boathouse with expansive views of the Chesapeake Bay and the Gibson Island Harbor, one of the most scenic and secure harbors on the Bay. Recreational boating, organized cruises, boating education, and weekly racing both on the Bay and the Magothy River, are all part of the fun! Our popular summer Junior Fleet Program introduces sailing to our young novice sailors and sharpens the skills of our more experienced junior racers. The Yacht Squadron hosts two junior regattas and Junior Fleet racers have the option to compete in many other local regattas.
|
import glob
import time
from printWithFlush import p
from model.storage import Storage
from model.builder import Builder
from commandQueueProcessor import CommandQueueProcessor
TICK_INTERVAL_SEC = 1
class TickSystem(object):
    """Drives the game loop: drains player command queues, then advances
    every persisted unit, once per tick."""

    def __init__(self, players):
        # Names of the players whose command queues are processed each tick.
        self._players = players

    def process_user_commands(self):
        """Drain and execute each player's pending command queue."""
        for player_name in self._players:
            CommandQueueProcessor(player_name).processCommands()

    def get_unit_files(self):
        """Return the paths of all persisted unit files in the world tree."""
        return glob.glob("/world/**/unit-*.json", recursive=True)

    def units_tick(self):
        """Advance every unit by one tick: move it, and let builders build."""
        for unit_path in self.get_unit_files():
            with Storage.from_file(unit_path) as unit:
                unit.move()
                if isinstance(unit, Builder):
                    unit.build()

    def start(self):
        """Run forever, performing one full tick every TICK_INTERVAL_SEC."""
        tick = 0
        while True:
            time.sleep(TICK_INTERVAL_SEC)
            p("tick ", tick)
            tick += 1
            self.process_user_commands()
            self.units_tick()
|
Welcome to another installment of This Week in Spring! This week I’m in London, England for µCon, Antwerp, Belgium for Devoxx, and Malmö, Sweden for Oredev. If you’re around, ping me and say hi.
Spring Framework and Spring Boot ninja Stephane Nicoll just announced that Spring Framework 4.3.4, mostly bugfixes, is available now.
Spring Security ninja Joe Grandja just announced Spring Security OAuth 2.0.12 which is primarily bugfixes and paves the way for some nice new features.
Spring Cloud Data Flow and Spring Cloud Task ninja Glenn Renfro just announced Spring Cloud Task 1.1.0.RC1. Spring Cloud Task continues the journey to 1.1 which adds a lot of new features including error handling, improved options for customizing the launch of partitioned job workers.
Spring Data lead Oliver Gierke just announced Spring Data release train Hopper SR5. This release includes a lot of updates across all of the modules including the modules for JPA, KeyValue, Cassandra, SOLR, Gemfire, Neo4J, MongoDB, Envers, Redis, ElasticSearch, Couchbase, and updates to Spring Data REST and Spring Data Commons.
Spring Cloud co-lead Spencer Gibb just announced the Spring Cloud CLI launcher. This is one of my favorite new utilities – it makes getting to the punch even easier with Spring Cloud! Install it and then run spring cloud configserver dataflow eureka h2 hystrixdashboard kafka zipkin and you’ll have those services running on the appropriate ports! Simple.
Spring Cloud Data Flow ninja Thomas Risberg just announced Spring Cloud Data Flow for Kubernetes 1.1 M2.
Spring Cloud ninja Ryan Baxter just announced Spring Cloud Camden SR2 which consists of bugfixes to the various Spring Cloud modules.
Last week, I looked at some of the lesser-known aspects of the Spring Framework component model.
William Witt took one of my talks and adapted the examples to work in the PCFDev Cloud Foundry environment and documented it step-by-step along with code – this is an awesome resource! Good job William!
Want to use JHipster and Stormpath together? Spring community hero Matt Raible’s got just the fix.
Friend of the Spring community Michael Simons just put together a really interesting look at exposing a JOOQ-based analytics API as a Spring Boot and Spring MVC-based REST API.
Pivotal’s own Ben Wilcock put together a very nice look at using Spring Boot and the Axon 2.0 framework together.
If you want to learn more about the new version of Axon 3.0, be sure to join Axon lead Allard Buijze and me for a webinar on November 16!
|
"""IO methods for NetCDF files."""
import os
import gzip
import shutil
import tempfile
from netCDF4 import Dataset
from gewittergefahr.gg_utils import error_checking
GZIP_FILE_EXTENSION = '.gz'
def open_netcdf(netcdf_file_name, raise_error_if_fails=False):
    """Attempts to open NetCDF file.

    Code for handling gzip files comes from jochen at the following
    StackOverflow page: https://stackoverflow.com/posts/45356133/revisions

    :param netcdf_file_name: Path to input file.
    :param raise_error_if_fails: Boolean flag.  If raise_error_if_fails = True
        and file cannot be opened, this method will throw an error.
    :return: netcdf_dataset: Instance of `NetCDF4.Dataset`, containing all data
        from the file.  If raise_error_if_fails = False and file could not be
        opened, this will be None.
    :raises: IOError: if file could not be opened and raise_error_if_fails =
        True.
    """

    error_checking.assert_file_exists(netcdf_file_name)
    error_checking.assert_is_boolean(raise_error_if_fails)

    gzip_as_input = netcdf_file_name.endswith(GZIP_FILE_EXTENSION)
    if gzip_as_input:
        # Decompress into a named temporary file, then open that instead.
        netcdf_temporary_file_object = tempfile.NamedTemporaryFile(delete=False)
        unzipped_file_name = netcdf_temporary_file_object.name

        try:
            # `with` guarantees the gzip handle is closed on every path.
            # BUG FIX: the original used a bare `except:` (which also caught
            # KeyboardInterrupt) and, when raise_error_if_fails was True,
            # re-raised without closing the gzip handle or removing the
            # temporary file, leaking both.
            with gzip.open(netcdf_file_name, 'rb') as gzip_file_object:
                shutil.copyfileobj(
                    gzip_file_object, netcdf_temporary_file_object)
        except Exception:
            netcdf_temporary_file_object.close()
            os.remove(unzipped_file_name)
            if raise_error_if_fails:
                raise
            return None

        netcdf_temporary_file_object.close()
        netcdf_file_name = unzipped_file_name

    try:
        netcdf_dataset = Dataset(netcdf_file_name)
    except IOError:
        if raise_error_if_fails:
            if gzip_as_input:
                os.remove(netcdf_file_name)
            raise

        netcdf_dataset = None

    if gzip_as_input:
        # The open dataset keeps its own file handle, so the unzipped copy
        # can be removed immediately (POSIX unlink-while-open semantics --
        # this mirrors the original behaviour).
        os.remove(netcdf_file_name)

    return netcdf_dataset
|
This workshop will provide immediate impact and influence individually – and for your team.
Discover how the world sees you by taking our quick (3 minute) Assessment that will astound you with accuracy, speed and impact.
Learn what your brand of personality says to others about you.
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add Source.list_page and backfill it from URLs / sitting venues."""

    # This migration manipulates data in forwards(), so South must not run
    # it as a dry run (the ORM queries would touch the not-yet-committed
    # column on some backends).
    no_dry_run = True

    def forwards(self, orm):
        # Adding field 'Source.list_page'
        db.add_column(u'hansard_source', 'list_page',
                      self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True),
                      keep_default=False)

        # Set the list_page property for those sources where we can
        # make a reasonable guess at which list page it came from:
        for source in orm.Source.objects.all():
            if 'plone/national-assembly' in source.url:
                source.list_page = 'national-assembly'
                source.save()
            elif 'plone/senate' in source.url:
                source.list_page = 'senate'
                source.save()

        # For any sources where this didn't help, and there is an
        # associated sitting and venue, use that venue to set
        # list_page:
        venue_slug_to_list_page = {
            'national_assembly': 'national-assembly',
            'senate': 'senate',
        }
        for venue in orm.Venue.objects.all():
            list_page = venue_slug_to_list_page.get(venue.slug)
            if list_page is None:
                # BUG FIX: the original indexed the dict directly, so any
                # venue whose slug is not in the mapping raised KeyError and
                # aborted the migration.  Unknown venues are now skipped.
                continue
            for source in orm.Source.objects.filter(sitting__venue=venue, list_page__isnull=True):
                source.list_page = list_page
                source.save()

    def backwards(self, orm):
        # Deleting field 'Source.list_page'
        db.delete_column(u'hansard_source', 'list_page')

    models = {
        u'core.person': {
            'Meta': {'ordering': "['sort_name']", 'object_name': 'Person'},
            '_biography_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            '_summary_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'additional_name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'biography': ('markitup.fields.MarkupField', [], {'default': "''", 'no_rendered_field': 'True', 'blank': 'True'}),
            'can_be_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_of_birth': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'blank': 'True'}),
            'date_of_death': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'family_name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'given_name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'honorific_prefix': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'honorific_suffix': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'legal_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'national_identity': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'sort_name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'summary': ('markitup.fields.MarkupField', [], {'default': "''", 'no_rendered_field': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'hansard.alias': {
            'Meta': {'ordering': "['alias']", 'object_name': 'Alias'},
            'alias': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Person']", 'null': 'True', 'blank': 'True'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
        },
        'hansard.entry': {
            'Meta': {'ordering': "['sitting', 'text_counter']", 'object_name': 'Entry'},
            'content': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page_number': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
            'sitting': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hansard.Sitting']"}),
            'speaker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'hansard_entries'", 'null': 'True', 'to': u"orm['core.Person']"}),
            'speaker_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'speaker_title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'text_counter': ('django.db.models.fields.IntegerField', [], {}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'hansard.sitting': {
            'Meta': {'ordering': "['-start_date']", 'object_name': 'Sitting'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hansard.Source']"}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['hansard.Venue']"})
        },
        'hansard.source': {
            'Meta': {'ordering': "['-date', 'name']", 'object_name': 'Source'},
            'date': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_processing_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_processing_success': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'list_page': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
        },
        'hansard.venue': {
            'Meta': {'ordering': "['slug']", 'object_name': 'Venue'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        }
    }

    complete_apps = ['hansard']
|
Title: The latest and largest bangladesh website database.
Description: www.socialbangla.com is the largest website in Bangladesh. Here you can find news, jobs, education, maps, business, Bangladeshi media, and more than 100 other categories.
|
########
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Stdlib imports
import os
# Third party imports
import unittest
import mock
import pytest
# Cloudify imports
from cloudify.workflows import local
from cloudify import mocks as cfy_mocks
from cloudify.state import current_ctx
from cloudify_cli import constants as cli_constants
import cloudify_nsx.library.nsx_common as common
import cloudify_nsx.library.nsx_security_tag as nsx_security_tag
import cloudify_nsx.library.nsx_security_group as nsx_security_group
import cloudify_nsx.library.nsx_security_policy as nsx_security_policy
class SecurityTest(unittest.TestCase):
def setUp(self):
super(SecurityTest, self).setUp()
self.local_env = None
self.ext_inputs = {
# prefix for run
'node_name_prefix': os.environ.get('NODE_NAME_PREFIX', ""),
# nsx inputs
'nsx_ip': os.environ.get('NSX_IP'),
'nsx_user': os.environ.get('NSX_USER'),
'nsx_password': os.environ.get('NSX_PASSWORD'),
}
if (
not self.ext_inputs['nsx_ip'] or
not self.ext_inputs['nsx_ip'] or
not self.ext_inputs['nsx_password']
):
self.skipTest("You dont have credentials for nsx")
blueprints_path = os.path.split(os.path.abspath(__file__))[0]
self.blueprints_path = os.path.join(
blueprints_path,
'resources'
)
self._regen_ctx()
# credentials
self.client_session = common.nsx_login({
'nsx_auth': {
'username': self.ext_inputs['nsx_user'],
'password': self.ext_inputs['nsx_password'],
'host': self.ext_inputs['nsx_ip']
}
})
def _regen_ctx(self):
self.fake_ctx = cfy_mocks.MockCloudifyContext()
instance = mock.Mock()
instance.runtime_properties = {}
self.fake_ctx._instance = instance
node = mock.Mock()
self.fake_ctx._node = node
node.properties = {}
node.runtime_properties = {}
current_ctx.set(self.fake_ctx)
def tearDown(self):
current_ctx.clear()
if self.local_env:
try:
self.local_env.execute(
'uninstall',
task_retries=50,
task_retry_interval=3,
)
except Exception as ex:
print str(ex)
super(SecurityTest, self).tearDown()
    @pytest.mark.external
    def test_security_tag(self):
        """Platform check: security tag.

        Full lifecycle against a live NSX: assert the tag does not already
        exist, install the blueprint, verify the tag's name/description,
        uninstall, and verify the tag is gone again.
        """
        # set blueprint name
        blueprint = os.path.join(
            self.blueprints_path,
            'security_tag.yaml'
        )
        # check prexist of security tag
        resource_id, _ = nsx_security_tag.get_tag(
            self.client_session,
            self.ext_inputs['node_name_prefix'] + "secret_tag"
        )
        self.assertIsNone(resource_id)
        # cfy local init
        self.local_env = local.init_env(
            blueprint,
            inputs=self.ext_inputs,
            name=self._testMethodName,
            ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)
        # cfy local execute -w install
        self.local_env.execute(
            'install',
            task_retries=50,
            task_retry_interval=3,
        )
        # check security tag properties
        resource_id, info = nsx_security_tag.get_tag(
            self.client_session,
            self.ext_inputs['node_name_prefix'] + "secret_tag"
        )
        self.assertIsNotNone(resource_id)
        self.assertIsNotNone(info)
        self.assertEqual(
            info['name'], self.ext_inputs['node_name_prefix'] + "secret_tag"
        )
        self.assertEqual(info['description'], "What can i say?")
        # cfy local execute -w uninstall
        self.local_env.execute(
            'uninstall',
            task_retries=50,
            task_retry_interval=3,
        )
        # must be deleted
        resource_id, _ = nsx_security_tag.get_tag(
            self.client_session,
            self.ext_inputs['node_name_prefix'] + "secret_tag"
        )
        self.assertIsNone(resource_id)
        # Uninstall already succeeded, so tearDown need not try again.
        self.local_env = None
@pytest.mark.external
def test_security_tag_vm_bind(self):
"""Platform check: bind security tag to vm"""
inputs = {k: self.ext_inputs[k] for k in self.ext_inputs}
# Define inputs related to this function
inputs.update({
'name_of_tag': str(os.environ.get('NAME_OF_TAG', 'tag_name')),
# vcenter inputs
'vcenter_ip': os.environ.get('VCENTER_IP'),
'vcenter_user': os.environ.get('VCENTER_USER'),
'vcenter_password': os.environ.get('VCENTER_PASSWORD'),
})
# update custom params
if os.environ.get('VCENTER_PORT'):
inputs['vcenter_port'] = str(os.environ.get(
'VCENTER_PORT'
))
# update custom params
if os.environ.get('VCENTER_DATACENTER'):
inputs['vcenter_datacenter'] = os.environ.get(
'VCENTER_DATACENTER'
)
if os.environ.get('VCENTER_RESOURCE_POOL'):
inputs['vcenter_resource_pool'] = os.environ.get(
'VCENTER_RESOURCE_POOL'
)
if os.environ.get('VCENTER_TEMPLATE'):
inputs['template_name'] = os.environ.get('VCENTER_TEMPLATE')
if (
not inputs['vcenter_ip'] or
not inputs['vcenter_ip'] or
not inputs['vcenter_password']
):
self.skipTest("You dont have credentials for vcenter")
# set blueprint name
blueprint = os.path.join(
self.blueprints_path,
'security_tag_vm.yaml'
)
# check prexist of security tag
resource_id, _ = nsx_security_tag.get_tag(
self.client_session,
inputs['node_name_prefix'] + inputs['name_of_tag']
)
self.assertIsNone(resource_id)
# cfy local init
self.local_env = local.init_env(
blueprint,
inputs=inputs,
name=self._testMethodName,
ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)
# cfy local execute -w install
self.local_env.execute(
'install',
task_retries=4,
task_retry_interval=3,
)
# check security tag properties
resource_id, info = nsx_security_tag.get_tag(
self.client_session,
inputs['node_name_prefix'] + inputs['name_of_tag']
)
self.assertIsNotNone(resource_id)
self.assertIsNotNone(info)
self.assertEqual(
info['name'],
inputs['node_name_prefix'] + inputs['name_of_tag']
)
self.assertEqual(
info['description'],
"Example security tag which will be assigned to example VM"
)
# cfy local execute -w uninstall
self.local_env.execute(
'uninstall',
task_retries=50,
task_retry_interval=3,
)
# must be deleted
resource_id, _ = nsx_security_tag.get_tag(
self.client_session,
inputs['node_name_prefix'] + inputs['name_of_tag']
)
self.assertIsNone(resource_id)
    @pytest.mark.external
    def test_security_group(self):
        """Platform check: security group.

        Installs a blueprint creating a parent and a nested security group,
        verifies the nesting relationship on the live NSX, then uninstalls
        and verifies both groups are deleted.
        """
        inputs = {k: self.ext_inputs[k] for k in self.ext_inputs}
        # Define inputs related to this function
        inputs['security_group_name'] = os.environ.get(
            'SECURITY_GROUP_NAME', "security_group_name"
        )
        inputs['nested_security_group_name'] = os.environ.get(
            'NESTED_SECURITY_GROUP_NAME', "nested_security_group_name"
        )
        # set blueprint name
        blueprint = os.path.join(
            self.blueprints_path,
            'security_groups.yaml'
        )
        # check prexist of security groups
        resource_id, _ = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['security_group_name']
        )
        self.assertIsNone(resource_id)
        resource_id, _ = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['nested_security_group_name']
        )
        self.assertIsNone(resource_id)
        # cfy local init
        self.local_env = local.init_env(
            blueprint,
            inputs=inputs,
            name=self._testMethodName,
            ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)
        # cfy local execute -w install
        self.local_env.execute(
            'install',
            task_retries=4,
            task_retry_interval=3,
        )
        # check security groups properties
        resource_id, main_properties = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['security_group_name']
        )
        self.assertIsNotNone(resource_id)
        nested_resource_id, nested_properties = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['nested_security_group_name']
        )
        self.assertIsNotNone(nested_resource_id)
        # The parent group must list the nested group as its member...
        self.assertEqual(
            main_properties['member']['name'],
            inputs['node_name_prefix'] + inputs['nested_security_group_name']
        )
        self.assertEqual(
            main_properties['member']['objectId'],
            nested_resource_id
        )
        # ...while the nested group itself has no members.
        self.assertFalse(nested_properties.get('member'))
        # cfy local execute -w uninstall
        self.local_env.execute(
            'uninstall',
            task_retries=50,
            task_retry_interval=3,
        )
        # must be deleted
        resource_id, _ = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['security_group_name']
        )
        self.assertIsNone(resource_id)
        resource_id, _ = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['nested_security_group_name']
        )
        self.assertIsNone(resource_id)
        # NOTE(review): local_env is not reset to None here, so tearDown will
        # run 'uninstall' once more -- confirm the double uninstall is intended.
    @pytest.mark.external
    def test_security_policy(self):
        """Platform check: security policy.

        Asserts the policy does not pre-exist, installs the blueprint,
        verifies the policy was created, then uninstalls and verifies
        deletion.
        """
        inputs = {k: self.ext_inputs[k] for k in self.ext_inputs}
        # Define inputs related to this function
        inputs['policy_name'] = os.environ.get(
            'POLICY_NAME', 'policy_name'
        )
        # set blueprint name
        blueprint = os.path.join(
            self.blueprints_path,
            'security_policy.yaml'
        )
        # check prexist of security policy
        resource_id, policy = nsx_security_policy.get_policy(
            self.client_session,
            inputs['node_name_prefix'] + inputs['policy_name']
        )
        self.assertIsNone(resource_id)
        # cfy local init
        self.local_env = local.init_env(
            blueprint,
            inputs=inputs,
            name=self._testMethodName,
            ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)
        # cfy local execute -w install
        self.local_env.execute(
            'install',
            task_retries=4,
            task_retry_interval=3,
        )
        # check security policy properties
        resource_id, policy = nsx_security_policy.get_policy(
            self.client_session,
            inputs['node_name_prefix'] + inputs['policy_name']
        )
        self.assertIsNotNone(resource_id)
        self.assertIsNotNone(policy)
        # cfy local execute -w uninstall
        self.local_env.execute(
            'uninstall',
            task_retries=50,
            task_retry_interval=3,
        )
        # must be deleted
        resource_id, policy = nsx_security_policy.get_policy(
            self.client_session,
            inputs['node_name_prefix'] + inputs['policy_name']
        )
        self.assertIsNone(resource_id)
    @pytest.mark.external
    def test_security_policy_bind(self):
        """Platform check: bind security policy to security group.

        Asserts neither the policy nor the group pre-exists, installs a
        blueprint that creates both and binds them, verifies both exist,
        then uninstalls and verifies both are deleted.
        """
        inputs = {k: self.ext_inputs[k] for k in self.ext_inputs}
        # Define inputs related to this function
        inputs['security_group_name'] = os.environ.get(
            'SECURITY_GROUP_NAME', "security_group_name"
        )
        inputs['policy_name'] = os.environ.get(
            'POLICY_NAME', 'policy_name'
        )
        # set blueprint name
        blueprint = os.path.join(
            self.blueprints_path,
            'bind_policy_group.yaml'
        )
        # check prexist of security policy
        resource_id, policy = nsx_security_policy.get_policy(
            self.client_session,
            inputs['node_name_prefix'] + inputs['policy_name']
        )
        self.assertIsNone(resource_id)
        # check prexist of security group
        resource_id, _ = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['security_group_name']
        )
        self.assertIsNone(resource_id)
        # cfy local init
        self.local_env = local.init_env(
            blueprint,
            inputs=inputs,
            name=self._testMethodName,
            ignored_modules=cli_constants.IGNORED_LOCAL_WORKFLOW_MODULES)
        # cfy local execute -w install
        self.local_env.execute(
            'install',
            task_retries=4,
            task_retry_interval=3,
        )
        # check security policy properties
        resource_id, policy = nsx_security_policy.get_policy(
            self.client_session,
            inputs['node_name_prefix'] + inputs['policy_name']
        )
        self.assertIsNotNone(resource_id)
        self.assertIsNotNone(policy)
        # check security group
        resource_id, _ = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['security_group_name']
        )
        self.assertIsNotNone(resource_id)
        # cfy local execute -w uninstall
        self.local_env.execute(
            'uninstall',
            task_retries=50,
            task_retry_interval=3,
        )
        # must be deleted
        resource_id, policy = nsx_security_policy.get_policy(
            self.client_session,
            inputs['node_name_prefix'] + inputs['policy_name']
        )
        self.assertIsNone(resource_id)
        resource_id, _ = nsx_security_group.get_group(
            self.client_session,
            'globalroot-0',
            inputs['node_name_prefix'] + inputs['security_group_name']
        )
        self.assertIsNone(resource_id)
# Allow running this platform-test module directly.
if __name__ == '__main__':
    unittest.main()
|
To make sure the figures they have are accurate and to ensure all parties involved are in agreement.
Some family members have become jittery with whoever was earmarked to receive the compensation on their behalf and have requested to be included as recipients too.
Will the verification process affect the already agreed awards?
Dr Swazuri said the award may be affected and gave a scenario of how the award can change; if the land which was marked for the acquisition was one acre and then, Kenya Railways realizes the land required for the project is actually three quarters of an acre, then the award will change. In case the team finds out the SGR project will require more land, the award will increase.
Dr Swazuri said there was a meeting yesterday and as a soon as the verification process is over, a request will be made to the Treasury, who will then release the money to Kenya Railways and finally to the National Land Commission to pay those affected by the SGR project. He said the process will approximately take one month.
|
#!/usr/bin/python
"""
This script is an HTSeq alternatives.
It is count reads in regions, but if there
are overlapping regions the read is counted in both
regions. Read also counted if partially overlap
"""
import sys
import gzip
class Exon:
    """One exon record parsed from a whitespace-split GTF line.

    Field indices assume a particular GTF attribute layout: exon number at
    field 17, gene symbol at field 19, transcript id at field 13 -- TODO
    confirm against the GTF flavour actually used.
    """
    def __init__(self, fields):
        self.chrx = fields[0]
        self.start = int(fields[3])
        self.stop = int(fields[4])
        # Attribute values carry quotes and a trailing ';' -> strip both.
        self.exonnum = int(fields[17].replace('"', "")[:-1])
        self.genesym = fields[19].replace('"', "")[:-1]
        self.tr = fields[13].replace('"', "")[:-1]
        # Number of uniquely-mapped reads overlapping this exon so far.
        self.count = 0
def incAllExons(pos, length, array, lowindex, hiindex):
    """Increment the count of every exon overlapped by a read.

    Binary-searches `array` (exons sorted by start) for the region touched
    by a read starting at `pos` with the given `length`, then bumps the
    count of every overlapping exon (partial overlaps included, and a read
    may hit several overlapping exons).

    :param pos: 1-based read start position.
    :param length: read length in bases.
    :param array: exons of one chromosome, sorted by ``start``.
    :param lowindex: lower bound of the current search window.
    :param hiindex: upper bound of the current search window.
    """
    if hiindex - lowindex < 2:
        # Window narrowed to a candidate region: scan forward, counting every
        # overlapping exon, until exons start past the read's end.
        for i in range(lowindex, len(array)):
            if array[i].start < pos + length and array[i].stop > pos:
                array[i].count += 1
            if array[i].start > pos + length:
                break
    else:
        # BUG FIX: use floor division.  Plain `/` yields a float on
        # Python 3, which breaks range()/indexing; `//` is identical to the
        # original behaviour on Python 2 ints.
        midindex = lowindex + (hiindex - lowindex) // 2
        if array[lowindex].start < pos + length and array[midindex].stop > pos:
            incAllExons(pos, length, array, lowindex, midindex)
        else:
            incAllExons(pos, length, array, midindex, hiindex)
# Map chromosome name -> list of Exon records parsed from the gzipped GTF
# given as the first command-line argument.
gtf = dict()
gtffile = gzip.open(sys.argv[1])
for line in gtffile:
    if line.startswith("#"):
        continue
    fields = line.split()
    if fields[2] != "exon":
        continue
    exon = Exon(fields)
    if fields[0] not in gtf:
        gtf[fields[0]] = list()
    gtf[fields[0]].append(exon)
gtffile.close()
# Sort each chromosome's exons by start so incAllExons can binary-search.
for i in gtf:
    gtf[i] = sorted(gtf[i], key = lambda x:x.start)
readcount = 0
# SAM records arrive on stdin; MAPQ 255 is treated here as marking
# uniquely-mapped reads (aligner-specific convention -- TODO confirm for
# the aligner in use).
for i in sys.stdin:
    fields = i.rstrip().split("\t")
    if fields[4] == "255":
        pos = int(fields[3])
        chrx = fields[2]
        length = len(fields[9])
        readcount += 1
        incAllExons(pos, length, gtf[chrx], 0, len(gtf[chrx]) - 1)
# Emit one count line per exon, then the total number of counted reads.
for i in gtf:
    for exon in gtf[i]:
        print "%s_%s:%d\t%d" % (exon.genesym, exon.tr, exon.exonnum, exon.count)
print "__readcount\t" + str(readcount)
|
At Hawkins Scientific, Inc. we offer a custom approach to your wood laboratory furniture needs. Building special sizes to accommodate specific instruments or building custom casework from special materials is our specialty.
Wood laboratory furniture drawers are constructed of a “baked on” epoxy coated steel side with integral ball bearing slides for easy opening.
Wood lab furniture doors are hung with Blum European style hinges for greatest durability.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Lib for handling message sending to the Inora server"""
import time
import requests
import lib.inoralogger
LOGGING = lib.inoralogger.InoraLogger.get_logger()
class EventSender(object):
    """Delivers event payloads to the Inora server over HTTPS."""

    def __init__(self, ip_address, port):
        # Destination host and TCP port of the Inora server.
        self.__ip_address = ip_address
        self.__port = port

    def send_https_message(self, message):
        """POST *message* via HTTPS, retrying every 3 s until it is delivered.

        Blocks indefinitely on repeated timeouts / connection errors.
        NOTE: verify=False disables TLS certificate validation -- review
        whether that is acceptable for this deployment.
        """
        url = 'https://' + self.__ip_address + ':' + str(self.__port)
        headers = {'Content-Length': str(len(message))}
        delivered = False
        while not delivered:
            try:
                LOGGING.debug(" Sending poll result over Https. dst_IP:%s port:%s", self.__ip_address, self.__port)
                requests.post(url, data=message, headers=headers, verify=False, timeout=30)
                delivered = True
            except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as exception:
                LOGGING.error(exception)
                LOGGING.info("Retrying to connect...")
                time.sleep(3)
|
Current Residence: White Lake, Mich.
Personal Claim to Fame: I have seven of them that I feed everyday – my children. And, ranking 23rd on TV Guide and TV Land's List of the 100 Most Unexpected TV Moments. It’s an honor to have been there when it all started!
Inspiration in Life: My dad, a true warrior who lost a battle to cancer when he was 28 and my mom who battled emphysema for 17 years and never complained. She taught me what tough was.
Hobbies: Sports and fitness, coaching my kids and anything to do with water… in it, on it, over it, looking at it or listening to it.
Pet Peeves: Negativity and pessimism. The glass is always full.
3 Words to Describe You: Adventurous, spontaneous and competitive.
1) Animals to hunt for food.
Reason for Being on SURVIVOR: To avenge a long-standing internal battle, wondering if I can win this game in spite of the changes and my 12 year absence. No matter what any returning player may say, you never stop thinking about going back. And: 7 cars when they turn 16, 7 college educations, 7 weddings… I want to be there for my kids at all times when they need me.
Why You Think You’ll “Survive” SURVIVOR: The physical part I’m ultra prepared for. I never stop moving. The survival part won’t be an issue. I grew up in the woods. My crazy family life has prepared me exceptionally for the mental part. I’m surviving seven kids!
Why You Think You Will Be the Sole SURVIVOR: There is no other returning player in the history of this game with more to prove than me. I’ll find a way to win, whatever it takes! I won’t give up… ever, ever , ever, ever!
Such a nice guy. And who can forget him falling into that fire!
^Agree. I liked him. Glad to see him return.
Recall the fire thing... ( sorta ). Him, not at all.
Was he the one who looked like a psycho hunting some animal on his season? I remember an attitude that seemed to go beyond hunting for food.
By your comment I can assume you're in your 30's?
Have to say that I'm very happy that he is getting a second chance, I've been waiting for this one.
I wish 30's. eeek. Try 50's.
That was hilarious last night. If he wanted to prove something the second time around, he's not doing a very good job.
I got your back ~ Stay in there.
At the rate he's going, he's going to lose an appendage if he isn't careful.
He was awesome killing the pig. That season he absolutely had my vote because nothing survives like having some food to eat. LOL Most of the contestants just loll about hoping to hang onto their fat so they can eat sparingly off themselves. Noone knows to gather seeds, greens, roots, grubs. Not one ever researches the area they are going into to check on what native people eat and forage for. This year finally they've got someone able to make fire without flint. I made a fire all on my own with a stick when I was eight and no one showed me, I just heard of the practice and did it. You want to survive, make fire, hunt and gather those are real survivor skills. There are skills like making lean to and using leaves to make a roof also that can be researched. Not one of these idiots ever thinks to learn. Even the clothes they bring to the island, no thought to having layers, a poncho to make into a tent against rain...and why does the show provide white rice when brown rice has so much more food value?
I would for sure smuggle in my moisturizer and hang really close to Michael so he would feed me.
He was awesome killing the pig. That season he absolutely had my vote because nothing survives like having some food to eat. LOL Most of the contestants just loll about hoping to hang onto their fat so they can eat sparingly off themselves. No one knows to gather seeds, greens, roots, grubs. Not one ever researches the area they are going into to check on what native people eat and forage for. This year finally they've got someone able to make fire without flint. I made a fire all on my own with a stick when I was eight and no one showed me, I just heard of the practice and did it. You want to survive, make fire, hunt and gather those are real survivor skills. There are skills like making lean to and using leaves to make a roof also that can be researched. Not one of these idiots ever thinks to learn. Even the clothes they bring to the island, no thought to having layers, a poncho to make into a tent against rain...and why does the show provide white rice when brown rice has so much more food value?
Surely these people do some research on the indigenous flora and fauna? Does Survivor provide any material on say dangerous snakes, insects and fish? I know if I was going out there, I'd be studying and researching all information I could find. And building a fire would be at the top of my list.
However, it's common sense to me. I think it should be to them also. Why place yourself in a position to have to depend on another person for the basic ammenities such as fire and food?
One of my favs (even tho I dislike returning players)...hope he does well this time around.
I like him, but I don't know if he'll make it out alive or not....man, how much more accident prone can you get, lol!
Nicest guy in the world but he's an accident waiting to happen. I'd hate to see him go home on a medical evac again!!!
Absolutely - he's gonna need a transfusion if he keeps losing blood.
|
from app import db
import csv
from app.models.movie import Movie
from app.models.location import Location
from app.models.director import Director
from app.models.writer import Writer
from app.models.actor import Actor
def import_data_from_database():
    """
    Build lookup dictionaries from the current database contents.

    Used by the CSV importer to detect rows that already exist and avoid
    inserting duplicates.

    :return: tuple of dicts (movies, actors, writers, directors, locations);
             the first four map a name string to the row id, `locations`
             maps a (location_name, movie_id) pair to the row id.
    """
    # Init dictionaries
    movies, actors, writers, directors, locations = {}, {}, {}, {}, {}
    for movie in Movie.query.all():
        # Save movie information
        movies[movie.name] = movie.id
    for actor in Actor.query.all():
        # Save actor information
        actors[actor.name] = actor.id
    for writer in Writer.query.all():
        # Save writer information
        writers[writer.name] = writer.id
    for director in Director.query.all():
        # Save director information
        directors[director.name] = director.id
    for location in Location.query.all():
        # BUG FIX: key on the location's name string, matching the
        # (location, movie.id) keys built by import_data_from_csv().  The
        # previous code keyed on the Location object itself, so existing
        # locations were never recognised as duplicates.
        # NOTE(review): assumes the Location model exposes the location
        # string as `.name` like the other models -- confirm against
        # app.models.location.
        locations[(location.name, location.movie_id)] = location.id
    return movies, actors, writers, directors, locations
def import_data_from_csv(file_path):
    """
    Import data from a csv file into database

    Expected columns (by index): 0 Title, 1 Release Year, 2 Location,
    3 Fun Facts, 4 Production, 5 Distributor, 6 Director, 7 Writer,
    8-10 Actors.

    Rows whose movie already exists only contribute a new Location;
    otherwise the movie and its director/writer/actor rows are created.

    :param file_path: path of the CSV file to import
    :return: "Bad File.." when the header is unrecognised, otherwise None
    """
    try:
        with open(file_path) as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            # Init dictionaries from what is already persisted
            movies, actors, writers, directors, locations = import_data_from_database()
            # FIXME : test header !
            header = next(reader)
            if header[0] != 'Title' or header[1] != 'Release Year':
                return "Bad File.."
            for row in reader:
                # Read CSV line
                name = row[0].strip()
                location = row[2]
                fun_facts = row[3]
                # Movie already exists create new location
                # NOTE(review): this branch does not consult `locations`, so a
                # duplicate (location, movie) pair can be re-inserted for an
                # existing movie, while the new-movie path below guards
                # against it -- confirm whether that asymmetry is intended.
                if name in movies:
                    if '' != location:
                        new_location = Location(location, fun_facts, movies[name])
                        db.session.add(new_location)
                    continue
                # Read more information from csv line about movie
                release_year = row[1]
                production = row[4]
                distributor = row[5]
                director = row[6]
                writer = row[7]
                movie_actors = [row[8], row[9], row[10]]
                # Create a new Movie
                movie = Movie(name, release_year, production, distributor)
                # Add director
                if '' != director:
                    # NOTE: `director` is rebound below from the CSV string to
                    # a Director model instance.
                    if director not in directors:
                        director = Director(director)
                        db.session.add(director)
                        # flush so the new row receives its primary key
                        db.session.flush()
                        # Save director id in local dictionary
                        directors[director.name] = director.id
                        # add director_id to movie
                        movie.add_director(director.id)
                    else:
                        movie.add_director(directors[director])
                # Add writer (same rebinding pattern as the director above)
                if '' != writer:
                    if writer not in writers:
                        writer = Writer(writer)
                        db.session.add(writer)
                        db.session.flush()
                        # Save writer id in local dictionary
                        writers[writer.name] = writer.id
                        # add writer_id to movie
                        movie.add_writer(writer.id)
                    else:
                        movie.add_writer(writers[writer])
                # Add Actors
                for actor_name in movie_actors:
                    if actor_name != '':
                        if actor_name not in actors:
                            actor = Actor(actor_name)
                            db.session.add(actor)
                            db.session.flush()
                            # Save actor id in local dictionary
                            actors[actor_name] = actor.id
                            # add actor to movie
                            # NOTE(review): this passes the Actor instance
                            # while the else-branch passes the plain name
                            # string (not the id, unlike director/writer) --
                            # one of the two is almost certainly wrong;
                            # confirm Movie.add_actor's expected argument.
                            movie.add_actor(actor)
                        else:
                            movie.add_actor(actor_name)
                # Add Movie in DB
                db.session.add(movie)
                db.session.flush()
                # Store movie id in local dictionary
                movies[name] = movie.id
                # Create new Location, if not empty and does not exist
                if '' != location:
                    if (location, movie.id) not in locations:
                        new_location = Location(location, fun_facts, movie.id)
                        db.session.add(new_location)
                        db.session.flush()
                        locations[(location, movie.id)] = new_location.id
            # Commit imported data
            db.session.commit()
    except FileNotFoundError:
        print("File : `" + file_path + '` not found')
|
Men Yeast Infection Treatments – Who is able to Help?
July 11, 2016 Male Yeast Infection Treatment Articles Comments Off on Men Yeast Infection Treatments – Who is able to Help?
Man Yeast Infection Treatment – Just how Effective are Vinegar and also other Natural Home Remedies?
July 11, 2016 Male Yeast Infection Treatment Articles Comments Off on Man Yeast Infection Treatment – Just how Effective are Vinegar and also other Natural Home Remedies?
Man Yeast Infection Treatment – A Few Powerful Tips to Eliminate Your current Yeast Infection – Fast!
July 11, 2016 Male Yeast Infection Treatment Articles Comments Off on Man Yeast Infection Treatment – A Few Powerful Tips to Eliminate Your current Yeast Infection – Fast!
|
""" Convert a BAM or a SAM into a bed depth file
The file is a TSV format with the fields
1. Chromosome
2. Start (0-index)
3. End (1-index)
4. Read depth
The file is ordered and covers all regions covered by alignments
"""
import argparse, sys, os
from shutil import rmtree
from multiprocessing import cpu_count, Lock, Pool
from tempfile import mkdtemp, gettempdir
from seqtools.format.sam.bam.files import BAMFile
from seqtools.format.sam import SAMStream
from seqtools.stream import LocusStream
from seqtools.range.multi import ranges_to_coverage, sort_genomic_ranges
# Shared state used to write worker results in submission order:
current = 0      # index of the next chunk allowed to be written
glock = Lock()   # guards `current`, `results` and writes to `of`
results = {}     # chunk index -> formatted text, buffered until in order
of = None        # output file handle; set in main()
def main(args):
    """Stream alignments locus by locus and emit bed depth lines.

    :param args: parsed namespace from do_inputs() (input, output, threads,
                 minimum_intron_size, tempdir/specific_tempdir)
    """
    # BUG FIX: a stray `args = do_inputs()` here re-parsed sys.argv,
    # discarding the namespace the caller passed in and creating a second,
    # never-removed temporary directory when invoked via external_cmd().
    bf = None
    if args.input != '-':
        bf = BAMFile(args.input)
    else:
        bf = SAMStream(sys.stdin)
    ls = LocusStream(bf)
    if args.output:
        args.output = open(args.output,'w')
    else:
        args.output = sys.stdout
    global of
    of = args.output
    z = 0
    if args.threads > 1:
        p = Pool(processes=args.threads)
    for entries in ls:
        bedarray = []
        for e in entries.payload:
            if not e.is_aligned(): continue
            tx = e.get_target_transcript(min_intron=args.minimum_intron_size)
            for exon in tx.exons:
                bedarray.append(exon.copy())
        if len(bedarray) == 0: continue
        if args.threads > 1:
            # do_output() reorders the asynchronous results by chunk index z
            p.apply_async(get_output,args=(bedarray,z,),callback=do_output)
        else:
            r = get_output(bedarray,z)
            do_output(r)
        z += 1
    if args.threads > 1:
        p.close()
        p.join()
    # Temporary working directory step 3 of 3 - Cleanup
    if not args.specific_tempdir:
        rmtree(args.tempdir)
    args.output.close()
def do_output(outputs):
    """Buffer one worker result and flush everything that is now in order.

    `outputs` is the [text, chunk_index] pair produced by get_output().
    Chunks may arrive out of order; they are written strictly by index.
    """
    global current
    glock.acquire()
    text, index = outputs
    results[index] = text
    # Drain every chunk that is contiguous with what was already written.
    while current in results:
        of.write(results[current])
        del results[current]
        current += 1
    glock.release()
def get_output(bedarray,z):
    """Convert one locus worth of exon ranges into bed depth text.

    :param bedarray: list of genomic ranges (copied exons) for one locus
    :param z: chunk index, passed through so do_output() can order output
    :return: [bed_text, z]
    """
    # BUG FIX: the sorted copy was computed but the *unsorted* input was
    # passed to ranges_to_coverage(); feed it the sorted array as intended.
    sarray = sort_genomic_ranges(bedarray[:])
    covs = ranges_to_coverage(sarray)
    olines = ''
    for c in covs:
        # bed convention: 0-based start, 1-based end; payload is the depth
        olines += c.chr+"\t"+str(c.start-1)+"\t"+str(c.end)+"\t"+str(c.payload)+"\n"
    return [olines,z]
def do_inputs():
    """Define and parse command line options, then prepare the temp dir."""
    desc = "Convert a sorted bam file (all alignments) into a bed file with depth. If you want to limit it to primary alignments you better filter the bam."
    parser = argparse.ArgumentParser(
        description=desc,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input', help="BAM file or - for SAM stdin")
    parser.add_argument('-o', '--output',
                        help="OUTPUTFILE or STDOUT if not set")
    parser.add_argument('--minimum_intron_size', default=68, type=int,
                        help="any gaps smaller than this we close")
    parser.add_argument('--threads', type=int, default=cpu_count(),
                        help="INT number of threads to run. Default is system cpu count")
    # Temporary working directory step 1 of 3 - Definition
    tmp_group = parser.add_mutually_exclusive_group()
    tmp_group.add_argument('--tempdir', default=gettempdir(),
                           help="The temporary directory is made and destroyed here.")
    tmp_group.add_argument('--specific_tempdir',
                           help="This temporary directory will be used, but will remain after executing.")
    args = parser.parse_args()
    # Temporary working directory step 2 of 3 - Creation
    setup_tempdir(args)
    return args
def setup_tempdir(args):
    """Create the temporary working directory and record it in args.tempdir.

    With --specific_tempdir the given directory is created if missing and
    kept after execution; otherwise a fresh directory is made under
    args.tempdir (the caller removes it during cleanup).  Exits with an
    error message on stderr if the directory could not be created.
    """
    if args.specific_tempdir:
        path = args.specific_tempdir.rstrip('/')
        if not os.path.exists(args.specific_tempdir):
            os.makedirs(path)
        args.tempdir = path
    else:
        args.tempdir = mkdtemp(prefix="weirathe.", dir=args.tempdir.rstrip('/'))
    # One existence check replaces the three redundant checks previously
    # scattered across both branches.
    if not os.path.exists(args.tempdir):
        sys.stderr.write("ERROR: Problem creating temporary directory\n")
        sys.exit()
    return
def external_cmd(cmd):
    """Run this module's CLI programmatically with an argv-style list.

    :param cmd: argv list, e.g. ['prog', 'in.bam', '-o', 'out.bed']
    """
    cache_argv = sys.argv
    sys.argv = cmd
    try:
        args = do_inputs()
        main(args)
    finally:
        # BUG FIX: restore sys.argv even when parsing or main() raises.
        sys.argv = cache_argv
if __name__=="__main__":
    # Script entry point: parse options (this also creates the temporary
    # working directory) and run the conversion.
    args = do_inputs()
    main(args)
|
Body off and completely rebuilt with new suspension, rebuilt drive train, wiring, lights, and interior. Body left as is and painted in a matte gray.
this thing goes down the road great!
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2014 René Kijewski <rene.kijewski@fu-berlin.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
from itertools import groupby
from os import devnull, environ, listdir
from os.path import abspath, dirname, isfile, join
from subprocess import CalledProcessError, check_call, PIPE, Popen
from sys import exit, stdout
# RIOT checkout root: $RIOTBASE if set, otherwise three directories above
# this script's location.
riotbase = environ.get('RIOTBASE') or abspath(join(dirname(abspath(__file__)), '../' * 3))
# Unbuffered sink for child process stdio (Python 2 allows buffering=0 here).
null = open(devnull, 'w', 0)
# Per-application outcome buckets filled by the build loop below.
success = []
failed = []
exceptions = []
def is_tracked(application_folder):
    """Return True when the folder contains a git-tracked Makefile."""
    makefile = join(application_folder, 'Makefile')
    if not isfile(makefile):
        return False
    try:
        # exits non-zero when the Makefile is not tracked by git
        check_call(('git', 'ls-files', '--error-unmatch', 'Makefile'),
                   stdin=null, stdout=null, stderr=null, cwd=application_folder)
    except CalledProcessError:
        return False
    return True
def get_lines(readline, prefix):
    """Yield [outcome, board] pairs parsed from lines starting with `prefix`.

    Consumes `readline` until EOF (empty string).  Lines of the form
    '<prefix><board> .. <outcome>' are split on ' .. ', reversed and
    yielded; a progress dot is printed for each pair.
    """
    while True:
        line = readline()
        if not line:
            break
        if not line.startswith(prefix):
            continue
        parts = line[len(prefix):].rstrip().split(' .. ')[::-1]
        if len(parts) == 2:
            stdout.write('.')
            stdout.flush()
            yield parts
# Build every tracked application under examples/ and tests/ with
# `make buildtest`, grouping the per-board results by outcome.
for folder in ('examples', 'tests'):
    print('Building all applications in: \033[1;34m{}\033[0m'.format(folder))
    applications = listdir(join(riotbase, folder))
    applications = filter(lambda app: is_tracked(join(riotbase, folder, app)), applications)
    applications = sorted(applications)
    for nth, application in enumerate(applications, 1):
        stdout.write('\tBuilding application: \033[1;34m{}\033[0m ({}/{}) '.format(application, nth, len(applications)))
        stdout.flush()
        try:
            # Parse 'Building for <board> .. <outcome>' lines from make's
            # stdout while the build runs.
            subprocess = Popen(('make', 'buildtest'),
                               bufsize=1, stdin=null, stdout=PIPE, stderr=null,
                               cwd=join(riotbase, folder, application))
            lines = get_lines(subprocess.stdout.readline, 'Building for ')
            # Python 2 tuple-parameter lambda: group sorted pairs by outcome.
            lines = groupby(sorted(lines), lambda (outcome, board): outcome)
            for group, results in lines:
                print('\n\t\t{}: {}'.format(group, ', '.join(sorted(board for outcome, board in results))))
            returncode = subprocess.wait()
            (failed if returncode else success).append(application)
        except Exception, e:
            print('\n\t\tException: {}'.format(e))
            exceptions.append(application)
        finally:
            try:
                subprocess.kill()
            except:
                pass
# Summarize the three outcome buckets and exit non-zero on any failure.
print('Outcome:')
for color, group in (('2', 'success'), ('1', 'failed'), ('4', 'exceptions')):
    applications = locals()[group]
    if applications:
        print('\t\033[1;3{}m{}\033[0m: {}'.format(color, group, ', '.join(applications)))
if exceptions:
    exit(2)
elif failed:
    exit(1)
else:
    exit(0)
|
Copyright 2013. KALINA ZHANG. All Rights Reserved.
Open every day. Please call 805-471-3360 now for appointment!
|
import json
import logging
import requests
import settings
from seqr.views.utils.proxy_request_utils import proxy_request
logger = logging.getLogger(__name__)
def delete_phenotips_patient(project, individual):
    """Deletes patient from PhenoTips for the given patient_id.

    Args:
        project (Model): seqr Project - used to retrieve PhenoTips credentials
        individual (Model): seqr Individual
    Raises:
        PhenotipsException: if api call fails
    """
    # Nothing to do when the individual has no PhenoTips record at all.
    if not phenotips_patient_exists(individual):
        return
    patient_url = phenotips_patient_url(individual)
    credentials = get_phenotips_uname_and_pwd_for_project(
        project.phenotips_user_id, read_only=False)
    return make_phenotips_api_call(
        'DELETE', patient_url, auth_tuple=credentials, expected_status_code=204)
def phenotips_patient_url(individual):
    """Return the PhenoTips REST path for this individual.

    Prefers the internal patient id; falls back to the external id (eid).
    """
    patient_id = individual.phenotips_patient_id
    if patient_id:
        return '/rest/patients/{0}'.format(patient_id)
    return '/rest/patients/eid/{0}'.format(individual.phenotips_eid)
def phenotips_patient_exists(individual):
    """Return a truthy value when the individual has any PhenoTips id."""
    patient_id = individual.phenotips_patient_id
    return patient_id or individual.phenotips_eid
def create_phenotips_user(username, password):
    """Creates a new user in PhenoTips"""
    admin_auth = (settings.PHENOTIPS_ADMIN_UNAME, settings.PHENOTIPS_ADMIN_PWD)

    # Step 1: create the user's wiki page.
    page_url = '/rest/wikis/xwiki/spaces/XWiki/pages/{username}'.format(username=username)
    make_phenotips_api_call(
        'PUT',
        page_url,
        http_headers={"Content-Type": "application/x-www-form-urlencoded"},
        data={'parent': 'XWiki.XWikiUsers'},
        auth_tuple=admin_auth,
        parse_json_resonse=False,
        expected_status_code=[201, 202],
    )

    # Step 2: attach the XWikiUsers object carrying the credentials.
    user_properties = {
        'className': 'XWiki.XWikiUsers',
        'property#password': password,
        #'property#first_name': first_name,
        #'property#last_name': last_name,
        #'property#email': email_address,
    }
    objects_url = '/rest/wikis/xwiki/spaces/XWiki/pages/{username}/objects'.format(username=username)
    return make_phenotips_api_call(
        'POST',
        objects_url,
        data=user_properties,
        auth_tuple=admin_auth,
        parse_json_resonse=False,
        expected_status_code=201,
    )
def make_phenotips_api_call(
        method,
        url,
        http_headers=None,
        data=None,
        auth_tuple=None,
        expected_status_code=200,
        parse_json_resonse=True,
        verbose=False):
    """Utility method for making an API call and then parsing & returning the json response.

    Args:
        method (string): 'GET' or 'POST'
        url (string): url path, starting with '/' (eg. '/bin/edit/data/P0000001')
        http_headers (dict): optional HTTP headers to forward
        data (string): request body - used for POST, PUT, and other such requests.
        auth_tuple (tuple): ("username", "password") pair
        expected_status_code (int or list): expected server response code
        parse_json_resonse (bool): whether to parse and return the json response
        verbose (bool): whether to print details about the request & response
    Returns:
        json object or None if response content is empty
    Raises:
        PhenotipsException: if the request fails, the status code is
            unexpected, or the response body is not valid JSON.
    """
    try:
        response = proxy_request(None, url, headers=http_headers or {}, method=method, scheme='http', data=data,
                                 auth_tuple=auth_tuple, host=settings.PHENOTIPS_SERVER, verbose=verbose)
    except requests.exceptions.RequestException as e:
        # BUG FIX: e.message is Python-2-only (and deprecated even there);
        # str(e) is portable and always populated.
        raise PhenotipsException(str(e))
    if (isinstance(expected_status_code, int) and response.status_code != expected_status_code) or (
            isinstance(expected_status_code, list) and response.status_code not in expected_status_code):
        raise PhenotipsException("Unable to retrieve %s. response code = %s: %s" % (
            url, response.status_code, response.reason_phrase))
    if parse_json_resonse:
        if not response.content:
            return {}
        try:
            return json.loads(response.content)
        except ValueError as e:
            logger.error("Unable to parse PhenoTips response for %s request to %s" % (method, url))
            raise PhenotipsException("Unable to parse response for %s:\n%s" % (url, e))
    else:
        return dict(response.items())
def get_phenotips_uname_and_pwd_for_project(phenotips_user_id, read_only=False):
    """Return the PhenoTips username and password for this seqr project.

    The read-only account shares the project's password but carries a
    '_view' suffix on the username.
    """
    if not phenotips_user_id:
        raise ValueError("Invalid phenotips_user_id: " + str(phenotips_user_id))
    suffix = '_view' if read_only else ''
    uname = phenotips_user_id + suffix
    pwd = phenotips_user_id * 2
    return uname, pwd
class PhenotipsException(Exception):
    """Raised when a PhenoTips API request fails or returns bad data."""
    pass
|
Dog Walkers in Mineola, NY - Dog Walking Near You | Wag!
Mineola is a furiendly village that you will find in Nassau County, New York. This pawsome place sits proudly on Long Island, and most of it is found in the Town of North Hempstead. This means there are some puptastic dog friendly activities you have got to check out! To make sure that your furry friend is fit and healthy, there are three veterinary clinics and animal hospitals in Mineola.
One of the best places for pooches in Mineola has got to be the Mineola Memorial Park! This is a pawsome place to let your canine run and roam, with lots of green grass, as well as a sniff-tastic walking trail around the whole park. The large trees provide shade when it is hot, and there are also some puptastic amenities nestled away at Mineola Memorial Park. This includes tennis courts, a playground for the kids, and benches and gardens to sit and relax with your pup.
If you get hungry when you are in Mineola, be sure to check out Piccolo’s when you are in town! Here you can dig into some delicious food, which includes pasta, homemade ravioli and seafood dishes. This place is also dog friendly, which means you can bring your canine to sit out at one of the outdoor tables with you.
Want to have some fun with your pooch? Head on over to the puptown of Mineola!
For a fun day in the sun, head on over to the pawsome Wilson Park when you are in Mineola! This is a puptastic green spot in town where you can bring your canine for a walk and some fresh air. There is lots of grass here for rolling and roaming, as well as space for a game of fetch. In addition, there are some furrific amenities on offer at Wilson Park that you can take advantage of with the family. This includes a baseball field, soccer field, basketball court, tennis courts and picnic benches for enjoying a treat with your pooch.
When you are in Mineola, you have got to head to the pawsome Roslyn Road Park with your furry friend! This is a puptastic place for pooches since there is an abundance of green grass where they can expend some excitable energy. There is plenty of open space for enjoying a game of fetch with your pup’s favorite ball or Frisbee, and there are lots of trees here that provide some much-needed shade when it is hot and sunny outside. There are also some furrific amenities at Roslyn Road Park, which includes a baseball field that you can enjoy with the family.
Right next door to Mineola, you will find the furrific Garden City! Here you can visit the pawsome Triangle Park, which is a green spot in town that your pup will love. Here there is open grass for your furry friend to bound around on, with a walking path all the way around the park where you can enjoy a leisurely stroll. In addition, the trees here provide some much-needed shade when it is hot for your panting pup. Another puptastic place is Hemlock Playground! Here there is grass for playing fetch, as well as some great amenities that you can use. This includes a tennis court and baseball field.
For a pawsome day out with your pooch, jump in the car and head to the next door puptown of Garden City! Here you will find the Nassau Haven Park where you can bring your furry friend for some exercise and playtime. There is lots of green grass for your pup to romp on, as well as big trees that provide shade when it is hot outside. There are also some furrific amenities here that you can use with the whole family. This includes a tennis court, soccer field, playground for the kids, baseball field, basketball court and comfort station.
To try some pawsome dog friendly eateries, head on over to the puptown of Williston Park! Here you will find Yummy Gyro, which is a puptastic place that serves up delicious sandwiches, wings, chicken and seafood for lunch and dinner. Of course, there are outdoor tables here where you are welcome to bring your canine while you eat. For a treat, head to Carvel Ice Cream! Here you can get your paws into some famous ice cream cakes, as well as choose one of the many flavors of ice cream that there are. Your pooch can chill out with you at one of the benches outside.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-23 00:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.3: adds per-browser download-file path
    # fields and a creation timestamp to LinkButton.
    # NOTE(review): the FilePathField `path` values below are absolute paths
    # from the developer's machine and will not exist elsewhere.  That is
    # harmless for applying the migration itself, but confirm the current
    # model definition supplies deployment-appropriate paths.
    dependencies = [
        ('lbutton', '0003_auto_20150314_2025'),
    ]
    operations = [
        migrations.AddField(
            model_name='linkbutton',
            name='chrome_file',
            field=models.FilePathField(null=True, path='/home/michael/WebSites/dev/git/codefisher_org/../www/media/lbutton/chrome'),
        ),
        migrations.AddField(
            model_name='linkbutton',
            name='firefox_file',
            field=models.FilePathField(null=True, path='/home/michael/WebSites/dev/git/codefisher_org/../www/media/lbutton/firefox'),
        ),
        migrations.AddField(
            model_name='linkbutton',
            name='time',
            # default=now is only used to backfill existing rows, then dropped
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
|
Ever since the 1979 box office hit “Alien”, there have been attempted replications and duplications. Some have come close, but most have failed.
“Life” is nothing more than an attempted replication with A-List Actors and far more stupidity and nonsense.
We as humans on earth continue to be fascinated with the possibility of life on Mars. A team of astronauts and scientists operate a space station near Mars. Exploration is done and what appears to be a life form is found. Our curiosity gets the best of us as we poke, prod and attempt chemical modifications to the form we have found.
The outcome of “Life” is pretty obvious. Is there ever a good ending to a Sci-Fi Thriller that has any sort of Alien being?
After the first 70 minutes I was bored and actually considered leaving. I did go and stand near the exit because the movie was getting on my nerves. I had seen it all before. There is nothing really new except several A-List Actors in the movie. There was no real suspense, some action, a lot of stupidity and nonsense.
The paid critics believe “Life” is fresh. I think it’s as stale as 2 week old bread.
|
# coding: utf-8
import logging
from celery import shared_task
from django.conf import settings
from onadata.apps.restservice.models import RestService
@shared_task(bind=True)
def service_definition_task(self, rest_service_id, data):
    """
    Tries to send data to the endpoint of the hook
    It retries 3 times maximum.
    - after 2 minutes,
    - after 20 minutes,
    - after 200 minutes
    :param self: Celery.Task.
    :param rest_service_id: RestService primary key.
    :param data: dict.
    """
    try:
        hook = RestService.objects.get(pk=rest_service_id)
        service_class = hook.get_service_definition()
        service_class().send(hook.service_url, data)
    except Exception as e:
        logger = logging.getLogger("console_logger")
        logger.error("service_definition_task - {}".format(str(e)), exc_info=True)
        # Countdown is in seconds: 120, 1200, 12000 for successive retries.
        countdown = 120 * (10 ** self.request.retries)
        # Max retries is 3 by default.
        raise self.retry(countdown=countdown,
                         max_retries=settings.REST_SERVICE_MAX_RETRIES)
    return True
|
In Bangladesh, there was a substantial increase in remittance inflow this past October. This follows a decline seen in September earlier this year. With the government encouraging banks to embrace remittance payments, this uptick is definitely a good sign.
Utilising the thorough data provided by the Bangladesh Bank, we can examine which banks received what in terms of remittance payments, and who the highest and lowest performers were.
Overall, Bangladesh received $1.239 billion in remittance in October from Bangladeshis living all over the world. In September, expatriate Bangladeshis sent back $1.138 billion, and in August, the figure was $1.411 billion.
As you can see, the total remittance figures fluctuate substantially, but generally stand beyond the $1.1 billion figure.
When it comes to which banks performed best and worst, the commercial state-owned banks took a total of $299.36 million combined from expatriate Bangladeshis, while specialised banks received $14.57 million.
These six commercial banks include Janata, Agrani, Rupali, BASIC, Sonali, and BDBL, and the two specialised banks are BKB and RAKUB. All of these banks are state-owned, and the more specialised banks took substantially less overall, owing to them focusing more on other services.
The banks that stood at the top, earning more than the others combined were the private commercial ones, namely Islami Bank Bangladesh Limited (IBBL), Dutch-Bangla Banks, and Mutual Trust Bank. Combined these earned $369.84 million, and individually $224.50 million, $93.51 million and $51.83 million, respectively. Earning $8.88 million were the nine foreign banks.
Four state-owned banks earned a combined total of almost $300 million. BDBL took no remittance at all, Agrani Bank earned $122.56 million, Sonali Bank took $87.68 million, Janata Bank $72.08 million, Rupali $16.87 million and BASIC bank $0.17 million.
Overall, the poorest performers when it comes to remittance taking are DBBL, Rajshahi Krishi, National Bank of Pakistan and Unnayan Bank (Rakub). All of those earned zero remittance.
Usually, the poor performers are NRB Banks, but the government encouraged these, allowing NRB Banks to take $0.76 million, NRB Commercial Bank $0.42 million and NRB Global Bank $0.55 million.
Sonali Bank (UK) Limited is authorised by the Prudential Regulation Authority and regulated by the Financial Conduct Authority and the Prudential Regulation Authority. Registered in England and Wales. Registered number 3792250. Registered Office 29-33 Osborn Street, London E1 6TD.
|
import os
from rpython.jit.metainterp.history import Const, REF, JitCellToken
from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.jit.metainterp.resoperation import rop, AbstractValue
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # too bad
class TempVar(AbstractValue):
    """A throwaway pseudo-box used by the register allocator to occupy a
    register temporarily without belonging to the trace being compiled."""
    def __init__(self):
        pass
    def __repr__(self):
        return "<TempVar at %s>" % (id(self),)
class NoVariableToSpill(Exception):
    """Raised when the register allocator cannot find any variable to spill."""
    pass
class Node(object):
    """Cell of the singly-linked freelist, holding one free slot index."""
    def __init__(self, val, next):
        self.val = val
        self.next = next
    def __repr__(self):
        # BUG FIX: the original formatted the *builtin* `next` function
        # instead of the instance attribute `self.next`.
        return '<Node %d %r>' % (self.val, self.next)
class LinkedList(object):
    """Ascending-sorted singly-linked list of free frame-slot indices.

    Serves as the FrameManager's freelist; `fm` is only used to translate
    between locations and integer slot indices (get_loc_index / frame_pos).
    """
    def __init__(self, fm, lst=None):
        # assume the list is sorted
        if lst is not None:
            node = None
            # build back-to-front so the chain stays in ascending order
            for i in range(len(lst) - 1, -1, -1):
                item = lst[i]
                node = Node(item, node)
            self.master_node = node
        else:
            self.master_node = None
        self.fm = fm
    def append(self, size, item):
        """Return location `item` (occupying `size` slots) to the freelist."""
        key = self.fm.get_loc_index(item)
        if size == 2:
            # a two-slot location frees two consecutive indices
            self._append(key)
            self._append(key + 1)
        else:
            assert size == 1
            self._append(key)
    def _append(self, key):
        # Insert `key` at its sorted position in the chain.
        if self.master_node is None or self.master_node.val > key:
            self.master_node = Node(key, self.master_node)
        else:
            node = self.master_node
            prev_node = self.master_node
            while node and node.val < key:
                prev_node = node
                node = node.next
            prev_node.next = Node(key, node)
    @specialize.arg(1)
    def foreach(self, function, arg):
        """Call function(arg, index) for every free index, in order."""
        node = self.master_node
        while node is not None:
            function(arg, node.val)
            node = node.next
    def pop(self, size, tp, hint=-1):
        """Remove and return a free location of `size` slots, or None.

        `hint`, when >= 0, names a preferred slot index (single-slot only).
        """
        if size == 2:
            return self._pop_two(tp) # 'hint' ignored for floats on 32-bit
        assert size == 1
        if not self.master_node:
            return None
        node = self.master_node
        #
        if hint >= 0:
            # Look for and remove the Node with the .val matching 'hint'.
            # If not found, fall back to removing the first Node.
            # Note that the loop below ignores the first Node, but
            # even if by chance it is the one with the correct .val,
            # it will be the one we remove at the end anyway.
            prev_node = node
            while prev_node.next:
                if prev_node.next.val == hint:
                    node = prev_node.next
                    prev_node.next = node.next
                    break
                prev_node = prev_node.next
            else:
                # while-else: hint not found, drop the first node instead
                self.master_node = node.next
        else:
            self.master_node = node.next
        #
        return self.fm.frame_pos(node.val, tp)
    def _candidate(self, node):
        # An even index whose successor is the very next index can host a
        # two-slot (aligned) value.
        return (node.val & 1 == 0) and (node.val + 1 == node.next.val)
    def _pop_two(self, tp):
        # Find and unlink an aligned pair of consecutive free indices.
        node = self.master_node
        if node is None or node.next is None:
            return None
        if self._candidate(node):
            self.master_node = node.next.next
            return self.fm.frame_pos(node.val, tp)
        prev_node = node
        node = node.next
        while True:
            if node.next is None:
                return None
            if self._candidate(node):
                # pop two
                prev_node.next = node.next.next
                return self.fm.frame_pos(node.val, tp)
            # BUG FIX: prev_node must advance alongside node; previously it
            # stayed at the second element, so unlinking a candidate found
            # deeper in the chain silently dropped every node in between
            # (permanently losing those free slots).
            prev_node = node
            node = node.next
    def len(self):
        """Count the free indices (RPython-friendly explicit method)."""
        node = self.master_node
        c = 0
        while node:
            node = node.next
            c += 1
        return c
    def __len__(self):
        """ For tests only
        """
        return self.len()
    def __repr__(self):
        if not self.master_node:
            return 'LinkedList(<empty>)'
        node = self.master_node
        l = []
        while node:
            l.append(str(node.val))
            node = node.next
        return 'LinkedList(%s)' % '->'.join(l)
class FrameManager(object):
    """ Manage frame positions

    start_free_depth is the start where we can allocate in whatever order
    we like.
    """
    def __init__(self, start_free_depth=0, freelist=None):
        self.bindings = {}               # box -> frame location
        self.current_frame_depth = start_free_depth
        self.hint_frame_pos = {}         # box -> preferred slot index
        self.freelist = LinkedList(self, freelist)
    def get_frame_depth(self):
        """Return the current total frame depth, in slots."""
        return self.current_frame_depth
    def get(self, box):
        """Return the location bound to `box`, or None if unbound."""
        return self.bindings.get(box, None)
    def loc(self, box):
        """Return or create the frame location associated with 'box'."""
        # first check if it's already in the frame_manager
        try:
            return self.bindings[box]
        except KeyError:
            pass
        return self.get_new_loc(box)
    def get_new_loc(self, box):
        """Allocate a fresh frame location for `box` and record the binding."""
        size = self.frame_size(box.type)
        hint = self.hint_frame_pos.get(box, -1)
        # frame_depth is rounded up to a multiple of 'size', assuming
        # that 'size' is a power of two. The reason for doing so is to
        # avoid obscure issues in jump.py with stack locations that try
        # to move from position (6,7) to position (7,8).
        newloc = self.freelist.pop(size, box.type, hint)
        if newloc is None:
            #
            index = self.get_frame_depth()
            if index & 1 and size == 2:
                # we can't allocate it at odd position
                self.freelist._append(index)
                newloc = self.frame_pos(index + 1, box.type)
                self.current_frame_depth += 3
                index += 1 # for test
            else:
                newloc = self.frame_pos(index, box.type)
                self.current_frame_depth += size
            #
            if not we_are_translated():    # extra testing
                testindex = self.get_loc_index(newloc)
                assert testindex == index
        #
        self.bindings[box] = newloc
        if not we_are_translated():
            self._check_invariants()
        return newloc
    def bind(self, box, loc):
        """Record that `box` lives at `loc`, growing the frame if needed."""
        pos = self.get_loc_index(loc)
        size = self.frame_size(box.type)
        self.current_frame_depth = max(pos + size, self.current_frame_depth)
        self.bindings[box] = loc
    def finish_binding(self):
        """Rebuild the freelist from the gaps between current bindings."""
        all = [0] * self.get_frame_depth()
        for b, loc in self.bindings.iteritems():
            size = self.frame_size(b.type)
            pos = self.get_loc_index(loc)
            for i in range(pos, pos + size):
                all[i] = 1
        self.freelist = LinkedList(self) # we don't care
        for elem in range(len(all)):
            if not all[elem]:
                self.freelist._append(elem)
        if not we_are_translated():
            self._check_invariants()
    def mark_as_free(self, box):
        """Release `box`'s frame slot(s) back to the freelist (idempotent)."""
        try:
            loc = self.bindings[box]
        except KeyError:
            return # already gone
        del self.bindings[box]
        size = self.frame_size(box.type)
        self.freelist.append(size, loc)
        if not we_are_translated():
            self._check_invariants()
    def _check_invariants(self):
        # Every frame slot must be accounted for at most once: either bound
        # to a box or present in the freelist, never both.
        all = [0] * self.get_frame_depth()
        for b, loc in self.bindings.iteritems():
            # BUG FIX: frame_size() takes a *type*, not a box -- every other
            # call site passes box.type (see get_new_loc, bind, mark_as_free).
            # The bug was latent only because the default frame_size ignores
            # its argument; subclasses overriding it would have crashed here.
            size = self.frame_size(b.type)
            pos = self.get_loc_index(loc)
            for i in range(pos, pos + size):
                assert not all[i]
                all[i] = 1
        node = self.freelist.master_node
        while node is not None:
            assert not all[node.val]
            all[node.val] = 1
            node = node.next
    @staticmethod
    def _gather_gcroots(lst, var):
        lst.append(var)
    # abstract methods that need to be overwritten for specific assemblers
    @staticmethod
    def frame_pos(loc, type):
        # BUG FIX: declared @staticmethod for consistency with the other
        # abstract stubs; without the decorator, self.frame_pos(index, tp)
        # raised a TypeError (argument count) instead of NotImplementedError
        # when a subclass forgot to override it.
        raise NotImplementedError("Purely abstract")
    @staticmethod
    def frame_size(type):
        # Default: every type occupies one slot; backends override.
        return 1
    @staticmethod
    def get_loc_index(loc):
        raise NotImplementedError("Purely abstract")
    @staticmethod
    def newloc(pos, size, tp):
        """ Reverse of get_loc_index
        """
        raise NotImplementedError("Purely abstract")
class RegisterManager(object):
    """ Class that keeps track of register allocations
    """
    # Subclasses configure these class-level attributes for the target ISA:
    box_types = None # or a list of acceptable types
    all_regs = []                  # every allocatable register
    no_lower_byte_regs = []        # registers excluded when a low byte is needed
    save_around_call_regs = []     # caller-saved registers
    frame_reg = None               # dedicated frame register, if any
    def __init__(self, longevity, frame_manager=None, assembler=None):
        # longevity: maps each box to a pair whose [1] element is the box's
        # last-use position (see is_still_alive/stays_alive below).
        self.free_regs = self.all_regs[:]
        self.free_regs.reverse()
        self.longevity = longevity
        self.temp_boxes = []
        if not we_are_translated():
            # deterministic iteration order makes untranslated tests stable
            self.reg_bindings = OrderedDict()
        else:
            self.reg_bindings = {}
        self.bindings_to_frame_reg = {}
        self.position = -1
        self.frame_manager = frame_manager
        self.assembler = assembler
def is_still_alive(self, v):
# Check if 'v' is alive at the current position.
# Return False if the last usage is strictly before.
return self.longevity[v][1] >= self.position
def stays_alive(self, v):
# Check if 'v' stays alive after the current position.
# Return False if the last usage is before or at position.
return self.longevity[v][1] > self.position
def next_instruction(self, incr=1):
self.position += incr
def _check_type(self, v):
if not we_are_translated() and self.box_types is not None:
assert isinstance(v, TempVar) or v.type in self.box_types
def possibly_free_var(self, v):
""" If v is stored in a register and v is not used beyond the
current position, then free it. Must be called at some
point for all variables that might be in registers.
"""
self._check_type(v)
if isinstance(v, Const):
return
if v not in self.longevity or self.longevity[v][1] <= self.position:
if v in self.reg_bindings:
self.free_regs.append(self.reg_bindings[v])
del self.reg_bindings[v]
if self.frame_manager is not None:
self.frame_manager.mark_as_free(v)
def possibly_free_vars(self, vars):
""" Same as 'possibly_free_var', but for all v in vars.
"""
for v in vars:
self.possibly_free_var(v)
def possibly_free_vars_for_op(self, op):
for i in range(op.numargs()):
self.possibly_free_var(op.getarg(i))
def free_temp_vars(self):
self.possibly_free_vars(self.temp_boxes)
self.temp_boxes = []
def _check_invariants(self):
if not we_are_translated():
# make sure no duplicates
assert len(dict.fromkeys(self.reg_bindings.values())) == len(self.reg_bindings)
rev_regs = dict.fromkeys(self.reg_bindings.values())
for reg in self.free_regs:
assert reg not in rev_regs
assert len(rev_regs) + len(self.free_regs) == len(self.all_regs)
else:
assert len(self.reg_bindings) + len(self.free_regs) == len(self.all_regs)
assert len(self.temp_boxes) == 0
if self.longevity:
for v in self.reg_bindings:
assert self.longevity[v][1] > self.position
def try_allocate_reg(self, v, selected_reg=None, need_lower_byte=False):
""" Try to allocate a register, if we have one free.
need_lower_byte - if True, allocate one that has a lower byte reg
(e.g. eax has al)
selected_reg - if not None, force a specific register
returns allocated register or None, if not possible.
"""
self._check_type(v)
assert not isinstance(v, Const)
if selected_reg is not None:
res = self.reg_bindings.get(v, None)
if res is not None:
if res is selected_reg:
return res
else:
del self.reg_bindings[v]
self.free_regs.append(res)
if selected_reg in self.free_regs:
self.free_regs = [reg for reg in self.free_regs
if reg is not selected_reg]
self.reg_bindings[v] = selected_reg
return selected_reg
return None
if need_lower_byte:
loc = self.reg_bindings.get(v, None)
if loc is not None and loc not in self.no_lower_byte_regs:
return loc
for i in range(len(self.free_regs) - 1, -1, -1):
reg = self.free_regs[i]
if reg not in self.no_lower_byte_regs:
if loc is not None:
self.free_regs[i] = loc
else:
del self.free_regs[i]
self.reg_bindings[v] = reg
return reg
return None
try:
return self.reg_bindings[v]
except KeyError:
if self.free_regs:
loc = self.free_regs.pop()
self.reg_bindings[v] = loc
return loc
def _spill_var(self, v, forbidden_vars, selected_reg,
need_lower_byte=False):
v_to_spill = self._pick_variable_to_spill(v, forbidden_vars,
selected_reg, need_lower_byte=need_lower_byte)
loc = self.reg_bindings[v_to_spill]
del self.reg_bindings[v_to_spill]
if self.frame_manager.get(v_to_spill) is None:
newloc = self.frame_manager.loc(v_to_spill)
self.assembler.regalloc_mov(loc, newloc)
return loc
def _pick_variable_to_spill(self, v, forbidden_vars, selected_reg=None,
need_lower_byte=False):
""" Slightly less silly algorithm.
"""
cur_max_age = -1
candidate = None
for next in self.reg_bindings:
reg = self.reg_bindings[next]
if next in forbidden_vars:
continue
if selected_reg is not None:
if reg is selected_reg:
return next
else:
continue
if need_lower_byte and reg in self.no_lower_byte_regs:
continue
max_age = self.longevity[next][1]
if cur_max_age < max_age:
cur_max_age = max_age
candidate = next
if candidate is None:
raise NoVariableToSpill
return candidate
def force_allocate_reg(self, v, forbidden_vars=[], selected_reg=None,
need_lower_byte=False):
""" Forcibly allocate a register for the new variable v.
It must not be used so far. If we don't have a free register,
spill some other variable, according to algorithm described in
'_pick_variable_to_spill'.
Will not spill a variable from 'forbidden_vars'.
"""
self._check_type(v)
if isinstance(v, TempVar):
self.longevity[v] = (self.position, self.position)
loc = self.try_allocate_reg(v, selected_reg,
need_lower_byte=need_lower_byte)
if loc:
return loc
loc = self._spill_var(v, forbidden_vars, selected_reg,
need_lower_byte=need_lower_byte)
prev_loc = self.reg_bindings.get(v, None)
if prev_loc is not None:
self.free_regs.append(prev_loc)
self.reg_bindings[v] = loc
return loc
def force_allocate_frame_reg(self, v):
""" Allocate the new variable v in the frame register."""
self.bindings_to_frame_reg[v] = None
def force_spill_var(self, var):
self._sync_var(var)
try:
loc = self.reg_bindings[var]
del self.reg_bindings[var]
self.free_regs.append(loc)
except KeyError:
pass # 'var' is already not in a register
def loc(self, box, must_exist=False):
""" Return the location of 'box'.
"""
self._check_type(box)
if isinstance(box, Const):
return self.convert_to_imm(box)
try:
return self.reg_bindings[box]
except KeyError:
if box in self.bindings_to_frame_reg:
return self.frame_reg
if must_exist:
return self.frame_manager.bindings[box]
return self.frame_manager.loc(box)
def return_constant(self, v, forbidden_vars=[], selected_reg=None):
""" Return the location of the constant v. If 'selected_reg' is
not None, it will first load its value into this register.
"""
self._check_type(v)
assert isinstance(v, Const)
immloc = self.convert_to_imm(v)
if selected_reg:
if selected_reg in self.free_regs:
self.assembler.regalloc_mov(immloc, selected_reg)
return selected_reg
loc = self._spill_var(v, forbidden_vars, selected_reg)
self.free_regs.append(loc)
self.assembler.regalloc_mov(immloc, loc)
return loc
return immloc
def make_sure_var_in_reg(self, v, forbidden_vars=[], selected_reg=None,
need_lower_byte=False):
""" Make sure that an already-allocated variable v is in some
register. Return the register. See 'force_allocate_reg' for
the meaning of the optional arguments.
"""
self._check_type(v)
if isinstance(v, Const):
return self.return_constant(v, forbidden_vars, selected_reg)
prev_loc = self.loc(v, must_exist=True)
if prev_loc is self.frame_reg and selected_reg is None:
return prev_loc
loc = self.force_allocate_reg(v, forbidden_vars, selected_reg,
need_lower_byte=need_lower_byte)
if prev_loc is not loc:
self.assembler.regalloc_mov(prev_loc, loc)
return loc
def _reallocate_from_to(self, from_v, to_v):
reg = self.reg_bindings[from_v]
del self.reg_bindings[from_v]
self.reg_bindings[to_v] = reg
def _move_variable_away(self, v, prev_loc):
if self.free_regs:
loc = self.free_regs.pop()
self.reg_bindings[v] = loc
self.assembler.regalloc_mov(prev_loc, loc)
else:
loc = self.frame_manager.loc(v)
self.assembler.regalloc_mov(prev_loc, loc)
def force_result_in_reg(self, result_v, v, forbidden_vars=[]):
""" Make sure that result is in the same register as v.
The variable v is copied away if it's further used. The meaning
of 'forbidden_vars' is the same as in 'force_allocate_reg'.
"""
self._check_type(result_v)
self._check_type(v)
if isinstance(v, Const):
if self.free_regs:
loc = self.free_regs.pop()
else:
loc = self._spill_var(v, forbidden_vars, None)
self.assembler.regalloc_mov(self.convert_to_imm(v), loc)
self.reg_bindings[result_v] = loc
return loc
if v not in self.reg_bindings:
prev_loc = self.frame_manager.loc(v)
loc = self.force_allocate_reg(v, forbidden_vars)
self.assembler.regalloc_mov(prev_loc, loc)
assert v in self.reg_bindings
if self.longevity[v][1] > self.position:
# we need to find a new place for variable v and
# store result in the same place
loc = self.reg_bindings[v]
del self.reg_bindings[v]
if self.frame_manager.get(v) is None:
self._move_variable_away(v, loc)
self.reg_bindings[result_v] = loc
else:
self._reallocate_from_to(v, result_v)
loc = self.reg_bindings[result_v]
return loc
def _sync_var(self, v):
if not self.frame_manager.get(v):
reg = self.reg_bindings[v]
to = self.frame_manager.loc(v)
self.assembler.regalloc_mov(reg, to)
# otherwise it's clean
def before_call(self, force_store=[], save_all_regs=0):
""" Spill registers before a call, as described by
'self.save_around_call_regs'. Registers are not spilled if
they don't survive past the current operation, unless they
are listed in 'force_store'. 'save_all_regs' can be 0 (default),
1 (save all), or 2 (save default+PTRs).
"""
for v, reg in self.reg_bindings.items():
if v not in force_store and self.longevity[v][1] <= self.position:
# variable dies
del self.reg_bindings[v]
self.free_regs.append(reg)
continue
if save_all_regs != 1 and reg not in self.save_around_call_regs:
if save_all_regs == 0:
continue # we don't have to
if v.type != REF:
continue # only save GC pointers
self._sync_var(v)
del self.reg_bindings[v]
self.free_regs.append(reg)
def after_call(self, v):
""" Adjust registers according to the result of the call,
which is in variable v.
"""
self._check_type(v)
r = self.call_result_location(v)
if not we_are_translated():
assert r not in self.reg_bindings.values()
self.reg_bindings[v] = r
self.free_regs = [fr for fr in self.free_regs if fr is not r]
return r
# abstract methods, override
def convert_to_imm(self, c):
""" Platform specific - convert a constant to imm
"""
raise NotImplementedError("Abstract")
def call_result_location(self, v):
""" Platform specific - tell where the result of a call will
be stored by the cpu, according to the variable type
"""
raise NotImplementedError("Abstract")
def get_scratch_reg(self, type, forbidden_vars=[], selected_reg=None):
""" Platform specific - Allocates a temporary register """
raise NotImplementedError("Abstract")
class BaseRegalloc(object):
    """ Base class on which all the backend regallocs should be based
    """
    def _set_initial_bindings(self, inputargs, looptoken):
        """ Set the bindings at the start of the loop
        """
        locs = []
        base_ofs = self.assembler.cpu.get_baseofs_of_frame_field()
        for box in inputargs:
            assert not isinstance(box, Const)
            loc = self.fm.get_new_loc(box)
            locs.append(loc.value - base_ofs)
        if looptoken.compiled_loop_token is not None:   # <- for tests
            looptoken.compiled_loop_token._ll_initial_locs = locs

    def next_op_can_accept_cc(self, operations, i):
        # True if operations[i+1] can directly consume the condition code
        # produced by operations[i] (guard or conditional call), i.e. the
        # result box dies immediately and is not needed among the
        # guard's failargs / the call's arguments.
        op = operations[i]
        next_op = operations[i + 1]
        opnum = next_op.getopnum()
        if (opnum != rop.GUARD_TRUE and opnum != rop.GUARD_FALSE
                and opnum != rop.COND_CALL):
            return False
        if next_op.getarg(0) is not op:
            return False
        if self.longevity[op][1] > i + 1:
            return False
        if opnum != rop.COND_CALL:
            if op in operations[i + 1].getfailargs():
                return False
        else:
            if op in operations[i + 1].getarglist()[1:]:
                return False
        return True

    def locs_for_call_assembler(self, op):
        # Compute argument locations for CALL_ASSEMBLER: the optional
        # second argument (virtualizable) must live in the frame.
        descr = op.getdescr()
        assert isinstance(descr, JitCellToken)
        if op.numargs() == 2:
            self.rm._sync_var(op.getarg(1))
            return [self.loc(op.getarg(0)), self.fm.loc(op.getarg(1))]
        else:
            return [self.loc(op.getarg(0))]
def compute_vars_longevity(inputargs, operations):
    """Compute, per variable, the (first_definition, last_use) pair of
    operation indices.

    Returns a pair (longevity, last_real_usage):
      * longevity maps each live variable to (defined_at, last_used_at);
        input arguments get defined_at 0, or (-1, -1) if never used.
      * last_real_usage maps variables to their last use by an operation
        other than JUMP/LABEL.
    Side-effect-free operations whose result is never used are skipped.
    """
    last_used = {}
    last_real_usage = {}
    # walk backwards so the first time we see a variable is its last use
    for i in range(len(operations)-1, -1, -1):
        op = operations[i]
        if op.type != 'v':
            if op not in last_used and op.has_no_side_effect():
                continue    # dead, side-effect-free op: ignore entirely
        opnum = op.getopnum()
        for j in range(op.numargs()):
            arg = op.getarg(j)
            if isinstance(arg, Const):
                continue
            if arg not in last_used:
                last_used[arg] = i
            if opnum != rop.JUMP and opnum != rop.LABEL:
                if arg not in last_real_usage:
                    last_real_usage[arg] = i
        if op.is_guard():
            for arg in op.getfailargs():
                if arg is None: # hole
                    continue
                assert not isinstance(arg, Const)
                if arg not in last_used:
                    last_used[arg] = i
    #
    longevity = {}
    for i, arg in enumerate(operations):
        if arg.type != 'v' and arg in last_used:
            assert not isinstance(arg, Const)
            assert i < last_used[arg]
            longevity[arg] = (i, last_used[arg])
            del last_used[arg]
    for arg in inputargs:
        assert not isinstance(arg, Const)
        if arg not in last_used:
            longevity[arg] = (-1, -1)
        else:
            longevity[arg] = (0, last_used[arg])
            del last_used[arg]
    assert len(last_used) == 0

    if not we_are_translated():
        # sanity check: every non-Const argument was produced earlier
        produced = {}
        for arg in inputargs:
            produced[arg] = None
        for op in operations:
            for arg in op.getarglist():
                if not isinstance(arg, Const):
                    assert arg in produced
            produced[op] = None

    return longevity, last_real_usage
def is_comparison_or_ovf_op(opnum):
    """Return True if the resop class for *opnum* is a comparison or an
    overflow-checking operation."""
    from rpython.jit.metainterp.resoperation import opclasses
    cls = opclasses[opnum]
    # hack hack: in theory they are instance method, but they don't use
    # any instance field, we can use a fake object
    class Fake(cls):
        pass
    op = Fake()
    return op.is_comparison() or op.is_ovf()
def valid_addressing_size(size):
    """Return True if *size* is a hardware-addressable scale (1, 2, 4 or 8)."""
    return size in (1, 2, 4, 8)
def get_scale(size):
    """Map an operand size (1/2/4/8) to its addressing scale exponent
    (0/1/2/3)."""
    assert valid_addressing_size(size)
    # 1, 2 -> 0, 1 and 4, 8 -> 2, 3
    return size - 1 if size < 4 else (size >> 2) + 1
def not_implemented(msg):
    """Raise NotImplementedError with *msg*, also printing it when
    translated (where tracebacks are less informative)."""
    msg = '[llsupport/regalloc] %s\n' % msg
    if we_are_translated():
        llop.debug_print(lltype.Void, msg)
    raise NotImplementedError(msg)
|
That Little Something Extra—This elegant selection features a mica-coated metallic finish—with both a pearlescent and iridescent range. This will give your DIY invitations and programs a little something extra.
The Sparkle Does Not Transfer—At Cut Card Stock we pride ourselves in providing discount card stock of only the highest quality. Although our metallic card stock sparkles, the sparkle will not transfer to your hands or to any other items it comes in contact with.
It Is Versatile—Our metallic white card stock is available in standard 8.5x11 as well as many custom sizes—and is compatible with laser printers. This makes it ideal for a wide range of specialty projects for both your personal and professional needs. It adds an instant touch of class to cards and DIY stationery, and is elegant enough for wedding invitations and refined arts and crafts.
If plain white doesn’t quite cut it—our metallic white discount card stock is an excellent alternative!
Although the Curious Metallic Ice Silver Card Stock is laser and digital compatible, we recommend test printing or checking with your printer on printing and paper weights. The sturdy 111 lb cover weight stock may not go through a home printer.
|
from __future__ import print_function
import re, sys, site, getpass, socket, argparse, collections
from threading import Thread
from ipmi import ipmitool
# Todo: add logging
class Runner(Thread):
    """
    Build ipmitool object and run through tasks as per requested command

    One Runner thread handles a single console; results are stored on the
    instance (`output`, `error`, `status`) for collection after join().
    """

    # user-facing command name -> raw ipmitool chassis command
    ipmi_map = { "reboot" : "chassis power reset",
                 "pxe" : "chassis bootdev pxe",
                 "fix" : "chassis bootdev cdrom",
                 "disk" : "chassis bootdev disk",
                 "status": "chassis power status",
                 "off": "chassis power off",
                 "on": "chassis power on" }

    def __init__(self, console, password, command="disk", username="root"):
        """
        :param console: The console dns or ip address
        :param command: The ipmi command to execute specified in `ipmi_map`
        :param username: Console username
        :param password: Console password
        :raises NameError: if `console` is neither a valid IP address nor
            a resolvable DNS name
        """
        Thread.__init__(self)
        self.console = console
        self.command = command
        self.username = username
        self.password = password
        # gethostbyname() accepts both dotted-quad IPs (returned unchanged,
        # no lookup) and DNS names, so one call covers both cases.
        try:
            self.consoleip = socket.gethostbyname(self.console)
        except socket.gaierror:
            raise NameError('Console Ip or dns name is invalid')
        self.error = None
        self.output = None
        self.status = None

    def ipmi_method(self, command):
        """Use ipmitool to run commands with ipmi protocol
        """
        ipmi = ipmitool(self.console, self.password, self.username)
        if command == "reboot":
            # "power reset" only works on a powered-on chassis; power a
            # switched-off chassis on instead.
            self.ipmi_method(command="status")
            if self.output == "Chassis Power is off":
                command = "on"
        ipmi.execute(self.ipmi_map[command])
        if ipmi.status:
            self.error = ipmi.error.strip()
        else:
            self.output = ipmi.output.strip()
        self.status = ipmi.status

    def run(self):
        """Start thread run here
        """
        try:
            # "pxer" is a compound command: set PXE boot device, then reboot
            if self.command == "pxer":
                self.ipmi_method(command="pxe")
                if self.status == 0 or self.status is None:
                    self.command = "reboot"
                else:
                    return
            self.ipmi_method(self.command)
        except Exception as e:
            # store rather than raise: the main thread reports collated errors
            self.error = str(e)
def print_report(runner_results):
    """
    Print collated report with output and errors if any

    Hosts are grouped by identical error text, then by identical output
    text; hosts with neither are counted as completed.
    """
    errors = collections.defaultdict(list)
    outputs = collections.defaultdict(list)
    completed = []
    for info in runner_results:
        host = info['console']
        if info['error']:
            errors[info['error']].append(host)
        elif info['output']:
            outputs[info['output']].append(host)
        else:
            completed.append(host)
    if errors:
        print("Errors : ")
        for err, hosts in errors.items():
            print("{0} -- [{1}] {2}".format(err.strip(), len(hosts), ", ".join(hosts)))
        print()
    for out, hosts in outputs.items():
        print("{0} -- [{1}] {2}".format(out, len(hosts), ", ".join(hosts)))
    if completed:
        print("Completed config on {0} hosts".format(len(completed)))
def main():
    """Parse CLI arguments, confirm destructive actions with the user,
    fan out one Runner thread per console, and print a collated report."""
    parser = argparse.ArgumentParser(
        description="Run ipmitool commands on consoles")
    group = parser.add_argument_group("Host selectors")
    group.add_argument("-H", "--host", help="Console Ip or Dns Name")
    group.add_argument("-f", "--file", help="File with list of Consoles")
    group.add_argument("-u", "--username", default='root', help="Console username to use")
    parser.add_argument("-v", "--verbose", help="Verbose output", action="store_true")
    parser.add_argument("command", choices=["pxer", "pxe", "disk", "reboot", "off", "on", "status"],
                        help="pxer - set to PXE and reboot host")
    args = parser.parse_args()

    if args.file:
        try:
            host_list = open(args.file).read().split()
        except IOError as err:
            # bugfix: this referenced the undefined name `hostfile`
            print("Error: cannot open {0} ({1})".format(args.file, err))
            exit(1)
    elif args.host:
        host_list = [args.host]
    else:
        parser.print_usage()
        sys.exit(1)

    # Confirm with user for host power changes
    if args.command == "reboot" or args.command == "off" or args.command == "pxer":
        print("Power will be changed for the following hosts: ")
        for host in host_list:
            print(host)
        choice = raw_input("Do you want to proceed? (y/n): ")
        if not choice == "y":
            exit(1)

    # Get console password
    passwd = getpass.getpass()
    if not passwd:
        print("Please provide the console password")
        exit(1)

    # Launch one worker thread per console
    runner_list = []
    for host in host_list:
        runner_thread = Runner(host, command=args.command, username=args.username, password=passwd)
        runner_thread.start()
        runner_list.append(runner_thread)

    # Collect results once every thread has finished
    runner_results = list()
    for runner in runner_list:
        runner.join()
        runner_info = { 'console': runner.console, 'error': runner.error, 'output': runner.output }
        runner_results.append(runner_info)
    print_report(runner_results)

if __name__ == "__main__":
    main()

# vim: autoindent tabstop=4 expandtab smarttab shiftwidth=4 softtabstop=4 tw=0
|
Paint is not the only source of color at our disposal. What about paint chips!
Cozy little cave gave a stairway a spruce-up with a little paint chip wallpaper. Notice the yellow and white painted stripes on the ceiling leading to the magical staircase.
Ish and Chi made a striking color wall composition.
Design Verb has a tutorial for how to make business card holders out of chips.
Scott Prendergast used 500 chips from Home Depot for his color fade wall.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
import xml.etree.ElementTree as ET
from flask import abort, Flask, escape, request, session
from passlib.hash import sha256_crypt
from core.db import Retriever
def check_auth(session):
    """Return an XML confirmation (bytes) if the session holds an
    authenticated user, otherwise None."""
    if 'username' not in session:
        return None
    root = ET.Element('response')
    msg = ET.SubElement(root, 'message')
    # escape will protect against XSS if we decide to render this
    msg.text = "%s - You are authenticated." % escape(session['username'])
    return ET.tostring(root)
def login(request, session):
    """Authenticate the posted credentials and mark the session.

    Reads 'usernm'/'userpwd' from the form, looks the user up, and on a
    password match stores the login in the session and returns an XML
    confirmation; otherwise aborts with HTTP 401.
    """
    xml_res = ET.fromstring("<response></response>")
    login, passwd = request.form['usernm'], request.form['userpwd']
    # SECURITY(review): `login` is user-controlled and interpolated directly
    # into the condition string -- if Retriever builds raw SQL from it this
    # is an injection vector; it should accept bound parameters instead.
    db_info = json.loads(Retriever(
        ['login_user', 'password'],
        'utilisateur',
        "login_user='%s'" % login
    ).fetch())
    if not db_info:
        abort(401)
    # if the user exists and the password matches
    elif 'password' in db_info.keys() and sha256_crypt.verify(passwd, db_info['password']):
        session['username'] = login
        msg = ET.Element('message')
        msg.text = '%s - You are now authenticated.' % escape(login)
        xml_res.append(msg)
        return ET.tostring(xml_res)
    else:
        abort(401)
def logout(session):
    """Drop the authenticated user from *session* (if any) and return an
    XML acknowledgement (bytes)."""
    session.pop('username', None)
    root = ET.Element('response')
    message = ET.SubElement(root, 'message')
    message.text = 'Log out.'
    return ET.tostring(root)
|
It only takes a small water leak, or even excess humidity, for mold to form in your Edwards home or business. Then, mold can spread quickly through the property in as little as 48 hours. Mold can produce allergens and irritants, and it has the potential to cause other health effects. If your home or business has a mold problem, we can inspect and assess your property and use our specialized training, equipment, and expertise to remediate your mold infestation.
We are proud to be an active member of this community. SERVPRO of Summit, Lake, Park & Eagle Counties is locally owned and operated, so we’re already nearby and ready to help Edwards residents and business owners with any size mold contamination.
To keep the mold spores from spreading further, we will use various mold containment procedures. Our professionals may use physical barriers and negative air pressure to isolate the contaminated area and to help prevent the mold spores from spreading through your Edwards, CO home or business. Also, we’ll turn off the property’s fans and heating and cooling systems to prevent the spread of mold.
In Edwards, CO, SERVPRO of Summit, Lake, Park & Eagle Counties is ready to tackle and resolve any mold issues that arise. SERVPRO professionals understand the anxiety that can happen when a homeowner uncovers mold. Let us help put your worries to rest. Our knowledgeable crew is ready with the gear needed to tackle any mold job. Reach us at (970) 949-3235 - We’re ready to activate our team to remediate your mold.
|
from docutils.nodes import list_item, paragraph
from mock import Mock
import six
from releases import (
Issue,
issues_role,
Release,
release_role,
construct_releases,
)
from releases.util import make_app, changelog2dict
def inliner(app=None):
    """Fake docutils inliner whose nested settings expose a Sphinx app,
    matching what issues_role/release_role expect to find."""
    app = app or make_app()
    return Mock(document=Mock(settings=Mock(env=Mock(app=app))))
# Obtain issue() object w/o wrapping all parse steps
def issue(type_, number, **kwargs):
    """Build an Issue node of the given role *type_* and *number*.

    Keyword flags 'backported', 'major' and 'spec' are encoded into the
    role text the same way a user would write them in a changelog.
    """
    text = str(number)
    if kwargs.get("backported", False):
        text += " backported"
    if kwargs.get("major", False):
        text += " major"
    if kwargs.get("spec", None):
        text += " (%s)" % kwargs["spec"]
    app = kwargs.get("app", None)
    # issues_role returns ([nodes], [messages]); we want the single node
    return issues_role(
        name=type_,
        rawtext="",
        text=text,
        lineno=None,
        inliner=inliner(app=app),
    )[0][0]
# Even shorter shorthand!
def b(number, **kwargs):
    """Shorthand for a bug issue node."""
    return issue("bug", str(number), **kwargs)

def f(number, **kwargs):
    """Shorthand for a feature issue node."""
    return issue("feature", str(number), **kwargs)

def s(number, **kwargs):
    """Shorthand for a support issue node."""
    return issue("support", str(number), **kwargs)
def entry(i):
    """
    Easy wrapper for issue/release objects.

    Default is to give eg an issue/release object that gets wrapped in a LI->P.
    May give your own (non-issue/release) object to skip auto wrapping. (Useful
    since entry() is often called a few levels deep.)
    """
    if not isinstance(i, (Issue, Release)):
        return i
    return list_item("", paragraph("", "", i))
def release(number, **kwargs):
    """Build a changelog list item for a release *number* with a fixed
    dummy date, via the real release_role parser."""
    app = kwargs.get("app", None)
    nodes = release_role(
        name=None,
        rawtext="",
        text="%s <2013-11-20>" % number,
        lineno=None,
        inliner=inliner(app=app),
    )[0]
    return list_item("", paragraph("", "", *nodes))
def release_list(*entries, **kwargs):
    """Normalize *entries* into a changelog-style node list.

    Strings become release nodes, everything else goes through entry().
    Unless skip_initial=True, an empty 1.0.0 release is appended to start
    the timeline.
    """
    skip_initial = kwargs.pop("skip_initial", False)
    entries = list(entries) # lol tuples
    # Translate simple objs into changelog-friendly ones
    for index, item in enumerate(entries):
        if isinstance(item, six.string_types):
            entries[index] = release(item)
        else:
            entries[index] = entry(item)
    # Insert initial/empty 1st release to start timeline
    if not skip_initial:
        entries.append(release("1.0.0"))
    return entries
def releases(*entries, **kwargs):
    """Run *entries* through construct_releases() and return the release
    list (first element of its result pair)."""
    app = kwargs.pop("app", None) or make_app()
    return construct_releases(release_list(*entries, **kwargs), app)[0]
def setup_issues(self):
    """Attach a standard set of issue fixtures to a test instance:
    feature, support, bug, major bug, and backported feature/support."""
    self.f = f(12)
    self.s = s(5)
    self.b = b(15)
    self.mb = b(200, major=True)
    self.bf = f(27, backported=True)
    self.bs = s(29, backported=True)
def expect_releases(entries, release_map, skip_initial=False, app=None):
    """Assert that building a changelog from *entries* yields exactly the
    issue sets described by *release_map* (release -> expected issues).

    Releases with empty issue lists are tolerated; any other leftover
    release in the changelog is a failure.
    """
    kwargs = {"skip_initial": skip_initial}
    # Let high level tests tickle config settings via make_app()
    if app is not None:
        kwargs["app"] = app
    changelog = changelog2dict(releases(*entries, **kwargs))
    snapshot = dict(changelog)   # keep a copy for error messages
    err = "Got unexpected contents for {}: wanted {}, got {}"
    err += "\nFull changelog: {!r}\n"
    for rel, issues in six.iteritems(release_map):
        found = changelog.pop(rel)
        msg = err.format(rel, issues, found, snapshot)
        assert set(found) == set(issues), msg
    # Sanity: ensure no leftover issue lists exist (empty ones are OK)
    for key in list(changelog.keys()):
        if not changelog[key]:
            del changelog[key]
    assert not changelog, "Found leftovers: {}".format(changelog)
|
How Much Power Do ‘Millennials’ Actually Have?
Millennials, clearly, are not living the lives of easy abundance bestowed on generations past — no fighting over the check at Outback Steakhouse, no need (or budget) for a station wagon.
The platform’s radical simplicity is gaining followers, and the old guard wants in.
In less than two years, skateboarding will be in the Olympics. I spent my weekend at the Youth Olympic Games in Buenos Aires to get a better perspective of how that will work and my potential role in the process.
|
""" Take combinations of datasets in the summary for training and test each
The input of this
operation has to contain several comparable datasets of the same type.
Depending on whether the input datasets contain split data, the behavior
of this operation differs slightly.
.. note:: This operation creates an output directory with links,
not duplicated files!
If the input datasets are not split, the result of this operation
contains one dataset for every pair of datasets of the *input_path*.
For instance, if the input consists of the three datasets "A", "B",
"C", the result will at least contain the 6 datasets "A_vs_B",
"A_vs_C", "B_vs_A", "B_vs_C, "C_vs_A", "C_vs_B". The result dataset "A_vs_B"
uses the feature vectors from dataset "A" as training data and the feature
vectors from dataset "B" as test data.
If the input datasets contain split data, additionally the input
datasets are copied to the result directory so that this would contain
9 datasets. The dataset "X_vs_Y" contains the train data from dataset X
from the respective split for training and the test data from dataset Y for
testing.
A typical operation specification file might look like this
Specification file Parameters
+++++++++++++++++++++++++++++
type
----
Has to be set to *shuffle* to use this operation!
(*obligatory, shuffle*)
input_path
----------
Location of the input data
(*obligatory*)
dataset_constraints
-------------------
Optionally, constraints can be passed to the operation that specify which
datasets are combined based on the dataset name. For instance, the
constraint
'"%(dataset_name1)s".strip("}{").split("}{")[1:] == "%(dataset_name2)s".strip("}{").split("}{")[1:]'
would cause that only datasets are combined,
that were created by the same
preprocessing with the same parameterization.
(*optional, default: []*)
Exemplary Call
++++++++++++++
.. code-block:: yaml
type: shuffle
input_path: "operation_results/2009_8_13_15_8_57"
dataset_constraints:
# Combine only datasets that have been created using the same parameterization
- '"%(dataset_name1)s".strip("}{").split("}{")[1:] == "%(dataset_name2)s".strip("}{").split("}{")[1:]'
"""
import sys
import os
import glob
import time
import yaml
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
import processing
else:
import multiprocessing as processing
import logging
import pySPACE
from pySPACE.missions.operations.base import Operation, Process
from pySPACE.tools.filesystem import create_directory
from pySPACE.resources.dataset_defs.base import BaseDataset
from pySPACE.tools.filesystem import get_author
class ShuffleOperation(Operation):
    """ Forwards processing to process

    .. todo:: Precalculate one process for each shuffling
    """
    def __init__(self, processes, operation_spec, result_directory,
                 number_processes, create_process=None):
        super(ShuffleOperation, self).__init__(processes, operation_spec,
                                               result_directory)
        self.number_processes = number_processes
        self.create_process = create_process

    @classmethod
    def create(cls, operation_spec, result_directory, debug=False, input_paths=None):
        """ Factory method that creates a ShuffleOperation

        A factory method that creates a ShuffleOperation based on the
        information given in the operation specification operation_spec
        """
        # bugfix: avoid the shared-mutable-default-argument pitfall
        if input_paths is None:
            input_paths = []
        assert(operation_spec["type"] == "shuffle")

        # Determine constraints on datasets that are combined
        dataset_constraints = []
        if "dataset_constraints" in operation_spec:
            dataset_constraints.extend(operation_spec["dataset_constraints"])

        # Create the ShuffleProcess (shuffling is not distributed over
        # different processes)
        number_processes = 1
        processes = processing.Queue()
        cls._createProcesses(processes, result_directory, input_paths,
                             dataset_constraints)
        # create and return the shuffle operation object
        return cls(processes, operation_spec, result_directory, number_processes)

    @classmethod
    def _createProcesses(cls, processes, result_directory, input_datasets,
                         dataset_constraints):
        """Function that creates the shuffle process.

        Create the ShuffleProcess (shuffling is not distributed over different
        processes)
        """
        # Create the shuffle process and put it in the execution queue
        processes.put(ShuffleProcess(input_datasets, result_directory,
                                     dataset_constraints))
        # give executing process the sign that creation is now finished
        processes.put(False)

    def consolidate(self):
        """ Consolidation of the operation's results """
        # Just do nothing
        pass
class ShuffleProcess(Process):
""" The shuffle process
Combines datasets that fulfill all *dataset_constraints*
"""
def __init__(self, input_dataset, result_directory, dataset_constraints):
    """Store the dataset collection, output directory and constraint
    templates for later use by __call__.

    Note: *input_dataset* is a collection of dataset directories; it is
    stored under the plural attribute name used by __call__.
    """
    super(ShuffleProcess, self).__init__()

    self.input_datasets = input_dataset
    self.result_directory = result_directory
    self.dataset_constraints = dataset_constraints
def __call__(self):
""" Executes this process on the respective modality """
############## Prepare benchmarking ##############
super(ShuffleProcess, self).pre_benchmarking()
for dataset_dir1 in self.input_datasets:
for dataset_dir2 in self.input_datasets:
dataset_name1 = dataset_dir1.split(os.sep)[-2]
dataset_name2 = dataset_dir2.split(os.sep)[-2]
# Check if the input data is split
splitted = len(glob.glob(os.sep.join([dataset_dir1, "data_run0",
"*"]))) > 1
# Check that all constraints are fulfilled for this pair of
# input datasets
if not all(eval(constraint_template % {'dataset_name1': dataset_name1,
'dataset_name2': dataset_name2})
for constraint_template in self.dataset_constraints):
continue
if dataset_name1 == dataset_name2:
if splitted:
# Copy the data
os.symlink(dataset_dir1,
os.sep.join([self.result_directory,
dataset_name1]))
continue
# Determine names of the original data sets the input
# datasets are based on
base_dataset1 = dataset_name1.strip("}{").split("}{")[0]
base_dataset2 = dataset_name2.strip("}{").split("}{")[0]
# Determine target dataset name and create directory
# for it
mixed_base_dataset = "%s_vs_%s" % (base_dataset1,
base_dataset2)
target_dataset_name = dataset_name1.replace(base_dataset1,
mixed_base_dataset)
target_dataset_dir = os.sep.join([self.result_directory,
target_dataset_name])
create_directory(os.sep.join([target_dataset_dir, "data_run0"]))
if splitted:
# For each split, copy the train data from dataset 1 and
# the test data from dataset 2 to the target dataset
for source_train_file_name in glob.glob(os.sep.join([dataset_dir1,
"data_run0",
"*_sp*_train.*"])):
# TODO: We have $n$ train sets and $n$ test sets, we "metadata.yaml"])),
# could use all $n*n$ combinations
target_train_file_name = source_train_file_name.replace(dataset_dir1,
target_dataset_dir)
if source_train_file_name.endswith("arff"):
self._copy_arff_file(source_train_file_name,
target_train_file_name,
base_dataset1,
mixed_base_dataset)
else:
os.symlink(source_train_file_name,
target_train_file_name)
source_test_file_name = source_train_file_name.replace(dataset_dir1,
dataset_dir2)
source_test_file_name = source_test_file_name.replace("train.",
"test.")
target_test_file_name = target_train_file_name.replace("train.",
"test.")
if source_train_file_name.endswith("arff"):
self._copy_arff_file(source_test_file_name,
target_test_file_name,
base_dataset2,
mixed_base_dataset)
else:
os.symlink(source_test_file_name,
target_test_file_name)
else:
# Use the data set from dataset 1 as training set and
# the data set from dataset 2 as test data
for source_train_file_name in glob.glob(os.sep.join([dataset_dir1,
"data_run0",
"*_sp*_test.*"])):
target_train_file_name = source_train_file_name.replace("test.",
"train.")
target_train_file_name = target_train_file_name.replace(dataset_dir1,
target_dataset_dir)
if source_train_file_name.endswith("arff"):
self._copy_arff_file(source_train_file_name,
target_train_file_name,
base_dataset1,
mixed_base_dataset)
else:
os.symlink(source_train_file_name,
target_train_file_name)
source_test_file_name = source_train_file_name.replace(dataset_dir1,
dataset_dir2)
target_test_file_name = target_train_file_name.replace("train.",
"test.")
if source_train_file_name.endswith("arff"):
self._copy_arff_file(source_test_file_name,
target_test_file_name,
base_dataset2,
mixed_base_dataset)
else:
os.symlink(source_test_file_name,
target_test_file_name)
# Write metadata.yaml based on input meta data
input_dataset1_meta = BaseDataset.load_meta_data(dataset_dir1)
output_dataset_meta = dict(input_dataset1_meta)
output_dataset_meta['train_test'] = True
output_dataset_meta['date'] = time.strftime("%Y%m%d_%H_%M_%S")
output_dataset_meta['author'] = get_author()
BaseDataset.store_meta_data(target_dataset_dir,output_dataset_meta)
############## Clean up after benchmarking ##############
super(ShuffleProcess, self).post_benchmarking()
def _copy_arff_file(self, input_arff_file_name, target_arff_file_name,
input_dataset_name, target_dataset_name):
""" Copy the arff files and adjust the relation name in the arff file"""
file = open(input_arff_file_name, 'r')
content = file.readlines()
file.close()
content[0] = content[0].replace(input_dataset_name,
target_dataset_name)
file = open(target_arff_file_name, 'w')
file.writelines(content)
file.close()
|
So I was *delighted* when I saw the theme for this month's blog hop, and had to get on it. So many beautiful colour combos.
Check them out- aren't they just a breath of fresh air?!
And here is a picture of my card- I didn't have any navy card stock, so I made some by rubbing my HA shadow ink Navy ink pad all over a white piece of card. I then clear embossed some stars, and gold embossed The Star in the corner. I gold embossed the sentiment from PTI's Beautiful Blessings: Holiday on a die cut panel of melon mambo card stock. I also heat embossed just the word 'peace' on some Rich Razzleberry card stock and cut it out and popped it up on a banner shape (hand-cut).
Some blue sequins finished it all off before I popped up the whole front panel on a black card base.
it has been a while since my last post, but things have been a little crazy around here with me being really REALLY sick, and then my little girl catching it the following week (and her first ever treatment with anti-biotics *sniff*). And all this while I was in the early stages of pregnancy. And to top it all off, I heard on Tuesday that my latest scans showed that little bubby had no heartbeat. I guess it was all too much for our little angel to bear. I was 9 weeks along.
So, I've taken the rest of the week off to relax and come to grips with everything. I am doing well (so far…), and am grateful for all the women before me who have been open enough to relate their experiences with miscarriage. I am so grateful to know I am not alone. It's not an easy thing to talk about, but I think it's important. Especially since we've just observed Pregnancy and Infant Loss Day on October 15.
Still, if there was ever a day for papercraft therapy, today would be it, don't you think? And so that's what I did :) It did relax me and cheer me up somewhat.
About 3 hours after I told her my sad news, I received a bunch of 12 long stemmed pink roses from my bestie Denise…So I think I'll send her this card.
|
#!/usr/bin/env python3
import os
import pingTracker
from time import strftime
#Set all variables here
# NOTE(review): a hard-coded push-service API token is checked into source;
# consider loading it from an environment variable or a config file instead.
API = "o.fdUi5rBICoNHmCf02ANsN5evuPvmZJDv"
# Shared tracker instance used by the functions below to accumulate
# per-weekday delivery statistics.
pingTrack = pingTracker.pingTracker()
def addToDayCounter( dayArgs):
    """Increment the delivery counter for the given weekday.

    dayArgs: weekday name as produced by strftime("%A"), e.g. "Friday".
    Unknown values are silently ignored (same as the original if-chain).
    """
    # A dispatch table avoids the copy/paste if-chain that previously routed
    # "Friday" to pingTrack.addToThursday() by mistake (bug fix).
    # NOTE(review): assumes pingTracker exposes addToFriday() like the other
    # addTo<Day>() methods — confirm against pingTracker.py.
    dispatch = {
        "Monday": pingTrack.addToMonday,
        "Tuesday": pingTrack.addToTuesday,
        "Wednesday": pingTrack.addToWednesday,
        "Thursday": pingTrack.addToThursday,
        "Friday": pingTrack.addToFriday,   # was addToThursday
        "Saturday": pingTrack.addToSaturday,
        "Sunday": pingTrack.addToSunday,
    }
    handler = dispatch.get(dayArgs)
    if handler is not None:
        handler()
def pushSummary():
    """Send a delivery-summary push notification and reset the statistics."""
    import subprocess  # local import keeps this standalone script's header unchanged
    TTL = "Mailbox Alert: Delivery Summary"
    MSG = pingTrack.statMostDay()
    # Pass the arguments as a list instead of interpolating them into a shell
    # string: previously a single quote inside MSG broke (or could hijack)
    # the command line.
    subprocess.call(["sh", "/home/pi/PING-Mailbox/PingPush.sh", API, TTL, MSG])
    pingTrack.resetStats()
def pushMsg():
    """Send a package-arrival notification and update the weekday counter."""
    import subprocess  # local import keeps this standalone script's header unchanged
    arrival_time = strftime("%I:%M %p")
    day = strftime("%A")
    # "received" was previously misspelled "recieved" in both user-facing strings
    TTL = "Mailbox Alert: You have received a package"
    MSG = "You have received a package at %s on %s" % (arrival_time, day)
    # Argument list instead of a shell-interpolated string (quoting-safe)
    subprocess.call(["sh", "/home/pi/PING-Mailbox/PingPush.sh", API, TTL, MSG])
    addToDayCounter(day)
#TODO: Save how many and at what time each package arrived
#TODO: After 5 packages delivered, send logistic summary notification
|
Each year about half a million students fail to make planned academic progress due to college algebra, hence the need for researchers to find ways of improving the quality of instruction in the course. Recent research suggests that flipping college algebra to allow time for active learning in the classroom may improve student performance. Also, the costs of college textbooks have skyrocketed in the last few years, preventing or discouraging students from obtaining crucial learning resources. To address both concerns, the researcher implemented a flipped college algebra classroom and based all lessons on a free textbook. Videos and corresponding problem sets were created and made available to students on the Internet. Outside class, students viewed videos, took notes, and completed video problem assignments. Inside class, students worked with other students to complete in-class problem assignments. Students described their experience in the flipped classroom in an anonymous essay and an online survey, and the researcher recorded field notes of his observations throughout the term. This study reports student and instructor perceptions of the flipped college algebra classroom.
|
from csv import DictWriter
from datetime import datetime
from elasticsearch_dsl.query import Q
from django.core.management.base import BaseCommand
from catalog.elastic_models import NACPDeclaration
AGGREGATED_FIELD_NAME = 'aggregated'
class Command(BaseCommand):
    """Export the ``aggregated`` section of NACP declarations to a CSV file."""

    # Bug fix: the second half of this message used to sit on the next line as
    # a bare string literal (a no-op statement), so the help text was silently
    # truncated. Parenthesised implicit concatenation restores the full text.
    help = ('Export aggregated values from NACP declarations '
            '(annual only with corrected declarations resolved by default) '
            'into CSV format')

    # (The previous no-op __init__ that only called super() was removed.)

    def add_arguments(self, parser):
        """Register the command-line options of this management command."""
        parser.add_argument(
            '--export_all',
            dest='export_all',
            default=False,
            action='store_true',
            help='Export all declarations (all types, both corrected and originals)',
        )
        parser.add_argument(
            '--filter_future_declarations',
            dest='filter_future_declarations',
            default=False,
            action='store_true',
            help='Export only declarations submitted for previous years',
        )
        parser.add_argument(
            'destination',
            help='Path to csv file',
        )

    def handle(self, *args, **options):
        """Stream matching declarations from elasticsearch into the CSV file."""
        # Only documents that actually carry the aggregated section
        to_export = NACPDeclaration.search().source(
            include=[AGGREGATED_FIELD_NAME]).query("exists", field=AGGREGATED_FIELD_NAME)

        if not options["export_all"]:
            # Annual declarations only, skipping those superseded by a
            # corrected declaration
            to_export = to_export.query(
                "bool",
                must=[Q("term", intro__doc_type="Щорічна")],
                must_not=[Q("exists", field="corrected_declarations")]
            )

        if options["filter_future_declarations"]:
            to_export = to_export.query(
                "range",
                intro__declaration_year={
                    "lt": datetime.now().year
                }
            )

        writer = None
        with open(options["destination"], "w") as fp:
            for i, d in enumerate(to_export.scan()):
                row = d[AGGREGATED_FIELD_NAME].to_dict()
                row['id'] = d.meta.id

                if writer is None:
                    # Column set is taken from the first row.
                    # NOTE(review): assumes the aggregated schema is uniform
                    # across documents — a row with extra keys would raise
                    # ValueError in DictWriter. Confirm before relying on it.
                    writer = DictWriter(fp, fieldnames=row.keys())
                    writer.writeheader()

                writer.writerow(row)

                # Progress indicator every 10k documents (skipping i == 0)
                if i % 10000 == 0 and i:
                    self.stdout.write("{} declarations exported".format(i))
|
English Letter Writing Format Informal Image Photo Album Beautiful is just one of the many collections of Best Of Simple Report Format Example Reference that we have on this website. We have a lot of material on Resume Template With Photo and Best Of Simple Report Format Example, and other related things on this website. We're not just providing information on these topics — you can also find many more references to help you create your Resume Template With Photo or Best Of Simple Report Format Example. So, don't forget to keep visiting Resume Template With Photo to get the latest updates about Resume Template With Photo, Best Of Simple Report Format Example, and more.
English Letter Writing Format Informal Image Photo Album Beautifu was posted in February 27, 2018 at 1:36 pm. English Letter Writing Format Informal Image Photo Album Beautifu has viewed by 66 users. Click it and download the English Letter Writing Format Informal Image Photo Album Beautifu.
Resume Template With Photo, Signing Off A French Formal Letter Best Of English Date Format In A Letter Best Formal Letter Format was posted February 19, 2018 at 7:47 pm by Resume Template With Photo . More over Signing Off A French Formal Letter Best Of English Date Format In A Letter Best Formal Letter Format has viewed by 11687 visitor.
Resume Template With Photo, Date Format In English Letter Copy English Date Format In A Letter Best Letter Writing Samples Pdf was posted July 21, 2018 at 1:31 pm by Resume Template With Photo . More over Date Format In English Letter Copy English Date Format In A Letter Best Letter Writing Samples Pdf has viewed by 14936 visitor.
Resume Template With Photo, English Letter Envelope Format was posted March 22, 2018 at 11:04 pm by Resume Template With Photo . More over English Letter Envelope Format has viewed by 5760 visitor.
Resume Template With Photo, Format Of Informal Letter Writing In English In India was posted March 22, 2018 at 12:22 am by Resume Template With Photo . More over Format Of Informal Letter Writing In English In India has viewed by 14579 visitor.
Resume Template With Photo, English Date Format In A Letter was posted September 8, 2018 at 11:35 pm by Resume Template With Photo . More over English Date Format In A Letter has viewed by 6599 visitor.
Resume Template With Photo, English Letter In Marathi Format was posted August 21, 2018 at 11:51 am by Resume Template With Photo . More over English Letter In Marathi Format has viewed by 3986 visitor.
Resume Template With Photo, Love Letter Format English was posted March 24, 2018 at 1:10 am by Resume Template With Photo . More over Love Letter Format English has viewed by 7439 visitor.
Resume Template With Photo, Format Of Formal Letter In English was posted January 21, 2018 at 3:32 am by Resume Template With Photo . More over Format Of Formal Letter In English has viewed by 7136 visitor.
Resume Template With Photo, Format Of English Letter Writing was posted June 16, 2018 at 3:20 pm by Resume Template With Photo . More over Format Of English Letter Writing has viewed by 8160 visitor.
Resume Template With Photo, English Letter Format Template was posted May 25, 2018 at 7:34 am by Resume Template With Photo . More over English Letter Format Template has viewed by 5558 visitor.
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""#{{{
Plots reflection and transmission of a metamaterial structure
Tries to calculate its effective parameters [Smith2002], avoiding branch jumps
Enables to save to several types of output (cartesian graphs, polar graphs, nice PDF graphs...)
Exports the effective parameters to another data file for further processing
About this script:
* Written in 2012-13 by Filip Dominec (dominecf at the server of fzu.cz).
* Being distributed under the GPL license, this script is free as speech after five beers.
* You are encouraged to use and modify it as you need. Feel free to write me if needed.
* Hereby I thank to the MEEP/python_meep authors and people of meep mailing list who helped me a lot.
TODOs:
* Guess the correct branch for N (using Kramers-Kronig relations?)
* Fix the passivity criterion for Im N > 0, Re Z > 0
"""
import numpy as np
import sys, os, re, matplotlib
matplotlib.use('Agg') ## Enable plotting even in the GNU screen session?
import matplotlib.pyplot as plt
plt.ioff() ## is this useful?
from scipy.optimize import fsolve, fmin
c = 2.99792458e8 # speed of light [m/s]

## == User settings for postprocessing and plotting ==
frequnit, frequnitname = 1e12, "THz"    # unit used on all frequency axes
#}}}
## Initial arccos() branch and sign for the refractive-index retrieval;
## adjust these if the retrieved N comes out discontinuous (see rt2n() below)
N_init_branch   =   -3
N_init_sign     =   1
autocorrect_signs = True        ## try to fix the N branch / Z sign automatically
Z_init_sign     =   -1
check_hilbert   =   1           ## Verifies if Kramers-Kronig relations hold for N ###XXX
legend_enable   =   0
brillouin_boundaries = 1        ## Plots thin lines where the N would exceed the allowed
                                ## range for 0-th Bloch mode
autobranch      =   0
plot_publi      =   0           ## prepares nice small graphs for publication
savedat         =   1           ## saves eff params to PKGraph-compatible ascii file
savedat_wd      =   1           ## uses the working directory to save the eff params
plot_polar      =   0           ## plots them to polar graphs for diagnostics
plot_bands      =   0           ## plots them to k-omega graphs for diagnostics
plot_expe       =   1           ## if 'r.dat', 't.dat', 'N.dat', 'Z.dat', 'eps.dat' or 'mu.dat' available, overlay them
find_plasma_frequency = 0       ## find frequencies where epsilon crosses zero
plot_freq_min   = None          # None = take the plotting limits from the data-file header
#plot_freq_max   = None
plot_freq_max   = 3e12
padding = None                  # None = take monitor-plane padding from the data-file header
np.seterr(all='ignore')         ## do not print warnings for negative-number logarithms etc.
## == </user settings> ==
## == Auxiliary functions ==
def get_simulation_name(argindex=1): #{{{
"""Get the name of the last simulation run.
Priority: 1) parameter, 2) last_simulation_name.txt, 3) working directory"""
cwd = os.getcwd()
if len(sys.argv)>argindex and sys.argv[argindex] != "-" and __name__ == "__main__":
print "Parameter passed:", sys.argv[argindex]
last_simulation_name = sys.argv[argindex]
elif os.path.exists(os.path.join(cwd, 'last_simulation_name.txt')):
print "Loading from", os.path.join(cwd, 'last_simulation_name.txt')
last_simulation_name = os.path.join(cwd, open(os.path.join(cwd, 'last_simulation_name.txt'),'r').read().strip())
else:
print "Error: No input file provided and 'last_simulation_name.txt' not found!"
last_simulation_name = cwd
if (last_simulation_name[-4:] == ".dat"): last_simulation_name = last_simulation_name[:-4] # strip the .dat extension
return last_simulation_name
#}}}
def load_rt(filename, layer_thickness=None, plot_freq_min=None, plot_freq_max=None, truncate=True, padding=None): #{{{
    """ Loads the reflection and transmission spectra and simulation settings

    Returns:
    * frequency axis
    * reflection s11 and transmission s12 as complex np arrays

    Compatible with the PKGraph text data file with polar data:
    * parameters in header like: #param name,value
    * column identification like: #column Ydata
    * data columns in ascii separated by space
    Expects polar data with columns: frequency, s11 ampli, s11 phase, s12 ampli, s12 phase
    """
    ## Bug fix: `d` used to be assigned only when the header contained
    ## "layer_thickness" AND the caller did not override it; passing
    ## layer_thickness explicitly made the return statement raise NameError.
    d = layer_thickness
    with open(filename+'.dat') as datafile:
        for line in datafile:
            if line[0:1] in "0123456789": break         # end of file header
            ## the parameter value is the last space/comma separated token
            value = line.replace(",", " ").split()[-1]
            ## header values are only used when not overridden by the caller
            if ("layer_thickness" in line) and (layer_thickness is None): d = float(value)
            if ("plot_freq_min" in line) and (plot_freq_min is None): plot_freq_min = float(value)
            if ("plot_freq_max" in line) and (plot_freq_max is None): plot_freq_max = float(value)
            if ("param padding" in line) and (padding is None): padding = float(value)
    xlim = (plot_freq_min, plot_freq_max)
    (freq, s11amp, s11phase, s12amp, s12phase) = \
            map(lambda a: np.array(a, ndmin=1), np.loadtxt(filename+".dat", unpack=True))

    ## Limit the frequency range to what will be plotted (recommended)
    if truncate and len(freq)>1:
        (d0,d1) = np.interp((plot_freq_min, plot_freq_max), freq, range(len(freq)))
        (freq, s11amp, s11phase, s12amp, s12phase) = \
                map(lambda a: a[int(d0):int(d1)], (freq, s11amp, s11phase, s12amp, s12phase))
    return freq, s11amp, s11phase, s12amp, s12phase, xlim, (d, plot_freq_min, plot_freq_max, padding)
#}}}
def shiftmp(freq, s11, shiftplanes):#{{{
    """ Adjusts the reflection phase like if the monitor planes were not centered.

    For symmetric metamaterial cells this correction is unnecessary — symmetry
    requires centered monitor planes in front of and behind the cell. For an
    asymmetric cell, however, the proper plane position must be found,
    otherwise the Fresnel inversion yields a negative imaginary part of N
    and/or a negative real part of Z, which is unphysical for a passive medium.

    Such metamaterials can still be homogenized if the monitor-plane position
    is treated as a function of frequency, assuming:
     1) the optimum shift holds for any number of stacked unit cells, and
     2) reversing the wave direction (for s21, s22) negates the shift.
    These rules should allow homogenizing any asymmetric non-chiral
    metamaterial. Note this shifting is still experimental and needs testing.
    """
    wavelength = c / freq
    ## phase advance of a round trip over the (frequency-dependent) shift
    phase_factor = np.exp(1j * np.array(shiftplanes) / wavelength * 2*np.pi * 2)
    return np.array(s11) * phase_factor
#}}}
def find_maxima(x, y, minimum_value=.1):#{{{
    """
    Returns the x points where
     1) y has a local maximum (i. e. dy/dx changes from positive to negative) AND
     2) where y is above minimum_value
    """
    d = y[1:-1] - y[0:-2]          ## naïve first derivative: d[i] = y[i+1] - y[i]
    ## Each of the three sign() terms contributes +1 when its condition holds,
    ## so the sum equals 3 exactly at a qualifying peak
    mask = (np.sign(d[0:-2]) - np.sign(d[1:-1]) + np.sign(y[2:-2] - minimum_value)) == 3
    ## Bug fix: the boolean mask is shorter than x[1:]. Old NumPy silently
    ## applied it to the first len(mask) elements; modern NumPy raises
    ## IndexError. Slicing explicitly preserves the original semantics.
    maxima = x[1:1+len(mask)][mask]
    return maxima
#}}}
def reasonable_ticks(a): #{{{
    """ Define the grid and ticks a bit denser than by default

    Returns 1, 2 or 5 times the decimal order of magnitude of *a*.
    """
    x=np.trunc(np.log10(a)); y=a/10**x/10
    ## np.int was removed in NumPy 1.24; builtin int() behaves identically here
    return (10**x, 2*10**x, 5*10**x)[int(3*y)]
#}}}
## == Homogenisation functions (processing whole np.array at once) ==
def polar2complex(amp, phase): #{{{
    """ Convert polar amplitude/phase data to a complex number (or array). """
    return amp * np.exp(1j * phase)
#}}}
def unwrap_ofs(p, ofs):#{{{
    """ Similar to np.unwrap, but take into account the initial offset.

    Returns the unwrapped phase shifted by *ofs*, plus the updated offset
    (the total unwrap correction accumulated at the last sample).
    """
    unwrapped = np.unwrap(p)
    return unwrapped + ofs, (unwrapped - p)[-1] + ofs
#}}}
def rt2n(frequency, s11, s12, d, init_branch=0, init_sign=1, uo=[0,0,0,0]): #{{{
    """ Invert Fresnel equations to obtain complex refractive index N, with autodetection of arccosine branch#{{{

    Accepts:
    * s11 - np.array of reflection,
    * s12 - np.array of transmission,
    * d - layer thickness

    Returns: a tuple of three np.arrays
    * the retrieved effective index of refraction,
    * the arccos() branch used for its calculation,
    * the debug information

    Technical details are commented in the code.

    This algorithm implements the method for effective refractive index retrieval from the
    s11 and s12 scattering coefficients [Smith2002].

    Such calculation is not unambiguous due to multiple branches of complex arccosine. If the branches
    of the solution are not switched at proper frequencies, the index of refraction often becomes discontinuous
    and generally wrong.

    This function fixes this problem by the analysis of the arccos() behaviour. It requires that the r(f) and t(f)
    are supplied as whole spectra. It is then possible to trace the argument passed to arccos() and properly choose
    the correct branch for whole spectral region.

    Limitations of this procedure:
      * If structure is highly reflective at lowest frequencies (metallic wires etc.), the N branch cannot be determined
        reliably. To fix this, increase 'plot_freq_min' (the start of computed frequency range), or provide init_branch.
        Initial branch choosing is not implemented. Its value may be optionally provided in the argument init_branch and
        init_sign. The user should choose them so that the initial values for
          i) the curves are continuous
          ii) and: Im(N) > 0 (for a nonamplifying medium)
      * The np.unwrap() function requires that the frequency is sampled fine enough. If the branch is wrongly detected
        at sharp resonances, there are good news: You probably do not have to run the simulation longer; often is
        sufficient to pad the time-domain data with zeroes.
      * For some simulations, there is a weird _continuous_ branch transition at higher frequencies for thicker
        metamaterial samples. The returned index of refraction breaks Kramers-Kronig relations.
        However, the Hilbert transform of the imaginary part of N" gives proper data. Single-cell simulation also gives
        proper data...
        Putting the layers far apart alleviates this for 2 cells: can it be related to higher-order Bloch modes?
    """#}}}
    ## NOTE(review): the default uo=[0,0,0,0] is a mutable default that is
    ## mutated in place (uo[0..3] below) and so persists across calls when the
    ## caller does not pass uo explicitly — confirm this is intended.
    ## Argument passed to arccos():
    arg = (1+0j-s11**2+s12**2)/2/(s12)
    ## Count passing through complex arccos() branch cuts in the complex plane
    ## (the small imaginary offset keeps the angle off the cut itself):
    lu, uo[0] = unwrap_ofs(np.angle(arg + 1. + 0e-3j) + np.pi, uo[0])
    ru, uo[1] = unwrap_ofs(np.angle(arg - 1. + 0e-3j), uo[1])
    lbc = np.floor(lu/2/np.pi)      # winding count around the left cut endpoint
    rbc = np.floor(ru/2/np.pi)      # winding count around the right cut endpoint
    anl = (-1)**(lbc) ## left cut:  (-inf .. -1]
    anr = (-1)**(rbc) ## right cut: [1 .. +inf)
    ## Retrieve the sign and branch of the arccos()
    sign = anr*anl*init_sign
    lbr, uo[2] = unwrap_ofs(np.angle(-anr + 1j*anl) + np.pi, uo[2])
    rbr, uo[3] = unwrap_ofs(np.angle(+anr - 1j*anl) + np.pi, uo[3])
    branch = np.floor(lbr/2/np.pi) + np.floor(rbr/2/np.pi) + 1 + init_branch
    #branch = np.floor(np.unwrap(np.angle(rbc + 1j*lbc))/2/np.pi) + \
            #np.floor(np.unwrap(np.angle(-rbc - 1j*lbc))/2/np.pi) + 1 + init_branch
    ## Standard Fresnel inversion:
    k = 2*np.pi * frequency/c # the wave vector
    N = np.conj((np.arccos(arg)*sign + 2*np.pi*branch) / (k*d))
    #if abs(frequency[-1]-387.3e9)<1e9: ## debug
        #print "f branch uo", frequency, branch, uo
    return N, uo, (branch, sign, arg, anr, anl)
    ## The triple-quoted block below is unreachable (after return); it is kept
    ## as a scrap-book of diagnostic plotting commands.
    """ For diagnostics, you may also wish to plot these values:#{{{
    #argLog = np.e**(1j*np.angle(arg))*np.log(1+abs(arg)) ## shrinked graph to see the topology
    plt.plot(freq, arg.real, color="#aaaaaa", label=u"$arg$'", lw=1)
    plt.plot(freq, arg.imag, color="#aaaaaa", label=u"$arg$'", lw=1, ls='--')
    #plt.plot(freq, argLog.real, color="#000000", label=u"$arg$'", lw=1)
    #plt.plot(freq, argLog.imag, color="#000000", label=u"$arg$'", lw=1, ls="--")
    #plt.plot(freq, np.ones_like(freq)*np.log(2), color="#bbbbbb", label=u"$arg$'", lw=1)
    #plt.plot(freq, -np.ones_like(freq)*np.log(2), color="#bbbbbb", label=u"$arg$'", lw=1)
    #plt.plot(freq, anr, color="#aaaaff", label=u"$anr$'", lw=1)
    #plt.plot(freq, anl, color="#aaffaa", label=u"$anr$'", lw=1)
    #plt.plot(freq, anr_trunc, color="#0000ff", label=u"$anrR$'", lw=1)
    #plt.plot(freq, anl_trunc*.9, color="#00dd00", label=u"$anrR$'", lw=1)
    #plt.plot(freq, branch*.8, color="#dd0000", label=u"$turn$'", lw=2)
    #plt.plot(freq, sign*.7, color="#ffbb00", label=u"$sign$'", lw=2)
    """#}}}
#}}}
def rt2z(s11, s12, init_sign=1, uo=0):#{{{
    """ Invert Fresnel equations to obtain complex impedance Z

    This function complements the refractive index obtained by rt2n() with the effective impedance.
    The computation is much easier, because the only unambiguous function is the complex square root.
    It allows two solutions differing by their sign. To prevent discontinuities, we calculate the
    square root in polar notation.

    Initial sign may be supplied by the user.

    Returns complex impedance as np.array
    """
    ## (commented-out helper kept for reference)
    #def get_phase(complex_data):
        #""" Unwraps and shifts the phase from Fourier transformation """
        #if len(complex_data) <= 1: return np.angle(complex_data)
        #phase, uo = unwrap_ofs(np.angle(complex_data), uo)
        #center_phase = phase[min(5, len(phase)-1)] ## 5 is chosen to avoid zero freq.
        #return phase-(round(center_phase/2/np.pi)*2*np.pi)
    ## Calculate square root arguments
    Zarg1=((1+s11)**2 - s12**2)
    Zarg2=((1-s11)**2 - s12**2)
    ## Get impedance from polar notation of (Zarg1/Zarg2)
    Zamp = abs(Zarg1 / Zarg2)**.5 ## amplitude of square root
    ## np.unwrap needs a whole spectrum; fall back to plain angle for scalars
    ## or single-point data
    if hasattr(Zarg1, '__len__') and len(Zarg1)>1:
        Zphase, uo = unwrap_ofs(np.angle(Zarg1/Zarg2), uo) ## phase of square root (without discontinuities) TODO
    else:
        Zphase = np.angle(Zarg1/Zarg2)
        uo = 0
    ## Square root in polar form (half the phase), conjugated and sign-fixed
    Z = np.conj(np.exp(1j*Zphase/2) * Zamp) * init_sign
    return Z, uo
    ## The triple-quoted block below is unreachable (after return); kept as a
    ## scrap-book of experimental sign-fixing heuristics.
    """
    ### Possible experimental improvements:
    EnforceZrePos = True
    FlipZByPhaseMagic = True
    Zrealflipper = 1 ## unphysical if not 1
    Zconjugator = 1

    ## Exception to the Re(Z)>0 rule:
    Z_turnaround = (-1)**np.round(Zphase/np.pi)
    if FlipZByPhaseMagic:
        Z = Z * Z_turnaround

    ## For testing only
    Z = (Z.real * Zrealflipper + 1j*Z.imag * Zconjugator)
    if EnforceZrePos:
        Z *= np.sign(Z.real)
    """
#}}}
def nz2epsmu(N, Z):#{{{
    """ Accepts index of refraction and impedance, returns effective permittivity and permeability"""
    eps = N / Z
    mu = N * Z
    return eps, mu
#}}}
def epsmu2nz(eps, mu):#{{{
    """ Accepts permittivity and permeability, returns effective index of refraction and impedance"""
    refr_index = np.sqrt(eps * mu)
    ## Pick the branch with a non-negative imaginary part (passive medium);
    ## note this zeroes N when Im(N) is exactly 0, as in the original
    refr_index *= np.sign(refr_index.imag)
    impedance = np.sqrt(mu / eps)
    return refr_index, impedance
#}}}
def nz2rt(freq, N, Z, d):#{{{
    """ Returns the complex reflection and transmission parameters for a metamaterial slab.

    Useful for reverse calculation of eps and mu (to check results)

    Accepts:
    * frequency array,
    * effective refractive index N,
    * effective impedance Z,
    * thickness d of the layer
    (the vacuum wave vector k is computed internally).
    """
    ## Direct derivation from infinite sum of internal reflections
    wavevector = 2*np.pi * freq/c
    ## Interface coefficients (front / back of the slab)
    t_front = 2 / (1+Z)             # transmission of front interface
    t_back = 2*Z / (Z+1)            # transmission of back interface
    t_front_rev = Z * t_front
    r_front = (Z-1)/(Z+1)           # reflection of front interface
    r_back = (1-Z)/(1+Z)            # reflection of back interface
    ## Single- and double-pass propagation factors through the slab
    phase1 = np.exp(1j*wavevector*N*d)
    phase2 = np.exp(2j*wavevector*N*d)
    denominator = 1 + r_front*r_back*phase2
    s12 = t_front*t_back*phase1 / denominator
    s11 = r_front + t_front_rev*t_front*r_back*phase2 / denominator
    return s11, s12
    ## Note: these results may be also re-expressed using goniometric functions.
    ## Equations from Smith2002 or Cai-Shalaev, mathematically equivalent to those above
    ## (only Smith's s11 has negative sign convention):
    ##     s12new = 1/(np.cos(N*k*d) - .5j*(Z+1/Z)*np.sin(N*k*d))
    ##     s11new = -s12new * .5j*(Z-1/Z)*np.sin(N*k*d)
    ## TODO: implement also for other surrounding media than vacuum.
#}}}
## == Auxiliary functions for monitor-plane fitting ==
def error_func(N1,Z1,N2,Z2,lastdif=0,p0=[0]):#{{{
    """ Used for optimization: tries to match N1,N2 and Z1,Z2, avoiding forbidden values

    Penalizes the mismatch between the two retrievals, the previous-step
    difference, a large monitor-plane shift p0[0], and any unphysical values
    (negative Im(N) or negative Re(Z)).
    """
    mismatch = abs(N1-N2) + abs(Z1-Z2)
    shift_penalty = (abs(p0[0])*1e4)**2
    ## Each term below is zero when the value is physical, 2*|value|*100 otherwise
    passivity_penalty = (abs(np.imag(N1))-np.imag(N1))*100 \
                      + (abs(np.imag(N2))-np.imag(N2))*100 \
                      + (abs(np.real(Z1))-np.real(Z1))*100 \
                      + (abs(np.real(Z2))-np.real(Z2))*100
    return mismatch + lastdif + shift_penalty + passivity_penalty
#}}}
def eval_point(p0):#{{{
    """ Objective for fmin(): retrieval mismatch when both monitor planes are
    shifted by p0[0].

    Operates on module-level state (freq, s11, s12, s11_2, d, d2, the unwrap
    offsets Nuo*/Zuo*, the loop index i and the history list p0s) set up by
    the calling code.
    """
    freq_p = freq[i-1:i+1]
    s11p1 = shiftmp(freq[i-1:i+1], s11[i-1:i+1], p0[0])
    ## NOTE(review): s12p1 is taken from s11, not s12 — this looks like a
    ## typo, but it is kept as-is because changing it would alter the fit;
    ## confirm against the author's intent.
    s12p1 = s11[i-1:i+1]
    new_N1, Nuo1x = rt2n(freq_p, s11p1, s12p1, d, init_branch=0, uo=Nuo1)[0:2]
    new_Z1, Zuo1x = rt2z(s11p1, s12[i-1:i+1], uo=Zuo1)
    s11p2 = shiftmp(freq[i-1:i+1], s11_2[i-1:i+1], p0[0])
    s12p2 = s11_2[i-1:i+1]
    new_N2, Nuo2x = rt2n(freq_p, s11p2, s12p2, d2, init_branch=0, uo=Nuo2)[0:2]
    new_Z2, Zuo2x = rt2z(s11p2, s12[i-1:i+1], uo=Zuo2)
    ## Bug fix: the original tested `p0s[-1] != np.NaN`, which is always True
    ## (NaN never compares equal to anything), so a NaN history entry silently
    ## poisoned lastdif with NaN. Use np.isnan() instead. (np.NaN was also
    ## removed in NumPy 2.0 in favour of np.nan.)
    lastdif = 0 if np.isnan(p0s[-1]) else abs(p0s[-1]-p0[0])*1e5
    return error_func(new_N1[1], new_Z1[1], new_N2[1], new_Z2[1], lastdif=lastdif)
#}}}
## --- Calculation --------------------------------------------
## Get reflection and transmission data
last_simulation_name = get_simulation_name()
freq, s11amp, s11phase, s12amp, s12phase, xlim, (d, plot_freq_min, plot_freq_max, padding) = load_rt(last_simulation_name,
plot_freq_min=plot_freq_min, plot_freq_max=plot_freq_max, truncate=True, padding=padding)
## Compensating the additional padding of the monitor planes
s11 = shiftmp(freq, polar2complex(s11amp, s11phase), padding*np.ones_like(freq))
s12 = shiftmp(freq, polar2complex(s12amp, s12phase), padding*np.ones_like(freq))
## Calculate N, Z and try to correct the signs (TODO use K-K branch selection!)
if len(freq)>2:
N, N_uo, N_debug = rt2n(freq, s11, s12, d, init_branch=N_init_branch, init_sign=N_init_sign)
#print "N before correctio1", N[0:10]
Z, Z_uo = rt2z(s11, s12, init_sign=Z_init_sign)
if autocorrect_signs:
## Fix N sign so that N.imag > 0
if sum(np.clip(N.imag,-10., 10.))<0:
N *= -1
## Fix N branch so that N.real does not diverge at low frequencies
ii = 3
det_branch = np.round(2*np.real(N[ii]*freq[ii]/c*d))
#print "N before correction", N[0:10]
N -= det_branch / (freq/c*d)/2
#print "N after correction", N[0:10]
## Fixing Z sign so that Z.real > 0
#Z *= np.sign(Z.real)
if sum(np.clip(Z.real,-10., 10.))<0:
Z *= -1
#Z, Z_uo = rt2z(s11, s12, init_sign=Z_init_sign)
else:
N = np.zeros_like(freq)
Z = np.zeros_like(freq)
#}}}
## Detect resonances
losses = 1-abs(s11)**2-abs(s12)**2
loss_maxima = np.array(find_maxima(freq,losses))
print "Detected loss maxima at frequencies:", loss_maxima
np.savetxt("last_found_modes.dat", loss_maxima)
## Get epsilon and mu
eps, mu = nz2epsmu(N, Z)
## Verify the results by back-calculating s11, s12
s11backcalc, s12backcalc = nz2rt(freq, N, Z, d)
## Build the debug plots
arg = (1+0j-s11**2+s12**2)/2/(s12)
argLog = np.e**(1j*np.angle(arg))*np.log(1+abs(arg)) ## shrinked graph to see the topology
## --- Plotting to cartesian graphs -------------------------------------------- #{{{
# Four stacked subplots: (1) |r|,|t|,loss  (2) phases  (3) N,Z,FOM  (4) eps,mu
plt.figure(figsize=(15,15))
xticks = np.arange(xlim[0], xlim[1], reasonable_ticks((xlim[1]-xlim[0])/3))
# Label only ticks that fall (within frequnit/1000) on the coarser tick grid
xnumbers = [("%.2f"%(f/frequnit) if abs(f%reasonable_ticks((xlim[1]-xlim[0])/5))<(frequnit/1000) else "") for f in xticks]
marker = "s" if (len(freq) < 20) else "" # Use point markers for short data files
subplot_number = 4
## Plot reflection and transmission amplitudes
plt.subplot(subplot_number, 1, 1)
plt.plot(freq, s11amp, marker=marker, color="#AA4A00", label=u'$|s_{11}|$')
plt.plot(freq, s12amp, marker=marker, color="#004AAA", label=u'$|s_{12}|$')
plt.plot(freq, losses, color="#aaaaaa", label=u'loss')
# Optional overlays of experimental / Comsol reference data, if the files exist
if plot_expe and os.path.exists('t.dat'):
    tf, ty = np.loadtxt('t.dat', usecols=list(range(2)), unpack=True)
    plt.plot(tf*frequnit, ty, lw=0, color='#004AAA', marker='o')
if plot_expe and os.path.exists('../t00kVcm_Comsol.dat'): ## XXX
    tf, ty = np.loadtxt('../t00kVcm_Comsol.dat', usecols=list(range(2)), unpack=True)
    plt.plot(tf*frequnit, ty, lw=0, color='#4A00AA', marker='o')
if plot_expe and os.path.exists('../t90kVcm_Comsol.dat'):
    tf, ty = np.loadtxt('../t90kVcm_Comsol.dat', usecols=list(range(2)), unpack=True)
    plt.plot(tf*frequnit, ty, lw=0, color='#00AA4A', marker='s')
plt.ylabel(u"Amplitude"); plt.ylim((-0.1,1.1)); plt.xlim(xlim)
#plt.xticks(xticks, xnumbers); plt.minorticks_on(); plt.grid(True)
if legend_enable: plt.legend(loc="upper right");
#for lm in loss_maxima: plt.axvspan(lm,lm+1e8, color='r')
## Plot r and t phase
# (Note: phase decreases with frequency, because meep uses the E=E0*exp(-i omega t) convention )
plt.subplot(subplot_number, 1, 2)
plt.plot(freq, np.unwrap(np.angle(s11))/np.pi, marker=marker, color="#AA4A00", label=u'$\\phi(s_{11})/\\pi$')
plt.plot(freq, np.unwrap(np.angle(s12))/np.pi, marker=marker, color="#004AAA", label=u'$\\phi(s_{12})/\\pi$')
#
#plt.plot(freq, np.unwrap(np.angle(s12))/np.pi + np.unwrap(np.angle(s11))/np.pi, marker=marker, color="#888AAA", label=u'$(\\phi(s_{11})+\\phi(s_{11}))/\\pi$')
#plt.plot(freq, np.unwrap(np.angle(s12))/np.pi - np.unwrap(np.angle(s11))/np.pi, marker=marker, color="#AA8A88", label=u'$(\\phi(s_{11})-\\phi(s_{11}))/\\pi$')
#plt.plot(freq, 2*np.unwrap(np.angle(s12))/np.pi + np.unwrap(np.angle(s11))/np.pi, marker=marker, color="#8A88AA", label=u'$(2\\phi(s_{11})+\\phi(s_{11}))/\\pi$')
#plt.plot(freq, 2*np.unwrap(np.angle(s12))/np.pi - np.unwrap(np.angle(s11))/np.pi, marker=marker, color="#8AAA88", label=u'$(2\\phi(s_{11})-\\phi(s_{11}))/\\pi$')
#plt.plot(freq, np.unwrap(np.angle(s12))/np.pi + 2*np.unwrap(np.angle(s11))/np.pi, marker=marker, color="#88AA8A", label=u'$(\\phi(s_{11})+2\\phi(s_{11}))/\\pi$')
#plt.plot(freq, np.unwrap(np.angle(s12))/np.pi - 2*np.unwrap(np.angle(s11))/np.pi, marker=marker, color="#AA888A", label=u'$(\\phi(s_{11})-2\\phi(s_{11}))/\\pi$')
# Optional: debugging curves(branch, sign, arg, anr, anl)
if len(freq)>2:
    #plt.plot(freq, N_debug[0]*.95, color="#dd0000", label=u"$br$", lw=1.6)
    #plt.plot(freq, N_debug[1]*.90, color="#dd8800", label=u"$si$", lw=1.6)
    #plt.plot(freq, N_debug[2].real, color="#00dd00", label=u"$arg^'$", lw=.6, ls='-')
    #plt.plot(freq, N_debug[2].imag, color="#00dd00", label=u"$arg^{''}$", lw=.6, ls='--')
    #plt.plot(freq, np.sign(N_debug[2].imag), color="#008800", label=u"sign$arg^{''}$", lw=.3, ls='-')
    #plt.plot(freq, np.arccos(N_debug[2]).real, color="#0000dd", label=u"arccos$arg^'$", lw=1.6, ls='-')
    #plt.plot(freq, np.log10(np.pi-np.arccos(N_debug[2]).real), color="#0000dd", label=u"arccos$arg^'$", lw=.6, ls='-')
    #plt.plot(freq, np.arccos(N_debug[2]).imag, color="#0000dd", label=u"arccos$arg^{''}$", lw=1.6, ls='--')
    #plt.plot(freq, np.log10(abs(N_debug[2].imag)), color="#000000", label=u"log$arg^{''}$", lw=.6, ls='--')
    #plt.plot(freq, abs(N_debug[2] - (1+0j)), color="#0088dd", label=u"$|arg-1|$", lw=2, ls='-')
    #plt.plot(freq, abs(N_debug[2] + (1+0j)), color="#8800dd", label=u"$|arg+1|$", lw=2, ls='-')
    #plt.plot(freq, np.log10(abs(N_debug[2] - (1+0j))), color="#0088dd", label=u"", lw=1, ls='-')
    #plt.plot(freq, np.log10(abs(N_debug[2] + (1+0j))), color="#8800dd", label=u"", lw=1, ls='-')
    #plt.plot(freq, np.sign(N_debug[2].imag), color="#00dd00", label=u"$sgn arg^{''}$", lw=.6, ls=':')
    # Guide lines at phase = +-1 pi
    plt.plot(freq, -np.ones_like(freq), color="k", label=u"", lw=.3, ls='-')
    plt.plot(freq, np.ones_like(freq), color="k", label=u"", lw=.3, ls='-')
    if autobranch:
        # Detection of key points in the spectrum (PBG boundaries, branch skips etc.)
def find_maxima(x, y, minimum_value=.1):
    """
    Return the x positions of the local maxima of y that are above minimum_value.

    A sample y[i] counts as a maximum when the naive first derivative dy/dx
    changes sign from + to - around it (y rises into i and falls after it)
    AND the sample just after the peak, y[i+1], exceeds `minimum_value`.

    Bug fix: the original indexed x[1:] (length len(x)-1) with a boolean mask of
    length len(x)-4; old NumPy silently ignored the mask's missing tail, but
    modern NumPy raises IndexError on mismatched boolean masks.  x[1:-3] has
    exactly the mask's length and selects the very same points.
    """
    d = y[1:-1] - y[0:-2]   ## naive first derivative
    # mask[m] is True  <=>  d[m] > 0, d[m+1] < 0, and y[m+2] > minimum_value;
    # the corresponding maximum sits at index m+1, i.e. at x[1:-3][m]
    mask = (np.sign(d[0:-2]) - np.sign(d[1:-1]) + np.sign(y[2:-2] - minimum_value)) == 3
    return x[1:-3][mask]
def find_maxima_indices(x, y, minimum_value=.1):
    """
    Return the integer indices (int16) of the local maxima of y above minimum_value.

    Same detection rule as find_maxima (dy/dx changes sign from + to -, and the
    sample just after the peak exceeds `minimum_value`), but returns positions
    into the arrays instead of x values.

    Bug fix: the original indexed np.arange(1, len(x)) (length len(x)-1) with a
    boolean mask of length len(x)-4; modern NumPy raises IndexError on such a
    mismatch.  np.arange(1, len(x)-3) matches the mask length and yields the
    same indices that old NumPy selected.
    """
    d = y[1:-1] - y[0:-2]   ## naive first derivative
    mask = (np.sign(d[0:-2]) - np.sign(d[1:-1]) + np.sign(y[2:-2] - minimum_value)) == 3
    return np.arange(1, len(x)-3, dtype=np.dtype(np.int16))[mask]
# NOTE(review): automatic N-branch selection.  The spectrum is cut at the points
# where arg approaches +1 or -1 (band edges / possible branch skips) and rt2n()
# is re-run chunk by chunk, carrying the unwrap state pN_uo across chunks.
argPmin = find_maxima_indices(freq, -abs(N_debug[2] - (1+0j)), minimum_value=-np.inf)
argNmin = find_maxima_indices(freq, -abs(N_debug[2] + (1+0j)), minimum_value=-np.inf)
## (todo) check: maybe required, maybe not
#argNmax = find_maxima_indices(freq, abs(N_debug[2] + (1+0j)), minimum_value=-np.inf)
#plt.plot(freq[argNmax], np.zeros_like(argNmax), marker='o', color="#dd0000")
#allindices = np.hstack([np.array([0]), argPmin, argNmin, argNmax])
## Concatenate & sort all indices of interesting points
allindices = np.hstack([np.array([0]), argPmin, argNmin])
allindices.sort()
## Remove duplicate indices
allindices = np.hstack([allindices[0], [x[0] for x in zip(allindices[1:],allindices[:-1]) if x[0]!=x[1]]])
plt.plot(freq[allindices], np.zeros_like(allindices), marker='x', color="k")
## Scan through all photonic bands/bandgaps, seleting the correct N branch
print 'allindices', allindices
#N_init_branch = 0
print 'N_init_sign', N_init_sign
#N_init_sign = -1
#pN_uo = [0,0,0,0]
pN_uo = [2*np.pi,2*np.pi,2*np.pi,0]
det_branch = 0
#for i in [0]: ## whole spectrum
#i1 = 0
#i2 = len(freq)-1
for i in range(len(allindices)-1): ## spectrum by chunks
    # q==0: the long interior of the chunk; q==1: the short 2-point bridge
    # across the chunk boundary (keeps the unwrap state continuous)
    for q in (0,1):
        if q==0:
            print 'LONG ',
            i1 = allindices[i]
            i2 = allindices[i+1]-1
            #i2 = allindices[i+1]+1 ## .. works for 'q in [0]'
        else:
            print 'SHORT',
            i1 = allindices[i+1]-1
            i2 = allindices[i+1]+1
        if i1>=i2: continue
        pfreq = freq[i1:i2]
        # NOTE(review): 600e9 looks like a hard-coded upper frequency cutoff -- confirm
        if not q and pfreq[0] > 600e9: break
        pts = np.arange(10000)[i1:i2]; print pts[0], pts[-1],; print pfreq[0]/1e9,
        ps11 = s11[i1:i2]
        ps12 = s12[i1:i2]
        print 'start=', np.array(pN_uo)/np.pi,
        ## Plot oldschool N
        pN_uo_old = pN_uo
        pN, pN_uo, pN_debug = rt2n(pfreq, ps11, ps12, d, init_branch=N_init_branch, init_sign=N_init_sign, uo=pN_uo)
        #if q!=0: pN_uo = pN_uo_old
        print 'end=', np.array(pN_uo)/np.pi
        # Branch offset is determined once, from the first chunk, then applied to all
        if i == 0:
            try:
                #print len(pN)
                ii = 0
                det_branch = np.round(2*np.real(pN[ii]*freq[ii]/c*d))
                #print 'det_branch', det_branch
            except:
                pass
        #print "N before correction", N[0:10]
        pN -= det_branch / (pfreq/c*d)/2
        plt.plot(pfreq, pN.real, lw=1.2, marker='o', markersize=2)
        #plt.plot(pfreq, pN.imag, lw=.8, ls='--')
        ## Plot oldschool UO
        #plt.plot(pfreq, np.ones_like(3pfreq)*pN_uo_old[0]/10, lw=3, c='#8888ff')
        #plt.plot(pfreq, np.ones_like(pfreq)*pN_uo_old[1]/10, lw=3, c='#88ff88', ls='-')
        #plt.plot(pfreq, np.ones_like(pfreq)*pN_uo_old[2]/10, lw=3, c='#ff8888', ls='-')
        #plt.plot(pfreq, np.ones_like(pfreq)*pN_uo_old[3]/10, lw=3, c='#88ffff', ls='-')
# Finish the phase subplot
plt.ylabel(u"Phase"); None
plt.ylim((-15,15))
plt.xlim(xlim) # XXX
#plt.xlim((00e9, 440e9))
plt.xticks(xticks, xnumbers); plt.minorticks_on(); plt.grid(True)
if legend_enable: plt.legend();
## Plot Z, N and figure-of-merit
plt.subplot(subplot_number, 1, 3)
if brillouin_boundaries:
    # Brillouin-zone boundaries: N = i*c/(2*f*d) for the first few orders i
    for i in range(1,4):
        plt.plot(freq, c/(2*freq*d)*i, color="#000000", label=u'', ls='-', lw=.5, alpha=.5)
        plt.plot(freq, -c/(2*freq*d)*i, color="#000000", label=u'', ls='-', lw=.5, alpha=.5)
if check_hilbert and len(freq)>1:
    # Kramers-Kronig consistency check of N and Z via the Hilbert transform
    import scipy.fftpack
    N[0] = N[1] ## avoid NaN
    #np.kaiser(len(N), 5)
    N_KK = scipy.fftpack.hilbert(N.real + 1j*abs(N.imag)) / 1j
    plt.plot(freq, np.real(N_KK), color="#FF9900", label=u"$N^{'}_{KK}$", alpha=1)
    plt.plot(freq, np.imag(N_KK), color="#FF9900", label=u'$N^{''}_{KK}$', ls='--', alpha=1)
    plt.plot(freq, np.real(N_KK)-np.real(N), color="#99FF00", label=u"$\\Delta N^{'}_{KK}$", alpha=.5)
    plt.plot(freq, np.imag(N_KK)-np.imag(N), color="#99FF00", label=u'$\\Delta N^{''}_{KK}$', ls='--', alpha=.5)
    Z[0] = Z[1]
    Z_KK = scipy.fftpack.hilbert(Z.real + 1j*Z.imag) / 1j ## Why minus needed?
    #plt.plot(freq, np.real(Z_KK), color="#0099FF", label=u"$Z^{'}_{KK}$", alpha=.3)
    #plt.plot(freq, np.imag(Z_KK), color="#4499FF", label=u'$Z^{''}_{KK}$', ls='--', alpha=.3)
    DZr = np.real(Z_KK)-np.real(Z)
    DZi = np.imag(Z_KK)-np.imag(Z)
    #plt.plot(freq, DZr, color="#DDDD00", label=u"$\\Delta Z^{'}_{KK}$", alpha=.3)
    #plt.plot(freq, DZi, color="#DDDD44", label=u'$\\Delta Z^{''}_{KK}$', ls='--', alpha=.3)
    #plt.plot(freq[1:], (DZr[1:]+DZr[:-1])/2, color="#DDDD00", label=u"$\\Delta Z^{'}_{KK}$", alpha=.31)
    #plt.plot(freq[1:], (DZi[1:]+DZi[:-1])/2, color="#DDDD44", label=u'$\\Delta Z^{''}_{KK}$', ls='--', alpha=.31)
plt.plot(freq, np.real(N), color="#33AA00", label=u"$N$'")
plt.plot(freq, np.imag(N), color="#33AA33", label=u'$N$"', ls='--')
plt.plot(freq, np.real(Z), color="#0044DD", label=u"$Z$'")
plt.plot(freq, np.imag(Z), color="#4466DD", label=u'$Z$"', ls='--')
# Figure of merit log10(|N'/N''|), split by the sign of N'
plt.plot(freq, np.log(-(np.real(N)/np.imag(N)))/np.log(10),
        color="#FF9922", ls=":", label=u"$N^'<0$ FOM")
plt.plot(freq, np.log((np.real(N)/np.imag(N)))/np.log(10), \
        color="#BB22FF", ls=":", label=u"$N^'>0$ FOM")
plt.ylabel(u"Value");
plt.ylim((-5., 15.));
plt.xlim(xlim);
plt.xticks(xticks, xnumbers); plt.minorticks_on(); plt.grid(True)
if legend_enable: plt.legend();
## 4) Plot epsilon and mu
plt.subplot(subplot_number, 1, 4)
if find_plasma_frequency:
    try:
        # Zero-crossings of eps.real give the plasma frequency estimates
        from scipy.optimize import fsolve
        x, y = freq, eps.real
        estimates = x[np.where(np.diff(np.sign(y)))[0]]
        print "Plasma frequency (eps=0) at:", fsolve(lambda x0: np.interp(x0, x, y), estimates)
    except:
        print "Plasma frequency (epsilon(f) == 0) detection failed"
plt.xlabel(u"Frequency [%s]" % frequnitname)
# Optional overlays of experimental eps/mu data, if present
if plot_expe and os.path.exists('eps.dat'):
    tf, ty = np.loadtxt('eps.dat', usecols=list(range(2)), unpack=True)
    plt.plot(tf*frequnit, ty, lw=0, color='#AA0088', marker='o') ## XXX
    plt.plot(tf*frequnit, -ty, lw=0, color='#AA8888', marker='s') ## XXX
    #plt.plot(tf , ty, lw=0, color='#AA0088', marker='o') ## XXX
if plot_expe and os.path.exists('mu.dat'):
    tf, ty = np.loadtxt('mu.dat', usecols=list(range(2)), unpack=True)
    plt.plot(tf*frequnit, ty, lw=0, color='#AA8800', marker='o') ## XXX
    plt.plot(tf*frequnit, -ty, lw=0, color='#AA8888', marker='s') ## XXX
    #plt.plot(tf , ty, lw=0, color='#AA0088', marker='o') ## XXX
if check_hilbert and len(freq)>1:
    ## Kramers-Kronig self-check: compare eps and mu against their Hilbert transforms
    import scipy.fftpack
    eps[0] = 0 ## avoid NaN
    eps_KK = scipy.fftpack.hilbert(eps.real + 1j*abs(eps.imag)) / 1j
    plt.plot(freq, np.real(eps_KK), color="#FF9900", label=u"$eps^{'}_{KK}$", alpha=.5)
    plt.plot(freq, np.imag(eps_KK), color="#FF9900", label=u'$eps^{''}_{KK}$', ls='--', alpha=.5)
    plt.plot(freq, np.real(eps_KK)-np.real(eps), color="#FF0099", label=u"$eps^{'}_{KK}$", alpha=.5)
    plt.plot(freq, np.imag(eps_KK)-np.imag(eps), color="#FF0099", label=u'$eps^{''}_{KK}$', ls='--', alpha=.5)
    mu[0] = 0
    ## BUG FIX: mu_KK was computed from N (copy-paste of the N_KK expression);
    ## it must be the Hilbert transform of mu itself, mirroring the eps_KK line,
    ## otherwise the plotted mu_KK-mu residual below is meaningless.
    mu_KK = scipy.fftpack.hilbert(mu.real + 1j*abs(mu.imag)) / 1j
    plt.plot(freq, np.real(mu_KK), color="#0099FF", label=u"$mu^{'}_{KK}$", alpha=.5)
    plt.plot(freq, np.imag(mu_KK), color="#4499FF", label=u'$mu^{''}_{KK}$', ls='--', alpha=.5)
    plt.plot(freq, np.real(mu_KK)-np.real(mu), color="#0099FF", label=u"$mu^{'}_{KK}$", alpha=.5)
    plt.plot(freq, np.imag(mu_KK)-np.imag(mu), color="#4499FF", label=u'$mu^{''}_{KK}$', ls='--', alpha=.5)
# Main eps/mu curves of subplot 4 (symlog y-axis so both resonances and
# near-zero regions stay visible)
plt.plot(freq, np.real(eps), color="#AA0088", label=u"$\\varepsilon_{eff}$'")
plt.plot(freq, np.imag(eps), color="#AA66DD", label=u'$\\varepsilon_{eff}$"', ls='--')
plt.plot(freq, np.real(mu), color="#AA8800", label=u"$\\mu_{eff}$'")
plt.plot(freq, np.imag(mu), color="#AA8844", label=u'$\\mu_{eff}$"', ls='--')
plt.ylabel(u"Value"); plt.ylim((-1000.,1000.)); plt.yscale('symlog', linthreshy=10.); plt.xlim(xlim)
#plt.xticks(xticks, xnumbers); plt.minorticks_on(); plt.grid(True)
if legend_enable: plt.legend();
## 5) Verification of calculated data by calculating reflection and transmission again
# Back-calculated |s11|, |s12| are overlaid on subplot 1 -- they should coincide
# with the input amplitudes if the retrieval is self-consistent
plt.subplot(subplot_number, 1, 1)
plt.plot(freq, abs(s11backcalc), color="#FA9962", label=u'$|s_{11FD}|$', ls='--')
plt.plot(freq, abs(s12backcalc), color="#6299FA", label=u'$|s_{12FD}|$', ls='--')
plt.xticks(xticks, xnumbers); plt.minorticks_on(); plt.grid(1)
plt.xlim(xlim); plt.ylim((-0.1,1.1))
## Final plotting
plt.savefig(last_simulation_name+".png", bbox_inches='tight')
#}}}
## --- Plotting to k-omega graph -------------------------------------------- #{{{
# Dispersion diagram: wavenumber (folded into the first Brillouin zone) vs frequency
if plot_bands and not os.path.exists("band"): os.mkdir("band")
if plot_bands and os.path.isdir("band"):
    plt.figure(figsize=(8,8))
    plt.plot(np.arcsin(np.sin(np.real(N*freq*d/c) * np.pi)) / np.pi, freq, color="#33AA00", label=u"$k$'")
    plt.plot(np.imag(N*freq*d/c), freq, color="#33AA33", label=u'$\\kappa$', ls='--')
    ## Detection of bandgap: ratio of the real to the imaginary part of complex wavenumber
    ## the real part however may reach borders of Brillouin zone: we will use its sine
    try:
        realpart = np.arcsin(np.sin(np.pi * 2*np.real(N*freq/c*d)))
        imagpart = np.abs(np.imag(N*freq/c*d))
        pbg_indicator = np.sign(abs(realpart) - abs(imagpart))
        ## starts and ends of band-gap
        pbg_starts = np.interp(np.where(pbg_indicator[1:] < pbg_indicator[0:-1]), range(len(freq)), freq)[0]
        pbg_ends = np.interp(np.where(pbg_indicator[1:] > pbg_indicator[0:-1]), range(len(freq)), freq)[0]
        ## Fix the un-started and un-ended bandgaps (TODO)
        #print len(pbg_starts), len(pbg_ends)
        if len(pbg_starts) < len(pbg_ends): pbg_starts = np.concatenate([np.array([0]), pbg_starts])
        #print len(pbg_starts), len(pbg_ends)
        if len(pbg_starts) > len(pbg_ends): pbg_starts = pbg_starts[:-1]
        #print pbg_ends, pbg_starts
        for start, end in np.vstack([pbg_starts, pbg_ends]).T:
            #print start, end
            plt.axhspan(start, end, color='#FFDD00', alpha=.1)
    except:
        print "Bandgap detection failed"
    plt.ylabel(u"frequency");
    plt.xlabel(u"wavenumber $ka/\\pi$");
    plt.xlim((-.5, .5));
    # NOTE(review): xticks here reuses the *frequency* tick positions on an axis
    # limited to (-.5, .5) in units of ka/pi -- looks like a leftover; confirm.
    plt.xticks(xticks, xnumbers); plt.minorticks_on();
    plt.grid(True)
    if legend_enable: plt.legend(loc="upper right");
    ## Final plotting
    splitpath = os.path.split(last_simulation_name)
    outfile = os.path.join(splitpath[0], "band", splitpath[1]+"_band.png")
    plt.savefig(outfile, bbox_inches='tight')
#}}}
## --- Nice plotting to PDF ----------------------------------------------------------------------------------#{{{
# Publication-quality figure: the `toplot` dict switches individual panels on/off;
# subplot_index walks through the enabled panels top to bottom.
if plot_publi and not os.path.exists("publi"): os.mkdir("publi")
if plot_publi:
    #matplotlib.rc('text', usetex=True)
    #matplotlib.rc('text.latex', preamble = \
    #'\usepackage{amsmath}, \usepackage{yfonts}, \usepackage{txfonts}, \usepackage{lmodern},')
    # ../../effparam_clean.py SphereWireYaki_resolution=4.00e-06_comment=XYS_simtime=5.00e-10_wlth=2.40e-05_wzofs=0.00e+00_monzd=1.00e-04_cells=1.00e+00_spacing=9.00e-05_monzc=0.00e+00_radius=3.75e-05_wtth=6.00e-06
    matplotlib.rc('text', usetex=True)
    matplotlib.rc('font', size=14)
    matplotlib.rc('text.latex', preamble = \
            '\usepackage{amsmath}, \usepackage{palatino},\usepackage{upgreek}')
    matplotlib.rc('font',**{'family':'serif','serif':['palatino, times']}) ## select fonts
    fig = plt.figure(figsize=(8,8)); toplot = {'rt':1, 'N':1, 'eps':1, 'mu':1, 'Z':0} ## For XYS, XYSAs
    fig.subplots_adjust(left=.05, bottom=.05, right=.99, top=.99, wspace=.05, hspace=.05) ## XXX
    #plt.figure(figsize=(6,6)); toplot = {'rt':1, 'N':0, 'eps':1, 'mu':1, 'Z':0} ## For XYS, XYSAs
    #plt.figure(figsize=(6,5)); toplot = {'rt':1, 'N':0, 'eps':0, 'mu':1, 'Z':0} ## For S
    #plt.figure(figsize=(6,5)); toplot = {'rt':1, 'N':0, 'eps':1, 'mu':0, 'Z':0} ## For XY
    subplot_count = sum(toplot.values())
    subplot_index = 1
    subplot_columns = [1,1,1,1,1]
    ## ---- r, t -----
    if toplot['rt']:
        ax= plt.subplot(subplot_count, subplot_columns[subplot_index], subplot_index)
        #plt.title(u"Dielectric spheres $r=%d\\;\\upmu$m" % 25)
        #plt.title(u"Dielectric spheres in wire mesh")
        plt.title(u"Wire mesh")
        ax.label_outer()
        plt.grid()
        plt.plot(freq, s11amp, marker=marker, color="#880000", label=u'$|r|$', lw=1)
        plt.plot(freq, s12amp, marker=marker, color="#0088ff", label=u'$|t|$', lw=1)
        plt.ylabel(u"Amplitude");
        if plot_expe and os.path.exists('t.dat'):
            tf, ty = np.loadtxt('t.dat', usecols=list(range(2)), unpack=True)
            plt.plot(tf*frequnit, ty, lw=0, color='#004AAA', marker='o', ms=2, label=u'$|t|$ exp')
        subplot_index += 1
        plt.xticks(xticks, xnumbers); plt.minorticks_on();
        plt.xlim(xlim); plt.ylim((0,1.)); plt.legend(loc='center right');
    ## Todo allow plotting phase! (And in the 'cartesian' plot, too)
    ## ---- N -----
    if toplot['N']:
        ax = plt.subplot(subplot_count, subplot_columns[subplot_index], subplot_index)
        ax.label_outer()
        plt.grid()
        plt.ylabel(u"Index of refraction $N_{\\text{eff}}$");
        # Guide lines at integer and half-integer Brillouin-zone multiples of c/(f*d)
        for ii in np.arange(-5, 5):
            plt.plot(freq, ii*c/freq/d, color="#000000", label=u"", lw=.2)
            plt.plot(freq, (ii+.5)*c/freq/d, color="#777777", label=u"", lw=.2)
        #TODO if plot_expe and os.path.exists('k.dat'):
        #tf, ty = np.loadtxt('t.dat', usecols=list(range(2)), unpack=True)
        #plt.plot(tf*frequnit, ty, lw=0, color='#004AAA', marker='o', ms=2, label=u'$|t|$ exp')
        plt.plot(freq, np.real(N), color="#448800", label=u"$N'$")
        plt.plot(freq, np.imag(N), color="#448800", label=u"$N''$", ls='--')
        if check_hilbert and len(freq)>1:
            plt.plot(freq, np.real(N_KK), color="#dd88aa", label=u"")
            plt.plot(freq, np.imag(N_KK), color="#dd88aa", label=u"", ls='--')
        plt.xticks(xticks, xnumbers); plt.minorticks_on()
        plt.xlim(xlim); plt.ylim((-5,5)); plt.legend(loc='lower right');
        subplot_index += 1
    ## ----- EPS -----
    if toplot['eps']:
        ax = plt.subplot(subplot_count, subplot_columns[subplot_index], subplot_index)
        ax.label_outer()
        plt.grid()
        plt.ylabel(u"Permittivity $\\varepsilon_{\\text{eff}}$")
        plt.plot(freq, np.real(eps), color="#660044", label=u"$\\varepsilon'$")
        plt.plot(freq, np.imag(eps), color="#660044", label=u"$\\varepsilon''$", ls='--')
        # NOTE(review): 1100e9 appears to be a hard-coded plasma frequency -- confirm
        plt.plot(freq, 1-(1100e9/freq)**2, color="#888888", label=u"$1-\\frac{f_p^2}{f^2}$", ls='-') ## Drude model
        plt.xticks(xticks, xnumbers); plt.minorticks_on()
        plt.xlim(xlim); plt.ylim((-12.,3.)); plt.legend(loc='lower right');
        subplot_index += 1
    ## ----- MU -----
    if toplot['mu']:
        ax = plt.subplot(subplot_count, subplot_columns[subplot_index], subplot_index)
        ax.label_outer()
        plt.grid()
        plt.ylabel(u"Permeability $\\mu_{\\text{eff}}$");
        plt.plot(freq, np.real(mu), color="#663300", label=u"$\\mu'$")
        plt.plot(freq, np.imag(mu), color="#663300", label=u"$\\mu''$", ls='--')
        plt.xticks(xticks, xnumbers); plt.minorticks_on();
        plt.xlim(xlim);
        plt.ylim((-5,10));
        plt.legend(loc='upper right');
        subplot_index += 1
    ### ----- Z -----
    if toplot['Z']:
        # NOTE(review): uses subplot_number here while the other panels use
        # subplot_count -- probably a leftover; harmless while toplot['Z']==0.
        ax = plt.subplot(subplot_number, 1, subplot_index)
        ax.label_outer()
        plt.ylabel(u"Impedance"); plt.ylim((-2.,4.))
        plt.plot(freq, np.real(Z), color="#004488", label=u"$Z'$")
        plt.plot(freq, np.imag(Z), color="#004488", label=u"$Z''$", ls='--')
        plt.xticks(xticks, xnumbers); plt.minorticks_on();
        plt.xlim(xlim); plt.legend(loc=(.03,.6));
        subplot_index += 1
    plt.xlabel(u"Frequency [%s]" % frequnitname)
    #plt.xlim((0, 1.8))
    #plt.grid()
    splitpath = os.path.split(last_simulation_name)
    outfile = os.path.join(splitpath[0], "publi", splitpath[1]+"_publi.pdf")
    plt.savefig(outfile, bbox_inches='tight')
#}}}
## --- Save data to /tmp/effparam.dat ------------------------------------------#{{{
## This is again in the PKGraph ascii format; see loadrt() docstring for further info
if savedat_wd:
    # Save next to the simulation data, under effparam/
    if not os.path.exists("effparam"): os.mkdir("effparam")
    splitpath = os.path.split(last_simulation_name)
    savedatfile = os.path.join(splitpath[0], "effparam", splitpath[1]+"_effparam.dat")
else:
    savedatfile = "/tmp/effparam.dat"
if savedat or savedat_wd:
    header = ""
    ## Copy parameters
    # Keep every '#' header line except the column definitions ("olumn" matches
    # both "Column" and "column"), which are rewritten below
    with open(last_simulation_name+".dat") as datafile:
        for line in datafile:
            if (line[:1]=="#") and (not "olumn" in line): header+=line
    with open(savedatfile, "w") as outfile:
        ## Post-fixing the older files from rtsim to PKGraph
        if not "itle" in header: outfile.write("#title Simulation %s\n" % last_simulation_name.split("_")[0])
        if not "arameters" in header: outfile.write("#Parameters Parameters\n")
        header = re.sub("Parameter", "param", header)
        ## Write column headers
        outfile.write(header)
        outfile.write("#x-column Frequency [Hz]\n#Column |r|\n#Column r phase\n#Column |t|\n#Column t phase\n" + \
                "#Column real N\n#Column imag N\n#Column real Z\n#Column imag Z\n" + \
                "#Column real eps\n#Column imag eps\n#Column real mu\n#Column imag mu\n")
        ## Write column data
        # NOTE(review): Python 2 zip() returns a list; under Python 3 this would
        # need list(zip(...)) or np.column_stack for np.savetxt.
        np.savetxt(outfile, zip(freq, s11amp, s11phase, s12amp, s12phase,
                N.real, N.imag, Z.real, Z.imag, eps.real, eps.imag, mu.real, mu.imag), fmt="%.8e")
#}}}
## --- Plot polar ------------------------------------------------------------#{{{
# Complex-plane trajectories of each quantity, colored by frequency, with the
# loss maxima annotated along each curve.
if plot_polar and not os.path.exists("polar"): os.mkdir("polar")
if plot_polar and os.path.isdir("polar"):
    ## Truncate the arrays (optional)
    #(d0,d1) = np.interp((500e9, 650e9), freq, range(len(freq)))
    #(freq, s11, s12, N, Z, eps, mu, arg, argLog) = \
    #map(lambda a: a[int(d0):int(d1)], (freq, s11, s12, N, Z, eps, mu, arg, argLog))
    print "Plotting polar..."
    from matplotlib.collections import LineCollection
    lims={"s11":(-1,1), "s12":(-1,1), "N":(-10,10), "Z":(-5,5),
            "mu":(-10,10), "eps":(-10,10), "arg":(-3,3), "argLog":(-10,10) }
    datalist=(s11, s12, N, Z, eps, mu, arg, argLog)
    plotlabels=("s11", "s12", "N", "Z", "eps", "mu", "arg", "argLog")
    freqlabels = np.append(loss_maxima[loss_maxima<plot_freq_max], freq[-1])
    fig = plt.figure(figsize=(11,22))
    subplot_number = len(datalist)
    for (subpl, data, plotlabel) in zip(range(subplot_number), datalist, plotlabels):
        plt.subplot(4,2,subpl+1)
        if plotlabel.startswith('s'):
            # Reference circles: unit circle plus the |s|-related guide circles
            plt.plot(np.sin(np.linspace(0,2*np.pi)), np.cos(np.linspace(0,2*np.pi)), c='#888888')
            plt.plot(np.sin(np.linspace(0,2*np.pi))/2+.5, np.cos(np.linspace(0,2*np.pi))/2, c='#aaaaaa')
            plt.plot(np.sin(np.linspace(0,2*np.pi))+1, np.cos(np.linspace(0,2*np.pi))+1, c='#aaaaaa')
            plt.plot(np.sin(np.linspace(0,2*np.pi))+1, np.cos(np.linspace(0,2*np.pi))-1, c='#aaaaaa')
        x = data.real; y = data.imag
        # Parametrize the trajectory by frequency for the colormap
        t = np.linspace(0, 10, len(freq))
        points = np.array([x, y]).T.reshape(-1, 1, 2)
        segments = np.concatenate([points[:-1], points[1:]], axis=1)
        lc = LineCollection(segments, cmap=plt.get_cmap('jet'), norm=plt.Normalize(0, 10))
        lc.set_array(t)
        lc.set_linewidth(2)
        plt.gca().add_collection(lc)
        ## Add black points to every xtick
        xpoints = np.interp(xticks, freq, x)
        ypoints = np.interp(xticks, freq, y)
        for xpoint, ypoint in zip(xpoints, ypoints):
            plt.plot(xpoint, ypoint, marker="o", markersize=3, color="#000000", label='')
        ## Annotate resonant frequencies
        xpoints = np.interp(freqlabels, freq, x.real)
        ypoints = np.interp(freqlabels, freq, y.real)
        freqlabelstxt = [("%d" % (fr*1000/frequnit)) for fr in freqlabels]
        for label, xpoint, ypoint in zip(freqlabelstxt, xpoints, ypoints):
            plt.annotate(label, xy = (xpoint, ypoint), xytext = (-10, 10),
                    textcoords = 'offset points', ha = 'right', va = 'bottom',
                    bbox = dict(boxstyle = 'round,pad=.15', fc = 'white', alpha = 0.5),
                    arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
            plt.plot(xpoint, ypoint, marker="o", markersize=2, color="#000000", label='')
        lim = lims[plotlabel]
        plt.xlim(lim); plt.ylim(lim); plt.grid(True); plt.title(plotlabel)
    ## Final plotting
    splitpath = os.path.split(last_simulation_name)
    outfile = os.path.join(splitpath[0], "polar", splitpath[1]+"_polar.png")
    plt.savefig(outfile, bbox_inches='tight')
#}}}
## ----------------------------------------------------------------------------------------------------
## Notes
# arh = (1+0j+s11**2-s12**2)/2/(s11); np.round(get_phase(arh)/np.pi/2) ## This is a Z.imag zero-pass detector
def get_cmdline_parameters():#{{{
# (optional) Manual N branch override
if len(sys.argv)>2 and sys.argv[2] != "-" and __name__ == "__main__":
print "Setting branch:", sys.argv[2]
branch_offset = np.ones(len(freq))*int(sys.argv[2])
last_simulation_name += "_BRANCH=%s" % sys.argv[2]
if len(sys.argv)>3 and sys.argv[3] != "-" and __name__ == "__main__":
print "Setting branch sign:", sys.argv[3]
Nsign = np.ones(len(freq))*int(sys.argv[3])
last_simulation_name += "_SIGN=%s" % sys.argv[3]
return branch_offset, Nsign#}}}
|
Buy delicious vegan caviar in Lantry, South Dakota. We are proud to supply all of South Dakota with our large caviar selection and gourmet foods. We strive to make it simple for South Dakota customers not only to get the highest possible quality, but to get it at affordable prices, delivered right to their door. We provide FedEx overnight shipping (including same-day, every-day caviar delivery throughout Los Angeles, California), as well as Saturday delivery throughout Lantry, South Dakota and all of the United States.
|
# GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from common import *
from options import *
from pool import *
from debug_se import *
from analytics_policy import *
from vs_datascript import *
from application_policy import *
from auth import *
from rate import *
from gslb import *
from dns import *
from dns_policy import *
from content_rewrite_profile import *
from traffic_clone_profile import *
from error_page import *
from l4_policy import *
class ServicePoolSelector(object):
    """Generated Heat schema wrapper for the Avi ServicePoolSelector object.

    Holds only class-level ``properties.Schema`` declarations plus the
    PROPERTIES / properties_schema / field_references / unique_keys tables
    consumed by the surrounding generated resource plumbing.
    """
    # all schemas
    service_port_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Pool based destination port"),
        required=True,
        update_allowed=True,
    )
    service_pool_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    service_protocol_schema = properties.Schema(
        properties.Schema.STRING,
        _("Destination protocol to match for the pool selection. If not specified, it will match any protocol."),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['PROTOCOL_TYPE_TCP_FAST_PATH', 'PROTOCOL_TYPE_TCP_PROXY', 'PROTOCOL_TYPE_UDP_FAST_PATH', 'PROTOCOL_TYPE_UDP_PROXY']),
        ],
    )
    service_pool_group_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    service_port_range_end_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 17.2.4) The end of the Service port number range. (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'service_port',
        'service_pool_uuid',
        'service_protocol',
        'service_pool_group_uuid',
        'service_port_range_end',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'service_port': service_port_schema,
        'service_pool_uuid': service_pool_uuid_schema,
        'service_protocol': service_protocol_schema,
        'service_pool_group_uuid': service_pool_group_uuid_schema,
        'service_port_range_end': service_port_range_end_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'service_pool_uuid': 'pool',
        'service_pool_group_uuid': 'poolgroup',
    }
    unique_keys = {
        'my_key': 'service_port,service_protocol,service_port_range_end',
    }
class VirtualServiceResource(object):
    """Generated Heat schema wrapper for the Avi VirtualServiceResource object.

    Pure declaration class: SE sizing / scale-in parameters as
    ``properties.Schema`` entries plus the PROPERTIES and properties_schema
    tables; the upstream Avi API descriptions are empty here.
    """
    # all schemas
    num_vcpus_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    memory_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    is_exclusive_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(""),
        required=False,
        update_allowed=True,
    )
    scalein_primary_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(""),
        required=False,
        update_allowed=True,
    )
    num_se_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    scalein_se_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    num_standby_se_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(""),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'num_vcpus',
        'memory',
        'is_exclusive',
        'scalein_primary',
        'num_se',
        'scalein_se_uuid',
        'num_standby_se',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'num_vcpus': num_vcpus_schema,
        'memory': memory_schema,
        'is_exclusive': is_exclusive_schema,
        'scalein_primary': scalein_primary_schema,
        'num_se': num_se_schema,
        'scalein_se_uuid': scalein_se_uuid_schema,
        'num_standby_se': num_standby_se_schema,
    }
class Service(object):
    """Generated Heat schema wrapper for the Avi (VirtualService) Service object.

    Declares the port/SSL/profile-override schemas plus the PROPERTIES,
    properties_schema and field_references tables used by the generated
    resource plumbing.
    """
    # all schemas
    port_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("The Virtual Service's port number."),
        required=True,
        update_allowed=True,
    )
    enable_ssl_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Enable SSL termination and offload for traffic from clients. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    override_network_profile_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Override the network profile for this specific service port. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    port_range_end_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("The end of the Virtual Service's port number range. (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    override_application_profile_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.2.4) Enable application layer specific features for the this specific service. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'port',
        'enable_ssl',
        'override_network_profile_uuid',
        'port_range_end',
        'override_application_profile_uuid',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'port': port_schema,
        'enable_ssl': enable_ssl_schema,
        'override_network_profile_uuid': override_network_profile_uuid_schema,
        'port_range_end': port_range_end_schema,
        'override_application_profile_uuid': override_application_profile_uuid_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'override_network_profile_uuid': 'networkprofile',
        'override_application_profile_uuid': 'applicationprofile',
    }
class PerformanceLimits(object):
    """Generated Heat schema wrapper for the Avi PerformanceLimits object.

    Declares the connection/throughput cap schemas plus the PROPERTIES and
    properties_schema tables.
    """
    # all schemas
    # NOTE(review): the "conections" typo below lives inside the generated
    # description string; fix it upstream in the generator, not here.
    max_concurrent_connections_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("The maximum number of concurrent client conections allowed to the Virtual Service."),
        required=False,
        update_allowed=True,
    )
    max_throughput_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("The maximum throughput per second for all clients allowed through the client side of the Virtual Service."),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'max_concurrent_connections',
        'max_throughput',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'max_concurrent_connections': max_concurrent_connections_schema,
        'max_throughput': max_throughput_schema,
    }
class SidebandProfile(object):
    """Generated Heat schema wrapper for the Avi SidebandProfile object.

    The ``ip`` property is a list of IpAddr maps (IpAddr is imported from a
    sibling generated module); field_references and unique_keys are delegated
    to IpAddr's own tables.
    """
    # all schemas
    ip_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("IP Address of the sideband server."),
        schema=IpAddr.properties_schema,
        required=True,
        update_allowed=False,
    )
    ip_schema = properties.Schema(
        properties.Schema.LIST,
        _("IP Address of the sideband server."),
        schema=ip_item_schema,
        required=False,
        update_allowed=True,
    )
    sideband_max_request_body_size_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Maximum size of the request body that will be sent on the sideband. (Units: BYTES) (Default: 1024)"),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'ip',
        'sideband_max_request_body_size',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'ip': ip_schema,
        'sideband_max_request_body_size': sideband_max_request_body_size_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'ip': getattr(IpAddr, 'field_references', {}),
    }
    unique_keys = {
        'ip': getattr(IpAddr, 'unique_keys', {}),
    }
class TLSTicket(object):
    """Generated Heat schema wrapper for the Avi TLSTicket object.

    Declares the TLS session-ticket key fields (name, AES key, HMAC key) as
    required string schemas plus the PROPERTIES and properties_schema tables.
    """
    # all schemas
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    aes_key_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    hmac_key_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'name',
        'aes_key',
        'hmac_key',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'name': name_schema,
        'aes_key': aes_key_schema,
        'hmac_key': hmac_key_schema,
    }
class IPNetworkSubnet(object):
    """Nested Heat property schema for IPAM network/subnet selection.

    Selects the network and/or IPv4/IPv6 subnet used by the IPAM provider
    (Vantage, Infoblox, AWS, OpenStack) when allocating a VirtualService
    VIP. Only one of 'subnet' / 'subnet_uuid' (and of the v6 pair) may be
    configured, per the field descriptions below.
    """
    # all schemas
    network_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Network for VirtualService IP allocation with Vantage as the IPAM provider. Network should be created before this is configured. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    subnet_schema = properties.Schema(
        properties.Schema.MAP,
        _("Subnet for VirtualService IP allocation with Vantage or Infoblox as the IPAM provider. Only one of subnet or subnet_uuid configuration is allowed."),
        schema=IpAddrPrefix.properties_schema,
        required=False,
        update_allowed=True,
    )
    subnet_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Subnet UUID or Name or Prefix for VirtualService IP allocation with AWS or OpenStack as the IPAM provider. Only one of subnet or subnet_uuid configuration is allowed."),
        required=False,
        update_allowed=True,
    )
    subnet6_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.1) Subnet for VirtualService IPv6 allocation with Vantage or Infoblox as the IPAM provider. Only one of subnet or subnet_uuid configuration is allowed."),
        schema=IpAddrPrefix.properties_schema,
        required=False,
        update_allowed=True,
    )
    subnet6_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 18.1.1) Subnet UUID or Name or Prefix for VirtualService IPv6 allocation with AWS or OpenStack as the IPAM provider. Only one of subnet or subnet_uuid configuration is allowed."),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'network_uuid',
        'subnet',
        'subnet_uuid',
        'subnet6',
        'subnet6_uuid',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'network_uuid': network_uuid_schema,
        'subnet': subnet_schema,
        'subnet_uuid': subnet_uuid_schema,
        'subnet6': subnet6_schema,
        'subnet6_uuid': subnet6_uuid_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    # 'network_uuid' maps to the 'network' Avi object type for name lookup.
    field_references = {
        'subnet': getattr(IpAddrPrefix, 'field_references', {}),
        'subnet6': getattr(IpAddrPrefix, 'field_references', {}),
        'network_uuid': 'network',
    }
    unique_keys = {
        'subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
        'subnet6': getattr(IpAddrPrefix, 'unique_keys', {}),
    }
class VsSeVnic(object):
    """Nested Heat property schema for a Service Engine vNIC.

    Identifies a vNIC by MAC address and role ('type' is constrained to
    VNIC_TYPE_BE or VNIC_TYPE_FE), with an optional logical interface
    ('lif'). Schema container only — not a standalone Heat resource.
    """
    # all schemas
    mac_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    type_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['VNIC_TYPE_BE', 'VNIC_TYPE_FE']),
        ],
    )
    lif_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'mac',
        'type',
        'lif',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'mac': mac_schema,
        'type': type_schema,
        'lif': lif_schema,
    }
class VsApicExtension(AviResource):
    """Heat resource for the Avi 'vsapicextension' API object.

    Unlike the plain schema-container classes in this module, this is a
    full AviResource: Heat can create/update it against the Avi controller
    under the resource name 'vsapicextension'. Properties cover the
    transaction UUID, the owning SE UUID, and the list of SE vNICs.
    """
    resource_name = "vsapicextension"
    # all schemas
    avi_version_schema = properties.Schema(
        properties.Schema.STRING,
        _("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
        required=False,
        update_allowed=True,
    )
    txn_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    se_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    # Schema for one element of the 'vnic' list (a map shaped like VsSeVnic).
    vnic_item_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=VsSeVnic.properties_schema,
        required=True,
        update_allowed=False,
    )
    vnic_schema = properties.Schema(
        properties.Schema.LIST,
        _(""),
        schema=vnic_item_schema,
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'avi_version',
        'txn_uuid',
        'se_uuid',
        'vnic',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'avi_version': avi_version_schema,
        'txn_uuid': txn_uuid_schema,
        'se_uuid': se_uuid_schema,
        'vnic': vnic_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'vnic': getattr(VsSeVnic, 'field_references', {}),
    }
    unique_keys = {
        'vnic': getattr(VsSeVnic, 'unique_keys', {}),
    }
class SeVipInterfaceList(object):
    """Nested Heat property schema for one VIP interface on a Service Engine.

    Pairs the interface MAC with its VLAN, IPv4/IPv6 addresses, and a
    port-channel flag. Schema container only — not a standalone Heat
    resource.
    """
    # all schemas
    vip_intf_mac_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=True,
        update_allowed=True,
    )
    vlan_id_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(" (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    vip_intf_ip_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    is_portchannel_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    vip_intf_ip6_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'vip_intf_mac',
        'vlan_id',
        'vip_intf_ip',
        'is_portchannel',
        'vip_intf_ip6',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'vip_intf_mac': vip_intf_mac_schema,
        'vlan_id': vlan_id_schema,
        'vip_intf_ip': vip_intf_ip_schema,
        'is_portchannel': is_portchannel_schema,
        'vip_intf_ip6': vip_intf_ip6_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'vip_intf_ip6': getattr(IpAddr, 'field_references', {}),
        'vip_intf_ip': getattr(IpAddr, 'field_references', {}),
    }
    unique_keys = {
        'vip_intf_ip6': getattr(IpAddr, 'unique_keys', {}),
        'vip_intf_ip': getattr(IpAddr, 'unique_keys', {}),
    }
class SeList(object):
    """Nested Heat property schema for one Service Engine placement entry.

    Describes a Service Engine serving a VIP: identity ('se_uuid'),
    role/state flags (primary, standby, connected, scale-in/delete in
    progress), sizing (vcpus, memory), its VIP-facing interfaces and
    floating/SNAT IPs, and controller bookkeeping flags (downloads,
    attach-IP status, version). Schema container only — not a standalone
    Heat resource.
    """
    # all schemas
    se_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=True,
        update_allowed=True,
    )
    is_primary_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: True)"),
        required=False,
        update_allowed=True,
    )
    is_standby_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    is_connected_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: True)"),
        required=False,
        update_allowed=True,
    )
    delete_in_progress_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    vcpus_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(" (Default: 2)"),
        required=False,
        update_allowed=True,
    )
    memory_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(" (Default: 2001)"),
        required=False,
        update_allowed=True,
    )
    vip_intf_mac_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    vip_subnet_mask_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(" (Default: 32)"),
        required=False,
        update_allowed=True,
    )
    # Schema for one element of the 'vnic' list (a map shaped like VsSeVnic).
    vnic_item_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=VsSeVnic.properties_schema,
        required=True,
        update_allowed=False,
    )
    vnic_schema = properties.Schema(
        properties.Schema.LIST,
        _(""),
        schema=vnic_item_schema,
        required=False,
        update_allowed=True,
    )
    pending_download_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    sec_idx_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(" (Default: 1)"),
        required=False,
        update_allowed=True,
    )
    download_selist_only_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    vlan_id_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(" (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    snat_ip_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    vip_intf_ip_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    # Schema for one element of the 'vip_intf_list' list.
    vip_intf_list_item_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=SeVipInterfaceList.properties_schema,
        required=True,
        update_allowed=False,
    )
    vip_intf_list_schema = properties.Schema(
        properties.Schema.LIST,
        _(""),
        schema=vip_intf_list_item_schema,
        required=False,
        update_allowed=True,
    )
    # Schema for one element of the 'floating_intf_ip' list.
    floating_intf_ip_item_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=IpAddr.properties_schema,
        required=True,
        update_allowed=False,
    )
    floating_intf_ip_schema = properties.Schema(
        properties.Schema.LIST,
        _(""),
        schema=floating_intf_ip_item_schema,
        required=False,
        update_allowed=True,
    )
    is_portchannel_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    scalein_in_progress_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    admin_down_requested_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: False)"),
        required=False,
        update_allowed=True,
    )
    at_curr_ver_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _(" (Default: True)"),
        required=False,
        update_allowed=True,
    )
    version_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    gslb_download_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) This flag indicates whether the gslb, ghm, gs objects have been pushed to the DNS-VS's SE. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    geo_download_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) This flag indicates whether the geo-files have been pushed to the DNS-VS's SE. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    geodb_download_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.2) This flag indicates whether the geodb object has been pushed to the DNS-VS's SE. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    attach_ip_success_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.2.3) (Default: False)"),
        required=False,
        update_allowed=True,
    )
    attach_ip_status_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.2.3) "),
        required=False,
        update_allowed=True,
    )
    vip6_subnet_mask_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 18.1.1) (Default: 128)"),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'se_uuid',
        'is_primary',
        'is_standby',
        'is_connected',
        'delete_in_progress',
        'vcpus',
        'memory',
        'vip_intf_mac',
        'vip_subnet_mask',
        'vnic',
        'pending_download',
        'sec_idx',
        'download_selist_only',
        'vlan_id',
        'snat_ip',
        'vip_intf_ip',
        'vip_intf_list',
        'floating_intf_ip',
        'is_portchannel',
        'scalein_in_progress',
        'admin_down_requested',
        'at_curr_ver',
        'version',
        'gslb_download',
        'geo_download',
        'geodb_download',
        'attach_ip_success',
        'attach_ip_status',
        'vip6_subnet_mask',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'se_uuid': se_uuid_schema,
        'is_primary': is_primary_schema,
        'is_standby': is_standby_schema,
        'is_connected': is_connected_schema,
        'delete_in_progress': delete_in_progress_schema,
        'vcpus': vcpus_schema,
        'memory': memory_schema,
        'vip_intf_mac': vip_intf_mac_schema,
        'vip_subnet_mask': vip_subnet_mask_schema,
        'vnic': vnic_schema,
        'pending_download': pending_download_schema,
        'sec_idx': sec_idx_schema,
        'download_selist_only': download_selist_only_schema,
        'vlan_id': vlan_id_schema,
        'snat_ip': snat_ip_schema,
        'vip_intf_ip': vip_intf_ip_schema,
        'vip_intf_list': vip_intf_list_schema,
        'floating_intf_ip': floating_intf_ip_schema,
        'is_portchannel': is_portchannel_schema,
        'scalein_in_progress': scalein_in_progress_schema,
        'admin_down_requested': admin_down_requested_schema,
        'at_curr_ver': at_curr_ver_schema,
        'version': version_schema,
        'gslb_download': gslb_download_schema,
        'geo_download': geo_download_schema,
        'geodb_download': geodb_download_schema,
        'attach_ip_success': attach_ip_success_schema,
        'attach_ip_status': attach_ip_status_schema,
        'vip6_subnet_mask': vip6_subnet_mask_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    # 'se_uuid' maps to the 'serviceengine' Avi object type for name lookup.
    field_references = {
        'vnic': getattr(VsSeVnic, 'field_references', {}),
        'vip_intf_ip': getattr(IpAddr, 'field_references', {}),
        'vip_intf_list': getattr(SeVipInterfaceList, 'field_references', {}),
        'snat_ip': getattr(IpAddr, 'field_references', {}),
        'se_uuid': 'serviceengine',
        'floating_intf_ip': getattr(IpAddr, 'field_references', {}),
    }
    unique_keys = {
        'vnic': getattr(VsSeVnic, 'unique_keys', {}),
        'vip_intf_ip': getattr(IpAddr, 'unique_keys', {}),
        'snat_ip': getattr(IpAddr, 'unique_keys', {}),
        'floating_intf_ip': getattr(IpAddr, 'unique_keys', {}),
        'vip_intf_list': getattr(SeVipInterfaceList, 'unique_keys', {}),
    }
class VipDbExtension(object):
    """Nested Heat property schema for per-VIP placement bookkeeping.

    Associates a 'vip_id' with its Service Engine list, the requested
    resources, the first-SE-assignment timestamp, and the number of
    additional SEs. All fields were introduced in Avi 17.1.1. Schema
    container only — not a standalone Heat resource.
    """
    # all schemas
    vip_id_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) "),
        required=False,
        update_allowed=True,
    )
    # Schema for one element of the 'se_list' list (a map shaped like SeList).
    se_list_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) "),
        schema=SeList.properties_schema,
        required=True,
        update_allowed=False,
    )
    se_list_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 17.1.1) "),
        schema=se_list_item_schema,
        required=False,
        update_allowed=True,
    )
    requested_resource_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) "),
        schema=VirtualServiceResource.properties_schema,
        required=False,
        update_allowed=True,
    )
    first_se_assigned_time_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) "),
        schema=TimeStamp.properties_schema,
        required=False,
        update_allowed=True,
    )
    num_additional_se_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 17.1.1) "),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'vip_id',
        'se_list',
        'requested_resource',
        'first_se_assigned_time',
        'num_additional_se',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'vip_id': vip_id_schema,
        'se_list': se_list_schema,
        'requested_resource': requested_resource_schema,
        'first_se_assigned_time': first_se_assigned_time_schema,
        'num_additional_se': num_additional_se_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'se_list': getattr(SeList, 'field_references', {}),
        'first_se_assigned_time': getattr(TimeStamp, 'field_references', {}),
        'requested_resource': getattr(VirtualServiceResource, 'field_references', {}),
    }
    unique_keys = {
        'se_list': getattr(SeList, 'unique_keys', {}),
        'first_se_assigned_time': getattr(TimeStamp, 'unique_keys', {}),
        'requested_resource': getattr(VirtualServiceResource, 'unique_keys', {}),
    }
class Vip(object):
    """Nested Heat property schema for a VirtualService VIP (Avi 17.1.1+).

    Covers the VIP identity ('vip_id', required), its IPv4/IPv6 addresses
    and subnets, network/port placement, floating IPs, and the
    auto-allocation controls for VIP and floating-IP via the configured
    IPAM provider. Schema container only — not a standalone Heat resource.
    """
    # all schemas
    vip_id_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Unique ID associated with the vip."),
        required=True,
        update_allowed=True,
    )
    ip_address_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) IPv4 Address of the Vip. For IPv6 address support please use ip6_address field"),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    enabled_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) Enable or disable the Vip. (Default: True)"),
        required=False,
        update_allowed=True,
    )
    network_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Manually override the network on which the Vip is placed. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    port_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) (internal-use) Network port assigned to the Vip IP address."),
        required=False,
        update_allowed=True,
    )
    subnet_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) If auto_allocate_ip is True, then the subnet for the Vip IP address allocation. This field is applicable only if the VirtualService belongs to an Openstack or AWS cloud, in which case it is mandatory, if auto_allocate is selected."),
        required=False,
        update_allowed=True,
    )
    subnet_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) Subnet providing reachability for client facing Vip IP."),
        schema=IpAddrPrefix.properties_schema,
        required=False,
        update_allowed=True,
    )
    # Schema for one element of the 'discovered_networks' list.
    discovered_networks_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) Discovered networks providing reachability for client facing Vip IP."),
        schema=DiscoveredNetwork.properties_schema,
        required=True,
        update_allowed=False,
    )
    discovered_networks_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 17.1.1) Discovered networks providing reachability for client facing Vip IP."),
        schema=discovered_networks_item_schema,
        required=False,
        update_allowed=True,
    )
    availability_zone_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Availability-zone to place the Virtual Service."),
        required=False,
        update_allowed=True,
    )
    auto_allocate_ip_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) Auto-allocate VIP from the provided subnet. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    floating_ip_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) Floating IPv4 to associate with this Vip."),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    auto_allocate_floating_ip_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) Auto-allocate floating/elastic IP from the Cloud infrastructure. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    floating_subnet_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) If auto_allocate_floating_ip is True and more than one floating-ip subnets exist, then the subnet for the floating IP address allocation."),
        required=False,
        update_allowed=True,
    )
    avi_allocated_vip_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) (internal-use) VIP allocated by Avi in the Cloud infrastructure. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    avi_allocated_fip_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) (internal-use) FIP allocated by Avi in the Cloud infrastructure. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    ipam_network_subnet_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) Subnet and/or Network for allocating VirtualService IP by IPAM Provider module."),
        schema=IPNetworkSubnet.properties_schema,
        required=False,
        update_allowed=True,
    )
    ip6_address_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.1) IPv6 Address of the Vip."),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    subnet6_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 18.1.1) If auto_allocate_ip is True, then the subnet for the Vip IPv6 address allocation. This field is applicable only if the VirtualService belongs to an Openstack or AWS cloud, in which case it is mandatory, if auto_allocate is selected."),
        required=False,
        update_allowed=True,
    )
    subnet6_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.1) Subnet providing reachability for client facing Vip IPv6."),
        schema=IpAddrPrefix.properties_schema,
        required=False,
        update_allowed=True,
    )
    floating_ip6_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.1) Floating IPv6 address to associate with this Vip."),
        schema=IpAddr.properties_schema,
        required=False,
        update_allowed=True,
    )
    floating_subnet6_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 18.1.1) If auto_allocate_floating_ip is True and more than one floating-ip subnets exist, then the subnet for the floating IPv6 address allocation."),
        required=False,
        update_allowed=True,
    )
    auto_allocate_ip_type_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 18.1.1) Specifies whether to auto-allocate only a V4 address, only a V6 address, or one of each type. (Default: V4_ONLY)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['V4_ONLY', 'V4_V6', 'V6_ONLY']),
        ],
    )
    # properties list
    PROPERTIES = (
        'vip_id',
        'ip_address',
        'enabled',
        'network_uuid',
        'port_uuid',
        'subnet_uuid',
        'subnet',
        'discovered_networks',
        'availability_zone',
        'auto_allocate_ip',
        'floating_ip',
        'auto_allocate_floating_ip',
        'floating_subnet_uuid',
        'avi_allocated_vip',
        'avi_allocated_fip',
        'ipam_network_subnet',
        'ip6_address',
        'subnet6_uuid',
        'subnet6',
        'floating_ip6',
        'floating_subnet6_uuid',
        'auto_allocate_ip_type',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'vip_id': vip_id_schema,
        'ip_address': ip_address_schema,
        'enabled': enabled_schema,
        'network_uuid': network_uuid_schema,
        'port_uuid': port_uuid_schema,
        'subnet_uuid': subnet_uuid_schema,
        'subnet': subnet_schema,
        'discovered_networks': discovered_networks_schema,
        'availability_zone': availability_zone_schema,
        'auto_allocate_ip': auto_allocate_ip_schema,
        'floating_ip': floating_ip_schema,
        'auto_allocate_floating_ip': auto_allocate_floating_ip_schema,
        'floating_subnet_uuid': floating_subnet_uuid_schema,
        'avi_allocated_vip': avi_allocated_vip_schema,
        'avi_allocated_fip': avi_allocated_fip_schema,
        'ipam_network_subnet': ipam_network_subnet_schema,
        'ip6_address': ip6_address_schema,
        'subnet6_uuid': subnet6_uuid_schema,
        'subnet6': subnet6_schema,
        'floating_ip6': floating_ip6_schema,
        'floating_subnet6_uuid': floating_subnet6_uuid_schema,
        'auto_allocate_ip_type': auto_allocate_ip_type_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    # 'network_uuid' maps to the 'network' Avi object type for name lookup.
    field_references = {
        'subnet': getattr(IpAddrPrefix, 'field_references', {}),
        'network_uuid': 'network',
        'floating_ip6': getattr(IpAddr, 'field_references', {}),
        'discovered_networks': getattr(DiscoveredNetwork, 'field_references', {}),
        'floating_ip': getattr(IpAddr, 'field_references', {}),
        'ipam_network_subnet': getattr(IPNetworkSubnet, 'field_references', {}),
        'ip6_address': getattr(IpAddr, 'field_references', {}),
        'subnet6': getattr(IpAddrPrefix, 'field_references', {}),
        'ip_address': getattr(IpAddr, 'field_references', {}),
    }
    # NOTE(review): 'my_key' appears to be the generator's convention for
    # naming this object's own uniqueness key ('vip_id') when Vip entries
    # are diffed/merged in a list — confirm against the AviResource base.
    unique_keys = {
        'subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
        'my_key': 'vip_id',
        'floating_ip6': getattr(IpAddr, 'unique_keys', {}),
        'discovered_networks': getattr(DiscoveredNetwork, 'unique_keys', {}),
        'floating_ip': getattr(IpAddr, 'unique_keys', {}),
        'ipam_network_subnet': getattr(IPNetworkSubnet, 'unique_keys', {}),
        'ip6_address': getattr(IpAddr, 'unique_keys', {}),
        'subnet6': getattr(IpAddrPrefix, 'unique_keys', {}),
        'ip_address': getattr(IpAddr, 'unique_keys', {}),
    }
class VirtualService(AviResource):
resource_name = "virtualservice"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_("Name for the Virtual Service."),
required=True,
update_allowed=True,
)
fqdn_schema = properties.Schema(
properties.Schema.STRING,
_("DNS resolvable, fully qualified domain name of the virtualservice. Only one of 'fqdn' and 'dns_info' configuration is allowed."),
required=False,
update_allowed=True,
)
ip_address_schema = properties.Schema(
properties.Schema.MAP,
_("(Deprecated in: 17.1.1) IP Address of the Virtual Service."),
schema=IpAddr.properties_schema,
required=False,
update_allowed=True,
)
enabled_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable or disable the Virtual Service. (Default: True)"),
required=False,
update_allowed=True,
)
services_item_schema = properties.Schema(
properties.Schema.MAP,
_("List of Services defined for this Virtual Service."),
schema=Service.properties_schema,
required=True,
update_allowed=False,
)
services_schema = properties.Schema(
properties.Schema.LIST,
_("List of Services defined for this Virtual Service."),
schema=services_item_schema,
required=False,
update_allowed=True,
)
application_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Enable application layer specific features for the Virtual Service. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'. (Default: System-HTTP)"),
required=False,
update_allowed=True,
)
network_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Determines network settings such as protocol, TCP or UDP, and related options for the protocol. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'. (Default: System-TCP-Proxy)"),
required=False,
update_allowed=True,
)
server_network_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Determines the network settings profile for the server side of TCP proxied connections. Leave blank to use the same settings as the client to VS side of the connection. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
pool_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("The pool is an object that contains destination servers and related attributes such as load-balancing and persistence. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
se_group_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("The Service Engine Group to use for this Virtual Service. Moving to a new SE Group is disruptive to existing connections for this VS. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
network_security_policy_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Network security policies for the Virtual Service. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
http_policies_item_schema = properties.Schema(
properties.Schema.MAP,
_("HTTP Policies applied on the data traffic of the Virtual Service"),
schema=HTTPPolicies.properties_schema,
required=True,
update_allowed=False,
)
http_policies_schema = properties.Schema(
properties.Schema.LIST,
_("HTTP Policies applied on the data traffic of the Virtual Service"),
schema=http_policies_item_schema,
required=False,
update_allowed=True,
)
dns_policies_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.1.1) DNS Policies applied on the dns traffic of the Virtual Service"),
schema=DnsPolicies.properties_schema,
required=True,
update_allowed=False,
)
dns_policies_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.1) DNS Policies applied on the dns traffic of the Virtual Service"),
schema=dns_policies_item_schema,
required=False,
update_allowed=True,
)
ssl_key_and_certificate_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_("Select or create one or two certificates, EC and/or RSA, that will be presented to SSL/TLS terminated connections."),
required=True,
update_allowed=False,
)
ssl_key_and_certificate_uuids_schema = properties.Schema(
properties.Schema.LIST,
_("Select or create one or two certificates, EC and/or RSA, that will be presented to SSL/TLS terminated connections. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=ssl_key_and_certificate_uuids_item_schema,
required=False,
update_allowed=True,
)
ssl_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Determines the set of SSL versions and ciphers to accept for SSL/TLS terminated connections. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
performance_limits_schema = properties.Schema(
properties.Schema.MAP,
_("Optional settings that determine performance limits like max connections or bandwdith etc."),
schema=PerformanceLimits.properties_schema,
required=False,
update_allowed=True,
)
analytics_policy_schema = properties.Schema(
properties.Schema.MAP,
_("Determines analytics settings for the application."),
schema=AnalyticsPolicy.properties_schema,
required=False,
update_allowed=True,
)
network_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("(Deprecated in: 17.1.1) Manually override the network on which the Virtual Service is placed. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
vrf_context_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Virtual Routing Context that the Virtual Service is bound to. This is used to provide the isolation of the set of networks the application is attached to. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
enable_autogw_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Response traffic to clients will be sent back to the source MAC address of the connection, rather than statically sent to a default gateway. (Default: True)"),
required=False,
update_allowed=True,
)
port_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("(Deprecated in: 17.1.1) (internal-use) Network port assigned to the Virtual Service IP address."),
required=False,
update_allowed=True,
)
subnet_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("(Deprecated in: 17.1.1) It represents subnet for the Virtual Service IP address allocation when auto_allocate_ip is True.It is only applicable in OpenStack or AWS cloud. This field is required if auto_allocate_ip is True."),
required=False,
update_allowed=True,
)
analytics_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Specifies settings related to analytics. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'. (Default: System-Analytics-Profile)"),
required=False,
update_allowed=True,
)
discovered_network_uuid_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Deprecated in: 17.1.1) (internal-use) Discovered networks providing reachability for client facing Virtual Service IP. This field is deprecated."),
required=True,
update_allowed=False,
)
discovered_network_uuid_schema = properties.Schema(
properties.Schema.LIST,
_("(Deprecated in: 17.1.1) (internal-use) Discovered networks providing reachability for client facing Virtual Service IP. This field is deprecated. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=discovered_network_uuid_item_schema,
required=False,
update_allowed=True,
)
discovered_subnet_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Deprecated in: 17.1.1) (internal-use) Discovered subnets providing reachability for client facing Virtual Service IP. This field is deprecated."),
schema=IpAddrPrefix.properties_schema,
required=True,
update_allowed=False,
)
discovered_subnet_schema = properties.Schema(
properties.Schema.LIST,
_("(Deprecated in: 17.1.1) (internal-use) Discovered subnets providing reachability for client facing Virtual Service IP. This field is deprecated."),
schema=discovered_subnet_item_schema,
required=False,
update_allowed=True,
)
host_name_xlate_schema = properties.Schema(
properties.Schema.STRING,
_("Translate the host name sent to the servers to this value. Translate the host name sent from servers back to the value used by the client."),
required=False,
update_allowed=True,
)
subnet_schema = properties.Schema(
properties.Schema.MAP,
_("(Deprecated in: 17.1.1) Subnet providing reachability for client facing Virtual Service IP."),
schema=IpAddrPrefix.properties_schema,
required=False,
update_allowed=True,
)
discovered_networks_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Deprecated in: 17.1.1) (internal-use) Discovered networks providing reachability for client facing Virtual Service IP. This field is used internally by Avi, not editable by the user."),
schema=DiscoveredNetwork.properties_schema,
required=True,
update_allowed=False,
)
discovered_networks_schema = properties.Schema(
properties.Schema.LIST,
_("(Deprecated in: 17.1.1) (internal-use) Discovered networks providing reachability for client facing Virtual Service IP. This field is used internally by Avi, not editable by the user."),
schema=discovered_networks_item_schema,
required=False,
update_allowed=True,
)
vs_datascripts_item_schema = properties.Schema(
properties.Schema.MAP,
_("Datascripts applied on the data traffic of the Virtual Service"),
schema=VSDataScripts.properties_schema,
required=True,
update_allowed=False,
)
vs_datascripts_schema = properties.Schema(
properties.Schema.LIST,
_("Datascripts applied on the data traffic of the Virtual Service"),
schema=vs_datascripts_item_schema,
required=False,
update_allowed=True,
)
client_auth_schema = properties.Schema(
properties.Schema.MAP,
_("HTTP authentication configuration for protected resources."),
schema=HTTPClientAuthenticationParams.properties_schema,
required=False,
update_allowed=True,
)
weight_schema = properties.Schema(
properties.Schema.NUMBER,
_("The Quality of Service weight to assign to traffic transmitted from this Virtual Service. A higher weight will prioritize traffic versus other Virtual Services sharing the same Service Engines. (Default: 1)"),
required=False,
update_allowed=True,
)
delay_fairness_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Select the algorithm for QoS fairness. This determines how multiple Virtual Services sharing the same Service Engines will prioritize traffic over a congested network. (Default: False)"),
required=False,
update_allowed=True,
)
max_cps_per_client_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum connections per second per client IP. (Default: 0)"),
required=False,
update_allowed=True,
)
limit_doser_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Limit potential DoS attackers who exceed max_cps_per_client significantly to a fraction of max_cps_per_client for a while. (Default: False)"),
required=False,
update_allowed=True,
)
type_schema = properties.Schema(
properties.Schema.STRING,
_("Specify if this is a normal Virtual Service, or if it is the parent or child of an SNI-enabled virtual hosted Virtual Service. (Default: VS_TYPE_NORMAL)"),
required=False,
update_allowed=False,
constraints=[
constraints.AllowedValues(['VS_TYPE_NORMAL', 'VS_TYPE_VH_CHILD', 'VS_TYPE_VH_PARENT']),
],
)
vh_parent_vs_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Specifies the Virtual Service acting as Virtual Hosting (SNI) parent."),
required=False,
update_allowed=True,
)
vh_domain_name_item_schema = properties.Schema(
properties.Schema.STRING,
_("The exact name requested from the client's SNI-enabled TLS hello domain name field. If this is a match, the parent VS will forward the connection to this child VS."),
required=True,
update_allowed=False,
)
vh_domain_name_schema = properties.Schema(
properties.Schema.LIST,
_("The exact name requested from the client's SNI-enabled TLS hello domain name field. If this is a match, the parent VS will forward the connection to this child VS."),
schema=vh_domain_name_item_schema,
required=False,
update_allowed=True,
)
availability_zone_schema = properties.Schema(
properties.Schema.STRING,
_("(Deprecated in: 17.1.1) Availability-zone to place the Virtual Service."),
required=False,
update_allowed=True,
)
auto_allocate_ip_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Deprecated in: 17.1.1) Auto-allocate VIP from the provided subnet."),
required=False,
update_allowed=True,
)
floating_ip_schema = properties.Schema(
properties.Schema.MAP,
_("(Deprecated in: 17.1.1) Floating IP to associate with this Virtual Service."),
schema=IpAddr.properties_schema,
required=False,
update_allowed=True,
)
auto_allocate_floating_ip_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Deprecated in: 17.1.1) Auto-allocate floating/elastic IP from the Cloud infrastructure."),
required=False,
update_allowed=True,
)
floating_subnet_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("(Deprecated in: 17.1.1) If auto_allocate_floating_ip is True and more than one floating-ip subnets exist, then the subnet for the floating IP address allocation. This field is applicable only if the VirtualService belongs to an OpenStack or AWS cloud. In OpenStack or AWS cloud it is required when auto_allocate_floating_ip is selected."),
required=False,
update_allowed=True,
)
cloud_type_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: CLOUD_NONE)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['CLOUD_APIC', 'CLOUD_AWS', 'CLOUD_AZURE', 'CLOUD_DOCKER_UCP', 'CLOUD_LINUXSERVER', 'CLOUD_MESOS', 'CLOUD_NONE', 'CLOUD_OPENSTACK', 'CLOUD_OSHIFT_K8S', 'CLOUD_RANCHER', 'CLOUD_VCA', 'CLOUD_VCENTER']),
],
)
avi_allocated_vip_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Deprecated in: 17.1.1) (internal-use) VIP allocated by Avi in the Cloud infrastructure."),
required=False,
update_allowed=True,
)
avi_allocated_fip_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Deprecated in: 17.1.1) (internal-use) FIP allocated by Avi in the Cloud infrastructure."),
required=False,
update_allowed=True,
)
connections_rate_limit_schema = properties.Schema(
properties.Schema.MAP,
_("Rate limit the incoming connections to this virtual service"),
schema=RateProfile.properties_schema,
required=False,
update_allowed=True,
)
requests_rate_limit_schema = properties.Schema(
properties.Schema.MAP,
_("Rate limit the incoming requests to this virtual service"),
schema=RateProfile.properties_schema,
required=False,
update_allowed=True,
)
use_bridge_ip_as_vip_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Use Bridge IP as VIP on each Host in Mesos deployments (Default: False)"),
required=False,
update_allowed=True,
)
flow_dist_schema = properties.Schema(
properties.Schema.STRING,
_("Criteria for flow distribution among SEs. (Default: LOAD_AWARE)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['CONSISTENT_HASH_SOURCE_IP_ADDRESS', 'CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT', 'LOAD_AWARE']),
],
)
ign_pool_net_reach_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Ignore Pool servers network reachability constraints for Virtual Service placement. (Default: False)"),
required=False,
update_allowed=True,
)
ssl_sess_cache_avg_size_schema = properties.Schema(
properties.Schema.NUMBER,
_("Expected number of SSL session cache entries (may be exceeded). (Default: 1024)"),
required=False,
update_allowed=True,
)
pool_group_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("The pool group is an object that contains pools. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
remove_listening_port_on_vs_down_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Remove listening port if VirtualService is down (Default: False)"),
required=False,
update_allowed=True,
)
close_client_conn_on_config_update_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.4) close client connection on vs config update (Default: False)"),
required=False,
update_allowed=True,
)
bulk_sync_kvcache_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 18.1.1) (This is a beta feature). Sync Key-Value cache to the new SEs when VS is scaled out. For ex: SSL sessions are stored using VS's Key-Value cache. When the VS is scaled out, the SSL session information is synced to the new SE, allowing existing SSL sessions to be reused on the new SE. (Default: False)"),
required=False,
update_allowed=True,
)
description_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
cloud_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=False,
)
east_west_placement_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Force placement on all SE's in service group (Mesos mode only) (Default: False)"),
required=False,
update_allowed=True,
)
scaleout_ecmp_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Disable re-distribution of flows across service engines for a virtual service. Enable if the network itself performs flow hashing with ECMP in environments such as GCP (Default: False)"),
required=False,
update_allowed=True,
)
microservice_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Microservice representing the virtual service You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
service_pool_select_item_schema = properties.Schema(
properties.Schema.MAP,
_("Select pool based on destination port"),
schema=ServicePoolSelector.properties_schema,
required=True,
update_allowed=False,
)
service_pool_select_schema = properties.Schema(
properties.Schema.LIST,
_("Select pool based on destination port"),
schema=service_pool_select_item_schema,
required=False,
update_allowed=True,
)
created_by_schema = properties.Schema(
properties.Schema.STRING,
_("Creator name"),
required=False,
update_allowed=True,
)
cloud_config_cksum_schema = properties.Schema(
properties.Schema.STRING,
_("Checksum of cloud configuration for VS. Internally set by cloud connector"),
required=False,
update_allowed=True,
)
enable_rhi_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable Route Health Injection using the BGP Config in the vrf context"),
required=False,
update_allowed=True,
)
snat_ip_item_schema = properties.Schema(
properties.Schema.MAP,
_("NAT'ted floating source IP Address(es) for upstream connection to servers"),
schema=IpAddr.properties_schema,
required=True,
update_allowed=False,
)
snat_ip_schema = properties.Schema(
properties.Schema.LIST,
_("NAT'ted floating source IP Address(es) for upstream connection to servers"),
schema=snat_ip_item_schema,
required=False,
update_allowed=True,
)
active_standby_se_tag_schema = properties.Schema(
properties.Schema.STRING,
_("This configuration only applies if the VirtualService is in Legacy Active Standby HA mode and Load Distribution among Active Standby is enabled. This field is used to tag the VirtualService so that VirtualServices with the same tag will share the same Active ServiceEngine. VirtualServices with different tags will have different Active ServiceEngines. If one of the ServiceEngine's in the ServiceEngineGroup fails, all VirtualServices will end up using the same Active ServiceEngine. Redistribution of the VirtualServices can be either manual or automated when the failed ServiceEngine recovers. Redistribution is based on the auto redistribute property of the ServiceEngineGroup. (Default: ACTIVE_STANDBY_SE_1)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['ACTIVE_STANDBY_SE_1', 'ACTIVE_STANDBY_SE_2']),
],
)
flow_label_type_schema = properties.Schema(
properties.Schema.STRING,
_("Criteria for flow labelling. (Default: NO_LABEL)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['APPLICATION_LABEL', 'NO_LABEL', 'SERVICE_LABEL']),
],
)
enable_rhi_snat_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable Route Health Injection for Source NAT'ted floating IP Address using the BGP Config in the vrf context"),
required=False,
update_allowed=True,
)
static_dns_records_item_schema = properties.Schema(
properties.Schema.MAP,
_("List of static DNS records applied to this Virtual Service. These are static entries and no health monitoring is performed against the IP addresses."),
schema=DnsRecord.properties_schema,
required=True,
update_allowed=False,
)
static_dns_records_schema = properties.Schema(
properties.Schema.LIST,
_("List of static DNS records applied to this Virtual Service. These are static entries and no health monitoring is performed against the IP addresses."),
schema=static_dns_records_item_schema,
required=False,
update_allowed=True,
)
ipam_network_subnet_schema = properties.Schema(
properties.Schema.MAP,
_("(Deprecated in: 17.1.1) Subnet and/or Network for allocating VirtualService IP by IPAM Provider module."),
schema=IPNetworkSubnet.properties_schema,
required=False,
update_allowed=True,
)
dns_info_item_schema = properties.Schema(
properties.Schema.MAP,
_("Service discovery specific data including fully qualified domain name, type and Time-To-Live of the DNS record. Note that only one of fqdn and dns_info setting is allowed."),
schema=DnsInfo.properties_schema,
required=True,
update_allowed=False,
)
dns_info_schema = properties.Schema(
properties.Schema.LIST,
_("Service discovery specific data including fully qualified domain name, type and Time-To-Live of the DNS record. Note that only one of fqdn and dns_info setting is allowed."),
schema=dns_info_item_schema,
required=False,
update_allowed=True,
)
service_metadata_schema = properties.Schema(
properties.Schema.STRING,
_("Metadata pertaining to the Service provided by this virtual service. In Openshift/Kubernetes environments, egress pod info is stored. Any user input to this field will be overwritten by Avi Vantage."),
required=False,
update_allowed=True,
)
traffic_clone_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.1) Server network or list of servers for cloning traffic. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
content_rewrite_schema = properties.Schema(
properties.Schema.MAP,
_("Profile used to match and rewrite strings in request and/or response body."),
schema=ContentRewriteProfile.properties_schema,
required=False,
update_allowed=True,
)
sideband_profile_schema = properties.Schema(
properties.Schema.MAP,
_("Sideband configuration to be used for this virtualservice.It can be used for sending traffic to sideband VIPs for external inspection etc."),
schema=SidebandProfile.properties_schema,
required=False,
update_allowed=True,
)
vip_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.1.1) List of Virtual Service IPs. While creating a 'Shared VS',please use vsvip_ref to point to the shared entities."),
schema=Vip.properties_schema,
required=True,
update_allowed=False,
)
vip_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.1) List of Virtual Service IPs. While creating a 'Shared VS',please use vsvip_ref to point to the shared entities."),
schema=vip_item_schema,
required=False,
update_allowed=True,
)
nsx_securitygroup_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.1) A list of NSX Service Groups representing the Clients which can access the Virtual IP of the Virtual Service"),
required=True,
update_allowed=False,
)
nsx_securitygroup_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.1) A list of NSX Service Groups representing the Clients which can access the Virtual IP of the Virtual Service"),
schema=nsx_securitygroup_item_schema,
required=False,
update_allowed=True,
)
vsvip_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.1) Mostly used during the creation of Shared VS, this field refers to entities that can be shared across Virtual Services. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
waf_policy_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.1) WAF policy for the Virtual Service. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
sp_pool_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.2) GSLB pools used to manage site-persistence functionality. Each site-persistence pool contains the virtualservices in all the other sites, that is auto-generated by the GSLB manager. This is a read-only field for the user."),
required=True,
update_allowed=False,
)
sp_pool_uuids_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.2.2) GSLB pools used to manage site-persistence functionality. Each site-persistence pool contains the virtualservices in all the other sites, that is auto-generated by the GSLB manager. This is a read-only field for the user. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=sp_pool_uuids_item_schema,
required=False,
update_allowed=False,
)
use_vip_as_snat_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.1.9,17.2.3) Use the Virtual IP as the SNAT IP for health monitoring and sending traffic to the backend servers instead of the Service Engine interface IP. The caveat of enabling this option is that the VirtualService cannot be configued in an Active-Active HA mode. DNS based Multi VIP solution has to be used for HA & Non-disruptive Upgrade purposes. (Default: False)"),
required=False,
update_allowed=True,
)
error_page_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.4) Error Page Profile to be used for this virtualservice.This profile is used to send the custom error page to the client generated by the proxy You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
l4_policies_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.7) L4 Policies applied to the data traffic of the Virtual Service"),
schema=L4Policies.properties_schema,
required=True,
update_allowed=False,
)
l4_policies_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.2.7) L4 Policies applied to the data traffic of the Virtual Service"),
schema=l4_policies_item_schema,
required=False,
update_allowed=True,
)
traffic_enabled_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.8) Knob to enable the Virtual Service traffic on its assigned service engines. This setting is effective only when the enabled flag is set to True. (Default: True)"),
required=False,
update_allowed=True,
)
apic_contract_graph_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.12,18.1.2) The name of the Contract/Graph associated with the Virtual Service. Should be in the <Contract name>:<Graph name> format. This is applicable only for Service Integration mode with Cisco APIC Controller "),
required=False,
update_allowed=True,
)
vsvip_cloud_config_cksum_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.9) Checksum of cloud configuration for VsVip. Internally set by cloud connector"),
required=False,
update_allowed=True,
)
azure_availability_set_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.12, 18.1.2) (internal-use)Applicable for Azure only. Azure Availability set to which this VS is associated. Internally set by the cloud connector"),
required=False,
update_allowed=False,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'fqdn',
'ip_address',
'enabled',
'services',
'application_profile_uuid',
'network_profile_uuid',
'server_network_profile_uuid',
'pool_uuid',
'se_group_uuid',
'network_security_policy_uuid',
'http_policies',
'dns_policies',
'ssl_key_and_certificate_uuids',
'ssl_profile_uuid',
'performance_limits',
'analytics_policy',
'network_uuid',
'vrf_context_uuid',
'enable_autogw',
'port_uuid',
'subnet_uuid',
'analytics_profile_uuid',
'discovered_network_uuid',
'discovered_subnet',
'host_name_xlate',
'subnet',
'discovered_networks',
'vs_datascripts',
'client_auth',
'weight',
'delay_fairness',
'max_cps_per_client',
'limit_doser',
'type',
'vh_parent_vs_uuid',
'vh_domain_name',
'availability_zone',
'auto_allocate_ip',
'floating_ip',
'auto_allocate_floating_ip',
'floating_subnet_uuid',
'cloud_type',
'avi_allocated_vip',
'avi_allocated_fip',
'connections_rate_limit',
'requests_rate_limit',
'use_bridge_ip_as_vip',
'flow_dist',
'ign_pool_net_reach',
'ssl_sess_cache_avg_size',
'pool_group_uuid',
'remove_listening_port_on_vs_down',
'close_client_conn_on_config_update',
'bulk_sync_kvcache',
'description',
'cloud_uuid',
'east_west_placement',
'scaleout_ecmp',
'microservice_uuid',
'service_pool_select',
'created_by',
'cloud_config_cksum',
'enable_rhi',
'snat_ip',
'active_standby_se_tag',
'flow_label_type',
'enable_rhi_snat',
'static_dns_records',
'ipam_network_subnet',
'dns_info',
'service_metadata',
'traffic_clone_profile_uuid',
'content_rewrite',
'sideband_profile',
'vip',
'nsx_securitygroup',
'vsvip_uuid',
'waf_policy_uuid',
'sp_pool_uuids',
'use_vip_as_snat',
'error_page_profile_uuid',
'l4_policies',
'traffic_enabled',
'apic_contract_graph',
'vsvip_cloud_config_cksum',
'azure_availability_set',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'fqdn': fqdn_schema,
'ip_address': ip_address_schema,
'enabled': enabled_schema,
'services': services_schema,
'application_profile_uuid': application_profile_uuid_schema,
'network_profile_uuid': network_profile_uuid_schema,
'server_network_profile_uuid': server_network_profile_uuid_schema,
'pool_uuid': pool_uuid_schema,
'se_group_uuid': se_group_uuid_schema,
'network_security_policy_uuid': network_security_policy_uuid_schema,
'http_policies': http_policies_schema,
'dns_policies': dns_policies_schema,
'ssl_key_and_certificate_uuids': ssl_key_and_certificate_uuids_schema,
'ssl_profile_uuid': ssl_profile_uuid_schema,
'performance_limits': performance_limits_schema,
'analytics_policy': analytics_policy_schema,
'network_uuid': network_uuid_schema,
'vrf_context_uuid': vrf_context_uuid_schema,
'enable_autogw': enable_autogw_schema,
'port_uuid': port_uuid_schema,
'subnet_uuid': subnet_uuid_schema,
'analytics_profile_uuid': analytics_profile_uuid_schema,
'discovered_network_uuid': discovered_network_uuid_schema,
'discovered_subnet': discovered_subnet_schema,
'host_name_xlate': host_name_xlate_schema,
'subnet': subnet_schema,
'discovered_networks': discovered_networks_schema,
'vs_datascripts': vs_datascripts_schema,
'client_auth': client_auth_schema,
'weight': weight_schema,
'delay_fairness': delay_fairness_schema,
'max_cps_per_client': max_cps_per_client_schema,
'limit_doser': limit_doser_schema,
'type': type_schema,
'vh_parent_vs_uuid': vh_parent_vs_uuid_schema,
'vh_domain_name': vh_domain_name_schema,
'availability_zone': availability_zone_schema,
'auto_allocate_ip': auto_allocate_ip_schema,
'floating_ip': floating_ip_schema,
'auto_allocate_floating_ip': auto_allocate_floating_ip_schema,
'floating_subnet_uuid': floating_subnet_uuid_schema,
'cloud_type': cloud_type_schema,
'avi_allocated_vip': avi_allocated_vip_schema,
'avi_allocated_fip': avi_allocated_fip_schema,
'connections_rate_limit': connections_rate_limit_schema,
'requests_rate_limit': requests_rate_limit_schema,
'use_bridge_ip_as_vip': use_bridge_ip_as_vip_schema,
'flow_dist': flow_dist_schema,
'ign_pool_net_reach': ign_pool_net_reach_schema,
'ssl_sess_cache_avg_size': ssl_sess_cache_avg_size_schema,
'pool_group_uuid': pool_group_uuid_schema,
'remove_listening_port_on_vs_down': remove_listening_port_on_vs_down_schema,
'close_client_conn_on_config_update': close_client_conn_on_config_update_schema,
'bulk_sync_kvcache': bulk_sync_kvcache_schema,
'description': description_schema,
'cloud_uuid': cloud_uuid_schema,
'east_west_placement': east_west_placement_schema,
'scaleout_ecmp': scaleout_ecmp_schema,
'microservice_uuid': microservice_uuid_schema,
'service_pool_select': service_pool_select_schema,
'created_by': created_by_schema,
'cloud_config_cksum': cloud_config_cksum_schema,
'enable_rhi': enable_rhi_schema,
'snat_ip': snat_ip_schema,
'active_standby_se_tag': active_standby_se_tag_schema,
'flow_label_type': flow_label_type_schema,
'enable_rhi_snat': enable_rhi_snat_schema,
'static_dns_records': static_dns_records_schema,
'ipam_network_subnet': ipam_network_subnet_schema,
'dns_info': dns_info_schema,
'service_metadata': service_metadata_schema,
'traffic_clone_profile_uuid': traffic_clone_profile_uuid_schema,
'content_rewrite': content_rewrite_schema,
'sideband_profile': sideband_profile_schema,
'vip': vip_schema,
'nsx_securitygroup': nsx_securitygroup_schema,
'vsvip_uuid': vsvip_uuid_schema,
'waf_policy_uuid': waf_policy_uuid_schema,
'sp_pool_uuids': sp_pool_uuids_schema,
'use_vip_as_snat': use_vip_as_snat_schema,
'error_page_profile_uuid': error_page_profile_uuid_schema,
'l4_policies': l4_policies_schema,
'traffic_enabled': traffic_enabled_schema,
'apic_contract_graph': apic_contract_graph_schema,
'vsvip_cloud_config_cksum': vsvip_cloud_config_cksum_schema,
'azure_availability_set': azure_availability_set_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'client_auth': getattr(HTTPClientAuthenticationParams, 'field_references', {}),
'network_uuid': 'network',
'network_profile_uuid': 'networkprofile',
'dns_info': getattr(DnsInfo, 'field_references', {}),
'vs_datascripts': getattr(VSDataScripts, 'field_references', {}),
'content_rewrite': getattr(ContentRewriteProfile, 'field_references', {}),
'vip': getattr(Vip, 'field_references', {}),
'snat_ip': getattr(IpAddr, 'field_references', {}),
'waf_policy_uuid': 'wafpolicy',
'discovered_network_uuid': 'network',
'sideband_profile': getattr(SidebandProfile, 'field_references', {}),
'vrf_context_uuid': 'vrfcontext',
'subnet': getattr(IpAddrPrefix, 'field_references', {}),
'vsvip_uuid': 'vsvip',
'sp_pool_uuids': 'pool',
'ssl_profile_uuid': 'sslprofile',
'error_page_profile_uuid': 'errorpageprofile',
'traffic_clone_profile_uuid': 'trafficcloneprofile',
'se_group_uuid': 'serviceenginegroup',
'l4_policies': getattr(L4Policies, 'field_references', {}),
'requests_rate_limit': getattr(RateProfile, 'field_references', {}),
'application_profile_uuid': 'applicationprofile',
'pool_group_uuid': 'poolgroup',
'analytics_profile_uuid': 'analyticsprofile',
'performance_limits': getattr(PerformanceLimits, 'field_references', {}),
'http_policies': getattr(HTTPPolicies, 'field_references', {}),
'server_network_profile_uuid': 'networkprofile',
'floating_ip': getattr(IpAddr, 'field_references', {}),
'microservice_uuid': 'microservice',
'services': getattr(Service, 'field_references', {}),
'connections_rate_limit': getattr(RateProfile, 'field_references', {}),
'ip_address': getattr(IpAddr, 'field_references', {}),
'service_pool_select': getattr(ServicePoolSelector, 'field_references', {}),
'network_security_policy_uuid': 'networksecuritypolicy',
'discovered_networks': getattr(DiscoveredNetwork, 'field_references', {}),
'ssl_key_and_certificate_uuids': 'sslkeyandcertificate',
'ipam_network_subnet': getattr(IPNetworkSubnet, 'field_references', {}),
'discovered_subnet': getattr(IpAddrPrefix, 'field_references', {}),
'dns_policies': getattr(DnsPolicies, 'field_references', {}),
'static_dns_records': getattr(DnsRecord, 'field_references', {}),
'analytics_policy': getattr(AnalyticsPolicy, 'field_references', {}),
'pool_uuid': 'pool',
}
unique_keys = {
'client_auth': getattr(HTTPClientAuthenticationParams, 'unique_keys', {}),
'vs_datascripts': getattr(VSDataScripts, 'unique_keys', {}),
'content_rewrite': getattr(ContentRewriteProfile, 'unique_keys', {}),
'vip': getattr(Vip, 'unique_keys', {}),
'static_dns_records': getattr(DnsRecord, 'unique_keys', {}),
'sideband_profile': getattr(SidebandProfile, 'unique_keys', {}),
'requests_rate_limit': getattr(RateProfile, 'unique_keys', {}),
'subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
'l4_policies': getattr(L4Policies, 'unique_keys', {}),
'performance_limits': getattr(PerformanceLimits, 'unique_keys', {}),
'http_policies': getattr(HTTPPolicies, 'unique_keys', {}),
'floating_ip': getattr(IpAddr, 'unique_keys', {}),
'services': getattr(Service, 'unique_keys', {}),
'connections_rate_limit': getattr(RateProfile, 'unique_keys', {}),
'ip_address': getattr(IpAddr, 'unique_keys', {}),
'service_pool_select': getattr(ServicePoolSelector, 'unique_keys', {}),
'discovered_networks': getattr(DiscoveredNetwork, 'unique_keys', {}),
'dns_info': getattr(DnsInfo, 'unique_keys', {}),
'ipam_network_subnet': getattr(IPNetworkSubnet, 'unique_keys', {}),
'discovered_subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
'dns_policies': getattr(DnsPolicies, 'unique_keys', {}),
'snat_ip': getattr(IpAddr, 'unique_keys', {}),
'analytics_policy': getattr(AnalyticsPolicy, 'unique_keys', {}),
}
class VsVip(AviResource):
    """Heat resource wrapping the Avi "vsvip" API object.

    Declares the property schemas for a VsVip (a shareable Virtual
    Service IP introduced in Avi 17.1.1), the ordered PROPERTIES tuple,
    the property-name -> schema mapping consumed by Heat, and the lookup
    tables used by the 'get_avi_uuid_by_name:' resolution machinery.

    NOTE(review): this class appears to be auto-generated from the Avi
    API object model -- keep any edits mechanical and in sync with the
    generator.
    """
    resource_name = "vsvip"
    # all schemas
    avi_version_schema = properties.Schema(
        properties.Schema.STRING,
        _("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
        required=False,
        update_allowed=True,
    )
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Name for the VsVip object."),
        required=True,
        update_allowed=True,
    )
    # Each *_item_schema below describes a single element of the
    # corresponding *_schema LIST property.
    vip_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) List of Virtual Service IPs and other shareable entities."),
        schema=Vip.properties_schema,
        required=True,
        update_allowed=False,
    )
    vip_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 17.1.1) List of Virtual Service IPs and other shareable entities."),
        schema=vip_item_schema,
        required=False,
        update_allowed=True,
    )
    dns_info_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) Service discovery specific data including fully qualified domain name, type and Time-To-Live of the DNS record."),
        schema=DnsInfo.properties_schema,
        required=True,
        update_allowed=False,
    )
    dns_info_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 17.1.1) Service discovery specific data including fully qualified domain name, type and Time-To-Live of the DNS record."),
        schema=dns_info_item_schema,
        required=False,
        update_allowed=True,
    )
    vrf_context_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Virtual Routing Context that the Virtual Service is bound to. This is used to provide the isolation of the set of networks the application is attached to. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    east_west_placement_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.1) Force placement on all Service Engines in the Service Engine Group (Container clouds only) (Default: False)"),
        required=False,
        update_allowed=True,
    )
    cloud_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) "),
        required=False,
        update_allowed=False,
    )
    vsvip_cloud_config_cksum_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.2.9) Checksum of cloud configuration for VsVip. Internally set by cloud connector"),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'avi_version',
        'name',
        'vip',
        'dns_info',
        'vrf_context_uuid',
        'east_west_placement',
        'cloud_uuid',
        'vsvip_cloud_config_cksum',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'avi_version': avi_version_schema,
        'name': name_schema,
        'vip': vip_schema,
        'dns_info': dns_info_schema,
        'vrf_context_uuid': vrf_context_uuid_schema,
        'east_west_placement': east_west_placement_schema,
        'cloud_uuid': cloud_uuid_schema,
        'vsvip_cloud_config_cksum': vsvip_cloud_config_cksum_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'vrf_context_uuid': 'vrfcontext',
        'vip': getattr(Vip, 'field_references', {}),
        'dns_info': getattr(DnsInfo, 'field_references', {}),
    }
    # NOTE(review): presumably unique-key metadata of the nested object
    # types used when diffing LIST properties -- confirm against
    # AviResource before relying on it.
    unique_keys = {
        'vip': getattr(Vip, 'unique_keys', {}),
        'dns_info': getattr(DnsInfo, 'unique_keys', {}),
    }
def resource_mapping():
    """Map Heat resource type names to their implementation classes."""
    mapping = {
        'Avi::LBaaS::VsApicExtension': VsApicExtension,
        'Avi::LBaaS::VirtualService': VirtualService,
        'Avi::LBaaS::VsVip': VsVip,
    }
    return mapping
|
Marmot, husky, mountain goat or rabbit — discover the stuffed animals that talk, thanks to Créations Dani! These endearing soft toys will brighten up your day by repeating everything you tell them. Laughs are guaranteed!
|
from __future__ import division
import multiprocessing
import os
import os.path
import sys
import yaml, collections
import numpy as np
from time import sleep
import math
import argparse
import itertools
# Shared state for worker processes: 'counter' staggers worker start-up
# times; both names are re-bound inside each worker by init() via the
# pool initializer.
counter = None
counter_lock = multiprocessing.Lock()
proc_per_processor = 0;
def flatten(x):
    """Recursively flatten an arbitrarily nested iterable into a flat list.

    A non-iterable scalar comes back wrapped in a single-element list.
    Strings are treated as atoms rather than iterated character-by-character,
    which would otherwise recurse forever (each character is itself a
    one-character iterable string).
    """
    # collections.Iterable moved to collections.abc and was removed from
    # the top-level collections namespace in Python 3.10; fall back to the
    # legacy location for Python 2.
    iterable_type = getattr(collections, 'abc', collections).Iterable
    if isinstance(x, iterable_type) and not isinstance(x, str):
        return [a for i in x for a in flatten(i)]
    else:
        return [x]
def main():
    """Entry point: generate per-run YAML configs and launch them in parallel."""
    # parse arguments
    parser = argparse.ArgumentParser(description="Parser")
    parser.add_argument('-c', '--cores', type=int, help='specify maximum number of cores')
    args = parser.parse_args()
    # Cap the worker count at the machine's CPU count (soft default of 32).
    if args.cores:
        args.cores = min(multiprocessing.cpu_count(), args.cores)
    else:
        args.cores = min(multiprocessing.cpu_count(), 32)
    print 'Using {} cores.'.format(args.cores)
    prepare_multiprocessing()
    # for walking with yaml files: register representer/constructor so
    # mappings keep their key order on load and dump (OrderedDict).
    _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
    yaml.add_representer(collections.OrderedDict, dict_representer)
    yaml.add_constructor(_mapping_tag, dict_constructor)
    # Parameters: one experiment repetition per run index (0..29).
    runs = range(30)
    options = []
    for r in itertools.product(runs): options.append(r)
    options = [flatten(tupl) for tupl in options]
    # Main
    rl_run_param(args, ["leo/leosim_rl_sym_full/leosim_sarsa_walk_egreedy.yaml", "leo/leosim_rl_sym_full/leosim_sarsa_walk_ou.yaml"], options)
######################################################################################
def rl_run_param(args, list_of_cfgs, options):
    """Generate one config file per (cfg, option) combination, then run all.

    Each generated config lives under ./tmp and gets a name encoding its
    option values; visualization settings are stripped before writing.
    """
    list_of_new_cfgs = []
    loc = "tmp"
    if not os.path.exists(loc):
        os.makedirs(loc)
    for cfg in list_of_cfgs:
        conf = read_cfg(cfg)
        # after reading cfg can do anything with the name
        fname, fext = os.path.splitext( cfg.replace("/", "_") )
        for o in options:
            # Encode each option value as a zero-padded fixed-point string.
            str_o = "-".join(map(lambda x : "{:05d}".format(int(round(10000*x))), o[:-1])) # last element in 'o' is reserved for mp
            str_o += "mp{}".format(o[-1])
            print "Generating parameters: {}".format(str_o)
            # create local filename
            list_of_new_cfgs.append( "{}/{}-{}{}".format(loc, fname, str_o, fext) )
            # modify options so outputs are uniquely named per run
            conf['experiment']['output'] = "{}-{}".format(fname, str_o)
            if "exporter" in conf['experiment']['environment']:
                conf['experiment']['environment']['exporter']['file'] = "{}-{}".format(fname, str_o)
            conf = remove_viz(conf)
            write_cfg(list_of_new_cfgs[-1], conf)
    #print list_of_new_cfgs
    do_multiprocessing_pool(args, list_of_new_cfgs)
######################################################################################
def mp_run(cfg):
    """Worker: stagger start-up, then run one experiment via ./grld.

    Multiple copies can be run on one computer at the same time, which
    results in the same seed for a random generator. Thus we need to wait
    between runs: each worker sleeps for the current value of the shared
    counter, which advances by 2 seconds per launched job.
    """
    global counter
    global proc_per_processor
    with counter_lock:
        wait = counter.value
        counter.value += 2
    # wait for the specified number of seconds
    #print 'floor {0}'.format(math.floor(wait / multiprocessing.cpu_count()))
    #wait = wait % multiprocessing.cpu_count() + (1.0/proc_per_processor.value) * math.floor(wait / multiprocessing.cpu_count())
    #print 'wait {0}'.format(wait)
    sleep(wait)
    print 'wait finished {0}'.format(wait)
    # Run the experiment; any non-zero exit code is appended to
    # bailing.out so failed configurations can be inspected afterwards.
    code = os.system('./grld %s' % cfg)
    if not code == 0:
        errorString = "Exit code is '{0}' ({1})".format(code, cfg)
        print errorString
        f = open("bailing.out", "a")
        try:
            f.write(errorString + "\n")
        finally:
            f.close()
######################################################################################
def init(cnt, num):
    """Pool initializer: publish the shared Values as module globals.

    Runs once inside every worker process so mp_run() can reach the
    shared start-up counter and the per-processor job count.
    """
    global counter, proc_per_processor
    counter = cnt
    proc_per_processor = num
######################################################################################
def do_multiprocessing_pool(args, list_of_new_cfgs):
    """Fan the generated configs out over a multiprocessing worker pool.

    The shared integer counter staggers worker start-up (see mp_run);
    both shared Values reach the workers through the pool initializer.
    """
    counter = multiprocessing.Value('i', 0)
    proc_per_processor = multiprocessing.Value('d', math.ceil(len(list_of_new_cfgs)/args.cores))
    print 'proc_per_processor {0}'.format(proc_per_processor.value)
    # initargs are re-exported as module globals in each worker by init().
    pool = multiprocessing.Pool(args.cores, initializer = init, initargs = (counter, proc_per_processor))
    # map() blocks until every config has been processed.
    pool.map(mp_run, list_of_new_cfgs)
    pool.close()
######################################################################################
def prepare_multiprocessing():
    """Truncate bailing.out so a fresh batch starts with an empty error log."""
    # clean bailing.out file
    with open("bailing.out", "w"):
        pass
######################################################################################
def read_cfg(cfg):
    """Load YAML config *cfg* (relative to ../qt-build/cfg) as a dict.

    Exits the process when the file is missing.
    """
    # check if file exists
    yfile = '../qt-build/cfg/%s' % cfg
    if os.path.isfile(yfile) == False:
        print 'File %s not found' % yfile
        sys.exit()
    # open configuration
    # NOTE: file() is the Python 2-only spelling of open().
    stream = file(yfile, 'r')
    conf = yaml.load(stream)
    stream.close()
    return conf
######################################################################################
def write_cfg(outCfg, conf):
    """Serialize configuration dict *conf* as YAML to the path *outCfg*.

    Uses open() instead of the Python 2-only file() builtin, and a context
    manager so the handle is closed even if yaml.dump raises.
    """
    # create local yaml configuration file
    with open(outCfg, 'w') as outfile:
        yaml.dump(conf, outfile)
######################################################################################
def remove_viz(conf):
    """Strip all visualization settings from a configuration tree.

    Zeroes the environment's 'visualize' flags (including the nested
    target_env) and drops the top-level visualizer sections entirely.
    Mutates *conf* in place and returns it.
    """
    env = conf['experiment']['environment']
    if "visualize" in env:
        env['visualize'] = 0
    if "target_env" in env:
        if "visualize" in env['target_env']:
            env['target_env']['visualize'] = 0
    for section in ("visualizer", "visualization", "visualization2"):
        conf.pop(section, None)
    return conf
######################################################################################
def dict_representer(dumper, data):
    """PyYAML representer: dump an OrderedDict as a plain YAML mapping.

    NOTE: iteritems() is Python 2 only.
    """
    return dumper.represent_dict(data.iteritems())
######################################################################################
def dict_constructor(loader, node):
    """PyYAML constructor that preserves mapping key order via OrderedDict."""
    pairs = loader.construct_pairs(node)
    return collections.OrderedDict(pairs)
######################################################################################
# Script entry point.
if __name__ == "__main__":
    main()
|
An exhibition which will provide an unparalleled insight into the workings of the artist's mind, 'Leonardo da Vinci: A Life in Drawing' is part of a nationwide event organised by Royal Collection Trust to mark 500 years since the Renaissance master's death. In total, 144 of Leonardo's drawings will go on display in 12 simultaneous exhibitions across the UK, in an initiative designed to give the widest-ever UK audience the opportunity to see the work of this extraordinary artist.
The Ulster Museum's exhibition will include two of Leonardo's most famous works – The Head of St Anne, made around 1510 in preparation for his famous masterpiece The Virgin and Child with St Anne, which hangs in the Louvre, and an anatomical drawing from 1489, The Skull Sectioned.
The exhibition will include examples of all the drawing materials employed by the artist, including pen and ink, red and black chalks, watercolour and metalpoint.
Leonardo da Vinci: A Life in Drawing will run at the Ulster Museum from 1 February until 6 May 2019 and will be accompanied by a series of events including special tours, lectures, art sessions and sketching workshops. Admission to the exhibition will be free.
In May 2019, following the exhibitions at Royal Collection Trust's partner venues, the drawings will be brought together to form part of an exhibition of over 200 sheets at The Queen's Gallery, Buckingham Palace, the largest exhibition of Leonardo's work in over 65 years.
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
from confparser import load_config
from expyriment import stimuli, misc
def launch_instructions(instructions_ini, exp):
    """Display the instruction slides described by *instructions_ini*.

    Reads a '|'-separated CSV of instruction lines, shows each slide with
    expyriment, and navigates with RETURN (next), LEFT (previous) and 'h'
    (back to main menu). Lines ending in 'fdbk_yes'/'fdbk_no' are practice
    questions: the participant answers YES/NO and gets correctness feedback.

    NOTE: Python 2 code (str.decode(), map() results passed directly to
    expyriment widgets).
    """
    # Select .ini file for instructions
    setting = load_config(instructions_ini)
    # Define the pathway of the instructions file
    instructions_fname = ''.join((setting["inst_filename"], ".csv"))
    instructions_dir = os.path.abspath((setting["inputs_dir"]))
    instructions_path = os.path.join(instructions_dir, instructions_fname)
    # Generate a dataframe containing the instructions
    df_inst = pd.read_csv(instructions_path, sep='|')
    # Convert the dataframe into a list
    instructions = df_inst.values.tolist()
    # Convert each element of the dataframe into a string
    instructions = [[''.join(instructions[i][j])
                     for j in np.arange(len(instructions[i]))]
                    for i in np.arange(len(df_inst))]
    # Initialization of variable containing the value of the key pressed
    found_key = 0
    response_key = 0
    # While "h" key to return to main menu is not pressed...
    while not (found_key == misc.constants.K_h or response_key == 'h'):
        # Read the instructions file, line by line
        ldx = 0
        while ldx < len(instructions):
            line = instructions[ldx]
            # ... and item by item
            for word in line:
                # For lines with one item (control markers, not displayed)
                if word in ("no_item", "no_probe", "fdbk_yes",
                            "fdbk_no"):
                    pass
                # For lines corresponding to the examples, i.e. containing
                # more than one item: show each word for 300 ms
                else:
                    text_display = stimuli.TextBox(
                        word.decode('utf-8'),
                        map(int, setting["box_size"]),
                        position=map(int, setting["box_position"]),
                        text_size=setting["txtsize"],
                        text_colour=map(int, setting["txtcolour"]))
                    text_display.present()
                    exp.clock.wait(300)
                    # Check whether "h" key was pressed
                    found_key = exp.keyboard.check([misc.constants.K_h])
                    # If yes, breaks the loop
                    if found_key == misc.constants.K_h:
                        break
            # If "h" key was pressed during the presentation of the example,
            # it breaks the loop and return to main menu
            if found_key == misc.constants.K_h:
                break
            # After the display of the last word of sentence's example,
            # goes straight to the next line of instructions
            elif line[-1] not in ("no_item", "fdbk_yes", "fdbk_no"):
                exp.clock.wait(300)
            # Waits for the participant's response and gives feedback whether
            # the answer was correct or not
            elif line[-1] in ("fdbk_yes", "fdbk_no"):
                response_key, _ = exp.keyboard.wait_char([setting["YES"],
                                                          setting["NO"], 'h'])
                if response_key == 'h':
                    break
                elif ((response_key == setting["YES"] and
                       line[-1] == "fdbk_yes") or
                      (response_key == setting["NO"] and
                       line[-1] == "fdbk_no")):
                    message_display = stimuli.TextLine(
                        "Correct!", text_size=setting["txtsize"],
                        text_colour=(0, 204, 0))
                    message_display.present()
                    exp.clock.wait(2000)
                else:
                    message_display = stimuli.TextLine(
                        "Incorrect!", text_size=setting["txtsize"],
                        text_colour=(255, 0, 0))
                    message_display.present()
                    exp.clock.wait(2000)
            # Checks whether "ENTER", "LEFT" or "h" key were pressed.
            # If "ENTER", goes to the next line;
            # if "LEFT", goes to the previous slide
            # if "h", returns to main menu.
            else:
                found_key, _ = exp.keyboard.wait([misc.constants.K_RETURN,
                                                  misc.constants.K_LEFT,
                                                  misc.constants.K_h])
                if found_key == misc.constants.K_LEFT:
                    # Step back one slide: -2 now, +1 at the loop bottom.
                    ldx = ldx - 2
                    if ldx < 0:
                        ldx = -1
                elif found_key == misc.constants.K_h:
                    break
            ldx = ldx + 1
|
One might think that wood business cards are just a concept. Well, today we would like to tell you that it’s a growing reality. Your business card is your most essential element of your visual identity because it is the first thing that people see when they meet you.
A cool and creative business card collection, wooden- and furniture-inspired: using a wooden business card is one of the popular crazes of recent times. So today we have hand-selected the best wooden business card examples. We hope that you will enjoy this wonderful showcase. Don’t forget to share your thoughts in our comment section.
|
# -*- python -*-
"""
File-Content Substitution builder for SCons
"""
__author__ = "Hartmut Goebel <h.goebel@crazy-compilers.com>"
import os, re
import SCons
def _action(target, source, env):
    """Generate each target from its source by substituting delimited symbols.

    Text between FILESUBSTDELIM pairs (e.g. @SYM@) is looked up in the
    construction environment and expanded via env.subst(); symbols that
    cannot be expanded are left in place untouched.
    """
    def _substitute(matchobj, env=env):
        sym = matchobj.group(1)
        try:
            return env.subst(str(env[sym]))
        except: # TypeError: # sym not a string
            txt = matchobj.group(0) # the string matched
            print 'Not substituting', txt
            return txt
    delim = re.escape(env['FILESUBSTDELIM'])
    # compile a non-greedy pattern
    subst_pattern = re.compile('%s(.*?)%s' % (delim, delim))
    for t, s in zip(target, source):
        t = str(t)
        s = s.rstr()
        text = open(s, 'rb').read()
        text = subst_pattern.sub(_substitute, text)
        open(t, 'wb').write(text)
        # Carry the source file's permission bits over to the target.
        os.chmod(t, os.stat(s)[0])
    return None
def _strfunc(target, source, env):
return "generating '%s' from '%s'" % (target[0], source[0])
# Builder wiring _action/_strfunc into SCons; source files default to the
# '.in' suffix (foo.txt is generated from foo.txt.in).
_builder = SCons.Builder.Builder(
    action = SCons.Action.Action(_action, _strfunc),
    src_suffix = '.in',
)
def generate(env):
    """SCons tool entry point: register the FileSubst builder and its
    default '@' substitution delimiter on the environment."""
    env['FILESUBSTDELIM'] = '@'
    env['BUILDERS']['FileSubst'] = _builder
def exists(env):
    """SCons tool availability check -- pure Python, so always available."""
    return 1
|
We will also get to know each other better at your complimentary engagement session.
Our Collections begin at $3250. Contact us today to inquire about our availability for your wedding date. Below are some of our additional products and services for you to love.
Whether your destination is across the state, across the country, or across the world, don't just settle for whichever photographer your destination resort provides/recommends. You can have the same confidence of all my local brides after meeting me in person, seeing my work, and experiencing your engagement session. Besides, I love to travel and will help you create a custom collection that will fit all of your needs.
Did I mention that I love to travel?! Maybe there's a special place that you would love to use for your engagement or senior photos but it is out of the area. Talk to me about what you have in mind. We've even gone on road trips with our couples before!
Announce your graduation, wedding date, or the birth of your baby with a custom-designed card or magnet featuring one or more of your photos. We also offer custom thank you cards, wedding invitations, and more.
Let your guests have fun looking at your engagement photos as they leave you congratulatory messages and words of advice.
|
import httplib
import urllib,urllib2,re,sys
import cookielib,os,string,cookielib,StringIO,gzip
import os,time,base64,logging
import xml.dom.minidom
from xml.dom.minidom import Document

from t0mm0.common.net import Net

import xbmc
import xbmcaddon,xbmcplugin,xbmcgui
# Addon handle and the path of the bundled channel/movie catalogue XML.
__settings__ = xbmcaddon.Addon(id='plugin.video.DKEvents')
home = __settings__.getAddonInfo('path')
filename = xbmc.translatePath(os.path.join(home, 'resources', 'DKEvents.xml'))
def SearchXml(SearchText):
    """Add a directory entry for every movie whose name starts with
    *SearchText*; the sentinel '-1' lists titles that do NOT begin with
    a letter.

    NOTE(review): BuildXMl() is not defined anywhere in this file --
    presumably supplied by another module of the plugin; confirm.
    """
    if os.path.isfile(filename)==False:
        BuildXMl()
    f = open(filename, "r")
    text = f.read()
    if SearchText=='-1':
        # Sentinel: match titles whose first character is not a letter.
        match=re.compile('<movie name="[^A-Za-z](.+?)" url="(.+?)" year="(.+?)"/>', re.IGNORECASE).findall(text)
        SearchText=""
    else:
        match=re.compile('<movie name="' + SearchText + '(.+?)" url="(.+?)" year="(.+?)"/>', re.IGNORECASE).findall(text)
    for i in range(len(match)):
        (mName,mNumber,vyear)=match[i]
        addDir(SearchText+mName,mNumber,6,"")
def ParseXml(tagname):
    """Add a playable entry for every <item> of the channel named *tagname*."""
    f = open(filename, "r")
    text = f.read()
    xmlcontent=xml.dom.minidom.parseString(text)
    items=xmlcontent.getElementsByTagName('channel')
    print "calling " + tagname
    # Only channels that actually contain items and whose <name> matches
    # the requested tag are expanded.
    for channelitem in items:
        if(len(channelitem.getElementsByTagName('item'))>=1 and channelitem.getElementsByTagName('name')[0].childNodes[0].data==tagname):
            chitems = channelitem.getElementsByTagName('item')
            for itemXML in chitems:
                vname=itemXML.getElementsByTagName('title')[0].childNodes[0].data.strip()
                vurl=itemXML.getElementsByTagName('link')[0].childNodes[0].data.strip()
                vimg=itemXML.getElementsByTagName('thumbnail')[0].childNodes[0].data.strip()
                addLink(vname,vurl,3,vimg)
def GetXMLChannel():
    """Populate the root menu with one directory entry per <channel>."""
    with open(filename, "r") as handle:
        document = xml.dom.minidom.parseString(handle.read())
    for channel in document.getElementsByTagName('channel'):
        title = channel.getElementsByTagName('name')[0].childNodes[0].data.strip()
        addDir(title, "", 2, "")
def playVideo(url):
    """Hand *url* straight to the built-in XBMC player."""
    player = xbmc.Player()
    player.play(url)
def addLink(name,url,mode,iconimage):
    """Add a playable (non-folder) list item pointing back at this plugin."""
    query = "{0}?url={1}&mode={2}&name={3}".format(
        sys.argv[0], urllib.quote_plus(url), mode, urllib.quote_plus(name))
    item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png",
                            thumbnailImage=iconimage)
    item.setInfo(type="Video", infoLabels={"Title": name})
    # Replace any inherited context menu with an empty one.
    item.addContextMenuItems([], replaceItems=True)
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=query,
                                       listitem=item)
def addNext(formvar,url,mode,iconimage):
    """Add a 'Next >' pagination folder entry carrying *formvar*."""
    query = "{0}?url={1}&mode={2}&formvar={3}&name={4}".format(
        sys.argv[0], urllib.quote_plus(url), mode, formvar,
        urllib.quote_plus('Next >'))
    item = xbmcgui.ListItem('Next >', iconImage="DefaultVideo.png",
                            thumbnailImage=iconimage)
    item.setInfo(type="Video", infoLabels={"Title": 'Next >'})
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=query,
                                       listitem=item, isFolder=True)
def addDir(name,url,mode,iconimage):
    """Add a browsable folder entry pointing back at this plugin."""
    query = "{0}?url={1}&mode={2}&name={3}".format(
        sys.argv[0], urllib.quote_plus(url), mode, urllib.quote_plus(name))
    item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png",
                            thumbnailImage=iconimage)
    item.setInfo(type="Video", infoLabels={"Title": name})
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=query,
                                       listitem=item, isFolder=True)
def get_params():
    """Parse the plugin's query string (sys.argv[2]) into a dict.

    Returns an empty list (legacy behavior preserved) when the query string
    is shorter than two characters. Pairs that do not contain exactly one
    '=' are silently dropped, and values are NOT url-unquoted here --
    callers do that themselves.
    """
    raw = sys.argv[2]
    if len(raw) < 2:
        return []
    parsed = {}
    for pair in raw.replace('?', '').split('&'):
        pieces = pair.split('=')
        if len(pieces) == 2:
            parsed[pieces[0]] = pieces[1]
    return parsed
# ---- plugin entry point: parse the query string and dispatch on 'mode' ----
params=get_params()
url=None
name=None
mode=None
formvar=None
# Every parameter is optional; a missing key simply leaves its default None.
try:
    url=urllib.unquote_plus(params["url"])
except:
    pass
try:
    name=urllib.unquote_plus(params["name"])
except:
    pass
try:
    mode=int(params["mode"])
except:
    pass
try:
    formvar=int(params["formvar"])
except:
    pass
sysarg=str(sys.argv[1])
# mode None -> root channel menu; 2 -> items of a channel; 3 -> play a video.
if mode==None:
    GetXMLChannel()
elif mode==2:
    ParseXml(name)
elif mode==3:
    playVideo(url)
xbmcplugin.endOfDirectory(int(sysarg))
|
Paul Aucoin, Dean of Academic Services and Registrar at Samford University, received two top national honors April 18 during the annual meeting of the American Association of Collegiate Registrars and Admissions Officers (AACRAO) in Charlotte, N.C.
He was awarded the Distinguished Service Award and the Achieving Professional Excellence (APEX) Award. The latter award includes a $5,000 gift to the general scholarship fund at Samford. It was the first time in the 85-year history of the organization that the same person has received both awards.
Also during the meeting, he was elected chair-elect of AACRAO's 1999-2000 Nominations and Elections Committee by vote of the membership.
Dr. Aucoin was cited for his committee work, presentations at conferences and service as editor of the recently revised Academic Record and Transcript Guide. He has led AACRAO in creating and implementing electronic tools which benefit membership communication. He is also editor of AACRAO NetNews and assists with production of Data Dispenser, AACRAO's monthly newsletter.
AACRAO includes 9,000 members from the U.S., Canada, South America, Europe and Asia.
|
#!/usr/bin/python3
import numpy as np
from learning.model_free import Problem
from learning.model_free import sarsa
from learning.model_free import qlearning
from learning.model_free import mc_value_iteration
from learning.model_free import sarsa_lambda
from learning.model_free import q_lambda
from learning.model_building import dyna_q_learning
from learning.model_building import dyna_q_learning_last_visit
from learning.model_building import dyna_q_learning_stochastic
class GridObstacles(Problem):
    """6x9 grid world with obstacles (Sutton & Barto 'Dyna Maze' style).

    States are flat indices row * n + col; the four actions move down, up,
    right and left. The reward is 1.0 on the transition that enters the
    goal state and 0.0 everywhere else.
    """

    def __init__(self):
        self.m = 6
        self.n = 9
        # Obstacle mask: 1 marks a blocked cell.
        # Bug fix: the np.int alias was deprecated in NumPy 1.20 and
        # removed in 1.24 -- use the builtin int.
        self.obstacles = np.zeros((self.m, self.n), dtype=int)
        self.obstacles[1:4, 2] = 1.0
        self.obstacles[4, 5] = 1.0
        self.obstacles[0:3, 7] = 1.0
        self.start = self.coord_to_state(2, 0)
        self.goal = self.coord_to_state(0, 8)
        self.init_actions()
        Problem.__init__(self, self.m * self.n, 4)

    def sample_initial_state(self):
        """Episodes always begin at the fixed start state."""
        return self.start

    def init_actions(self):
        """Precompute, per state, the list of moves that stay on the grid."""
        self._actions = []
        for s in range(self.m * self.n):
            s_actions = []
            i, j = self.state_to_coord(s)
            if self.valid_coord(i + 1, j):
                s_actions.append(0)  # down
            if self.valid_coord(i - 1, j):
                s_actions.append(1)  # up
            if self.valid_coord(i, j + 1):
                s_actions.append(2)  # right
            if self.valid_coord(i, j - 1):
                s_actions.append(3)  # left
            self._actions.append(s_actions)
        # Row/column offsets matching the action indices 0..3 above.
        self._action_offsets = [(1, 0), (-1, 0), (0, 1), (0, -1)]

    def actions(self, s):
        """Return the valid action indices for state *s*."""
        return self._actions[s]

    def state_reward(self, s, a):
        """Apply action *a* in state *s*; return (next_state, reward)."""
        if a not in self._actions[s]:
            raise Exception('State {0} does not allow action {1}'.format(s, a))
        i, j = self.state_to_coord(s)
        di, dj = self._action_offsets[a]
        nexti, nextj = i + di, j + dj
        nexts = self.coord_to_state(nexti, nextj)
        # Reward only on the transition that first enters the goal.
        if not self.is_final(s) and self.is_final(nexts):
            return (nexts, 1.0)
        else:
            return (nexts, 0.0)

    def is_final(self, s):
        return s == self.goal

    def state_to_coord(self, s):
        # Bug fix: '/' is true division under Python 3 (this file has a
        # python3 shebang), which produced a float row index; use floor
        # division so coordinates stay integers.
        return (s // self.n, s % self.n)

    def coord_to_state(self, i, j):
        return i * self.n + j

    def valid_coord(self, i, j):
        """True when (i, j) lies on the grid and is not an obstacle."""
        return i >= 0 and i < self.m \
            and j >= 0 and j < self.n \
            and not self.obstacles[i, j]

    def print_policy(self, pi):
        """Pretty-print a flat policy as an m x n grid of arrows.

        Bug fix: the original used the Python 2 idiom print(...), with a
        trailing comma, which under Python 3 printed one cell per line;
        use end=" " to keep each row on one line.
        """
        pi = pi.reshape((self.m, self.n))
        actions = ['v', '^', '>', '<']
        for i in range(self.m):
            for j in range(self.n):
                if self.is_final(self.coord_to_state(i, j)):
                    print("*", end=" ")
                elif self.start == self.coord_to_state(i, j):
                    print("*", end=" ")
                elif self.obstacles[i, j]:
                    print("-", end=" ")
                else:
                    print(actions[pi[i, j]], end=" ")
            print('')

    def print_values(self, v):
        """Print the value function as an m x n grid (2 decimal places)."""
        np.set_printoptions(precision=2)
        print(v.reshape((self.m, self.n)))
def main():
    """Run the grid-world problem through every implemented RL algorithm.

    Each algorithm trains on the same GridObstacles instance and the
    resulting policy and state values are printed for visual comparison.
    """
    problem = GridObstacles()
    # Temporal-difference control.
    pi, v = sarsa(problem, 1000, epsilon=0.1, alpha=0.1, gamma=1.0)
    problem.print_policy(pi)
    problem.print_values(v)
    pi, v = qlearning(problem, 1000, epsilon=0.1, alpha=0.1, gamma=1.0)
    problem.print_policy(pi)
    problem.print_values(v)
    # Eligibility-trace variants.
    pi, v = sarsa_lambda(problem, 1000, epsilon=0.1, alpha=0.1, gamma=1.0)
    problem.print_policy(pi)
    problem.print_values(v)
    pi, v = q_lambda(problem, 1000, epsilon=0.1, alpha=0.1, gamma=1.0)
    problem.print_policy(pi)
    problem.print_values(v)
    # Monte-Carlo control.
    pi, v = mc_value_iteration(problem, 1000, 1000, 0.2)
    problem.print_policy(pi)
    problem.print_values(v)
    # Model-building (Dyna-Q family) methods.
    pi, v = dyna_q_learning(problem, 30, 50, epsilon=0.1, alpha=0.1, gamma=0.9)
    problem.print_policy(pi)
    problem.print_values(v)
    pi, v = dyna_q_learning_last_visit(
        problem, 30, 50, epsilon=0.1, alpha=0.1, gamma=0.9, kappa=0.00)
    problem.print_policy(pi)
    problem.print_values(v)
    pi, v = dyna_q_learning_stochastic(
        problem, 30, 50, epsilon=0.1, alpha=0.1, gamma=0.9)
    problem.print_policy(pi)
    problem.print_values(v)
# Script entry point.
if __name__ == "__main__":
    main()
|
Altruistic World Online Library • View topic - Monsanto's Smear Merchant Attacks, by Food Democracy Now!
Monsanto's Smear Merchant Attacks, by Food Democracy Now!
We’re getting close to victory, so now Monsanto and their media attack dogs are getting desperate. In a new attack on a research scientist and Food Democracy Now!, they’ve gone so far as publish our home addresses and cell phone numbers to silence us. But we’re not backing down, we’re just getting started!
I’ll admit, this is an email that I had hoped I would never have to write.
Last week, in an effort to retaliate against Food Democracy Now! for daring to stand up and tell the truth about GMOs and stop a prominent research scientist from being attacked publicly, a Monsanto smear merchant published my personal cell phone numbers and a home address on the Internet in an effort to intimidate us. This is a new low.
I can’t imagine how executives at Monsanto or their paid flack, Jon Entine, who runs the biotech industry sponsored website Genetic Literacy Project, can sleep with themselves at night, but this is how far they’re willing to go in their efforts to silence us.
We can’t let Monsanto and their biotech bullies win because they’re willing to sink this low. This is a fight for the future of our democracy and our planet – and at Food Democracy Now! we will not be intimidated or silenced.
This below the belt personal attack against us was the result of us refusing to be silent when the Genetic Literacy Project published the home address, home and cell phone number and email of Dr. Don Huber, a plant pathologist with a long history of exposing the fraudulent science behind GMOs and Monsanto’s bestselling weed killer Roundup.
When we saw the attack piece on Dr. Huber on Genetic Literacy Project’s website, we knew that the other side was just getting started with their more vicious public attacks against those of us who are on the front lines of exposing the biotech industry’s corruption of science and our democracy. And we knew that we had to stand up.
Food Democracy Now! @food_democracy. Dear @MonsantoCo your smear merchant @JonEntine is publishing home addresses and phone numbers. Are these your new terms of engagement?
The good news, within minutes of this Tweet, the Genetic Literacy Project removed Dr. Huber’s home address and phone numbers. But this hasn’t ended the campaign of harassment against him and others on our side who dare to stand up to Monsanto’s corruption of our elected officials, our regulatory process, our democracy and ultimately our food supply.
In retaliation, the Genetic Literacy Project attacked us and published personal information in hopes that they could silence us. He and his financial supporters at Monsanto made a big mistake, because we’re not backing down.
We can’t afford to be silent, especially not to a bully like Monsanto or their paid smear merchants.
There’s too much at stake and we’ve fought too hard to let these desperate tactics stop us when we’re so close to our goal of winning GMO labeling in Washington DC.
And make no doubt about it, the national movement to label GMOs and expose Monsanto has got them and their cronies on the run. So now they’re getting desperate to stop us – but we can’t let them.
In the past 4 years, you’ve help build a movement that’s driven the national conversation on GMOs from California in 2012 to DC today – and now the opposition is getting desperate.
Monsanto has $15 Billion in annual sales, so we are up against a lot. But because of people like you, our movement grows stronger every day and the world is rejecting GMOs. Every week a new country is banning GMOs or Roundup, and Monsanto knows that if they lose control of science and regulatory policy in the U.S. they will lose worldwide. Which is why our work at Food Democracy Now! is such a threat.
In reality, these ruthlessly underhanded tactics are a sign of the fact that we’re winning the fight for GMO labeling and transparency in our food supply, but that doesn’t make it any easier when you have a family and loved ones to protect.
The truth is the more we fight and win, the more these despicable tactics are going to be used against us.
Ever since we started Food Democracy Now!, I knew that we were going to be fighting an uphill battle against some of the most powerful companies on the planet and it wasn’t going to be easy.
This is a dark moment for our movement, but we have faith in the fact that we have hundreds of thousands of people, if not millions, who are waking up everyday to Monsanto’s corruption and the movement to end their corruption is just getting started!
We can’t wait any longer for efforts that stall simple reforms, such as common sense labeling of GMO foods, and we’re certainly not going to allow tactics like this to silence us.
Thanks for standing with us. Be a part of this Grassroots Surge to defeat Monsanto’s lie machine!
Thank you for participating in food democracy. Together we are unstoppable!
|
import re,os.path,sys,datetime
#Time: two-digit year/month/day stamp shared by both file names (e.g. '240131')
t1 = '{0:%y%m%d}'.format(datetime.datetime.now())
#Files and search string in HTTP request
tmpFile = 'C:\\log\\tmpCounter'+ t1 + '.txt'
logfile = 'C:\\inetpub\\logs\\LogFiles\\W3SVC1\\u_ex' + t1 + '_x.log'
searchString = 'some_string_in_url'
# Creating temp file to track checked lines in log file: it stores the
# line number where the previous run stopped, so each check only counts
# new events.
if not os.path.exists(tmpFile):
    with open(tmpFile, 'w') as t:
        t.write('0')
# Bail out with Nagios WARNING (exit code 1) when today's IIS log is missing.
if not os.path.exists(logfile):
    # Bug fix: the original format string had no '{}' placeholder, so the
    # log file path was never interpolated into the message.
    print('Log file {} does not exist'.format(logfile))
    sys.exit(1)
#Regexp for searched string: 200 0 0 218 => time of execution (218 ms, last value)
# Matches the four trailing numeric IIS fields; group(4) is time-taken in ms.
regex1 = re.compile(r'\s([0-9]+)\s([0-9]+)\s([0-9]+)\s([0-9]+)\s')
#Line counter
c = 0
#Event counters
j = 0  # requests taking >= 5000 ms
m = 0  # requests taking >= 30000 ms
#Time execution array
timeExcecute = []
#Reading line in temp file where to start counting events (point where parsing stopped in previous check)
with open(tmpFile,'r') as t:
    intl1 = int(t.readline())
#Parsing log file
with open(logfile,'r') as logIIS:
    for line in logIIS:
        #If line count is bigger and equal than point of last check time of executions will be added to array
        # (i.e. only lines not yet seen by the previous run are inspected)
        if c >= intl1 :
            if searchString in line:
                r1 = regex1.search(str(line))
                timeExcecute.append(int(r1.group(4)))
        c += 1
#Longer execution time count
for k in timeExcecute:
    if k >= 30000:
        m += 1
#Shorter execution time count (in miliseconds)
# NOTE(review): this also counts the >=30000 ms requests, since every
# very slow request is >= 5000 ms as well.
for i in timeExcecute:
    if i >= 5000:
        j += 1
#Writing count of checked lines in log file, next check counting of critical events will start from this line
with open(tmpFile,'w') as t:
    t.write(str(c))
#Nagios plugin related part
# Exit codes follow the Nagios convention: 0=OK, 1=WARNING, 2=CRITICAL.
# The >=30000 ms counter (m) takes precedence over the >=5000 ms one (j).
if m >= 5:
    print('URL {} execution time (30000 ms) is CRITICAL'.format(searchString))
    sys.exit(2)
if m >= 3 and m < 5:
    print('URL {} execution time (30000 ms) is WARNING'.format(searchString))
    sys.exit(1)
if j >= 30:
    print('URL {} execution time (5000 ms) is CRITICAL'.format(searchString))
    sys.exit(2)
# The final else pairs with this 'if': anything below both warning
# thresholds reports OK.
if j >= 20 and j < 30:
    print('URL {} execution time (5000 ms) is WARNING'.format(searchString))
    sys.exit(1)
else:
    print('URL {} execution time is OK'.format(searchString))
    sys.exit(0)
|
Whether you're in a country town, a suburban area, or Sydney, Melbourne, Adelaide, Perth, Brisbane or Canberra, we have plumbers in hundreds of locations across Australia. To book a visit from the Tap Doctor, call us now on 1300-655-827; we're available to take your call 24/7. We're not just any plumber: once you've had the Tap Doctor experience, you'll rave about our service. This is the way plumbing should have been all along.
Had a great experience dealing with Tap Doctor? Share how great it was.
Owner of Tap Doctor? Click here to edit or enhance this listing.
|
import os
from plugins import BaseAssembler
from yapsy.IPlugin import IPlugin
class SwapAssembler(BaseAssembler, IPlugin):
    """SWAP assembler plugin.

    SWAP accepts a single FastA input file, so FastQ reads are converted
    to FastA and multiple input files are concatenated before the MPI
    assembler is launched.
    """

    def run(self, reads):
        """Assemble *reads*; return a one-element list with the contig
        path, or an empty list when no contig was produced."""
        ## Swap only supports a single FastA file.
        # Convert FastQ inputs to FastA.
        fasta_files = []
        for f in self.get_files(reads):
            if f.endswith('.fq') or f.endswith('.fastq'):  # Convert
                in_fasta = f.rpartition('.')[0] + '.fasta'
                self.arast_popen([self.fastq_to_fasta, '-i', f, '-o', in_fasta])
                if not os.path.getsize(in_fasta):
                    # Bug fix: conversion goes FastQ -> FastA, so the
                    # failure is producing the FastA output, not "FastQ".
                    raise Exception('Error converting to FastA')
                fasta_files.append(in_fasta)
            else:
                fasta_files.append(f)
        # Concatenate multiple FastA files into a single reads file.
        if len(fasta_files) == 1:
            reads_file = fasta_files[0]
        else:
            reads_file = os.path.join(self.outpath, 'reads.fa')
            with open(reads_file, 'w') as outfile:
                for fa in fasta_files:
                    with open(fa) as reads_in:
                        for line in reads_in:
                            outfile.write(line)
        ## Run assembly
        # Bug fix: the output directory was built with plain string
        # concatenation (self.outpath + 'swap') while the contig lookup
        # below used os.path.join; unless outpath ended with a separator
        # the two paths disagreed and the contig was never found. Build
        # the path once and use it in both places.
        swap_dir = os.path.join(self.outpath, 'swap')
        self.arast_popen(['mpirun', '-n', self.process_threads_allowed,
                          self.executable, '-k', self.k, '-o',
                          swap_dir, '-i', reads_file])
        contig = os.path.join(swap_dir, 'CEContig.fasta')
        if os.path.exists(contig):
            return [contig]
        else:
            return []
|
No study has confirmed the most frequent zodiac signs. Psychological research has also aided in drug development and the capacity to diagnose a variety of diseases (like Alzheimer’s and Parkinson’s). Research is crucial to societal improvement. Should you need additional research you may look for some academic studies that discuss the advantages of studying abroad. Most credible research suggests that there’s very little danger of damage from watching an LED TV past the danger inherent in watching any sort of television.
What Is So Fascinating About Media Studies in Uk?
Literary communication features insight for some other disciplines like anthropology, history and psychology. For instance, you need to discover whether the communication is a component of a normal routine or a particular need. Email works well for day-to-day small business communications since it’s fast and productive.
If you spend a great deal of time doing things by yourself rather than with friends, there are numerous jobs where being able to work alone is required. Right from school you'll need to be thinking about becoming a cruise ship captain and work towards that objective. Working on a cruise ship is a fun and exciting job which can be particularly rewarding, and there are many diverse careers available on board; below are some tips on how to become a cruise ship captain in Britain. Secondly, you'll be more likely to get a better-paying job if it is international and you've demonstrated a willingness to learn a language. The job is very rewarding and the pay is quite good as well, plus you'll be able to travel around the world. Above all, it's a very important job and there is no fast-track approach to becoming a cruise ship captain.
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/justinvieira/Documents/ezdmb/View/mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI for the Digital Menu Board configuration window.

    Generated from View/mainwindow.ui. Do not edit by hand; regenerate
    with pyuic5 instead, as manual changes will be overwritten.
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        # Fixed-size, portrait-oriented main window.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1124, 1333)
        MainWindow.setMinimumSize(QtCore.QSize(1124, 1333))
        MainWindow.setIconSize(QtCore.QSize(18, 18))
        MainWindow.setDocumentMode(False)
        self.centralWidget = QtWidgets.QWidget(MainWindow)
        self.centralWidget.setStyleSheet("")
        self.centralWidget.setObjectName("centralWidget")
        # Outer grid: row 0 title, row 1 subtitle box, row 2 menu image,
        # row 3 settings button box.
        self.gridLayout_2 = QtWidgets.QGridLayout(self.centralWidget)
        self.gridLayout_2.setContentsMargins(11, 11, 11, 11)
        self.gridLayout_2.setSpacing(6)
        self.gridLayout_2.setObjectName("gridLayout_2")
        # Row 0: large bold title label (text set in retranslateUi).
        self.label = QtWidgets.QLabel(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
        self.label.setSizePolicy(sizePolicy)
        self.label.setMinimumSize(QtCore.QSize(1050, 73))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(32)
        font.setBold(True)
        font.setUnderline(False)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setWordWrap(False)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
        # Row 1: group box holding the "Current Menu:" subtitle label.
        self.groupBox_2 = QtWidgets.QGroupBox(self.centralWidget)
        self.groupBox_2.setMinimumSize(QtCore.QSize(1050, 94))
        # Palette for the group box, as exported from Designer: black
        # text roles for Active/Inactive states, white for Disabled.
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
        # Inactive state: same colors as Active.
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
        # Disabled state: everything white.
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
        brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
        self.groupBox_2.setPalette(palette)
        self.groupBox_2.setTitle("")
        self.groupBox_2.setFlat(False)
        self.groupBox_2.setObjectName("groupBox_2")
        # Italic "Current Menu:" label inside the borderless group box.
        self.label_2 = QtWidgets.QLabel(self.groupBox_2)
        self.label_2.setGeometry(QtCore.QRect(130, 20, 981, 101))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
        self.label_2.setSizePolicy(sizePolicy)
        self.label_2.setMinimumSize(QtCore.QSize(981, 61))
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(24)
        font.setBold(False)
        font.setItalic(True)
        font.setWeight(50)
        self.label_2.setFont(font)
        self.label_2.setAutoFillBackground(False)
        self.label_2.setStyleSheet("QGroupBox {\n"
"    border: none;\n"
"}")
        self.label_2.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.label_2.setFrameShadow(QtWidgets.QFrame.Plain)
        self.label_2.setScaledContents(False)
        self.label_2.setObjectName("label_2")
        self.gridLayout_2.addWidget(self.groupBox_2, 1, 0, 1, 1)
        # Row 2: nested grid holding the current-menu image preview.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
        self.gridLayout.setSpacing(6)
        self.gridLayout.setObjectName("gridLayout")
        # The menu image itself; starts out showing a bundled placeholder
        # ("default.jpg") scaled to fill the label.
        self.current_menu = QtWidgets.QLabel(self.centralWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.current_menu.sizePolicy().hasHeightForWidth())
        self.current_menu.setSizePolicy(sizePolicy)
        self.current_menu.setMinimumSize(QtCore.QSize(960, 954))
        self.current_menu.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.current_menu.setText("")
        self.current_menu.setPixmap(QtGui.QPixmap("default.jpg"))
        self.current_menu.setScaledContents(True)
        self.current_menu.setAlignment(QtCore.Qt.AlignCenter)
        self.current_menu.setObjectName("current_menu")
        self.gridLayout.addWidget(self.current_menu, 0, 0, 1, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 2, 0, 1, 1)
        # Row 3: group box with the blue "Display Settings" push button.
        self.groupBox = QtWidgets.QGroupBox(self.centralWidget)
        self.groupBox.setMinimumSize(QtCore.QSize(1112, 93))
        # Palette: blue button color in all widget states.
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 85, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
        self.groupBox.setPalette(palette)
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.pushButton_2 = QtWidgets.QPushButton(self.groupBox)
        self.pushButton_2.setGeometry(QtCore.QRect(90, 20, 951, 91))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
        self.pushButton_2.setSizePolicy(sizePolicy)
        self.pushButton_2.setMinimumSize(QtCore.QSize(951, 91))
        font = QtGui.QFont()
        font.setFamily("Arial Black")
        font.setPointSize(24)
        font.setBold(False)
        font.setWeight(50)
        self.pushButton_2.setFont(font)
        self.pushButton_2.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.pushButton_2.setObjectName("pushButton_2")
        self.gridLayout_2.addWidget(self.groupBox, 3, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralWidget)
        # Standard (empty) menu bar and status bar.
        self.menuBar = QtWidgets.QMenuBar(MainWindow)
        self.menuBar.setGeometry(QtCore.QRect(0, 0, 1124, 17))
        self.menuBar.setObjectName("menuBar")
        MainWindow.setMenuBar(self.menuBar)
        self.statusBar = QtWidgets.QStatusBar(MainWindow)
        self.statusBar.setObjectName("statusBar")
        MainWindow.setStatusBar(self.statusBar)
        self.retranslateUi(MainWindow)
        # Auto-connect slots named on_<objectName>_<signal>.
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (hook for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "DMB Configuration"))
        self.label.setText(_translate("MainWindow", "Digital Menu Board Config"))
        self.label_2.setText(_translate("MainWindow", "Current Menu:"))
        self.pushButton_2.setText(_translate("MainWindow", "Display Settings"))
|
We’ve all done it: fantasized about how we’d run things if we were in charge. Lately, I’ve been thinking about what I’d do if I were responsible for reinvigorating theater in the United States. Here’s a list of just a few things, in no particular order. It’s only a start — I’m sure we could go much farther — but I think it’s pretty nifty.
In any crowded marketplace, a smart business owner asks: how can I differentiate my organization? Of late, it seems to me, we have been trying to make theater more and more like television and film: bigger special effects, more realistic sets, film actors given high-profile roles, and an increasing focus on spectacle. All those things are nice, sure… but I think they’re doing us harm. We can never do those things as well as the folks in Hollywood can, so why do we even try? I believe a renewed focus on the imaginative simplicity of bare-bones sets and costumes and props—along with the visceral nature of live performance, as well as theater’s ability to break the so-called fourth wall in ways television and film simply can’t—would help audiences understand what to expect when they go see a play… and, more importantly, value it as a vital experience they can’t get anywhere else.
As I have written about before, I think a significant reason that audiences for theater have shrunk is that we’ve effectively abandoned them. We theater practitioners all too often make work to impress each other or to please our own palates or to simply do something new and innovative, because we’re weary of more conservative or traditional forms of storytelling. One artistic director after another goes on record as saying “I just couldn’t put this play down” or “I felt drawn to the work for some reason.” Far too rarely do we hear anything like this: “I programmed this play in response to the concerns of my community” or “This is the sort of story that people have been asking us to do.” We need to find out what kinds of plays people want to see—not only the people who currently come to our theaters, but (more importantly) the people who don’t yet come—then stage those plays. We won’t hurt (as badly) for audiences if we do.
Whoever decided it made sense to keep all our writers and actors and directors in a few major cities and then ship them around the country to make art? If you think about it, it’s ridiculously inefficient. We end up having to find housing for people when they come into town and build new collaborative relationships on the fly, rather than relying on rapport developed over years of working together. More importantly (see my previous point), the work they make is inspired by their lives in the big cities, rather than what they might experience in, say, Baltimore or (here’s an idea) Crookston, MN. We need to decentralize our business. We need to start theaters in smaller cities and in rural areas and in neglected neighborhoods in big cities… and yes, perhaps let some of our big city theaters die. And we need to produce plays written by playwrights who live near our audiences and know them well.
I’ve appropriated the title of this point from a great post on 2amtheatre.com. Here’s the gist: we need to make theaters more fully integrated into the communities in which they operate. We need to think more broadly about the ways in which theaters serve people: as spaces in which new ideas and new narratives are shared and discussed. If that’s the real mission of a theater, then why aren’t we using our lovely spaces to host guest lectures from thoughtful people every Monday night of the year? Why aren’t we treating them like secular churches, inviting inspirational leaders to “preach,” as it were, on Sunday mornings? Why aren’t we inviting other theater companies to perform in our spaces when we don’t have shows running? We should be booked 350 nights a year, for several events a day on some days. We should also be opening day care services so that parents can come see shows while their children are cared for. We should be teaching classes (not just theater classes, but all sorts of classes) in unused rehearsal spaces. We should be serving drinks seven nights a week in honest-to-goodness theater bars. Get people familiar with the space, and they’ll start coming to see shows, too. You can bank on that.
As I wrote about on 2amtheatre quite a while ago, there are significant ways in which technology could be—and perhaps should be—radically disrupting the ways in which we work. We focus a great deal of attention on innovation in our art form, which is only natural (though please see point #2 above), but not on the ways in which we administer our theaters. Our websites and blogs are largely lackluster, for the most part. Our engagements in social media are getting better—at least for some of us—but we still have quite a ways to go to catch up to the strongest practitioners. More importantly, we have yet to allow technology to mediate and streamline some of the difficult data problems we face, from ticket pricing to script submission management. Technology can and will liberate us from some of our heaviest burdens… if we let it.
Why do most major grants go to institutions and not to artists? Why aren’t we stamping our feet and advocating wildly for the United States government to create an Art Corps like the Peace Corps: a body of government-funded artists to make work all around the country and the world? Why are there three arts administrators for every artist? (I must admit to doubting the veracity of that data point, though I’ve heard it a great deal.) Trisha Mead has written about the “legitimizing role” of arts institutions, and she has a point… but an institution is really just a brand, and brands come and go. (That’s why we visit Starbucks today, rather than, oh, the Maxwell House Coffee Shop.) In the end, people matter; artists matter. Institutions should not be sacred cows.
Those will do for now, I suppose. Give me time, however, and I’ll think of a few more things I might try, if I were in charge.
Perhaps what you’re experiencing is simply “personal frustrations,” though they’re frustrations I’m sure we’ve all experienced at one time or another. I will only say in response that if there was ever a time to simply break out and make theater on your own, your own way, this is it. Innovate and people will follow. Good luck!
These are all great ideas. In these days of high ticket prices and bad traffic, I would add “play where you are” and making the audience (local) part of the act. Even if it means taking it to the streets. Or especially if it means taking it to the streets.
You know, taking theater outside the theater is one I considered adding. I’m not convinced it isn’t a fad, though. I just don’t think I understand the long-term vision for it… which is not to say that such a vision doesn’t exist.
I’d submit that a lot of us have been working hard at #1 for years; as an example, I’ve done exactly two genuinely realistic, literal set designs in the last three years, out of somewhere between 55 and 60 shows. And I’m just one designer — there’s a lot of great, striking, fabulously abstract work going on just in DC (where I work) alone, by some excellent theatre artists.
The movement towards #3 has been going on since Zelda Fichandler was launching Arena Stage using little more than spit and baling wire. No question that we could do better. But there are folks fighting to do just that all the time … finding the funding to make it happen is the hard part. And the smaller and more under-served the community, the harder to make it work fiscally.
#2 is … complicated. Yes, we need to listen to our audiences. But that doesn’t always work. When filling out response cards, audiences often choose shows they know as something they’d like to see, but when the time comes, they don’t attend. Why should they? They’ve seen it before. I know — every production of Fiddler on the Roof is different, but let’s be honest … nobody’s surprised by the ending. So it’s a tricky thing choosing a season, balancing familiar or highly accessible pieces with things that might reach new audiences, or make existing audiences appreciate theatre in a new way.
#5: Definitely! Spot on. We’re getting better at this, as an industry, but we still have lots of room to better leverage tech to help us.
Number 4 is logistically more-or-less impossible. Theatres are dark, when they’re dark, for a REASON. We’re teching, or building, or painting, or writing light cues. Every booking added into a space makes the production team cry just a little more! The money folks always want more bookings, the production folks want none, most places find a balance where everyone is mildly miserable.
And number six. Art Corps? Interesting! Pie-in-the-sky, of course, but every good thing was at some point. Don’t know how it would ever get funded, but that’s a battle for another day … it’s a wonderful notion to blue-sky.
In any case — keep brainstorming! We really do need new ideas and vigorous discussion to keep our industry from becoming moribund.
Hello, Elizabeth. Given that we’re both in DC — did you know that when you posted? — I’m surprised we haven’t met. Welcome.
I have to say while I’m sure you intended your “keep brainstorming” comment to be supportive, I also find it (I’m sorry) a touch dismissive as well. What I’ve written here is far more than a brainstorm. This is a considered list of ideas, some of which I’ve written about before at length, others of which have been discussed in great depth elsewhere.
In any event, I want to respond to your responses, for which I’m thankful.
With regard to #1: I think you are confusing realism vs. abstraction with the issue I’ve raised. What I’m talking about is keeping sets minimal, period. I call your attention to the phrase “bare bones.” I believe that a “fully-fleshed” set — either realistic or literal — is too close to television and film. Leaving the details out of any kind of set, on the other hand, invites audiences to do a great deal more to “flesh out” the set in their minds. It’s the equivalent of listening to radio drama rather than watching a film or television show: sitting in the audience, you become a co-creator in the experience, and you also personalize it, to some extent, with your imagination. That’s what I’m suggesting.
With regard to #2: I am not talking about comment cards. Comment cards are the entirely uninspired, unimaginative way of going about doing what I’m talking about; I’ve written about this extensively elsewhere on this blog, so I’d rather not repeat myself, but I’ll say that I’m talking about playwrights paying attention to the issues and concerns of their communities and letting those things guide the stories they tell; I’m talking about theaters choosing plays to produce — new plays, not chestnuts — that are “called for” by audiences’ concerns, rather than their own artistic preferences; I’m talking about the work that Luis Alfaro has done to connect his work with audiences who haven’t traditionally visited theaters. Comment cards, by contrast, are ineffectual.
#3, I agree, is (in some ways) old news. But I don’t think funding is the issue. I think it’s about a major cultural shift we need to make across America. We need to overcome our NYLACHI-centric biases; we need to create our own culture rather than importing it. That’s starting to happen here in DC; when it’s happening in San Antonio and Ames, IA, we’ll have arrived.
The fact that you find #4 impossible irks me. It’s not. It’s only impossible if we fail to think imaginatively. I understand perfectly well that there are times when there’s construction happening on a stage; I’m not talking about interfering with anyone’s work; I’m talking about expanding the possibilities for a building; re-thinking what its purpose is, how it serves its community. I’m talking about designing buildings in a different way from the get-go so that they can incorporate the sorts of things I’ve suggested. I suggest you read the Never Be Dark post I linked to for an indication of how many theaters are already moving in this direction.
Finally, #6. Is it pie-in-the-sky? Well, so was the Peace Corps, 50 years ago. And so was Teach for America in the far more recent past. It’s only blue sky until it’s not. We just have to will it to happen.
Hello, Gwydion. No, I didn’t realize you were DC based when I posted. Nice to “meet” you.
First, and most importantly — My apologies if my closing seemed dismissive in any way. It wasn’t remotely intended that way. I think this kind of Big Thinking is important — essential! — to the survival of our industry. And I think our industry needs to survive; I think it brings something of great value to society.
#1: I wasn’t confused; I was responding specifically to your phrase “more realistic sets” but I perhaps could have been clearer about that! In any case, I agree with your central premise, that simple, bare-bones scenery can often be the most effective.
#2: Oh, I agree … comment cards are dreadful! But they’ve long been the only method some organizations had for getting feedback. The answer in part might have something to do with #5 on your list. Using social media to connect with current — and potential — audiences could open a whole new form of dialogue between theatres and theatregoers. At least, I hope so. Certainly your ideas about playwrights bringing relevant, community-based stories to the stage seem terrific!
#3: I actually encounter almost no NY-LA-CHI influence in my professional life at all! (But that may just be that I work for some pretty small, poor theatres.) I’d argue that funding is PART of the issue. Theatre is expensive to get going — and it takes time to build an audience, especially in areas that don’t have going to the theatre as part of their existing culture. Finding enough funding to get started and survive the audience-building can be tough. But I’d agree that a cultural shift is needed. And I’d add this: there’s a disturbingly large part of American society that has what I’d have to call a certain disdain for the performing arts — a sense that the performing arts have absolutely nothing to offer them, and no relevance to their lives. How do we demonstrate that our work does have meaning for them? I think that’s a significant part of the cultural change you call for. I just wish I had answers.
#4: Fair enough — and I’ll read your other posts. Just discovered your page yesterday, so I’m not caught up yet. I didn’t realize that you were thinking of new, different sorts of buildings. And I don’t disagree with the dream! (I’ve personally always wished for a theatre with a 24 hour coffee shop, 24 hour bar, and 24 hour hardware store in the lobby — but that’s a little self-serving, I suppose.) I suspect my response was a little knee-jerk. I can’t tell you how often a tech schedule, or load-in schedule, or paint schedule gets mucked up by administrators forcing bookings in where there isn’t time or space for them. And I know far too many carpenters and TD’s who already have to come in at 5 or 6am, in order to have time to work, when the space is free, and too many painters who have to start their paint calls at midnight. So it’s a hot-button topic for those of us on the production side of the team.
#6: Sure it’s pie-in-the-sky. But all good ideas start out that way. I think it’s a wonderful vision. A friend reminded me that donkey’s years ago, the WPA was, in part, a sort of Art Corp, for a short while. What can be done once, can perhaps be done again.
In any case, thanks for the discussion!
Thanks for this, Elizabeth. I think you’ve made some interesting points.
I don’t disagree! That’s what I’m getting at … how do we reach them? How do we demonstrate the relevance of our work to their lives? Or how do we find work that will be meaningful to them?
I think your last question is the answer; we need to take it upon ourselves to make work that’s relevant to people different than the people we currently make work for.
Oh, and by the way … I’m a secular humanist insomniac set designer. Really, how have we not met?
Well, it’s about time, at least electronically!
Well, I’d comment, but I really can’t add anything to what Elizabeth said…and I certainly can’t say it any better! I agree (#1) that aiming for spectacle, etc., shouldn’t be the goal, but neither should ALL sets, etc., be “bare bones”.
Thx, Donna! Appreciate the kind words. And I certainly agree that there’s a time and a place for all things. Sometimes realistic sets, etc. are spot on, but sometimes wild abstraction, or sometimes even bare bones serves the play, and company, best. Even pure spectacle has its place. I love a lot of Cirque, which is blatant spectacle!
I will say, more-or-less in support of idea #1, that there are times when extravagant spectacle on Broadway does make it harder for the rest of us, out in the trenches. Example: Did a production of Beauty and the Beast a few years ago. It was a miserable experience. Half the audience community knew the Disney Broadway version, and expected that. Worse … the other half only knew the movie, and expected that! I can do wild abstract versions of anything, but that’s not what the audience wanted to see. Both halves wanted something I had no way … for about 0.5% of the Broadway budget, and with no fly system … to give them. The powers-that-be in the organization weren’t interested in re-inventing the show in a stark, modern form. So I had to put onstage a lame, utterly half-assed version of a show that we had no business doing, and no means at all of doing well.
In sum … I love grand spectacle, at times. But if I could never have to do another Disney musical with a set budget of about fifty cents, I’d be a happy camper!
Cirque is lovely entertainment; I enjoy it, too. What it’s lacking, for my money, is a clear narrative. If they were better storytellers, they’d astonish me. In any event, yes, spectacle is nice… but we have television and film for spectacle. I’m not saying we should cede the spectacular to those art forms entirely; I wouldn’t presume to set limits on our imagination. But I am saying that the extra emphasis we’ve been placing on spectacle in our work (Spider-Man: Turn Off the Dark, I’m looking at you) has undermined the core promise of theater: storytelling.
Again, though, I’m not saying ALL sets should be bare-bones: merely that a stripped-down aesthetic is more often appropriate for the times in which we’re making art and within our competitive set.
I haven’t seen the specific shows you cite, so I can’t speak to that. But I absolutely agree that a stripped-down approach can often be the best choice!
Much of the work I see around DC is already quite spare. To name just two designers, Jim Kronzer and Tony Cisik both frequently do stunningly lovely sets that are clean, simple, stark, and wonderful.
I agree, also, about Cirque — if the storytelling lived up to the spectacle, they’d astonish us all, I think.
In this darn thesis I keep talking about I’m hitting on a number of the same points you have here. I just have one little comment about #1. I agree we need to differentiate between the work we do on stage and the work that happens in film and television. However, I do not think it’s about saying goodbye to spectacle and hello to simplicity. Theater since the Greeks with their deus ex machina have embraced spectacle & technology there within. The problem is one of resources and collaboration. What theater has to give an audience is a collaboration and (this relates to your #2) if while making theater we forget about that collaboration we are lost. The audiences loses whatever story we are trying to tell and our attention goes into making some thing we think is cool or breathtaking or special without looking at the whole.
I fully embrace spectacle because to me, spectacle is theatrical. I would much rather be thrilled visually then see a decent story told on a realistic set. TV and movies can do realism better than we can, what we can do is create impossible worlds. We just need to do it responsibly, while paying attention to the whole.
I really can’t wait to read this thesis. It sounds better all the time.
I guess we fundamentally disagree about spectacle.
The argument that “we’ve always done it, so we should continue” doesn’t hold water for me. Technologies evolve. Culture evolves. We need to evolve, too. Or at least be unafraid to do so.
I also have to note that television and film do at LEAST as good a job as we can, if not far better, at creating impossible worlds. There’s nothing we can do that compares to, say, Inception or The Matrix. That’s because we have to adhere to the laws of physics, and they do not. Even the so-called “flying” that’s part of Spider-Man is feckless at best compared to what Tobey and company did on screen.
In the end, though, I do take your fundamental point: it’s about collaboration, though I’d refer to it as connection with the audience. If we focus on the breathtaking stuff to the detriment of that element, we lose every time.
|
# -*- coding: utf-8 -*-
#
# Copyright 2012 Manuel Stocker <mensi@mensi.ch>
#
# This file is part of Cydra.
#
# Cydra is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cydra is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cydra. If not, see http://www.gnu.org/licenses
import os.path
import re
import warnings
import ldap
from ldap.ldapobject import ReconnectLDAPObject
from cydra.component import Component, implements
from cydra.permission import User, Group
from cydra.permission.interfaces import IUserTranslator, IUserAuthenticator
import logging
logger = logging.getLogger(__name__)
# Characters with special meaning in LDAP search filters and their
# backslash-hex escape sequences (see RFC 4515).
LDAP_ESCAPES = {
    '*': '\\2A',
    '(': '\\28',
    ')': '\\29',
    '\\': '\\5C',
    '\0': '\\00',
}
# Single compiled pattern matching any character that requires escaping.
_ldap_escape_pat = re.compile('|'.join(re.escape(k) for k in LDAP_ESCAPES.keys()))
def ldap_escape(s):
    """Return *s* with LDAP filter metacharacters replaced by their
    RFC 4515 escape sequences."""
    def _escape_char(match):
        return LDAP_ESCAPES[match.group()]
    return _ldap_escape_pat.sub(_escape_char, s)
def force_unicode(txt):
    """Coerce *txt* to a unicode string (Python 2).

    First tries the native unicode() conversion; if that raises a
    UnicodeDecodeError, the object is stringified and decoding is
    retried with utf-8, then latin1, then ascii with replacement.

    Raises ValueError if every decoding attempt fails.
    """
    try:
        return unicode(txt)
    except UnicodeDecodeError:
        pass
    # Keep the original object for the error message below.
    orig = txt
    # Non-str objects are converted via str() before per-codec decoding.
    if type(txt) != str:
        txt = str(txt)
    for args in [('utf-8',), ('latin1',), ('ascii', 'replace')]:
        try:
            return txt.decode(*args)
        except UnicodeDecodeError:
            pass
    raise ValueError("Unable to force %s object %r to unicode" % (type(orig).__name__, orig))
class LdapLookup(object):
    """Thin helper around an LDAP connection for user and group lookups.

    All class attributes act as configuration defaults and may be
    overridden via keyword arguments to the constructor (e.g.
    ``LdapLookup(uri='ldap://host', user='cn=bind,...', password='x')``).
    """

    connection = None
    uri = None
    user = None  # bind DN; an anonymous bind is performed when None
    password = None
    user_searchbase = ''
    group_searchbase = ''
    # Default object-class filters; copied and extended per lookup.
    user_searchfilter = {'objectClass': 'user'}
    group_searchfilter = {'objectClass': 'group'}

    def __init__(self, **kw):
        # Adopt only known, public configuration keys so a larger
        # component config dict can be passed through unchanged.
        for key, item in kw.items():
            if hasattr(self, key) and not key.startswith('_'):
                setattr(self, key, item)

    def connect(self):
        """Open the LDAP connection and bind.

        :returns: True on success, False on failure (failure is logged).
        """
        try:
            self.connection = ReconnectLDAPObject(self.uri)
            if self.user is not None:
                self.connection.simple_bind_s(self.user, self.password)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed here.
            logger.exception("LDAP connection failed")
            return False
        return True

    def get_safe(self, basedn, **kw):
        """Like :meth:`get`, but escapes all filter keys and values first."""
        return self.get(basedn, **dict([(ldap_escape(k), ldap_escape(v)) for k, v in kw.iteritems()]))

    def get(self, basedn, **kw):
        """Run a subtree search ANDing all keyword equality filters.

        Caution: values are interpolated into the filter verbatim; use
        :meth:`get_safe` for untrusted input.
        """
        search = '(&%s)' % ''.join(['(%s=%s)' % item for item in kw.iteritems()])
        result = self.connection.search_s(basedn, ldap.SCOPE_SUBTREE, search)
        return result

    def get_dn(self, dn):
        """Fetch a single entry by DN; returns None when nothing matches."""
        res = self.connection.search_s(dn, ldap.SCOPE_BASE, '(objectClass=*)')
        if len(res) == 0:
            return None
        else:
            return res[0]

    def get_users(self):
        """Return all entries matching the user search filter."""
        return self.get(self.user_searchbase, **self.user_searchfilter)

    def get_user(self, username):
        """Look up one user by UPN (name contains '@') or sAMAccountName."""
        search = self.user_searchfilter.copy()
        if '@' in username:
            search['userPrincipalName'] = username
        else:
            search['sAMAccountName'] = username
        res = self.get_safe(self.user_searchbase, **search)
        if len(res) == 0:
            return None
        else:
            return res[0]

    def get_groups(self):
        """Return all entries matching the group search filter."""
        return self.get(self.group_searchbase, **self.group_searchfilter)

    def get_group(self, groupname):
        """Look up a single group by name; returns None when not found."""
        search = self.group_searchfilter.copy()
        search['name'] = groupname
        res = self.get_safe(self.group_searchbase, **search)
        if len(res) == 0:
            return None
        else:
            return res[0]
class ADUser(User):
    # AD-backed users may authenticate and have their password verified
    # (via an LDAP bind), but passwords cannot be changed through cydra.
    valid_for_authentication = True
    supports_check_password = True
    supports_set_password = False
    def __init__(self, adusers, userid, **kwargs):
        """Create a user bound to the ADUsers component for auth checks."""
        super(ADUser, self).__init__(adusers.compmgr, userid, **kwargs)
        self._adusers = adusers
    def check_password(self, password):
        """Verify *password* by attempting an LDAP bind as this user."""
        return self._adusers.user_password(self, password)
class ADUsers(Component):
    """Cydra component that translates and authenticates users against
    an Active Directory via LDAP."""
    implements(IUserAuthenticator)
    implements(IUserTranslator)
    def __init__(self):
        # The component config is passed straight to the LDAP helper
        # (uri, bind user/password, search bases and filters).
        config = self.get_component_config()
        self.ldap = LdapLookup(**config)
        if not self.ldap.connect():
            raise Exception('Connection failed')
    def username_to_user(self, username):
        """Translate a login name to a user object, or None if unknown."""
        user = self._ldap_to_user(self.ldap.get_user(username))
        if user is None:
            logger.error("Translation failed for: %s" % username)
        return user
    def userid_to_user(self, userid):
        """Translate a user id to a user object.

        Unlike username_to_user, this never returns None: an unknown id
        yields a placeholder User so callers always get an object.
        """
        if userid is None or userid == '*':
            warnings.warn("You should not call this directly. Use cydra.get_user()", DeprecationWarning, stacklevel=2)
            return self.compmgr.get_user(userid='*')
        user = self._ldap_to_user(self.ldap.get_user(userid))
        if user is None:
            logger.error("Translation failed for: %s" % userid)
            # since the client was looking for a specific ID,
            # we return a dummy user object with empty data
            return User(self.compmgr, userid, full_name='N/A')
        else:
            return user
    def _ldap_to_user(self, data):
        """Build an ADUser from an LDAP (dn, attributes) tuple, or None."""
        if data is None:
            return None
        dn, userobj = data
        # Resolve group membership by fetching each DN listed in memberOf.
        if 'memberOf' in userobj:
            groups = [self._ldap_to_group(self.ldap.get_dn(x)) for x in userobj['memberOf']]
        else:
            groups = []
        # The UPN serves as the stable user id; sAMAccountName as login name.
        return ADUser(self,
                      userobj['userPrincipalName'][0],
                      username=userobj['sAMAccountName'][0],
                      full_name=force_unicode(userobj['displayName'][0]), groups=groups)
    def groupid_to_group(self, groupid):
        """Translate a group name to a Group object, or None if unknown."""
        group = self._ldap_to_group(self.ldap.get_group(groupid))
        if group is None:
            logger.error("Group lookup error for %s", groupid)
        return group
    def _ldap_to_group(self, data):
        """Build a Group from an LDAP (dn, attributes) tuple, or None."""
        if data is None:
            return None
        dn, groupobj = data
        return Group(self.compmgr,
                     groupobj['name'][0],
                     name=groupobj['name'][0])
    def user_password(self, user, password):
        """Authenticate *user* by binding to the directory with *password*.

        Missing user or empty password is rejected immediately without
        contacting the server.
        """
        if not user or not password:
            return False
        logger.debug("Trying to perform AD auth for %r" % user)
        try:
            # A dedicated, short-lived connection is used for the bind so
            # the component's lookup connection keeps its own credentials.
            conn = ldap.initialize(self.get_component_config()['uri'])
            conn.simple_bind_s(user.id, password)
            conn.unbind_s()
        except ldap.INVALID_CREDENTIALS:
            logger.exception("Authentication failed")
            return False
        logger.debug("AD auth complete")
        return True
|
New Delhi: Prime Minister Narendra Modi reviewed the progress of preparations of launch of the ambitious Health Assurance programme under Ayushman Bharat.
Mr Modi stressed on providing maximum benefit to the poor and marginalised sections of society, under the scheme.
He was apprised of the preparation done so far, including consultations with states for smooth and expeditious rollout of the programme.
The scheme would provide a cover of up to Rs five lakh per family and the target would be to cover more than 10 crore poor and vulnerable families.
Mr Modi was also briefed on various aspects of the scheme by top officials of the Health Ministry, NITI Aayog and PMO, a release of the Prime Minister’s office said.
Last month, on the occasion of Ambedkar Jayanti, Mr Modi had inaugurated the first ‘Health and Wellness Centre’ under Ayushman Bharat, in Bijapur in Chhattisgarh.
|
"""Rename users' subcorpora directories when the directory naming key
changes (e.g. migrating from username-based directory names to
id-based ones)."""
import sys
import os
sys.path.insert(0, os.path.realpath('%s/..' % os.path.dirname(__file__)))
import autoconf
import plugins
from plugins import lindat_db
# Install the db plug-in so the user records can be read below.
plugins.install_plugin('db', lindat_db, autoconf.settings)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Rename subcorpora directories')
    parser.add_argument('--old-user_dict-key', dest='old_key', default='username', help='The old '
                        'key used to name directories, e.g. username in 0.9')
    parser.add_argument('--new-user_dict-key', dest='new_key', default='id', help='The new key '
                        'used to name directories, e.g. id in 0.12')
    args = parser.parse_args()
    # Root directory holding one subdirectory per user.
    subcpath = autoconf.settings.get('corpora', 'users_subcpath')
    redis_db = plugins.runtime.DB.instance
    db = redis_db.get_instance('auth')
    # All user records except the internal counter key.
    keys = list([key for key in list(db.keys()) if key != '__user_count'])
    # Map value-of-old-key -> full user record for the lookups below.
    users = {db.hash_get(key, args.old_key): db.hash_get_all(key) for key in keys}
    for user_subc_dir in [f for f in os.listdir(subcpath) if os.path.isdir(os.path.join(subcpath, f))]:
        key = user_subc_dir
        # Directory names are strings; when directories were named by id
        # the lookup key must be converted to int to match the records.
        if args.old_key == 'id':
            key = int(user_subc_dir)
        user = users[key]
        new_val = user[args.new_key]
        # New id-based directory names must be strings for os.rename.
        if args.new_key == 'id':
            new_val = str(user[args.new_key])
        os.rename(os.path.join(subcpath, user_subc_dir), os.path.join(subcpath, new_val))
|
On Call Rate:No Call Overtime Rate:All Inclusive Rate + $5 Call Back Minimum: Holiday Rate:All Inclusive Rate + $5 shift:7p-7a, alternate weekends Minimum Years Experience Required:2 Certification Requirements: General:BLS, PALS, EPIC experience highly desirable Nursing:No Certifications Needed Dialysis:No Certifications Needed JAN 2017 START! This 30-bed Medical Unit receives a wide range of high acuity pediatric patients with medical needs. Applicants MUST have 2 years recent, strong IN-HOUSE Medical/Surgical experience in either free standing Children's Hospital or a large dedicated Pediatric unit within a cluster of Pediatric units within a hospital. PALS (AHA) required. May float to other acuity related Pediatric floors. EPIC experience highly desirable, but we will but will orient up to 8 hours on campus during orientation if needed. Must pass basic dysrrhythmia and medication exam on first day. MUST HAVE PERMANENT ADDRESS greater than 100 miles from facility. Please review additional requirements listed on MEDEFIS before submitting candidates. Insurance and Permanent Residence mileage restrictions.
|
from ionchannelABC.experiment import Experiment
import data.ical.Sun1997.data_Sun1997 as data
from ionchannelABC.protocol import availability_linear, recovery
import numpy as np
import myokit
import warnings
from scipy.optimize import OptimizeWarning
import scipy.optimize as so
# Temperature-adjustment Q10 coefficients for I_CaL (values from Li 1997).
Q10_cond = 1.6 # [Li1997]
Q10_tau_act = 1.7 # [Li1997]
Q10_tau_inact = 1.3 # [Li1997]
# Minimum r^2 for accepting a bi-exponential fit of the current decay.
fit_threshold = 0.9
#
# Inactivation kinetics [Sun1997]
#
sun_inact_kin_desc = """
Inactivation kinetics measured using bi-exponential funcion
cf Fig 4B [Sun1997]
"""
# Each dataset is a (voltage steps, measured values, variances) triple.
vsteps_tf, tf, sd_tf = data.inact_tauf_Sun()
variances_tf = [sd_**2 for sd_ in sd_tf]
sun_inact_kin_tf_dataset = np.array([vsteps_tf, tf, variances_tf])
vsteps_ts, ts, sd_ts = data.inact_taus_Sun()
variances_ts = [sd_**2 for sd_ in sd_ts]
sun_inact_kin_ts_dataset = np.array([vsteps_ts, ts, variances_ts])
vsteps_rel_inact, rel_inact, sd_rel_inact = data.rel_inact_Sun()
variances_rel_inact = [sd_**2 for sd_ in sd_rel_inact]
sun_inact_kin_rel_inact_dataset = np.array([vsteps_rel_inact, rel_inact, variances_rel_inact])
# all voltage protocols are preceded by 500ms prepulse to -40mV from HP -80mV
tpre = 10000 # ms
tstep = 1000
vlower = -10
dv = 10
vupper = 30+dv
# One 11 s sweep per test voltage: long hold at -80 mV, 500 ms prepulse
# at -40 mV, then a 1 s step to the test potential.
sun_inact_kin_protocol = myokit.Protocol()
for v in vsteps_tf:
    sun_inact_kin_protocol.add_step(-80, tpre-500)
    sun_inact_kin_protocol.add_step(-40, 500)
    sun_inact_kin_protocol.add_step(v, 1000)
sun_conditions = {'phys.T': 296.15, # K
                  'ca_conc.Ca_o': 1} # mM
def sun_inact_kin_sum_stats(data, fast=True, slow=True):
    """Fit a bi-exponential to each sweep's current decay.

    NOTE(review): the ``data`` parameter shadows the module-level
    ``data`` import; here it is a myokit simulation log, not the
    experimental dataset module.

    Returns the list of fast time constants followed by the slow ones
    (``inf`` entries mark failed simulations or poor fits).
    """
    def double_exp(t, tauh, taus, Ah, As, A0):
        # Sum of two decaying exponentials plus a constant offset.
        return A0 + Ah*np.exp(-t/tauh) + As*np.exp(-t/taus)
    output_tf = []
    output_ts = []
    # One 11 s sweep per test voltage; keep only the final 1 s test step.
    for d in data.split_periodic(11000, adjust=True, closed_intervals=False):
        d = d.trim_left(10000, adjust=True)
        current = d['ical.i_CaL']
        time = d['engine.time']
        index = np.argmax(np.abs(current))
        # Set time zero to peak current
        current = current[index:]
        time = time[index:]
        t0 = time[0]
        time = [t-t0 for t in time]
        with warnings.catch_warnings():
            # Escalate fit warnings to errors so covariance/convergence
            # problems fall through to the except branch below.
            warnings.simplefilter('error', OptimizeWarning)
            warnings.simplefilter('error', RuntimeWarning)
            try:
                # Normalise the decay to the peak before fitting.
                current = [c/current[0] for c in current]
                if len(time)<=1 or len(current)<=1:
                    raise Exception('Failed simulation')
                popt, _ = so.curve_fit(double_exp,
                                       time,
                                       current,
                                       p0=[10,200,0.5,0.5,0],
                                       bounds=(0.,
                                               [np.inf, np.inf, 1.0, 1.0, 1.0]),
                                       max_nfev=1000)
                fit = [double_exp(t,popt[0],popt[1],popt[2],popt[3],popt[4]) for t in time]
                # Calculate r2
                ss_res = np.sum((np.array(current)-np.array(fit))**2)
                ss_tot = np.sum((np.array(current)-np.mean(np.array(current)))**2)
                r2 = 1 - (ss_res / ss_tot)
                # The smaller of the two fitted taus is the fast component.
                tauf = min(popt[0],popt[1])
                taus = max(popt[0],popt[1])
                if r2 > fit_threshold:
                    if fast:
                        output_tf = output_tf+[tauf]
                    if slow:
                        output_ts = output_ts+[taus]
                else:
                    raise RuntimeWarning('scipy.optimize.curve_fit found a poor fit')
            except:
                # Any failure (bad fit, warning, short trace) -> inf marker.
                if fast:
                    output_tf = output_tf+[float('inf')]
                if slow:
                    output_ts = output_ts+[float('inf')]
    output = output_tf+output_ts
    return output
def sun_inact_kin_sum_stats_tf(data):
    """Summary statistics restricted to the fast time constants."""
    return sun_inact_kin_sum_stats(data, slow=False)
def sun_inact_kin_sum_stats_ts(data):
    """Summary statistics restricted to the slow time constants."""
    return sun_inact_kin_sum_stats(data, fast=False)
def sun_rel_inact_sum_stats(data):
    """Relative inactivation after the 1 s step: 1 - I_ss/I_peak per sweep.

    Failed sweeps (no measurable peak current) are flagged with ``inf``.
    """
    output = []
    for d in data.split_periodic(11000, adjust=True, closed_intervals=False):
        # Keep only the final 1 s test step of each 11 s sweep.
        d = d.trim_left(10000, adjust=True)
        current = d['ical.i_CaL']
        peak = max(current, key=abs)  # signed value with largest magnitude
        ss = current[-1]              # steady state: last sample of the step
        try:
            output = output + [1-ss/peak]
        except ZeroDivisionError:
            # Narrowed from a bare except: only the division can raise here.
            output = output + [float('inf')]
    return output
# Combined experiment: fast and slow inactivation time constants.
sun_inact_kin = Experiment(
    dataset=[sun_inact_kin_tf_dataset,
             sun_inact_kin_ts_dataset],
    protocol=sun_inact_kin_protocol,
    conditions=sun_conditions,
    sum_stats=sun_inact_kin_sum_stats,
    description=sun_inact_kin_desc,
    Q10=Q10_tau_inact,
    Q10_factor=-1)
# Fast inactivation time constant only.
sun_inact_kin_fast = Experiment(
    dataset=sun_inact_kin_tf_dataset,
    protocol=sun_inact_kin_protocol,
    conditions=sun_conditions,
    sum_stats=sun_inact_kin_sum_stats_tf,
    description=sun_inact_kin_desc,
    Q10=Q10_tau_inact,
    Q10_factor=-1)
# Slow inactivation time constant only.
sun_inact_kin_slow = Experiment(
    dataset=sun_inact_kin_ts_dataset,
    protocol=sun_inact_kin_protocol,
    conditions=sun_conditions,
    sum_stats=sun_inact_kin_sum_stats_ts,
    description=sun_inact_kin_desc,
    Q10=Q10_tau_inact,
    Q10_factor=-1)
# Relative inactivation (no temperature adjustment: dimensionless ratio).
sun_rel_inact = Experiment(
    dataset=sun_inact_kin_rel_inact_dataset,
    protocol=sun_inact_kin_protocol,
    conditions=sun_conditions,
    sum_stats=sun_rel_inact_sum_stats,
    description=sun_inact_kin_desc,
    Q10=None,
    Q10_factor=0)
#
# Inactivation kinetics using monovalent cation [Sun1997]
#
# NOTE(review): the assignments below are a verbatim repeat of the
# dataset/protocol definitions earlier in this module and simply rebind
# the same module-level names. Only `sun_v_inact_kin_desc` is new, and
# it is not referenced by the Experiment below — confirm whether the
# Fig 6B-C (monovalent cation) data was meant to be wired in here.
sun_v_inact_kin_desc = """
Voltage-dependent inactivation kinetics measured using bi-exponential funcion
cf Fig 6B-C [Sun1997]
"""
vsteps_tf, tf, sd_tf = data.inact_tauf_Sun()
variances_tf = [sd_**2 for sd_ in sd_tf]
sun_inact_kin_tf_dataset = np.array([vsteps_tf, tf, variances_tf])
vsteps_ts, ts, sd_ts = data.inact_taus_Sun()
variances_ts = [sd_**2 for sd_ in sd_ts]
sun_inact_kin_ts_dataset = np.array([vsteps_ts, ts, variances_ts])
vsteps_rel_inact, rel_inact, sd_rel_inact = data.rel_inact_Sun()
variances_rel_inact = [sd_**2 for sd_ in sd_rel_inact]
sun_inact_kin_rel_inact_dataset = np.array([vsteps_rel_inact, rel_inact, variances_rel_inact])
# all voltage protocols are preceded by 500ms prepulse to -40mV from HP -80mV
tpre = 10000 # ms
tstep = 1000
vlower = -10
dv = 10
vupper = 30+dv
sun_inact_kin_protocol = myokit.Protocol()
for v in vsteps_tf:
    sun_inact_kin_protocol.add_step(-80, tpre-500)
    sun_inact_kin_protocol.add_step(-40, 500)
    sun_inact_kin_protocol.add_step(v, 1000)
sun_conditions = {'phys.T': 296.15, # K
                  'ca_conc.Ca_o': 1} # mM
def sun_inact_kin_sum_stats(data, fast=True, slow=True):
    """Fit a bi-exponential to each sweep's current decay.

    NOTE(review): this is a byte-for-byte redefinition of the function
    of the same name earlier in the module; being later, this is the
    definition bound at import time.
    """
    def double_exp(t, tauh, taus, Ah, As, A0):
        # Sum of two decaying exponentials plus a constant offset.
        return A0 + Ah*np.exp(-t/tauh) + As*np.exp(-t/taus)
    output_tf = []
    output_ts = []
    # One 11 s sweep per test voltage; keep only the final 1 s test step.
    for d in data.split_periodic(11000, adjust=True, closed_intervals=False):
        d = d.trim_left(10000, adjust=True)
        current = d['ical.i_CaL']
        time = d['engine.time']
        index = np.argmax(np.abs(current))
        # Set time zero to peak current
        current = current[index:]
        time = time[index:]
        t0 = time[0]
        time = [t-t0 for t in time]
        with warnings.catch_warnings():
            # Escalate fit warnings to errors so they are caught below.
            warnings.simplefilter('error', OptimizeWarning)
            warnings.simplefilter('error', RuntimeWarning)
            try:
                # Normalise the decay to the peak before fitting.
                current = [c/current[0] for c in current]
                if len(time)<=1 or len(current)<=1:
                    raise Exception('Failed simulation')
                popt, _ = so.curve_fit(double_exp,
                                       time,
                                       current,
                                       p0=[10,200,0.5,0.5,0],
                                       bounds=(0.,
                                               [np.inf, np.inf, 1.0, 1.0, 1.0]),
                                       max_nfev=1000)
                fit = [double_exp(t,popt[0],popt[1],popt[2],popt[3],popt[4]) for t in time]
                # Calculate r2
                ss_res = np.sum((np.array(current)-np.array(fit))**2)
                ss_tot = np.sum((np.array(current)-np.mean(np.array(current)))**2)
                r2 = 1 - (ss_res / ss_tot)
                # The smaller of the two fitted taus is the fast component.
                tauf = min(popt[0],popt[1])
                taus = max(popt[0],popt[1])
                if r2 > fit_threshold:
                    if fast:
                        output_tf = output_tf+[tauf]
                    if slow:
                        output_ts = output_ts+[taus]
                else:
                    raise RuntimeWarning('scipy.optimize.curve_fit found a poor fit')
            except:
                # Any failure (bad fit, warning, short trace) -> inf marker.
                if fast:
                    output_tf = output_tf+[float('inf')]
                if slow:
                    output_ts = output_ts+[float('inf')]
    output = output_tf+output_ts
    return output
def sun_inact_kin_sum_stats_tf(data):
    """Summary statistics restricted to the fast time constants."""
    return sun_inact_kin_sum_stats(data, slow=False)
def sun_inact_kin_sum_stats_ts(data):
    """Summary statistics restricted to the slow time constants."""
    return sun_inact_kin_sum_stats(data, fast=False)
def sun_rel_inact_sum_stats(data):
    """Relative inactivation after the 1 s step: 1 - I_ss/I_peak per sweep.

    Failed sweeps (no measurable peak current) are flagged with ``inf``.
    """
    output = []
    for d in data.split_periodic(11000, adjust=True, closed_intervals=False):
        # Keep only the final 1 s test step of each 11 s sweep.
        d = d.trim_left(10000, adjust=True)
        current = d['ical.i_CaL']
        peak = max(current, key=abs)  # signed value with largest magnitude
        ss = current[-1]              # steady state: last sample of the step
        try:
            output = output + [1-ss/peak]
        except ZeroDivisionError:
            # Narrowed from a bare except: only the division can raise here.
            output = output + [float('inf')]
    return output
# NOTE(review): rebinds `sun_inact_kin` using the re-assigned datasets
# and protocol above; the earlier assignment of this name is discarded.
sun_inact_kin = Experiment(
    dataset=[sun_inact_kin_tf_dataset,
             sun_inact_kin_ts_dataset],
    protocol=sun_inact_kin_protocol,
    conditions=sun_conditions,
    sum_stats=sun_inact_kin_sum_stats,
    description=sun_inact_kin_desc,
    Q10=Q10_tau_inact,
    Q10_factor=-1)
|
What happens if you win the jam?
Well, excluding all the good things which come with just participating, HeartBeast will play and showcase your game on his YouTube channel — that guarantees thousands of views. That'll for sure help with exposure, alongside getting to say "I won a game jam which had over 500 participants" for future opportunities.
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
'''
Combine the converted Snac Data
usage: snac_combine.py modelname timestep nodex nodey nodez nprocx nprocy nprocz
'''
class Combine(object):
def __init__(self, grid):
# data storage
self.saved = {}
self.saved["positions"] = [0.0]*(grid['nox'] * grid['noy'] * grid['noz'])
self.saved["velocities"] = [0.0]*(grid['nox'] * grid['noy'] * grid['noz'])
self.saved["force"] = [0.0]*(grid['nox'] * grid['noy'] * grid['noz'])
self.saved["temperature"] = [0.0]*(grid['nox'] * grid['noy'] * grid['noz'])
self.saved["plstrain"] = [0.0]*((grid['nox']-1) * (grid['noy']-1) * (grid['noz']-1))
self.saved["phaseIndex"] = [0]*((grid['nox']-1) * (grid['noy']-1) * (grid['noz']-1))
self.saved["irheology"] = [0]*((grid['nox']-1) * (grid['noy']-1) * (grid['noz']-1))
self.saved["viscosity"] = [0.0]*((grid['nox']-1) * (grid['noy']-1) * (grid['noz']-1))
self.saved["stress"] = [0.0]*((grid['nox']-1) * (grid['noy']-1) * (grid['noz']-1))
self.saved["pressure"] = [0.0]*((grid['nox']-1) * (grid['noy']-1) * (grid['noz']-1))
self.saved["strain_rate"] = [0.0]*((grid['nox']-1) * (grid['noy']-1) * (grid['noz']-1))
self.struct = {"velocities":"","force":"","strain_rate":"","stress":"","pressure":"","temperature":"","plstrain":"","viscosity":"","irheoogy":"","phaseIndex":""}
self.data = {"positions":[],"velocities":[],"strain_rate":[],"stress":[],"pressure":[],"temperature":[],"plstrain":[],"viscosity":[],"irheology":[],"phaseIndex":[]}
self.tempExist = False
self.apsExist = False
self.viscExist = False
self.irhExist = False
return
def readData(self, filename):
fp = file(filename, 'r')
lines = fp.readlines()
m=0
while 1:
if lines[m].startswith("object"):
ids = lines[m-1].split()
keywords = lines[m].split()
for i in range(len(keywords)):
if keywords[i] == "items":
items = int(keywords[i+1])
if ids[2] == "positions":
self.data["positions"] = lines[m+1:m+items+1]
m = m+items+1
elif ids[2] == "velocity":
self.data["velocities"] = lines[m+1:m+items+1]
m = m+items+1
elif ids[2] == "strain":
self.data["strain_rate"] = lines[m+1:m+items+1]
m = m+items+1
elif ids[2] == "stress":
self.data["stress"] = lines[m+1:m+items+1]
m = m+items+1
elif ids[2] == "pressure":
self.data["pressure"] = lines[m+1:m+items+1]
m = m+items+1
elif ids[2] == "force":
self.data["force"] = lines[m+1:m+items+1]
m = m+items+1
elif ids[2] == "phaseIndex":
self.data["phaseIndex"] = lines[m+1:m+items+1]
m = m+items+1
elif ids[2] == "temperature":
self.data["temperature"] = lines[m+1:m+items+1]
self.tempExist = True
m = m+items+1
elif ids[2] == "accumulated":
self.data["plstrain"] = lines[m+1:m+items+1]
self.apsExist = True
m = m+items+1
elif ids[2] == "viscosity":
self.data["viscosity"] = lines[m+1:m+items+1]
self.viscExist = True
m = m+items+1
elif ids[2] == "rheology":
self.data["irheology"] = lines[m+1:m+items+1]
self.irhExist = True
m = m+items+1
elif ids[1] == "construct":
break
else:
m = m + 1
if m >= len(lines):
break
else:
if m >= len(lines):
break
else:
m = m + 1
return self.data
def join(self, data, me, grid, cap):
# processor geometry
nprocx = int(cap['nprocx'])
nprocy = int(cap['nprocy'])
nprocz = int(cap['nprocz'])
mylocx = me % nprocx
mylocy = ((me - mylocx) / nprocx) % nprocy
mylocz = (((me - mylocx) / nprocx - mylocy) / nprocy) % nprocz
print me, nprocx,nprocy,nprocz, mylocx, mylocy, mylocz
# mesh geometry
nox = int(grid['nox'])
noy = int(grid['noy'])
noz = int(grid['noz'])
nex = nox - 1
ney = noy - 1
nez = noz - 1
mynox = 1 + (nox-1)/nprocx
mynoy = 1 + (noy-1)/nprocy
mynoz = 1 + (noz-1)/nprocz
mynex = mynox - 1
myney = mynoy - 1
mynez = mynoz - 1
if not len(data["positions"]) == mynox * mynoy * mynoz:
print mynox, mynoy, mynoz, mynox * mynoy * mynoz, len(data["positions"])
raise ValueError, "data size"
if not len(data["stress"]) == (mynox-1) * (mynoy-1) * (mynoz-1):
print (mynox-1),(mynoy-1),(mynoz-1), len(data["stress"])
raise ValueError, "data size"
mynxs = (mynox - 1) * mylocx
mynys = (mynoy - 1) * mylocy
mynzs = (mynoz - 1) * mylocz
myexs = mynex * mylocx
myeys = myney * mylocy
myezs = mynez * mylocz
n = 0
for i in range(mynzs, mynzs+mynoz):
for j in range(mynys, mynys + mynoy):
for k in range(mynxs, mynxs + mynox):
m = k + j * nox + i * nox * noy
self.saved["positions"][m] = data["positions"][n]
self.saved["velocities"][m] = data["velocities"][n]
self.saved["force"][m] = data["force"][n]
if self.tempExist:
self.saved["temperature"][m] = data["temperature"][n]
n += 1
n = 0
for i in range(myezs, myezs+mynez):
for j in range(myeys, myeys + myney):
for k in range(myexs, myexs + mynex):
m = k + j * nex + i * nex * ney
self.saved["strain_rate"][m] = data["strain_rate"][n]
self.saved["stress"][m] = data["stress"][n]
self.saved["pressure"][m] = data["pressure"][n]
self.saved["phaseIndex"][m] = data["phaseIndex"][n]
if self.apsExist:
self.saved["plstrain"][m] = data["plstrain"][n]
if self.viscExist:
self.saved["viscosity"][m] = data["viscosity"][n]
if self.irhExist:
self.saved["irheology"][m] = data["irheology"][n]
n += 1
return
def write(self, filename, grid, data, type, fp, count):
if type == "positions":
print >> fp, "\n# the positions array"
print >> fp, "object %d class array type float rank 1 shape 3 items %d data follows" % (count, grid['nox']*grid['noy']*grid['noz'])
fp.writelines(data[type])
return count + 1
elif type == "connections":
print >> fp, "\n# the regular connections"
print >> fp, "object %d class gridconnections counts %d %d %d" % (count, grid['noz'],grid['noy'],grid['nox'])
return count + 1
elif type == "velocities":
print >> fp, "\n# the velocities array"
print >> fp, "object %d class array type float rank 1 shape 3 items %d data follows" % (count, grid['nox']*grid['noy']*grid['noz'])
fp.writelines(data["velocities"])
self.struct[type] = '''object "velocities" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "strain_rate":
print >> fp, "\n# the strain rate array"
print >> fp, "object %d class array type float rank 0 items %d data follows" % (count, (grid['nox']-1)*(grid['noy']-1)*(grid['noz']-1))
fp.writelines(data["strain_rate"])
print >> fp, 'attribute "dep" string "connections"'
self.struct[type] = '''object "strain_rate" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "stress":
print >> fp, "\n# the stress array"
print >> fp, "object %d class array type float rank 0 items %d data follows" % (count, (grid['nox']-1)*(grid['noy']-1)*(grid['noz']-1))
fp.writelines(data["stress"])
print >> fp, 'attribute "dep" string "connections"'
self.struct[type] = '''object "stress" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "pressure":
print >> fp, "\n# the pressure array"
print >> fp, "object %d class array type float rank 0 items %d data follows" % (count, (grid['nox']-1)*(grid['noy']-1)*(grid['noz']-1))
fp.writelines(data["pressure"])
print >> fp, 'attribute "dep" string "connections"'
self.struct[type] = '''object "pressure" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "force":
print >> fp, "\n# the force array"
print >> fp, "object %d class array type float rank 1 shape 3 items %d data follows" % (count, grid['nox']*grid['noy']*grid['noz'])
fp.writelines(data["force"])
self.struct[type] = '''object "force" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "phaseIndex":
print >> fp, "\n# the phaseIndex array"
print >> fp, "object %d class array type int rank 0 items %d data follows" % (count, (grid['nox']-1)*(grid['noy']-1)*(grid['noz']-1))
fp.writelines(data["phaseIndex"])
print >> fp, 'attribute "dep" string "connections"'
self.struct[type] = '''object "phaseIndex" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "temperature":
print >> fp, "\n# the temperature array"
print >> fp, "object %d class array type float rank 0 items %d data follows" % (count, grid['nox']*grid['noy']*grid['noz'])
fp.writelines(data["temperature"])
self.struct[type] = '''object "temperature" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "plstrain":
print >> fp, "\n# the accumulated plastic strain array"
print >> fp, "object %d class array type float rank 0 items %d data follows" % (count, (grid['nox']-1)*(grid['noy']-1)*(grid['noz']-1))
fp.writelines(data["plstrain"])
print >> fp, 'attribute "dep" string "connections"'
self.struct[type] = '''object "plstrain" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "viscosity":
print >> fp, "\n# the viscosity array"
print >> fp, "object %d class array type float rank 0 items %d data follows" % (count, (grid['nox']-1)*(grid['noy']-1)*(grid['noz']-1))
fp.writelines(data["viscosity"])
print >> fp, 'attribute "dep" string "connections"'
self.struct[type] = '''object "viscosity" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "irheology":
print >> fp, "\n# the irheology array"
print >> fp, "object %d class array type float rank 0 items %d data follows" % (count, (grid['nox']-1)*(grid['noy']-1)*(grid['noz']-1))
fp.writelines(data["irheology"])
print >> fp, 'attribute "dep" string "connections"'
self.struct[type] = '''object "irheology" class field
component "positions" value 1
component "connections" value 2
component "data" value %d
''' % (count)
return count + 1
elif type == "data_structure":
print >> fp, "\n# construct data structure"
for member in self.struct:
if self.struct[member] != "":
print >> fp, "%s" % (self.struct[member])
print >> fp, "object \"default\" class group"
for member in self.struct:
if self.struct[member] != "":
print >> fp, "member \"%s\" value \"%s\"" % (member,member)
print >> fp, "End"
return
if __name__ == '__main__':
    import sys
    # usage: snac_combine.py modelname timestep nodex nodey nodez nprocx nprocy nprocz
    if not len(sys.argv) == 9:
        print __doc__
        sys.exit(1)
    # model name prefix and the snapshot time step to combine
    prefix = sys.argv[1]
    step = int(sys.argv[2])
    # global mesh dimensions (number of nodes per axis)
    grid = {}
    grid['nox'] = int(sys.argv[3])
    grid['noy'] = int(sys.argv[4])
    grid['noz'] = int(sys.argv[5])
    # processor decomposition
    cap = {}
    cap['nprocx'] = int(sys.argv[6])
    cap['nprocy'] = int(sys.argv[7])
    cap['nprocz'] = int(sys.argv[8])
    nproc = cap['nprocx'] * cap['nprocy'] * cap['nprocz']
    cb = Combine(grid)
    # gather every processor's per-rank dump into the global arrays
    for n in range(0, nproc):
        filename = 'snac.%d.%06d.dx' % (n, step)
        print 'reading', filename
        data = cb.readData(filename)
        cb.join(data, n, grid, cap)
    filename = '%s.%06d.dx' % (prefix,step)
    print 'writing', filename
    fp = open(filename, 'w')
    print >> fp, "# OpenDX DataFile Snac simulation output ASCII"
    # OpenDX object numbering starts at 1; write() returns the next number
    count = 1
    count = cb.write(filename, grid, cb.saved, "positions", fp, count)
    count = cb.write(filename, grid, cb.saved, "connections", fp, count)
    count = cb.write(filename, grid, cb.saved, "velocities", fp, count)
    count = cb.write(filename, grid, cb.saved, "strain_rate", fp, count)
    count = cb.write(filename, grid, cb.saved, "stress", fp, count)
    count = cb.write(filename, grid, cb.saved, "pressure", fp, count)
    count = cb.write(filename, grid, cb.saved, "force", fp, count)
    count = cb.write(filename, grid, cb.saved, "phaseIndex", fp, count)
    # optional fields are written only when present in the input dumps
    if cb.tempExist:
        count = cb.write(filename, grid, cb.saved, "temperature", fp, count)
    if cb.apsExist:
        count = cb.write(filename, grid, cb.saved, "plstrain", fp, count)
    if cb.viscExist:
        count = cb.write(filename, grid, cb.saved, "viscosity", fp, count)
    if cb.irhExist:
        count = cb.write(filename, grid, cb.saved, "irheology", fp, count)
    cb.write(filename, grid, cb.saved, "data_structure", fp, count)
    fp.close()
|
Halloween Scare Attractions Reviews Preview!
So its that time of the year again... NO NOT CHRISTMAS.
Anyway its getting nearer and nearer to the Halloween season..
So this means I'm gonna get a crap ton of travelling to do!
Anyway I'm going to review quite a lot of events this year!
Tulleys always finds a way into my calendar.
and an entirely new maze based on witches!
Not forgetting the returns of classic like Haunted Hayride and The Cellar, and the return of the incredibly dull Hellements.. Meh.
After the train crash of last years event.. The event returns!
This year includes a new maze loosely tied into DBGT, and a revamp of The Big Flop!
Overall the event seems to look better than last year! Well.. We Hope!
After the sheer rave reviews from people I had to get myself over to Screamland!
This year sees return of last year's great mazes, as well as two new mazes!
Overall Screamland last year seemed to be one of the best events in the UK, and I suspect this year will be no different!
After the massive success of last years event, I had to get myself back to Scarefest!
the return of TOTT, it was a must.
So there you go, the Halloween season is very near!
I wish everyone a happy Halloween, and good month of getting sh!t scared!
Previous entry Imindetonator's Halloween Awards 2015!
I'll be doing all of those (except Screamland) plus Xtreme Scream Park and Legoland fireworks.
It will be a first time for all of them for me (except Fright Nights). I think I'm most excited for Scarefest personally.
Xtreme Scream Park looks amazing, especially after the SCAR award they won.
Sadly its quite a long drive for me so I haven't got round to going yet.
Have fun at Scarefest, it deffo looks brilliant like last year.
Plus Sub Species is bloody terrifying!
Out of all of the ones I have done, Xtreme is by far my favourite Halloween event!
Especially the opening break out fun!
|
class SSHException(Exception):
    """Base exception for jumpssh.

    When an originating exception is supplied, it is appended to the
    message and recorded as ``__cause__`` so error chains stay traceable.
    """

    def __init__(self, msg, original_exception=None):
        if original_exception:
            full_message = "%s: %s" % (msg, original_exception)
        else:
            full_message = msg
        super(SSHException, self).__init__(full_message)
        # Mimic ``raise ... from ...`` chaining semantics explicitly.
        self.__cause__ = original_exception
        self.__suppress_context__ = True
class ConnectionError(SSHException):
    """Exception raised when unable to establish SSHSession with remote host"""
    # NOTE: shadows the Python 3 builtin ConnectionError inside this module.
    pass
class TimeoutError(SSHException):
    """Exception raised when remote command execution reached specified timeout"""
    # NOTE: shadows the Python 3 builtin TimeoutError inside this module.
    pass
class RestClientError(SSHException):
    """Exception raised when error occurs during rest ssh calls"""
    pass
class RunCmdError(SSHException):
    """Raised when a remote command exits with an unexpected status code.

    :ivar int exit_code: The exit code from the run command.
    :ivar list(int): List of expected success exit codes for run command.
    :ivar str command: The command that is generating this exception.
    :ivar str error: The error captured from the command output.
    :ivar int runs_nb: Number of times the command was attempted.
    """

    def __init__(self, exit_code, success_exit_code, command, error, runs_nb=1):
        expected = ','.join(map(str, success_exit_code))
        parts = ['Command (%s) returned exit status (%s), expected [%s]'
                 % (command, exit_code, expected)]
        if runs_nb > 1:
            parts.append(" after %s runs" % runs_nb)
        if error:
            parts.append(": %s" % error)
        super(RunCmdError, self).__init__(''.join(parts))
        self.exit_code = exit_code
        self.success_exit_code = success_exit_code
        self.command = command
        self.error = error
        self.runs_nb = runs_nb
|
Bulk cargo hoppers are now in place to provide continuity of trade at the Port of Newcastle while the new ship unloader and conveyor system is constructed.
The old ship unloaders, located at the Kooragang 2 berth, have been dismantled; they had been a feature of the Walsh Point precinct since 1968. The two rail-mounted ship unloaders were established at what was then known as “Rotten Row” to handle bulk dry products including fertiliser, phosphate rock, ores and meals.
Kooragang 2 and 3 berths remain the busiest and most diverse common user berths in the port, handling a wide range of dry bulk and liquid bulk commodities.
In June 2018, Port of Newcastle announced it was investing $33 million into a new ship unloader, which included a state-of-the-art crane, conveyor infrastructure and an electrical substation.
Port of Newcastle’s Executive Manager Marine and Operations Keith Wilks said the replacement of the old ship unloaders was driven by the Port’s commitment to working with its customers and anticipating their future needs.
“Our customers have requested more capacity to grow their cargo volumes and the new unloader will have a capacity of 1,000 tonne per hour, a significant increase from the current average rates of 230 tonnes per hour,” Wilks said.
The Port of Newcastle’s principal contractor, Kerman Contracting, will deliver the overall project, with design and construction of the new crane unloader by its partner Tenova Takraf. The new crane unloader will be constructed in Vietnam and is due to arrive in the Port in early 2020.
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from ilastik.applets.base.appletSerializer import getOrCreateGroup, deleteIfPresent
from ilastik.workflows.carving.carvingSerializer import CarvingSerializer
from opSplitBodyCarving import OpSplitBodyCarving
class SplitBodyCarvingSerializer(CarvingSerializer):
    """Serializer for the split-body carving workflow.

    Adds one extra per-lane setting on top of the regular carving data:
    the annotation filepath, stored under the "split_settings" HDF5 group.
    """
    def __init__(self, topLevelOperator, *args, **kwargs):
        super( SplitBodyCarvingSerializer, self ).__init__(topLevelOperator, *args, **kwargs)
        self._topLevelOperator = topLevelOperator
        # BUGFIX: initialize the dirty flag so isDirty() cannot raise
        # AttributeError when called before any change or (de)serialization.
        self.__dirty = False
        # Set up dirty tracking: any change to an annotation filepath slot
        # marks this serializer dirty.
        def setDirty(*args):
            self.__dirty = True
        def doMulti(slot, index, size):
            slot[index].notifyDirty(setDirty)
            slot[index].notifyValueChanged(setDirty)
        topLevelOperator.AnnotationFilepath.notifyInserted(doMulti)
        topLevelOperator.AnnotationFilepath.notifyRemoved(setDirty)
    def _serializeToHdf5(self, topGroup, hdf5File, projectFilePath):
        """Write per-lane annotation filepaths, then the base carving data."""
        split_settings_grp = getOrCreateGroup(topGroup, "split_settings")
        for laneIndex in range(len( self._topLevelOperator )):
            lane_grp = getOrCreateGroup(split_settings_grp, "{}".format( laneIndex ))
            opLaneView = self._topLevelOperator.getLane(laneIndex)
            if opLaneView.AnnotationFilepath.ready():
                annotation_filepath = opLaneView.AnnotationFilepath.value
                # Replace any stale dataset before writing the new value.
                deleteIfPresent( lane_grp, "annotation_filepath" )
                lane_grp.create_dataset("annotation_filepath", data=annotation_filepath)
        # Now save the regular carving data.
        super( SplitBodyCarvingSerializer, self )._serializeToHdf5( topGroup, hdf5File, projectFilePath )
        self.__dirty = False
    def _deserializeFromHdf5(self, topGroup, groupVersion, hdf5File, projectFilePath):
        """Restore per-lane annotation filepaths, then the base carving data."""
        try:
            split_settings_grp = topGroup["split_settings"]
        except KeyError:
            # Older project files have no split settings; nothing to restore.
            pass
        else:
            for laneIndex, grp_name in enumerate( sorted(split_settings_grp.keys()) ):
                opLaneView = self._topLevelOperator.getLane(laneIndex)
                lane_grp = split_settings_grp[grp_name]
                try:
                    annotation_filepath = lane_grp["annotation_filepath"].value
                except KeyError:
                    # This lane was saved without an annotation filepath.
                    pass
                else:
                    opLaneView.AnnotationFilepath.setValue( annotation_filepath )
        # Now load the regular carving data.
        super( SplitBodyCarvingSerializer, self )._deserializeFromHdf5( topGroup, groupVersion, hdf5File, projectFilePath )
        self.__dirty = False
    def isDirty(self):
        """True if annotation settings or the base carving data changed."""
        return self.__dirty or super( SplitBodyCarvingSerializer, self ).isDirty()
|
The National Steinbeck Center, California Rural Legal Assistance, Salinas Valley Pride Celebrations and the Monterey County Office of Education are excited to present a FREE screening of The Lavender Scare (2017) on Monday, October 8th, 6PM, Maya Cinemas.
The screening is part of Salinas Valley Pride Celebrations’ week-long activities leading up to the annual Salinas Valley Pride on Sunday, October 14. The screening is also part of California Rural Legal Assistance and the Monterey County Office of Education’s programming for LGBTQ History Month. For teacher resources, please click here.
|
# -*- coding:utf-8 -*-
'''
Created on 2014年2月2日
@author: Hyphen.Liu
'''
import Queue
import os
import globalvar.guiGlobalVar as ggv
import globalvar.crawlerGlobalVar as cgv
from crawler.siteInfoThread import SiteInfoThread
class SiteInfoMain():
    """Identify the language of web pages and collect site details.

    For every hyperlink read from ``infile`` the page language is
    identified; for sites recognized as the target language, detailed
    site information is gathered by worker threads.
    """
    def __init__(self, flag, infile, outfile=None):
        """Initialize parameters.

        :param flag: distinguishes search-engine result collection from
            breadth-first crawl collection; one of 'engine' or 'wide'.
        :param infile: text file containing the hyperlinks to identify.
        :param outfile: optional file the results are written to.
        """
        self.flag = flag
        self.infile = infile
        self.outfile = outfile
        self.window = ggv.window
        self.threadnum = ggv.systemSetList[1]
        self.inqueue = Queue.Queue()
        # Collected site results, reused by the breadth-first site scan.
        # During that scan this queue only acts as a counter.
        self.outqueue = Queue.Queue()
        if self.flag == 'engine':
            m = '2/4'  # step two of four
        else:
            m = '4/4'  # step four of four
        self.window.SetStatusText(u'%s网页语言识别中..0.0%%' % m, 1)  # status bar text
        self.window.SetStatusText(u'收集网站数:0', 2)
        self.window.gauge.Show()  # show the progress gauge
        self.siteinfos()
        if self.outfile:
            self.saveresults()  # stage two needs to persist its results
    def siteinfos(self):
        """Detect page languages and fetch details for matching sites.

        Reads the preprocessed URL document (many domain URLs plus their
        stripped URL segments), feeds a work queue and starts the worker
        threads, then waits for the queue to drain.
        """
        # The infile is never created when the search-engine stage was
        # force-terminated, so its absence means "stop here".
        if not os.path.isfile(self.infile):
            return None
        ggv.scanindex = 0  # reset the sequence number
        # BUGFIX: close the input file instead of leaking the handle.
        infp = open(self.infile, 'r')
        try:
            lines = infp.readlines()
        finally:
            infp.close()
        for line in lines:
            if line:
                self.inqueue.put(line)  # build the queue of URLs to check
        ggv.gaugesize = self.inqueue.qsize()  # denominator for the progress gauge
        for i in range(self.threadnum):
            if ggv.pterminate:
                break  # program was force-terminated
            # Main worker: language detection plus site info collection.
            gsit = SiteInfoThread(self.inqueue, self.outqueue, self.flag)
            cgv.threadlist.append(gsit)
            # gsit.setDaemon(True)
            gsit.start()
        self.inqueue.join()  # wait until the input queue is drained
        if os.path.isfile(self.infile):
            os.remove(self.infile)
        self.window.gauge.Hide()
    def saveresults(self):
        """Write the collected results to the output file."""
        if ggv.pterminate:
            return None
        ofile = open(self.outfile, 'w+')
        # BUGFIX: guarantee the handle is closed even if a write fails.
        try:
            while not self.outqueue.empty():
                lk = self.outqueue.get()
                if lk:
                    ofile.write(lk + '\r\n')
        finally:
            ofile.close()
|
What Do You Think? Can Cats Eat Dog Food for a Day !?
This Will Shock You: Can Cats Eat Peanut Butter ?
The Truth About Cats And Apples: Can Cats Eat Apples?
iHomePet.com is a participant in the Amazon Services LLC Associates Program, an affiliate advertising program designed to provide a means for sites to earn advertising fees by advertising and linking to Amazon.com.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from io import open
from unitex import *
_LOGGER = logging.getLogger(__name__)
class FSAConstants:
    """Shared constants for the FSA classes."""
    # Label used for epsilon (empty) transitions.
    EPSILON = "<E>"
    # Traversal orders accepted by Automaton.iter().
    DEPTH_FIRST_SEARCH = "dfs"
    BREADTH_FIRST_SEARCH = "bfs"
class Edge(object):
    """A labelled transition from a source node to one or more target nodes."""

    def __init__(self, label, targets=None, source=None):
        """Create an edge.

        :param label: transition label (FSAConstants.EPSILON for epsilon).
        :param targets: optional list of target Node objects.
        :param source: optional source Node.
        """
        self.__label = label
        self.__source = source
        self.__targets = targets
        # Parallel set of target ids for O(1) membership tests.
        # BUGFIX: always initialize __tids; it was left undefined when no
        # targets were supplied, breaking __contains__/add_target/del_target.
        if self.__targets is not None:
            self.__tids = set([target.get_id() for target in targets])
        else:
            self.__tids = set()

    def __len__(self):
        return len(self.__targets)

    def __str__(self):
        label = self.get_label()
        label = label.encode(UnitexConstants.DEFAULT_ENCODING)
        return label

    def __unicode__(self):
        return u"%s" % self.get_label()

    def __hash__(self):
        return hash(self.get_label())

    def __cmp__(self, e):
        # BUGFIX: the original compared the label with itself, so every
        # comparison returned 0.  This form also avoids the Python-2-only
        # cmp() builtin: returns -1/0/1 ordering by label.
        return (self.get_label() > e.get_label()) - (self.get_label() < e.get_label())

    def __iter__(self):
        for target in self.__targets:
            yield target

    def __contains__(self, target):
        return True if target.get_id() in self.__tids else False

    def __getitem__(self, i):
        return self.__targets[i]

    def get_label(self):
        return self.__label

    def get_source(self):
        return self.__source

    def set_source(self, source):
        self.__source = source

    def get_targets(self):
        return self.__targets

    def set_targets(self, targets):
        self.__targets = targets
        self.__tids = set([target.get_id() for target in targets])

    def add_target(self, target):
        """Append *target* unless a target with the same id exists."""
        if target.get_id() in self.__tids:
            return
        # BUGFIX: keep the id set in sync with the target list; it was
        # never updated, so duplicate targets could be appended.
        self.__tids.add(target.get_id())
        self.__targets.append(target)

    def del_target(self, target):
        """Remove the target with the same id as *target*, if present."""
        if target.get_id() not in self.__tids:
            return
        self.__tids.remove(target.get_id())
        for i in range(len(self.__targets)):
            _target = self.__targets[i]
            if _target.get_id() == target.get_id():
                del self.__targets[i]
                break
class Node(object):
    """A state of the automaton, holding its outgoing labelled edges."""

    def __init__(self, _id, final=False):
        self.__id = _id
        self.__final = final
        self.__edges = {}       # maps label -> Edge
        self.__depth = 0
        self.__visited = False

    def __len__(self):
        return len(self.__edges)

    def __contains__(self, label):
        return label in self.__edges

    def __getitem__(self, label):
        return self.__edges.get(label, None)

    def __iter__(self):
        return iter(self.__edges)

    def __str__(self):
        return self.__unicode__().encode(UnitexConstants.DEFAULT_ENCODING)

    def __unicode__(self):
        parts = [u"NODE[%s]" % str(self.get_id())]
        if self.is_final():
            parts.append(u" -- FINAL")
        for label in self:
            ids = u" | ".join([str(t.get_id()) for t in self[label]])
            parts.append(u"\n\t%s -> (%s)" % (label, ids))
        return u"".join(parts)

    def get_id(self):
        return self.__id

    def set_id(self, i):
        self.__id = i

    def is_deterministic(self):
        """True when there is no epsilon edge and no label has >1 target."""
        if FSAConstants.EPSILON in self.__edges:
            return False
        return all(len(self[label]) <= 1 for label in self.__edges)

    def exists(self, label, node=None):
        """True if an edge *label* exists (and reaches *node*, if given)."""
        if label not in self:
            return False
        if node is not None and node not in self[label]:
            return False
        return True

    def add(self, label, target):
        """Connect this node to *target* through an edge named *label*."""
        if self.exists(label, target):
            return
        if label in self:
            self[label].add_target(target)
        else:
            self.__edges[label] = Edge(label, [target], self)

    def delete(self, label, node=None):
        """Remove the edge *label* entirely, or just its target *node*."""
        if not self.exists(label, node):
            raise UnitexException("Edge not found: %s" % label)
        if node is None:
            del self.__edges[label]
        else:
            self[label].del_target(node)

    def set_depth(self, depth):
        self.__depth = depth

    def get_depth(self):
        return self.__depth

    def is_visited(self):
        return self.__visited

    def set_visited(self, visited=True):
        self.__visited = visited

    def is_final(self):
        return self.__final

    def set_final(self, final=True):
        self.__final = final
class NodeSets(object):
    """Partition of node ids into sets, indexed by member id."""

    def __init__(self):
        # Maps each node id to the canonical (sorted) tuple of its set.
        self.__id2set = {}

    def __getitem__(self, _id):
        return self.__id2set[_id]

    def __contains__(self, s):
        return s in self.all()

    def __iter__(self):
        return iter(self.all())

    def all(self):
        """Return the distinct registered sets as a set of tuples."""
        return set(tuple(members) for members in self.__id2set.values())

    def add(self, s):
        """Register *s* as one set; every member maps to its canonical tuple."""
        canonical = tuple(sorted(set(s)))
        for _id in s:
            self.__id2set[_id] = canonical
class Automaton(object):
    """Finite-state automaton over string labels.

    Node 0 is always the initial state and nodes are addressed by their
    integer id.  Supports epsilon transitions, subset-construction
    determinization, partition-refinement minimization, BFS/DFS edge
    iteration and Graphviz (dot) export.
    """
    def __init__(self, name="Automaton"):
        self.__name = name
        self.__nodes = []
        self.__initial = 0
        self.__finals = []
        # The initial node always exists and has id 0.
        self.__nodes.append(Node(self.__initial, False))
    def __len__(self):
        return len(self.__nodes)
    def __getitem__(self, _id):
        # Returns None for unknown ids instead of raising.
        try:
            return self.__nodes[_id]
        except IndexError:
            return None
    def __iter__(self):
        for node in self.__nodes:
            yield node
    def __str__(self):
        automaton = self.__unicode__()
        automaton = automaton.encode(UnitexConstants.DEFAULT_ENCODING)
        return automaton
    def __unicode__(self):
        title = u"# FSA -- %s #" % self.get_name()
        s = u"%s\n%s\n%s\n\n" % ("#" * len(title), title, "#" * len(title))
        for node in self:
            s += u"%s\n\n" % node
        return s
    def get_name(self):
        return self.__name
    def set_name(self, name):
        self.__name = name
    def get_depth(self):
        """Return the largest depth recorded on any final node."""
        depth = 0
        for nid in self.__finals:
            final = self.__nodes[nid]
            if final.get_depth() > depth:
                depth = final.get_depth()
        return depth
    def get_initial(self):
        return self.__initial
    def set_initial(self, initial):
        self.__initial = initial
    def get_finals(self):
        return self.__finals
    def set_finals(self, finals):
        self.__finals = finals
    def get_nodes(self):
        return self.__nodes
    def set_nodes(self, nodes):
        self.__nodes = nodes
    def add_edge(self, label, sid, tid):
        """Add a transition labelled *label* from node *sid* to node *tid*."""
        source = self[sid]
        target = self[tid]
        target.set_depth(source.get_depth() + 1)
        source.add(label, target)
    def add_node(self, initial=False, final=False):
        """Return the initial node's id, or create a new (optionally final) node."""
        if initial is True:
            # There is exactly one initial node and it always exists.
            return self.__initial
        elif final is True:
            self.__finals.append(len(self.__nodes))
            self.__nodes.append(Node(self.__finals[-1], True))
            return self.__finals[-1]
        nid = len(self.__nodes)
        self.__nodes.append(Node(nid, final))
        return nid
    def add_path(self, path):
        """Add a path of labels from the initial node to a fresh final node."""
        if len(path) == 0:
            raise UnitexException("Empty path!")
        sid = self.add_node(initial=True, final=False)
        # Intermediate labels get fresh non-final nodes...
        for label in path[:-1]:
            tid = self.add_node(initial=False, final=False)
            self.add_edge(label, sid, tid)
            sid = tid
        # ...and the last label leads to a newly created final node.
        # (The original used a no-op for/else here; with no break the else
        # clause always ran, so this is equivalent.)
        self.add_edge(path[-1], sid, self.add_node(initial=False, final=True))
    def get_alphabet(self):
        """Return the tuple of all labels used by the automaton."""
        alphabet = set()
        for node in self:
            for label in node:
                alphabet.add(label)
        return tuple(alphabet)
    def is_deterministic(self):
        for node in self:
            if not node.is_deterministic():
                return False
        return True
    def __closure(self, nid):
        """Return the ids of all nodes reachable from *nid* via epsilon edges."""
        stack = [nid]
        result = set(stack)
        while len(stack) > 0:
            current = stack.pop()
            if FSAConstants.EPSILON in self[current]:
                # BUGFIX: the original pushed the Edge object itself onto
                # the stack of node ids, so self[current] failed on the
                # next iteration; walk the epsilon edge's target nodes and
                # push their ids instead.
                for target in self[current][FSAConstants.EPSILON]:
                    tid = target.get_id()
                    if tid not in result:
                        stack.append(tid)
                        result.add(tid)
        return tuple(result)
    def determinize(self):
        """Determinize in place using the classic subset construction."""
        dfa = Automaton("DETERMINIZED(%s)" % self.get_name())
        alphabet = self.get_alphabet()
        initials = self.__closure(self.get_initial())
        hid = dfa.add_node(initial=True, final=False)
        # Maps each visited subset of NFA ids to its DFA node id.
        visited = {}
        visited[initials] = hid
        stack = [initials]
        while len(stack) > 0:
            current = stack.pop()
            for label in alphabet:
                new = set()
                for node in current:
                    if not label in self[node]:
                        continue
                    for next in self[node][label]:
                        new.update(self.__closure(next.get_id()))
                new = tuple(new)
                if len(new) == 0:
                    continue
                if new not in visited:
                    stack.append(new)
                    # A subset is final iff it contains a final NFA node.
                    final = True in [self[_id].is_final() for _id in new]
                    nid = dfa.add_node(final=final)
                    visited[new] = nid
                dfa.add_edge(label, visited[current], visited[new])
        # Adopt the determinized automaton's state in place.
        self.set_name(dfa.get_name())
        self.set_initial(dfa.get_initial())
        self.set_finals(dfa.get_finals())
        self.set_nodes(dfa.get_nodes())
    def minimize(self):
        """Minimize in place by iterative partition refinement.

        Assumes the automaton is already deterministic.
        """
        # Renamed from 'min' to avoid shadowing the builtin.
        mfa = Automaton("MINIMIZED(%s)" % self.get_name())
        alphabet = self.get_alphabet()
        nodetoset = {}
        settonode = {}
        sets = NodeSets()
        # Initial partition: final vs non-final states.
        rest, final = [], []
        for node in self:
            if node.is_final():
                final.append(node.get_id())
            else:
                rest.append(node.get_id())
        sets.add(rest)
        sets.add(final)
        stack = [s for s in sets if len(s) > 1]
        def target_set(_id, label):
            # Set containing the (unique) target of _id on 'label', or None.
            edge = self[_id][label]
            if edge is None:
                return None
            else:
                return sets[edge[0].get_id()]
        while len(stack) > 0:
            current = stack.pop()
            for label in alphabet:
                target = target_set(current[0], label)
                one, two = [current[0]], []
                for _id in current[1:]:
                    if target_set(_id, label) == target:
                        one.append(_id)
                    else:
                        two.append(_id)
                if len(two) > 0:
                    # The set is inconsistent on this label: split it and
                    # re-queue any part that may split further.
                    sets.add(one)
                    sets.add(two)
                    if len(one) > 1:
                        stack.append(one)
                    if len(two) > 1:
                        stack.append(two)
                    break
        for s in sets:
            initial = self.get_initial() in s
            final = True in [self[_id].is_final() for _id in s]
            _id = mfa.add_node(initial=initial, final=final)
            nodetoset[_id] = s
            settonode[s] = _id
        for node in mfa:
            done = set()
            s = nodetoset[node.get_id()]
            # Any representative works: all members have equivalent edges.
            source = self[s[0]]
            for label in source:
                edge = source[label]
                if label in done:
                    continue
                done.add(label)
                for target in edge:
                    t = sets[target.get_id()]
                    mfa.add_edge(label, node.get_id(), settonode[t])
        # Adopt the minimized automaton's state in place.
        self.set_name(mfa.get_name())
        self.set_initial(mfa.get_initial())
        self.set_finals(mfa.get_finals())
        self.set_nodes(mfa.get_nodes())
    def reset(self):
        """Clear the 'visited' flag on every node."""
        for node in self:
            node.set_visited(False)
    def __expand(self, source):
        # Return (label, source_id, target_id) triples for all outgoing
        # edges of *source*, marking the node visited.
        L = []
        source.set_visited(True)
        for label in source:
            edge = source[label]
            for target in source[label]:
                L.append((edge.get_label(), source.get_id(), target.get_id()))
        return L
    def iter(self, iter_type=None):
        """Yield (label, source_id, target_id) triples in BFS or DFS order."""
        if iter_type is None:
            iter_type = FSAConstants.BREADTH_FIRST_SEARCH
        if len(self[self.get_initial()]) == 0:
            raise UnitexException("Empty FSA")
        i = None
        if iter_type == FSAConstants.DEPTH_FIRST_SEARCH:
            i = -1    # pop from the end -> stack -> depth-first
        elif iter_type == FSAConstants.BREADTH_FIRST_SEARCH:
            i = 0     # pop from the front -> queue -> breadth-first
        else:
            raise UnitexException("Unknown iter type: %s" % iter_type)
        root = self[self.get_initial()]
        if root.is_visited():
            self.reset()
        L = self.__expand(root)
        while L:
            edge, sid, tid = L.pop(i)
            yield (edge, sid, tid)
            if not self[tid].is_visited():
                L += self.__expand(self[tid])
    def save(self, file, encoding=None):
        """Write the automaton to *file* in Graphviz dot format."""
        if encoding is None:
            encoding = UnitexConstants.DEFAULT_ENCODING
        with open(file, "w", encoding=encoding) as output:
            output.write("digraph Automaton {\n\n")
            output.write("\tcenter = 1;\n")
            output.write("\tcharset = \"%s\";\n" % encoding)
            output.write("\trankdir = LR;\n")
            output.write("\tranksep = 1;\n")
            output.write("\tedge [arrowhead = vee];\n\n")
            # Each node's shape declaration is emitted exactly once.
            nodes = set()
            edges = set()
            for node in self:
                sid = node.get_id()
                n1 = "node%s" % sid
                if not sid in nodes:
                    nodes.add(sid)
                    if node.get_id() == self.get_initial():
                        output.write("\t%s[shape = circle, label = \"\"];\n" % n1)
                    elif node.is_final():
                        output.write("\t%s[shape = doublecircle, label = \"\"];\n" % n1)
                    else:
                        output.write("\t%s[shape = point, label = \"\"];\n" % n1)
                for label in node:
                    for target in node[label]:
                        if (node.get_id(), label, target.get_id()) in edges:
                            continue
                        edges.add((node.get_id(), label, target.get_id()))
                        tid = target.get_id()
                        n2 = "node%s" % tid
                        if not tid in nodes:
                            nodes.add(tid)
                            if target.get_id() == self.get_initial():
                                output.write("\t%s[shape = circle, label = \"\"];\n" % n2)
                            elif target.is_final():
                                output.write("\t%s[shape = doublecircle, label = \"\"];\n" % n2)
                            else:
                                output.write("\t%s[shape = point, label = \"\"];\n" % n2)
                        output.write("\t%s -> %s [label = \"%s\"];\n" % (n1, n2, label))
                output.write("\n")
            output.write("}\n")
|
OK, admittedly not the best project name for my MHD2014 Boston hack.
I was inspired by the video that broke a few days previously - someone took Taylor Swift's "Shake it Off" and synchronized it to an 80s Aerobic video.
There have been multiple takedown-reposts of the video, so I'm not going to bother to try and link directly to it. The original poster did some nice editing of the video, and it worked really well.
I used the Echonest Remix tools to analyze the original audio from the video (~75bpm) and the Taylor Swift song (~160bpm). The audio replacement worked because the new song's tempo was an even multiple of the source audio.
That got me thinking about matching songs and videos that did not have matching tempos. By using ffmpeg to stretch or shrink the video, this is possible.
"Origin of Love" had a lower BPM than the original video soundtrack, so my script originally slowed down the video. I added a parameter that allowed me to run the BPM at a multiple -- this version matches the video to 2x the song's BPM.
The tool can take any video as input, but I only expect it to work well with video that's strongly defined by a beat. Naturally, music videos work great, but exercise and athletic videos also work really well.
I had a great weekend. It was a ton of fun hanging out with the other participants and seeing their awesome hacks on Sunday.
If you are interested, the code's available on github.
Published on November 10, 2014 by Brian Fife.
|
import os

from dnload.common import is_listing
from dnload.common import is_verbose
from dnload.common import listify
from dnload.common import run_command
########################################
# Assembler ############################
########################################
class Assembler:
    """Class used to generate assembler source output.

    Emits either GNU as or nasm directives depending on the executable name.
    """

    def __init__(self, op):
        """Constructor.

        :param op: assembler executable; a basename starting with 'nasm'
            switches the emitted directives to nasm syntax.
        """
        self.__executable = op
        # GNU as defaults.
        self.__comment = "#"
        self.__byte = ".byte"
        self.__short = ".short"
        self.__word = ".long"
        self.__quad = ".quad"
        self.__string = ".ascii"
        self.__assembler_flags_extra = []
        op = os.path.basename(op)
        if op.startswith("nasm"):
            self.__comment = ";"
            self.__byte = "db"
            self.__short = "dw"
            self.__word = "dd"
            # BUGFIX: 8-byte values previously kept the GNU '.quad'
            # directive even in nasm mode.
            self.__quad = "dq"
            self.__string = "db"

    def addExtraFlags(self, op):
        """Add extra flags to use when assembling."""
        if is_listing(op):
            for ii in op:
                self.addExtraFlags(ii)
            return
        if not (op in self.__assembler_flags_extra):
            self.__assembler_flags_extra += [op]

    def assemble(self, src, dst):
        """Assemble source file *src* into object file *dst*."""
        cmd = [self.__executable, src, "-o", dst] + self.__assembler_flags_extra
        (so, se) = run_command(cmd)
        # is_verbose comes from dnload.common (import was previously missing).
        if 0 < len(se) and is_verbose():
            print(se)

    def format_align(self, op):
        """Get alignment directive string."""
        return (".balign %i\n" % (op))

    def format_block_comment(self, desc, length=40):
        """Get a block-formatted comment."""
        block_text = ""
        for ii in range(length):
            block_text += self.__comment
        block_text += "\n"
        ret = self.__comment
        if desc:
            ret += " " + desc + " "
        # Pad the middle line with comment characters up to 'length'.
        for ii in range(len(ret), length):
            ret += self.__comment
        return block_text + ret + "\n" + block_text

    def format_comment(self, op, indent=""):
        """Get comment string, one line per listified element."""
        ret = ""
        for ii in listify(op):
            if ii:
                ret += indent + self.__comment + " " + ii + "\n"
        return ret

    def format_data(self, size, value, indent=""):
        """Get a data directive for value(s) of the given element byte size."""
        size = int(size)
        value_strings = []
        for ii in listify(value):
            if isinstance(ii, int):
                value_strings += ["0x%x" % (ii)]
            else:
                value_strings += [str(ii)]
        if not value_strings:
            raise RuntimeError("unable to format value: '%s'" % (str(value)))
        value = ", ".join(value_strings)
        # Quoted single-byte data is emitted as a string literal.
        if value.startswith("\"") and 1 == size:
            return indent + self.__string + " " + value + "\n"
        if 1 == size:
            return indent + self.__byte + " " + value + "\n"
        elif 2 == size:
            return indent + self.__short + " " + value + "\n"
        elif 4 == size:
            return indent + self.__word + " " + value + "\n"
        elif 8 == size:
            return indent + self.__quad + " " + value + "\n"
        else:
            # BUGFIX: the size was passed as a stray argument instead of
            # being interpolated into the message.
            raise NotImplementedError("exporting assembler value of size %i" % (size))

    def format_equ(self, name, value):
        """Get a global symbol equate directive."""
        return ".globl %s\n.equ %s, %s\n" % (name, name, value)

    def format_label(self, op):
        """Generate name labels (one, or several for a listing)."""
        if not op:
            return ""
        ret = ""
        if is_listing(op):
            for ii in op:
                # BUGFIX: recursion previously called the bare name
                # 'format_label', which is undefined (missing 'self.').
                ret += self.format_label(ii)
        else:
            ret += ".globl %s\n%s:\n" % (op, op)
        return ret
|
Home Broadsides Grave of Lilly Dale.
Title Grave of Lilly Dale.
Description First line: We smoothed down the locks of her soft golden hair.; Without music.; At head of song sheet: "Johnson, Song Publisher, No. 7 N. 10th St., Philadelphia."; Picture of a tomb, with a mourning woman to the right and a weeping willow tree to the left--below title.; Text of song in four four-line stanzas, each with chorus.; "Johnson has 600 different kinds of Songs, call and he will give you..."--At foot of broadside.; Composer and lyricist cited from other CPM sheet music editions (005804-SMVOL).; OCLC #51530769 credits song as a Minstrel song.
Relation-Is Referenced By Edwin Wolf's American Song Sheets, 806d.
|
# supplies the 'copr' command.
#
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from dnf.pycomp import PY3
from subprocess import call
from dnfpluginscore import _, logger
from dnf.i18n import ucd
import dnfpluginscore.lib
import dnf
import glob
import json
import os
import platform
import shutil
import stat
# Localized affirmative/negative answers accepted by interactive prompts;
# the empty string (plain Enter) counts as "no".
YES = set([_('yes'), _('y')])
NO = set([_('no'), _('n'), ''])
# compatibility with Py2 and Py3 - rename raw_input() to input() on Py2
try:
    input = raw_input
except NameError:
    # Python 3: input() already has the desired behavior.
    pass
class Copr(dnf.Plugin):
    """DNF plugin supplying the 'copr' command."""
    # Plugin name as registered with dnf.
    name = 'copr'
    def __init__(self, base, cli):
        """Initialize the plugin instance."""
        super(Copr, self).__init__(base, cli)
        # cli is None when dnf runs through its API without a command line;
        # the command is only registered in interactive (CLI) mode.
        if cli is not None:
            cli.register_command(CoprCommand)
class CoprCommand(dnf.cli.Command):
""" Copr plugin for DNF """
copr_url = "https://copr.fedoraproject.org"
aliases = ("copr",)
summary = _("Interact with Copr repositories.")
usage = _("""
enable name/project [chroot]
disable name/project
remove name/project
list name
search project
Examples:
copr enable rhscl/perl516 epel-6-x86_64
copr enable ignatenkobrain/ocltoys
copr disable rhscl/perl516
copr remove rhscl/perl516
copr list ignatenkobrain
copr search tests
""")
def run(self, extcmds):
try:
subcommand = extcmds[0]
except (ValueError, IndexError):
dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)
return 0
if subcommand == "help":
dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)
return 0
try:
project_name = extcmds[1]
except (ValueError, IndexError):
logger.critical(
_('Error: ') +
_('exactly two additional parameters to '
'copr command are required'))
dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)
raise dnf.cli.CliError(
_('exactly two additional parameters to '
'copr command are required'))
try:
chroot = extcmds[2]
except IndexError:
chroot = self._guess_chroot()
# commands without defined copr_username/copr_projectname
if subcommand == "list":
self._list_user_projects(project_name)
return
if subcommand == "search":
self._search(project_name)
return
try:
copr_username, copr_projectname = project_name.split("/")
except ValueError:
logger.critical(
_('Error: ') +
_('use format `copr_username/copr_projectname` '
'to reference copr project'))
raise dnf.cli.CliError(_('bad copr project format'))
repo_filename = "/etc/yum.repos.d/_copr_{}-{}.repo" \
.format(copr_username, copr_projectname)
if subcommand == "enable":
self._need_root()
self._ask_user("""
You are about to enable a Copr repository. Please note that this
repository is not part of the main Fedora distribution, and quality
may vary.
The Fedora Project does not exercise any power over the contents of
this repository beyond the rules outlined in the Copr FAQ at
<https://fedorahosted.org/copr/wiki/UserDocs#WhatIcanbuildinCopr>, and
packages are not held to any quality or security level.
Please do not file bug reports about these packages in Fedora
Bugzilla. In case of problems, contact the owner of this repository.
Do you want to continue? [y/N]: """)
self._download_repo(project_name, repo_filename, chroot)
logger.info(_("Repository successfully enabled."))
elif subcommand == "disable":
self._need_root()
self._disable_repo(copr_username, copr_projectname)
logger.info(_("Repository successfully disabled."))
elif subcommand == "remove":
self._need_root()
self._remove_repo(repo_filename)
logger.info(_("Repository successfully removed."))
else:
raise dnf.exceptions.Error(
_('Unknown subcommand {}.').format(subcommand))
def _list_user_projects(self, user_name):
# http://copr.fedoraproject.org/api/coprs/ignatenkobrain/
api_path = "/api/coprs/{}/".format(user_name)
res = dnfpluginscore.lib.urlopen(self, None, self.copr_url + api_path, 'w+')
try:
json_parse = json.loads(res.read())
except ValueError:
raise dnf.exceptions.Error(
_("Can't parse repositories for username '{}'.")
.format(user_name))
self._check_json_output(json_parse)
section_text = _("List of {} coprs").format(user_name)
self._print_match_section(section_text)
i = 0
while i < len(json_parse["repos"]):
msg = "{0}/{1} : ".format(user_name,
json_parse["repos"][i]["name"])
desc = json_parse["repos"][i]["description"]
if not desc:
desc = _("No description given")
msg = self.base.output.fmtKeyValFill(ucd(msg), desc)
print(msg)
i += 1
def _search(self, query):
# http://copr.fedoraproject.org/api/coprs/search/tests/
api_path = "/api/coprs/search/{}/".format(query)
res = dnfpluginscore.lib.urlopen(self, None, self.copr_url + api_path, 'w+')
try:
json_parse = json.loads(res.read())
except ValueError:
raise dnf.exceptions.Error(_("Can't parse search for '{}'."
).format(query))
self._check_json_output(json_parse)
section_text = _("Matched: {}").format(query)
self._print_match_section(section_text)
i = 0
while i < len(json_parse["repos"]):
msg = "{0}/{1} : ".format(json_parse["repos"][i]["username"],
json_parse["repos"][i]["coprname"])
desc = json_parse["repos"][i]["description"]
if not desc:
desc = _("No description given.")
msg = self.base.output.fmtKeyValFill(ucd(msg), desc)
print(msg)
i += 1
def _print_match_section(self, text):
formatted = self.base.output.fmtSection(text)
print(formatted)
def _ask_user(self, question):
if self.base.conf.assumeyes and not self.base.conf.assumeno:
return
elif self.base.conf.assumeno and not self.base.conf.assumeyes:
raise dnf.exceptions.Error(_('Safe and good answer. Exiting.'))
answer = None
while not ((answer in YES) or (answer in NO)):
answer = ucd(input(question)).lower()
answer = _(answer)
if answer in YES:
return
else:
raise dnf.exceptions.Error(_('Safe and good answer. Exiting.'))
@classmethod
def _need_root(cls):
# FIXME this should do dnf itself (BZ#1062889)
if os.geteuid() != 0:
raise dnf.exceptions.Error(
_('This command has to be run under the root user.'))
@classmethod
def _guess_chroot(cls):
""" Guess which choot is equivalent to this machine """
# FIXME Copr should generate non-specific arch repo
dist = platform.linux_distribution()
if "Fedora" in dist:
# x86_64 because repo-file is same for all arch
# ($basearch is used)
if "Rawhide" in dist:
chroot = ("fedora-rawhide-x86_64")
else:
chroot = ("fedora-{}-x86_64".format(dist[1]))
else:
chroot = ("epel-%s-x86_64" % dist[1].split(".", 1)[0])
return chroot
def _download_repo(self, project_name, repo_filename, chroot=None):
if chroot is None:
chroot = self._guess_chroot()
short_chroot = '-'.join(chroot.split('-')[:2])
#http://copr.fedoraproject.org/coprs/larsks/rcm/repo/epel-7-x86_64/
api_path = "/coprs/{0}/repo/{1}/".format(project_name, short_chroot)
try:
f = dnfpluginscore.lib.urlopen(self, None, self.copr_url + api_path)
except IOError as e:
if os.path.exists(repo_filename):
os.remove(repo_filename)
if '404' in str(e):
if PY3:
import urllib.request
try:
res = urllib.request.urlopen(self.copr_url + "/coprs/" + project_name)
status_code = res.getcode()
except urllib.error.HTTPError as e:
status_code = e.getcode()
else:
import urllib
res = urllib.urlopen(self.copr_url + "/coprs/" + project_name)
status_code = res.getcode()
if str(status_code) != '404':
raise dnf.exceptions.Error(_("This repository does not have"\
" any builds yet so you cannot enable it now."))
else:
raise dnf.exceptions.Error(_("Such repository does not exists."))
raise
shutil.copy2(f.name, repo_filename)
os.chmod(repo_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
@classmethod
def _remove_repo(cls, repo_filename):
    """Delete a repo file, converting OS failures into dnf errors."""
    # FIXME is it Copr repo ?
    try:
        os.remove(repo_filename)
    except OSError as err:
        raise dnf.exceptions.Error(str(err))
@classmethod
def _disable_repo(cls, copr_username, copr_projectname):
    """Disable the copr repo for user/project via `dnf config-manager`."""
    repo_id = "{}-{}".format(copr_username, copr_projectname)
    exit_code = call(["dnf", "config-manager", "--set-disabled", repo_id])
    if exit_code != 0:
        raise dnf.exceptions.Error(
            _("Failed to disable copr repo {}/{}"
              .format(copr_username, copr_projectname)))
@classmethod
def _get_data(cls, f):
    """ Wrapper around response from server

    Parse the file-like server response `f` as JSON and return the
    decoded object.  Raises CliError on a malformed response.
    """
    try:
        output = json.loads(f.read())
    except ValueError:
        # BUGFIX: the CliError used to be constructed but never raised,
        # silently returning None and making callers crash later with a
        # TypeError when subscripting the result.
        raise dnf.cli.CliError(_("Unknown response from server."))
    return output
@classmethod
def _check_json_output(cls, json_obj):
    """Raise a dnf error unless the server reply says "ok"."""
    if json_obj["output"] == "ok":
        return
    raise dnf.exceptions.Error("{}".format(json_obj["error"]))
class Playground(dnf.Plugin):
    """DNF plugin supplying the 'playground' command."""

    name = 'playground'

    def __init__(self, base, cli):
        """Initialize the plugin instance and register its CLI command."""
        super(Playground, self).__init__(base, cli)
        if cli is None:
            return
        cli.register_command(PlaygroundCommand)
class PlaygroundCommand(CoprCommand):
    """ Playground plugin for DNF

    Implements the `dnf playground [enable|disable|upgrade]` subcommand,
    which manages the set of Copr "playground" repo files under
    /etc/yum.repos.d/.
    """
    aliases = ("playground",)
    summary = _("Interact with Playground repository.")
    usage = " [enable|disable|upgrade]"

    def _cmd_enable(self, chroot):
        # Enable every project the Playground API currently lists, for the
        # given `chroot`.  Requires root and interactive confirmation.
        self._need_root()
        self._ask_user("""
You are about to enable a Playground repository.
Do you want to continue? [y/N]: """)
        api_url = "{0}/api/playground/list/".format(
            self.copr_url)
        f = dnfpluginscore.lib.urlopen(self, None, api_url)
        output = self._get_data(f)
        f.close()
        if output["output"] != "ok":
            raise dnf.cli.CliError(_("Unknown response from server."))
        for repo in output["repos"]:
            project_name = "{0}/{1}".format(repo["username"],
                                            repo["coprname"])
            # One repo file per project, "/" flattened to "-".
            repo_filename = "/etc/yum.repos.d/_playground_{}.repo" \
                            .format(project_name.replace("/", "-"))
            try:
                # check if that repo exist? but that will result in twice
                # up calls
                api_url = "{0}/api/coprs/{1}/detail/{2}/".format(
                    self.copr_url, project_name, chroot)
                f = dnfpluginscore.lib.urlopen(self, None, api_url)
                output2 = self._get_data(f)
                f.close()
                # Only download when the detail endpoint confirms the
                # project exists for this chroot.
                if (output2 and ("output" in output2)
                        and (output2["output"] == "ok")):
                    self._download_repo(project_name, repo_filename, chroot)
            except dnf.exceptions.Error:
                # likely 404 and that repo does not exist
                pass

    def _cmd_disable(self):
        # Remove every previously enabled playground repo file.  Root only.
        self._need_root()
        for repo_filename in glob.glob('/etc/yum.repos.d/_playground_*.repo'):
            self._remove_repo(repo_filename)

    def run(self, extcmds):
        # Command entry point: exactly one subcommand is required.
        try:
            subcommand = extcmds[0]
        except (ValueError, IndexError):
            logger.critical(
                _('Error: ') +
                _('exactly one parameter to '
                  'playground command are required'))
            dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)
            raise dnf.cli.CliError(
                _('exactly one parameter to '
                  'playground command are required'))
        chroot = self._guess_chroot()
        if subcommand == "enable":
            self._cmd_enable(chroot)
            logger.info(_("Playground repositories successfully enabled."))
        elif subcommand == "disable":
            self._cmd_disable()
            logger.info(_("Playground repositories successfully disabled."))
        elif subcommand == "upgrade":
            # upgrade == disable everything, then re-enable the current
            # project set for this machine's chroot.
            self._cmd_disable()
            self._cmd_enable(chroot)
            logger.info(_("Playground repositories successfully updated."))
        else:
            raise dnf.exceptions.Error(
                _('Unknown subcommand {}.').format(subcommand))
|
Gone Girl is director David Fincher’s most scathing and most dangerous social satire since Fight Club. Taking a scenario that seems to have sprung from a Men’s Rights Activist’s wet-dream, and gleefully highlighting how inherently ridiculous such a scenario is, both the film and the novel it is adapted from use the template of a whodunnit thriller to deliver a darkly comedic and borderline absurdist critique of gender roles, mainstream attitudes towards marriage, media sensationalism and materialism in a post-GFC America. Gone Girl is part of a long tradition of horror and thriller films where social anxieties of the era, in this case anxieties primarily concerned with gender, are manifest into the film’s monster or villainous character.
Speaking of social satires that critique what is expected of men and women, the Swedish film Force Majeure by writer/director Ruben Östlund is a hugely entertaining drama, which also has a number of wonderfully borderline absurdist and comedic elements. Not unlike the far more minimalist The Loneliest Planet, the way somebody instinctively acts in a single moment of crisis completely ruptures the dynamic between a couple making them confront the ways in which they are expected and expect each other to behave. The passive-aggressive dialogue that flows throughout this film – and the very unusual way the film sometimes disrupts the tension – is compelling and confronting, not to mention extremely funny at key moments.
The final film that caught my attention this month – and judging from the critical response, the attention of pretty much everybody – is Whiplash, by writer/director Damien Chazelle. It’s another film that challenges social conventions, in this case ideas about the nature of genius and notions concerning the use of pressure as a motivational tool. While it is a film about playing music, and some beautifully edited and shot sequences really bring the music to life visually, it overall resembles a boxing film and a Full Metal Jacket style war film. Whiplash shows us how something like music can be made miserable when the focus is on perfectionism and competitiveness, it shows that while some talent may be natural it also requires passion and a lot of practise, and most importantly it shows us that the antiquated and militaristic push-until-they-break approach is nothing but destructive.
This entry was posted on Wednesday, November 5th, 2014 at 7:56 am and is filed under Other. You can follow any responses to this entry through the RSS 2.0 feed. Both comments and pings are currently closed.
Thomas… this is great thank you.
|
import h5py
import os
import numpy as np
import traceback
from warnings import warn
def save_dict(d, filename, group, raise_type_fail=True):
    """
    Recursively save a dict to an hdf5 group in a new file.

    Parameters
    ----------
    d: dict
        dict to save as an hdf5 file
    filename : str
        Full path to save the file to. File must not already exist.
    group : str
        group name to save the dict to
    raise_type_fail : bool
        If True: raise an exception if saving a part of the dict fails.
        If False: prints a warning instead and saves the
        object's __str__() return value.

    Returns
    -------
    None

    Raises
    ------
    FileExistsError
        If the path specified by the `filename` parameter already exists.
    TypeError
        If a particular entry within the dict cannot be saved to hdf5 AND
        the argument `raise_type_fail` is set to `True`
    """
    if os.path.isfile(filename):
        # Refuse to overwrite; include the offending path in the error
        # message (the bare `raise FileExistsError` gave no context).
        raise FileExistsError(
            "Cannot save dict, file already exists: {}".format(filename))
    with h5py.File(filename, 'w') as h5file:
        _dicts_to_group(h5file, "{}/".format(group), d,
                        raise_meta_fail=raise_type_fail)
def _dicts_to_group(h5file, path, d, raise_meta_fail):
for key, item in d.items():
if isinstance(item, np.ndarray):
if item.dtype == np.dtype('O'):
# see if h5py is ok with it
try:
h5file[path + key] = item
# h5file[path + key].attrs['dtype'] = item.dtype.str
except TypeError:
msg = "numpy dtype 'O' for item:\n{}\n" \
"not supported by HDF5\n{}" \
"".format(item, traceback.format_exc())
if raise_meta_fail:
raise TypeError(msg)
else:
h5file[path + key] = str(item)
warn("{}, storing whatever str(obj) returns"
"".format(msg))
# numpy array of unicode strings
elif item.dtype.str.startswith('<U'):
h5file[path + key] = item.astype(h5py.special_dtype(vlen=str))
# otherwise h5py doesn't restore the right dtype for str types
h5file[path + key].attrs['dtype'] = item.dtype.str
# other types
else:
h5file[path + key] = item
# h5file[path + key].attrs['dtype'] = item.dtype.str
# single pieces of data
elif isinstance(item, (str, np.int, np.int8,
np.int16, np.int32, np.int64, np.float,
np.float16, np.float32, np.float64)):
h5file[path + key] = item
elif isinstance(item, dict):
_dicts_to_group(
h5file, "{}{}/".format(path, key), item, raise_meta_fail
)
# last resort, try to convert this object
# to a dict and save its attributes
elif hasattr(item, '__dict__'):
_dicts_to_group(
h5file,
"{}{}/".format(path, key),
item.__dict__,
raise_meta_fail
)
else:
msg = "{} for item: {} not supported by HDF5" \
"".format(type(item), item)
if raise_meta_fail:
raise TypeError(msg)
else:
h5file[path + key] = str(item)
warn("{}, storing whatever str(obj) returns"
"".format(msg))
def load_dict(filename, group):
    """Read back a dict previously stored in an hdf5 group.

    Parameters
    ----------
    filename : str
        Full path of the hdf5 file to read.
    group : str
        Name of the group that contains the stored dict.

    Returns
    -------
    dict
        The reconstructed dictionary.
    """
    group_path = "{}/".format(group)
    with h5py.File(filename, 'r') as h5file:
        return _dicts_from_group(h5file, group_path)
def _dicts_from_group(h5file, path):
    """Recursively rebuild a plain dict from the hdf5 group at `path`."""
    result = {}
    for key, item in h5file[path].items():
        if isinstance(item, h5py._hl.group.Group):
            # nested group -> nested dict
            result[key] = _dicts_from_group(h5file, path + key + '/')
        elif isinstance(item, h5py._hl.dataset.Dataset):
            value = item[()]
            # restore the original dtype when one was recorded at save time
            if 'dtype' in item.attrs:
                value = value.astype(item.attrs['dtype'])
            result[key] = value
    return result
|
It's hard to give you any help without further info, as kiwi is asking for. I have a quick question though. Have you noticed that the flash seems loose on the hot-shoe?
Hi Camerasnoop, No the flash doesn't seem loose on the hot shoe. I'm thinking maybe it's when I focus then recompose as the settings may change when I recompose the shot. Maybe I need to lock the exposure in AV mode in order for the camera to keep the correct exposure settings to match the flash settings. Does that make sense?????
Post some examples with exif attached.
Yep. post some of the photos, we can assist more when we can see what the problem is and what the settings were that you used.
I think I know what's happening, if it occurs mainly when you pre-focus & recompose. I'm guessing, for example, you are trying to take a portrait of a friend - half pressing shutter to focus on them, then moving camera to one side to get classic "Rule of Thirds" composition? I assume you have the flash in ETTL mode (not manual)? Well the half press on the shutter is locking focus but NOT exposure. When you recompose, the central part of the lens (where ETTL mainly measures distance/exposure) is now pointed off into the distance so when you fully press the shutter the flash tries to light up the entire background and in the process overexposes your subject in the foreground!
I'm not familiar with Canon but I assume that when you enable the flash in Av mode that the shutter speed is set to your camera's sync speed - probably 1/200 or 1/250. If you have a wide aperture set at the time then there is a good chance that the scene will be over-exposed. There is possibly a warning in the viewfinder but it can be easy to miss when you are concentrating on the scene in front of you. (From memory Nikons set the shutter speed display to "HI" when this occurs).
For example, on a bright day you set the aperture to f/5.6 - the camera sets the shutter speed to 1/800. You decide to use the flash to add some fill light. The shutter speed is now set to 1/200 but the aperture is still at f/5.6 so the scene will be over-exposed by 2 stops. You would need to stop down your aperture until 1/200 was the correct shutter speed for the ambient light conditions (f/11 in this example).
If you want to use flash with wide apertures in daylight check out whether your gear supports "high-speed flash sync" (not sure exactly what Canon call it). You trade off a reduction in flash power for the ability to be able to shoot at higher than sync shutter speed.
I assume that when you enable the flash in Av mode that the shutter speed is set to your camera's sync speed - probably 1/200 or 1/250. Just to add, if the shutter speed is already slower than the sync speed before activating the flash it probably won't change (although this can depend on settings), so you should get correct exposure of the ambient conditions in that case.
|
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import logging
import re
import requests
from utilities import f2i, h2f
from rpc_api import RpcApi
from auth_ptc import AuthPtc
from auth_google import AuthGoogle
from exceptions import AuthException, NotLoggedInException, ServerBusyOrOfflineException
import protos.RpcEnum_pb2 as RpcEnum
logger = logging.getLogger(__name__)
class PGoApi:
    """Thin client for the Pokemon Go RPC API.

    RPC method calls are queued via attribute access (see __getattr__)
    and executed in one batch with call().
    """

    API_ENTRY = 'https://pgorelease.nianticlabs.com/plfe/rpc'

    def __init__(self):
        self.log = logging.getLogger(__name__)

        self._auth_provider = None
        # endpoint assigned by the server after login; falls back to API_ENTRY
        self._api_endpoint = None

        # position is stored in the f2i-encoded form (see set_position)
        self._position_lat = 0
        self._position_lng = 0
        self._position_alt = 0

        # RPC methods queued for the next call()
        self._req_method_list = []

    def call(self):
        """Execute all queued RPC methods in one request.

        Returns the server response, None if the server was busy or
        offline, or False when nothing is queued / not logged in.
        The request queue is cleared after execution.
        """
        if not self._req_method_list:
            return False

        if self._auth_provider is None or not self._auth_provider.is_login():
            self.log.info('Not logged in')
            return False

        player_position = self.get_position()

        request = RpcApi(self._auth_provider)

        if self._api_endpoint:
            api_endpoint = self._api_endpoint
        else:
            api_endpoint = self.API_ENTRY

        self.log.info('Execution of RPC')
        response = None
        try:
            response = request.request(api_endpoint, self._req_method_list, player_position)
        except ServerBusyOrOfflineException:
            # best effort: report and return None so the caller can retry
            self.log.info('Server seems to be busy or offline - try again!')

        # cleanup after call execution
        self.log.info('Cleanup of request!')
        self._req_method_list = []

        return response

    def list_curr_methods(self):
        """Print the queued RPC method names and their enum values."""
        for i in self._req_method_list:
            print("{} ({})".format(RpcEnum.RequestMethod.Name(i), i))

    def set_logger(self, logger):
        """Replace the instance logger (None restores the module logger)."""
        # BUGFIX: this used to assign to `self._`, leaving `self.log`
        # (which every other method uses) untouched.
        self.log = logger or logging.getLogger(__name__)

    def get_position(self):
        """Return the stored (lat, lng, alt) tuple (f2i-encoded values)."""
        return (self._position_lat, self._position_lng, self._position_alt)

    def set_position(self, lat, lng, alt):
        """Set the player position; values are f2i-encoded for the API."""
        self.log.debug('Set Position - Lat: %s Long: %s Alt: %s', lat, lng, alt)

        self._position_lat = f2i(lat)
        self._position_lng = f2i(lng)
        self._position_alt = f2i(alt)

    def __getattr__(self, func):
        """Turn any known RPC method name (e.g. api.get_player()) into a
        queue operation; returns self so calls can be chained before call()."""
        def function(**kwargs):

            if not self._req_method_list:
                self.log.info('Create new request...')

            name = func.upper()
            if kwargs:
                self._req_method_list.append({RpcEnum.RequestMethod.Value(name): kwargs})
                self.log.info("Adding '%s' to RPC request including arguments", name)
                self.log.debug("Arguments of '%s': \n\r%s", name, kwargs)
            else:
                self._req_method_list.append(RpcEnum.RequestMethod.Value(name))
                self.log.info("Adding '%s' to RPC request", name)

            return self

        if func.upper() in RpcEnum.RequestMethod.keys():
            return function
        else:
            raise AttributeError

    def login(self, provider, username, password):
        """Authenticate with 'ptc' or 'google' and run the RPC login
        sequence performed by the official client.

        Returns True on success, False otherwise.  Raises AuthException
        for bad arguments or an unknown provider.
        """

        if not isinstance(username, basestring) or not isinstance(password, basestring):
            raise AuthException("Username/password not correctly specified")

        if provider == 'ptc':
            self._auth_provider = AuthPtc()
        elif provider == 'google':
            self._auth_provider = AuthGoogle()
        else:
            raise AuthException("Invalid authentication provider - only ptc/google available.")

        self.log.debug('Auth provider: %s', provider)

        if not self._auth_provider.login(username, password):
            self.log.info('Login process failed')
            return False

        self.log.info('Starting RPC login sequence (app simulation)')

        # making a standard call, like it is also done by the client
        self.get_player()
        self.get_hatched_eggs()
        self.get_inventory()
        self.check_awarded_badges()
        self.download_settings(hash="4a2e9bc330dae60e7b74fc85b98868ab4700802e")

        response = self.call()

        if not response:
            self.log.info('Login failed!')
            return False

        if 'api_url' in response:
            self._api_endpoint = ('https://{}/rpc'.format(response['api_url']))
            self.log.debug('Setting API endpoint to: %s', self._api_endpoint)
        elif 'auth_ticket' in response:
            auth_ticket = response['auth_ticket']
            self._auth_provider.set_ticket([auth_ticket['expire_timestamp_ms'], auth_ticket['start'], auth_ticket['end']])
        else:
            self.log.error('Login failed - unexpected server response!')
            return False

        self.log.info('Finished RPC login sequence (app simulation)')
        self.log.info('Login process completed')

        return True
|
You may have been wondering where Torro Forms went after it had become so quiet since its 1.0.0-beta.8 release. Now we’re proud to finally have an exciting announcement for you!
We have been working hard over the past year to completely rewrite the foundation of our form builder plugin. While this doesn’t commonly happen in the beta phase of a plugin, we just felt like we could serve you a better plugin than it was prior – it wasn’t quite there yet. So while the upcoming version will simply carry on with the previous version numbers (it will be 1.0.0-beta.9), it doesn’t have a lot in common with those earlier releases. With the new release, we’ll continue the beta testing cycle. Only bug fixes will go into the plugin until the full 1.0.0 version is released.
Here’s a sneak peek of what the new form builder experience looks like.
Form submissions are now managed in their own screen, and thus are no longer cramped into the form builder screen. You can granularly manage the submissions in a table view, just like you are familiar from most WordPress content already. You can even go into a submission and edit all its details.
Form submissions are now persisted with the database immediately, even if they haven’t been completed yet. This change hardens against the loss of data, and the new functionality even allows a user to pick up working on a submission that they have initiated earlier. A setting allows to automatically delete submissions that haven’t been completed after a certain amount of time, to keep your database clean.
A few existing form fields have been merged and simplified to improve usability, and a few smaller new features have been introduced as well, such as a new checkbox element type or a submission count access control for forms.
REST-API endpoints as well as WP-CLI commands are available for all content types that the plugin introduces.
Performance of the form builder plugin has improved significantly, by using a granular cache layer around all database requests.
Form meta and setting fields now support dependencies.
Form element types can now contain multiple fields to store multiple values.
The entire codebase now uses more modern practices, is more structured, easier to follow and easier to extend.
Extensions can include modules, following a flexible, but precise structure from the provided plugin core classes.
A fields library is used for all occurrences of fields in the plugin so that you only have to get familiar with one common foundation to extend.
Selecting an element type now happens through a clear, accessible and future-proof modal.
Submissions can now be managed in their own screen. You can even add or edit them.
The available modules have been restructured and a few new settings are present.
As a user, you can continue to use the form builder just like before. While some data structures and also the database schema have changed, the plugin includes migration routines for all core content. Those will automatically be executed as necessary. The only thing you need to be aware of is that the plugin’s requirements have changed: Your WordPress site needs to run on PHP version 5.6 and WordPress version 4.8 at least. If you’re not sure which PHP version your site is running, please ask your hosting provider for help. It’s likely that you are already on one of these supported PHP versions though as any older versions have been out of date for quite a while.
As a developer, be aware that extensions for prior Torro Forms versions will most likely no longer work with the new release due to the complete rewrite. If you have written a custom extension, we recommend you to stay on the old plugin version initially and adjust the extension code to account for the new structure. To make developing for Torro Forms as easy as possible, we have made available a separate developer-targeted site that includes tutorials as well as the full code reference. Furthermore, our extension boilerplate provides a solid base for scaffolding your extension. If you’re interested, you can also try out the new version now by using the master branch in its GitHub repository.
We’re sorry about the inconvenience that you are required to modify any existing extensions, but it’s all for the best of Torro Forms in the future. We felt like the form builder rewrite was necessary at this point, to have a solid structure for the future. Still in the beta phase, it was much better for it to happen now than any time after the full version release. From 1.0.0 and beyond, we will value backward-compatibility as much as WordPress core does.
The completely revamped Torro Forms version 1.0.0-beta.9 will be released on April 16th. We plan to have one more beta release following that, about two weeks later, then going into RC phase. The full 1.0.0 version, as mentioned before, is targeted for May 21st. Please test the new beta thoroughly and let us know about any issues you run into!
Can you please send an email with the link to your form to support@torro-forms.com? Then we can take a look at this.
|
import base64
import re
import logging
import hashlib
from .constants import signature_regex
class MalformedDocumentError(Exception):
    """Raised when a document field cannot be parsed."""

    def __init__(self, field_name):
        message = "Could not parse field {0}".format(field_name)
        super().__init__(message)
class Document:
    """Base class for duniter documents.

    Holds the fields shared by all document types (version, currency,
    signatures) and provides field parsing, signing and hashing helpers.
    Subclasses must implement raw().
    """

    # One regex per parsable header line; each captures the value in group 1.
    re_version = re.compile("Version: ([0-9]+)\n")
    re_currency = re.compile("Currency: ([^\n]+)\n")
    re_signature = re.compile("({signature_regex})\n".format(signature_regex=signature_regex))

    # Maps a field name to the compiled regex that parses its line.
    fields_parsers = {
        "Version": re_version,
        "Currency": re_currency,
        "Signature": re_signature
    }

    @classmethod
    def parse_field(cls, field_name, line):
        """
        Parse one document line with the regex registered for *field_name*.

        :param field_name: key into ``fields_parsers``
        :param line: the raw line to parse
        :return: the first captured group of the match
        :raise MalformedDocumentError: if the line does not match
        """
        try:
            # match() returns None when the line does not conform, so
            # .group(1) raises AttributeError, which is converted into a
            # document-level parse error below.
            value = cls.fields_parsers[field_name].match(line).group(1)
        except AttributeError:
            raise MalformedDocumentError(field_name)
        return value

    def __init__(self, version, currency, signatures):
        # Version 1 documents are rejected outright.
        # NOTE(review): the message below is interpolated into the
        # "Could not parse field {0}" template of MalformedDocumentError,
        # so the resulting text reads oddly; kept as-is to preserve the
        # exact exception message.
        if version < 2:
            raise MalformedDocumentError("Version 1 documents are not handled by duniterpy>0.2")
        self.version = version
        self.currency = currency
        # Drop None entries so self.signatures only ever holds real strings.
        if signatures:
            self.signatures = [s for s in signatures if s is not None]
        else:
            self.signatures = []

    def sign(self, keys):
        """
        Sign the current document with every key in *keys*.

        Warning : current signatures will be replaced with the new ones.
        """
        self.signatures = []
        for key in keys:
            # Sign the ASCII raw form and store the base64 text signature.
            signing = base64.b64encode(key.signature(bytes(self.raw(), 'ascii')))
            logging.debug("Signature : \n{0}".format(signing.decode("ascii")))
            self.signatures.append(signing.decode("ascii"))

    def raw(self):
        """
        Returns the raw document in string format

        Subclasses must override this to render their specific fields.
        """
        raise NotImplementedError()

    def signed_raw(self):
        """
        Return the raw document followed by the currently stored
        signatures, one per line.  (Call sign() first to populate them.)
        """
        raw = self.raw()
        signed = "\n".join(self.signatures)
        signed_raw = raw + signed + "\n"
        return signed_raw

    @property
    def sha_hash(self):
        # Uppercase hex SHA-256 digest of the ASCII-encoded signed document.
        return hashlib.sha256(self.signed_raw().encode("ascii")).hexdigest().upper()
|
Creek Printmakers (of which I am a member) are showing their work this coming week at the Fishslab Gallery in Whitstable. The prints on show are mostly from linocuts but there are silkscreen prints and monoprints too.
New(ish) work by me on show includes This Way! That Way!, Headless Chicken and Da Doo Run Run.
This Way! That Way! Linocut print and watercolour.
Da Doo Run Run. Linocut print.
Watercolour 25 x 21 cm.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.