text
stringlengths 29
850k
|
|---|
from django import forms
from django.core.exceptions import ValidationError
from django.forms import inlineformset_factory, ClearableFileInput
from django.utils.safestring import mark_safe
from dateutil.parser import parse
from isi_mip.climatemodels.fields import MyModelSingleChoiceField, MyModelMultipleChoiceField
from isi_mip.climatemodels.models import *
from isi_mip.climatemodels.widgets import MyMultiSelect, MyTextInput, MyBooleanSelect, RefPaperWidget
from isi_mip.contrib.models import Country
# Inline formset editing the 1-2 contact persons attached to a BaseImpactModel.
# NOTE(review): Django documents `help_texts` as a dict mapping field names to
# text; passing a plain string here is most likely silently ignored — confirm
# against the rendering template and fix if the text should appear in the form.
ContactPersonFormset = inlineformset_factory(BaseImpactModel, ContactPerson,
                                             extra=1, max_num=2, min_num=1, fields='__all__',
                                             can_delete=False, help_texts='The scientists responsible for performing the simulations for this sector')
class ImpactModelStartForm(forms.ModelForm):
    """Start form: either select an existing impact model or name a new one.

    All fields are optional at the form level; which combination is
    required is presumably validated by the view — confirm there.
    """
    model = forms.ModelChoiceField(queryset=BaseImpactModel.objects.order_by('name'), required=False)
    name = forms.CharField(label='New Impact Model', required=False)
    sector = forms.ModelChoiceField(queryset=Sector.objects.order_by('name'), required=False)
    send_invitation_email = forms.BooleanField(label='Send the invitation email?', required=False, initial=True)

    class Meta:
        model = BaseImpactModel
        fields = ('model', 'name', 'sector')
class BaseImpactModelForm(forms.ModelForm):
    """Edit the sector-independent basics of an impact model."""
    # allowcustom=True lets the submitter add regions not yet in the database.
    region = MyModelMultipleChoiceField(allowcustom=True, queryset=Region.objects, required=True)

    class Meta:
        model = BaseImpactModel
        exclude = ('owners', 'public', 'sector', 'name', 'drkz_folder_name')
        widgets = {
            'short_description': MyTextInput(textarea=True),
        }
class ImpactModelForm(forms.ModelForm):
    """Edit the per-simulation-round details of an impact model.

    The two reference-paper fields are not posted as ordinary model
    fields: RefPaperWidget submits their sub-fields (author, title,
    journal, ...) as parallel request parameters, which the clean_*
    methods below assemble into ReferencePaper instances.
    """

    class Meta:
        model = ImpactModel
        exclude = ('base_model', 'public', 'simulation_round')
        widgets = {
            'version': MyTextInput(),
            'main_reference_paper': RefPaperWidget(),
            'other_references': RefPaperWidget(),
            'responsible_person': MyTextInput(),
        }

    @staticmethod
    def _ref_paper(args):
        """Create or update a ReferencePaper from a dict of raw widget values.

        Returns None when neither a DOI nor a title was submitted.
        Lookup is by DOI when one is given, otherwise by title; if the
        lookup matches several existing papers, a fresh one is created
        instead of guessing which duplicate to update.
        """
        if not args['doi'] and not args['title']:
            return None
        if args['doi']:
            try:
                rp = ReferencePaper.objects.get_or_create(doi=args['doi'])[0]
                rp.title = args['title']
            except ReferencePaper.MultipleObjectsReturned:
                rp = ReferencePaper.objects.create(title=args['title'], doi=args['doi'])
        else:
            try:
                rp = ReferencePaper.objects.get_or_create(title=args['title'])[0]
            except ReferencePaper.MultipleObjectsReturned:
                rp = ReferencePaper.objects.create(title=args['title'], doi=args['doi'])
        rp.lead_author = args['lead_author']
        rp.journal_name = args['journal_name']
        rp.journal_volume = args['journal_volume']
        rp.journal_pages = args['journal_pages']
        rp.first_published = args['first_published']
        rp.save()
        return rp

    def clean_main_reference_paper(self):
        """Assemble the main reference paper from its widget's raw parameters."""
        try:
            myargs = {
                'lead_author': self.data.getlist('main_reference_paper-author')[0],
                'title': self.data.getlist('main_reference_paper-title')[0],
                'journal_name': self.data.getlist('main_reference_paper-journal')[0],
                'doi': self.data.getlist('main_reference_paper-doi')[0],
                'journal_volume': self.data.getlist('main_reference_paper-volume')[0] or None,
                'journal_pages': self.data.getlist('main_reference_paper-page')[0]
            }
            try:
                myargs['first_published'] = parse(self.data.getlist('main_reference_paper-date')[0])
            except (IndexError, ValueError, OverflowError, TypeError):
                # Missing or unparseable date: store no publication date.
                # (Previously a bare `except:`; narrowed to what getlist
                # indexing and dateutil.parser.parse can actually raise.)
                myargs['first_published'] = None
        except (IndexError, AttributeError, KeyError):
            # The widget's parameters are absent or malformed
            # (previously a bare `except:`).
            raise ValidationError('Problems adding the main reference paper')
        return self._ref_paper(myargs)

    def clean_other_references(self):
        """Assemble the list of additional reference papers."""
        rps = []
        # NOTE(review): the last submitted row appears to be the widget's
        # blank template row, hence the `- 1` — confirm against RefPaperWidget.
        for i in range(len(self.data.getlist('other_references-title')) - 1):
            myargs = {
                'lead_author': self.data.getlist('other_references-author')[i],
                'title': self.data.getlist('other_references-title')[i],
                'journal_name': self.data.getlist('other_references-journal')[i],
                'doi': self.data.getlist('other_references-doi')[i],
                'journal_volume': self.data.getlist('other_references-volume')[i] or None,
                'journal_pages': self.data.getlist('other_references-page')[i]
            }
            try:
                myargs['first_published'] = parse(self.data.getlist('other_references-date')[i])
            except (IndexError, ValueError, OverflowError, TypeError):
                # Same narrowing as in clean_main_reference_paper.
                myargs['first_published'] = None
            rp = self._ref_paper(myargs)
            if rp is not None:
                rps.append(rp)
        return rps
class TechnicalInformationModelForm(forms.ModelForm):
    """Edit the spatial/temporal resolution details of an impact model."""
    # allowcustom=True lets the submitter add an aggregation not yet in the DB.
    spatial_aggregation = MyModelSingleChoiceField(allowcustom=True, queryset=SpatialAggregation.objects)

    class Meta:
        model = TechnicalInformation
        exclude = ('impact_model',)
        widgets = {
            'spatial_resolution': MyMultiSelect(allowcustom=True),
            'spatial_resolution_info': MyTextInput(textarea=True),
            'temporal_resolution_climate': MyMultiSelect(allowcustom=True),
            'temporal_resolution_co2': MyMultiSelect(allowcustom=True),
            'temporal_resolution_land': MyMultiSelect(allowcustom=True),
            'temporal_resolution_soil': MyMultiSelect(allowcustom=True),
            'temporal_resolution_info': MyTextInput(textarea=True),
        }
class InputDataInformationModelForm(forms.ModelForm):
    """Edit which input data sets and climate variables a model run used.

    The choices offered for every data-set field are restricted in
    __init__ to the instance's simulation round.
    """
    simulated_atmospheric_climate_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    observed_atmospheric_climate_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    simulated_ocean_climate_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    observed_ocean_climate_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    emissions_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    socio_economic_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    land_use_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    other_human_influences_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    other_data_sets = MyModelMultipleChoiceField(queryset=InputData.objects, allowcustom=False)
    climate_variables = MyModelMultipleChoiceField(queryset=ClimateVariable.objects, allowcustom=False)

    class Meta:
        model = InputDataInformation
        exclude = ('impact_model',)
        widgets = {
            'climate_variables_info': MyTextInput(textarea=True),
            'additional_input_data_sets': MyTextInput(textarea=True),
        }

    # Data-set form field -> InputData.data_type name used to filter it.
    DATA_TYPE_BY_FIELD = {
        'emissions_data_sets': 'Emissions',
        'land_use_data_sets': 'Land use',
        'observed_atmospheric_climate_data_sets': 'Observed atmospheric climate',
        'observed_ocean_climate_data_sets': 'Observed ocean climate',
        'other_data_sets': 'Other',
        'other_human_influences_data_sets': 'Other human influences',
        'simulated_atmospheric_climate_data_sets': 'Simulated atmospheric climate',
        'simulated_ocean_climate_data_sets': 'Simulated ocean climate',
        'socio_economic_data_sets': 'Socio-economic',
    }

    def __init__(self, *args, **kwargs):
        instance = kwargs.get('instance', None)
        simulation_round = instance.impact_model.simulation_round
        super(InputDataInformationModelForm, self).__init__(*args, **kwargs)
        self.fields['climate_variables'].queryset = ClimateVariable.objects.filter(
            inputdata__data_type__is_climate_data_type=True,
            inputdata__simulation_round=simulation_round).distinct()
        for field_name, type_name in self.DATA_TYPE_BY_FIELD.items():
            self.fields[field_name].queryset = InputData.objects.filter(
                data_type__name=type_name, simulation_round=simulation_round).distinct()
class OtherInformationModelForm(forms.ModelForm):
    """Edit the free-text 'other information' section of an impact model."""

    class Meta:
        model = OtherInformation
        exclude = ('impact_model',)
        widgets = {
            'exceptions_to_protocol': MyTextInput(textarea=True),
            'spin_up': MyBooleanSelect(nullable=False),
            'spin_up_design': MyTextInput(textarea=True),
            'natural_vegetation_partition': MyTextInput(textarea=True),
            'natural_vegetation_dynamics': MyTextInput(textarea=True),
            'natural_vegetation_cover_dataset': MyTextInput(),
            'management': MyTextInput(textarea=True),
            'extreme_events': MyTextInput(textarea=True),
            'anything_else': MyTextInput(textarea=True),
        }
# SEKTOREN ############################################################
class BaseSectorForm(forms.ModelForm):
    """Base form for sector-specific information.

    Besides the concrete model fields, __init__ adds one free-text field
    per SectorInformationGroup field configured for the instance's
    sector; clean()/save() route those dynamic values into the model's
    `data` mapping instead of regular model fields.
    """
    # Class-level default only; __init__ replaces it with a fresh
    # per-instance list. Previously every instance appended into this
    # single shared list, so generic field names accumulated across all
    # forms and sectors for the lifetime of the process.
    generic_fields = []

    class Meta:
        model = GenericSector
        exclude = ('impact_model', 'data')
        # NOTE(review): `abstract` is not a ModelForm Meta option Django
        # acts on; kept as documentation of intent only.
        abstract = True

    def __init__(self, *args, **kwargs):
        instance = kwargs.get('instance', None)
        super(BaseSectorForm, self).__init__(*args, **kwargs)
        # Per-instance state (fixes cross-instance leakage; also makes
        # generic_groups safe to read when no instance was passed).
        self.generic_fields = []
        self.generic_groups = []
        if instance:
            sector = instance.impact_model.base_model.sector
            for group in SectorInformationGroup.objects.filter(sector=sector):
                fields = []
                for field in group.fields.all():
                    fields.append(field.unique_identifier)
                    self.generic_fields.append(field.unique_identifier)
                    self.fields[field.unique_identifier] = forms.CharField(widget=MyTextInput(textarea=True), label=field.name, help_text=field.help_text, required=False, initial='')
                    if instance.data and field.unique_identifier in instance.data:
                        field_initial = instance.data[field.unique_identifier]
                        if field_initial:
                            self.fields[field.unique_identifier].initial = field_initial
                self.generic_groups.append({'name': group.name, 'fields': fields, 'description': group.description})

    def clean(self):
        """Move the dynamic generic-field values out of cleaned_data into 'data'."""
        cleaned_data_generic = {}
        cleaned_data = super(BaseSectorForm, self).clean()
        for k in list(cleaned_data.keys()):
            if k in self.generic_fields:
                cleaned_data_generic[k] = cleaned_data[k]
                del cleaned_data[k]
        cleaned_data['data'] = cleaned_data_generic
        return cleaned_data

    def save(self, commit=True):
        """Persist the form, writing the dynamic values into instance.data."""
        instance = super(BaseSectorForm, self).save(commit=False)
        instance.data = self.cleaned_data['data']
        if commit:
            instance.save()
        return instance
class AgricultureForm(BaseSectorForm):
    """Sector form for agriculture models (all free-text questionnaire fields)."""
    template = 'edit_agriculture.html'

    class Meta:
        model = Agriculture
        exclude = ('impact_model',)
        widgets = {
            'crops': MyTextInput(textarea=True),
            'land_coverage': MyTextInput(textarea=True),
            'planting_date_decision': MyTextInput(textarea=True),
            'planting_density': MyTextInput(textarea=True),
            'crop_cultivars': MyTextInput(textarea=True),
            'fertilizer_application': MyTextInput(textarea=True),
            'irrigation': MyTextInput(textarea=True),
            'crop_residue': MyTextInput(textarea=True),
            'initial_soil_water': MyTextInput(textarea=True),
            'initial_soil_nitrate_and_ammonia': MyTextInput(textarea=True),
            'initial_soil_C_and_OM': MyTextInput(textarea=True),
            'initial_crop_residue': MyTextInput(textarea=True),
            'lead_area_development': MyTextInput(textarea=True),
            'light_interception': MyTextInput(textarea=True),
            'light_utilization': MyTextInput(textarea=True),
            'yield_formation': MyTextInput(textarea=True),
            'crop_phenology': MyTextInput(textarea=True),
            'root_distribution_over_depth': MyTextInput(textarea=True),
            'stresses_involved': MyTextInput(textarea=True),
            'type_of_water_stress': MyTextInput(textarea=True),
            'type_of_heat_stress': MyTextInput(textarea=True),
            'water_dynamics': MyTextInput(textarea=True),
            'evapo_transpiration': MyTextInput(textarea=True),
            'soil_CN_modeling': MyTextInput(textarea=True),
            'co2_effects': MyTextInput(textarea=True),
            'parameters_number_and_description': MyTextInput(textarea=True),
            'calibrated_values': MyTextInput(textarea=True),
            'output_variable_and_dataset': MyTextInput(textarea=True),
            'spatial_scale_of_calibration_validation': MyTextInput(textarea=True),
            'temporal_scale_of_calibration_validation': MyTextInput(textarea=True),
            'criteria_for_evaluation': MyTextInput(textarea=True),
        }
class ForestsForm(BaseSectorForm):
    """Sector form for forest models."""
    template = 'edit_forests.html'
    # Extra free-text field asking for a parameter list uploaded as attachment.
    upload_parameter_list = forms.CharField(widget=MyTextInput(textarea=True), required=False, label=mark_safe('Please upload a list of your parameters as an attachment (Section 7). The list should include species-specific parameters and other parameters not depending on initialization data including the following information: short name, long name, short explanation, unit, value, see here for an example (<a href="http://www.pik-potsdam.de/4c/web_4c/theory/parameter_table_0514.pdf" target="_blank">parameter_table_0514.pdf</a>)'))

    class Meta:
        model = Forests
        exclude = ('impact_model',)
        widgets = {
            # Forest Model Set-up Specifications
            'initialize_model': MyTextInput(textarea=True),
            'data_profound_db': MyTextInput(textarea=True),
            'management_implementation': MyTextInput(textarea=True),
            'harvesting_simulated': MyTextInput(textarea=True),
            'regenerate': MyTextInput(textarea=True),
            'unmanaged_simulations': MyTextInput(textarea=True),
            'noco2_scenario': MyTextInput(textarea=True),
            'leap_years': MyTextInput(textarea=True),
            'simulate_minor_tree': MyTextInput(textarea=True),
            'nitrogen_simulation': MyTextInput(textarea=True),
            'soil_depth': MyTextInput(textarea=True),
            # NOTE(review): the declared form field above overrides this
            # widget entry; it appears to be redundant.
            'upload_parameter_list': MyTextInput(textarea=True),
            'minimum_diameter_tree': MyTextInput(textarea=True),
            'model_historically_calibrated': MyTextInput(textarea=True),
            'stochastic_element': MyTextInput(textarea=True),
            # Forest Model Output Specifications
            'initial_state': MyTextInput(textarea=True),
            'total_calculation': MyTextInput(textarea=True),
            'output_dbh_class': MyTextInput(textarea=True),
            'output': MyTextInput(textarea=True),
            'output_per_pft': MyTextInput(),
            'considerations': MyTextInput(textarea=True),
            'dynamic_vegetation': MyTextInput(textarea=True),
            'nitrogen_limitation': MyTextInput(textarea=True),
            'co2_effects': MyTextInput(textarea=True),
            'light_interception': MyTextInput(textarea=True),
            'light_utilization': MyTextInput(textarea=True),
            'phenology': MyTextInput(textarea=True),
            'water_stress': MyTextInput(textarea=True),
            'heat_stress': MyTextInput(textarea=True),
            'evapotranspiration_approach': MyTextInput(textarea=True),
            'rooting_depth_differences': MyTextInput(textarea=True),
            'root_distribution': MyTextInput(textarea=True),
            'permafrost': MyTextInput(textarea=True),
            'closed_energy_balance': MyTextInput(textarea=True),
            'soil_moisture_surface_temperature_coupling': MyTextInput(textarea=True),
            'latent_heat': MyTextInput(textarea=True),
            'sensible_heat': MyTextInput(textarea=True),
            'mortality_age': MyTextInput(textarea=True),
            'mortality_fire': MyTextInput(textarea=True),
            'mortality_drought': MyTextInput(textarea=True),
            'mortality_insects': MyTextInput(textarea=True),
            'mortality_storm': MyTextInput(textarea=True),
            'mortality_stochastic_random_disturbance': MyTextInput(textarea=True),
            'mortality_other': MyTextInput(textarea=True),
            'mortality_remarks': MyTextInput(textarea=True),
            'nbp_fire': MyTextInput(textarea=True),
            'nbp_landuse_change': MyTextInput(textarea=True),
            'nbp_harvest': MyTextInput(textarea=True),
            'nbp_other': MyTextInput(textarea=True),
            'nbp_comments': MyTextInput(textarea=True),
            'list_of_pfts': MyTextInput(textarea=True),
            'pfts_comments': MyTextInput(textarea=True),
            'assimilation': MyTextInput(textarea=True),
            'respiration': MyTextInput(textarea=True),
            'carbon_allocation': MyTextInput(textarea=True),
            'regeneration_planting': MyTextInput(textarea=True),
            'soil_water_balance': MyTextInput(textarea=True),
            'carbon_nitrogen_balance': MyTextInput(textarea=True),
            'feedbacks_considered': MyTextInput(textarea=True),
        }
class BiomesForm(BaseSectorForm):
    """Sector form for biomes models."""
    template = 'edit_biomes.html'

    class Meta:
        model = Biomes
        exclude = ('impact_model',)
        widgets = {
            'output': MyTextInput(textarea=True),
            'output_per_pft': MyTextInput(),
            'considerations': MyTextInput(textarea=True),
            'dynamic_vegetation': MyTextInput(textarea=True),
            'nitrogen_limitation': MyTextInput(textarea=True),
            'co2_effects': MyTextInput(textarea=True),
            'light_interception': MyTextInput(textarea=True),
            'light_utilization': MyTextInput(textarea=True),
            'phenology': MyTextInput(textarea=True),
            'water_stress': MyTextInput(textarea=True),
            'heat_stress': MyTextInput(textarea=True),
            'evapotranspiration_approach': MyTextInput(textarea=True),
            'rooting_depth_differences': MyTextInput(textarea=True),
            'root_distribution': MyTextInput(textarea=True),
            'permafrost': MyTextInput(textarea=True),
            'closed_energy_balance': MyTextInput(textarea=True),
            'soil_moisture_surface_temperature_coupling': MyTextInput(textarea=True),
            'latent_heat': MyTextInput(textarea=True),
            'sensible_heat': MyTextInput(textarea=True),
            'mortality_age': MyTextInput(textarea=True),
            'mortality_fire': MyTextInput(textarea=True),
            'mortality_drought': MyTextInput(textarea=True),
            'mortality_insects': MyTextInput(textarea=True),
            'mortality_storm': MyTextInput(textarea=True),
            'mortality_stochastic_random_disturbance': MyTextInput(textarea=True),
            'mortality_other': MyTextInput(textarea=True),
            'mortality_remarks': MyTextInput(textarea=True),
            'nbp_fire': MyTextInput(textarea=True),
            'nbp_landuse_change': MyTextInput(textarea=True),
            'nbp_harvest': MyTextInput(textarea=True),
            'nbp_other': MyTextInput(textarea=True),
            'nbp_comments': MyTextInput(textarea=True),
            'list_of_pfts': MyTextInput(textarea=True),
            'pfts_comments': MyTextInput(textarea=True),
            'compute_soil_carbon': MyTextInput(textarea=True),
            'seperate_soil_carbon': MyTextInput(textarea=True),
            'harvest_npp_crops': MyTextInput(textarea=True),
            'treat_biofuel_npp': MyTextInput(textarea=True),
            'npp_litter_output': MyTextInput(textarea=True),
            'simulate_bioenergy': MyTextInput(textarea=True),
            'transition_cropland': MyTextInput(textarea=True),
            'simulate_pasture': MyTextInput(textarea=True),
        }
class BiodiversityForm(BaseSectorForm):
    """Sector form for biodiversity models."""
    template = 'edit_biodiversity.html'

    class Meta:
        model = Biodiversity
        exclude = ('impact_model',)
        widgets = {
            'model_algorithm': MyMultiSelect(allowcustom=False),
            'explanatory_variables': MyTextInput(textarea=True),
            'response_variable': MyMultiSelect(allowcustom=False),
            'additional_information_response_variable': MyTextInput(textarea=True),
            'distribution_response_variable': MyMultiSelect(allowcustom=False),
            'parameters': MyTextInput(textarea=True),
            'additional_info_parameters': MyTextInput(textarea=True),
            'software_function': MyMultiSelect(allowcustom=False),
            'software_package': MyMultiSelect(allowcustom=False),
            'software_program': MyTextInput(textarea=True),
            'model_output': MyMultiSelect(allowcustom=False),
            'additional_info_model_output': MyTextInput(textarea=True),
        }
class EnergyForm(BaseSectorForm):
    """Sector form for energy models."""
    template = 'edit_energy.html'

    class Meta:
        model = Energy
        exclude = ('impact_model',)
        widgets = {
            'model_type': MyTextInput(textarea=True),
            'temporal_extent': MyTextInput(textarea=True),
            'temporal_resolution': MyTextInput(textarea=True),
            'data_format_for_input': MyTextInput(textarea=True),
            'impact_types_energy_demand': MyTextInput(textarea=True),
            'impact_types_temperature_effects_on_thermal_power': MyTextInput(textarea=True),
            'impact_types_weather_effects_on_renewables': MyTextInput(textarea=True),
            'impact_types_water_scarcity_impacts': MyTextInput(textarea=True),
            'impact_types_other': MyTextInput(textarea=True),
            'output_energy_demand': MyTextInput(textarea=True),
            'output_energy_supply': MyTextInput(textarea=True),
            'output_water_scarcity': MyTextInput(textarea=True),
            'output_economics': MyTextInput(textarea=True),
            'output_other': MyTextInput(textarea=True),
            'variables_not_directly_from_GCMs': MyTextInput(textarea=True),
            'response_function_of_energy_demand_to_HDD_CDD': MyTextInput(textarea=True),
            'factor_definition_and_calculation': MyTextInput(textarea=True),
            'biomass_types': MyTextInput(textarea=True),
            'maximum_potential_assumption': MyTextInput(textarea=True),
            'bioenergy_supply_costs': MyTextInput(textarea=True),
            'socioeconomic_input': MyTextInput(textarea=True),
        }
class MarineEcosystemsForm(BaseSectorForm):
    """Sector form for marine ecosystem models (global and regional)."""
    template = 'edit_marine.html'

    class Meta:
        model = MarineEcosystems
        exclude = ('impact_model',)
        widgets = {
            'defining_features': MyTextInput(textarea=True),
            'spatial_scale': MyTextInput(),
            'spatial_resolution': MyTextInput(),
            'temporal_scale': MyTextInput(),
            'temporal_resolution': MyTextInput(),
            'taxonomic_scope': MyTextInput(),
            'vertical_resolution': MyTextInput(),
            'spatial_dispersal_included': MyTextInput(),
            'fishbase_used_for_mass_length_conversion': MyTextInput(),
        }
class WaterForm(BaseSectorForm):
    """Sector form for water models (global and regional)."""
    template = 'edit_water.html'

    class Meta:
        model = Water
        exclude = ('impact_model',)
        widgets = {
            'technological_progress': MyTextInput(textarea=True),
            'soil_layers': MyTextInput(textarea=True),
            'water_use': MyTextInput(textarea=True),
            'water_sectors': MyTextInput(textarea=True),
            'routing': MyTextInput(textarea=True),
            'routing_data': MyTextInput(textarea=True),
            'land_use': MyTextInput(textarea=True),
            'dams_reservoirs': MyTextInput(textarea=True),
            'calibration': MyBooleanSelect(nullable=True),
            'calibration_years': MyTextInput(),
            'calibration_dataset': MyTextInput(),
            'calibration_catchments': MyTextInput(),
            'vegetation': MyBooleanSelect(nullable=True),
            'vegetation_representation': MyTextInput(textarea=True),
            "methods_evapotranspiration": MyTextInput(textarea=True),
            'methods_snowmelt': MyTextInput(textarea=True),
        }
class GenericSectorForm(BaseSectorForm):
    """Fallback sector form: only the dynamically generated generic fields."""
    template = 'edit_generic_sector.html'

    class Meta:
        model = GenericSector
        exclude = ('impact_model', 'data')
def get_sector_form(sector):
    """Return the form class used to edit the given sector.

    Sectors without a specialised form fall back to GenericSectorForm.
    (Previously an unknown sector class name raised a bare KeyError.)
    """
    mapping = {
        'agriculture': AgricultureForm,
        'agroeconomicmodelling': GenericSectorForm,
        'biodiversity': BiodiversityForm,
        'biomes': BiomesForm,
        'coastalinfrastructure': GenericSectorForm,
        'computablegeneralequilibriummodelling': GenericSectorForm,
        'energy': EnergyForm,
        'forests': ForestsForm,
        'health': GenericSectorForm,
        'marineecosystemsglobal': MarineEcosystemsForm,
        'marineecosystemsregional': MarineEcosystemsForm,
        'permafrost': GenericSectorForm,
        'waterglobal': WaterForm,
        'waterregional': WaterForm,
        'genericsector': GenericSectorForm,
    }
    return mapping.get(sector.class_name.lower(), GenericSectorForm)
class ContactInformationForm(forms.Form):
    """Contact details of the person responsible; the name is read-only."""
    name = forms.CharField(label='Your name', max_length=60, required=False, widget=forms.TextInput(attrs={'readonly': 'readonly'}), help_text='If you want to change the contact person or add a new contact person, please contact info@isimip.org')
    # Typo fix in the user-facing label: 'adress' -> 'address'.
    email = forms.EmailField(label='Your email address', required=True)
    institute = forms.CharField(max_length=500, required=False)
    country = forms.ModelChoiceField(queryset=Country.objects.all(), required=False, empty_label='-------')
class AttachmentModelForm(forms.ModelForm):
    """Upload up to five attachments, each with a short description."""

    class Meta:
        model = Attachment
        exclude = ('impact_model',)
        widgets = {
            'attachment1': ClearableFileInput,
            'attachment1_description': MyTextInput(),
            'attachment2': ClearableFileInput,
            'attachment2_description': MyTextInput(),
            'attachment3': ClearableFileInput,
            'attachment3_description': MyTextInput(),
            'attachment4': ClearableFileInput,
            'attachment4_description': MyTextInput(),
            'attachment5': ClearableFileInput,
            'attachment5_description': MyTextInput(),
        }
class DataConfirmationForm(forms.Form):
    """Confirmation checkboxes and license choice before publishing data."""
    terms = forms.BooleanField(required=True)
    license = forms.ChoiceField(required=True, choices=(('CC BY 4.0', 'CC BY 4.0'), ('other', 'other')))
    # Only relevant when license == 'other'; presumably validated in the view.
    other_license_name = forms.CharField(required=False)
    correct = forms.BooleanField(required=True)
|
This book reminds you of the beauty of books: They can transport you anywhere! Sat on a park bench on a sunny late summer’s day in London I felt I almost had to put on an extra layer of clothing as my brain was taken over by temperatures below zero, snow boots, and cold Alaskan weather – that is what you want from a book.
My Lead Dog Was a Lesbian is a glimpse into the little known world of sled dog racing. Little known is not quite correct, in certain circles it is a schedule to live by, a job, a passion. For most of us however, it is almost unknown and reading this will make you learn about a lifestyle that seems almost impossible. Forget about the endless hours of training, of a professional ballet dancer or the crazy sleep schedule of musicians on tour.
Taking part in one of the toughest sled dog races in the world is at least two weeks in the middle of nowhere in Alaska, carrying with you everything you need to sustain yourself and at least a dozen dogs, nowhere to hide and hardly more than a couple of hours sleep at a time.
“In the tunnel vision created by my headlamp, I was lucky to glimpse the dogs, much less the landscape. Visibility was so poor, we could be within ten feet of the shelter and I might not see it. New snow was already piled a foot deep, and it was coming down hard.
Brian O’Donoghue is originally from Washington D.C., was a New York cab driver and as a journalist started covering sled dog races. He has covered all the important ones: The Klondike, the Yukon Quest, and the Iditarod – 1,000 miles across Alaska’s ice fields. Still, writing about it and taking part are worlds away from each other, yet he did it. After training with a dog team and qualifying through a number of smaller races, he started first in the 1991 race.
A quick look at the back of the book (or online, since we are now 24 years ahead) will tell you he ended last, taking three weeks to complete the trail. It takes a lot to finish the race, and everything that could have gone wrong, went wrong: from storms, to a relatively short time to prepare, to sexually confused dogs; but in the end, he made it past the finish line in Nome.
“It grew darker and the wind picked up. Snow began falling. The trail was rising with no end in sight. I sensed we’d lost our race with the storm, but there was no turning back.
The timeline isn’t always clear throughout the book. At times you wonder “Eight dogs? When did he drop the others?” until you realise it is a scene from previous training. Between the ongoing race and snippets of history it is not always easy to follow, and the biographic recollections are interspersed with third-person updates of other mushers, as this was written in hindsight. The changing point of view doesn’t always make it easy to situate yourself in the story. Of course, it doesn’t help having to keep track of mushers’ names, entire dog teams (at least a dozen dog names each), and rest stop names, but that’s what it is like in Alaska, it all sounds a little weird.
While you may initially need to read up on some of the terminology or at least take a look at some pictures of how dog teams and sleds are set up, it will transport you into a world that seems absolutely unimaginable. Unless you would like to roll up in a sleeping bag on a sled in temperatures below 50 in the middle of the snow with only your clothes to keep you warm?
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import os
import mantid
from mantid.simpleapi import *
class IndirectTransmissionMonitorTest(unittest.TestCase):
    """System test for the IndirectTransmissionMonitor Mantid algorithm."""

    def setUp(self):
        # Load a sample run and a can (container) run; Mantid resolves the
        # bare filenames through its configured data search directories.
        self._sample_workspace = 'IndirectTransmissionMonitorTest_sample'
        self._can_workspace = 'IndirectTransmissionMonitorTest_can'
        Load(Filename='IRS26176.RAW', OutputWorkspace=self._sample_workspace)
        Load(Filename='IRS26173.RAW', OutputWorkspace=self._can_workspace)
        self.kwargs = {}
        self.kwargs['SampleWorkspace'] = self._sample_workspace
        self.kwargs['CanWorkspace'] = self._can_workspace

    def test_basic(self):
        # The algorithm should return a group of three workspaces named
        # after the sample workspace with _Can/_Sam/_Trans suffixes.
        trans_workspace = IndirectTransmissionMonitor(**self.kwargs)
        self.assertTrue(isinstance(trans_workspace, mantid.api.WorkspaceGroup), msg='Result should be a workspace group')
        self.assertEqual(trans_workspace.size(), 3, msg='Transmission workspace group should have 3 workspaces: sample, can and transfer')
        expected_names = set()
        expected_names.add(self._sample_workspace + '_Can')
        expected_names.add(self._sample_workspace + '_Sam')
        expected_names.add(self._sample_workspace + '_Trans')
        self.assertEqual(set(trans_workspace.getNames()), expected_names)
# Allow running this test file directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
Prepare to be enchanted with an in-depth and up-close view of the most loved of all invertebrates, the praying mantis! Keeping the Praying Mantis is a huge resource designed to give mantis enthusiasts every tool needed for feeding, housing, and rearing these magical (almost mythological) creatures. Details on their biology, relationship with man over the ages, behavior, and captive husbandry will give you a solid foundation for successfully keeping fascinating species from around the world. From ant mimics and unicorn mantids to Devil’s flower, orchid, and ghost mantids, there are species to entice every insect hobbyist.
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020, Linus Östberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of kimenu nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Parsers of the menu pages for the restaurants at Karolinska Institutet
'''
import datetime
from datetime import date
import re
import sys
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
def restaurant(func):
    """
    Decorator to use for restaurants.

    Wraps a parser so it always returns the restaurant's static metadata
    (title, location, url, map url). If the wrapped parser raises, the
    error is written to stderr and an empty menu is returned instead of
    propagating, so one broken scraper cannot take down the others.
    """
    def helper(res_data):
        data = {'title': res_data['name'],
                'location': res_data['campus'],
                'url': res_data['url'],
                'map_url': res_data['osm']}
        try:
            data.update(func(res_data))
        except Exception as err:
            # Deliberately broad best-effort handling; log and fall back.
            # (Removed a dead `pass` that followed this fallback.)
            sys.stderr.write(f'E in {func.__name__}: {err}\n')
            data.update({'menu': []})
        return data
    # Preserve the wrapped function's identity for logging/introspection.
    helper.__name__ = func.__name__
    helper.__doc__ = func.__doc__
    return helper
def get_parser(url: str) -> BeautifulSoup:
    """
    Request the page at *url* and return a BeautifulSoup parser for it.

    Raises IOError for any non-200 HTTP status code.
    """
    page_req = requests.get(url)
    if page_req.status_code != 200:
        # Typo fix in the error message: 'responce' -> 'response'.
        raise IOError('Bad HTTP response code')
    return BeautifulSoup(page_req.text, 'html.parser')
def fix_bad_symbols(text):
    '''
    Repair mojibake: text that was UTF-8 encoded but decoded as Latin-1.

    Each key below is the two-character Latin-1 rendering of the UTF-8
    byte pair of the letter it maps to (e.g. 'å' is UTF-8 0xC3 0xA5,
    which Latin-1 displays as 'Ã¥'). Written with explicit escapes
    because some of these characters (0x84, 0x85) are control codes that
    corrupt source files when pasted literally — the original contained
    a string literal broken across a line by a raw 0x85 byte, plus a
    duplicated replacement.
    '''
    replacements = (
        ('\u00c3\u00a8', 'è'),
        ('\u00c3\u00a4', 'ä'),
        ('\u00c3\u0084', 'Ä'),
        ('\u00c3\u00b6', 'ö'),
        ('\u00c3\u00a9', 'é'),
        ('\u00c3\u00a5', 'å'),
        ('\u00c3\u0085', 'Å'),
    )
    for bad, good in replacements:
        text = text.replace(bad, good)
    return text.strip()
### date management start ###
def get_day():
    """Return today's day of month as an integer."""
    today = date.today()
    return today.day
def get_monthdigit():
    """Return the current month as an integer (1-12)."""
    today = date.today()
    return today.month
def get_month():
    """Return the Swedish name of the current month."""
    names = ('januari', 'februari', 'mars', 'april',
             'maj', 'juni', 'juli', 'augusti',
             'september', 'oktober', 'november', 'december')
    # Months are 1-based; the tuple is 0-based.
    return names[date.today().month - 1]
def get_week():
    """Return the ISO week number for today."""
    _, week, _ = date.today().isocalendar()
    return week
def get_weekday(lang='sv', tomorrow=False):
    '''
    Return the day name in Swedish ('sv') or English ('en').

    With tomorrow=True, the next day's name is returned (Sunday wraps
    to Monday, as in the original 7 -> 'måndag' mapping).

    Raises ValueError for an unsupported language code; previously this
    case crashed with an UnboundLocalError.
    '''
    names = {
        'sv': ('måndag', 'tisdag', 'onsdag', 'torsdag',
               'fredag', 'lördag', 'söndag'),
        'en': ('monday', 'tuesday', 'wednesday', 'thursday',
               'friday', 'saturday', 'sunday'),
    }
    if lang not in names:
        raise ValueError(f'Unsupported language: {lang}')
    wdigit = date.today().weekday()
    if tomorrow:
        wdigit += 1
    return names[lang][wdigit % 7]
def get_weekdigit():
    '''
    Return the weekday as a digit (Monday = 0).
    '''
    today = date.today()
    return today.weekday()
def get_year():
    '''
    Return the current year as an int.
    '''
    today = date.today()
    return today.year
### date management end ###
### parsers start ###
@restaurant
def parse_bikupan(res_data: dict) -> dict:
    '''
    Parse the menu of Restaurang Bikupan.
    '''
    def fmt_paragraph(p):
        # Collapse internal newlines so each dish is one line.
        return p.get_text().strip().replace('\n', ' ')

    def find_todays_menu(menus):
        now = datetime.datetime.today()
        today = (now.month, now.day)
        for day_menu in menus:
            # We expect the heading to contain text similar to `Måndag 10/2`.
            # Renamed the local from `date` — it shadowed the imported
            # `date` class.
            date_text = day_menu.find('h6').text.split(' ')[1]
            day, month = date_text.split('/')
            if (int(month), int(day)) == today:
                # Bikupan lists both English and Swedish; keep Swedish only.
                courses = defaultdict(list)
                for p in day_menu.find_all('p'):
                    if 'class' in p.attrs and p['class'][0] == 'eng-meny':
                        courses['english'].append(p)
                    else:
                        courses['swedish'].append(p)
                return [fmt_paragraph(sv) for sv in courses['swedish']]
        raise Exception("Can't find today's menu")

    soup = get_parser(res_data['menu_url'])
    menus = soup.find_all('div', {'class': 'menu-item'})
    return {'menu': list(find_todays_menu(menus))}
@restaurant
def parse_dufva(res_data):
    '''
    Parse the menu of Sven Dufva.
    '''
    data = {'menu': []}
    soup = get_parser(res_data['menu_url'])
    post = soup.find("div", {"id": "post"})
    day_marker = f"- {get_weekday()} -"
    in_today = False
    # Dishes for a day sit between its "- <day> -" marker and the next
    # marker line (which also starts with '-').
    for raw_line in post.get_text().split('\n'):
        if not raw_line:
            continue
        if raw_line.lower() == day_marker:
            in_today = True
        elif in_today:
            if raw_line.startswith('-'):
                break
            data['menu'].append(raw_line.strip())
    return data
@restaurant
def parse_glada(res_data):
    '''
    Parse the menu of Glada restaurangen.
    '''
    # The page layout is not machine-friendly, so no menu is extracted.
    # If anyone actually wants to parse it, patches are welcome.
    return {'menu': []}
@restaurant
def parse_haga(res_data):
    '''
    Print a link to the menu of Haga gatukök.
    '''
    # Only the common link fields (added by @restaurant) are published.
    data = {'menu': []}
    return data
@restaurant
def parse_hjulet(res_data):
    '''
    Parse the menu of Restaurang Hjulet.
    '''
    data = {'menu': []}
    soup = get_parser(res_data['menu_url'])
    # Only parse when some <h3> advertises this week's menu.
    current_week = any(
        header.find(text=re.compile(f'MENY VECKA {get_week()}'))
        for header in soup.find_all('h3'))
    # Will fail if the day is in a non-menu paragraph
    if current_week:
        menu = soup.find('pre')
        in_today = False
        for menu_row in menu:
            row_text = str(menu_row)
            if get_weekday().upper() in row_text:
                in_today = True
                continue
            if get_weekday(tomorrow=True).upper() in row_text:
                break
            if in_today:
                entries = row_text.strip().replace('\r', '').split('\n')
                data['menu'] += [entry for entry in entries if entry]
    return data
@restaurant
def parse_hubben(res_data):
    '''
    Parse the menu of Restaurang Hubben.
    '''
    soup = get_parser(res_data['menu_url'])
    # One "day" div per weekday; pick today's.
    today = soup.find_all("div", {"class": "day"})[get_weekdigit()]
    dishes = today.find_all('div', {'class': 'element description col-md-4 col-print-5'})
    menu = [dish.get_text().strip().replace('\n', ' ') for dish in dishes]
    return {'menu': menu}
@restaurant
def parse_jons(res_data):
    '''
    Parse the menu of Jöns Jacob.
    '''
    soup = get_parser(res_data['menu_url'])
    table = soup.find('table', {'class': 'table lunch_menu animation'})
    today = table.find('tbody', {'class': 'lunch-day-content'})
    titles = [cell.text.strip()
              for cell in today.find_all('td', {'class': 'td_title'})]
    # Drop empty title cells.
    return {'menu': [title for title in titles if title]}
@restaurant
def parse_jorpes(res_data):
    '''
    Parse the menu of Restaurang Jorpes.
    '''
    # No machine-readable menu is published; return an empty one.
    return {'menu': []}
@restaurant
def parse_livet(res_data):
    '''
    Parse the menu of Livet.

    Walks <h3> and <p> tags in document order and collects the direct
    text of paragraphs that sit between today's day heading and either
    tomorrow's heading or the privacy-notice footer.
    '''
    data = {'menu': []}
    soup = get_parser(res_data['menu_url'])
    started = False
    for par in soup.find_all(('h3', 'p')):
        if started:
            # Tomorrow's day name marks the end of today's menu.
            if par.find(text=re.compile(get_weekday(tomorrow=True).capitalize())):
                break
            # The privacy notice ("personuppgifterna") trails the menu.
            if par.find(text=re.compile('[Pp]ersonuppgifterna')):
                break
            # Direct (non-recursive) text only, to skip nested markup.
            text = par.find(text=True, recursive=False)
            if text:
                data['menu'].append(text)
            continue
        # Start collecting once today's day name is seen.
        if par.find(text=re.compile(get_weekday().capitalize())):
            started = True
    return data
@restaurant
def parse_nanna(res_data):
    '''
    Parse the menu of Nanna Svartz.

    Only parses when the page header advertises the current week's
    menu ("MATSEDEL V.<week>"); collects entries between today's day
    name and tomorrow's (or the "Priser" footer).
    '''
    data = {'menu': []}
    soup = get_parser(res_data['menu_url'])
    menu_part = soup.find_all('div', {'class': 'entry-content'})[0]
    current_week = False
    for tag in menu_part.find_all('strong'):
        if tag.find(text=re.compile(r'MATSEDEL V\.' + str(get_week()))):
            current_week = True
            break
    if current_week:
        started = False
        dishes = []
        for par in menu_part.find_all(('li', 'strong')):
            if started:
                # Stop at tomorrow's heading or the price footer.
                if (par.find(text=re.compile(get_weekday(tomorrow=True).capitalize())) or
                    par.find(text=re.compile(r'^Priser'))):
                    break
                # Since they mess up the page now and then,
                # day may show up twice because it is both <li> and <strong>
                if par.find(text=re.compile(get_weekday().capitalize())):
                    continue
                # Strip non-breaking spaces the CMS inserts.
                dish_text = par.text.replace('\xa0', '')
                if dish_text:
                    dishes.append(dish_text)
            if par.find(text=re.compile(get_weekday().capitalize())):
                started = True
        # Entries alternate Swedish/English; keep every other (Swedish) one.
        data['menu'] = dishes[::2]  # get rid of entries in English
    return data
@restaurant
def parse_rudbeck(res_data):
    '''
    Parse the menu of Bistro Rudbeck.
    '''
    soup = get_parser(res_data['menu_url'])
    # One container per weekday; pick today's.
    day = soup.find_all('div', {'class': 'container-fluid no-print'})[get_weekdigit()]
    # The first three spans are headers; the rest are dishes.
    menu = [dish.get_text().strip() for dish in day.find_all('span')[3:]]
    return {'menu': menu}
@restaurant
def parse_svarta(res_data):
    '''
    Parse the menu of Svarta Räfven.
    '''
    # No menu is parsed for this restaurant.
    data = {'menu': []}
    return data
@restaurant
def parse_tallrik(res_data):
    '''
    Parse the menu of Tallriket.
    '''
    soup = get_parser(res_data['menu_url'])
    day = soup.find_all('div', {'class': 'container-fluid no-print'})[get_weekdigit()]
    menu = []
    # The first three spans are headers; keep non-empty dishes only.
    for span in day.find_all('span')[3:]:
        text = span.get_text().strip()
        if text:
            menu.append(text)
    return {'menu': menu}
|
Today we made a start on the electrofishing surveys in the River Fiddich. It was a delayed start to the day so we only managed four sites but they were productive. We used the same timed survey approach that we use in the Spey mainstem and in the other larger tributaries. This technique hadn’t been used in the Fiddich before, so there was no direct baseline; rather, it was more for comparison with other timed surveys across the catchment.
The first site was below the old railway bridge crossing just upstream of the Fiddichside Inn. I had sussed this location a few weeks ago and had high expectations as the habitat looked ideal.
To get to the site we had to battle through 8′ high Himalayan balsam, the tallest I have ever seen. However it was worth it as we caught 322 salmon fry and a few parr in the three minute survey. This was the highest count from any of our fry surveys over the last three years.
There was a tremendous number of fish at this site, many of which were relatively large for fry in July. The thermal discharges from the many distilleries in the Fiddich are known to enhance the growth of fish in this tributary. At each of the four sites today there were a number of fish over 80mm (as well as larger more obvious parr). We took many scale samples as usual; we will know for certain once they are read whether they were fry or small parr. The biomass of fish caught was high with damaged tail fins on many fish: a not unusual occurrence in sites with high fry densities.
Unfortunately due to my failure to activate the GoPro headcam the survey was not recorded on video. A pity as there was an impressive number of fish. Let’s just blame the jetlag!
The next site up was at Mains of Newton Farm. Here we caught 263 salmon fry and a few parr (subject to scale reading), along with a few trout and eels. This is I think the second highest salmon fry count we have recorded during the salmon fry counts.
The survey site at Mains of Newton. Holding the site board is Kirsteen Macdonald our seasonal summer assistant. With 3 years previous experience with the Kyle of Sutherland fishery board Kirsteen was able to hit the ground running and has been a very able assistant. Kirsteen’s father is a ghillie on the Oykel so she is well versed in salmon matters.
The next site was opposite Kinninvie House. We couldn’t find a site with optimum habitat for fry but we still got 142 in the 3 minute survey. This site had the largest average salmon fry size of the day at 64mm, comparable with the very lower reaches of the Spey.
Kininnvie survey site; much of the site was a bit too deep to be considered optimum fry habitat but we still had a good catch.
The upper site today was downstream of Balvenie Distillery. The count was lower here; 75 fry were caught in 3 minutes, just above the average for all the sites done on the Spey mainstem this summer.
We will continue with the timed surveys on the Fiddich tomorrow. There appears to be no shortage of salmon fry in the lower Fiddich. I always reckon that whilst electrofishing in core habitat in a healthy salmon river you should catch a salmon fry/parr within 10 seconds of starting. That was certainly the case today.
|
# MySQL unnecessary functions:
def insertNewSupplier(form_data, userID):  # @@
    """Validate the submitted supplier form and insert a new supplier row.

    Returns the validator's message dict; msg['ins'] is True only when
    the INSERT actually succeeded.
    """
    # Collect the expected form fields into a single dict.
    iDkeys = [
        'supplier', 'line1', 'city', 'province', 'zip',
        'firstname', 'lastname', 'email', 'phone', 'fax'
    ]  # later add preference & etc.
    raw_data = {key: form_data[key] for key in iDkeys}
    # input data validation
    (data_valid, ins_data, msg) = inputDataValidator(raw_data)
    msg.update({'ins': False})
    if data_valid:  # --<insertion case
        db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
        MySQL_c = db.cursor()
        supplier = ins_data['supplier']
        address_id = ins_data['address_id']
        contact_id = ins_data['contact_id']
        try:
            MySQL_c.execute(
                """INSERT INTO suppliers (supplier_name, address_id,
                contact_id) VALUES (%s, %s, %s)""",
                (supplier, address_id, contact_id))
            msg.update({'ins': True})
        except MySQLdb.Error:
            # Narrowed from a bare `except:` so unrelated errors (typos,
            # KeyboardInterrupt, ...) are no longer swallowed silently.
            logging.info('Insertions failed, supplier=%s, '
                         'address_id=%s, contact_id=%s',
                         supplier, address_id, contact_id)
        db.commit()
        db.close()
    else:
        logging.info('Data not valid for insertion: %s' % (msg,))
    return msg
def inputDataValidator(raw_data_dict):  # @@
    """Sanitize and validate supplier form data.

    Returns a tuple (valid, data_for_insertion, msg):
      valid              -- True only for a non-empty, not-yet-known supplier
      data_for_insertion -- values ready for the suppliers INSERT
      msg                -- per-field diagnostic messages
    """
    msg = {}
    data_for_insertion = {}
    # NOTE(review): initialised to an empty tuple but always reassigned
    # to a bool below.
    val_result = ()
    db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
    MySQL_c = db.cursor()
    data_dict = sanitizer(raw_data_dict)  # sanitize it
    # check up supplier
    supplier = data_dict['supplier']
    if supplier:
        MySQL_c.execute(
            """SELECT id FROM suppliers WHERE supplier_name=%s""",
            (supplier,))
        if MySQL_c.fetchone():  # this supplier already exists in DB
            msg.update({'s_name': 'already exists'})  # <-- update case
            data_for_insertion.update({'supplier': supplier})
            val_result = False
        else:  # <-- insert case
            data_for_insertion.update({'supplier': supplier})
            val_result = True
    else:  # <-- empty field case:
        msg.update({'s_name': 'empty'})
        val_result = False
    # NOTE(review): address/contact ids are hard-coded placeholders.
    data_for_insertion.update({'address_id': 1})  # address_id})
    data_for_insertion.update({'contact_id': 1})  # clerk_id})
    result = (val_result, data_for_insertion, msg)
    # commit is effectively a no-op here (read-only), but harmless.
    db.commit()
    db.close()
    return result
# order_composition filler:
# NOTE(review): one-off migration snippet (Python 2) — relies on
# SQLite3_c / MySQL_c cursors created elsewhere in the scratch file.
SQLite3_c.execute(
    'SELECT Order_Number, Item_SKU, Item_Price, Item_Qty_Ordered \
    FROM orders')
raw_item_data = SQLite3_c.fetchall()
prep_data = []
for i in raw_item_data:
    (o_number, sku, price, qty) = i
    # Resolve the Magento order number to the MySQL primary key.
    MySQL_c.execute("""SELECT id FROM orders WHERE magento_id=%s""",
                    (o_number,))
    o_id = int(MySQL_c.fetchone()[0])
    # Resolve the SKU to the MySQL product id.
    MySQL_c.execute("""SELECT id FROM products WHERE sku=%s""",
                    (sku,))
    p_id = int(MySQL_c.fetchone()[0])
    # Strip the leading currency symbol from prices like '$12.34'.
    prep_data.append((o_id, p_id, price.split('$')[-1], qty))
print prep_data
MySQL_c.executemany(
    """ INSERT INTO order_composition (order_id, product_id,
    price, qty) VALUES (%s, %s, %s, %s)""", prep_data)
# this is orders table filler
# NOTE(review): dedupes rows via set(), converts the date to MySQL
# format, resolves foreign keys, then bulk-inserts into MySQL `orders`.
SQLite3_c.execute(
    'select Order_Number,Order_Date, Customer_Name, \
    Shipping_Phone_Number, Shipping_Street from orders'
)
raw_orders = set(SQLite3_c.fetchall())
orders = list(raw_orders)
prepared_data = []
for i in orders:
    (m_num, m_date, c_name, p_num, street) = i
    # Convert 'DD/MM/YYYY HH:MM' into MySQL 'YYYY-MM-DD HH:MM:00'.
    raw_date, raw_time = m_date.split()
    time = raw_time + ':00'
    date = '-'.join(raw_date.split('/')[::-1])
    m_date = date + ' ' + time
    # Resolve the foreign keys (customer, phone, address).
    MySQL_c.execute("""SELECT id FROM customers WHERE customer_name=%s""",
                    (c_name,))
    customer_id = int(MySQL_c.fetchone()[0])
    MySQL_c.execute("""SELECT id FROM phones WHERE phone_num=%s""",
                    (p_num,))
    phone_id = int(MySQL_c.fetchone()[0])
    MySQL_c.execute("""SELECT id FROM addresses WHERE line1=%s""",
                    (street,))
    address_id = int(MySQL_c.fetchone()[0])
    print (
        m_num, m_date, c_name, customer_id, p_num, phone_id,
        street, address_id
    )
    prepared_data.append(
        (int(m_num), customer_id, address_id, phone_id, m_date))
MySQL_c.executemany(
    """INSERT INTO orders (magento_id, customer_id, shipping_address_id,
    shipping_phone_id, magento_time) VALUES (%s, %s, %s, %s, %s)""",
    prepared_data)
#?
# FIXME(review): phoneFiller is an unfinished stub — the `if` below has
# no body, and the address-filler snippet that follows appears to have
# been pasted into the middle of it.
def phoneFiller(self, raw_phone):
    # extract significant parts:
    if len(raw_phone) == 8: # it's a bold phone number
# Filling addresses table:
SQLite3_c.execute(
    """SELECT Shipping_Street, Shipping_Zip, Shipping_City, Shipping_State_Name, \
    Shipping_Country_Name FROM orders"""
)
address_data = set(SQLite3_c.fetchall())
MySQL_c.executemany(
    """INSERT INTO addresses (line1, zip, city, province, country)
    VALUES (%s, %s, %s,%s, %s)""", address_data
)
# - #
# typical MySQL interaction: filling products table
# NOTE(review): dedupes by SKU while preserving first-seen order;
# id is passed as None so MySQL auto-increments it.
SQLite3_c.execute('SELECT Item_Name, Item_SKU from orders')
product_data = SQLite3_c.fetchall()
inserted_sku = []
prepared_data = []
for i in product_data:
    if i[1] not in inserted_sku:
        prepared_data.append((None, i[0], i[1]))
        inserted_sku.append(i[1])
print prepared_data
MySQL_c.executemany(
    """INSERT INTO products (id, item_name, sku) VALUES (%s, %s, %s)""",
    prepared_data)
# - #
# this snippet fills data from csv into SQLite3
# NOTE(review): expects `c` (an SQLite cursor) from elsewhere; the CSV
# file handle is never closed.
csv_file = open('orders.csv', 'rU')
o = csv.reader(csv_file)
for i in o:
    c.execute('INSERT INTO orders VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,\
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,\
?)', tuple(i))
# - #
# check up address
# NOTE(review): fragment of the address/contact validation — reads the
# sanitized form fields and resolves or inserts an address row.
line1 = data_dict['line1'] # -- four variables there
city = data_dict['city']
province = data_dict['province']
postal_zip = data_dict['zip']
#
if line1:
    MySQL_c.execute(
        """SELECT id FROM addresses WHERE line1=%s""",
        (line1,))
    # FIXME(review): fetchone() is called twice — the second call below
    # returns the NEXT row (usually None), not the matched id.
    if MySQL_c.fetchone(): # this address is well known
        address_id = MySQL_c.fetchone()[0]
    else: # the new one
        msg.update({'line1': 'new insertion'})
        MySQL_c.execute(
            """INSERT INTO addresses (line1, city, province, zip)
            VALUES (%s, %s, %s, %s)""",
            (line1, city, province, postal_zip))
        address_id = MySQL_c.lastrowid
else: # empty line1 case
    msg.update({'line1': 'empty'})
    MySQL_c.execute(
        """INSERT INTO addresses (line1, city, province, zip)
        VALUES (%s, %s, %s, %s)""",
        (line1, city, province, postal_zip))
    address_id = MySQL_c.lastrowid
# check up clerk
c_first_name = data_dict['firstname']
c_last_name = data_dict['lastname']
email = data_dict['email']
phone = data_dict['phone']
fax = data_dict['fax']
# the main condition:
# NOTE(review): `(email or phone) or (email and phone)` reduces to just
# `email or phone`.
if (email or phone) or (email and phone):
    # check it up
    MySQL_c.execute(
        """SELECT id FROM clerks WHERE email=%s""",
        (email,))
    clerk_id = MySQL_c.fetchone()
    # FIXME(review): the branches below have no statements (comments are
    # not a suite) — this fragment is a SyntaxError as written.
    if clerk_id: # this email is well known already
        #
    else: # it's a new email
        #
else: # it's a deviation
    msg.update({'contact': 'unknown communication method'})
# - #
|
Selected by Dr Bob Romanko, BOR 704 was so named for its planting position, row 7, plant 04, in the Prosser “Bone Yard” in the Yakima Valley. It features an aroma profile that is distinctly European. While BOR exhibits a relatively poor yield, it has been used to breed other seedlings in an effort to pass on its low cohumulone rate that is sometimes as low as 14%. However, its low alpha content has hindered it from gaining any successful foothold in commercial hops production.
There seems to be some confusion over its parentage. Some sources say it’s a seedling of Hallertau Mittelfrueh while others say it is of Saaz and Northern Brewer descent. The former is most likely true.
|
# Class holding the shared state of a robot component (pins + position).
class RobotComponent(object):
    """A robot part wired to one or two board pins.

    Attributes:
        pin1     -- primary pin object from board.get_pin()
        pin2     -- optional secondary pin object, or None
        position -- optional mounting position string (e.g. "left"), or None
    """

    def __init__(self, board, pin1, pin2=None, position=None):
        self.pin1 = board.get_pin(pin1)
        # `is not None` instead of `!= None` (PEP 8); attributes are set
        # on the instance rather than relying on shared class defaults.
        self.pin2 = board.get_pin(pin2) if pin2 is not None else None
        self.position = position
# class for the motor actions
class Motor(RobotComponent):
    """Drive motor; turn semantics depend on the mounting position.

    The original spelled the turn method and position value "rigth";
    both spellings are accepted and `rigth` is kept as an alias so
    existing callers keep working.
    """

    def forward(self):
        self.pin1.write(1)
        self.pin2.write(0)

    def backward(self):
        self.pin1.write(0)
        self.pin2.write(1)

    def left(self):
        # A left-mounted motor runs backward to turn left, and vice versa.
        if self.position == "left":
            self.backward()
        elif self.position in ("right", "rigth"):
            self.forward()

    def right(self):
        if self.position == "left":
            self.forward()
        elif self.position in ("right", "rigth"):
            self.backward()

    # Backward-compatible alias for the original misspelled method name.
    rigth = right

    def stop(self):
        self.pin1.write(0)
        self.pin2.write(0)
# class for the magnet actions
class Magnet(RobotComponent):
    """Electromagnet driven through two pins."""

    def on(self):
        print("Magnet on")
        # NOTE(review): 50 identical writes — presumably to hold the
        # signal on the board; confirm against the firmware behaviour.
        for _ in range(50):
            self.pin1.write(1)
            self.pin2.write(1)

    def off(self):
        print("Magnet off")
        for _ in range(50):
            self.pin1.write(0)
            self.pin2.write(0)
class Servo(RobotComponent):
    """Servo positioned by writing an angle value to its pin."""

    def move(self, value):
        print(value)
        self.pin1.write(value)
|
One of the biggest disappointments of 2009, Where the Wild Things Are fails to provide even the slightest glimpse of a full story. It’s obvious that the authors were trying to use the Wild Things as representations of Max’s psyche, but this gimmick is a poor vehicle for character development. Main Characters have access to mirrors every day, show them their own reflection and they’ll keep living their life the way they always have. But give them an alternative perspective, one that has some similarities yet truly is something outside of the Main Character, and you’ll have a catalyst for meaningful growth. WTWTA didn’t have this character and thus didn’t give Max the relationship he needed to arc convincingly. Consequently, the ending was meaningless and empty. This film is a perfect example of what happens when you construct a story that isn’t trying to argue a position. Instead of giving an audience something they’ll cherish, crafting a story in this manner only offers up confusion and disappointment.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
__appname__ = "vennFromKad"
__licence__ = "none"
__version__ = "0.1"
__author__ = "Benoit Guibert <benoit.guibert@free.fr>"
__shortdesc__ = "Build a file for a venn diagram from kad output"
__opts__ = []
def main(parent):
    """Entry point: build and print venn data for each input file."""
    for file in argsChk(parent):
        setFile(file, buildVennData(file))
def buildVennData(file):
    """Count kad hits per source combination.

    The first line of *file* is a header and is skipped. Each data line
    is tab-separated; fields after the first look like `source|detail`.
    The combination key joins the source parts with underscores
    (e.g. "A_B"). Returns a dict {combination: count}.

    Cleanup vs the original: the header is no longer split on ';' into
    an unused variable, the dead IndexError handler is gone (split()
    always yields at least one part), and field-less lines are skipped
    instead of being counted under an empty key.
    """
    venn_list = {}
    with open(file) as stream:
        stream.readline()  # skip the header line
        for line in stream:
            fields = line.split('\t')[1:]
            if not fields:
                continue
            name = "_".join(item.split('|')[0] for item in fields)
            venn_list[name] = venn_list.get(name, 0) + 1
    return venn_list
def setFile(file, venn_list):
    """Print the venn table (one `name<TAB>count` row per combination).

    The *file* argument is currently unused.
    """
    table = ''.join('{}\t{}\n'.format(name, count)
                    for name, count in venn_list.items())
    print(table, end="")
def argsChk(parent):
    """Return the CLI arguments, handling -h and the no-argument case."""
    # When run via a parent command, our own name occupies argv[1].
    offset = 1 if __appname__ in sys.argv[0] else 2
    args = sys.argv[offset:]
    if "-h" in args:
        __opts__.append("-h")
        args.remove("-h")
        helpme(parent)
        sys.exit()
    if not args:
        helpme(parent)
        sys.exit()
    return args
def helpme(parent):
    """Print the tool description, usage lines, and option list.

    *parent* is the invoking command name; when it equals our own app
    name the usage omits the parent prefix.
    """
    opts = " -h\t: help\n"
    print("\n{}\n".format(__shortdesc__))
    if parent == __appname__:
        print("Usage: {} <arguments>".format(__appname__))
        print("       {} -h\n".format(__appname__))
        print(opts)
    else:
        print("Usage: {} {} [-h] <arguments>".format(parent, __appname__))
        print("       {} {} -h\n".format(parent, __appname__))
        print(opts)
if __name__ == "__main__":
    # Run standalone with our own app name as the parent command.
    main(__appname__)
|
As part of its service package to clients, PF Olsen offers a comprehensive and competitive Group Insurance Scheme to forest owners, put together by our broker.
Are you covered for forest losses related to fire or wind damage?
Are you covered for costs of fire fighting or claims of loss from fire spreading to a neighbour's property?
Could you afford to clear land and replant after a fire or wind event destroys your forest?
The PF Olsen Group Insurance Scheme is designed to assist small-medium sized forest owners get the same economies of scale as large forest owners, and make cost-effective insurance within their reach.
Want to find out whether the PF Olsen Group Insurance Scheme is right for you?
|
# -*- coding: utf-8 -*-
import os
import ConfigParser
import time
import socket
import subprocess
import alfred
def tcp_port_status(host, port):
    """Return True if a TCP connection to host:port succeeds within 1s.

    Fixes the original's file-descriptor leak: the socket is now always
    closed (the old code never called close()), and the bare `except:`
    is narrowed to connection errors plus a non-numeric port.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(1)
    try:
        s.connect((host, int(port)))
        s.shutdown(socket.SHUT_RDWR)
        return True
    except (socket.error, ValueError):
        return False
    finally:
        s.close()
def tcp_port_ping(host, port):
    """Probe host:port over TCP; return (reachable, latency_seconds).

    Latency is None when the port is unreachable.
    """
    started = time.time()
    if not tcp_port_status(host, port):
        return False, None
    return True, time.time() - started
class PingportWorkflow(alfred.AlfredWorkflow):
    """Alfred workflow: autocompletes `pingport <host> <port>` queries
    and reports TCP reachability once both tokens are present."""

    _reserved_words = []

    def __init__(self, max_results=20):
        # Maximum number of feedback items to emit to Alfred.
        self.max_results = max_results

    def command_autocomplete_iter(self, query):
        """Yield Alfred feedback items for the (partial) query."""
        args = query.rstrip().split()
        # A trailing space means the previous token is complete.
        if len(query.lstrip()) and query[-1] == ' ':
            args.append('')
        if len(args) in (0, 1):
            # First token: suggest common hosts that match the query.
            valids = 0
            for host in ('localhost', '127.0.0.1'):
                item = self.item(title=host,
                                 description='pingport {} ...'.format(host),
                                 uid=host,
                                 autocomplete=True, arg='{} '.format(host),
                                 match=query, ignore=True)
                if item:
                    valids += 1
                    yield item
            if not valids:
                # Nothing matched: echo the user's own host back.
                yield self.item(title=query,
                                uid=query,
                                description='pingport {} ...'.format(query),
                                autocomplete=False, arg=query, ignore=True)
        elif len(args) == 2:
            # Second token: suggest common ports for the chosen host.
            valids = 0
            for port in (22, 80, 443):
                sub_query = '{} {}'.format(args[0], port)
                item = self.item(title=sub_query,
                                 uid=sub_query,
                                 description='pingport {}'.format(sub_query),
                                 autocomplete=True, arg='{} '.format(sub_query),
                                 match=query, ignore=True)
                if item:
                    valids += 1
                    yield item
            if not valids:
                yield self.item(title=query,
                                uid=query,
                                description='pingport {}'.format(query),
                                autocomplete=True, arg='{} '.format(query),
                                ignore=True)
        elif len(args) == 3:
            # Host and port entered (plus trailing token): probe the port.
            host, port = args[:2]
            status, latency = tcp_port_ping(host, port)
            if status:
                description = '{}:{} TCP port replied in ' \
                    '{:.2f}ms.'.format(host, port, latency * 1000)
                yield self.item(title=query, uid=query,
                                description=description, autocomplete=True,
                                arg=query, match=query, ignore=True)
            else:
                description = '{}:{} TCP port is closed.'.format(host, port)
                yield self.item(title=query, uid=query,
                                description=description, autocomplete=True,
                                arg=query, match=query, ignore=True)

    def do_command_autocomplete(self, query):
        # Serialize the generated items back to Alfred.
        self.write_items(self.command_autocomplete_iter(query))
def main(action, query):
    """Dispatch the requested workflow action with its query."""
    PingportWorkflow().route_action(action, query)
if __name__ == "__main__":
    # alfred.args() supplies (action, query) from the Alfred input.
    main(action=alfred.args()[0], query=alfred.args()[1])
|
Format: , iii-ix, , 187, 508, p. :fold. map, fold. plan, geneal. tables (2 fold.) ;26 cm.
Reference: Binark & Eren. World bib., 412 BM Arabic I, col. 888 Chauvin, V.C. Bib. des ouvrages arabes, vol. X, 146 Schnurrer, C.F. von. Bibliotheca Arabica, p. 429 Also issued on microfiche.
|
import re
import os
import collections
import operator
def extend(d, u):
    """Recursively merge mapping *u* into *d* in place and return *d*.

    Nested mappings are merged; any other value in *u* overwrites the
    corresponding key in *d*.
    """
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
            # collections.Mapping was removed in Python 3.10;
            # collections.abc.Mapping is the supported location.
            d[k] = extend(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def list_dirs(path):
    """Split *path* into its components, root first (e.g. 'a/b' -> ['a','b'])."""
    parts = []
    while path:
        path, tail = os.path.split(path)
        if tail:
            parts.append(tail)
        else:
            # Reached an un-splittable root such as '/'.
            if path:
                parts.append(path)
            break
    parts.reverse()
    return parts
def uncapitalize(s):
    """Lower-case the first character of *s*; '' stays ''."""
    if not s:
        return ''
    return s[0].lower() + s[1:]
def extract_str(raw_string, start_marker, end_marker):
    """Return the text between the first *start_marker* and the next
    *end_marker*; raises ValueError when either marker is missing."""
    begin = raw_string.index(start_marker) + len(start_marker)
    return raw_string[begin:raw_string.index(end_marker, begin)]
# Shared patterns for CamelCase splitting (also used by the hyphenate variant).
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')


def camel_case_to_underscore(name):
    """Convert CamelCase / mixedCase to snake_case."""
    partial = first_cap_re.sub(r'\1_\2', name)
    return all_cap_re.sub(r'\1_\2', partial).lower()
def camel_case_to_hyphenate(name):
    """Convert CamelCase / mixedCase to hyphen-ated lower case."""
    partial = first_cap_re.sub(r'\1-\2', name)
    return all_cap_re.sub(r'\1-\2', partial).lower()
def copy_dict(source_dict, diffs):
    """Return a shallow copy of *source_dict* updated with *diffs*."""
    return {**source_dict, **diffs}
def sort_dict_keys(d):
    """Return the keys of *d* as a sorted list."""
    return sorted(d.keys())
def sort_dict_values(d):
    """Return (key, value) pairs of *d* sorted by value.

    The original sorted by key (itemgetter(0)), contradicting the
    function's name and duplicating sort_dict_keys' purpose; it now
    sorts by value as advertised.
    """
    return sorted(d.items(), key=operator.itemgetter(1))
def upcase_first(s):
    """Upper-case the first character of *s*.

    Uses s[:1] so the empty string returns '' instead of raising
    IndexError (matching the empty-safety of uncapitalize above).
    """
    return s[:1].upper() + s[1:]
def lowercase_first(s):
    """Lower-case the first character of *s*.

    Uses s[:1] so the empty string returns '' instead of raising
    IndexError. NOTE: duplicates uncapitalize() above.
    """
    return s[:1].lower() + s[1:]
def human_string_list(strs, conjunction='or'):
    """Join strings like English prose: '', 'a', 'a or b', 'a, b, or c'."""
    count = len(strs)
    if count == 0:
        return ''
    if count == 1:
        return str(strs[0])
    if count == 2:
        return '{} {} {}'.format(strs[0], conjunction, strs[1])
    # Three or more: Oxford-comma list with the conjunction on the last item.
    items = list(strs)
    items[-1] = '{} {}'.format(conjunction, items[-1])
    return ', '.join(items)
def is_primitive_type(type_obj):
    """True when the type descriptor's name is one of the primitives."""
    return type_obj['name'] in {
        'null', 'boolean', 'int', 'float', 'string', 'list', 'dictionary'}
|
TalkTalk’s new offices, fitted-out by Found Associates and implemented by Morgan Lovell, are way out west in what is an early phase of Allford Hall Monaghan Morris' Nottingdale Village development. At the moment though, it’s probably more recognisable as being a few minutes’ walk from Latimer Road Tube station and not a million miles away from Ladbroke Grove and Notting Hill.
As the crow flies, Westfield shopping centre is a neighbour but currently these are meaner streets than the wide, white boulevards of the shopping mecca. Speaking of retail, the headquarters of Monsoon and Accessorize are next door and with Chrysalis music group and John Brown publishing nearby, not to mention photographer Mario Testino’s studio, this is an area that has every chance of shaking off its ‘up and coming’ tag very soon.
The new workspace comes at an interesting time in TalkTalk’s history. Originally formed in 2002 as a result of The Carphone Warehouse’s acquisition of telecoms firm Opal, and then offering landline services, it is now a big player in the broadband sector and this year sees it being independently listed on the London Stock Exchange.
While it might have been tempting for the firm to bang the drum for this significant step with branding a-go-go, it has resisted. The first indication of whose office this is, is a large yet subtle metal logo beside some fairly anonymous revolving doors.
The ground floor forms the core of the workspace, epitomising the idea of ‘approachable yet intelligent design’. “We’ve left the concrete shell exposed and put materials alongside it,” begins Richard Found, who started the practice in 1997 and as well as workplace, has achieved significant acclaim in the retail sector too, with projects for the likes of Kurt Geiger and Bally.
This part of the office has been divided up into distinct zones. Three four-metre long, interactive screens form a media wall within the lounge zone, which also contains contemporary, clean-lined sofas. Together these screens can be used for client presentations, town hall meetings and sporting events or for separate meetings simultaneously.
For the adjacent canteen area, dark bembe wood, in parquet strips, has been used seamlessly for the flooring and the three long benches and perimeter bar, which also serve to promote staff socialising. The industrial feel of this space is reinforced by the specification of lights from an old Rover vehicle factory.
If the lounge or canteen doesn’t appeal, there is also the conservatory area away from the main space, which utilises as much daylight as possible and is intended as a place for multiple meetings over breakfast or lunch. Found explains how in summer, this area is bathed in morning sunlight.
The guiding principle for the ground floor was to encourage staff to circulate and communicate and to provide a creative and inspiring environment for visitors at the same time. And given the open plan nature of this area, these two groups are anything but mutually exclusive, which helps with reinforcing what the company stands for and what it does.
The aesthetic is one of those understated restaurant interiors where the food keeps on winning Michelin stars. This workplace is no slouch in the winning of awards either, having secured a BREEAM rating of ‘excellent’.
So, up in the lifts we go and onto various floors to explore the use of colour – a key component of this scheme. While the story of this office had begun with the monochrome of the ground floor, the floors above are a different story.
“You get just a pop of colour when the lift doors open,” explains Found.
This is in the first instance an excellent wayfinding tool over the six floors. Teal is used for the first floor, orange for the second, purple for the third, green for the fourth, yellow for the fifth and a wood finish for the sixth, where the marketing team as well as the directors work, enjoying some pretty impressive views over the capital.
These brighter hues can be found on the main service areas for printing, storage and recycling and contrast nicely with the charcoal grey seating from Hitch Mylius.
The services are left exposed for a raw aesthetic and the workstations specified are unfussy, which makes the works of art on each floor a good counterbalance.
Each of the shades chosen for these five floors has been chosen as they are part of the corporate colour palette and the subtle branding continues on the exteriors of the building as well. Here, an LED lighting system illuminates the existing fair-faced concrete columns to give a delightful colour-changing reminder of TalkTalk’s existence in this part of west London.
|
''' Jeroen Meijaard 10611002 '''
import json
import os
import errno
import calendar
import sys
import time
from dateutil import parser
def loadTwitterData(filepath='00.json'):
''' Load in twitterdata'''
twitterData = []
print 'Loading', filepath
# get data from file to array
with open(filepath) as data_file:
for line in data_file:
twitterData.append(json.loads(line))
return twitterData
def loadStockData():
    ''' Load in stock data (Python 2).

    Reads the Finam-style CSV export and, for each data row, prepends a
    Twitter-style timestamp built from the date and time columns.
    '''
    stockData = []
    # NOTE(review): dataObjects is never used.
    dataObjects = []
    # open input file and write to csv
    with open('US2.GOOG_120101_120131.txt') as data_file:
        inputFileList = [line.rstrip('\n').strip('\r') for line in data_file]
    # for apple stock, remove strip('\r') and use split('\r')
    for line in inputFileList:
        stockData.append(line.split(','))
    for i,line in enumerate(stockData):
        if i == 0:
            # write header
            header = ["close_time","TICKER","OPEN","HIGH","LOW","CLOSE","VOL"]
            print header[0]
            # FIXME(review): removing items while enumerating the same
            # list skips every other element, so only half the header
            # cells are removed — and `header` is never inserted (the
            # insert is commented out below).
            for i,part in enumerate(line):
                line.remove(line[i])
                # line.insert(0,header[i + 1])
        else:
            # write data
            print stockData[i]
            # Combine the date and time columns into one timestamp and
            # re-format it as 'Www Mmm DD HH:MM:SS +0000 YYYY'.
            dateObject = parser.parse(str(stockData[i][2] + stockData[i][3]))
            timeobject = dateObject.strftime("%a %b %d %H:%M:%S %Y")
            temp2 = timeobject.split(" ")
            temp2.insert(4,"+0000")
            temp3 = " ".join(temp2)
            line.insert(0,str(temp3))
    return stockData
def generate_paths(root_path=' '):
    ''' Yield one path per minute of January 2012:
    <root>/2012/01/DD/HH/MM.json

    Fixes the original hour loop, range(0, 23), which silently skipped
    hour 23 of every day; the redundant `< 10` branches are replaced by
    zfill(2), which pads one- and two-digit values identically.
    '''
    cal = calendar.Calendar()
    year = 2012
    month = 1
    # itermonthdays pads with 0 for days belonging to adjacent months.
    days = [d for d in cal.itermonthdays(year, month) if d != 0]
    for day in days:
        for hour in range(24):
            for minute in range(60):
                path = "/".join([root_path, str(year), str(month).zfill(2),
                                 str(day).zfill(2), str(hour).zfill(2),
                                 str(minute).zfill(2)])
                yield path + ".json"
def loadFiles(paths, n = 5):
    ''' Generator: yield lists of up to *n* parsed 1-minute twitter files.

    Missing files are logged and skipped. NOTE(review): the batch
    boundary test uses the path index `i`, so a skipped file shifts
    which paths end up grouped together — confirm that is intended.
    A trailing partial batch (fewer than n files) is never yielded.
    '''
    files = []
    for i,path in enumerate(paths):
        # load file
        try:
            files.append(loadTwitterData(path))
            if (i + 1) % n == 0:
                yield files
                files = []
        # error handling
        except IOError as e:
            print(os.strerror(e.errno))
            print "%s not found" %(path)
            pass
#use Os.path.walk python to load multiple files # does not work!! To heavy ram use!!
# def loadAllJSON(root_path):
# alldata = []
# for root, directories, filenames in os.walk(root_path):
# print root
# for filename in filenames:
# if filename.endswith('json'):
# alldata.extend(loadTwitterData(os.path.join(root, filename)))
def main():
    """Build the January-2012 path generator and the lazy batch loader.

    NOTE(review): loadFiles returns a generator, so no file is actually read
    here -- the timing below only measures generator construction, not I/O.
    Consuming the returned value is what triggers the loading.
    """
    # give rootpath
    root_path = "/Users/jeroen_meijaard48/Downloads"
    t0 = time.time()
    paths = generate_paths(root_path)
    #paths = ["/somepath/something"]
    # load 5 files at a time
    test = loadFiles(paths,5)
    # print amount of time needed for run
    print 'Took %.2f seconds' % (time.time() - t0)
    return test

if __name__ == '__main__':
    pass  # NOTE(review): intentionally a no-op? main() is never called -- confirm
|
Eighteen days in the hallucinatory south of the United States. A few months before hurricane Katrina struck New Orleans a young Israeli journalist travels by train, bus, cars, hitchhiking and on foot, to discover what is called the 'true heart' of America, the land of Oh Susana and Georgia on my Mind. A surprising travel diary of the other America, a far cry from the American dream.
Yuval Ben Ami was born in 1976. He has written a play which was staged by the New York theater group Blank Slate. He has written for newspapers and magazines in Israel and abroad, such as The Boston Globe, National Geographic and Masa Aher. Ben Ami has also written a book on Finnish mythology.
"An anxious journey with a guitar to America of the southern farmer and jazz, the land of the blacks, racism, softness, beauty and despair. Ben Ami has the intriguing pace for the place he creates, there is music in his writing, he listens to the wind and the pain and shows the ridiculous and the unbelievable... the book is modest, with a clear eye but without tedious phrases... a book that smells good. It is enjoyable, smart, for it springs from life."
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/HealthcareService) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class HealthcareService(domainresource.DomainResource):
    """ The details of a healthcare service available at a location.

    Generated FHIR DSTU2 resource model; all attributes are populated from
    the JSON dict passed to __init__ via the DomainResource machinery.
    """

    # FHIR resource type name used for (de)serialization
    resource_name = "HealthcareService"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.appointmentRequired = None
        """ If an appointment is required for access to this service.
        Type `bool`. """

        self.availabilityExceptions = None
        """ Description of availability exceptions.
        Type `str`. """

        self.availableTime = None
        """ Times the Service Site is available.
        List of `HealthcareServiceAvailableTime` items (represented as `dict` in JSON). """

        self.characteristic = None
        """ Collection of characteristics (attributes).
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.comment = None
        """ Additional description and/or any specific issues not covered
        elsewhere.
        Type `str`. """

        self.coverageArea = None
        """ Location(s) service is inteded for/available to.
        List of `FHIRReference` items referencing `Location` (represented as `dict` in JSON). """

        self.eligibility = None
        """ Specific eligibility requirements required to use the service.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.eligibilityNote = None
        """ Describes the eligibility conditions for the service.
        Type `str`. """

        self.extraDetails = None
        """ Extra details about the service that can't be placed in the other
        fields.
        Type `str`. """

        self.identifier = None
        """ External identifiers for this item.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.location = None
        """ Location where service may be provided.
        Type `FHIRReference` referencing `Location` (represented as `dict` in JSON). """

        self.notAvailable = None
        """ Not available during this time due to provided reason.
        List of `HealthcareServiceNotAvailable` items (represented as `dict` in JSON). """

        self.photo = None
        """ Facilitates quick identification of the service.
        Type `Attachment` (represented as `dict` in JSON). """

        self.programName = None
        """ Program Names that categorize the service.
        List of `str` items. """

        self.providedBy = None
        """ Organization that provides this service.
        Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """

        self.publicKey = None
        """ PKI Public keys to support secure communications.
        Type `str`. """

        self.referralMethod = None
        """ Ways that the service accepts referrals.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.serviceCategory = None
        """ Broad category of service being performed or delivered.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.serviceName = None
        """ Description of service as presented to a consumer while searching.
        Type `str`. """

        self.serviceProvisionCode = None
        """ Conditions under which service is available/offered.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.serviceType = None
        """ Specific service delivered or performed.
        List of `HealthcareServiceServiceType` items (represented as `dict` in JSON). """

        self.telecom = None
        """ Contacts related to the healthcare service.
        List of `ContactPoint` items (represented as `dict` in JSON). """

        super(HealthcareService, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(HealthcareService, self).elementProperties()
        # Tuple layout: (attr name, JSON name, type, is-list, "one of many"
        # group, required). NOTE(review): layout inferred from the generated
        # fhirclient base classes -- confirm against
        # FHIRAbstractBase.elementProperties before relying on it.
        js.extend([
            ("appointmentRequired", "appointmentRequired", bool, False, None, False),
            ("availabilityExceptions", "availabilityExceptions", str, False, None, False),
            ("availableTime", "availableTime", HealthcareServiceAvailableTime, True, None, False),
            ("characteristic", "characteristic", codeableconcept.CodeableConcept, True, None, False),
            ("comment", "comment", str, False, None, False),
            ("coverageArea", "coverageArea", fhirreference.FHIRReference, True, None, False),
            ("eligibility", "eligibility", codeableconcept.CodeableConcept, False, None, False),
            ("eligibilityNote", "eligibilityNote", str, False, None, False),
            ("extraDetails", "extraDetails", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("location", "location", fhirreference.FHIRReference, False, None, True),
            ("notAvailable", "notAvailable", HealthcareServiceNotAvailable, True, None, False),
            ("photo", "photo", attachment.Attachment, False, None, False),
            ("programName", "programName", str, True, None, False),
            ("providedBy", "providedBy", fhirreference.FHIRReference, False, None, False),
            ("publicKey", "publicKey", str, False, None, False),
            ("referralMethod", "referralMethod", codeableconcept.CodeableConcept, True, None, False),
            ("serviceCategory", "serviceCategory", codeableconcept.CodeableConcept, False, None, False),
            ("serviceName", "serviceName", str, False, None, False),
            ("serviceProvisionCode", "serviceProvisionCode", codeableconcept.CodeableConcept, True, None, False),
            ("serviceType", "serviceType", HealthcareServiceServiceType, True, None, False),
            ("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
        ])
        return js
from . import backboneelement
class HealthcareServiceAvailableTime(backboneelement.BackboneElement):
    """ Times the Service Site is available.

    A collection of times that the Service Site is available.
    """

    resource_name = "HealthcareServiceAvailableTime"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.allDay = None
        """ Always available? e.g. 24 hour service.
        Type `bool`. """

        self.availableEndTime = None
        """ Closing time of day (ignored if allDay = true).
        Type `FHIRDate` (represented as `str` in JSON). """

        self.availableStartTime = None
        """ Opening time of day (ignored if allDay = true).
        Type `FHIRDate` (represented as `str` in JSON). """

        self.daysOfWeek = None
        """ mon | tue | wed | thu | fri | sat | sun.
        List of `str` items. """

        super(HealthcareServiceAvailableTime, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(HealthcareServiceAvailableTime, self).elementProperties()
        # (attr name, JSON name, type, is-list, "one of many" group, required)
        js.extend([
            ("allDay", "allDay", bool, False, None, False),
            ("availableEndTime", "availableEndTime", fhirdate.FHIRDate, False, None, False),
            ("availableStartTime", "availableStartTime", fhirdate.FHIRDate, False, None, False),
            ("daysOfWeek", "daysOfWeek", str, True, None, False),
        ])
        return js
class HealthcareServiceNotAvailable(backboneelement.BackboneElement):
    """ Not available during this time due to provided reason.

    The HealthcareService is not available during this period of time due to
    the provided reason.
    """

    resource_name = "HealthcareServiceNotAvailable"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.description = None
        """ Reason presented to the user explaining why time not available.
        Type `str`. """

        self.during = None
        """ Service not availablefrom this date.
        Type `Period` (represented as `dict` in JSON). """

        super(HealthcareServiceNotAvailable, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(HealthcareServiceNotAvailable, self).elementProperties()
        # (attr name, JSON name, type, is-list, "one of many" group, required)
        # description is the only required element here (last flag True)
        js.extend([
            ("description", "description", str, False, None, True),
            ("during", "during", period.Period, False, None, False),
        ])
        return js
class HealthcareServiceServiceType(backboneelement.BackboneElement):
    """ Specific service delivered or performed.

    A specific type of service that may be delivered or performed.
    """

    resource_name = "HealthcareServiceServiceType"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.specialty = None
        """ Specialties handled by the Service Site.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.type = None
        """ Type of service delivered or performed.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(HealthcareServiceServiceType, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(HealthcareServiceServiceType, self).elementProperties()
        # (attr name, JSON name, type, is-list, "one of many" group, required)
        # type is the only required element here (last flag True)
        js.extend([
            ("specialty", "specialty", codeableconcept.CodeableConcept, True, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, True),
        ])
        return js
from . import attachment
from . import codeableconcept
from . import contactpoint
from . import fhirdate
from . import fhirreference
from . import identifier
from . import period
|
Need Rubbish Removed in the Wellington area? Jim’s Mowing offers yard clean ups and rubbish removal services in Wellington.
You can call Jim’s Rubbish Removal team in Wellington on 0800 454 654 [8am-8pm 7 days] or you can book a rubbish removal job online for a time that suits to finally get that yard clean junk free, with all your garden waste & rubbish removed.
Our Jim’s Rubbish Removal Wellington team can also offer advice on gardening, composting, recycling, garden layout, design, landscaping and plant selection. So once all that rubbish is removed, don’t be shy and make full use of the Jim’s Gardening team and their expert gardening knowledge. Put them to work weeding your garden, removing unwanted plants, pruning trees and hedges, installing garden watering systems, mowing the lawn, laying new turf perhaps or planting out new garden beds.
Call Jim’s Wellington Rubbish Removal – 8AM to 8PM (7 Days) – on 0800 454 654 now or simply Book a Rubbish Removal Service online 24/7. You’ll be glad you did!
|
#!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Program to create a list of stations for use in the Broadband Platform.
"""
# Import Python modules
import os
import sys
def main():
    """
    Get min and max latitude and longitude values from the user, also
    read the step to be used in generating the station list. This code
    can only be used to generate a maximum of 9999 stations.

    Command line: lat_min lat_max lon_min lon_max step (decimal degrees).
    Prints one station per line: "<lon> <lat> staNNNN 10".
    """
    if len(sys.argv) < 6:
        print "Usage: %s lat_min lat_max lon_min lon_max step" % sys.argv[0]
        sys.exit(0)

    lat_min = float(sys.argv[1])
    lat_max = float(sys.argv[2])
    lon_min = float(sys.argv[3])
    lon_max = float(sys.argv[4])
    step = float(sys.argv[5])

    station_number = 0
    cur_lat = lat_min
    cur_lon = lon_min

    # Scan the grid row by row: latitude in the outer loop, longitude inner.
    # NOTE(review): repeated float addition accumulates rounding error, so the
    # final row/column may be included or skipped depending on step -- confirm
    # acceptable. The 9999-station limit from the docstring is not enforced;
    # the sta%04d field simply widens past 9999.
    while cur_lat <= lat_max:
        while cur_lon <= lon_max:
            station_number = station_number + 1
            print "%2.3f %2.3f sta%04d 10 " % (cur_lon,
                                               cur_lat,
                                               station_number)
            cur_lon = cur_lon + step
        cur_lat = cur_lat + step
        # restart at the western edge for the next latitude row
        cur_lon = lon_min

if __name__ == "__main__":
    main()
|
New to hit Duncannon is our to-be-built two story home. Built by Exceptional Homes, this 1920+ sqft traditional home sits on 2+ acres and has many options still available to be selected. 900 sq ft first floor, 1020 sq ft 2nd floor, 320 sq ft unfinished bonus room (finished room available). Laminate flooring in kitchen and dining room, carpet in living room, vinyl in powder room, granite counter tops, angled peninsula in kitchen (optional upgrade), solid wood kitchen cabinets, soft close drawers, tile in master bath, walk-in closets in master bedroom, vinyl in hall bath, 2nd floor laundry. Geo-thermal available. For questions or more information, please contact the Listing Agent - Don Failor @ 717 574 3211. Don't miss this opportunity for a brand new 3 bed/2.5 bath home for under $235,000!
|
from sqlalchemy import Column, Integer, String, Boolean, Numeric, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declared_attr
from schnitzelserver.models.modelbase import SchnitzelBase, inherits
class GroupMembership(SchnitzelBase):
    """Association table for the User <-> Group many-to-many relationship.

    Used as the ``secondary`` table of ``User.groups``.
    """
    __tablename__ = 'schnitzel_group_membership'
    # NOTE(review): presumably excludes this table from audit logging --
    # confirm against SchnitzelBase.
    __audit__ = False

    user = Column(Integer, ForeignKey('schnitzel_user.id'), primary_key=True)
    group = Column(Integer, ForeignKey('schnitzel_group.id'), primary_key=True)
class User(SchnitzelBase):
    """
    A basic schnitzel-user which can belong to groups
    """
    __tablename__ = 'schnitzel_user'
    # NOTE(review): presumably enables audit logging for this model --
    # confirm against SchnitzelBase.
    __audit__ = True

    id = Column(Integer, primary_key=True)
    username = Column(String(50), unique=True)
    # many-to-many via GroupMembership; adds a `members` backref on Group
    groups = relationship("Group",
                          secondary="schnitzel_group_membership",
                          backref="members")
    locked_permission = Column(Boolean)  # if set to true, can't be granted additional
                                         # permissions

    def validate(self):
        # NOTE(review): looks like a leftover debug print -- consider logging
        print("i am the user")
        super().validate()
@inherits('schnitzel_user')
class Module(SchnitzelBase):
    """
    A module has its dedicated user being in all groups
    """
    __tablename__ = 'schnitzel_module'

    # Shares its primary key with a schnitzel_user row; the @inherits
    # decorator presumably wires up joined-table inheritance -- confirm in
    # schnitzelserver.models.modelbase.
    id = Column(Integer, ForeignKey('schnitzel_user.id'), primary_key=True)
    version = Column(Numeric)
class Group(SchnitzelBase):
    """
    A basic group to have access on models/fields
    """
    __tablename__ = 'schnitzel_group'

    id = Column(Integer, primary_key=True)
    name = Column(String(100))
    # a `members` collection is added via the backref on User.groups
|
There is no Dojo listed in Glusburn. This page shows 20 Martial Arts Schools found within 11 Miles of Glusburn. Based on the information available from these Dojos, Kick Boxing, Mixed Martial Arts (MMA), Karate, Wing Chun, Aikido, Tae Kwon Do & Jiu-Jitsu (Jujutsu/Jujitsu) are the most offered Martial Arts in Glusburn area. Most of these schools offer self defense classes for men, women and children near Glusburn.
Chart based on information available from Martial Arts Schools in Glusburn area.
Distance is shown approximately from the downtown area of Glusburn. For a list of Martial Arts Schools near you, Search with your complete address. Click on a Dojo name to view details. Clicking on 'Map' next to a Dojo will show the school on the Map.
See Martial Arts Schools in places nearby Glusburn: Steeton, Silsden, Skipton, Embsay & Haworth.
Are you associated with a Martial Arts School, Club or Dojo in the Glusburn area? Please make sure it is listed with dojos.co.uk.
|
"""
This file is part of RAPD
Copyright (C) 2016-2018 Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-01-29"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"

import datetime
import logging
import os
import threading

# RAPD imports
from control_server import LaunchAction

# This is a rapd cloud handler (module flag -- presumably scanned by the
# cloud monitor when discovering handlers; confirm)
CLOUD_HANDLER = True

# This handler's request type
REQUEST_TYPE = "reindex"

# A unique UUID for this handler (uuid.uuid1().hex)
ID = "fc774d3ad98e11e5b08ac82a1400d5bc"
class Handler(threading.Thread):
    """
    Handles the initialization of reprocessing (reindexing) runs in a
    separate thread.

    index_type is one of "single", "pair" or "new_pair" (a pair assembled
    from two previously-single images).
    """

    # single, pair, or new_pair
    index_type = None

    # The data on the image(s)
    image1 = None
    image2 = None

    # Previous result(s) information
    original_result = None
    process_settings = None

    def __init__(self, request, database, settings, reply_settings):
        """Initialize the handler for reindexing frames.

        request -- dict describing the cloud request (cloud_request_id,
                   original_id, original_type, new_setting_id, ...)
        database -- adapter used for settings/image/result lookups
        settings -- site settings, passed through to LaunchAction
        reply_settings -- destination for results, passed through as well
        """
        # Grab the logger
        self.logger = logging.getLogger("RAPDLogger")
        self.logger.info("ReprocessHandler::__init__ %s", request)

        # Initialize the thread
        threading.Thread.__init__(self)

        # Store passed-in variables
        self.request = request
        self.database = database
        self.settings = settings
        self.reply_settings = reply_settings

        # Kick it off -- run() executes in its own thread
        self.start()

    def run(self):
        """Main process of the handler."""
        # Mark that the request has been addressed
        self.database.markCloudRequest(self.request["cloud_request_id"], "working")

        # Get the settings for processing
        self.get_process_data()

        # Get the images data
        self.get_image_data()

        # Get the working directory and repr
        new_work_dir, new_repr = self.get_work_dir()

        # Save some typing
        data_root_dir = self.original_result["data_root_dir"]

        # Header beam position settings will be overridden sometimes
        # Not overridden
        if self.process_settings["x_beam"] == "0":
            # Source the beam center from the calculated one from image1
            # This gives better indexing results
            if self.image1["calc_beam_center_x"] > 0.0:
                self.process_settings["x_beam"] = self.image1["calc_beam_center_x"]
                self.process_settings["y_beam"] = self.image1["calc_beam_center_y"]

        # new_pair runs are processed as pairs
        process_type = {"single" : "single",
                        "pair" : "pair",
                        "new_pair" : "pair"}

        # Add the process to the database to display as in-process
        process_id = self.database.addNewProcess(type=process_type[self.index_type],
                                                 rtype="reprocess",
                                                 data_root_dir=data_root_dir,
                                                 repr=new_repr)

        # Add the ID entry to the data dict(s)
        self.image1.update({"ID" : os.path.basename(new_work_dir),
                            "process_id" : process_id,
                            "repr" : new_repr})
        if self.image2:
            self.image2.update({"ID" : os.path.basename(new_work_dir),
                                "process_id" : process_id,
                                "repr" : new_repr})

        # Now package directories into a dict for easy access by worker class
        new_dirs = {"work" : new_work_dir,
                    "data_root_dir" : self.original_result["data_root_dir"]}

        # Add the request to self.process_settings so it can be passed on
        self.process_settings["request"] = self.request

        # NOTE(review): the request was already marked "working" at the top of
        # run(); this second call is redundant but kept for behavior parity.
        self.database.markCloudRequest(self.request["cloud_request_id"], "working")

        # Mark in the cloud_current table
        self.database.addCloudCurrent(self.request)

        # Connect to the server and autoindex
        # Pair
        if "pair" in self.index_type:
            LaunchAction(command=("AUTOINDEX-PAIR",
                                  new_dirs,
                                  self.image1,
                                  self.image2,
                                  self.process_settings,
                                  self.reply_settings),
                         settings=self.settings)
        # Single
        else:
            LaunchAction(("AUTOINDEX",
                          new_dirs,
                          self.image1,
                          self.process_settings,
                          self.reply_settings),
                         self.settings)

    def get_process_data(self):
        """Retrieve information on the previous process from the database."""
        # Get the settings for processing
        self.process_settings = self.database.getSettings(setting_id=self.request["new_setting_id"])
        self.logger.debug("process_settings: %s", self.process_settings)

        # Get the original result from the database
        self.original_result = self.database.getResultById(self.request["original_id"],
                                                           self.request["original_type"])
        self.logger.debug("original_result: %s", self.original_result)

    def get_image_data(self):
        """Retrieve image data for the image(s) in the autoindexing.

        Sets self.index_type and self.image1 (and self.image2 for pairs).
        """
        # Coming from an indexing of a single image
        if self.request["original_type"] == "single":
            # Reindex using two singles to make a pair
            if self.request["additional_image"] != 0:
                self.index_type = "new_pair"
                self.image1 = self.database.getImageByImageID(
                    image_id=self.original_result["image1_id"])
                self.image2 = self.database.getImageByImageID(
                    image_id=self.request["additional_image"])
            # Single image reindex
            else:
                self.index_type = "single"
                self.image1 = self.database.getImageByImageID(
                    image_id=self.original_result["image_id"])
        # Pair reindex
        elif self.request["original_type"] == "pair":
            self.index_type = "pair"
            self.image1 = self.database.getImageByImageID(
                image_id=self.original_result["image1_id"])
            self.image2 = self.database.getImageByImageID(
                image_id=self.original_result["image2_id"])

    def get_work_dir(self):
        """Calculate the new work directory for this reindexing.

        Returns (work_dir, repr); work_dir is made unique with a numeric
        suffix if the candidate already exists on disk.
        """
        # Toplevel
        if self.process_settings["work_dir_override"] == "False":
            # Same as before
            # NOTE(review): if the original work_dir contains neither
            # "/single/" nor "/pair/", toplevel_dir is left unbound and the
            # join below raises NameError -- confirm this cannot happen.
            if "/single/" in self.original_result["work_dir"]:
                toplevel_dir = os.path.dirname(
                    self.original_result["work_dir"].split("single")[0])
            elif "/pair/" in self.original_result["work_dir"]:
                toplevel_dir = os.path.dirname(
                    self.original_result["work_dir"].split("pair")[0])
        else:
            # New toplevel dir
            toplevel_dir = self.process_settings["work_directory"]

        # Type level
        if self.index_type == "new_pair":
            typelevel_dir = "pair"
        else:
            typelevel_dir = self.index_type

        # Date level
        datelevel_dir = datetime.date.today().isoformat()

        # Sub level
        if self.index_type == "single":
            if self.settings["DETECTOR_SUFFIX"]:
                sub_dir = os.path.basename(self.image1["fullname"]).replace(
                    self.settings["DETECTOR_SUFFIX"], "")
            else:
                sub_dir = os.path.basename(self.image1["fullname"])
        elif self.index_type == "pair":
            sub_dir = "_".join((self.image1["image_prefix"],
                                "+".join((str(self.image1["image_number"]).lstrip("0"),
                                          str(self.image2["image_number"]).lstrip("0")))))
        elif self.index_type == "new_pair":
            # Image prefixes are the same
            if self.image1["image_prefix"] == self.image2["image_prefix"]:
                # BUGFIX: was self.image["image_prefix"] -- self.image never
                # exists, so this branch raised AttributeError.
                sub_dir = "_".join((self.image1["image_prefix"],
                                    "+".join((str(self.image1["image_number"]).lstrip("0"),
                                              str(self.image2["image_number"]).lstrip("0")))))
            # Different image prefixes - same for now, but could change if decide to
            else:
                sub_dir = "_".join((self.image1["image_prefix"],
                                    "+".join((str(self.image1["image_number"]).lstrip("0"),
                                              str(self.image2["image_number"]).lstrip("0")))))

        # Join the three levels
        work_dir_candidate = os.path.join(toplevel_dir, typelevel_dir, datelevel_dir, sub_dir)

        # Make sure this is an original directory
        if os.path.exists(work_dir_candidate):
            # Already used -- append the first free numeric qualifier
            self.logger.debug("%s has already been used, will add qualifier", work_dir_candidate)
            for i in range(1, 10000):
                if not os.path.exists("_".join((work_dir_candidate, str(i)))):
                    work_dir_candidate = "_".join((work_dir_candidate, str(i)))
                    self.logger.debug("%s will be used for this image", work_dir_candidate)
                    break
            # (removed dead "else: i += 1" -- the for loop already advances i)

        return work_dir_candidate, sub_dir
|
Nestled on a peaceful, level golf course lot in CC of Roswell this extraordinary 7/5.2 executive home offers high-end living & inviting warmth. Inside the move in ready home you'll find custom, high end renovations...chef's kitchen w/viking ovens, gorgeous sunroom, master on main, updated baths, gleaming hrdwds, detailed trim, finished terrace lvl w/ amazing game rm & in-law suite. Relax w/family & friends by the enticing pool & spacious deck! Loaded w/opportunity, you'll relish living in the unique resort style community moments away from 400 & local hot spots!
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.cred}, now with 30% more starch.
"""
from zope.interface import implements, Interface
from twisted.trial import unittest
from twisted.cred import portal, checkers, credentials, error
from twisted.python import components
from twisted.internet import defer
try:
from crypt import crypt
except ImportError:
crypt = None
try:
from twisted.cred import pamauth
except ImportError:
pamauth = None
class ITestable(Interface):
    """Marker interface requested from the realm/portal in these tests."""
    pass
class TestAvatar:
    """Minimal avatar that records whether login()/logout() were called."""

    def __init__(self, name):
        self.name = name
        self.loggedIn = self.loggedOut = False

    def login(self):
        # logging in the same avatar twice is a test bug
        assert not self.loggedIn
        self.loggedIn = True

    def logout(self):
        self.loggedOut = True
class Testable(components.Adapter):
    """Adapter exposing a TestAvatar (as self.original) via ITestable."""
    implements(ITestable)

# components.Interface(TestAvatar).adaptWith(Testable, ITestable)

# Register so that ITestable(avatar) wraps a TestAvatar in a Testable.
components.registerAdapter(Testable, TestAvatar, ITestable)
class IDerivedCredentials(credentials.IUsernamePassword):
    """Credential interface inheriting IUsernamePassword; deliberately not
    registered with any checker (exercised by test_derivedInterface)."""
    pass
class DerivedCredentials(object):
    """Credentials providing an interface *derived* from IUsernamePassword."""
    implements(IDerivedCredentials, ITestable)

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def checkPassword(self, password):
        """Return True when the supplied password matches the stored one."""
        return self.password == password
class TestRealm:
    """In-memory realm caching one TestAvatar per avatarId."""
    implements(portal.IRealm)

    def __init__(self):
        self.avatars = {}

    def requestAvatar(self, avatarId, mind, *interfaces):
        # reuse the cached avatar when this id has logged in before
        try:
            avatar = self.avatars[avatarId]
        except KeyError:
            avatar = self.avatars[avatarId] = TestAvatar(avatarId)
        avatar.login()
        iface = interfaces[0]
        return (iface, iface(avatar), avatar.logout)
class NewCredTests(unittest.TestCase):
    """Portal login flow against an in-memory username/password checker."""

    def setUp(self):
        # One realm, one portal, one checker holding user bob/hello.
        r = self.realm = TestRealm()
        p = self.portal = portal.Portal(r)
        up = self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        up.addUser("bob", "hello")
        p.registerChecker(up)

    def testListCheckers(self):
        """The portal reports the credential interfaces its checker accepts."""
        expected = [credentials.IUsernamePassword, credentials.IUsernameHashedPassword]
        got = self.portal.listCredentialsInterfaces()
        expected.sort()
        got.sort()
        self.assertEqual(got, expected)

    def testBasicLogin(self):
        """A correct username/password yields a logged-in ITestable avatar."""
        l = []; f = []
        self.portal.login(credentials.UsernamePassword("bob", "hello"),
                          self, ITestable).addCallback(
            l.append).addErrback(f.append)
        if f:
            raise f[0]
        # print l[0].getBriefTraceback()
        iface, impl, logout = l[0]
        # whitebox
        self.assertEqual(iface, ITestable)
        self.failUnless(iface.providedBy(impl),
                        "%s does not implement %s" % (impl, iface))
        # greybox
        self.failUnless(impl.original.loggedIn)
        self.failUnless(not impl.original.loggedOut)
        logout()
        self.failUnless(impl.original.loggedOut)

    def test_derivedInterface(self):
        """
        Login with credentials implementing an interface inheriting from an
        interface registered with a checker (but not itself registered).
        """
        l = []
        f = []
        self.portal.login(DerivedCredentials("bob", "hello"), self, ITestable
            ).addCallback(l.append
            ).addErrback(f.append)
        if f:
            raise f[0]
        iface, impl, logout = l[0]
        # whitebox
        self.assertEqual(iface, ITestable)
        self.failUnless(iface.providedBy(impl),
                        "%s does not implement %s" % (impl, iface))
        # greybox
        self.failUnless(impl.original.loggedIn)
        self.failUnless(not impl.original.loggedOut)
        logout()
        self.failUnless(impl.original.loggedOut)

    def testFailedLogin(self):
        """A wrong password fails with UnauthorizedLogin."""
        l = []
        self.portal.login(credentials.UsernamePassword("bob", "h3llo"),
                          self, ITestable).addErrback(
            lambda x: x.trap(error.UnauthorizedLogin)).addCallback(l.append)
        self.failUnless(l)
        self.assertEqual(error.UnauthorizedLogin, l[0])

    def testFailedLoginName(self):
        """An unknown username fails with UnauthorizedLogin."""
        l = []
        self.portal.login(credentials.UsernamePassword("jay", "hello"),
                          self, ITestable).addErrback(
            lambda x: x.trap(error.UnauthorizedLogin)).addCallback(l.append)
        self.failUnless(l)
        self.assertEqual(error.UnauthorizedLogin, l[0])
class OnDiskDatabaseTests(unittest.TestCase):
    """Tests for checkers.FilePasswordDB reading a user:pass file on disk."""

    users = [
        ('user1', 'pass1'),
        ('user2', 'pass2'),
        ('user3', 'pass3'),
    ]

    def testUserLookup(self):
        """Case-sensitive lookup returns (user, pass); wrong case raises."""
        dbfile = self.mktemp()
        db = checkers.FilePasswordDB(dbfile)
        f = file(dbfile, 'w')  # `file` is the Python 2 builtin alias for open()
        for (u, p) in self.users:
            f.write('%s:%s\n' % (u, p))
        f.close()

        for (u, p) in self.users:
            self.failUnlessRaises(KeyError, db.getUser, u.upper())
            self.assertEqual(db.getUser(u), (u, p))

    def testCaseInSensitivity(self):
        """With caseSensitive=0, upper-cased names still resolve."""
        dbfile = self.mktemp()
        db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
        f = file(dbfile, 'w')
        for (u, p) in self.users:
            f.write('%s:%s\n' % (u, p))
        f.close()

        for (u, p) in self.users:
            self.assertEqual(db.getUser(u.upper()), (u, p))

    def testRequestAvatarId(self):
        """Plain UsernamePassword credentials resolve to the usernames."""
        dbfile = self.mktemp()
        db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
        f = file(dbfile, 'w')
        for (u, p) in self.users:
            f.write('%s:%s\n' % (u, p))
        f.close()
        creds = [credentials.UsernamePassword(u, p) for u, p in self.users]
        d = defer.gatherResults(
            [defer.maybeDeferred(db.requestAvatarId, c) for c in creds])
        d.addCallback(self.assertEqual, [u for u, p in self.users])
        return d

    def testRequestAvatarId_hashed(self):
        """UsernameHashedPassword credentials also resolve (plaintext db)."""
        dbfile = self.mktemp()
        db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
        f = file(dbfile, 'w')
        for (u, p) in self.users:
            f.write('%s:%s\n' % (u, p))
        f.close()
        creds = [credentials.UsernameHashedPassword(u, p) for u, p in self.users]
        d = defer.gatherResults(
            [defer.maybeDeferred(db.requestAvatarId, c) for c in creds])
        d.addCallback(self.assertEqual, [u for u, p in self.users])
        return d
class HashedPasswordOnDiskDatabaseTests(unittest.TestCase):
    """Tests for FilePasswordDB holding crypt()-hashed passwords."""

    users = [
        ('user1', 'pass1'),
        ('user2', 'pass2'),
        ('user3', 'pass3'),
    ]

    def hash(self, u, p, s):
        # hash callback handed to FilePasswordDB: crypt password p with salt s
        return crypt(p, s)

    def setUp(self):
        dbfile = self.mktemp()
        self.db = checkers.FilePasswordDB(dbfile, hash=self.hash)
        f = file(dbfile, 'w')  # Python 2 builtin alias for open()
        for (u, p) in self.users:
            # stored hashes use the first two username chars as salt
            f.write('%s:%s\n' % (u, crypt(p, u[:2])))
        f.close()
        r = TestRealm()
        self.port = portal.Portal(r)
        self.port.registerChecker(self.db)

    def testGoodCredentials(self):
        """Plaintext credentials match the stored hashed entries."""
        goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
        d = defer.gatherResults([self.db.requestAvatarId(c) for c in goodCreds])
        d.addCallback(self.assertEqual, [u for u, p in self.users])
        return d

    def testGoodCredentials_login(self):
        """Logging in through the portal yields avatars named after the users."""
        goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
        d = defer.gatherResults([self.port.login(c, None, ITestable)
                                 for c in goodCreds])
        d.addCallback(lambda x: [a.original.name for i, a, l in x])
        d.addCallback(self.assertEqual, [u for u, p in self.users])
        return d

    def testBadCredentials(self):
        """Wrong passwords fail with UnauthorizedLogin."""
        badCreds = [credentials.UsernamePassword(u, 'wrong password')
                    for u, p in self.users]
        d = defer.DeferredList([self.port.login(c, None, ITestable)
                                for c in badCreds], consumeErrors=True)
        d.addCallback(self._assertFailures, error.UnauthorizedLogin)
        return d

    def testHashedCredentials(self):
        """Pre-hashed credentials are rejected with UnhandledCredentials."""
        hashedCreds = [credentials.UsernameHashedPassword(u, crypt(p, u[:2]))
                       for u, p in self.users]
        d = defer.DeferredList([self.port.login(c, None, ITestable)
                                for c in hashedCreds], consumeErrors=True)
        d.addCallback(self._assertFailures, error.UnhandledCredentials)
        return d

    def _assertFailures(self, failures, *expectedFailures):
        # DeferredList results are (success-flag, result) pairs
        for flag, failure in failures:
            self.assertEqual(flag, defer.FAILURE)
            failure.trap(*expectedFailures)
        return None

    if crypt is None:
        skip = "crypt module not available"
class PluggableAuthenticationModulesTests(unittest.TestCase):
    """
    Tests for L{checkers.PluggableAuthenticationModulesChecker} against a
    stubbed-out PAM backend.
    """
    def setUp(self):
        """
        Replace L{pamauth.callIntoPAM} with a dummy implementation with
        easily-controlled behavior.
        """
        self.patch(pamauth, 'callIntoPAM', self.callIntoPAM)
    def callIntoPAM(self, service, user, conv):
        # Fake PAM entry point: only the 'Twisted' service and the user
        # 'testuser' are accepted, and the conversation callback must
        # answer the three canned prompts exactly as below.
        if service != 'Twisted':
            raise error.UnauthorizedLogin('bad service: %s' % service)
        if user != 'testuser':
            raise error.UnauthorizedLogin('bad username: %s' % user)
        questions = [
            (1, "Password"),
            (2, "Message w/ Input"),
            (3, "Message w/o Input"),
        ]
        replies = conv(questions)
        if replies != [
            ("password", 0),
            ("entry", 0),
            ("", 0)
        ]:
            raise error.UnauthorizedLogin('bad conversion: %s' % repr(replies))
        return 1
    def _makeConv(self, d):
        # Build a conversation function that answers a prompt of type t
        # with d[t] (result code 0) for every question asked.
        def conv(questions):
            return defer.succeed([(d[t], 0) for t, q in questions])
        return conv
    def testRequestAvatarId(self):
        # Correct answers to all three prompts authenticate 'testuser'.
        db = checkers.PluggableAuthenticationModulesChecker()
        conv = self._makeConv({1:'password', 2:'entry', 3:''})
        creds = credentials.PluggableAuthenticationModules('testuser',
            conv)
        d = db.requestAvatarId(creds)
        d.addCallback(self.assertEqual, 'testuser')
        return d
    def testBadCredentials(self):
        # Wrong (empty) answers are rejected by the fake PAM backend.
        db = checkers.PluggableAuthenticationModulesChecker()
        conv = self._makeConv({1:'', 2:'', 3:''})
        creds = credentials.PluggableAuthenticationModules('testuser',
            conv)
        d = db.requestAvatarId(creds)
        self.assertFailure(d, error.UnauthorizedLogin)
        return d
    def testBadUsername(self):
        # An unknown username is rejected even with correct answers.
        db = checkers.PluggableAuthenticationModulesChecker()
        conv = self._makeConv({1:'password', 2:'entry', 3:''})
        creds = credentials.PluggableAuthenticationModules('baduser',
            conv)
        d = db.requestAvatarId(creds)
        self.assertFailure(d, error.UnauthorizedLogin)
        return d
    if not pamauth:
        skip = "Can't run without PyPAM"
class CheckersMixin:
    """
    L{unittest.TestCase} mixin for testing that some checkers accept
    and deny specified credentials.
    Subclasses must provide
    - C{getCheckers} which returns a sequence of
      L{checkers.ICredentialChecker}
    - C{getGoodCredentials} which returns a list of 2-tuples of
      credential to check and avatarId to expect.
    - C{getBadCredentials} which returns a list of credentials
      which are expected to be unauthorized.
    """
    @defer.inlineCallbacks
    def test_positive(self):
        """
        The given credentials are accepted by all the checkers, and give
        the expected C{avatarID}s
        """
        for chk in self.getCheckers():
            for (cred, avatarId) in self.getGoodCredentials():
                r = yield chk.requestAvatarId(cred)
                self.assertEqual(r, avatarId)
    @defer.inlineCallbacks
    def test_negative(self):
        """
        The given credentials are rejected by all the checkers.
        """
        for chk in self.getCheckers():
            for cred in self.getBadCredentials():
                d = chk.requestAvatarId(cred)
                yield self.assertFailure(d, error.UnauthorizedLogin)
class HashlessFilePasswordDBMixin:
    """
    Mixin supplying plaintext credentials and L{checkers.FilePasswordDB}
    instances over several on-disk formats for L{CheckersMixin} test cases.
    """
    credClass = credentials.UsernamePassword
    diskHash = None
    networkHash = staticmethod(lambda x: x)

    _validCredentials = [
        ('user1', 'password1'),
        ('user2', 'password2'),
        ('user3', 'password3')]

    def getGoodCredentials(self):
        """
        Yield (credentials, avatarId) pairs expected to authenticate.
        """
        for u, p in self._validCredentials:
            yield self.credClass(u, self.networkHash(p)), u

    def getBadCredentials(self):
        """
        Yield credentials expected to be rejected: mismatched passwords
        and an unknown user.
        """
        for u, p in [('user1', 'password3'),
                     ('user2', 'password1'),
                     ('bloof', 'blarf')]:
            yield self.credClass(u, self.networkHash(p))

    def getCheckers(self):
        """
        Yield L{checkers.FilePasswordDB} checkers over three different file
        layouts, each with the in-memory cache enabled and disabled.
        """
        diskHash = self.diskHash or (lambda x: x)
        hashCheck = self.diskHash and (lambda username, password, stored: self.diskHash(password))
        for cache in True, False:
            # Default layout: 'user:password' per line.
            fn = self.mktemp()
            # 'open' instead of the Python 2-only 'file' builtin; the
            # context manager guarantees the handle is closed.
            with open(fn, 'w') as fObj:
                for u, p in self._validCredentials:
                    fObj.write('%s:%s\n' % (u, diskHash(p)))
            yield checkers.FilePasswordDB(fn, cache=cache, hash=hashCheck)

            # Space-delimited, password first, with filler fields.
            fn = self.mktemp()
            with open(fn, 'w') as fObj:
                for u, p in self._validCredentials:
                    fObj.write('%s dingle dongle %s\n' % (diskHash(p), u))
            yield checkers.FilePasswordDB(fn, ' ', 3, 0, cache=cache, hash=hashCheck)

            # Comma-delimited, title-cased usernames, case-insensitive match.
            fn = self.mktemp()
            with open(fn, 'w') as fObj:
                for u, p in self._validCredentials:
                    fObj.write('zip,zap,%s,zup,%s\n' % (u.title(), diskHash(p)))
            yield checkers.FilePasswordDB(fn, ',', 2, 4, False, cache=cache, hash=hashCheck)
class LocallyHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
    """Credentials arrive in the clear; the on-disk copy is hex-'hashed'."""
    diskHash = staticmethod(lambda x: x.encode('hex'))
class NetworkHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
    """The network sends hex-'hashed' passwords; the disk copy is plaintext."""
    networkHash = staticmethod(lambda x: x.encode('hex'))
    class credClass(credentials.UsernameHashedPassword):
        # Compare by decoding the hex digest back to the plaintext password.
        def checkPassword(self, password):
            return self.hashed.decode('hex') == password
class HashlessFilePasswordDBCheckerTests(HashlessFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
    """Run the CheckersMixin tests against plaintext password files."""
    pass
class LocallyHashedFilePasswordDBCheckerTests(LocallyHashedFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
    """Run the CheckersMixin tests against hex-encoded on-disk files."""
    pass
class NetworkHashedFilePasswordDBCheckerTests(NetworkHashedFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
    """Run the CheckersMixin tests with hex-encoded network credentials."""
    pass
|
Be your own sunlight! The Sun Drenched jumpsuit is the ideal outfit to accompany you throughout the summer period — and not only!
Made of 100% cotton net this jumpsuit is perfect either for a morning walk or as an occasion wear outfit.
Pair it with flat or high heels sandals.
Even in autumn, wear it with tights and booties and be as stylish as it gets!
|
import module
# unique to module
from urlparse import urlparse
class Module(module.Module):
    """Recon-ng module: enumerate hostnames via the Google CSE API."""

    def __init__(self, params):
        module.Module.__init__(self, params, query='SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL ORDER BY domain')
        self.info = {
            'Name': 'Google CSE Hostname Enumerator',
            'Author': 'Tim Tomes (@LaNMaSteR53)',
            'Description': 'Leverages the Google Custom Search Engine API to harvest hosts using the \'site\' search operator. Updates the \'hosts\' table with the results.'
        }

    def module_run(self, domains):
        """Harvest hostnames for each domain and store them in the database.

        Each search round excludes the hosts found so far with '-site:'
        operators, so new results keep surfacing until the API is exhausted.
        """
        cnt = 0  # total hosts seen across all domains
        new = 0  # hosts newly added to the database
        for domain in domains:
            self.heading(domain, level=0)
            base_query = 'site:' + domain
            hosts = []
            while True:
                # Build the exclusion list from previous results in one pass
                # instead of accumulating a string in a loop.
                exclusions = ''.join(' -site:%s' % (host) for host in hosts)
                query = base_query + exclusions
                results = self.search_google_api(query, limit=1)
                if not results: break
                for result in results:
                    host = urlparse(result['link']).netloc
                    # Idiomatic membership test (was "not host in hosts").
                    if host not in hosts:
                        hosts.append(host)
                        self.output(host)
                        # add each host to the database
                        new += self.add_hosts(host)
            cnt += len(hosts)
        self.summarize(new, cnt)
|
Now *you* can hire *me*!
I’ve got to admit – lately I have felt more than a little dissatisfaction with my professional life. Not that I dislike what I do – writing and web management are my natural and chosen field – but after thirteen years I wouldn’t mind a little more variety.
When I dropped my workweek down to four days last fall, one thought was that the extra time would make taking on freelance work an option again. It’s been about ten years since I’ve taken on any writing or editing work, and in that time I’ve developed a whole new set of professional skills around social media planning and management – which I would love the opportunity to use outside of my regular work environment (it’s a tad bureaucratic at times). So! Instead of working five days a week at my (kinda boring) job, four days a week there and one day a week on my own seems like a good balance between stability and work-diversity.
|
"""
Name : PinnacleFileFinder.py
Usage : PinnacleFileFinder.py -h
Author : Bar Harel
Description:
- Takes a .AXP file and creates a list of all the used files in that pinnacle project with their order and time of appearance.
- The list can be output as a text file or .csv for use with programs like Excel
Todo:
- Add all possible file formats
Changelog:
- 06/03/15 - GitHub :-)
- 21/02/15 - Creation
"""
import re, argparse, os, csv
# The text encoding Pinnacle Studio uses for .AXP project files
PINNACLE_ENCODING = "utf_16_le"
# Media file extensions recognised inside the project; both case variants are
# listed explicitly because the RE is compiled case-sensitively
FILE_FORMATS = "jpg|JPG|MOV|mov|png|PNG|avi|AVI"
# Unicode RE capturing each clip's appearance time (from RecIn="...(time)")
# and its file name (from the <Name> element)
TIME_NAME_RE = ur"RecIn=\".*?\(([^>]*?)\).*?<Name>([^\n]+?\.(?:%s))</Name>" % (FILE_FORMATS)
# Default output paths (relative to the current directory)
CSV_DEFAULT = r".\PinnacleFiles.csv"
TXT_DEFAULT = r".\PinnacleFiles.txt"
# Upper bound on a plausible file-name length; longer matches are treated as
# false positives and skipped
MAX_FILE_NAME = 100
def convert_time(seconds_as_float):
    """
    Convert a duration in seconds to an (hours, minutes, seconds, ms) tuple.

    The ms component is the fractional part of the input (a float in [0, 1));
    the other components are whole-valued floats.
    """
    whole_seconds, ms = divmod(seconds_as_float, 1)
    total_minutes, secs = divmod(whole_seconds, 60)
    hrs, mins = divmod(total_minutes, 60)
    return hrs, mins, secs, ms
def output_file(findings_list, output_format, output_path):
"""
Function : output_txt(findings_list, output_format, output_path) --> NoneType
Purpose:
- Output the file in the specified format
"""
# Txt output
if output_format == "txt":
# The final string used to store the formatted file
final_str = u"Pinnacle studio file list:\n"
# Set a counter for the files
counter = 1
# Go over the findings
for appearance_time, file_name in findings_list:
# Failsafe in case of false positive matching
if len(file_name) > MAX_FILE_NAME:
continue
# Convert time to hours, mintes, seconds, ms
try:
hours, minutes, seconds, ms = convert_time(float(appearance_time))
# In case of conversion errors
except ValueError as err:
continue
# The time string
time_str = "%02d:%02d:%02d.%s" % (hours, minutes, seconds, str(ms)[2:])
# Format the output
final_str += u"%d: %-25s \tat %02d:%02d:%02d.%s\n" % (counter, file_name, hours, minutes, seconds, str(ms)[2:])
# Increase counter
counter += 1
# Write the result to the output file
try:
with open(output_path,"w") as my_file:
my_file.write(final_str)
except IOError as err:
print "Error opening or writing to the output file."
# CSV output
elif output_format == "csv":
try:
with open(output_path,"wb") as my_file:
# Generate the csv file writer
file_writer = csv.writer(my_file)
# Go over the findings
for appearance_time, file_name in findings_list:
# Failsafe in case of false positive matching
if len(file_name) > MAX_FILE_NAME:
continue
# Convert time to hours, mintes, seconds, ms
try:
hours, minutes, seconds, ms = convert_time(float(appearance_time))
# In case of conversion errors
except ValueError as err:
continue
# The time string
time_str = "%02d:%02d:%02d.%s" % (hours, minutes, seconds, str(ms)[2:])
# Output the row
file_writer.writerow([file_name, time_str])
except IOError, csv.Error:
print "Error opening or writing to the output file."
else:
print "ERROR: Invalid output format"
def main():
    """
    Function: main() --> NoneType
    Purpose:
    - Control the flow of the program: parse arguments, validate the input
      file, extract (time, name) pairs and hand them to output_file()
    """
    # Parse arguments
    parser = argparse.ArgumentParser(description="Find file names and time of appearance from a pinnacle studio AXP file.")
    parser.add_argument("axp_file", help="Path to the .axp file")
    parser.add_argument("-o", "--output_file", help=("Output file, defaults to '%s' in case of txt and '%s' in case of csv" % (TXT_DEFAULT, CSV_DEFAULT)))
    parser.add_argument("-csv", help="Output the file in csv format.", action="store_true")
    args = parser.parse_args()
    # Check if input file exists
    if not os.path.exists(args.axp_file):
        print "ERROR: Invalid input path."
        return
    # Check the extension
    if args.axp_file[-4:].lower() != ".axp":
        print "Error: Not a .axp file"
        return
    # Unicode RE for getting the time and name
    # (re.S lets '.' span newlines between RecIn and <Name>; re.U for unicode)
    try:
        time_name_re = re.compile(TIME_NAME_RE, re.S|re.U)
    except re.error as err:
        print "ERROR: Bad input RE."
        return
    # Open and read from the file
    try:
        with open(args.axp_file, "r") as input_file:
            input_str = input_file.read()
    except IOError as err:
        print "Error opening or reading from input file."
        return
    # Decode using the pinnacle studio encoding (UTF-16 little-endian)
    input_str = input_str.decode(PINNACLE_ENCODING)
    # Find the re matches in the string
    findings = time_name_re.findall(input_str)
    # Check the specified output format
    output_format = "csv" if args.csv else "txt"
    # Check output file path
    if args.output_file is None:
        output_path = CSV_DEFAULT if args.csv else TXT_DEFAULT
    else:
        output_path = args.output_file
    # Write the findings in the chosen format
    output_file(findings, output_format, output_path)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
No matter how it is expressed in your own organization, we can tell you for sure, resistance will occur. It can be out in the open (overt) or deep down in the hidden corners of the organization (covert), but trust us…it is there.
“De Personalize” the Resistance – Do not take resistance to a change personally. It is not about YOU.
Surface Resistance Early – Start looking for resistance to a change as soon as the project begins. Use tactics such as focus groups, social media, team meetings and hot lines to begin surfacing resistance as early as possible.
Communicate – Every communication sent during any implementation must include a feedback loop in order to allow individuals to express their thoughts and feelings about the change. This is a great way for Change Agents to gather information about potential sources of resistance.
Use Involvement Techniques – It may not be appropriate to involve people in deciding what to change, but you can almost always get them involved in how to implement it in their daily work. TIP: Take your most vocal non-supporters and put them on the project team! It’s the perfect way to keep an eye on them and involve them in creating a solution for their issues.
Make Surfacing Resistance Safe - When you are looking to bring covert resistance out in the open, don’t punish individuals for expressing their resistance.
Invest in Readiness – Readiness and Resistance are two sides of the same coin. If you invest in readiness by giving Targets information, motivation, ability and confidence you will see less resistance while the change takes place.
Prepare for Resistance to Come Back – Even if you successfully manage resistance once, trust us, it will come back. Managing resistance is definitely not a one-time, check-the-box event. As a project evolves, so does the resistance to it, which is why managing resistance must be an ongoing activity throughout a project’s life cycle.
When it comes to resistance to change the question should not be whether you will or will not have it, but rather how much will there be and what strategies will you use to manage it. Using these eight tactics to manage the resistance to change you face will allow your implementation to move ahead at speed.
|
# Copyright 2014-2015 VPAC
# Copyright 2014 The University of Melbourne
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
LOG = logging.getLogger(__name__)
def assert_password_simple(password, old=None):
    """Fallback password check used when cracklib is unavailable.

    Rejects a password identical to the (truthy) old one or shorter than
    six characters; returns the password unchanged otherwise.
    """
    if old and old == password:
        raise ValueError('Old and new passwords are the same.')
    if len(password) < 6:
        raise ValueError('Password is less than six characters.')
    return password
try:
    from cracklib import VeryFascistCheck as _assert_password
    # Some configuration errors are only apparent when cracklib
    # tests a password for the first time, so test a strong password to
    # verify that cracklib is working as intended.
    _assert_password('thaeliez4niore0U')
except ImportError:
    # cracklib is optional; fall back to the simple length/equality check.
    _assert_password = assert_password_simple
except (OSError, ValueError) as e:
    # cracklib imported but cannot operate (e.g. missing dictionary files).
    LOG.warning("Cracklib misconfigured: %s", str(e))
    _assert_password = assert_password_simple
def assert_strong_password(username, password, old_password=None):
    """Raises ValueError if the password isn't strong.

    The password may not contain the username; every other policy check is
    delegated to the configured backend checker (_assert_password).
    Returns the password otherwise.
    """
    contains_username = username is not None and username in password
    if contains_username:
        raise ValueError("Password contains username")
    return _assert_password(password, old_password)
|
Oh Glory! What a wonderful thing Serena is doing for the period, and her gowns are exquisite! Absolutely gorgeous, I do love the bonnet as well, perhaps we ought all get one and start a new garden fashion. I’d love the excuse to wear it, especially now Spring is here. What a beautiful post, thank you, Julie, as ever!
Thanks Mr Darcy, LOL. I love “Go to it”!
What a marvelous resource! I would love to see one of her presentations. And I covet that open robe, not to mention that lovely bonnet, although I confess I would get some odd glances wearing it about current-day New York.
LOL-but if she trimmed one in black……?
|
# This file is part of django-xmpp-server-list
# (https://github.com/mathiasertl/django-xmpp-server-list)
#
# django-xmpp-server-list is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# xmppllist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-xmpp-server-list. If not, see <http://www.gnu.org/licenses/>.
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream import ElementBase
from sleekxmpp.xmlstream import register_stanza_plugin
class RosterVerStanza(ElementBase):
    """Stream-feature stanza advertising roster versioning (XEP-0237)."""
    name = 'ver'
    namespace = 'urn:xmpp:features:rosterver'
    interfaces = set()
    plugin_attrib = 'ver'
class feature_rosterver(BasePlugin):
    """Plugin for Roster Versioning (XEP-0237).
    .. seealso:: http://www.xmpp.org/extensions/xep-0237.html
    """
    def plugin_init(self):
        # Register the <ver/> stream feature; handling it requires no
        # stream restart, hence restart=False.
        self.description = 'XEP-0237: Roster Versioning (obsolete)'
        self.xmpp.register_feature(
            'ver',
            self._handle_rosterver,
            restart=False,
            order=self.config.get('order', 0))
        register_stanza_plugin(StreamFeatures, RosterVerStanza)
    def _handle_rosterver(self, features):
        # Intentionally a no-op: no action is needed when the server
        # advertises roster versioning.
        pass
|
The Trinity Bracelet© is Ronaldo's attempt to impress upon all of us the significance of the Holy Trinity, because of its importance and meaning in so many people's lives. Ronaldo was inspired to incorporate two pearls into the bracelet to signify the purity of God's love as shown by the Holy Trinity (which he felt was perfectly represented by the Son and Holy Spirit). Then, Ronaldo wanted to incorporate as the bracelet's center stone design, a special tubular-cut black onyx, to signify the Holy Trinity's power and absolute authority over all of heaven and earth (best represented by the Father). Ronaldo wants everybody to remember that we are all part of something much bigger than ourselves.
|
# -*- coding: utf-8 -*-
"""
Interface for creating modeling environments.
Author: R. Lombaert
"""
import os
from time import gmtime
import types
import cc.path
from cc.tools.io import DataIO
class ModelingSession(object):
    """
    The basic modeling environment. Inherited by MCMax() and Gastronoom().
    """
    def __init__(self,code,path,replace_db_entry=0,new_entries=[],\
                 single_session=0):
        """
        Initializing an instance of ModelingSession.
        @param code: code for which the modelingsession is created
        @type code: string
        @param path: modeling output folder in the code's home folder
        @type path: string
        @keyword replace_db_entry: replace an entry in the database with a
                                   newly calculated model with a new model id
                                   (eg if some general data not included in
                                   the inputfiles is changed)
                                   (default: 0)
        @type replace_db_entry: bool
        @keyword new_entries: The new model_ids when replace_db_entry is 1
                                   of other models in the grid. These are not
                                   replaced!
                                   (default: [])
        @type new_entries: list[str]
        @keyword single_session: If this is the only CC session. Speeds up db
                                 check.
                                 (default: 0)
        @type single_session: bool
        """
        # NOTE(review): 'new_entries' has a mutable default ([]); it is only
        # stored and read here, never mutated, so sharing looks harmless —
        # but a None default would be safer. Verify before changing.
        self.path = path
        self.code = code
        self.model_id = ''
        self.replace_db_entry = replace_db_entry
        self.new_entries = new_entries
        self.single_session = single_session
        # Read the per-code list of parameters allowed to differ between
        # models sharing an id; Chemistry has no such file.
        if code == 'Chemistry':
            self.mutable = []
        else:
            mutablefile = os.path.join(cc.path.aux,\
                                       'Mutable_Parameters_%s.dat'%code)
            self.mutable = [line[0]
                            for line in DataIO.readFile(mutablefile,delimiter=' ')
                            if ' '.join(line)]
            # Drop commented-out entries from the mutable-parameter list.
            self.mutable = [line for line in self.mutable if line[0] != '#']
        fout = os.path.join(getattr(cc.path,self.code.lower()),self.path)
        DataIO.testFolderExistence(os.path.join(fout,'models'))
    def makeNewId(self):
        '''
        Make a new model_id based on the current UTC in seconds since 1970.
        '''
        # gmtime() is called once per field; a sub-second race could in
        # principle mix fields from two ticks, but ids are second-granular.
        return 'model_%.4i-%.2i-%.2ih%.2i-%.2i-%.2i' \
                %(gmtime()[0],gmtime()[1],gmtime()[2],\
                  gmtime()[3],gmtime()[4],gmtime()[5])
    def setCommandKey(self,comm_key,star,key_type,star_key=None,\
                      alternative=None,make_int=0,exp_not=0):
        '''
        Try setting a key in the command_list from a star instance.
        If the key is unknown, it is left open and will be filled in from the
        standard gastronoom inputfile.
        @param comm_key: the name of the keyword in the command list
        @type comm_key: string
        @param star: The parameter set
        @type star: Star()
        @param key_type: the type of the keyword, either 'DUST' or 'GAS'
        @type key_type: string
        @keyword star_key: the name of the keyword in the star instance
                           (minus '_%s'%key_type, which is added as well in a
                           second attempt if the first without the addition is
                           not found), if None, it is equal to comm_key
                           (default: None)
        @type star_key: string
        @keyword alternative: a default value passed from the standard
                              inputfile that is used if the keyword or the
                              keyword + '_%s'%key_type is not found in Star()
                              (default: None)
        @type alternative: string
        @keyword make_int: make an integer before converting to string for this
                           keyword.
                           (default: 0)
        @type make_int: boolean
        @keyword exp_not: Convert to exponential notation in a string
                          (default: 0)
        @type exp_not: bool
        @return: True if successful, otherwise False.
        @rtype: bool
        '''
        if star_key is None: star_key = comm_key
        # Lookup order: bare key, then key with the '_DUST'/'_GAS' suffix,
        # then the provided alternative; give up (False) otherwise.
        try:
            self.command_list[comm_key] = \
                    DataIO.inputToString(star[star_key],make_int,exp_not)
            return True
        except KeyError:
            try:
                self.command_list[comm_key] = \
                        DataIO.inputToString(star[star_key+ '_%s'%key_type],\
                                             make_int,exp_not)
                return True
            except KeyError:
                if not alternative is None:
                    self.command_list[comm_key] = \
                            DataIO.inputToString(alternative,make_int,exp_not)
                    return True
                else:
                    return False
    def compareCommandLists(self,this_list,modellist,code,ignoreAbun=0,\
                            extra_dict=None,check_keys=[]):
        """
        Comparing a command_list with a database entry.
        @param this_list: parameters in this modeling session
        @type this_list: dict
        @param modellist: parameters from database model
        @type modellist: dict
        @param code: The GASTRoNOoM subcode
        @type code: string
        @keyword ignoreAbun: only relevant for mline: ignore the 4 abundance
                             parameters (such as for co)
                             (default: 0)
        @type ignoreAbun: bool
        @keyword extra_dict: if not None this gives extra dictionary entries
                             to be used in the comparison on top of this_list.
                             The extra entries are assumed present in modellist
                             otherwise the comparison will return False.
                             (default: None)
        @type extra_dict: dict
        @keyword check_keys: Only check keys given in this list. If empty, the
                             standard keyword lists are used.
                             (default: [])
        @type check_keys: list[str]
        @return: Comparison between the two parameter sets
        @rtype: bool
        """
        model_bool_list = []
        if not extra_dict is None: this_list.update(extra_dict)
        # Decide which keywords take part in the comparison.
        if check_keys:
            keywords = check_keys
        elif code == 'mcmax':
            keywords = set(this_list.keys()+modellist.keys())
            if 'dust_species' in keywords:
                keywords.remove('dust_species')
            if 'IN_PROGRESS' in keywords:
                keywords.remove('IN_PROGRESS')
        #elif code == 'chemistry':
            ##keywords = set(this_list.keys()+modellist.keys())
            #keywords = getattr(self,code + '_keywords')
            #if 'IN_PROGRESS' in keywords:
                #keywords.remove('IN_PROGRESS')
        else:
            keywords = getattr(self,code + '_keywords')
        if code == 'mline' and ignoreAbun and not check_keys:
            keywords = [key
                        for key in keywords
                        if key not in ['ABUN_MOLEC','ABUN_MOLEC_RINNER',\
                                       'ABUN_MOLEC_RE','RMAX_MOLEC']]
        for keyword in keywords:
            #-- All issues with "double" notation instead of exponential should be resolved
            # if keyword == 'STEP_RS_RIN':
            #     if this_list.has_key(keyword) \
            #             and type(this_list[keyword]) is types.StringType:
            #         if 'd' in this_list[keyword]:
            #             this_list[keyword] =this_list[keyword].replace('d','e')
            #     if modellist.has_key(keyword) \
            #             and type(modellist[keyword]) is types.StringType:
            #         if 'd' in modellist[keyword]:
            #             modellist[keyword] =modellist[keyword].replace('d','e')
            try:
                try:
                    # Numeric values compare within a relative tolerance of
                    # 0.1% (absolute 1e-10 when the value is exactly zero).
                    try:
                        val = float(this_list[keyword])
                    except TypeError:
                        raise ValueError
                    delta = not val and 1e-10 or 0.001*val
                    # For negative values delta is negative too, so the
                    # bounds are written in the opposite order.
                    if val < 0:
                        tb = val-delta > float(modellist[keyword]) > val+delta
                    else:
                        tb = val-delta < float(modellist[keyword]) < val+delta
                except ValueError:
                    # Non-numeric values must match exactly.
                    tb = this_list[keyword]==modellist[keyword]
            except KeyError:
                # A keyword missing from BOTH dicts counts as a match;
                # missing from only one side is a mismatch.
                if keyword not in this_list.keys() \
                        and keyword not in modellist.keys():
                    tb = True
                else:
                    tb = False
            model_bool_list.append(tb)
        if False not in model_bool_list:
            return True
        else:
            return False
    def cCL(self,*args,**kwargs):
        '''
        Short-hand helper function for compareCommandLists.
        '''
        return self.compareCommandLists(*args,**kwargs)
|
The Goshen Farm Sharing Garden provides a place for Goshen Farm members to garden. The Sharing Garden exists to educate the public about the history and importance of Goshen Farm and to revive the working history of Goshen Farm.
We have many annual events each year to help raise funds, celebrate the history and educate the community about Goshen Farm. Find out what is coming up next so you can get involved!
The Garden Chair and Committee wanted to provide a place for the community to plant, grow, and harvest delicious and healthy food while having fun. See how you can join!
The Society was established in 2006 to safeguard and restore the historic Goshen Farm and its surrounding property for our community. Learn more about our efforts and how you can help.
|
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides UK Met Office Post Process (PP) format specific capabilities.
"""
import abc
import collections
from copy import deepcopy
import itertools
import operator
import os
import re
import struct
import warnings
import numpy as np
import numpy.ma as ma
import netcdftime
import iris.config
import iris.fileformats.rules
import iris.unit
from iris.fileformats.manager import DataManager
import iris.fileformats.pp_rules
import iris.coord_systems
import iris.proxy
iris.proxy.apply_proxy('iris.fileformats.pp_packing', globals())
__all__ = ['load', 'save', 'PPField', 'add_load_rules', 'reset_load_rules',
'add_save_rules', 'reset_save_rules', 'STASH', 'EARTH_RADIUS']
EARTH_RADIUS = 6371229.0
# PP->Cube and Cube->PP rules are loaded on first use
_load_rules = None
_save_rules = None
PP_HEADER_DEPTH = 256
PP_WORD_DEPTH = 4
NUM_LONG_HEADERS = 45
NUM_FLOAT_HEADERS = 19
# The header definition for header release 2.
#: A list of (header_name, position_in_header(tuple of)) pairs for
#: header release 2 - using the one-based UM/FORTRAN indexing convention.
UM_HEADER_2 = [
('lbyr', (1, )),
('lbmon', (2, )),
('lbdat', (3, )),
('lbhr', (4, )),
('lbmin', (5, )),
('lbday', (6, )),
('lbyrd', (7, )),
('lbmond', (8, )),
('lbdatd', (9, )),
('lbhrd', (10, )),
('lbmind', (11, )),
('lbdayd', (12, )),
('lbtim', (13, )),
('lbft', (14, )),
('lblrec', (15, )),
('lbcode', (16, )),
('lbhem', (17, )),
('lbrow', (18, )),
('lbnpt', (19, )),
('lbext', (20, )),
('lbpack', (21, )),
('lbrel', (22, )),
('lbfc', (23, )),
('lbcfc', (24, )),
('lbproc', (25, )),
('lbvc', (26, )),
('lbrvc', (27, )),
('lbexp', (28, )),
('lbegin', (29, )),
('lbnrec', (30, )),
('lbproj', (31, )),
('lbtyp', (32, )),
('lblev', (33, )),
('lbrsvd', (34, 35, 36, 37, )),
('lbsrce', (38, )),
('lbuser', (39, 40, 41, 42, 43, 44, 45, )),
('brsvd', (46, 47, 48, 49, )),
('bdatum', (50, )),
('bacc', (51, )),
('blev', (52, )),
('brlev', (53, )),
('bhlev', (54, )),
('bhrlev', (55, )),
('bplat', (56, )),
('bplon', (57, )),
('bgor', (58, )),
('bzy', (59, )),
('bdy', (60, )),
('bzx', (61, )),
('bdx', (62, )),
('bmdi', (63, )),
('bmks', (64, )),
]
# The header definition for header release 3.
#: A list of (header_name, position_in_header(tuple of)) pairs for
#: header release 3 - using the one-based UM/FORTRAN indexing convention.
UM_HEADER_3 = [
('lbyr', (1, )),
('lbmon', (2, )),
('lbdat', (3, )),
('lbhr', (4, )),
('lbmin', (5, )),
('lbsec', (6, )),
('lbyrd', (7, )),
('lbmond', (8, )),
('lbdatd', (9, )),
('lbhrd', (10, )),
('lbmind', (11, )),
('lbsecd', (12, )),
('lbtim', (13, )),
('lbft', (14, )),
('lblrec', (15, )),
('lbcode', (16, )),
('lbhem', (17, )),
('lbrow', (18, )),
('lbnpt', (19, )),
('lbext', (20, )),
('lbpack', (21, )),
('lbrel', (22, )),
('lbfc', (23, )),
('lbcfc', (24, )),
('lbproc', (25, )),
('lbvc', (26, )),
('lbrvc', (27, )),
('lbexp', (28, )),
('lbegin', (29, )),
('lbnrec', (30, )),
('lbproj', (31, )),
('lbtyp', (32, )),
('lblev', (33, )),
('lbrsvd', (34, 35, 36, 37, )),
('lbsrce', (38, )),
('lbuser', (39, 40, 41, 42, 43, 44, 45, )),
('brsvd', (46, 47, 48, 49, )),
('bdatum', (50, )),
('bacc', (51, )),
('blev', (52, )),
('brlev', (53, )),
('bhlev', (54, )),
('bhrlev', (55, )),
('bplat', (56, )),
('bplon', (57, )),
('bgor', (58, )),
('bzy', (59, )),
('bdy', (60, )),
('bzx', (61, )),
('bdx', (62, )),
('bmdi', (63, )),
('bmks', (64, )),
]
# A map from header-release-number to header definition
UM_HEADERS = {2: UM_HEADER_2, 3: UM_HEADER_3}
# Offset value to convert from UM_HEADER positions to PP_HEADER offsets.
UM_TO_PP_HEADER_OFFSET = 1
#: A dictionary mapping IB values to their names.
EXTRA_DATA = {
1: 'x',
2: 'y',
3: 'lower_y_domain',
4: 'lower_x_domain',
5: 'upper_y_domain',
6: 'upper_x_domain',
7: 'lower_z_domain',
8: 'upper_z_domain',
10: 'field_title',
11: 'domain_title',
12: 'x_lower_bound',
13: 'x_upper_bound',
14: 'y_lower_bound',
15: 'y_upper_bound',
}
#: Maps lbuser[0] to numpy data type. "default" will be interpreted if
#: no match is found, providing a warning in such a case.
LBUSER_DTYPE_LOOKUP = {1 :np.dtype('>f4'),
2 :np.dtype('>i4'),
3 :np.dtype('>i4'),
-1:np.dtype('>f4'),
-2:np.dtype('>i4'),
-3:np.dtype('>i4'),
'default': np.dtype('>f4'),
}
# LBPROC codes and their English equivalents
LBPROC_PAIRS = ((1, "Difference from another experiment"),
(2, "Difference from zonal (or other spatial) mean"),
(4, "Difference from time mean"),
(8, "X-derivative (d/dx)"),
(16, "Y-derivative (d/dy)"),
(32, "Time derivative (d/dt)"),
(64, "Zonal mean field"),
(128, "Time mean field"),
(256, "Product of two fields"),
(512, "Square root of a field"),
(1024, "Difference between fields at levels BLEV and BRLEV"),
(2048, "Mean over layer between levels BLEV and BRLEV"),
(4096, "Minimum value of field during time period"),
(8192, "Maximum value of field during time period"),
(16384, "Magnitude of a vector, not specifically wind speed"),
(32768, "Log10 of a field"),
(65536, "Variance of a field"),
(131072, "Mean over an ensemble of parallel runs"))
# lbproc_map maps in both directions at once: lbproc code -> English
# description AND English description -> lbproc code (the two key spaces are
# disjoint, so the orientations cannot collide).
lbproc_map = {x : y for x,y in itertools.chain(LBPROC_PAIRS, ((y,x) for x,y in LBPROC_PAIRS))}
class STASH(collections.namedtuple('STASH', 'model section item')):
"""
A class to hold a single STASH code.
Create instances using:
>>> model = 1
>>> section = 2
>>> item = 3
>>> my_stash = iris.fileformats.pp.STASH(model, section, item)
Access the sub-components via:
>>> my_stash.model
1
>>> my_stash.section
2
>>> my_stash.item
3
String conversion results in the MSI format:
>>> print iris.fileformats.pp.STASH(1, 16, 203)
m01s16i203
"""
__slots__ = ()
def __new__(cls, model, section, item):
"""
Args:
* model
A positive integer less than 100, or None.
* section
A non-negative integer less than 100, or None.
* item
A positive integer less than 1000, or None.
"""
model = cls._validate_member('model', model, 1, 99)
section = cls._validate_member('section', section, 0, 99)
item = cls._validate_member('item', item, 1, 999)
return super(STASH, cls).__new__(cls, model, section, item)
@staticmethod
def from_msi(msi):
"""Convert a STASH code MSI string to a STASH instance."""
if not isinstance(msi, basestring):
raise TypeError('Expected STASH code MSI string, got %r' % msi)
msi_match = re.match('^\s*m(.*)s(.*)i(.*)\s*$', msi, re.IGNORECASE)
if msi_match is None:
raise ValueError('Expected STASH code MSI string "mXXsXXiXXX", got %r' % msi)
return STASH(*msi_match.groups())
@staticmethod
def _validate_member(name, value, lower_limit, upper_limit):
# Returns a valid integer or None.
try:
value = int(value)
if not lower_limit <= value <= upper_limit:
value = None
except (TypeError, ValueError):
value = None
return value
def __str__(self):
model = self._format_member(self.model, 2)
section = self._format_member(self.section, 2)
item = self._format_member(self.item, 3)
return 'm{}s{}i{}'.format(model, section, item)
def _format_member(self, value, num_digits):
if value is None:
result = '?' * num_digits
else:
format_spec = '0' + str(num_digits)
result = format(value, format_spec)
return result
def lbuser3(self):
"""Return the lbuser[3] value that this stash represents."""
return (self.section or 0) * 1000 + (self.item or 0)
def lbuser6(self):
"""Return the lbuser[6] value that this stash represents."""
return self.model or 0
@property
def is_valid(self):
return '?' not in str(self)
def __eq__(self, other):
if isinstance(other, basestring):
return super(STASH, self).__eq__(STASH.from_msi(other))
else:
return super(STASH, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
class SplittableInt(object):
    """
    A class to hold integers which can easily get each decimal digit individually.
    >>> three_six_two = SplittableInt(362)
    >>> print three_six_two
    362
    >>> print three_six_two[0]
    2
    >>> print three_six_two[2]
    3
    .. note:: No support for negative numbers
    """
    def __init__(self, value, name_mapping_dict=None):
        """
        Build a SplittableInt given the positive integer value provided.
        Kwargs:
        * name_mapping_dict - (dict)
            A special mapping to provide name based access to specific integer positions:
            >>> a = SplittableInt(1234, {'hundreds': 2})
            >>> print a.hundreds
            2
            >>> a.hundreds = 9
            >>> print a.hundreds
            9
            >>> print a
            1934
        """
        if value < 0:
            raise ValueError('Negative numbers not supported with splittable integers object')
        # define the name lookup first (as this is the way __setattr__ is plumbed)
        #: A dictionary mapping special attribute names on this object
        #: to the slices/indices required to access them.
        self._name_lookup = name_mapping_dict or {}
        self._value = value
        self._calculate_str_value_from_value()
    def __int__(self):
        # Expose the underlying plain integer value.
        return int(self._value)
    def _calculate_str_value_from_value(self):
        # Refresh the per-digit representation (and named attributes)
        # from self._value.
        # Reverse the string to get the appropriate index when getting the sliced value
        self._strvalue = [int(c) for c in str(self._value)[::-1]]
        # Associate the names in the lookup table to attributes
        for name, index in self._name_lookup.items():
            object.__setattr__(self, name, self[index])
    def _calculate_value_from_str_value(self):
        # Rebuild self._value from the per-digit list; digit i carries
        # weight 10**i because _strvalue is stored least-significant first.
        self._value = np.sum([ 10**i * val for i, val in enumerate(self._strvalue)])
    def __len__(self):
        # Number of decimal digits currently held.
        return len(self._strvalue)
    def __getitem__(self, key):
        # Index/slice access to decimal digits; out-of-range indices read as 0.
        try:
            val = self._strvalue[key]
        except IndexError:
            val = 0
        # if the key returns a list of values, then combine them together to an integer
        if isinstance(val, list):
            val = sum([10**i * val for i, val in enumerate(val)])
        return val
    def __setitem__(self, key, value):
        # The setitem method has been overridden so that assignment using ``val[0] = 1`` style syntax updates
        # the entire object appropriately.
        if (not isinstance(value, int) or value < 0):
            raise ValueError('Can only set %s as a positive integer value.' % key)
        if isinstance(key, slice):
            if ((key.start is not None and key.start < 0) or
                (key.step is not None and key.step < 0) or
                (key.stop is not None and key.stop < 0)):
                raise ValueError('Cannot assign a value with slice objects containing negative indices.')
            # calculate the current length of the value of this string
            current_length = len(range(*key.indices(len(self))))
            # get indices for as many digits as have been requested. Putting the upper limit on the number of digits at 100.
            indices = range(*key.indices(100))
            if len(indices) < len(str(value)):
                raise ValueError('Cannot put %s into %s as it has too many digits.' % (value, key))
            # Iterate over each of the indices in the slice, zipping them together with the associated digit
            for index, digit in zip(indices, str(value).zfill(current_length)[::-1]):
                # assign each digit to the associated index
                self.__setitem__(index, int(digit))
        else:
            # If we are trying to set to an index which does not currently exist in _strvalue then extend it to the
            # appropriate length
            if (key + 1) > len(self):
                new_str_value = [0] * (key + 1)
                new_str_value[:len(self)] = self._strvalue
                self._strvalue = new_str_value
            self._strvalue[key] = value
            # Keep any named attribute mapped to this digit in sync.
            for name, index in self._name_lookup.items():
                if index == key:
                    object.__setattr__(self, name, value)
            self._calculate_value_from_str_value()
    def __setattr__(self, name, value):
        # if the attribute is a special value, update the index value which will in turn update the attribute value
        if (name != '_name_lookup' and name in self._name_lookup.keys()):
            self[self._name_lookup[name]] = value
        else:
            object.__setattr__(self, name, value)
    def __str__(self):
        return str(self._value)
    def __repr__(self):
        return 'SplittableInt(%r, name_mapping_dict=%r)' % (self._value, self._name_lookup)
    def __eq__(self, other):
        # Comparable against other SplittableInts and plain ints only.
        # NOTE(review): __eq__ is defined without __hash__; under Python 3
        # instances would be unhashable - confirm whether dict-key use is needed.
        result = NotImplemented
        if isinstance(other, SplittableInt):
            result = self._value == other._value
        elif isinstance(other, int):
            result = self._value == other
        return result
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is not NotImplemented:
            result = not result
        return result
    def _compare(self, other, op):
        # Shared implementation for the ordering operators below.
        result = NotImplemented
        if isinstance(other, SplittableInt):
            result = op(self._value, other._value)
        elif isinstance(other, int):
            result = op(self._value, other)
        return result
    def __lt__(self, other):
        return self._compare(other, operator.lt)
    def __le__(self, other):
        return self._compare(other, operator.le)
    def __gt__(self, other):
        return self._compare(other, operator.gt)
    def __ge__(self, other):
        return self._compare(other, operator.ge)
class BitwiseInt(SplittableInt):
    """
    A class to hold an integer, of fixed bit-length, which can easily get/set each bit individually.
    .. note::
        Uses a fixed number of bits.
        Will raise an Error when attempting to access an out-of-range flag.
    >>> a = BitwiseInt(511)
    >>> a.flag1
    1
    >>> a.flag8
    1
    >>> a.flag128
    1
    >>> a.flag256
    1
    >>> a.flag512
    AttributeError: 'BitwiseInt' object has no attribute 'flag512'
    >>> a.flag512 = 1
    AttributeError: Cannot set a flag that does not exist: flag512
    """
    def __init__(self, value, num_bits=None):
        """ """ # intentionally empty docstring as all covered in the class docstring.
        SplittableInt.__init__(self, value)
        self.flags = ()
        #do we need to calculate the number of bits based on the given value?
        self._num_bits = num_bits
        if self._num_bits is None:
            # Count how many bits are needed to represent the value.
            self._num_bits = 0
            while((value >> self._num_bits) > 0):
                self._num_bits += 1
        else:
            #make sure the number of bits is enough to store the given value.
            if (value >> self._num_bits) > 0:
                raise ValueError("Not enough bits to store value")
        self._set_flags_from_value()
    def _set_flags_from_value(self):
        # Derive the flagN attributes (and the flags tuple of set bit
        # values) from the current integer value.
        all_flags = []
        # Set attributes "flag[n]" to 0 or 1
        for i in range(self._num_bits):
            flag_name = 1 << i
            flag_value = ((self._value >> i) & 1)
            object.__setattr__(self, 'flag%d' % flag_name, flag_value)
            # Add to list off all flags
            if flag_value:
                all_flags.append(flag_name)
        self.flags = tuple(all_flags)
    def _set_value_from_flags(self):
        # Recompute the integer value by summing the weights of the set flags.
        self._value = 0
        for i in range(self._num_bits):
            bit_value = pow(2, i)
            flag_name = "flag%i" % bit_value
            flag_value = object.__getattribute__(self, flag_name)
            self._value += flag_value * bit_value
    def __iand__(self, value):
        """Perform an &= operation."""
        self._value &= value
        self._set_flags_from_value()
        return self
    def __ior__(self, value):
        """Perform an |= operation."""
        self._value |= value
        self._set_flags_from_value()
        return self
    def __iadd__(self, value):
        """Perform an inplace add operation"""
        self._value += value
        self._set_flags_from_value()
        return self
    def __setattr__(self, name, value):
        # Allow setting of the attribute flags
        # Are we setting a flag?
        if name.startswith("flag") and name != "flags":
            #true and false become 1 and 0
            if not isinstance(value, bool):
                raise TypeError("Can only set bits to True or False")
            # Setting an existing flag?
            if hasattr(self, name):
                #which flag?
                flag_value = int(name[4:])
                #on or off?
                if value:
                    self |= flag_value
                else:
                    self &= ~flag_value
            # Fail if an attempt has been made to set a flag that does not exist
            else:
                raise AttributeError("Cannot set a flag that does not exist: %s" % name)
        # If we're not setting a flag, then continue as normal
        else:
            SplittableInt.__setattr__(self, name, value)
class PPDataProxy(object):
    """A reference to the data payload of a single PP field."""
    # file path, byte offset, payload length in bytes, packing code,
    # and the land/sea mask field (if any) needed for unpacking.
    __slots__ = ('path', 'offset', 'data_len', 'lbpack', 'mask')
    def __init__(self, path, offset, data_len, lbpack, mask):
        self.path = path
        self.offset = offset
        self.data_len = data_len
        self.lbpack = lbpack
        self.mask = mask
    # NOTE:
    # "__getstate__" and "__setstate__" functions are defined here to provide a custom interface for Pickle
    # : Pickle "normal" behaviour is just to save/reinstate the object dictionary
    # : that won't work here, because the use of __slots__ means **there is no object dictionary**
    def __getstate__(self):
        # object state capture method for Pickle.dump()
        # - return the instance data values needed to reconstruct the PPDataProxy object
        return dict([(k,getattr(self,k)) for k in PPDataProxy.__slots__])
    def __setstate__(self, state):
        # object reconstruction method for Pickle.load()
        # reinitialise the object state from the serialised values (using setattr, as there is no object dictionary)
        for (key, val) in state.items():
            setattr(self, key, val)
    def __repr__(self):
        return '%s(%r, %r, %r, %r, %r)' % \
            (self.__class__.__name__, self.path, self.offset,
             self.data_len, self.lbpack, self.mask)
    def load(self, data_shape, data_type, mdi, deferred_slice):
        """
        Load the corresponding proxy data item and perform any deferred slicing.
        Args:
        * data_shape (tuple of int):
            The data shape of the proxy data item.
        * data_type (:class:`numpy.dtype`):
            The data type of the proxy data item.
        * mdi (float):
            The missing data indicator value.
        * deferred_slice (tuple):
            The deferred slice to be applied to the proxy data item.
        Returns:
            :class:`numpy.ndarray`
        """
        # Load the appropriate proxy data conveniently with a context manager.
        with open(self.path, 'rb') as pp_file:
            pp_file.seek(self.offset, os.SEEK_SET)
            data_bytes = pp_file.read(self.data_len)
            data = _read_data_bytes(data_bytes, self.lbpack, data_shape,
                                    data_type, mdi, self.mask)
        # Identify which index items in the deferred slice are tuples.
        tuple_dims = [i for i, value in enumerate(deferred_slice) if isinstance(value, tuple)]
        # Whenever a slice consists of more than one tuple index item, numpy does not slice the
        # data array as we want it to. We therefore require to split the deferred slice into
        # multiple slices and consistently slice the data with one slice per tuple.
        if len(tuple_dims) > 1:
            # Identify which index items in the deferred slice are single scalar values.
            # Such dimensions will collapse in the sliced data shape.
            collapsed_dims = [i for i, value in enumerate(deferred_slice) if isinstance(value, int)]
            # Equate the first slice to be the original deferred slice.
            tuple_slice = list(deferred_slice)
            # Replace all tuple index items in the slice, except for the first,
            # to be full slices over their dimension.
            for dim in tuple_dims[1:]:
                tuple_slice[dim] = slice(None)
            # Perform the deferred slice containing only the first tuple index item.
            payload = data[tuple_slice]
            # Re-slice the data consistently with the next single tuple index item.
            for dim in tuple_dims[1:]:
                # Identify all those pre-sliced collapsed dimensions less than
                # the dimension of the current slice tuple index item.
                # (Python 2: filter returns a list here, so len() is valid.)
                ndims_collapsed = len(filter(lambda x: x < dim, collapsed_dims))
                # Construct the single tuple slice.
                tuple_slice = [slice(None)] * payload.ndim
                tuple_slice[dim - ndims_collapsed] = deferred_slice[dim]
                # Slice the data with this single tuple slice.
                payload = payload[tuple_slice]
        else:
            # The deferred slice contains no more than one tuple index item, so
            # it's safe to slice the data directly.
            payload = data[deferred_slice]
        return payload
    def __eq__(self, other):
        # Equal iff every slot attribute matches; NotImplemented for other types.
        result = NotImplemented
        if isinstance(other, PPDataProxy):
            result = True
            for attr in self.__slots__:
                if getattr(self, attr) != getattr(other, attr):
                    result = False
                    break
        return result
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is not NotImplemented:
            result = not result
        return result
def _read_data_bytes(data_bytes, lbpack, data_shape, data_type, mdi,
                     mask=None):
    """
    Convert the already read binary data payload into a numpy array, unpacking
    and decompressing as per the F3 specification.
    """
    # lbpack.n1 selects the packing scheme: 0/2 = unpacked, 1 = WGDOS,
    # 4 = run-length encoded.
    if lbpack.n1 in (0, 2):
        data = np.frombuffer(data_bytes, dtype=data_type)
    elif lbpack.n1 == 1:
        data = pp_packing.wgdos_unpack(data_bytes, data_shape[0],
                                       data_shape[1], mdi)
    elif lbpack.n1 == 4:
        data = pp_packing.rle_decode(data_bytes, data_shape[0], data_shape[1], mdi)
    else:
        raise iris.exceptions.NotYetImplementedError(
                'PP fields with LBPACK of %s are not yet supported.' % lbpack)
    # Ensure we have write permission on the data buffer.
    # NOTE(review): np.frombuffer over an immutable bytes object yields a
    # read-only array; setflags(write=True) relies on the numpy version
    # permitting this - confirm against the target numpy.
    data.setflags(write=True)
    # Ensure the data is in the native byte order
    if not data.dtype.isnative:
        data.byteswap(True)
        data.dtype = data.dtype.newbyteorder('=')
    if hasattr(lbpack, 'boundary_packing'):
        # Convert a long string of numbers into a "lateral boundary
        # condition" array, which is split into 4 quartiles, North
        # East, South, West and where North and South contain the corners.
        boundary_packing = lbpack.boundary_packing
        compressed_data = data
        data = np.ma.masked_all(data_shape)
        # Rim plus halo widths give the thickness of each boundary strip.
        boundary_height = boundary_packing.y_halo + boundary_packing.rim_width
        boundary_width = boundary_packing.x_halo + boundary_packing.rim_width
        y_height, x_width = data_shape
        # The height of the east and west components.
        mid_height = y_height - 2 * boundary_height
        n_s_shape = boundary_height, x_width
        e_w_shape = mid_height, boundary_width
        # Keep track of our current position in the array.
        current_posn = 0
        # The compressed stream is laid out North, East, South, West.
        north = compressed_data[:boundary_height*x_width]
        current_posn += len(north)
        data[-boundary_height:, :] = north.reshape(*n_s_shape)
        east = compressed_data[current_posn:
                               current_posn + boundary_width * mid_height]
        current_posn += len(east)
        data[boundary_height:-boundary_height,
             -boundary_width:] = east.reshape(*e_w_shape)
        south = compressed_data[current_posn:
                                current_posn + boundary_height * x_width]
        current_posn += len(south)
        data[:boundary_height, :] = south.reshape(*n_s_shape)
        west = compressed_data[current_posn:
                               current_posn + boundary_width * mid_height]
        current_posn += len(west)
        data[boundary_height:-boundary_height,
             :boundary_width] = west.reshape(*e_w_shape)
    elif lbpack.n2 == 2:
        # lbpack.n2 == 2 means land/sea-mask compressed data; the mask field
        # supplies which grid points are present in the payload.
        if mask is None:
            raise ValueError('No mask was found to unpack the data. '
                             'Could not load.')
        # NOTE(review): np.bool is removed in numpy >= 1.24 - confirm the
        # supported numpy range, or use plain bool.
        land_mask = mask.data.astype(np.bool)
        sea_mask = ~land_mask
        new_data = np.ma.masked_all(land_mask.shape)
        if lbpack.n3 == 1:
            # Land mask packed data.
            new_data.mask = sea_mask
            # Sometimes the data comes in longer than it should be (i.e. it
            # looks like the compressed data is compressed, but the trailing
            # data hasn't been clipped off!).
            new_data[land_mask] = data[:land_mask.sum()]
        elif lbpack.n3 == 2:
            # Sea mask packed data.
            new_data.mask = land_mask
            new_data[sea_mask] = data[:sea_mask.sum()]
        else:
            raise ValueError('Unsupported mask compression.')
        data = new_data
    else:
        # Reform in row-column order
        data.shape = data_shape
    # Mask the array?
    if mdi in data:
        data = ma.masked_values(data, mdi, copy=False)
    return data
# The special headers of the PPField classes which get some improved functionality
_SPECIAL_HEADERS = ('lbtim', 'lbcode', 'lbpack', 'lbproc',
'data', 'data_manager', 'stash', 't1', 't2')
def _header_defn(release_number):
    """
    Return the zero-indexed header definition for a particular release of a PPField.
    """
    # Shift every (1-based) UM header position down by the fixed offset to
    # obtain the 0-based PP header positions.
    um_header = UM_HEADERS[release_number]
    shift = UM_TO_PP_HEADER_OFFSET
    defn = []
    for name, positions in um_header:
        defn.append((name, tuple(pos - shift for pos in positions)))
    return defn
def _pp_attribute_names(header_defn):
    """
    Return the allowed attributes of a PPField:
        all of the normal headers (i.e. not the _SPECIAL_HEADERS),
        the _SPECIAL_HEADERS with '_' prefixed,
        the possible extra data headers.
    """
    plain = [name for name, _ in header_defn if name not in _SPECIAL_HEADERS]
    shadowed = ['_' + name for name in _SPECIAL_HEADERS]
    extra = list(EXTRA_DATA.values())
    return plain + shadowed + extra
class PPField(object):
    """
    A generic class for PP fields - not specific to a particular header release number.
    A PPField instance can easily access the PP header "words" as attributes with some added useful capabilities::
        for field in iris.fileformats.pp.load(filename):
            print field.lbyr
            print field.lbuser
            print field.lbuser[0]
            print field.lbtim
            print field.lbtim.ia
            print field.t1
    """
    # NB. Subclasses must define the attribute HEADER_DEFN to be their
    # zero-based header definition. See PPField2 and PPField3 for examples.
    __metaclass__ = abc.ABCMeta
    __slots__ = ()
    def __init__(self):
        """
        PPField instances are always created empty, and attributes are added subsequently.
        .. seealso::
            For PP field loading see :func:`load`.
        """
    # t1/t2 (the validity and data times) are header-release specific, so
    # are left to the concrete subclasses.
    @abc.abstractproperty
    def t1(self):
        pass
    @abc.abstractproperty
    def t2(self):
        pass
    def __repr__(self):
        """Return a string representation of the PP field."""
        # Define an ordering on the basic header names
        attribute_priority_lookup = {name: loc[0] for name, loc in self.HEADER_DEFN}
        # With the attributes sorted the order will remain stable if extra attributes are added.
        # (Python 2: dict.keys()/.values() return lists, so '+' concatenates.)
        public_attribute_names = attribute_priority_lookup.keys() + EXTRA_DATA.values()
        self_attrs = [(name, getattr(self, name, None)) for name in public_attribute_names]
        self_attrs = filter(lambda pair: pair[1] is not None, self_attrs)
        if hasattr(self, '_data_manager'):
            if self._data_manager is None:
                self_attrs.append( ('data', self.data) )
            else:
                self_attrs.append( ('unloaded_data_manager', self._data_manager) )
                self_attrs.append( ('unloaded_data_proxy', self._data) )
        # sort the attributes by position in the pp header followed, then by alphabetical order.
        attributes = sorted(self_attrs, key=lambda pair: (attribute_priority_lookup.get(pair[0], 999), pair[0]) )
        return 'PP Field' + ''.join(['\n   %s: %s' % (k, v) for k, v in attributes]) + '\n'
    @property
    def stash(self):
        """A stash property giving access to the associated STASH object, now supporting __eq__"""
        # Rebuild the cached STASH whenever lbuser has been changed under it.
        if (not hasattr(self, '_stash') or
                self.lbuser[6] != self._stash.lbuser6() or
                self.lbuser[3] != self._stash.lbuser3()):
            # Floor division keeps the section component an integer under
            # both Python 2 and Python 3 ('/' would give a float on py3).
            self._stash = STASH(self.lbuser[6], self.lbuser[3] // 1000, self.lbuser[3] % 1000)
        return self._stash
    @stash.setter
    def stash(self, stash):
        """Set the STASH code from a STASH instance or an MSI string."""
        if isinstance(stash, basestring):
            self._stash = STASH.from_msi(stash)
        elif isinstance(stash, STASH):
            self._stash = stash
        else:
            raise ValueError('Cannot set stash to {!r}'.format(stash))
        # Keep the lbuser up to date.
        self.lbuser = list(self.lbuser)
        self.lbuser[6] = self._stash.lbuser6()
        self.lbuser[3] = self._stash.lbuser3()
    # lbtim - stored as a SplittableInt exposing the ia/ib/ic digit groups.
    def _lbtim_setter(self, new_value):
        if not isinstance(new_value, SplittableInt):
            # add the ia/ib/ic values for lbtim
            new_value = SplittableInt(new_value, {'ia':slice(2, None), 'ib':1, 'ic':0})
        self._lbtim = new_value
    lbtim = property(lambda self: self._lbtim, _lbtim_setter)
    # lbcode - stored as a SplittableInt exposing the ix/iy digit groups.
    def _lbcode_setter(self, new_value):
        if not isinstance(new_value, SplittableInt):
            # add the ix/iy values for lbcode
            new_value = SplittableInt(new_value, {'iy':slice(0, 2), 'ix':slice(2, 4)})
        self._lbcode = new_value
    lbcode = property(lambda self: self._lbcode, _lbcode_setter)
    # lbpack - stored as a SplittableInt exposing the n1..n5 digit groups.
    def _lbpack_setter(self, new_value):
        if not isinstance(new_value, SplittableInt):
            # add the n1/n2/n3/n4/n5 values for lbpack
            name_mapping = dict(n5=slice(4, None), n4=3, n3=2, n2=1, n1=0)
            new_value = SplittableInt(new_value, name_mapping)
        self._lbpack = new_value
    lbpack = property(lambda self: self._lbpack, _lbpack_setter)
    # lbproc - stored as a BitwiseInt so individual processing flags are
    # addressable (flag1, flag2, ...).
    def _lbproc_setter(self, new_value):
        if not isinstance(new_value, BitwiseInt):
            new_value = BitwiseInt(new_value, num_bits=18)
        self._lbproc = new_value
    lbproc = property(lambda self: self._lbproc, _lbproc_setter)
    @property
    def data(self):
        """The :class:`numpy.ndarray` representing the multidimensional data of the pp file"""
        # Cache the real data on first use
        if self._data_manager is not None:
            self._data = self._data_manager.load(self._data)
            self._data_manager = None
        return self._data
    @data.setter
    def data(self, value):
        # Assigning concrete data discards any deferred-loading manager.
        self._data = value
        self._data_manager = None
    @property
    def calendar(self):
        """Return the calendar of the field."""
        # TODO #577 What calendar to return when ibtim.ic in [0, 3]
        return iris.unit.CALENDAR_GREGORIAN if self.lbtim.ic != 2 else iris.unit.CALENDAR_360_DAY
    def _read_extra_data(self, pp_file, file_reader, extra_len):
        """Read the extra data section and update the self appropriately."""
        # While there is still extra data to decode run this loop
        while extra_len > 0:
            # Each extra-data item starts with an integer encoding
            # ia (element count) * 1000 + ib (the EXTRA_DATA code).
            extra_int_code = struct.unpack_from('>L', file_reader(PP_WORD_DEPTH))[0]
            extra_len -= PP_WORD_DEPTH
            ib = extra_int_code % 1000
            ia = extra_int_code // 1000
            data_len = ia * PP_WORD_DEPTH
            if ib == 10:
                self.field_title = ''.join(struct.unpack_from('>%dc' % data_len, file_reader(data_len))).rstrip('\00')
            elif ib == 11:
                self.domain_title = ''.join(struct.unpack_from('>%dc' % data_len, file_reader(data_len))).rstrip('\00')
            elif ib in EXTRA_DATA:
                attr_name = EXTRA_DATA[ib]
                values = np.fromfile(pp_file, dtype=np.dtype('>f%d' % PP_WORD_DEPTH), count=ia)
                # Ensure the values are in the native byte order
                if not values.dtype.isnative:
                    values.byteswap(True)
                    values.dtype = values.dtype.newbyteorder('=')
                setattr(self, attr_name, values)
            else:
                raise ValueError('Unknown IB value for extra data: %s' % ib)
            extra_len -= data_len
    @property
    def x_bounds(self):
        """Paired x cell bounds, or None if no bounds extra-data was present."""
        if hasattr(self, "x_lower_bound") and hasattr(self, "x_upper_bound"):
            return np.column_stack((self.x_lower_bound, self.x_upper_bound))
    @property
    def y_bounds(self):
        """Paired y cell bounds, or None if no bounds extra-data was present."""
        if hasattr(self, "y_lower_bound") and hasattr(self, "y_upper_bound"):
            return np.column_stack((self.y_lower_bound, self.y_upper_bound))
    def save(self, file_handle):
        """
        Save the PPField to the given file object (typically created with :func:`open`).
        ::
            # to append the field to a file
            a_pp_field.save(open(filename, 'ab'))
            # to overwrite/create a file
            a_pp_field.save(open(filename, 'wb'))
        .. note::
            The fields which are automatically calculated are: 'lbext',
            'lblrec' and 'lbuser[0]'. Some fields are not currently
            populated, these are: 'lbegin', 'lbnrec', 'lbuser[1]'.
        """
        # Before we can actually write to file, we need to calculate the header elements.
        # First things first, make sure the data is big-endian
        data = self.data
        if isinstance(data, ma.core.MaskedArray):
            data = data.filled(fill_value=self.bmdi)
        if data.dtype.newbyteorder('>') != data.dtype:
            # take a copy of the data when byteswapping
            data = data.byteswap(False)
            data.dtype = data.dtype.newbyteorder('>')
        # Create the arrays which will hold the header information
        lb = np.empty(shape=NUM_LONG_HEADERS, dtype=np.dtype(">u%d" % PP_WORD_DEPTH))
        b = np.empty(shape=NUM_FLOAT_HEADERS, dtype=np.dtype(">f%d" % PP_WORD_DEPTH))
        # Populate the arrays from the PPField
        for name, pos in self.HEADER_DEFN:
            try:
                header_elem = getattr(self, name)
            except AttributeError:
                raise AttributeError("PPField.save() could not find %s" % name)
            if pos[0] <= NUM_LONG_HEADERS - UM_TO_PP_HEADER_OFFSET:
                index = slice(pos[0], pos[-1] + 1)
                if isinstance(header_elem, SplittableInt):
                    header_elem = int(header_elem)
                lb[index] = header_elem
            else:
                index = slice(pos[0] - NUM_LONG_HEADERS, pos[-1] - NUM_LONG_HEADERS + 1)
                b[index] = header_elem
        # Although all of the elements are now populated, we still need to update some of the elements in case
        # things have changed (for example, the data length etc.)
        # Set up a variable to represent the datalength of this PPField in WORDS.
        len_of_data_payload = 0
        # set up a list to hold the extra data which will need to be encoded at the end of the data
        extra_items = []
        # iterate through all of the possible extra data fields
        for ib, extra_data_attr_name in EXTRA_DATA.iteritems():
            # try to get the extra data field, returning None if it doesn't exist
            extra_elem = getattr(self, extra_data_attr_name, None)
            if extra_elem is not None:
                # The special case of character extra data must be caught
                if isinstance(extra_elem, basestring):
                    ia = len(extra_elem)
                    # pad any strings up to a multiple of PP_WORD_DEPTH (this length is # of bytes)
                    ia = (PP_WORD_DEPTH - (ia-1) % PP_WORD_DEPTH) + (ia-1)
                    extra_elem = extra_elem.ljust(ia, '\00')
                    # ia is now the datalength in WORDS of the string
                    ia /= PP_WORD_DEPTH
                else:
                    # ia is the datalength in WORDS
                    ia = np.product(extra_elem.shape)
                    # flip the byteorder if the data is not big-endian
                    if extra_elem.dtype.newbyteorder('>') != extra_elem.dtype:
                        # take a copy of the extra data when byte swapping
                        extra_elem = extra_elem.byteswap(False)
                        extra_elem.dtype = extra_elem.dtype.newbyteorder('>')
                # add the number of bytes to the len_of_data_payload variable + the extra integer which will encode ia/ib
                len_of_data_payload += PP_WORD_DEPTH * ia + PP_WORD_DEPTH
                integer_code = 1000 * ia + ib
                extra_items.append( [integer_code, extra_elem] )
                if ia >= 1000:
                    # BUG FIX: report the element count (ia), not the IB
                    # code (ib), in the error message.
                    raise IOError('PP files cannot write extra data with more than '
                                  '1000 elements. Tried to write "%s" which has %s '
                                  'elements.' % (extra_data_attr_name, ia)
                                  )
        HEADER_DICT = dict(self.HEADER_DEFN)
        # populate lbext in WORDS
        lb[HEADER_DICT['lbext'][0]] = len_of_data_payload / PP_WORD_DEPTH
        # Put the data length of pp.data into len_of_data_payload (in BYTES)
        len_of_data_payload += data.size * PP_WORD_DEPTH
        # populate lbrec in WORDS
        lb[HEADER_DICT['lblrec'][0]] = len_of_data_payload / PP_WORD_DEPTH
        # populate lbuser[0] to have the data's datatype
        if data.dtype == np.dtype('>f4'):
            lb[HEADER_DICT['lbuser'][0]] = 1
        elif data.dtype == np.dtype('>f8'):
            warnings.warn("Downcasting array precision from float64 to float32 for save."
                          "If float64 precision is required then please save in a different format")
            data = data.astype('>f4')
            lb[HEADER_DICT['lbuser'][0]] = 1
        elif data.dtype == np.dtype('>i4'):
            # NB: there is no physical difference between lbuser[0] of 2 or 3 so we encode just 2
            lb[HEADER_DICT['lbuser'][0]] = 2
        else:
            raise IOError('Unable to write data array to a PP file. The datatype was %s.' % data.dtype)
        # NB: lbegin, lbnrec, lbuser[1] not set up
        # Now that we have done the manouvering required, write to the file...
        if not isinstance(file_handle, file):
            raise TypeError('The file_handle argument must be an instance of a Python file object, but got %r. \n'
                            'e.g. open(filename, "wb") to open a binary file with write permission.' % type(file_handle))
        pp_file = file_handle
        # header length
        pp_file.write(struct.pack(">L", PP_HEADER_DEPTH))
        # 49 integers
        lb.tofile(pp_file)
        # 16 floats
        b.tofile(pp_file)
        #Header length (again)
        pp_file.write(struct.pack(">L", PP_HEADER_DEPTH))
        # Data length (including extra data length)
        pp_file.write(struct.pack(">L", int(len_of_data_payload)))
        # the data itself
        if lb[HEADER_DICT['lbpack'][0]] == 0:
            data.tofile(pp_file)
        else:
            raise NotImplementedError('Writing packed pp data with lbpack of %s '
                                      'is not supported.' % lb[HEADER_DICT['lbpack'][0]])
        # extra data elements
        for int_code, extra_data in extra_items:
            pp_file.write(struct.pack(">L", int(int_code)))
            if isinstance(extra_data, basestring):
                pp_file.write(struct.pack(">%sc" % len(extra_data), *extra_data))
            else:
                extra_data = extra_data.astype(np.dtype('>f4'))
                extra_data.tofile(pp_file)
        # Data length (again)
        pp_file.write(struct.pack(">L", int(len_of_data_payload)))
    ##############################################################
    #
    # From here on define helper methods for PP -> Cube conversion.
    #
    def regular_points(self, xy):
        """Return regular points from the PPField, or fail if not regular.
        Args:
        * xy - a string, "x" or "y" to specify the dimension for which to return points.
        .. deprecated:: 1.5
        """
        msg = "The 'regular_points' method is deprecated."
        warnings.warn(msg, UserWarning, stacklevel=2)
        if xy.lower() == "x":
            bz = self.bzx
            bd = self.bdx
            count = self.lbnpt
        elif xy.lower() == "y":
            bz = self.bzy
            bd = self.bdy
            count = self.lbrow
        else:
            raise ValueError("'x' or 'y' not supplied")
        return (bz + bd) + bd * np.arange(count, dtype=np.float32)
    def regular_bounds(self, xy):
        """Return regular bounds from the PPField, or fail if not regular.
        Args:
        * xy - a string, "x" or "y" to specify the dimension for which to return points.
        .. deprecated:: 1.5
        """
        msg = "The 'regular_bounds' method is deprecated."
        warnings.warn(msg, UserWarning, stacklevel=2)
        if xy.lower() == "x":
            delta = 0.5 * self.bdx
        elif xy.lower() == "y":
            delta = 0.5 * self.bdy
        else:
            raise ValueError("'x' or 'y' not supplied")
        points = self.regular_points(xy)
        return np.concatenate([[points - delta], [points + delta]]).T
    def time_unit(self, time_unit, epoch='epoch'):
        """Return an iris time Unit of the given resolution in this field's calendar."""
        return iris.unit.Unit('%s since %s' % (time_unit, epoch), calendar=self.calendar)
    def coord_system(self):
        """Return a CoordSystem for this PPField.
        Returns:
            Currently, a :class:`~iris.coord_systems.GeogCS` or :class:`~iris.coord_systems.RotatedGeogCS`.
        """
        geog_cs = iris.coord_systems.GeogCS(EARTH_RADIUS)
        # A pole not at (90, 0) indicates a rotated grid.
        if self.bplat != 90.0 or self.bplon != 0.0:
            geog_cs = iris.coord_systems.RotatedGeogCS(self.bplat, self.bplon, ellipsoid=geog_cs)
        return geog_cs
    def _x_coord_name(self):
        # TODO: Remove once we have the ability to derive this in the rules.
        x_name = "longitude"
        if isinstance(self.coord_system(), iris.coord_systems.RotatedGeogCS):
            x_name = "grid_longitude"
        return x_name
    def _y_coord_name(self):
        # TODO: Remove once we have the ability to derive this in the rules.
        y_name = "latitude"
        if isinstance(self.coord_system(), iris.coord_systems.RotatedGeogCS):
            y_name = "grid_latitude"
        return y_name
    def copy(self):
        """
        Returns a deep copy of this PPField.
        Returns:
            A copy instance of the :class:`PPField`.
        """
        return self._deepcopy({})
    def __deepcopy__(self, memo):
        return self._deepcopy(memo)
    def _deepcopy(self, memo):
        # Copy every populated slot attribute into a fresh instance.
        field = self.__class__()
        for attr in self.__slots__:
            if hasattr(self, attr):
                value = getattr(self, attr)
                # Cope with inability to deepcopy a 0-d NumPy array.
                if attr == '_data' and value is not None and value.ndim == 0:
                    setattr(field, attr, np.array(deepcopy(value[()], memo)))
                else:
                    setattr(field, attr, deepcopy(value, memo))
        return field
    def __eq__(self, other):
        # Fields are equal when every populated slot attribute matches;
        # an attribute present on only one side makes them unequal.
        result = NotImplemented
        if isinstance(other, PPField):
            result = True
            for attr in self.__slots__:
                attrs = [hasattr(self, attr), hasattr(other, attr)]
                if all(attrs):
                    if not np.all(getattr(self, attr) == getattr(other, attr)):
                        result = False
                        break
                elif any(attrs):
                    result = False
                    break
        return result
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is not NotImplemented:
            result = not result
        return result
class PPField2(PPField):
    """
    A class to hold a single field from a PP file, with a header release number of 2.

    """
    HEADER_DEFN = _header_defn(2)

    __slots__ = _pp_attribute_names(HEADER_DEFN)

    def _get_t1(self):
        # Lazily assemble the composite datetime from the individual
        # header words, caching the result in _t1.
        if not hasattr(self, '_t1'):
            self._t1 = netcdftime.datetime(self.lbyr, self.lbmon, self.lbdat, self.lbhr, self.lbmin)
        return self._t1

    def _set_t1(self, dt):
        # Scatter the datetime back into the individual header words.
        self.lbyr = dt.year
        self.lbmon = dt.month
        self.lbdat = dt.day
        self.lbhr = dt.hour
        self.lbmin = dt.minute
        self.lbday = int(dt.strftime('%j'))
        # Invalidate the cached value so _get_t1 rebuilds it next time.
        if hasattr(self, '_t1'):
            delattr(self, '_t1')

    t1 = property(_get_t1, _set_t1, None,
                  "A netcdftime.datetime object consisting of the lbyr, lbmon, lbdat, lbhr, and lbmin attributes.")

    def _get_t2(self):
        # Lazily assemble the composite datetime for the second ("d")
        # set of time header words, caching the result in _t2.
        if not hasattr(self, '_t2'):
            self._t2 = netcdftime.datetime(self.lbyrd, self.lbmond, self.lbdatd, self.lbhrd, self.lbmind)
        return self._t2

    def _set_t2(self, dt):
        # Scatter the datetime back into the "d" header words.
        self.lbyrd = dt.year
        self.lbmond = dt.month
        self.lbdatd = dt.day
        self.lbhrd = dt.hour
        self.lbmind = dt.minute
        self.lbdayd = int(dt.strftime('%j'))
        # Invalidate the cached value so _get_t2 rebuilds it next time.
        if hasattr(self, '_t2'):
            delattr(self, '_t2')

    t2 = property(_get_t2, _set_t2, None,
                  "A netcdftime.datetime object consisting of the lbyrd, lbmond, lbdatd, lbhrd, and lbmind attributes.")
class PPField3(PPField):
    """
    A class to hold a single field from a PP file, with a header release number of 3.

    """
    HEADER_DEFN = _header_defn(3)

    __slots__ = _pp_attribute_names(HEADER_DEFN)

    def _get_t1(self):
        # Lazily assemble the composite datetime (release 3 headers also
        # carry seconds), caching the result in _t1.
        if not hasattr(self, '_t1'):
            self._t1 = netcdftime.datetime(self.lbyr, self.lbmon, self.lbdat, self.lbhr, self.lbmin, self.lbsec)
        return self._t1

    def _set_t1(self, dt):
        # Scatter the datetime back into the individual header words.
        self.lbyr = dt.year
        self.lbmon = dt.month
        self.lbdat = dt.day
        self.lbhr = dt.hour
        self.lbmin = dt.minute
        self.lbsec = dt.second
        # Invalidate the cached value so _get_t1 rebuilds it next time.
        if hasattr(self, '_t1'):
            delattr(self, '_t1')

    t1 = property(_get_t1, _set_t1, None,
                  "A netcdftime.datetime object consisting of the lbyr, lbmon, lbdat, lbhr, lbmin, and lbsec attributes.")

    def _get_t2(self):
        # Lazily assemble the composite datetime for the second ("d")
        # set of time header words, caching the result in _t2.
        if not hasattr(self, '_t2'):
            self._t2 = netcdftime.datetime(self.lbyrd, self.lbmond, self.lbdatd, self.lbhrd, self.lbmind, self.lbsecd)
        return self._t2

    def _set_t2(self, dt):
        # Scatter the datetime back into the "d" header words.
        self.lbyrd = dt.year
        self.lbmond = dt.month
        self.lbdatd = dt.day
        self.lbhrd = dt.hour
        self.lbmind = dt.minute
        self.lbsecd = dt.second
        # Invalidate the cached value so _get_t2 rebuilds it next time.
        if hasattr(self, '_t2'):
            delattr(self, '_t2')

    t2 = property(_get_t2, _set_t2, None,
                  "A netcdftime.datetime object consisting of the lbyrd, lbmond, lbdatd, lbhrd, lbmind, and lbsecd attributes.")
# Map the LBREL header release number to the PPField subclass that
# understands that header layout (see make_pp_field below).
PP_CLASSES = {
    2: PPField2,
    3: PPField3
}
def make_pp_field(header_values):
    """Create a PPField subclass instance appropriate to the LBREL header word."""
    # Choose a PP field class from the value of LBREL.
    lbrel = header_values[21]
    if lbrel not in PP_CLASSES:
        raise ValueError('Unsupported header release number: {}'.format(lbrel))
    field = PP_CLASSES[lbrel]()
    # Populate each named header attribute; scalars come from a single
    # word, composites from a contiguous slice of words.
    for name, loc in field.HEADER_DEFN:
        if len(loc) == 1:
            setattr(field, name, header_values[loc[0]])
        else:
            setattr(field, name, header_values[loc[0]:loc[-1] + 1])
    return field
# Placeholder recording where (and how) a field's data payload can be read
# from disk later. NB. the typename now matches the variable name - it was
# previously the inconsistent 'DeferredBytes', which made reprs misleading.
DeferredArrayBytes = collections.namedtuple('DeferredArrayBytes',
                                            'fname, position, n_bytes, dtype')
# Raw payload bytes already read from disk, awaiting conversion to a numpy
# array (see _create_field_data).
LoadedArrayBytes = collections.namedtuple('LoadedArrayBytes', 'bytes, dtype')
def load(filename, read_data=False):
    """
    Return an iterator of PPFields from the named PP file.

    Args:

    * filename - string of the filename to load.

    Kwargs:

    * read_data - boolean
        When True the data payload is read immediately; when False (the
        default) an empty data manager is attached instead, allowing the
        data to be loaded lazily on demand.

    To iterate through all of the fields in a pp file::

        for field in iris.fileformats.pp.load(filename):
            print field

    """
    raw_fields = _field_gen(filename, read_data_bytes=read_data)
    return _interpret_fields(raw_fields)
def _interpret_fields(fields):
    """
    Turn the fields read with load and FF2PP._extract_field into useable
    fields. One of the primary purposes of this function is to either convert
    "deferred bytes" into "deferred arrays" or "loaded bytes" into actual
    numpy arrays (via the _create_field_data) function.
    """
    land_mask = None
    # Land-compressed fields seen before the mask itself; yielded at the end.
    landmask_compressed_fields = []
    for field in fields:
        # Store the first reference to a land mask, and use this as the
        # definitive mask for future fields in this generator.
        if land_mask is None and field.stash == 'm01s00i030':
            land_mask = field

        # Handle land compressed data payloads (lbpack n2 == 2).
        if field.lbpack.n2 == 2:
            # If we don't have the land mask yet, we shouldn't yield the field.
            if land_mask is None:
                landmask_compressed_fields.append(field)
                continue

            # Land compressed fields don't have a lbrow and lbnpt;
            # take the grid shape from the mask instead.
            field.lbrow, field.lbnpt = land_mask.lbrow, land_mask.lbnpt

        data_shape = (field.lbrow, field.lbnpt)
        _create_field_data(field, data_shape, land_mask)
        yield field

    # Deal with any land-compressed fields that arrived before the mask.
    if landmask_compressed_fields:
        if land_mask is None:
            warnings.warn('Landmask compressed fields existed without a '
                          'landmask to decompress with. The data will have '
                          'a shape of (0, 0) and will not read.')
            mask_shape = (0, 0)
        else:
            mask_shape = (land_mask.lbrow, land_mask.lbnpt)

        for field in landmask_compressed_fields:
            field.lbrow, field.lbnpt = mask_shape
            _create_field_data(field, (field.lbrow, field.lbnpt), land_mask)
            yield field
def _create_field_data(field, data_shape, land_mask):
    """
    Modifies a field's ``_data`` attribute either by:
     * converting DeferredArrayBytes into a "deferred array".
     * converting LoadedArrayBytes into an actual numpy array.
    """
    if isinstance(field._data, LoadedArrayBytes):
        # Eagerly-loaded bytes: decode them into a numpy array now.
        loaded = field._data
        field._data = _read_data_bytes(loaded.bytes, field.lbpack, data_shape,
                                       loaded.dtype, field.bmdi, land_mask)
        field._data_manager = None
    else:
        # Deferred bytes: wrap a read-on-demand proxy in a 0-d array and
        # attach a data manager describing the eventual shape and dtype.
        deferred = field._data
        proxy = PPDataProxy(deferred.fname, deferred.position,
                            deferred.n_bytes, field.lbpack, land_mask)
        # NB. This makes a 0-dimensional array.
        field._data = np.array(proxy)
        field._data_manager = DataManager(data_shape, deferred.dtype,
                                          field.bmdi)
def _field_gen(filename, read_data_bytes):
    """
    Yield the PPFields from the named PP file, in file order.

    Args:

    * filename - name of the PP file to read.
    * read_data_bytes - when True, each field's raw data payload is read
      immediately (stored as LoadedArrayBytes); when False, only enough
      context to read it later is recorded (as DeferredArrayBytes).

    """
    # Use a context manager so the file is closed even when the consumer
    # abandons the generator early or a parse error is raised mid-file;
    # the previous trailing close() call was skipped in both cases.
    with open(filename, 'rb') as pp_file:
        # Get a reference to the seek/read methods on the file (these are
        # accessed several times per header so this gives a small speed-up).
        pp_file_seek = pp_file.seek
        pp_file_read = pp_file.read

        # Keep reading until we reach the end of file.
        while True:
            # Move past the leading header length word.
            pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
            # Get the LONG header entries.
            header_longs = np.fromfile(pp_file, dtype='>i%d' % PP_WORD_DEPTH,
                                       count=NUM_LONG_HEADERS)
            # Nothing returned => EOF.
            if len(header_longs) == 0:
                break
            # Get the FLOAT header entries.
            header_floats = np.fromfile(pp_file, dtype='>f%d' % PP_WORD_DEPTH,
                                        count=NUM_FLOAT_HEADERS)
            header = tuple(header_longs) + tuple(header_floats)

            # Make a PPField of the appropriate sub-class (depends on
            # header release number).
            pp_field = make_pp_field(header)

            # Skip the trailing 4-byte word containing the header length.
            pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)

            # Read the word telling me how long the data + extra data is.
            # This value is # of bytes.
            len_of_data_plus_extra = struct.unpack_from(
                '>L', pp_file_read(PP_WORD_DEPTH))[0]
            # Sanity-check against the LBLREC header word.
            if len_of_data_plus_extra != pp_field.lblrec * PP_WORD_DEPTH:
                raise ValueError('LBLREC has a different value to the integer recorded after the '
                                 'header in the file (%s and %s).' % (pp_field.lblrec * PP_WORD_DEPTH,
                                                                      len_of_data_plus_extra))

            # Calculate the extra length in bytes.
            extra_len = pp_field.lbext * PP_WORD_DEPTH

            # Derive size and datatype of payload.
            data_len = len_of_data_plus_extra - extra_len
            dtype = LBUSER_DTYPE_LOOKUP.get(pp_field.lbuser[0],
                                            LBUSER_DTYPE_LOOKUP['default'])

            if read_data_bytes:
                # Read the actual bytes. This can then be converted to a
                # numpy array at a higher level.
                pp_field._data = LoadedArrayBytes(pp_file.read(data_len), dtype)
            else:
                # Provide enough context to read the data bytes later on.
                pp_field._data = DeferredArrayBytes(filename, pp_file.tell(),
                                                    data_len, dtype)
                # Seek over the actual data payload.
                pp_file_seek(data_len, os.SEEK_CUR)

            # Do we have any extra data to deal with?
            if extra_len:
                pp_field._read_extra_data(pp_file, pp_file_read, extra_len)

            # Skip that last 4 byte record telling me the length of the
            # field I have already read.
            pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)

            yield pp_field
def _ensure_load_rules_loaded():
    """Makes sure the standard conversion and verification rules are loaded."""
    # Uses these module-level variables.
    global _load_rules, _cross_reference_rules

    rules = iris.fileformats.rules
    basepath = iris.config.CONFIG_PATH

    if _load_rules is None:
        _load_rules = rules.RulesContainer(
            os.path.join(basepath, 'pp_rules.txt'))

    if _cross_reference_rules is None:
        _cross_reference_rules = rules.RulesContainer(
            os.path.join(basepath, 'pp_cross_reference_rules.txt'),
            rule_type=rules.ObjectReturningRule)
def add_load_rules(filename):
    """
    Registers a rules file for use during the PP load process.

    Registered files are processed after the standard conversion rules,
    and in the order they were registered.

    .. deprecated:: 1.5

    """
    warnings.warn("The 'add_load_rules' function is deprecated.",
                  UserWarning, stacklevel=2)

    # Uses this module-level variable.
    global _load_rules
    if _load_rules is None:
        _load_rules = iris.fileformats.rules.RulesContainer(filename)
    else:
        _load_rules.import_rules(filename)
def reset_load_rules():
    """Resets the PP load process to use only the standard conversion rules."""
    # Dropping the container is enough: _ensure_load_rules_loaded() will
    # re-create it from the standard rules file on next use.
    global _load_rules
    _load_rules = None
def _ensure_save_rules_loaded():
    """Makes sure the standard save rules are loaded."""
    # Uses this module-level variable.
    global _save_rules

    if _save_rules is None:
        # Load the pp save rules from the standard configuration location.
        rules_path = os.path.join(iris.config.CONFIG_PATH,
                                  'pp_save_rules.txt')
        _save_rules = iris.fileformats.rules.RulesContainer(
            rules_path, iris.fileformats.rules.ProcedureRule)
def add_save_rules(filename):
    """
    Registers a rules file for use during the PP save process.

    Registered files are processed after the standard conversion rules, and in
    the order they were registered.

    """
    # Ensure the standard save rules exist before appending user rules.
    _ensure_save_rules_loaded()
    _save_rules.import_rules(filename)
def reset_save_rules():
    """Resets the PP save process to use only the standard conversion rules."""
    # Dropping the container is enough: _ensure_save_rules_loaded() will
    # re-create it from the standard rules file on next use.
    global _save_rules
    _save_rules = None
def load_cubes(filenames, callback=None):
    """
    Loads cubes from a list of pp filenames.

    Args:

    * filenames - list of pp filenames to load

    Kwargs:

    * callback - a function which can be passed on to :func:`iris.io.run_callback`

    .. note::

        The resultant cubes may not be in the order that they are in the file (order
        is not preserved when there is a field with orography references)

    """
    # Delegate to the generic variable loader, using the plain PP field
    # loader defined above.
    return _load_cubes_variable_loader(filenames, callback, load)
def _load_cubes_variable_loader(filenames, callback, loading_function,
                                loading_function_kwargs=None):
    # Bundle the field-level loading function together with the PP
    # conversion rules, then hand cube construction over to the generic
    # rules-based loader.
    kwargs = loading_function_kwargs or {}
    pp_loader = iris.fileformats.rules.Loader(
        loading_function, kwargs,
        iris.fileformats.pp_rules.convert, _load_rules)
    return iris.fileformats.rules.load_cubes(filenames, callback, pp_loader)
def save(cube, target, append=False, field_coords=None):
    """
    Use the PP saving rules (and any user rules) to save a cube to a PP file.

    Args:

    * cube - A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of cubes.
    * target - A filename or open file handle.

    Kwargs:

    * append - Whether to start a new file afresh or add the cube(s) to the end of the file.
        Only applicable when target is a filename, not a file handle.
        Default is False.

    * field_coords - list of 2 coords or coord names which are to be used for
        reducing the given cube into 2d slices, which will ultimately
        determine the x and y coordinates of the resulting fields.
        If None, the final two dimensions are chosen for slicing.

    See also :func:`iris.io.save`.

    """
    # Open issues
    # Could use rules in "sections" ... e.g. to process the extensive dimensions; ...?
    # Could pre-process the cube to add extra convenient terms?
    #    e.g. x-coord, y-coord ... but what about multiple coordinates on the dimension?

    # How to perform the slicing?
    #   Do we always slice in the last two dimensions?
    #   Not all source data will contain lat-lon slices.
    # What do we do about dimensions with multiple coordinates?

    # Deal with:
    #   LBLREC - Length of data record in words (incl. extra data)
    #       Done on save(*)
    #   LBUSER[0] - Data type
    #       Done on save(*)
    #   LBUSER[1] - Start address in DATA (?! or just set to "null"?)
    #   BLEV - Level - the value of the coordinate for LBVC

    # *) With the current on-save way of handling LBLREC and LBUSER[0] we can't
    # check if they've been set correctly without *actually* saving as a binary
    # PP file. That also means you can't use the same reference.txt file for
    # loaded vs saved fields (unless you re-load the saved field!).

    # Set to (or leave as) "null":
    #   LBEGIN - Address of start of field in direct access dataset
    #   LBEXP - Experiment identification
    #   LBPROJ - Fields file projection number
    #   LBTYP - Fields file field type code
    #   LBLEV - Fields file level code / hybrid height model level

    # Build confidence by having a PP object that records which header items
    # have been set, and only saves if they've all been set?
    #   Watch out for extra-data.

    # On the flip side, record which Cube metadata has been "used" and flag up
    # unused?

    _ensure_save_rules_loaded()

    # pp file: accept either a filename (opened here, closed at the end)
    # or an already-open binary file handle (left open for the caller).
    if isinstance(target, basestring):
        pp_file = open(target, "ab" if append else "wb")
    elif hasattr(target, "write"):
        if hasattr(target, "mode") and "b" not in target.mode:
            raise ValueError("Target not binary")
        pp_file = target
    else:
        raise ValueError("Can only save pp to filename or writable")

    n_dims = len(cube.shape)
    if n_dims < 2:
        raise ValueError('Unable to save a cube of fewer than 2 dimensions.')

    if field_coords is not None:
        # cast the given coord/coord names into cube coords
        field_coords = cube._as_list_of_coords(field_coords)
        if len(field_coords) != 2:
            raise ValueError('Got %s coordinates in field_coords, expecting exactly 2.' % len(field_coords))
    else:
        # default to the last two dimensions (if result of coords is an empty list, will
        # raise an IndexError)
        # NB watch out for the ordering of the dimensions
        field_coords = (cube.coords(dimensions=n_dims-2)[0], cube.coords(dimensions=n_dims-1)[0])

    # Save each named or latlon slice2D in the cube
    for slice2D in cube.slices(field_coords):
        # Start with a blank PPField
        pp_field = PPField3()

        # Set all items to 0 because we need lbuser, lbtim
        # and some others to be present before running the rules.
        for name, positions in pp_field.HEADER_DEFN:
            # Establish whether field name is integer or real
            default = 0 if positions[0] <= NUM_LONG_HEADERS - UM_TO_PP_HEADER_OFFSET else 0.0
            # Establish whether field position is scalar or composite
            if len(positions) > 1:
                default = [default] * len(positions)
            setattr(pp_field, name, default)

        # Some defaults should not be 0
        pp_field.lbrel = 3   # Header release 3.
        pp_field.lbcode = 1  # Grid code.
        pp_field.bmks = 1.0  # Some scaley thing.
        pp_field.lbproc = 0

        # Set the data
        pp_field.data = slice2D.data

        # Run the PP save rules on the slice2D, to fill the PPField,
        # recording the rules that were used
        rules_result = _save_rules.verify(slice2D, pp_field)
        verify_rules_ran = rules_result.matching_rules

        # Log the rules used
        iris.fileformats.rules.log('PP_SAVE', target if isinstance(target, basestring) else target.name, verify_rules_ran)

        # Write to file
        pp_field.save(pp_file)

    # Only close the handle if we opened it ourselves.
    if isinstance(target, basestring):
        pp_file.close()
|
Class N. Required in addition to Class A, A-L, AB-L, X, XJ, XS license. See the Hunting Regulations (available at license agents in July) for additional information and application procedure.
Class RB and RG. One for archery and one for firearms — sold separately. Must be accompanied by a Class A, A-L, AB-L, X, XS, or XJ license. See the Hunting Regulations for additional information.
Class RM. Must be accompanied by a Class A, A-L, AB-L, X, XS, or XJ license. See the Hunting Regulations booklet for additional information.
Class BG. Required of Class A license holders to hunt deer, turkey, bear, and boar with a bow or muzzleloader.
Class NN. Must be accompanied by a Class E license. A limited number of Class NN stamps are available for specified counties. See Hunting Regulations (available at license agents in July) for application procedure.
Class RRG. Required in addition to Class E license. See Hunting Regulations booklet for additional information.
Class RRB and RRM. One for archery and one for muzzleloader sold separately. Must be accompanied by a Class E license and, if for archery also accompanied by Class UU, and if for muzzleloader, also accompanied by a Class VV. See the Hunting Regulations (available at licensed agents in July) for additional information.
Class UU. Must be accompanied by a Class E License.
Class VV. Must be accompanied by a Class E license.
Required for all licensed hunters & anglers, except holders of Class X, XJ, A-L, B-L and AB-L licenses (Class CS).
Class X for ages 18 through 64. Includes Class CS and BG Stamps.
Class XJ. Similar to Class X but for ages 15 through 17.
Class A-1. For ages 21 or older. Must be accompanied by a Class A, A-L, AB-L, X, E, EE, H, J, or free license.
Required for all licensed hunters and anglers (Class CS/LE).
Class I. Required on National Forests in addition to Class E, F, EE, H, or LL license.
Class DS. Required to hunt or pursue bear. Required in addition to a Class A+BG, A-L, AB-L, EE, X, or XJ license.
Class WW. Must be accompanied by a Class E License.
All licensed migratory bird hunters must possess a valid HIP registration card: Available at all license agents and DNR offices.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import pkg_resources
import codecs

# Imported so the distribution version is read from the package itself
# (fsaipe.__version__ below).
import fsaipe

# The PyPI long description is taken verbatim from the README.
with codecs.open('README.rst', encoding='utf-8') as f:
    long_description = f.read()

# Install requirements are maintained in requirements.txt and parsed here.
with open("requirements.txt", "r") as f:
    install_requires = [str(req) for req in pkg_resources.parse_requirements(f)]

setup(
    name='saipe',
    version=fsaipe.__version__,
    license="Apache",
    description='Flask SqlAlchemy In Place Editor',
    long_description=long_description,
    author='Gustavo vargas',
    author_email='xgvargas@gmail.com',
    url='https://github.com/xgvargas/saipe',
    # py_modules = ['saipe'],
    packages = ['fsaipe'],
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
)
|
The collapse of a structure is often the furthest thing from the mind of an owner, engineer, or constructor. Yet, structural collapses do occur, and often with tragic consequences. When disaster strikes, it is critical to engage a forensic engineer experienced with collapse investigation. Why? Because understanding how and why a structure can fail catastrophically requires a specialist; it is not the same process as design. The investigative process involves methodically examining and analyzing the available evidence to determine the root cause and identify lessons to avoid similar events in the future.
CTLGroup’s forensic engineers have been involved in the investigation of some of the most disastrous collapses of in-service and under-construction structures. We understand the importance of scene documentation and evidence preservation. We follow the Scientific Method of investigation, developing and testing hypotheses based on observation, experimentation, and analysis. Ultimately, we find answers and help our clients bring closure to unfortunate ordeals.
|
# -*- coding: utf-8 -*-
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.files import File
from legacy.models import legacyLeaflet, legacyParty
from leaflets.models import Leaflet, LeafletImage
from constituencies.models import Constituency
from uk_political_parties.models import Party
class Command(BaseCommand):
def clean_legacy_leaflet(self, legacy_leaflet, party=None, constituency=None):
data = legacy_leaflet.__dict__.copy()
del data['publisher_party_id']
del data['_publisher_party_cache']
del data['_state']
data['publisher_party'] = party
data['constituency'] = constituency
if data.get('live'):
data['status'] = 'live'
else:
data['status'] = 'removed'
del data['live']
return data
def clean_legacy_leaflet_image(self, legacy_image):
data = {}
key = "%s.jpg" % legacy_image.image_key
if getattr(settings, 'IMAGE_LOCAL_CACHE'):
image_path = os.path.join(
settings.IMAGE_LOCAL_CACHE,
key
)
if os.path.exists(image_path):
print "Exists"
f = open(image_path, 'r')
data['image'] = File(f)
else:
image_path = os.path.join(
settings.IMAGE_LOCAL_CACHE,
'large',
key
)
if os.path.exists(image_path):
print "Exists"
f = open(image_path, 'r')
data['image'] = File(f)
else:
print "Doesn't exist"
return data
def clean_constituency(self, con):
con_name = con.constituency.name
if con_name == "Ynys Mon":
con_name = "Ynys Môn"
if con_name == "Cotswold":
con_name = "The Cotswolds"
if con_name == "Taunton":
con_name = "Taunton Deane"
try:
con = Constituency.objects.get(name__iexact=con_name)
except Constituency.DoesNotExist:
con_name = ", ".join(con_name.split(' ', 1))
con = Constituency.objects.get(name=con_name)
return con
def handle(self, **options):
for legacy_leaflet in legacyLeaflet.objects.all():
if not legacy_leaflet.date_uploaded:
if legacy_leaflet.date_delivered:
legacy_leaflet.date_uploaded = legacy_leaflet.date_delivered
if legacy_leaflet.date_uploaded:
if not bool(legacy_leaflet.publisher_party_id):
party = None
else:
party = Party.objects.find_party_by_name(
legacy_leaflet.publisher_party.name)
cons = legacy_leaflet.legacyleafletconstituency_set.all()
con = None
if cons:
con = cons[0]
con = self.clean_constituency(con)
# import ipdb
# ipdb.set_trace()
new_leaflet, created = Leaflet.objects.update_or_create(
pk=legacy_leaflet.pk,
defaults=self.clean_legacy_leaflet(
legacy_leaflet,
party,
constituency=con
))
for legacy_image in legacy_leaflet.images.all():
new_image, created = LeafletImage.objects.update_or_create(
leaflet=new_leaflet,
legacy_image_key=legacy_image.image_key,
defaults=self.clean_legacy_leaflet_image(legacy_image))
|
Click on links for event details.
BOLD and SGA are collecting non-perishable food items for the less fortunate in Macon. Bring donations to Game Room in SLC thru November 22.
From 9:00 am to 3:00 pm each day Monday, Nov. 26, thru Thursday, Nov. 29, students, faculty, staff, alumni and interested community members can participate at MGA’s Macon Campus Quad near the library to create a public art installation.
Please bring gently used coats. Donation locations: COAS Lobby; Math building billions board; PSC Lobby; Student Life Center lobby.
Stay Informed with MGA’s Student-Run Media. KnighTVision, video production lab; The Fall Line Review, a literary journal; and The Statement, the student-led newspaper are forum enterprises staffed with students who are free to select the materials to be published. Watch the latest broadcast at KnighTVision.
Events, Knightly News, Students Knightly News. permalink.
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from slave.recipe_config import BadConf
from slave.recipe_config_types import Path
from slave import recipe_config
from RECIPE_MODULES.chromium import CONFIG_CTX
@CONFIG_CTX()
def v8(c):
    # Base configuration for building v8 standalone.
    targ_arch = c.gyp_env.GYP_DEFINES.get('target_arch')
    if not targ_arch:  # pragma: no cover
        raise recipe_config.BadConf('v8 must have a valid target_arch.')
    c.gyp_env.GYP_DEFINES['v8_target_arch'] = targ_arch
    # The 'component' define is not used for standalone v8 builds.
    del c.gyp_env.GYP_DEFINES['component']
    c.build_dir = Path('[CHECKOUT]', 'out')
    c.compile_py.build_tool = 'make'

    # Platform-specific build tool and output location overrides.
    if c.HOST_PLATFORM == 'mac':
        c.compile_py.build_tool = 'xcode'
    elif c.HOST_PLATFORM == 'win':
        c.compile_py.build_tool = 'vs'
        c.build_dir = Path('[CHECKOUT]', 'build')

    if c.BUILD_CONFIG == 'Debug':
        c.gyp_env.GYP_DEFINES['v8_optimized_debug'] = 1

    # Chromium adds '_x64' to the output folder, which is neither needed nor
    # understood when compiling v8 standalone.
    if c.HOST_PLATFORM == 'win' and c.TARGET_BITS == 64:
        c.build_config_fs = c.BUILD_CONFIG
        c.compile_py.pass_arch_flag = True
# Each of the following config contexts layers a single GYP define on top
# of the base 'v8' configuration above.

@CONFIG_CTX(includes=['v8'])
def interpreted_regexp(c):
    c.gyp_env.GYP_DEFINES['v8_interpreted_regexp'] = 1

@CONFIG_CTX(includes=['v8'])
def no_i18n(c):
    c.gyp_env.GYP_DEFINES['v8_enable_i18n_support'] = 0

@CONFIG_CTX(includes=['v8'])
def no_lsan(c):
    c.gyp_env.GYP_DEFINES['lsan'] = 0

@CONFIG_CTX(includes=['v8'])
def no_snapshot(c):
    c.gyp_env.GYP_DEFINES['v8_use_snapshot'] = 'false'

@CONFIG_CTX(includes=['v8'])
def novfp3(c):
    c.gyp_env.GYP_DEFINES['v8_can_use_vfp3_instructions'] = 'false'

@CONFIG_CTX(includes=['v8'])
def no_optimized_debug(c):
    # Only meaningful for Debug builds; 0 overrides the base config's 1.
    if c.BUILD_CONFIG == 'Debug':
        c.gyp_env.GYP_DEFINES['v8_optimized_debug'] = 0

@CONFIG_CTX(includes=['v8'])
def optimized_debug(c):
    # Only meaningful for Debug builds; 2 overrides the base config's 1.
    if c.BUILD_CONFIG == 'Debug':
        c.gyp_env.GYP_DEFINES['v8_optimized_debug'] = 2

@CONFIG_CTX(includes=['v8'])
def verify_heap(c):
    c.gyp_env.GYP_DEFINES['v8_enable_verify_heap'] = 1

@CONFIG_CTX(includes=['v8'])
def vtunejit(c):
    c.gyp_env.GYP_DEFINES['v8_enable_vtunejit'] = 1
|
Let’s have some fun with Nodejs.
Personally I enjoy coding a lot in Vim, even more than in Visual Studio or Webstorm. Why, you might ask. Well, because Vim is extremely lightweight and totally configurable.
Also, I like working on a Linux box, because working from the command line is a huge productivity boost. It takes some time to get used to though.
I blogged about my Linux setup on my other blog. I also spent some time getting to know Nodejs here.
To follow the tutorial, you need to have Node and NPM (Node Package Manager, best comparable with Nuget) installed.
With Node and NPM installed, you also may want to install Mongodb.
The Member Admin tool contains Members for a club or whatever. You can give the members a login, so that they can update their own information. You, as an admin, get to see all the member info, while the members can only see their own information.
Let’s go ahead and create this API.
Express is a web application framework for Node, a bit like ASP.NET MVC or Nancy in .NET. (Or Sinatra for the Ruby folks). We need to install that on top of Node, with NPM. We add the -g switch because we want to install it globally.
Jade is a bit like Razor and a very elegant markup language. However, I just want to code an API and am not interested in rendering views at this time. So I’m going to delete the views folder.
I also want the server in a separate folder, for separation of concerns.
Now let’s open up package.json and remove the jade dependency.
We’ll add Mongoose, a very *very* nice ORM for Mongo.
This will install the dependencies (Express and Mongoose).
Create a folder ‘models’ in the server dir and name it Member.js.
Now, if you ‘require’ this file in e.g. app.js, you can use all the methods of Member.
Very much like ‘using’ in .NET.
Next create a tests folder in the server dir.
Create a file named 0-connection.js (that’s my own convention).
var Member = require(‘../models/Member’) this is were we import the Member class library.
Mongoose is the ‘Entity Framework’ for Mongodb. When using Mongoose to define our Member model, the Member instance automatically gets methods like save, findOne and findOneAndRemove.
In the next post we’ll start writing the API.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-16 22:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.10, museum_site app).

    Adds a ``File.article_count`` counter field and gives several existing
    ``Article``/``File`` fields explicit defaults (plus ``blank=True``
    where appropriate).
    """

    # Must run after the migration that introduced Article.page.
    dependencies = [
        ('museum_site', '0008_article_page'),
    ]

    operations = [
        # New denormalised counter of articles associated with a file.
        # NOTE(review): presumably kept in sync by application code — confirm.
        migrations.AddField(
            model_name='file',
            name='article_count',
            field=models.IntegerField(default=0),
        ),
        # The remaining operations only tighten existing fields with
        # explicit defaults; no data migration is required.
        migrations.AlterField(
            model_name='article',
            name='content',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='article',
            name='css',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='article',
            name='date',
            # Unix-epoch date used as a sentinel default for legacy rows.
            field=models.DateField(default='1970-01-01'),
        ),
        migrations.AlterField(
            model_name='file',
            name='company',
            field=models.CharField(blank=True, default='', max_length=80),
        ),
        migrations.AlterField(
            model_name='file',
            name='genre',
            field=models.CharField(blank=True, default='', max_length=80),
        ),
    ]
|
Images from Events posted (11).
April Dance info and poster added.
Page updated responsive plus dance link.
No Dancing nights added and wording changed.
May 2019 Dance details added.
Tea Dance 2019 Crib added.
Date of next meeting, Minutes 253 and Agenda 254 added.
Tea Dance 2019 Poster added.
Haggis Supper 2019 details added.
Sevenoaks Reel Club and North Kent moved to closed list.
Updated to 17 clubs and diary note added.
Updated with 2019 Walk date.
Detail of Club closure added.
2019 Programme details and outline info on Hogmanay 2018 added.
Financial Statements for 2017-18 added.
November 2018 Dance details added.
Link to Newsletter 70 added and Text updated.
Newsletter number 70 November 2018 added.
More Christmas Dance Cribs added.
More Christmas Dance details added.
October 2018 Dance details added.
Wealden and Canterbury SS moved to closed list.
September Social deleted and Christmas Dance details added.
November 2018 Dance cancellation added.
November 2018 dance details added and club information updated.
September 2018 Dance details added.
Minutes of meeting 252 and Agenda for Meeting 253 added .
August 2018 Dance crib added.
November Dance added dates for 18-19 terms added.
August 2018 Dance details added.
September Social and Christmas Dance added.
July 2018 Dance details added.
Walk deleted Autumn Dance added.
October 2018 Dance added page updated.
June 2018 Dance details added.
May and June 2018 Dance details added.
Newsletter number 69 May 2018 added.
Walk Details and Poster added Tea Dance deleted.
Diary dated 30.04.18 added .
April 2018 Dance details added.
Highland Ball poster and crib added.
Details of Highland Ball added.
Diary dated 06.04.18 added .
KASS Diary 06.04.18 added, text revised.
March 2018 Dance details added.
Diary dated 28.02.18 added .
KASS Diary 28.02.18 added Page revised with KASS Calendar.
Additional info on Tea Dance and Walk added, 2019 Ball added.
Diary dated 12.02.18 added .
Shield logo,Tea Dance Poster and Crib added.
February 2018 Dance details added.
Haggis Supper 2018 details added.
Feb 2018 Poster added events lists updated.
Date of next meeting corrected.
Spring Drinks rota 2018 added.
Spring Dance details and crib added.
Diary dated 24.11.17 added .
KASS Diary 24.11.17 added here and to home page.
Details of November dance added.
Oct 17 to July 18 events updated.
2018 KASS Events added all 2017 removed.
Updated to show 2018 Events. Nov 2017 Newsletter added.
Newsletter number 68 November 2017 added.
Christmas Dance programme and poster added.
Oct 17 to July 18 events added.
Details of October dance added.
Diary link updated and message changed.
Diary dated 16.10.17 added .
KASS Diary 16.10.17 added here and to home page.
Diary dated 07.10.17 added Address list Dated 07.10.17 added.
KASS Diary 07.10.17 added here and to home page.
KASS Diary 18.09.17 added here and to home page.
Address list and diary updated.
KASS Diary 11.09.17 added here and home page.
Details of August dance added.
Date of AGM added minutes and agendas added.
KASS Diary 31.07.17 added here and home page.
Page updated and images added.
Details of July dance added.
Reminder about late summer events added.
Autumn Dance and 2017-18 class dates added.
Summer dance programme crib and poster added.
November Dance added walk removed.
Details of June dance crib added.
Details of June dance added.
KASS Diary 08.06.17 added here and home page.
KASS Diary 24.05.17 added here and home page.
Details of May dance added.
Ball removed. November dance venue change noted.
KASS Diary 12.05.17 added here and home page.
Newsletter number 67 May 2017 added.
Tea Dance removed walk details added.
Tea Dance removed Walk added.
Details of April dance added.
Map updated and Ball ticket message added.
Details of March dance added Officers updated.
2017 Ball programme details added.
Link added to Tea Dance info..
Tea Dance Poster details updated.
Tea Dance Poster and Crib added.
Page revised and updated with images added.
Details of 17th March 2017 event added.
Details of February dance added.
Spring dance programme crib and poster added.
Address List Updated to 27.01.17.
Details of 2016 Hogmanay added.
Feb 2017 Dance details added.
Latest insurance schedule and financial statement added.
Draft Minutes of Meeting 18.10.16 added.
2017 Tea Dance and Ball details added.
Wealden and Dover and East Kent added.
Events and Number of members updated.
Address List Updated to 11.11.16.
Newsletter number 66 November 2016 added.
Christmas Dance details updated and poster added.
Details of 4th November event added.
Tea Dance removed details of Autumn Dance added.
Tea Dance 2016 removed Christmas Dance details updated.
November 2016 Dance details added and Feb 2017 dance added.
KASS Agenda and minutes added.
Crib for September dance added.
Address List Updated to 08.08.16.
Details of September dance added.
Future Events November Dance 2016 added.
Details of July dance updated.
Details of 29 July event added.
Tea Dance 2016 details updated other events added.
Details of July event added.
Details of June dance added and other information.
Content updated with various changes.
Details of July 2016 Dance added.
Details of June event added.
Contacts List Updated to 14.05.16.
KASS Walk details added Ball removed.
Updated Newsletter number 65 May 2016 added.
Minutes of meetings 247 and 248 (draft) added.
Contacts List Updated to 03.05.16.
Future Events for 2016 and 2017 added.
MiniCrib for May 2016 Dance added.
Details of April dance changed.
Tea Dance 2016 details added.
Details of May 2016 Dance added.
Details of March dance updated.
Details of March dance added.
Details of Easter Bonnet event added.
Details of March 2016 Dance updated.
Poster changed. Page made responsive.
Details of March 2016 Dance added.
Details of February dance added. 2016 Dates added.
Details of Valentine's event added. Gallery updated.
Spring Dance 2016 details added.
Minicrib for Burns Dance added. December Celebration removed.
KASS events 2016 added 2015 events removed.
Details of Burns Dance added.
Christmas Dance details added and updated.
Updated Newsletter number 64 November 2015 added.
December 2015 Ceilidh and January 2015 Dance details added.
KASS Diary 15.10.15 added with changed dates.
Page revised with latest events added.
SELSA added to map and links.
Autumn Dance programme and crib added.
Events and mail link updated.
Walk removed mail link updated.
Date of October 2015 Meeting added.
minutes 246 of 07.04.15 added.
Details of June Presidents Buffet Dance added.
Address of venue changed to Bredgar.
Dancing after the walk added.
Next Events Ball removed and Autumn Dance added.
Updated Newsletter number 63 May 2015 added.
Newsletter number 63 May 2015 added.
Next Events Tea Dance removed and walk added.
Spring Tea Dance Removed Walk Details added and Walk Poster added.
Spring Tea Dance Programme and Crib added.
Address of new venue added and events updated.
Ball Programme,Poster and Crib changed to show Eileen Watt's Strathspey.
Ball Programme and Poster changed to show Eileen Watt's Reel.
KASS Agenda for Meeting on 07.04.15 added.
Updated after the 2015 Ceilidh and Future Events note added.
KASS Contacts list updated 23.01.15 and minutes of Meeting 245 added.
Next Events Tea Dance added.
Ball Programme added Autumn Dance Date corrected (html and css updated).
Link to Next Event page activated.
2015 Events added 2014 Events removed.
Details of Burns Supper corrected and elaborated.
Details of Burns Supper added.
KASS Contacts list updated 18-11-14 added.
Agendas for Meetings of 25.03.14 and 07.10.14 added.
Minutes for Meeting of 25.03.14 added.
Details of September Dance and 2015 Dance Programme added.
Printable version of Diary Updated 21st July 2014.
Diary Updated 21st July 2014.
Diary in Documents - Updated 21st July 2014.
Walk 2014 removed Autumn Dance details added.
Details of July Monthly Dance added.
July Dance Details Added and 75th Anniversary Removed.
Details of June Buffet Dance added.
Diary Updated 28th April 2014.
Links to 75th Anniversary Minicrib and Flyer corrected.
Walk 2014 details and map added.
Diary Updated 14th March 2014.
Details of March Dance added and change to post code and October exception.
Diary Updated 16th February 2014.
KASS Ball, Poster and Crib added.
KASS Ball, Walk and Autumn Dance 2014 added.
Details of Hogmanay Party added.
Burns Supper 2014 details added.
Diary Updated 21st November 2013.
Diary Updated 19th November 2013.
Details of November Dance amended.
November Dance 2013 details added.
2013 - 14 Insurance Certificate and Letter added.
Agenda 243 and Minutes 242 added.
Details of September Dance added. Prices changed.
Diary Updated 14th September 2013.
Diary Updated 10th August 2013.
Ticket secretary email added, typing error corrected and css file revised.
Page centred with blue background.
Autumn Dance date added and fees and club history revised.
Diary Updated 30th July 2013.
New term dates and Autumn dance date added.
Page centred and updated with blue background.
More Summer Dance Pictures added.
July 2013 dance removed and September Dance 2013 added. Layout changed.
Updated with Summer Dance photo.
1st July 2013 Diary and Contacts Updated .
Diary Updated 1st July 2013.
May 2013 dance removed and July Dance 2013 added. Layout changed.
Spring and Summer 2014 dance dates added.
50th Celebration Ball minicrib updated.
50th Celebration Ball further details added.
50th Celebration Ball details added.
Directions to the start of the KASS walk added.
Layout and content updated with blue background and page centred.
Layout updated with blue background and page centred.
Layout updated with blue background and latest information on Summer Dance added.
Layout updated with blue background and latest information on Spring Dance added.
Page now in centre of screen and facilities added to allow two dances to be advertised.
Calling notice, Agenda for the 2nd October 2012 and minutes of the last meeting added.
|
import os
from ctypes import POINTER
from ctypes import pointer
from ctypes import Structure
from ctypes import c_float
from ctypes import c_int
from ctypes import c_void_p
import numpy as np
# Locate the compiled "_gist" C extension that sits next to this module and
# load it via numpy's ctypes helper; its entry points are configured below.
leargist_folder = os.path.abspath(__file__).rsplit(os.path.sep, 1)[0]
leargist_name = "_gist"
libleargist = np.ctypeslib.load_library(leargist_name, leargist_folder)
class GistBwImage(Structure):
    '''Matches image_t declared in standalone_image.h'''
    # ctypes mirror of the C struct: field order and types must stay in
    # sync with the C header or the library will read garbage.
    _fields_ = [
        ("width", c_int),
        ("height", c_int),
        ("stride", c_int), # stride needs to be computed separately
        ("data", POINTER(c_float))
    ]
class GistColorImage(Structure):
    '''Matches color_image_t declared in standalone_image.h'''
    # ctypes mirror of the C struct; the three channel planes are passed
    # as separate contiguous float buffers (planar layout, not interleaved).
    _fields_ = [
        ("width", c_int), # stride = width
        ("height", c_int),
        ("c1", POINTER(c_float)), # R
        ("c2", POINTER(c_float)), # G
        ("c3", POINTER(c_float)), # B
    ]
# Setup argument & return types for color gist.
# restype is c_void_p: ctypes hands back the raw address of the float
# array allocated by the C library, or None when the call returned NULL.
libleargist.color_gist_scaletab.argtypes = (
    POINTER(GistColorImage), c_int, c_int, POINTER(c_int))
libleargist.color_gist_scaletab.restype = c_void_p

# Setup argument & return types for the black & white variant.
libleargist.bw_gist_scaletab.argtypes = (
    POINTER(GistBwImage), c_int, c_int, POINTER(c_int))
libleargist.bw_gist_scaletab.restype = c_void_p
def bw_gist(im, nblocks=4, orientations=(8, 8, 4)):
    """Compute the GIST descriptor of a greyscale image.

    Args:
        im: 2-d array-like greyscale image, shape (rows, cols); must be
            at least 8x8.
        nblocks: number of spatial blocks per axis of the descriptor grid.
        orientations: orientations per scale; its length is the number of
            scales.

    Returns:
        1-d float32 numpy array of length nblocks * nblocks *
        sum(orientations), backed by memory allocated by the C library.

    Raises:
        ValueError: if the image is smaller than (8, 8) or the C routine
            rejects the data (e.g. NaN/Inf values).
    """
    scales = len(orientations)
    orientations = np.array(orientations, dtype=np.int32)
    if im.shape[0] < 8 or im.shape[1] < 8:
        # Bug fix: the message previously formatted `im.size`, which for a
        # numpy array is the total element count, not the dimensions.
        raise ValueError(
            "image size must at least be (8, 8), got %r" % (im.shape,))
    im = np.ascontiguousarray(im, dtype=np.float32)
    gbwi = GistBwImage(
        im.shape[1],  # Width is the SECOND element of the shape tuple
        im.shape[0],
        im.shape[1],
        im.ctypes.data_as(POINTER(c_float)))
    # We don't need a *3 because it's black & white. Note the useless
    # looking brackets here are HIGHLY NECESSARY!! difference between
    # ending up with c_float * 320 (which we want) and c_float * 4 * 4 * 20
    descriptors = c_float * (nblocks * nblocks * orientations.sum())
    addr = libleargist.bw_gist_scaletab(
        pointer(gbwi), nblocks, scales,
        orientations.ctypes.data_as(POINTER(c_int)))
    if addr is None:
        # c_void_p restype yields None for a NULL pointer. This can happen
        # when the block we give it contains NaN, Inf, etc.
        raise ValueError("Descriptor invalid")
    return np.ctypeslib.as_array(descriptors.from_address(addr))
def color_gist_numpy(image, nblocks=4, orientations=(8, 8, 4)):
    """Compute the GIST descriptor of an RGB image given as a numpy array.

    Args:
        image: array of shape (height, width, 3), channels last.
        nblocks: number of spatial blocks per axis of the descriptor grid.
        orientations: orientations per scale; its length is the number of
            scales.

    Returns:
        1-d float32 numpy array of length nblocks * nblocks *
        sum(orientations) * 3, backed by memory allocated by the C library.

    Raises:
        ValueError: if the image is smaller than (8, 8) or the C routine
            rejects the data (e.g. NaN/Inf values).
    """
    height, width = image.shape[:2]
    if width < 8 or height < 8:
        # Bug fix: `"%r" % (width, height)` raised a TypeError ("not all
        # arguments converted") because the tuple was consumed as two
        # format arguments for a single %r; wrap it as one value.
        raise ValueError(
            "image size should at least be (8, 8), got %r" % ((width, height),))
    # Planar layout (3, height, width): the C API expects one contiguous
    # float plane per channel.
    image = image.transpose(2, 0, 1)
    image = np.ascontiguousarray(image, dtype=np.float32)
    gci = GistColorImage(
        width,
        height,
        image[0].ctypes.data_as(POINTER(c_float)),
        image[1].ctypes.data_as(POINTER(c_float)),
        image[2].ctypes.data_as(POINTER(c_float)))
    scales = len(orientations)
    orientations = np.array(orientations, dtype=np.int32)
    addr = libleargist.color_gist_scaletab(
        pointer(gci), nblocks, scales,
        orientations.ctypes.data_as(POINTER(c_int)))
    descriptors = c_float * (nblocks * nblocks * orientations.sum() * 3)
    if addr is None:
        # c_void_p restype yields None for a NULL pointer. This can happen
        # when the block we give it contains NaN, Inf, etc.
        raise ValueError("Descriptor invalid")
    return np.ctypeslib.as_array(descriptors.from_address(addr))
def color_gist(im, nblocks=4, orientations=(8, 8, 4)):
    """Compute the GIST descriptor of an RGB image.

    Args:
        im: a PIL image; converted to RGB mode before processing.
        nblocks: number of spatial blocks per axis of the descriptor grid.
        orientations: orientations per scale; its length is the number of
            scales.

    Returns:
        1-d float32 numpy array of length nblocks * nblocks *
        sum(orientations) * 3, backed by memory allocated by the C library.

    Raises:
        ValueError: if the image is smaller than (8, 8) or the C routine
            rejects the data (e.g. NaN/Inf values).
    """
    scales = len(orientations)
    orientations = np.array(orientations, dtype=np.int32)

    # check minimum image size (PIL's Image.size is (width, height))
    if im.size[0] < 8 or im.size[1] < 8:
        raise ValueError(
            "image size should at least be (8, 8), got %r" % (im.size,))

    # ensure the image is encoded in RGB
    im = im.convert(mode='RGB')

    # build the lear_gist color image C datastructure.
    # Bug fixes: np.fromstring is deprecated in favour of np.frombuffer,
    # and Image.tobytes() is row-major, so the interleaved array has shape
    # (height, width, 3) — the previous (width, height, 3) was only
    # correct for square images.
    arr = np.frombuffer(im.tobytes(), np.uint8)
    arr = arr.reshape(im.size[1], im.size[0], 3)
    # Planar layout (3, height, width): one contiguous plane per channel.
    arr = arr.transpose(2, 0, 1)
    arr = np.ascontiguousarray(arr, dtype=np.float32)
    gci = GistColorImage(
        im.size[0],
        im.size[1],
        arr[0].ctypes.data_as(POINTER(c_float)),
        arr[1].ctypes.data_as(POINTER(c_float)),
        arr[2].ctypes.data_as(POINTER(c_float)))
    descriptors = c_float * (nblocks * nblocks * orientations.sum() * 3)
    addr = libleargist.color_gist_scaletab(
        pointer(gci), nblocks, scales,
        orientations.ctypes.data_as(POINTER(c_int)))
    if addr is None:
        # c_void_p restype yields None for a NULL pointer. This can happen
        # when the block we give it contains NaN, Inf, etc.
        raise ValueError("Descriptor invalid")
    return np.ctypeslib.as_array(descriptors.from_address(addr))
|
Switzerland is a stable and modern market economy with an unemployment rate of around 2.5% (2018), a highly skilled labour force, and a GDP per capita that is among the highest in the world. According to the World Economic Forum’s Global Competitiveness Report 2018, Switzerland ranks fourth place globally in terms of competitiveness. The high ranking is driven by Switzerland’s efficient labour market, the sophistication of its businesses, its strength in innovation, and the availability and use of the latest technologies. Other key advantages of Switzerland as a business hub are its infrastructure, the availability of financial services and the quality of its education system. Switzerland’s reasonable corporate tax rates contribute to the country’s status as one of the most competitive economies.
After 2017 has seen continued high levels of M&A activity in Switzerland, M&A activity in 2018 saw a new record number of deals and remained on a high level in terms of deal value. All industries contributed to a strong year for M&A in Switzerland. Particularly noteworthy deals took place in the pharmaceutical sector (with Novartis selling its stake of its Consumer Healthcare joint venture with GlaxoSmithKline to GlaxoSmithKline for USD13 billion and acquiring AveXis and Endocyte for an aggregate of USD10.6 billion), industrial sector (ABB’s sale of its power grid business to Hitachi for USD9.4 billion), consumer markets sector (Nestlé’s strategic cooperation with Starbucks valued at USD7 billion and Richemont’s acquisition of YOOX Net-a-Porter) and the chemicals sector (Clariant’s strategic transaction with SABIC), all showing high levels of transformation or even disruptive changes. 2018 was also a very successful year for IPOs, with a record number of 12 IPOs on the SIX Swiss Exchange, as well as for private equity, where the number of deals in Switzerland reached the highest number since 2007.
For 2019, further changes to business and operating models are to be anticipated, not only in the large international companies but also the many Switzerland-based small and mid-sized firms. This is particularly due to the digitisation of business models. We expect M&A to continue being strong in the life sciences sector and to become more important in fintech and related sectors.
The Swiss Federal Government is relaxed about foreign investment and maintains favourable conditions for both Swiss and foreign investors. In principle, there are no restrictions for individuals and entities outside Switzerland wishing to do business in Switzerland or wishing to invest in businesses in Switzerland. One exception is the real estate sector, where federal law restricts the acquisition of residential real estate by persons living outside Switzerland; with regard to commercial real estate, there are, with a few exceptions, no such restrictions. Except for the financial sector (e.g. banks, securities dealers, insurance companies, and financial market infrastructures), the transportation sector and for certain professions (e.g. lawyers, auditors, healthcare and certain other professionals), there is in general no permit required for doing business in Switzerland.
Switzerland has a transparent, effective and reliable legal system. Swiss corporate law is characterised by a relatively low level of regulation, with the exception perhaps of say-on-pay and other compensation-related rules for listed companies. Swiss employment law is considerably less restrictive than in most of continental Europe, and social security contributions are significantly below the levels in many Western European countries.
The strength of the Swiss economy lies mainly in its international outreach and strong interconnectedness with the economies of other countries. Switzerland’s strong financial sector, which accounted for more than 9% of overall economic value creation in 2017, and Switzerland’s leading position in the global pharmaceutical, biotech, fintech, robotics and virtual reality industries attracts new companies and a qualified labour force.
Although Switzerland is not a member of the EU, it has strong relations with the EU, which are governed by a series of bilateral agreements. Under these agreements, Swiss employers can hire workers from EU/EEA countries without having to show a specific need. In 2018, the Swiss government negotiated a framework agreement on “institutional issues” with the EU that aims to ensure that current and future agreements on market access are applied more consistently and efficiently. However, it is uncertain whether Swiss voters will approve the agreement. Switzerland has also been adapting its legal system to international standards—in particular EU standards—in many ways in order to ensure the equivalence and interoperability of Swiss laws, in particular in the financial sector.
Switzerland is currently undertaking a comprehensive corporate tax reform in order to abolish tax privileges for certain types of companies that the OECD no longer accepts. The reform would lead to a significant decrease of headline tax rates and introduce new competitive and internationally accepted measures. One of the proposed measures is the so-called licence box providing for reduced tax rates for certain income from patents and the like. If voters approve this tax reform in a referendum scheduled for 19 May 2019, it will take effect on 1 January 2020.
On 1 January 2019, the new Financial Services Act has taken effect, which governs a broad range of financial services, including cross-border services into Switzerland, and aligns the Swiss rules more closely to EU regulation. Another project in the legislative process is a comprehensive corporate law reform proposal aiming to strengthen corporate governance, to modernise the incorporation and capital structure of public and private companies in Switzerland and to increase legal certainty. The bill is currently being considered by the federal parliament.
|
from django import forms
from django.forms.utils import ErrorList
from crits.campaigns.campaign import Campaign
from crits.core import form_consts
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.handlers import get_item_names
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.acls import Common, TargetACL
# (value, label) pairs for the relationship-type ChoiceField, sorted by name.
relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)]
class TargetInfoForm(forms.Form):
    """
    Django form for adding/updating target information.
    """

    error_css_class = 'error'
    required_css_class = 'required'

    # Target identity fields; only the email address is mandatory.
    firstname = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                                required=False)
    lastname = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                               required=False)
    division = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                               required=False)
    department = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                                 required=False)
    email_address = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                                    required=True)
    organization_id = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                                      required=False)
    title = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
                            required=False)
    note = forms.CharField(widget=forms.Textarea(attrs={'cols':'50', 'rows':'2'}),
                           required=False)
    # Campaign association; choices are populated in __init__ based on ACLs.
    campaign = forms.ChoiceField(widget=forms.Select, required=False,
                                 label=form_consts.Target.CAMPAIGN)
    camp_conf = forms.ChoiceField(required=False,
                                  label=form_consts.Target.CAMPAIGN_CONFIDENCE)
    # Hidden fields used to relate the new target to an existing TLO.
    related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID)
    related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE)
    relationship_type = forms.ChoiceField(required=False,
                                          label=form_consts.Common.RELATIONSHIP_TYPE,
                                          widget=forms.Select(attrs={'id':'relationship_type'}))

    def __init__(self, username, *args, **kwargs):
        # NOTE(review): despite the name, `username` is the requesting user
        # object — it must expose has_access_to(); confirm against callers.
        super(TargetInfoForm, self).__init__( *args, **kwargs)
        # Only expose campaign names to users with campaign-read access;
        # others keep the field's empty choice list.
        if username.has_access_to(Common.CAMPAIGN_READ):
            self.fields['campaign'].choices = [('', '')] + [
                (c.name, c.name) for c in get_item_names(Campaign, True)]
        self.fields['camp_conf'].choices = [('',''),
                                            ('low', 'low'),
                                            ('medium', 'medium'),
                                            ('high', 'high')]
        self.fields['relationship_type'].choices = relationship_choices
        self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO

        # Append the standard bucket-list and ticket fields shared by
        # CRITS TLO forms.
        add_bucketlist_to_form(self)
        add_ticket_to_form(self)

    def clean(self):
        """Cross-field validation: a selected campaign requires a confidence."""
        cleaned_data = super(TargetInfoForm, self).clean()
        campaign = cleaned_data.get('campaign')

        if campaign:
            confidence = cleaned_data.get('camp_conf')

            if not confidence or confidence == '':
                self._errors.setdefault('camp_conf', ErrorList())
                self._errors['camp_conf'].append(u'This field is required if campaign is specified.')

        return cleaned_data
|
Indiana Wild’s exotic – and not so exotic – animals were a hit with children at the annual meeting.
Around 1,300 LaGrange County REMC members and their families came together on June 9 to elect two directors to the cooperative board, as well as spend the day eating, learning and laughing together.
Registered members re-elected incumbent director Jeff Hampshire to represent District 3 and elected Clifford Hibbs to lead District 4. Hibbs was appointed to the board in January after the death of Director Mark Bontrager in November 2017.
The meeting began with a flag ceremony, the Pledge of Allegiance and a recognition of the veterans in attendance before CEO Mark Leu addressed the teamwork that makes the co-op successful today and in the future.
LaGrange County REMC CEO Mark Leu gives his address from the main stage, while the board of directors looks on.
Attendees viewed displays and talked to REMC employees about various programs offered through the cooperative. The REMC’s power supplier, Wabash Valley Power Association, showcased information about Co-op Solar, and Indiana Electric Cooperatives shared about the co-op’s monthly magazine, Electric Consumer. The Builders Association of Northeast Indiana and our energy advisor, Jake Taylor, provided information about building for efficiency and comfort.
LaGrange County Convention & Visitors Bureau educated visitors on its programs, and Parkview LaGrange Hospital and its EMS team brought health information and offered blood pressure checks for members.
Children, and hopefully future REMC members, enjoyed learning more about exotic animals through Indiana Wild’s onsite program, playing in bounce houses and having their faces painted by clowns.
Crowds were also entertained with music from Smooth Edge 2, and cash and other prizes were given away throughout the day.
Hundreds of members gathered at LaGrange County REMC headquarters to enjoy lunch and musical entertainment.
|
# -*- coding: utf-8 -*-
"""The EWF image file-like object."""
import pyewf
from dfvfs.file_io import file_object_io
from dfvfs.lib import errors
from dfvfs.lib import ewf
from dfvfs.resolver import resolver
class EWFFile(file_object_io.FileObjectIO):
  """File input/output (IO) object using pyewf."""

  def __init__(self, resolver_context, path_spec):
    """Initializes a file input/output (IO) object.

    Args:
      resolver_context (Context): resolver context.
      path_spec (PathSpec): a path specification.
    """
    super(EWFFile, self).__init__(resolver_context, path_spec)
    # Segment-file file-like objects opened in _OpenFileObject; kept
    # referenced here so they stay alive while the pyewf handle uses them.
    self._file_objects = []

  def _Close(self):
    """Closes the file-like object."""
    # pylint: disable=protected-access
    super(EWFFile, self)._Close()

    # Drop the references to the segment file objects; their lifetime is
    # otherwise managed by the resolver context.
    self._file_objects = []

  def _OpenFileObject(self, path_spec):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      pyewf.handle: a file-like object or None.

    Raises:
      PathSpecError: if the path specification is invalid.
    """
    if not path_spec.HasParent():
      raise errors.PathSpecError(
          'Unsupported path specification without parent.')

    parent_path_spec = path_spec.parent

    parent_location = getattr(parent_path_spec, 'location', None)
    if parent_location and parent_path_spec.IsSystemLevel():
      # The parent is a real OS path: let pyewf find all EWF segment files
      # (E01, E02, ...) directly on disk.
      segment_file_paths = pyewf.glob(parent_location)
      ewf_handle = pyewf.handle()
      ewf_handle.open(segment_file_paths)

    else:
      # Note that we cannot use pyewf's glob function since it does not
      # handle the file system abstraction dfvfs provides.
      file_system = resolver.Resolver.OpenFileSystem(
          parent_path_spec, resolver_context=self._resolver_context)
      segment_file_path_specs = ewf.EWFGlobPathSpec(file_system, path_spec)
      if not segment_file_path_specs:
        return None

      # Open every segment through the dfvfs resolver and hand the
      # file-like objects to pyewf.
      for segment_file_path_spec in segment_file_path_specs:
        file_object = resolver.Resolver.OpenFileObject(
            segment_file_path_spec, resolver_context=self._resolver_context)
        self._file_objects.append(file_object)

      ewf_handle = pyewf.handle()
      ewf_handle.open_file_objects(self._file_objects)

    return ewf_handle

  def get_size(self):
    """Retrieves the size of the file-like object.

    Returns:
      int: size of the RAW storage media image inside the EWF container.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    # Size of the decompressed media data, not of the EWF container files.
    return self._file_object.get_media_size()
|
This week on Flyover we’re knocking down the myths of “Real America” and discussing the values that tie us together as a nation.
Join the conversation: How do you define "real America"?
Politicians often portray “real America” as some nostalgic, heartland-rooted, mythic place that is somehow more genuine than the coasts or the cities of this country. But Americans who hail from cities and coasts, flatlands and rural farm towns share many of the same aspirations for their families and ambitions for the country.
Hurricane Irma is bearing down on the state of Florida today. The Category 3 storm is responsible for at least 25 deaths and forcing millions of people to flee from their homes. Irma comes just days after Hurricane Harvey’s disastrous wake hit Houston. What roles do identity, race and class play in a natural disaster?
Here is the article mentioned by our guest, Neena Satija.
Maria Kefalas, professor at St. Joseph’s University in Philadelphia and the co-author of "Hollowing out the Middle: The Rural Brain Drain and What it means for America"
WE Are all Human Beings and we are living in a Class War not a RACE War...WE HUMANS need to help other HUMAN BEINGS. I am only 40 and I know We can move forward right now. Admit We are all real Human Beings. HELP your children and your family...MOVE FORWARD right now and Stand up to the class war We are facing right now.
Now is the time for all Human Beings to make change and help each other. My husband is from NYC. I am from Wisconsin and have lived in Minneapolis for the past twenty years. I encourage all Human Beings to make a change for We should not live in fear and WE can make change by standing up and making a difference.
I have heard you do this.See the recent Economist article on colleges for a shocker. The 'real America' allows for different perspectives and practices within agreed upon boundaries of free speech, "justice for all', pursuit of happiness- opportunity for all, freedom to worship God - or not.
|
# -*- coding: iso-8859-1 -*-
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Path Iterator
A TALES Iterator with the ability to use first() and last() on
subpaths of elements.
"""
import TALES
from Expressions import restrictedTraverse, Undefs, getSecurityManager
class Iterator (TALES.Iterator):
    """TALES iterator supporting first()/last() on sub-paths of elements.

    NOTE(review): Python 2-era Zope code — relies on ``filter`` returning a
    list and on ``type('')`` as the string type; confirm before porting.
    """

    def __bobo_traverse__ (self, REQUEST, name):
        # When a TALES path traverses into first/<sub/path> or
        # last/<sub/path>, collapse the remaining traversal names into a
        # single tuple so first()/last() receive the whole sub-path at once.
        if name in ('first', 'last'):
            path = REQUEST['TraversalRequestNameStack']
            names = list(path)
            names.reverse()  # the traversal stack is in reverse URL order
            path[:] = [tuple(names)]
            return getattr(self, name)
        # NOTE(review): other names implicitly return None — presumably
        # deliberate so default traversal applies; confirm.

    def same_part (self, name, ob1, ob2):
        # Compare two items after traversing the optional sub-path `name`
        # on each; returns 1/0 truth values (Python 2 convention).
        if name is None:
            return ob1 == ob2
        if isinstance(name, type('')):
            name = name.split('/')
        name = filter(None, name)  # drop empty segments from '//' or edges
        securityManager = getSecurityManager()
        try:
            ob1 = restrictedTraverse(ob1, name, securityManager)
            ob2 = restrictedTraverse(ob2, name, securityManager)
        except Undefs:
            # Traversal hit an undefined value: treat the parts as unequal.
            return 0
        return ob1 == ob2
|
The servo itself has nothing to do with the fluid, it merely assists the master cylinder. If you're having sporadic braking issues, you'd need to be looking at the master seals.
I had an odd issue with my old talbot samba where the seals had worn in such a way that they'd push fluid to both circuits, then one, then the other causing very odd self-steering brakes.
How old are the flexible brake hoses as these often cause problems on classic cars as they collapse internally? Also what angle does the master cylinder sit at; is the back end higher than the front? The master cylinder on Jensens used to be higher at the back end and trap air, to bleed the brakes properly you used to have to jack the car up at the back end to raise the front of the master cylinder which would allow the trapped air to be bled out of the system. To test the servo; hold the brake pedal down hard without the engine running, you should have a solid pedal, start the engine and the after a couple of seconds you should feel the pedal go down more as the servo assist kicks in, if it doesn't go down more your servo isn't working.
The master cylinder is horizontal in a Moggie, good call about the flex is though.
But air is getting in Tim said, I'm wondering if a union needs tightening, or if the end was flared ok. Who did the fitting and pipe work for the servo Tim?
Another thought, and I've not experienced it or heard of it, but the master cylinder from memory is cast, so there is always a chance you have a porous casting, even though it has been refurbished.
Another thing worth doing if you can be bothered Tim is to have a header tank on the brake master cylinder mounted in the engine bay. It means you don't have to keep lifting the carpet and checking in the footwell for brake fluid level.
Great cars though, so much fun with 45bhp and 145 section tyres, four wheel drifting ftw!
On to Pigeon, I started her up again today, she started on the button, not a cough in sight. Furthermore, if she did have compression problems, wouldn't she blow white smoke from cold and if anything, get better as she warmed up? It seems the amount and colour of smoke is variable and the only variable is the amount of air entering the pump. Soon after start up, there was no sign of air. A mate of mine reckoned she looked fine for emissions, so I had him do the mot style rev test while I looked and I'm inclined to agree. So I've taken the big broad decision to enter for mot, as is, with the exception of curing the power steering issue.
So she's sorned for this month but if I can get the power steering sorted in the next week (gonna piss down tomorrow) then I'll mot and tax her for October. She's looking smart and sounds full,of xud awesomeness!
Master cylinder is new as are the flexis, Kev. The master cylinder in a Minor sits underneath the driver's floor and is pretty level.
Re porous castings. It does happen, I had a mains water tap go after about 40 years in my kitchen. One quiet evening heard a slight hissss and turning on the light saw a very, very fine stream of water coming out the tap body sideways. That's only a nominal 80psi. Must have had an air bubble in there which after many years of on/off hammer decided to let go.
However a porous casting should at least give a weep if there is a weakness.
Steering woes continue, as do my smoke issue. I thought I had spotted a reason for poor timing control on the Lucas - the advance solenoid was slightly loose, so I tightened with no improvement. However, I can see some tiny little bubbles appearing out the sticky out bit on the end of it. I'll have to take photos to show where.
If it fails on emissions I'm tempted to swap head and pump from my scrapper.
Next in line is the pinion valve. To cut a long story short, the overflow from the pinion valve should reduce/cut off when turning left and instead, there seems to be little difference in the flow at all. This would point to either a blockage preventing the pinion valve closing the relevant ports or alternatively an internal leak within the pinion valve. As both are unlikely, I'm probably completely wrong, but as I want the car to be without issues so I don't have to keep getting under it every few weeks to sort out yet another problem, then I may as well refurb my spare pinion valve and see what happens!
If this doesn't work, then the logic breaks down a bit and short of bum scratching then it's a replacement of the pressure reg or pump although I think both are wild stabs in the dark.
Not sure if conventional power steering is the same but I think this is quite clever.
This was the state of the top seal inside the pinion valve. It leaked oil into the void above it which gave a path for the oil to return to tank.
Like an idiot I made a hash of rebuilding the valve using the new seals but I'm hoping I can use two of the new seals and another non leaky one.
Nice one! Surely that deserves a beer or two, at least when it's all back together and working.
Satisfying to find a real naff part as hopefully that will resolve the issue. Not knowing after a lot of work is just not the same. Handy pics for the future too.
You should have stopped right there! Already on it my friend, although knowing my luck it'll be a red herring! Pretty sure this was the problem though.
Thanks for the encouragement both........nearly there. I've just got fettling to do after the steering is working (unless that is he fails me on my grey smoke issue), but I'll have to wait and see about that one.
Yay!!! I have power steering! But the car didn't want to start. This car has a death wish I reckon! It seems that the fuel had, somehow mysteriously drained back from the pump! The clear tube from top of filter housing to the pump was full of air - I could see the fuel being drawn into the pump. That means the air must have got in through somewhere in the pump itself. I have one idea where that may be, so I'll bung some sealant on it for now to prove a point. Fingers crossed it'll prove the cause of me smoke!
That's what it used to do when i first bought it, but bypassing the fuel heater cured it completely.
Very odd it should come back!?!
Are you using the original Lucas pump?
Yeah I am. There is a leak, but oddly it's spitting out tiny bubbles rather than just diesel. I'll take piccies and show where I mean. But surely, in your case it would only syphon back if it let air in from a higher point? It also begs the question in my case as to where the diesel has gone as there is no sign of weeping diesel and there is a check valve to prevent fuel flowing back to tank!
Gonna spend the morning putting bungs back in, putting some under body sealant in various spots where it just needs touching up. Then a bit of sill work where there was small bits of rust leaving small holes. All rust treated and filled but needs finishing.
See how she ain't level with the garage door?
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import os
from tcvars import *
import datetime
from pyrrd.rrd import RRD, RRA, DS
from pyrrd.graph import DEF, CDEF, VDEF
from pyrrd.graph import LINE, AREA, GPRINT, PRINT, COMMENT
from pyrrd.graph import ColorAttributes, Graph
def Tcgraph():
    """Render an hourly RRD traffic graph (rate/ceil/sent) for every *.rrd
    file under ``rrd/`` and write a simple HTML index page linking the images.

    Relies on globals imported from tcvars: wwwpath, wwwhead, wwwfooter.
    NOTE(review): Python 2 only (print statements, file()); pyrrd required.
    """
    exampleNum = 1  # NOTE(review): unused
    # time-span constants in seconds (only `min` is used below, for the hourly graph)
    min = 60  # NOTE(review): shadows the builtin min()
    hour = 60 * 60
    day = 24 * 60 * 60
    week = 7 * day
    month = day * 30
    quarter = month * 3
    half = 365 * day / 2
    year = 365 * day
    now = int(time.time())
    endTime = now
    startTime = str(now - 600)  # NOTE(review): unused; graph start is set explicitly
    zacalo = datetime.datetime.today()  # start timestamp for run-time measurement
    #### index page for the web
    filenam = wwwpath + 'index.html'
    soubor = file(filenam, 'w')  # NOTE(review): file() is Python 2 only; use open()
    soubor.write(wwwhead)
    adr_list = os.listdir('rrd/')
    for adr in adr_list:
        filename = 'rrd/%s' % adr
        print filename
        # candidate output image paths; only the hourly image (hgraphfile_lg)
        # is actually rendered below — the rest are unused here
        graphfile = wwwpath + '%s.png' % adr.replace('.rrd', '')
        graphfile_lg = 'graphs/%s.png' % adr.replace('.rrd', '')
        hgraphfile_lg = wwwpath + '%sh.png' % adr.replace('.rrd', '')
        dgraphfile_lg = 'graphs/%sd.png' % adr.replace('.rrd', '')
        wgraphfile_lg = 'graphs/%sw.png' % adr.replace('.rrd', '')
        mgraphfile_lg = 'graphs/%sm.png' % adr.replace('.rrd', '')
        ygraphfile_lg = 'graphs/%sy.png' % adr.replace('.rrd', '')
        now = int(time.time())
        endTime = now
        startTime = str(now - 600)
        myRRD = RRD(filename)
        # data sources from the RRD: rate/ceil (already bits) and sent (bytes)
        def1 = DEF(rrdfile=myRRD.filename, vname='dsrate', dsName='rate')
        def2 = DEF(rrdfile=myRRD.filename, vname='dsceil', dsName='ceil')
        def3 = DEF(rrdfile=myRRD.filename, vname='dssent', dsName='sent')
        cdef1 = CDEF(vname='sdsrate', rpn='%s,1,*,FLOOR' % def1.vname)
        cdef2 = CDEF(vname='sdsceil', rpn='%s,1,*,FLOOR' % def2.vname)
        cdef3 = CDEF(vname='sdssent', rpn='%s,8,*,FLOOR' % def3.vname)  # bytes -> bits
        area1 = LINE(defObj=cdef1, color='#468A41', legend='rate', width='2')
        area2 = LINE(defObj=cdef2, color='#d91161', legend='ceil', width='2')
        area3 = AREA(defObj=cdef3, color='#8399f770', legend='sent', width='1')
        # area4 = LINE(defObj=cdef2, color='#468A41', legend='', width='1')
        # vdef1 = VDEF(vname='rate', rpn='%s,TOTAL' % def1.vname)
        # vdef2 = VDEF(vname='ceil', rpn='%s,TOTAL' % def2.vname)
        # legend values: maximum rate/ceil over the graphed window
        vdef1 = VDEF(vname='rate', rpn='%s,MAXIMUM' % def1.vname)
        vdef2 = VDEF(vname='ceil', rpn='%s,MAXIMUM' % def2.vname)
        # vdef1 = VDEF(vname='RATE_last', rpn='%s,LAST' % def1.vname)
        # vdef2 = VDEF(vname='RSSI_last', rpn='%s,LAST' % def2.vname)
        # vdef3 = VDEF(vname='CHANN_last', rpn='%s,LAST' % def3.vname)
        # vdef2 = VDEF(vname='myavgtx', rpn='%s,TOTAL' % def1.vname)
        # gprint1 = GPRINT(vdef1, 'rate %lg%SMbps')
        # gprint2 = GPRINT(vdef2, 'rssi %lg%SdBm')
        # gprint3 = GPRINT(vdef3, 'kanal %lg%S')
        gprint1 = GPRINT(vdef1, 'rate %lg %Sbits')
        gprint2 = GPRINT(vdef2, 'ceil %lg %Sbits')
        #gprint3 = GPRINT('2588888', 'ceil %lg %Sbits')
        comment1 = COMMENT('textik')
        # dark colour scheme for the rendered PNG
        ca = ColorAttributes()
        ca.back = '#333333'
        ca.canvas = '#333333'
        ca.shadea = '#000000'
        ca.shadeb = '#111111'
        ca.mgrid = '#CCCCCC'
        ca.axis = '#FFFFFF'
        ca.frame = '#AAAAAA'
        ca.font = '#FFFFFF'
        ca.arrow = '#FFFFFF'
        nadpis = adr + ' - ' + str(datetime.datetime.today())  # graph title
        graphwidth = 800
        graphheight = 400
        print hgraphfile_lg
        # hourly graph: last 20 minutes of data
        gh = Graph(hgraphfile_lg, start=int(time.time()) - min*20, end=endTime, vertical_label='bits/s', color=ca)
        gh.width = graphwidth
        gh.height = graphheight
        text = nadpis
        text = text.replace(' ', '_')  # rrdtool titles must not contain spaces here
        gh.title = text
        gh.data.extend([
            def1, def2, def3,
            cdef1, cdef2, cdef3,
            area1, area2, area3, #area4,
            # area6, area10, area7, area8, area9,
            vdef1, gprint1, vdef2, gprint2, comment1,
        ])
        gh.write()
        # pair each 'lan' graph with its 'wan' counterpart in one table row
        if 'lan' in hgraphfile_lg:
            soubor.write('<td><img src="' + str(hgraphfile_lg).replace(wwwpath, '') + '"></td><td><img src="' + str(
                hgraphfile_lg).replace(wwwpath, '').replace('lan', 'wan') + '"></td></tr>')
    soubor.write(wwwfooter)
    soubor.close()
    # report how long graph generation took
    dobabehu = datetime.datetime.today() - zacalo
    dobabehu = dobabehu.seconds
    print 'Doba zpracování grafů: ' + str(dobabehu) + ' sec.'
Tcgraph()
|
Discussion in 'Other ESET Business Products' started by enod_1701_D, Jan 17, 2012.
The counters were not increasing for AV or spam, even though things in spam were being logged. Rebooted Exchange 2007 and now we get this error message. VSAPI does not seem to be communicating with ESET. For example the SETUP button under VSAPI 2.6 -->Transport Agent does not do anything when clicked. The custom Rules appear to be the only thing operating at this time.
2007 Exchange has latest SP and updates. ESET is version 4 latest.
Please check the system event log for possible errors. Are there any that might be related to this issue?
If you run "Get-TransportAgent", is ESET listed among the transport agents? Have you already restarted the server to see if it cures the problem?
I've restarted the Exchange Transport, Information Store, and Hosts Service and it made no difference.
I enabled ESET debugging and can see that it is using VSAPI sometimes.
I shut the server down and turned it back on instead of doing a "restart" and it solved the problem, however I am not sure the spam protection is working. Nothing has logged in about 30 minutes and we usually get spam all day long.
Well, no errors, so I will wait a while and check it again. Usually it lists everything and if it was rejected or retained, so I will have to say that it is not working at all.
Please enable synchronous diagnostic logging under Computer protection -> Tools -> Log files and Server protection -> Tools -> Log files (Sync. writing without using cache & Log diagnostic information), receive some emails and then check the ESET Event and Antispam logs for more information. If there are some relevant entries, post them here.
|
# Copyright 2011-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#!/usr/bin/env python
# encoding: utf-8
"""
convert_netflix.py
place this file in the same directory as the Netflix 'training_data' folder
containing movie_titles.txt, then run:
$ python convert_netflix.py
To upload the resulting files to an S3 bucket, get s3cmd from:
http://s3tools.org/s3cmd
Then run:
$ s3cmd put --force netflix-data/ratings* s3://<yourbucket>/netflix-data/
convert_netflix.py reformats Netflix Prize data so that each line contains:
userid movieid rating
Output files are chunked for upload to S3 and placed
in a directory called 'netflix-data'. This takes about 20 minutes
on a 2 Ghz laptop and the resulting files are 428MB compressed.
Original format:
userid, rating, date
$ head mv_0000001.txt
1:
1488844,3,2005-09-06
822109,5,2005-05-13
885013,4,2005-10-19
30878,4,2005-12-26
823519,3,2004-05-03
893988,3,2005-11-17
124105,4,2004-08-05
convert_netflix.py converts these input files
to a set of files where each line contains: [userid movieid rating]
$ head user_movie_rating_1.txt
1488844 1 3
822109 1 5
885013 1 4
30878 1 4
823519 1 3
893988 1 3
124105 1 4
1248029 1 3
1842128 1 4
2238063 1 3
Created by Peter Skomoroch on 2009-03-09.
Copyright (c) 2009 Data Wrangling. All rights reserved.
"""
import sys
import os
import re
CHUNK_FILES = True
def mkdir(path):
    """Create directory *path* (including parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def main(args):
    """Reformat the Netflix Prize data set.

    Pass 1 rewrites movie_titles.txt as tab-separated "movieid<TAB>title".
    Pass 2 walks every per-movie rating file in args[1] and emits
    "userid movieid rating" lines, chunked into args[2]/ratings_N.txt files.

    :param args: sys.argv; args[1] is the input dir, args[2] the output dir
    NOTE(review): Python 2 only (print statements).
    """
    # Pass 1: reformat the movie-title catalogue.
    outfile = open('reformatted_movie_titles.txt', 'w')
    movie_title_file = open('movie_titles.txt','r')
    movie_title_exp=re.compile("([\w]+),([\w]+),(.*)")  # movieid, year, title
    movie_titles={}  # NOTE(review): never populated; appears to be dead code
    for line in movie_title_file:
        m = movie_title_exp.match(line.strip())
        # NOTE(review): m.group() raises AttributeError if a line does not match
        outfile.write('%s\t%s\n' % (m.group(1), m.group(3)))
    outfile.close()
    movie_title_file.close()
    # Pass 2: convert per-movie rating files into chunked triples.
    in_dir= args[1] #'training_set'
    out_dir = args[2] #'netflix-data'
    filenames = [in_dir +'/' + file for file in os.listdir(in_dir)]
    rating_count = 0
    L = 0  # running character count of the current output chunk
    outfile_num = 0
    mkdir(out_dir)
    outfilename = out_dir+ '/' + 'ratings_'+ str(outfile_num) +'.txt'
    output_file = open(outfilename, 'w')
    for i, moviefile in enumerate(filenames):
        # if i+1 in (10774, 175, 11064, 4472,
        # 16265, 9628, 299, 16948, 9368, 8627, 10627): # for sample dataset
        if i % 100 == 0: print "processing movie %s " % (i+1)
        f = open(moviefile,'r')
        for j, line in enumerate(f.readlines()):
            if j == 0:
                # first line of each movie file is "<movieid>:"
                movieid = line.split(':')[0]
            else:
                # remaining lines are "userid,rating,date"; date is discarded
                (userid, rating, date) = line.split(',')
                nextline = ' '.join([userid, movieid, rating+'\n'])
                L += len(nextline) # when this is 65536, we start a new file
                if L/1000 > 65536 and CHUNK_FILES:
                    # chunk boundary (~64MB): close current file and start the next
                    output_file.close()
                    # os.system('gzip ' + outfilename)
                    outfile_num += 1
                    outfilename = out_dir+ '/' + \
                    'ratings_'+ str(outfile_num) +'.txt'
                    print "--- starting new file: %s" % outfilename
                    output_file = open(outfilename, 'w')
                    L = len(nextline)
                output_file.write(nextline)
                rating_count += 1
        f.close()
    output_file.close()
    # os.system('gzip ' + outfilename)
if __name__ == '__main__':
    # Entry point: expects [script, input_dir, output_dir] on the command line.
    main(sys.argv)
|
You will have a unique hiking tour to explore Manzanillo’ s amazing wildlife refuge. Jungle Man is a local expert and will offer you an exclusive and fun experience. This tour provides the opportunity to watch white-faced monkeys and howler monkeys, sloths, iguanas, snakes, poisonous frogs and many other rain forest creatures. The Gandoca-Manzanillo Refuge is also home to 384 species of birds including hummingbirds, tanagers, owls, and three kind of toucans.
|
# TODO crossplatform join these tests with test_application.py
import sys
import os
import unittest
import subprocess
import time
sys.path.append(".")
from pywinauto.application import WindowSpecification # noqa: E402
if sys.platform.startswith('linux'):
from pywinauto.controls import atspiwrapper # register atspi backend
from pywinauto.linux.application import Application # noqa: E402
from pywinauto.linux.application import AppStartError # noqa: E402
from pywinauto.linux.application import AppNotConnected # noqa: E402
from pywinauto.linux.application import ProcessNotFoundError # noqa: E402
app_name = r"gtk_example.py"
def _test_app():
    """Return the absolute path of the GTK sample app, adding its folder to sys.path."""
    three_levels_up = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    samples_folder = os.path.join(three_levels_up, r"apps/Gtk_samples")
    sys.path.append(samples_folder)
    return os.path.join(samples_folder, app_name)
sys.path.append(".")
if sys.platform.startswith('linux'):
    class ApplicationTestCases(unittest.TestCase):
        """Unit tests for the application.Application class"""
        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            self.subprocess_app = None
            self.app = Application()
        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()
            if self.subprocess_app:
                # reap the externally spawned process so it doesn't become a zombie
                self.subprocess_app.communicate()
        def test__init__(self):
            """Verify that Application instance is initialized or not"""
            self.assertRaises(ValueError, Application, backend='unregistered')
        def test_not_connected(self):
            """Verify that it raises when the app is not connected"""
            self.assertRaises(AppNotConnected, Application().__getattribute__, 'Hiya')
            self.assertRaises(AppNotConnected, Application().__getitem__, 'Hiya')
            self.assertRaises(AppNotConnected, Application().window_, name='Hiya')
            self.assertRaises(AppNotConnected, Application().top_window, )
        def test_start_problem(self):
            """Verify start_ raises on unknown command"""
            self.assertRaises(AppStartError, Application().start, 'Hiya')
        def test_start(self):
            """test start() works correctly"""
            self.assertEqual(self.app.process, None)
            self.app.start(_test_app())
            self.assertNotEqual(self.app.process, None)
        def test_connect_by_pid(self):
            """Create an application via subprocess then connect it to Application"""
            self.subprocess_app = subprocess.Popen(_test_app().split(), stdout=subprocess.PIPE, shell=False)
            time.sleep(1)  # give the app time to come up before connecting
            self.app.connect(pid=self.subprocess_app.pid)
            self.assertEqual(self.app.process, self.subprocess_app.pid)
        def test_connect_by_path(self):
            """Create an application via subprocess then connect it to Application by application name"""
            self.subprocess_app = subprocess.Popen(_test_app().split(), stdout=subprocess.PIPE, shell=False)
            time.sleep(1)
            self.app.connect(path=_test_app())
            self.assertEqual(self.app.process, self.subprocess_app.pid)
        def test_cpu_usage(self):
            """Verify cpu_usage() and wait_cpu_usage_lower() across app states"""
            self.app.start(_test_app())
            self.assertGreater(self.app.cpu_usage(0.1), 0)
            self.app.wait_cpu_usage_lower(threshold=0.1, timeout=4.0, usage_interval=0.3)
            # default timings
            self.assertEqual(self.app.cpu_usage(), 0)
            # non-existing process
            self.app.kill()
            self.assertRaises(ProcessNotFoundError, self.app.cpu_usage, 7.8)
            # not connected or not started app
            self.assertRaises(AppNotConnected, Application().cpu_usage, 12.3)
        def test_is_process_running(self):
            """Verify is_process_running() flips after kill()"""
            self.app.start(_test_app())
            time.sleep(1)
            self.assertTrue(self.app.is_process_running())
            self.app.kill()
            self.assertFalse(self.app.is_process_running())
        def test_kill_killed_app(self):
            """kill() on an already-killed app should still report success"""
            self.app.start(_test_app())
            time.sleep(1)
            self.app.kill()
            self.assertTrue(self.app.kill())
        def test_kill_connected_app(self):
            """kill() works on an app that was connected to, not started"""
            self.subprocess_app = subprocess.Popen(_test_app().split(), stdout=subprocess.PIPE, shell=False)
            time.sleep(1)
            self.app.connect(pid=self.subprocess_app.pid)
            self.app.kill()
            # Unlock the subprocess explicity, otherwise
            # it's presented in /proc as a zombie waiting for
            # the parent process to pickup the return code
            self.subprocess_app.communicate()
            self.subprocess_app = None
            self.assertFalse(self.app.is_process_running())
    class WindowSpecificationTestCases(unittest.TestCase):
        """Unit tests for the application.WindowSpecification class"""
        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            self.app = Application()
        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()
        def test_app_binding(self):
            """Window specs must expose the Application they were created from"""
            self.app.start(_test_app())
            self.assertEqual(self.app.NonExistingDialog.app, self.app)
            self.assertEqual(self.app.Application.Panel.exists(), True)
            self.assertEqual(self.app.Application.Panel.app, self.app)
            self.assertIsInstance(self.app.Application.find(), atspiwrapper.AtspiWrapper)
            wspec = WindowSpecification(dict(name=u"blah", app=self.app))
            self.assertEqual(wspec.app, self.app)
        def test_app_binding_after_app_restart(self):
            """A spec keeps pointing at the same Application across a restart"""
            self.app.start(_test_app())
            old_pid = self.app.process
            wspec = self.app.Application.Panel
            self.app.kill()
            self.assertEqual(wspec.app, self.app)
            self.app.start(_test_app())
            new_pid = self.app.process
            # restart must yield a fresh process, but the binding survives
            self.assertNotEqual(old_pid, new_pid)
            self.assertEqual(wspec.app, self.app)
if __name__ == "__main__":
    # Run all test cases defined in this module.
    unittest.main()
|
'Classical Highlights' presents the best-known and most popular performance pieces of the classical repertoire in arrangements for horn and piano. The comprehensive selection of works enables the musician to access classical masterpieces - from Handel and Bach via Schubert, Mendelssohn, Schumann and Wagner to Fauré, Elgar, Debussy and Satie. There will be something for any hornist who is looking for a popular classical work for teaching or performing purposes.
- F. Mendelssohn:Wedding March, op. 61/9 from "A Midsummer Night's Dream"
- R. Wagner: Treulich geführt. Bridal Chorus from "Lohengrin"
|
import numpy as np
from copy import copy
import datetime
from syscore.genutils import none_to_object, object_to_none, list_of_ints_with_highest_common_factor_positive_first
from syscore.objects import no_order_id, no_children, no_parent
from sysexecution.trade_qty import tradeQuantity
from sysobjects.production.tradeable_object import tradeableObject
class overFilledOrder(Exception):
    """Raised when a fill quantity would exceed the order's desired trade size."""
    pass
class orderType(object):
    """Value object wrapping an order-type string (base class allows only "market").

    An empty string acts as the "no type" placeholder, e.g. when restoring an
    order whose stored type was missing (see Order.from_dict).
    """
    def __repr__(self):
        return self.as_string()
    def allowed_types(self):
        """Return the list of valid type strings; subclasses override to extend."""
        return ["market"]
    def __init__(self, type_string: str):
        """
        :param type_string: one of allowed_types(), or None/"" for 'no type'
        :raises AssertionError: if a non-None string is not an allowed type
        """
        if type_string is None:
            type_string = ""
        else:
            assert type_string in self.allowed_types(), "Type %s not valid" % type_string
        self._type = type_string
    def as_string(self) -> str:
        return self._type
    def __eq__(self, other):
        # Fix: previously called other.as_string() unconditionally, raising
        # AttributeError when compared against non-orderType objects (e.g. a
        # plain string). Returning NotImplemented lets Python fall back to
        # its default comparison instead of crashing.
        if not isinstance(other, orderType):
            return NotImplemented
        return self.as_string() == other.as_string()
    def __hash__(self):
        # Defining __eq__ alone would make instances unhashable (hash set to
        # None); hash on the type string so equal orderTypes hash equally.
        return hash(self._type)
class Order(object):
"""
An order represents a desired or completed trade
This is a base class, specific orders are used for virtual and contract level orders
Need to be able to compare orders with each other to enforce the 'no multiple orders of same characteristics'
"""
def __init__(
self,
tradeable_object: tradeableObject,
trade: tradeQuantity,
fill: tradeQuantity=None,
filled_price: float=None,
fill_datetime: datetime.datetime=None,
locked=False,
order_id: int=no_order_id,
parent: int=no_parent,
children: list =no_children,
active:bool =True,
order_type: orderType = orderType("market"),
**order_info
):
"""
:param object_name: name for a tradeableObject, str
:param trade: trade we want to do, int or list
:param fill: fill done so far, int
:param fill_datetime: when fill done (if multiple, is last one)
:param fill_price: price of fill (if multiple, is last one)
:param locked: if locked an order can't be modified, bool
:param order_id: ID given to orders once in the stack, do not use when creating order
:param parent: int, order ID of parent order in upward stack
:param children: list of int, order IDs of child orders in downward stack
:param active: bool, inactive orders have been filled or cancelled
:param kwargs: other interesting arguments
"""
self._tradeable_object = tradeable_object
(
resolved_trade,
resolved_fill,
) = resolve_inputs_to_order(trade, fill)
if children == []:
children = no_children
self._trade = resolved_trade
self._fill = resolved_fill
self._filled_price = filled_price
self._fill_datetime = fill_datetime
self._locked = locked
self._order_id = order_id
self._parent = parent
self._children = children
self._active = active
self._order_type = order_type
self._order_info = order_info
def __repr__(self):
terse_repr = self.terse_repr()
return terse_repr
def full_repr(self):
terse_repr = self.terse_repr()
full_repr = terse_repr + " %s" % str(self._order_info)
return full_repr
def terse_repr(self):
if self._locked:
lock_str = " LOCKED"
else:
lock_str = ""
if not self._active:
active_str = " INACTIVE"
else:
active_str = ""
return "(Order ID:%s) Type %s for %s, qty %s, fill %s@ price, %s Parent:%s Children:%s%s%s" % (
str(self.order_id),
str(self._order_type),
str(self.key),
str(self.trade),
str(self.fill),
str(self.filled_price),
str(self.parent),
str(self.children),
lock_str,
active_str,
)
@property
def order_info(self):
return self._order_info
@property
def tradeable_object(self):
return self._tradeable_object
@property
def trade(self):
return self._trade
def as_single_trade_qty_or_error(self) -> int:
return self.trade.as_single_trade_qty_or_error()
def replace_required_trade_size_only_use_for_unsubmitted_trades(self, new_trade: tradeQuantity):
# ensure refactoring works
assert type(new_trade) is tradeQuantity
try:
assert len(new_trade)==len(self.trade)
except:
raise Exception("Trying to replace trade of length %d with one of length %d" % (len(self.trade), len(new_trade)))
new_order = copy(self)
new_order._trade = new_trade
return new_order
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, order_type: orderType):
self._order_type = order_type
@property
def fill(self):
return tradeQuantity(self._fill)
@property
def filled_price(self):
return self._filled_price
@property
def fill_datetime(self):
return self._fill_datetime
def fill_order(self, fill_qty: tradeQuantity,
filled_price: float,
fill_datetime: datetime.datetime=None):
# Fill qty is cumulative, eg this is the new amount filled
try:
assert self.trade.fill_less_than_or_equal_to_desired_trade(
fill_qty)
except:
raise overFilledOrder("Can't fill order with fill %s more than trade quantity %s "
% (str(fill_qty), str(self.trade)))
self._fill = fill_qty
self._filled_price = filled_price
if fill_datetime is None:
fill_datetime = datetime.datetime.now()
self._fill_datetime = fill_datetime
def fill_equals_zero(self) -> bool:
return self.fill.equals_zero()
def fill_equals_desired_trade(self) -> bool:
return self.fill == self.trade
def is_zero_trade(self) -> bool:
return self.trade.equals_zero()
@property
def order_id(self) -> int:
order_id = resolve_orderid(self._order_id)
return order_id
@order_id.setter
def order_id(self, order_id: int):
assert isinstance(order_id, int)
current_id = getattr(self, "_order_id", no_order_id)
if current_id is no_order_id:
self._order_id = order_id
else:
raise Exception("Can't change order id once set")
@property
def children(self) -> list:
return self._children
@children.setter
def children(self, children):
if isinstance(children, int):
children = [children]
if not self.no_children():
raise Exception(
"Can't add children to order which already has them: use add another child"
)
self._children = children
def remove_all_children(self):
self._children = no_children
def no_children(self):
return self.children is no_children
def add_a_list_of_children(self, list_of_new_children: list):
_ = [self.add_another_child(new_child) for new_child in list_of_new_children]
def add_another_child(self, new_child: int):
if self.no_children():
new_children = [new_child]
else:
new_children = self.children + [new_child]
self._children = new_children
@property
def remaining(self) -> tradeQuantity:
return self.trade - self.fill
def create_order_with_unfilled_qty(self):
new_order = copy(self)
new_trade = self.remaining
new_order._trade = new_trade
new_order._fill = new_trade.zero_version()
new_order._filled_price = None
new_order._fill_datetime = None
return new_order
def change_trade_size_proportionally_to_meet_abs_qty_limit(self, max_abs_qty:int):
# if this is a single leg trade, does a straight replacement
# otherwise
new_order = copy(self)
old_trade = new_order.trade
new_trade = old_trade.change_trade_size_proportionally_to_meet_abs_qty_limit(max_abs_qty)
new_order = new_order.replace_required_trade_size_only_use_for_unsubmitted_trades(new_trade)
return new_order
def reduce_trade_size_proportionally_so_smallest_leg_is_max_size(self, max_size: int):
new_order = copy(self)
old_trade = new_order.trade
new_trade = old_trade.reduce_trade_size_proportionally_so_smallest_leg_is_max_size(max_size)
new_order = new_order.replace_required_trade_size_only_use_for_unsubmitted_trades(new_trade)
return new_order
def trade_qty_with_lowest_abs_value_trade_from_order_list(self, list_of_orders: list) -> 'Order':
## only deals with single legs right now
new_order = self.single_leg_trade_qty_with_lowest_abs_value_trade_from_order_list(list_of_orders)
return new_order
def single_leg_trade_qty_with_lowest_abs_value_trade_from_order_list(self, list_of_orders: list) -> 'Order':
list_of_trade_qty = [order.trade for order in list_of_orders]
my_trade_qty = self.trade
new_trade = my_trade_qty.single_leg_trade_qty_with_lowest_abs_value_trade_from_list(list_of_trade_qty)
new_order= self.replace_required_trade_size_only_use_for_unsubmitted_trades(new_trade)
return new_order
def change_trade_qty_to_filled_qty(self):
self._trade = self._fill
@property
def parent(self):
parent = resolve_parent(self._parent)
return parent
@parent.setter
def parent(self, parent: int):
if self._parent == no_parent:
self._parent = int(parent)
else:
raise Exception("Can't add parent to order which already has them")
@property
def active(self):
return bool(self._active)
def deactivate(self):
# Once deactivated: filled or cancelled, we can never go back!
self._active = False
def zero_out(self):
zero_version_of_trades = self.trade.zero_version()
self._fill = zero_version_of_trades
self.deactivate()
def as_dict(self):
object_dict = dict(key=self.key)
object_dict["trade"] = list(self.trade)
object_dict["fill"] = list(self.fill)
object_dict["fill_datetime"] = self.fill_datetime
object_dict["filled_price"] = self.filled_price
object_dict["locked"] = self._locked
object_dict["order_id"] = object_to_none(self.order_id, no_order_id)
object_dict["parent"] = object_to_none(self.parent, no_parent)
object_dict["children"] = object_to_none(self.children, no_children)
object_dict["active"] = self.active
object_dict["order_type"] = self.order_type.as_string()
for info_key, info_value in self.order_info.items():
object_dict[info_key] = info_value
return object_dict
@classmethod
def from_dict(Order, order_as_dict):
# will need modifying in child classes
trade = order_as_dict.pop("trade")
object_name = order_as_dict.pop("key")
locked = order_as_dict.pop("locked")
fill = order_as_dict.pop("fill")
filled_price = order_as_dict.pop("filled_price")
fill_datetime = order_as_dict.pop("fill_datetime")
order_id = none_to_object(order_as_dict.pop("order_id"), no_order_id)
parent = none_to_object(order_as_dict.pop("parent"), no_parent)
children = none_to_object(order_as_dict.pop("children"), no_children)
active = order_as_dict.pop("active")
order_type = orderType(order_as_dict.pop("order_type", None))
order_info = order_as_dict
order = Order(
object_name,
trade,
fill=fill,
fill_datetime=fill_datetime,
filled_price=filled_price,
locked=locked,
order_id=order_id,
parent=parent,
children=children,
active=active,
order_type=order_type,
**order_info
)
return order
    @property
    def key(self):
        # Delegates to the tradeable object's key, which identifies the instrument.
        return self.tradeable_object.key
    def is_order_locked(self):
        """Return True when the order is locked against modification."""
        return bool(self._locked)
    def lock_order(self):
        """Lock the order against further modification."""
        self._locked = True
    def unlock_order(self):
        """Release the modification lock."""
        self._locked = False
def same_tradeable_object(self, other):
my_object = self.tradeable_object
other_object = other.tradeable_object
return my_object == other_object
def same_trade_size(self, other):
my_trade = self.trade
other_trade = other.trade
return my_trade == other_trade
    def __eq__(self, other):
        # Orders are considered equal when they are for the same tradeable
        # object AND the same trade size; all other attributes are ignored.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 -- confirm that is intended.
        same_tradeable_object = self.same_tradeable_object(other)
        same_trade = self.same_trade_size(other)
        return same_tradeable_object and same_trade
    def log_with_attributes(self, log):
        """
        Returns a new log object with order attributes added

        Base implementation is a pass-through; presumably subclasses attach
        their own identifying attributes -- confirm against child classes.

        :param log: logger
        :return: log
        """
        return log
def resolve_inputs_to_order(trade, fill) -> (tradeQuantity, tradeQuantity):
    """Coerce raw trade/fill inputs into tradeQuantity; a missing fill becomes zero."""
    resolved_trade = tradeQuantity(trade)
    resolved_fill = (
        resolved_trade.zero_version() if fill is None else tradeQuantity(fill)
    )
    return resolved_trade, resolved_fill
def resolve_orderid(order_id: int):
    """Normalise an order id: None (or the sentinel itself) maps to no_order_id."""
    if order_id is no_order_id or order_id is None:
        return no_order_id
    return int(order_id)
def resolve_parent(parent: int):
    """Normalise a parent id: None (or the sentinel itself) maps to no_parent."""
    if parent is no_parent or parent is None:
        return no_parent
    return int(parent)
def resolve_multi_leg_price_to_single_price(trade_list: tradeQuantity, price_list: list) -> float:
    """Collapse per-leg prices into a single price for a (possibly multi-leg) trade.

    Returns None when there are no fills yet, or when the combined price is NaN.
    For a single-leg trade the leg price is returned unchanged.
    """
    if len(price_list)==0:
        ## This will be the case when an order is first created or has no fills
        return None
    if len(price_list)==1:
        return price_list[0]
    assert len(price_list) == len(trade_list)
    # Weight each leg's price by the trade legs reduced to their smallest
    # integer ratio (first leg positive), i.e. a spread-style combined price.
    trade_list_as_common_factor = list_of_ints_with_highest_common_factor_positive_first(trade_list)
    fill_price = [x * y for x,y in zip(trade_list_as_common_factor, price_list)]
    fill_price = sum(fill_price)
    # NaN means at least one leg price was missing -- treat as "no price yet".
    if np.isnan(fill_price):
        return None
    return fill_price
|
For the rest of your life and then some.
That’s how long our warranties last.
The warranty is transferable to another person, so if you sell your home it carries over for 50 years.
Our warranty lasts the rest of your life because we stand by our products and our work. You’ll never need it done again, and if you need a fix, you can count on us to complete it.
|
#!/usr/bin/python
#------------------------------------------
#
# A script to periodically download a
# Tor relay's descriptor and write it to
# the disk
#
# Author : Luiz Kill
# Date : 21/11/2014
#
# http://lzkill.com
#
#------------------------------------------
import os
import sys
import time
from stem.descriptor import DocumentHandler
from stem.descriptor.remote import DescriptorDownloader
DOWNLOAD_DELAY = 60.0  # seconds between descriptor refreshes
FINGERPRINT = [""]  # TODO: fill in the fingerprint(s) of the relay(s) to monitor
PATHNAME="/var/lib/rpimonitor/stat/tor_desc"  # output file consumed by RPi-Monitor
def main():
    """Periodically fetch the relay's server descriptor and dump it to PATHNAME.

    Runs forever; any error aborts the loop and is reported on stdout.
    """
    try:
        # Text mode ("w" not "wb"): we only ever write str lines, and this
        # keeps the script working under both Python 2 and 3.
        dump = open(PATHNAME, "w")
        downloader = DescriptorDownloader()
        while True:
            query = downloader.get_server_descriptors(fingerprints=FINGERPRINT)
            for desc in query.run():
                dump.seek(0)
                dump.write("Nickname " + str(desc.nickname)+"\n")
                dump.write("Fingerprint " + "".join(str(desc.fingerprint).split())+"\n")
                dump.write("Published " + str(desc.published)+"\n")
                dump.write("Address " + str(desc.address)+"\n")
                dump.write("Version " + str(desc.tor_version)+"\n")
                dump.write("Uptime " + str(desc.uptime)+"\n")
                dump.write("Average_Bandwidth " + str(desc.average_bandwidth)+"\n")
                dump.write("Burst_Bandwidth " + str(desc.burst_bandwidth)+"\n")
                dump.write("Observed_Bandwidth " + str(desc.observed_bandwidth)+"\n")
                dump.write("Hibernating " + str(desc.hibernating)+"\n")
                # Fix: seek(0) alone leaves stale trailing bytes when the new
                # dump is shorter than the previous one -- truncate the file,
                # and flush so the monitor sees fresh data before we sleep.
                dump.truncate()
                dump.flush()
            time.sleep(DOWNLOAD_DELAY)
    except Exception as exc:
        print('Unable to retrieve the server descriptors: %s' % exc)
# Run the poller when executed as a script.
if __name__ == '__main__':
    main()
|
Charismatic and chewy. This physical theatre performance encompasses all the headache and heartache of entering adulthood. A small cast of four spirited actors Vicky Bennett, Riley Pullen, Patrick Durnan-Silva and Jacqui Essing are the epitome of most young adults battling real life and first world problems.
|
from __future__ import print_function
import unittest
import argparse
import opentuner
from opentuner.api import TuningRunManager
from opentuner.measurement.interface import DefaultMeasurementInterface
from opentuner.resultsdb.models import Result
from opentuner.search.manipulator import ConfigurationManipulator, IntegerParameter
__author__ = 'Chick Markley chick@eecs.berkeley.edu U.C. Berkeley'
class TestApi(unittest.TestCase):
    """Exercise the TuningRunManager API lifecycle and a tiny search space."""

    def test_api_start_and_stop(self):
        """Finishing one api and starting a second one must not raise."""
        parser = argparse.ArgumentParser(parents=opentuner.argparsers())
        args = parser.parse_args(args=[])
        # we set up an api instance but only run it once
        manipulator = ConfigurationManipulator()
        manipulator.add_parameter(IntegerParameter('x', -10, 10))
        interface = DefaultMeasurementInterface(args=args,
                                                manipulator=manipulator,
                                                project_name='examples',
                                                program_name='api_test',
                                                program_version='0.1')
        api = TuningRunManager(interface, args)
        desired_result = api.get_next_desired_result()
        cfg = desired_result.configuration.data['x']
        result = Result(time=float(cfg))
        api.report_result(desired_result, result)
        # something changes and now we want to shut down the api
        # and start a new one, this used to raise an exception
        api.finish()
        manipulator = ConfigurationManipulator()
        manipulator.add_parameter(IntegerParameter('x', -100, 100))
        interface = DefaultMeasurementInterface(args=args,
                                                manipulator=manipulator,
                                                project_name='examples',
                                                program_name='api_test',
                                                program_version='0.1')
        api = TuningRunManager(interface, args)
        desired_result = api.get_next_desired_result()
        cfg = desired_result.configuration.data['x']
        result = Result(time=float(cfg))
        api.report_result(desired_result, result)
        self.assertIsNotNone(api.get_best_configuration())
        api.finish()

    def test_small_range(self):
        """The tuner should find the minimum (-10) of f(x) = x on a tiny space."""
        parser = argparse.ArgumentParser(parents=opentuner.argparsers())
        args = parser.parse_args(args=[])
        manipulator = ConfigurationManipulator()
        manipulator.add_parameter(IntegerParameter('x', -10, 10))
        interface = DefaultMeasurementInterface(args=args,
                                                manipulator=manipulator,
                                                project_name='examples',
                                                program_name='api_test',
                                                program_version='0.1')
        api = TuningRunManager(interface, args)
        configs_tried = set()
        # Fix: use range() instead of Python-2-only xrange() -- this file
        # already targets Python 3 via the print_function __future__ import.
        for x in range(40):
            desired_result = api.get_next_desired_result()
            if desired_result is None:
                # The search space for this example is very small, so sometimes
                # the techniques have trouble finding a config that hasn't already
                # been tested. Change this to a continue to make it try again.
                break
            cfg = desired_result.configuration.data['x']
            result = Result(time=float(cfg))
            api.report_result(desired_result, result)
            configs_tried.add(cfg)
        best_cfg = api.get_best_configuration()
        api.finish()
        self.assertEqual(best_cfg['x'], -10.0)
        # TODO: should this have tried everything in range?
        # print(configs_tried)
        # for x in range(-10, 11):
        #     print(x)
        #     self.assertTrue(
        #         x in configs_tried,
        #         "{} should have been in tried set {}".format(x, configs_tried))
|
Our goal is to make the process of buying or selling real estate efficient, enjoyable and rewarding for you. See what our clients have said about our real estate services and what they have experienced working with us.
Mary and I wanted to let you know how deeply we appreciate everything you've done for our family. From start to finish, our entire Real Estate experience with Properties By Hood was outstanding. You made our dreams come true with the purchase of a beautiful new home and flawlessly handled the sale of our existing home, both transactions were seamless. Thank you for always being so willing and available to answer ALL our questions throughout the entire process. Your knowledge, experience and outstanding vendor recommendations gave us peace of mind and a great deal of stress relief. Your attention to detail and fantastic marketing exceeded our expectations and were definitely key factors in the quick sale of our home. It's hard to believe the entire process of buying and selling our home, especially in this tough market, took just over 60 days! Your services are fantastic and again we can't thank you enough for everything. We will definitely recommend and use Properties By Hood for all our future Real Estate needs.
From the first day we met Randall and Grace, we knew they would help us find the perfect home. Aside from being kind and personable, they had a very ‘no pressure’ attitude which made us feel comfortable from the very beginning. Since we weren’t exactly sure of what we were looking for, they gave us the personalized attention we needed to narrow down our options and prioritize our needs. Additionally, Randall and Grace were quick to respond regarding new market postings and they accommodated our schedules when viewing homes (most often with our young children). We could not be happier with our new home and we look back on our home buying experience as extremely positive.
Regardless of whether you are buying or selling, we would highly recommend Randall and Grace to help you through this life changing event. We could not have asked for better Realtors to guide us through the process. Thank you Randall and Grace!
I would like to express my gratitude to Properties by Hood for being great Realtors during my process of buying a house in Encinitas, California in 2007. I had already selected the house that I wanted when I called up Properties by Hood. It was a foreclosure, and the market was just beginning to decline. Unlike Realtors I have worked with in the past, Properties by Hood cautioned me when they felt my bid for the house was over market value. Most Realtors would have simply pushed me to make a bid high enough so that a sale occurred. What I appreciate about Properties by Hood was that they had the integrity to advise me correctly even at the cost of the sale. In fact, when my first bid was rejected, they just hibernated until I was ready to try again. We did, and in fact got the house 30 thousand dollars below the appraised value! I just refinanced and the house is still appraised 22 thousand dollars above my purchase price. I can highly recommend Properties by Hood because they have the integrity and savvy you will need during this strange housing market.
Randall and Grace Hood have been an absolute pleasure to work with. Without them there is no way that our real estate transaction would come close to fruition. Their knowledge, compassion, and willingness to help us cannot be quantified in words. We are very fortunate to have had Randall and Grace represent our real estate interests.
Thank you so much for all your help in finding us a place in Southern California. You two are great at what you do and we are glad to have gotten to know you as friends too!
Properties By Hood was able to navigate this difficult foreclosure market and negotiate with the bank to allow me to successfully purchase the home I wanted. After placing offers on other properties with other agents only to be disappointed by the lack of response, Properties By Hood was able to secure the house I wanted, on the terms I wanted in just a few weeks.
You guys have made my dream come true. God bless you all.
Thanks again for all your help. Both of you have been great and very informative throughout this process.
It's finally complete! I want to thank both of you for all of the help, insight and direction you provided over these past many months. I would certainly recommend you guys to anyone looking to buy a property.
Can I just go on record to say Jon and I have bought/sold 4 houses and have lots of experience with realtors (we have moved around quite a bit). You and Randy have been for me the most professional, pleasant, patient, responsive and just plain fun to be around! Not to mention troopers for gallivanting around in the snow and cold with a bunch of cousins under 10 who are very happy to see each other!
I am so blessed to have been able to work with you both. I am also glad for the friendship that has blossomed from it. Thank you for all your help.
|
from PyQt4.QtCore import Qt, SIGNAL
from PyQt4.QtGui import QGraphicsLinearLayout
from PyKDE4.plasma import Plasma
from PyKDE4 import plasmascript
from PyKDE4.kdeui import KMessageBox
from PyKDE4.kdecore import i18n, KStandardDirs
from pluginmanager import PluginManager
import providerplugins.Provider
import adressplugins.AdressPlugin
import ConfigParser, os, re
class Multimobilewidget(plasmascript.Applet):
    """Plasma applet for sending SMS messages through pluggable provider backends."""
    def __init__(self, parent, args=None):
        # Plasma constructs the applet here; real setup happens in init().
        plasmascript.Applet.__init__(self, parent)
    def init(self):
        """Build the widget UI, wire up signals and load the plugin managers."""
        self.setHasConfigurationInterface(False)
        self.setAspectRatioMode(Plasma.Square)
        self.theme = Plasma.Svg(self)
        self.setBackgroundHints(Plasma.Applet.DefaultBackground)
        self.layout = QGraphicsLinearLayout(Qt.Vertical, self.applet)
        # NOTE(review): getLogin() may open the config assistant, which needs
        # self.providerPluginManager -- but that is only created further down
        # in this method. If the config file is missing or unreadable this
        # raises AttributeError; confirm the intended initialisation order.
        self.getLogin()
        self.setHasConfigurationInterface(True)
        self.label = Plasma.Label(self.applet)
        self.label.setText(i18n("Welcome to the Multimobilewidget"))
        nrlabel = Plasma.Label(self.applet)
        nrlabel.setText(i18n("Phonenr(s)"))
        self.messagelabel = Plasma.Label(self.applet)
        self.messagelabel.setText(i18n("Message - 0 signs used"))
        self.nrfield = Plasma.LineEdit()
        self.messageText = Plasma.TextEdit(self.applet)
        self.messageText.nativeWidget()
        sendButton = Plasma.PushButton(self.applet)
        sendButton.setText(i18n("Send the SMS!"))
        sendButton.resize(20, 40)
        configButton = Plasma.PushButton(self.applet)
        configButton.setText("Config")
        configButton.resize(20, 40)
        # Stack the controls vertically in the applet layout.
        self.layout.addItem(self.label)
        self.layout.addItem(nrlabel)
        self.layout.addItem(self.nrfield)
        self.layout.addItem(self.messagelabel)
        self.layout.addItem(self.messageText)
        self.layout.addItem(sendButton)
        self.layout.addItem(configButton)
        self.applet.setLayout(self.layout)
        self.connect(sendButton, SIGNAL("clicked()"), self.onSendClick)
        self.connect(configButton, SIGNAL("clicked()"), self.onConfigClick)
        self.connect(self.messageText, SIGNAL("textChanged()"), self.onTextChanged)
        fullPath = str(self.package().path())
        # Discover provider (SMS gateway) and address-book plugins.
        self.providerPluginManager = PluginManager("multimobilewidget/contents/code/providerplugins/","", providerplugins.Provider.Provider)
        self.providerpluginlist = self.providerPluginManager.getPluginClassList()
        for provider in self.providerpluginlist:
            # NOTE(review): self.ui is never created in this class -- these
            # self.ui.* accesses look like leftovers from a Qt-Designer-based
            # UI and will raise AttributeError at runtime; verify.
            self.ui.providerList.addItem(provider.getObjectname())
            print provider.getObjectname()
        self.ui.providerList.setCurrentRow(0)
        self.adressplugins = PluginManager("multimobilewidget/contents/code/adressplugins/","", adressplugins.AdressPlugin.AdressPlugin)
        self.adresspluginlist = self.adressplugins.getPluginClassList()
        self.adressList = list()
    def onConfigClick(self):
        """Open the configuration assistant; reload login data when it closes."""
        from config import config
        self.startAssistant = config(self.providerPluginManager, self.adressplugins)
        self.startAssistant.show()
        self.connect(self.startAssistant, SIGNAL("finished(int)"), self.getLogin)
    def connectToAkonadi(self):
        # Presumably intended to pull contacts from the Akonadi data engine;
        # the engine is created but never queried -- TODO confirm/complete.
        self.akonadiEngine = Plasma.DataEngine()
        self.akonadiEngine.setName("akonadi")
    def onSendClick(self):
        """Validate the form and send the SMS via the selected provider plugin."""
        # NOTE(review): if no provider matches the selected list item, `sms`
        # stays unbound and the calls below raise NameError; verify.
        for provider in self.providerpluginlist:
            if(provider.getObjectname() == self.ui.providerList.selectedItems()[0].text()):
                sms = provider
        if self.ui.smstext.toPlainText() != "":
            if self.ui.phonenr.text() != "":
                self.getLogin()
                try:
                    sms.setConfig(self.config)
                except Exception:
                    # No usable configuration yet -- open the assistant instead.
                    self.onConfigClick()
                    return
                sms.clearNrs()
                # Accept one or more numbers in international format (+...).
                for nr in re.findall("(\+\d*)", self.ui.phonenr.text()):
                    sms.addNr(nr)
                sms.setText(self.ui.smstext.toPlainText())
                savenr = self.ui.phonenr.text()
                try:
                    sms.execute()
                    # (commented-out KNotification variant: "successfully sent to <nr>")
#                    self.notification.setText(i18n("Wurde erfolgreich an <i>%1</i> geschickt!").arg(savenr ))
#                    self.notification.setTitle("Erfolg!")
#                    self.notification.sendEvent()
                    KMessageBox.information(None, i18n("SMS sendet successfully to " + savenr + ". Service: "+sms.getProvidername()), i18n("Success!"))
                except Exception, error:
                    KMessageBox.error(None, i18n(error.message), i18n("Sendproblems"))
                self.ui.phonenr.clear()
                self.ui.smstext.clear()
            else:
                KMessageBox.error(None, i18n("Please fill in a phonenr"), i18n("Please fill in a phonenr"))
        else:
            KMessageBox.error(None, i18n("Please fill in a Text"), i18n("Please fill in a Text"))
    def onTextChanged(self):
        """Update the character counter label as the message text changes."""
        tmp = self.messageText.nativeWidget()
        if(len(tmp.toPlainText()) < 160):
            self.messagelabel.setText(i18n("Message - ") + str(len(tmp.toPlainText())) + i18n(" signs used"))
        else:
            # count how many sms are used and update the status
            self.messagelabel.setText(i18n("Message - ") + str(len(tmp.toPlainText())) + i18n(" signs used"))
    def getLogin(self):
        """Load ~/.multimobile.cfg, or launch the config assistant if absent/broken."""
        if(os.path.isfile(os.getenv("HOME") + "/.multimobile.cfg")):
            try:
                self.config = ConfigParser.ConfigParser()
                self.config.readfp(open(os.getenv("HOME") + "/.multimobile.cfg"))
            except Exception, e:
                # Config file exists but could not be parsed -- reconfigure.
                print e
                from config import config
                self.startAssistant = config(self.providerPluginManager, self.adressplugins, False)
                self.startAssistant.show()
                self.connect(self.startAssistant, SIGNAL("finished(int)"), self.getLogin)
        else:
            # First run: no config file yet.
            from config import config
            self.startAssistant = config(self.providerPluginManager, self.adressplugins, True)
            self.startAssistant.show()
            self.connect(self.startAssistant, SIGNAL("finished(int)"), self.getLogin)
def CreateApplet(parent):
    # Plasma scriptengine entry point: factory returning the applet instance.
    return Multimobilewidget(parent)
|
Residency, or where you live, impacts tuition and enrollment fees. It is determined when you are admitted to the school. It can require a legal residency statement.
Proof of residency must be dated one year and one day before the semester or term you wish to enroll in begins.
Have you been classified as a nonresident? Want to be a California resident? Submit documentation and a Residency Reclassification Petition to the Admissions Office.
Individuals who have met certain criteria based on previous education within the state of California may qualify for nonresident tuition exemptions provided under state law.
Apply for a waiver if you are not a resident. Send the California Nonresident Tuition Exemption Request to Admissions and Records.
Attended a high school (public or private) in California for three or more years.
Don't have legal immigration status? Fill out an affidavit. It must state that you have filed an application to legalize your immigration status or will file as soon as you are eligible.
|
# -*- coding: utf-8 -*-
"""
generate_keyring command
Assemble a GPG keyring with all known developer keys.
Usage: ./manage.py generate_keyring <keyserver> <keyring_path>
"""
from django.core.management.base import BaseCommand, CommandError
import logging
import subprocess
import sys
from devel.models import MasterKey, UserProfile
# Log to stderr so any command output on stdout stays machine-readable.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s -> %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stderr)
logger = logging.getLogger()
class Command(BaseCommand):
    """Management command wrapping generate_keyring/generate_ownertrust."""
    args = "<keyserver> <keyring_path> [ownertrust_path]"
    help = "Assemble a GPG keyring with all known developer keys."

    def add_arguments(self, parser):
        # Fix: help text was a copy-paste leftover ('<arch> <filename>');
        # this command takes a keyserver, keyring path and optional
        # ownertrust path, matching `args` above.
        parser.add_argument('args', nargs='*',
                            help='<keyserver> <keyring_path> [ownertrust_path]')

    def handle(self, *args, **options):
        # Default verbosity to 1 so a missing option can't crash int(None).
        v = int(options.get('verbosity', 1))
        if v == 0:
            logger.level = logging.ERROR
        elif v == 1:
            logger.level = logging.INFO
        elif v >= 2:
            logger.level = logging.DEBUG
        if len(args) < 2:
            raise CommandError("keyserver and keyring_path must be provided")
        generate_keyring(args[0], args[1])
        if len(args) > 2:
            # Optional third argument: write an ownertrust file as well.
            generate_ownertrust(args[2])
def generate_keyring(keyserver, keyring):
    """Fetch all known developer and master keys from `keyserver` into `keyring`.

    Raises subprocess.CalledProcessError if gpg fails.
    """
    logger.info("getting all known key IDs")
    # Screw you Django, for not letting one natively do value != <empty string>
    key_ids = UserProfile.objects.filter(
            pgp_key__isnull=False).extra(where=["pgp_key != ''"]).values_list(
            "pgp_key", flat=True)
    logger.info("%d keys fetched from user profiles", len(key_ids))
    master_key_ids = MasterKey.objects.values_list("pgp_key", flat=True)
    logger.info("%d keys fetched from master keys", len(master_key_ids))
    # GPG is stupid and interprets any filename without path portion as being
    # in ~/.gnupg/. Fake it out if we just get a bare filename.
    if '/' not in keyring:
        keyring = './%s' % keyring
    gpg_cmd = ["gpg", "--no-default-keyring", "--keyring", keyring,
            "--keyserver", keyserver, "--recv-keys"]
    gpg_cmd.extend(key_ids)
    gpg_cmd.extend(master_key_ids)
    # Fix: log *after* appending the key IDs so the logged command is the
    # one actually executed (previously the IDs were missing from the log).
    logger.info("running command: %r", gpg_cmd)
    subprocess.check_call(gpg_cmd)
    logger.info("keyring at %s successfully updated", keyring)
# GPG ownertrust levels as written in `gpg --import-ownertrust` records.
TRUST_LEVELS = {
    'unknown': 0,
    'expired': 1,
    'undefined': 2,
    'never': 3,
    'marginal': 4,
    'fully': 5,
    'ultimate': 6,
}
def generate_ownertrust(trust_path):
    """Write a GPG ownertrust file giving every master key marginal trust."""
    marginal = TRUST_LEVELS['marginal']
    records = [
        "%s:%d:\n" % (key_id, marginal)
        for key_id in MasterKey.objects.values_list("pgp_key", flat=True)
    ]
    with open(trust_path, "w") as trustfile:
        trustfile.writelines(records)
    logger.info("trust file at %s created or overwritten", trust_path)
# vim: set ts=4 sw=4 et:
|
Posted September 21, 2016 by xcite & filed under Uncategorized.
Spinal surgery is usually only considered after more conservative treatment methods like physical therapy and medications have already been tried. This is because surgery of the spine can be very risky, with even minor errors creating serious problems for the patient. For this reason, spinal surgery requires an especially high level of instrument positioning and control. The surgeon must also have access to the part of the spine that needs to be corrected. Traditional open spine surgery requires an incision large enough to expose the entire surgical site. Larger incisions are associated with greater tissue damage and more severe postoperative pain.
Over the last few years, new developments in minimally invasive spinal surgery have sought to fix these problems. In minimally invasive surgery, a tiny endoscope—a flexible tube with a light and camera on one end often accompanied by a surgical tool—is inserted through a very small incision near the area of the spine to be operated upon. The surgeon must then navigate the tool to the problem area itself, avoiding major blood vessels, nerves, and other important internal structures.
Now, spinal surgeons are using even more advanced three-dimensional image-guided therapy to further improve upon minimally invasive spinal surgery techniques. Image-guided therapy includes all medical procedures that use generated pictures to target a medically relevant site. What started with the x-ray in the 19th century has now merged with the world of robotics.
These robot-assisted surgeries are planned with the aid of specialized software long before the first incision. The surgeon uploads a CT scan of the patient into the robotic system’s 3D-planning software. The surgeon then uses these highly accurate 3D images to plan out the surgery in detail. Once surgery begins, the surgeon then uses the system to help guide his or her hand to complete what was mapped out in the planning stage. In addition to navigation, these systems also reduce even minor tremors for greatly improved dexterity.
Accuracy improved by up to 70%.
Greater consistency with screw placement.
Minimal tissue damage thanks to smaller incisions and better accuracy. One dramatic example found significant medial breach to be eight times more likely to happen without robotic navigation. Another is that only 0.6% of screws placed with the aid of robotic navigation had to be removed during surgery compared to 4.9% in traditional surgery.
Better outcomes for complicated cases. Many spinal surgeries are performed on tissue that is distorted from problems like scoliosis or bone-destroying tumors. Direct real-time imaging is especially important when treating patients with these issues.
Reduces radiation exposure by 56%. Traditional spinal surgery requires a large number of x-rays to compensate for the lack of direct imaging.
Faster recovery time. Patients undergoing robot-assisted spinal surgery spent an average of 27% less time in the hospital.
One thing to keep in mind about spinal surgery with robotic navigation is that it in no way replaces the need for an experienced spinal surgeon to perform the procedure. Robotic navigation systems can only make skilled surgeons even better. If you’re considering spinal surgery or other related treatments, consider contacting us at the Colorado Brain & Spine Institute.
|
#!/usr/bin/env python
"""
Parse SAM file and output only pairs with at least one read aligned.
Compatible with bowtie/bwa output - one entry per read.
SAM file has to be sorted by read name.
USAGE:
samtools view -St yeast_chromosomes.fa.fai 409.sam -f3 | sam2aligned.py > 409.aligned.sam
"""
from datetime import datetime
import os, sys
def int2bin( n, count=12 ):
    """Return the binary representation of integer *n* using *count* digits.

    Negative numbers yield their two's-complement style bit pattern.
    @ http://www.daniweb.com/software-development/python/code/216539
    """
    bits = []
    for shift in range(count - 1, -1, -1):
        bits.append(str((n >> shift) & 1))
    return "".join(bits)
def sam2unique( handle = sys.stdin ):
    """Filter SAM entries, printing only pairs with at least one aligned read.

    Input must be sorted by read name, one entry per read (bowtie/bwa style).
    Header lines ('@...') are passed through unchanged; a summary is written
    to stderr.
    """
    i = k = aligned = 0
    pName = lines = ''
    refs = 0
    for l in handle:
        # write header info
        if l.startswith('@'):
            sys.stdout.write( l )
            continue
        # name,flag,contig,pos,mapq,cigar,paired,pairStart,isize,seq,qual
        name, flag, ref = l.split('\t')[:3]
        if name != pName:
            # new read name: flush the previous pair if any read aligned
            i += 1
            if lines and refs:
                sys.stdout.write( lines )
                aligned += 1
            refs = 0
            lines = l
            if ref != "*":
                refs += 1
            pName = name
        else:
            # same pair: accumulate reads
            if ref != "*":
                refs += 1
            lines += l
    # flush the final pair
    if lines and refs:
        aligned += 1
        sys.stdout.write( lines )
    # Fix: guard the percentage against empty input -- the original raised
    # ZeroDivisionError when the file contained no read entries (i == 0).
    pct = aligned * 100.0 / i if i else 0.0
    sys.stderr.write( 'Processed pairs:\t%s\nAligned pairs:\t%s [%.2f%s]\n' % ( i, aligned, pct, '%' ) )
if __name__=='__main__':
    # Time the whole run and report wall-clock duration on stderr.
    T0=datetime.now()
    sam2unique()
    sys.stderr.write( "Elapsed time: %s\n" % ( datetime.now()-T0 ) )
|
We have identified and analyzed the growth strategies of successful small and medium-sized businesses.
Modern trading companies are going all out in equipping their warehouses with the best of tools and technologies.
For the projects industry, achieving 'on-time delivery' is a highly sought after scenario.
Today's retail landscape offers unprecedented opportunity even as it presents new challenges.
From movie making to post production activities, from multi-channel digital distribution to social media.
This is to extend our sincere appreciation for the consultancy service provided by GITL to help us evaluate Microsoft Dynamics CRM and implement an integrated solution at our organization. We evaluated Microsoft Dynamics CRM to help us take care of the business challenges of automating our sales, marketing and service processes; thus providing a 360° view of the customer. Microsoft Dynamics CRM 2011 has been implemented in our organization using Microsoft Sure Step-Methodology. We have personally and professionally enjoyed working with GITL and would be pleased to recommend GITL services to any other organization desiring implementation of Microsoft Dynamics CRM and wish all the luck for all their future endeavours.
Godrej Infotech (Singapore) Pte. Ltd. has been instrumental in providing us excellent support in implementing the Infor LN to manage the complex Manufacturing and Logistic requirements.
We appreciate the efforts and dedication of Godrej Infotech (Singapore) Pte. Ltd.'s team of consultants for successfully providing the necessary guidance and support for the process improvements and migration activities.
Wish you more success in future.
In our business, right timing holds the key to our bottom line. We import some of the finest coffee beans from across the globe. Maintaining just the optimal level of supplies at all our cafes is extremely important. Having the LS NAV system in place makes it extremely easy for us to make real time decisions. We get a holistic and accurate view of what's going on in each location. We are now assured of standardized processes being followed across all our outlets.
The NAV inter-company configuration has streamlined our sales and purchase order processes, reducing significant amount of manual effort. Looking back, we are extremely pleased to have taken the decision to opt for an ERP system.
Godrej Infotech (Singapore) Pte. Ltd., a subsidiary of Godrej Infotech Ltd (GITL), which is one of the holding companies within the USD 4.1 billion Godrej Group. The Godrej group started its journey in 1897 and through the years have diversified from High Tech Engineering to Consumer Products.
Beyond Business through Integrated IT solutions.
Godrej Infotech (Singapore) Pte. Ltd. is forging ahead in the global markets. We invite you to join us. Come and fulfill your cherished dreams & ambitions by joining our dynamic team of young professionals. You may apply against existing vacancies, alternately create and maintain your CV online on our database and we shall revert to you as and when we have a suitable position befitting your experience & expertise.
Godrej Infotech (Singapore) Pte. Ltd.
|
from share.regulate.steps import GraphStep
class Deduplicate(GraphStep):
    """Find duplicate nodes in the graph and merge/discard them.

    Example config (YAML):
    ```yaml
    - namespace: share.regulate.steps.graph
      name: deduplicate
    ```
    """
    MAX_MERGES = 100

    # map from concrete type to set of fields used to dedupe
    DEDUPLICATION_CRITERIA = {
        # works and agents may be merged if duplicate identifiers are merged
        # 'abstractcreativework': {},
        # 'abstractagent': {},
        'abstractagentworkrelation': {'creative_work', 'agent', 'type'},
        'abstractagentrelation': {'subject', 'related', 'type'},
        'abstractworkrelation': {'subject', 'related', 'type'},
        'workidentifier': {'uri'},
        'agentidentifier': {'uri'},
        'subject': {'name', 'parent', 'central_synonym'},
        'tag': {'name'},
        'throughtags': {'tag', 'creative_work'},
        # 'award': {},
        'throughawards': {'funder', 'award'},
        'throughsubjects': {'subject', 'creative_work'},
    }

    def regulate_graph(self, graph):
        """Keep merging duplicate pairs until none remain (or we give up)."""
        # Naive O(n*m) approach (n nodes, m merges); merges are rare enough
        # that a cleverer algorithm isn't worth the complexity.
        merges_done = 0
        while self._merge_first_dupe(graph):
            merges_done += 1
            if merges_done > self.MAX_MERGES:
                self.error('Way too many deduplications')
                return

    def _merge_first_dupe(self, graph):
        """Merge the first duplicate pair found; return True iff a merge happened."""
        seen = {}
        for node in graph:
            node_key = self._get_node_key(node)
            if not node_key:
                continue
            duplicate_of = seen.get(node_key)
            if duplicate_of:
                graph.merge_nodes(node, duplicate_of)
                return True
            seen[node_key] = node
        return False

    def _get_node_key(self, node):
        """Key identifying potential duplicates, or None when the type isn't deduped."""
        criteria = self.DEDUPLICATION_CRITERIA.get(node.concrete_type)
        if not criteria:
            return None
        criterion_values = tuple(
            self._get_criterion_value(node, criterion)
            for criterion in criteria
        )
        return (node.concrete_type, criterion_values)

    def _get_criterion_value(self, node, criterion_name):
        """Value of one dedupe criterion ('type' is special-cased)."""
        return node.type if criterion_name == 'type' else node[criterion_name]
|
MUNICH, Germany – With DB Rent’s Call-a-Bike and Nextbike’s MVG-Rad (offered by Munich’s public transportation company MVG) there are already two bicycle rental systems operating in Munich. This April, Danish Donkey Bikes entered Munich followed by Singaporean Obike in August. Obike is flooding the streets of Munich with 7,000 grey-yellow rental bikes, leading to serious concerns and anger.
The anger in Munich is growing about the 7,000 single speeders from this first Far East rental bike provider which is trying to conquer Munich. They are not only occupying the last free spaces at bike parking and sidewalks in downtown Munich but also those in city suburbs. Obike isn’t operating with docking stations but strictly through an app-based rental procedure without any direct address. There’s only an email address.
Like in other European capitals and cities Munich citizens are questioning how Obike is making any money as there’s not even space on the bikes for advertising. It’s speculated that rental bike newcomers from the Far East are only interested in collecting consumer data they can sell. This fear is rooted in the fact that IT giants are backing such rental bike operators.
After rising concerns and anger Obike reacted to the negative reporting in and around Munich. First of all they made clear that they won’t sell any customer data to third parties – “but we could offer our anonymous data cost-free to the cities so they can use them for updating their infrastructure.” Moreover they want to install a hotline.
According to latest news in the daily newspaper “Süddeutsche Zeitung” Obike is working with subsidiaries in each country. In Germany it’s Berlin-based OBG Germany GmbH. Country manager Germany Marco Piu is working right now 24 hours a day on image polishing. After a meeting with Munich city representatives he assured to work on a better service. As a further result there will be no more than ten Obikes parked at one and the same spot – and only there where they are not disturbing anyone. Nevertheless there is no information on who is collecting these bikes standing around in places they shouldn’t be parked – and where they will be parked after that.
The only thing that is clear by now is that Obikes are produced in China and shipped by the British logistics company Unsworth Global Logistics to Germany. This raises the question of whether anti-dumping duties are levied on the imported Chinese-made bicycles. Unsworth Global Logistics is also responsible for parking the bikes all over Munich. The maintenance of Obikes is handled by a company named Live Cycle. According to Live Cycle the contract with Obike was signed only a week ago.
|
# Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Table implementation based on LevelDB (https://github.com/google/leveldb).
This is a sort of poor, lazy man's implementation of an IndexedDB schema.
**KEYS**
Numbers are 8-bit unsigned.
- Schema Version: (0)
- Index Last Global Id: (0,1, <indexnum>)
- Last Update Key: (0,2)
- Number of Indicators: (0,3)
- Table Last Global ID: (0,4)
- Custom Metadata: (0,5)
- Indicator Version: (1,0,<indicator>)
- Indicator: (1,1,<indicator>)
**INDICATORS**
Each indicator has 2 entries associated in the DB: a version and a value.
The version number is used to track indicator existence and versioning.
When an indicator value is updated, its version number is incremented.
The version number is a 64-bit LSB unsigned int.
The value of an indicator is a 64-bit unsigned int LSB followed by a dump of
a dictionary of attributes in JSON format.
To iterate over all the indicators versions iterate from key (1,0) to key
(1,1) excluded.
NULL indicators are not allowed.
**INDEXES**
Indicators are stored in alphabetical order. Indexes are secondary indexes
on indicators attributes.
Each index has an associated id in the range 0 - 255. The attribute associated
to the index is stored at (0,1,<index id>), if the key does not exist the
index does not exist.
There is also a Last Global Id per index, used to index indicators with the
same attribute value. Each time a new indicator is added to the index, the
Last Global Id is incremented. The Last Global Id of an index is stored at
(2,<index id>,0) as a 64-bit LSB unsigned int.
Each entry in the index is stored with a key
(2,<index id>,0xF0,<encoded value>,<last global id>) and value
(<version>,<indicator>). <encoded value> depends on the type of attribute.
When iterating over an index, the value of an index entry is loaded and if
the version does not match with current indicator version the index entry is
deleted. This permits a sort of lazy garbage collection.
To retrieve all the indicators with a specific attribute value just iterate
over the keys (2,<index id>,0xF0,<encoded value>) and
(2,<index id>,0xF0,<encoded value>,0xFF..FF)
"""
import os
import plyvel
import struct
import ujson
import time
import logging
import shutil
import gevent
SCHEMAVERSION_KEY = struct.pack("B", 0)
START_INDEX_KEY = struct.pack("BBB", 0, 1, 0)
END_INDEX_KEY = struct.pack("BBB", 0, 1, 0xFF)
LAST_UPDATE_KEY = struct.pack("BB", 0, 2)
NUM_INDICATORS_KEY = struct.pack("BB", 0, 3)
TABLE_LAST_GLOBAL_ID = struct.pack("BB", 0, 4)
CUSTOM_METADATA = struct.pack("BB", 0, 5)
LOG = logging.getLogger(__name__)
class InvalidTableException(Exception):
    """Raised when the on-disk table metadata is missing or unsupported."""
class Table(object):
    """A LevelDB-backed table of indicators with secondary indexes.

    See the module docstring for the on-disk key layout.  Stale index
    entries are removed lazily during index queries and by a periodic
    background compaction greenlet.  This is Python 2 code (``unicode``,
    ``long``, ``iteritems``).
    """
    def __init__(self, name, truncate=False, bloom_filter_bits=0):
        """Open (creating if necessary) the LevelDB database at *name*.

        name: path of the LevelDB directory.
        truncate: if True, wipe any existing database first.
        bloom_filter_bits: forwarded to plyvel.DB.
        """
        if truncate:
            try:
                shutil.rmtree(name)
            except:
                # best effort: the directory may simply not exist yet
                pass
        self.db = None
        self._compact_glet = None
        self.db = plyvel.DB(
            name,
            create_if_missing=True,
            bloom_filter_bits=bloom_filter_bits
        )
        self._read_metadata()
        # background compaction timing (seconds), overridable via env vars
        self.compact_interval = int(os.environ.get('MM_TABLE_COMPACT_INTERVAL', 3600 * 6))
        self.compact_delay = int(os.environ.get('MM_TABLE_COMPACT_DELAY', 3600))
        self._compact_glet = gevent.spawn(self._compact_loop)
    def _init_db(self):
        """Initialize metadata keys for a brand new (empty) database."""
        self.last_update = 0
        self.indexes = {}
        self.num_indicators = 0
        self.last_global_id = 0
        batch = self.db.write_batch()
        batch.put(SCHEMAVERSION_KEY, struct.pack("B", 1))
        batch.put(LAST_UPDATE_KEY, struct.pack(">Q", self.last_update))
        batch.put(NUM_INDICATORS_KEY, struct.pack(">Q", self.num_indicators))
        batch.put(TABLE_LAST_GLOBAL_ID, struct.pack(">Q", self.last_global_id))
        batch.write()
    def _read_metadata(self):
        """Load schema version, index map and counters from the database.

        Upgrades schema-version-0 databases in place; raises
        InvalidTableException on unknown schema versions or missing keys.
        """
        sv = self._get(SCHEMAVERSION_KEY)
        if sv is None:
            # empty database: create the metadata from scratch
            return self._init_db()
        sv = struct.unpack("B", sv)[0]
        if sv == 0:
            # add table last global id
            self._upgrade_from_s0()
        elif sv == 1:
            pass
        else:
            raise InvalidTableException("Schema version not supported")
        # rebuild the in-memory index map from the (0,1,<indexnum>) keys
        self.indexes = {}
        ri = self.db.iterator(
            start=START_INDEX_KEY,
            stop=END_INDEX_KEY
        )
        with ri:
            for k, v in ri:
                _, _, indexid = struct.unpack("BBB", k)
                if v in self.indexes:
                    raise InvalidTableException("2 indexes with the same name")
                self.indexes[v] = {
                    'id': indexid,
                    'last_global_id': 0
                }
        for i in self.indexes:
            lgi = self._get(self._last_global_id_key(self.indexes[i]['id']))
            if lgi is not None:
                self.indexes[i]['last_global_id'] = struct.unpack(">Q", lgi)[0]
            else:
                # no entry stored yet for this index
                self.indexes[i]['last_global_id'] = -1
        t = self._get(LAST_UPDATE_KEY)
        if t is None:
            raise InvalidTableException("LAST_UPDATE_KEY not found")
        self.last_update = struct.unpack(">Q", t)[0]
        t = self._get(NUM_INDICATORS_KEY)
        if t is None:
            raise InvalidTableException("NUM_INDICATORS_KEY not found")
        self.num_indicators = struct.unpack(">Q", t)[0]
        t = self._get(TABLE_LAST_GLOBAL_ID)
        if t is None:
            raise InvalidTableException("TABLE_LAST_GLOBAL_ID not found")
        self.last_global_id = struct.unpack(">Q", t)[0]
    def _get(self, key):
        """Return the raw value stored at *key*, or None if it does not exist."""
        try:
            result = self.db.get(key)
        except KeyError:
            return None
        return result
    def __del__(self):
        # close() is idempotent, so this is safe even after an explicit close
        self.close()
    def get_custom_metadata(self):
        """Return the custom metadata object, or None if none was stored."""
        cmetadata = self._get(CUSTOM_METADATA)
        if cmetadata is None:
            return None
        return ujson.loads(cmetadata)
    def set_custom_metadata(self, metadata=None):
        """Store *metadata* (JSON-serializable) at (0,5); None deletes it."""
        if metadata is None:
            self.db.delete(CUSTOM_METADATA)
            return
        cmetadata = ujson.dumps(metadata)
        self.db.put(CUSTOM_METADATA, cmetadata)
    def close(self):
        """Close the database and stop the compaction greenlet (idempotent)."""
        if self.db is not None:
            self.db.close()
        if self._compact_glet is not None:
            self._compact_glet.kill()
        self.db = None
        self._compact_glet = None
    def exists(self, key):
        """Return True if indicator *key* exists (checked via its version entry)."""
        if type(key) == unicode:  # keys are stored as UTF-8 bytes
            key = key.encode('utf8')
        ikeyv = self._indicator_key_version(key)
        return (self._get(ikeyv) is not None)
    def get(self, key):
        """Return the attribute dict of indicator *key*, or None if absent."""
        if type(key) == unicode:
            key = key.encode('utf8')
        ikey = self._indicator_key(key)
        value = self._get(ikey)
        if value is None:
            return None
        # skip version
        return ujson.loads(value[8:])
    def delete(self, key):
        """Delete indicator *key* (no-op if it does not exist).

        Index entries are not removed here; they are garbage collected
        lazily during index queries.
        """
        if type(key) == unicode:
            key = key.encode('utf8')
        ikey = self._indicator_key(key)
        ikeyv = self._indicator_key_version(key)
        if self._get(ikeyv) is None:
            return
        batch = self.db.write_batch()
        batch.delete(ikey)
        batch.delete(ikeyv)
        self.num_indicators -= 1
        batch.put(NUM_INDICATORS_KEY, struct.pack(">Q", self.num_indicators))
        batch.write()
    def _indicator_key(self, key):
        # indicator value lives at (1,1,<indicator>)
        return struct.pack("BB", 1, 1) + key
    def _indicator_key_version(self, key):
        # indicator version lives at (1,0,<indicator>)
        return struct.pack("BB", 1, 0) + key
    def _index_key(self, idxid, value, lastidxid=None):
        """Build an index entry key (2,<idxid>,0xF0,<encoded value>[,<lastidxid>])."""
        key = struct.pack("BBB", 2, idxid, 0xF0)
        if type(value) == unicode:
            value = value.encode('utf8')
        if type(value) == str:
            # strings encode as (0x0, length, bytes); ints as (0x1, value)
            key += struct.pack(">BL", 0x0, len(value))+value
        elif type(value) == int or type(value) == long:
            key += struct.pack(">BQ", 0x1, value)
        else:
            raise ValueError("Unhandled value type: %s" % type(value))
        if lastidxid is not None:
            key += struct.pack(">Q", lastidxid)
        return key
    def _last_global_id_key(self, idxid):
        # per-index Last Global Id lives at (2,<idxid>,0)
        return struct.pack("BBB", 2, idxid, 0)
    def create_index(self, attribute):
        """Create a secondary index on *attribute* (no-op if it already exists)."""
        if attribute in self.indexes:
            return
        if len(self.indexes) == 0:
            idxid = 0
        else:
            # next free index id after the highest one in use
            idxid = max([i['id'] for i in self.indexes.values()])+1
        self.indexes[attribute] = {
            'id': idxid,
            'last_global_id': -1
        }
        batch = self.db.write_batch()
        batch.put(struct.pack("BBB", 0, 1, idxid), attribute)
        batch.write()
    def put(self, key, value):
        """Store indicator *key* with attribute dict *value*.

        Bumps the table's global version, refreshes metadata and writes an
        entry into every index whose attribute is present in *value*.
        Raises ValueError if *value* is not a dict.
        """
        if type(key) == unicode:
            key = key.encode('utf8')
        if type(value) != dict:
            raise ValueError()
        ikey = self._indicator_key(key)
        ikeyv = self._indicator_key_version(key)
        exists = self._get(ikeyv)
        self.last_global_id += 1
        cversion = self.last_global_id
        now = time.time()
        self.last_update = now
        batch = self.db.write_batch()
        batch.put(ikey, struct.pack(">Q", cversion)+ujson.dumps(value))
        batch.put(ikeyv, struct.pack(">Q", cversion))
        batch.put(LAST_UPDATE_KEY, struct.pack(">Q", self.last_update))
        batch.put(TABLE_LAST_GLOBAL_ID, struct.pack(">Q", self.last_global_id))
        if exists is None:
            # brand new indicator: bump the counter
            self.num_indicators += 1
            batch.put(
                NUM_INDICATORS_KEY,
                struct.pack(">Q", self.num_indicators)
            )
        for iattr, index in self.indexes.iteritems():
            v = value.get(iattr, None)
            if v is None:
                continue
            index['last_global_id'] += 1
            idxkey = self._index_key(index['id'], v, index['last_global_id'])
            batch.put(idxkey, struct.pack(">Q", cversion) + key)
            batch.put(
                self._last_global_id_key(index['id']),
                struct.pack(">Q", index['last_global_id'])
            )
        batch.write()
    def query(self, index=None, from_key=None, to_key=None,
              include_value=False, include_stop=True, include_start=True,
              reverse=False):
        """Iterate over indicators.

        With index=None, walks the primary indicator keyspace; otherwise
        walks the named secondary index between from_key and to_key.
        Yields keys, or (key, value) pairs when include_value is True.
        """
        if type(from_key) is unicode:
            from_key = from_key.encode('ascii', 'replace')
        if type(to_key) is unicode:
            to_key = to_key.encode('ascii', 'replace')
        if index is None:
            return self._query_by_indicator(
                from_key=from_key,
                to_key=to_key,
                include_value=include_value,
                include_stop=include_stop,
                include_start=include_start,
                reverse=reverse
            )
        return self._query_by_index(
            index,
            from_key=from_key,
            to_key=to_key,
            include_value=include_value,
            include_stop=include_stop,
            include_start=include_start,
            reverse=reverse
        )
    def _query_by_indicator(self, from_key=None, to_key=None,
                            include_value=False, include_stop=True,
                            include_start=True, reverse=False):
        """Generator over the primary indicator keyspace (1,1,...)."""
        if from_key is None:
            from_key = struct.pack("BB", 1, 1)
            include_stop = False
        else:
            from_key = self._indicator_key(from_key)
        if to_key is None:
            # (1,2) is the first key past the indicator keyspace
            to_key = struct.pack("BB", 1, 2)
            include_start = False
        else:
            to_key = self._indicator_key(to_key)
        ri = self.db.iterator(
            start=from_key,
            stop=to_key,
            include_stop=include_stop,
            include_start=include_start,
            reverse=reverse,
            include_value=False
        )
        with ri:
            for ekey in ri:
                # strip the (1,1) prefix to recover the raw indicator key
                ekey = ekey[2:]
                if include_value:
                    yield ekey.decode('utf8', 'ignore'), self.get(ekey)
                else:
                    yield ekey.decode('utf8', 'ignore')
    def _query_by_index(self, index, from_key=None, to_key=None,
                        include_value=False, include_stop=True,
                        include_start=True, reverse=False):
        """Generator over a secondary index.

        Entries whose stored version no longer matches the indicator's
        current version (or whose indicator was deleted) are removed as
        they are encountered -- the lazy garbage collection described in
        the module docstring.  Raises ValueError for an unknown index.
        """
        if index not in self.indexes:
            raise ValueError()
        idxid = self.indexes[index]['id']
        if from_key is None:
            from_key = struct.pack("BBB", 2, idxid, 0xF0)
            include_start = False
        else:
            from_key = self._index_key(idxid, from_key)
        if to_key is None:
            to_key = struct.pack("BBB", 2, idxid, 0xF1)
            include_stop = False
        else:
            # 0xFF..FF covers every per-index global id for this value
            to_key = self._index_key(
                idxid,
                to_key,
                lastidxid=0xFFFFFFFFFFFFFFFF
            )
        ldeleted = 0
        ri = self.db.iterator(
            start=from_key,
            stop=to_key,
            include_value=True,
            include_start=include_start,
            include_stop=include_stop,
            reverse=reverse
        )
        with ri:
            for ikey, ekey in ri:
                iversion = struct.unpack(">Q", ekey[:8])[0]
                ekey = ekey[8:]
                evalue = self._get(self._indicator_key_version(ekey))
                if evalue is None:
                    # LOG.debug("Key does not exist")
                    # key does not exist
                    self.db.delete(ikey)
                    ldeleted += 1
                    continue
                cversion = struct.unpack(">Q", evalue)[0]
                if iversion != cversion:
                    # index value is old
                    # LOG.debug("Version mismatch")
                    self.db.delete(ikey)
                    ldeleted += 1
                    continue
                if include_value:
                    yield ekey.decode('utf8', 'ignore'), self.get(ekey)
                else:
                    yield ekey.decode('utf8', 'ignore')
        LOG.info('Deleted in scan of {}: {}'.format(index, ldeleted))
    def _compact_loop(self):
        """Greenlet body: periodically scan every index to purge stale entries."""
        gevent.sleep(self.compact_delay)
        while True:
            try:
                gevent.idle()
                counter = 0
                for idx in self.indexes.keys():
                    # iterating triggers the lazy deletion in _query_by_index
                    for i in self.query(index=idx, include_value=False):
                        if counter % 512 == 0:
                            gevent.sleep(0.001) # yield to other greenlets
                        counter += 1
            except gevent.GreenletExit:
                break
            except:
                # keep the loop alive on any scan failure; just log it
                LOG.exception('Exception in _compact_loop')
            try:
                gevent.sleep(self.compact_interval)
            except gevent.GreenletExit:
                break
    def _upgrade_from_s0(self):
        """In-place schema upgrade 0 -> 1.

        Derives the table Last Global ID from the highest version found in
        the index entries, then stamps the new schema version.
        """
        LOG.info('Upgrading from schema version 0 to schema version 1')
        LOG.info('Loading indexes...')
        indexes = {}
        ri = self.db.iterator(
            start=START_INDEX_KEY,
            stop=END_INDEX_KEY
        )
        with ri:
            for k, v in ri:
                _, _, indexid = struct.unpack("BBB", k)
                if v in indexes:
                    raise InvalidTableException("2 indexes with the same name")
                indexes[v] = {
                    'id': indexid,
                    'last_global_id': 0
                }
        for i in indexes:
            lgi = self._get(self._last_global_id_key(indexes[i]['id']))
            if lgi is not None:
                indexes[i]['last_global_id'] = struct.unpack(">Q", lgi)[0]
            else:
                indexes[i]['last_global_id'] = -1
        LOG.info('Scanning indexes...')
        last_global_id = 0
        for i, idata in indexes.iteritems():
            from_key = struct.pack("BBB", 2, idata['id'], 0xF0)
            include_start = False
            to_key = struct.pack("BBB", 2, idata['id'], 0xF1)
            include_stop = False
            ri = self.db.iterator(
                start=from_key,
                stop=to_key,
                include_value=True,
                include_start=include_start,
                include_stop=include_stop,
                reverse=False
            )
            with ri:
                for ikey, ekey in ri:
                    iversion = struct.unpack(">Q", ekey[:8])[0]
                    if iversion > last_global_id:
                        last_global_id = iversion+1
        LOG.info('Last global id: {}'.format(last_global_id))
        batch = self.db.write_batch()
        batch.put(SCHEMAVERSION_KEY, struct.pack("B", 1))
        batch.put(TABLE_LAST_GLOBAL_ID, struct.pack(">Q", last_global_id))
        batch.write()
|
Since bug 557087, an element can be disabled without having a disabled attribute because a disabled fieldset will make all its elements disabled.
A simple testcase: both input elements should appear disabled. Currently, I guess that only the second one will.
I might as well take this one along with the 'required' state bug.
Just a reminder, without this bug fix, some disabled elements will appear as enabled to users with AT.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os
from cerf_api import Cerf
from file_util import Template, FileUtil
from misc import calc_time_spent
__author__ = 'tchen'
logger = logging.getLogger(__name__)
class InterviewManager(object):
    """Manage the lifecycle of a coding interview against the CERF API.

    Handles starting an interview (creating the local exam directory and
    case files) and finishing it (collecting and submitting the answers).
    """
    def __init__(self, id=None):
        # interview id handed out by the hiring manager; None when
        # finishing, in which case the id is read back from saved config
        self.id = id
        if self.id:
            self.exam_path = 'exam%s' % self.id
        else:
            self.exam_path = None
        # auth code, interview payload, exam id and API client are
        # populated later by start() or load_data()
        self.code = None
        self.interview = None
        self.exam_id = None
        self.cerf_api = None
    def generate_environment(self):
        """Create the exam directory with instructions and all case files."""
        # create exam dir
        os.mkdir(self.exam_path)
        # write .interview.json for further use
        Template.create_exam_config(os.path.join(os.getcwd(), self.exam_path), self.interview)
        # retrieve exam and write general instruction file
        exam = self.cerf_api.exam.retrieve(self.exam_id)
        if len(exam) == 0:
            print('Can not retrieve proper exam by id %s. Please contact your hiring manager.' % self.exam_id)
            exit(-1)
        Template.create_exam_instruction(self.exam_path, self.interview, exam)
        # generate cases
        for case in exam['cases']:
            self.generate_case(case)
    def generate_case(self, case):
        """Create the directory, config, instruction and starter code for one case."""
        os.mkdir('%s/case%s' % (self.exam_path, case['position']))
        path = os.path.join(os.getcwd(), self.exam_path, 'case%s' % str(case['position']))
        # write .case.json for further use
        Template.create_case_config(path, case)
        # write instruction
        Template.create_case_instruction(path, case)
        # write code
        Template.create_case_code(path, case)
    def start(self):
        """Authenticate, mark the interview as started and set up the exam."""
        code = raw_input('Please provide your authentication code:')
        self.code = code
        self.cerf_api = Cerf(self.id, code)
        data = self.cerf_api.interview.start()
        if len(data) == 0:
            print('Can not retrieve proper interview by id %s. Please contact your hiring manager.' % self.id)
            exit(-1)
        # refuse to start twice: either the server already recorded a
        # start time or the exam directory already exists locally
        if calc_time_spent(data['started']) > 1 or os.path.exists(self.exam_path):
            print('This interview has been started already!')
            exit(-1)
        self.interview = data
        self.exam_id = self.interview['exam']
        print('Nice to meet you, %s! Thanks for your interest in Juniper China R&D.' % data['applicant'])
        print('Creating the exam environment...'),
        self.generate_environment()
        print('Done!\nYou can "cd %s" to start your exam now.' % self.exam_path)
    def load_data(self, interview):
        """Populate this manager from a server interview payload (used by finish)."""
        self.id = interview['id']
        self.code = interview['authcode']
        self.interview = interview
        self.exam_id = interview['exam']
        # NOTE(review): __init__ derives exam_path from the interview id,
        # but here it is derived from the exam id -- confirm both match
        self.exam_path = 'exam%d' % self.exam_id
    def submit_case(self, case):
        """Concatenate the valid files of one case and upload the answer."""
        path = os.path.join(os.getcwd(), 'case%s' % case['position'])
        print('\tSubmit case%s...' % case['position']),
        extentions = [ext.strip() for ext in case['extentions'].split(',')]
        first_list, second_list = FileUtil.get_valid_files(path, extentions)
        content = ''
        for name in first_list + second_list:
            # prefix each file with its name so the reviewer can tell them apart
            s = '/* %s */\n\n%s' % (name, FileUtil.read_content(os.path.join(path, name)))
            content += s
        data = {
            'interview': self.id,
            'applicant': self.interview['applicant_id'],
            'case': case['cid'],
            'content': content
        }
        if not self.cerf_api.answer.create(data):
            print('Cannot submit case%s, please contact your hiring manager.' % case['position'])
            # do not bail out so that we could try the latter cases.
            # exit(-1)
        else:
            print('Done!')
    def submit_cases(self):
        """Submit every case* directory found under the current directory."""
        path = os.getcwd()
        for root, dirs, files in os.walk('.'):
            for d in dirs:
                if d.startswith('case'):
                    config = FileUtil.read_case(os.path.join(path, d))
                    self.submit_case(config)
    def finish_interview(self):
        """Tell the server that the interview is finished."""
        data = self.cerf_api.interview.finish()
        if len(data) == 0:
            print('Can not finish interview by id %s. Please contact your hiring manager.' % self.id)
            exit(-1)
    def finish(self):
        """Finish the exam: submit all answers and notify the hiring manager.

        Must be run from the root of the exam directory.
        """
        if not FileUtil.interview_exists():
            print('Please change to the root of the exam directory, then execute this command again.')
            exit(-1)
        # do not trust existing data, retrieve interview data from server again
        interview = FileUtil.read_interview('.')
        self.cerf_api = Cerf(interview['id'], interview['authcode'])
        interview = self.cerf_api.interview.retrieve(interview['id'])
        self.load_data(interview)
        if interview['time_spent']:
            print('Your exam is over. Please stay tuned.')
            exit(-1)
        spent = calc_time_spent(interview['started'])
        print('Thank you! Your exam is done! Total time spent: %d minutes.' % spent)
        print('Submitting your code to generate report...')
        self.submit_cases()
        print('Done!')
        print('Notifying the hiring manager...'),
        self.finish_interview()
        print('Done!')
        print('Please wait for a short moment. If no one comes in 5m, please inform frontdesk.')
def main(arguments):
    """Entry point: dispatch the parsed docopt-style arguments.

    arguments: mapping with boolean 'finish'/'start' flags and the raw
    '<id>' value used when starting an interview.
    """
    if arguments['finish']:
        InterviewManager().finish()
    elif arguments['start']:
        try:
            # renamed from ``id`` to avoid shadowing the builtin
            interview_id = int(arguments['<id>'])
        except (KeyError, TypeError, ValueError):
            # narrowed from a bare ``except`` so unrelated bugs are not
            # hidden; these are the only errors a missing/invalid id raises
            print('Interview id is not valid. Please contact your hiring manager.')
            exit(-1)
        InterviewManager(interview_id).start()
    else:
        print("Please specify a correct command.")
|
This traditional band from Juiz de Fora, a Brazilian city, was performing on a festive Saturday morning in the busiest street of the city. Everything was cheerful and relaxed. The sun was strong (Brazil is a tropical country...) and the musicians suffered, but they were not discouraged.
|
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with SimplePL. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
# std lib imports
import os.path
# third party imports
from PySide import QtGui, QtCore
import pyqtgraph as pg
# local imports
from .scanners import Scanner, GoToer
from .simple_pl_parser import SimplePLParser
from .spectra_plot_item import SpectraPlotItem
from .measured_spectrum import MeasuredSpectrum
from .expanding_spectrum import ExpandingSpectrum
from .instruments.spectrometer import Spectrometer
from .instruments.lockin import Lockin
from .dialogs.start_scan_dialog import StartScanDialog
from .dialogs.diverters_config_dialog import DivertersConfigDialog
from .dialogs.lockin_config_dialog import LockinConfigDialog
from .dialogs.gratings_and_filters_config_dialog import (
GratingsAndFiltersConfigDialog)
from .dialogs.set_wavelength_dialog import SetWavelengthDialog
from .dialogs.config_instruments_dialog import ConfigInstrumentsDialog
from .dialogs.generate_veusz_file_dialog import GenerateVeuszFileDialog
from .dialogs.about_dialog import AboutDialog
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
# Initialize private variables
self.plot = None
self.spectrum = None
self._grating = None
self._filter = None
self._wavelength = None
self._signal = None
self._rawSignal = None
self._phase = None
self.spectrometer = None
self.lockin = None
self.scanner = None
# Internal flags
self._scanSaved = True
# Initialize QSettings object
self._settings = QtCore.QSettings()
# Initialize GUI stuff
self.initUI()
# Disable all actions except for configuring the ports,
# until the instruments are initialized
self._spectrometerInitilized = False
self._lockinInitilized = False
self.updateActions()
# Initialize the instruments
if bool(self._settings.value('autoConnect')):
self.initSpectrometer()
self.initLockin()
# Initialize the current instrument values
sysResPath = self._settings.value('sysResPath')
self._sysresParser = SimplePLParser(None, sysResPath)
def initSpectrometer(self):
self.spectrometer = Spectrometer()
self.spectrometer.sigException.connect(self.spectrometerException)
self.spectrometer.sigInitialized.connect(self.spectrometerInitialized)
self.spectrometer.sigChangingGrating.connect(self.changingGrating)
self.spectrometer.sigChangingFilter.connect(self.changingFilter)
self.spectrometer.sigChangingWavelength.connect(
self.changingWavelength)
self.spectrometer.sigGrating.connect(self.updateGrating)
self.spectrometer.sigFilter.connect(self.updateFilter)
self.spectrometer.sigWavelength.connect(self.updateWavelength)
self.spectrometer.thread.start()
def initLockin(self):
self.lockin = Lockin()
self.lockin.sigException.connect(self.lockinException)
self.lockin.sigInitialized.connect(self.lockinInitialized)
self.lockin.sigRawSignal.connect(self.updateRawSignal)
self.lockin.sigPhase.connect(self.updatePhase)
self.lockin.thread.start()
@QtCore.Slot(Exception)
def spectrometerException(self, e):
raise e
@QtCore.Slot(Exception)
def lockinException(self, e):
raise e
@QtCore.Slot(Exception)
def scannerException(self, e):
self.scanner.wait()
self.updateStatus('Scan failed.')
raise e
@QtCore.Slot()
def spectrometerInitialized(self):
self._spectrometerInitilized = True
if self._spectrometerInitilized and self._lockinInitilized:
self.updateStatus('Idle.')
self.updateActions()
@QtCore.Slot()
def lockinInitialized(self):
self._lockinInitilized = True
if self._spectrometerInitilized and self._lockinInitilized:
self.updateStatus('Idle.')
self.updateActions()
@QtCore.Slot()
def changingGrating(self):
self.gratingLabel.setText('Grating=?')
self.wavelengthLabel.setText('Wavelength=?')
@QtCore.Slot()
def changingFilter(self):
self.filterLabel.setText('Filter=?')
@QtCore.Slot()
def changingWavelength(self):
self.wavelengthLabel.setText('Wavelength=?')
@QtCore.Slot(str)
def updateStatus(self, status):
self.statusLabel.setText(status)
@QtCore.Slot(float)
def updateGrating(self, grating):
self._grating = grating
try:
s = 'Grating=%d' % grating
except:
s = 'Grating=?'
self.gratingLabel.setText(s)
@QtCore.Slot(float)
def updateFilter(self, filt):
self._filter = filt
try:
s = 'Filter=%d' % filt
except:
s = 'Filter=?'
self.filterLabel.setText(s)
@QtCore.Slot(float)
def updateWavelength(self, wavelength):
self._wavelength = wavelength
try:
s = 'Wavelength=%.1f' % wavelength
except:
s = 'Wavelength=?'
self.wavelengthLabel.setText(s)
@QtCore.Slot(float)
def updateRawSignal(self, rawSignal):
self._rawSignal = rawSignal
try:
s = 'Raw Signal=%.3E' % rawSignal
except:
s = 'Raw Signal=?'
self.rawSignalLabel.setText(s)
# Calculate the signal by dividing by the system response,
# and update that too
sysres = self._sysresParser.getSysRes(self._wavelength)
self.updateSignal(rawSignal / sysres)
@QtCore.Slot(float)
def updateSignal(self, signal):
self._signal = signal
try:
s = 'Signal=%.3E' % signal
except:
s = 'Signal=?'
self.signalLabel.setText(s)
@QtCore.Slot(float)
def updatePhase(self, phase):
self._phase = phase
try:
s = 'Phase=%.1f' % phase
except:
s = 'Phase=?'
self.phaseLabel.setText(s)
def initUI(self):
self.setWindowTitle('SimplePL')
from .resources.icons import logoIcon
self.setWindowIcon(logoIcon)
self.aboutAction = QtGui.QAction('&About', self)
self.aboutAction.triggered.connect(self.about)
self.openAction = QtGui.QAction('&Open', self)
self.openAction.setStatusTip('Open a spectrum')
self.openAction.setToolTip('Open a spectrum')
self.openAction.setShortcut('Ctrl+O')
self.openAction.triggered.connect(self.openFile)
self.saveAction = QtGui.QAction('&Save', self)
self.saveAction.setStatusTip('Save the current spectrum')
self.saveAction.setToolTip('Save the current spectrum')
self.saveAction.setShortcut('Ctrl+S')
self.saveAction.triggered.connect(self.saveFile)
self.saveAsAction = QtGui.QAction('&Save As', self)
self.saveAsAction.setStatusTip('Save the current spectrum')
self.saveAsAction.setToolTip('Save the current spectrum')
self.saveAsAction.setShortcut('Ctrl+Shift+S')
self.saveAsAction.triggered.connect(self.saveAsFile)
self.closeAction = QtGui.QAction('Close &Window', self)
self.closeAction.setStatusTip('Close the Window')
self.closeAction.setToolTip('Close the Window')
self.closeAction.setShortcut('Ctrl+W')
self.closeAction.triggered.connect(self.close)
self.viewSignal = QtGui.QAction('&Signal', self)
self.viewSignal.setStatusTip('Plot the signal with system '
'response removed')
self.viewSignal.setToolTip('Plot the signal with system '
'response removed')
self.viewSignal.toggled.connect(self.viewSignalToggled)
self.viewSignal.setCheckable(True)
self.viewSignal.setChecked(True)
self.viewRawSignal = QtGui.QAction('&Raw Signal', self)
self.viewRawSignal.setStatusTip('Plot the raw signal')
self.viewRawSignal.setToolTip('Plot the raw signal')
self.viewRawSignal.toggled.connect(self.viewRawSignalToggled)
self.viewRawSignal.setCheckable(True)
self.viewRawSignal.setChecked(False)
self.viewPhase = QtGui.QAction('&Phase', self)
self.viewPhase.setStatusTip('Plot the phase')
self.viewPhase.setToolTip('Plot the phase')
self.viewPhase.toggled.connect(self.viewPhaseToggled)
self.viewPhase.setCheckable(True)
self.viewPhase.setChecked(False)
self.viewClearPlotAction = QtGui.QAction('&Clear Plot', self)
self.viewClearPlotAction.setStatusTip('Clear the plot')
self.viewClearPlotAction.setToolTip('Clear the plot')
self.viewClearPlotAction.triggered.connect(self.clearPlot)
self.axesWavelengthAction = QtGui.QAction('&Wavelength', self)
self.axesWavelengthAction.setStatusTip('Plot against Wavelength')
self.axesWavelengthAction.setToolTip('Plot against Wavelength')
self.axesWavelengthAction.setShortcut('Ctrl+Shift+W')
self.axesWavelengthAction.triggered.connect(self.axesWavelength)
self.axesWavelengthAction.setCheckable(True)
self.axesWavelengthAction.setChecked(True)
self.axesEnergyAction = QtGui.QAction('&Energy', self)
self.axesEnergyAction.setStatusTip('Plot against Energy')
self.axesEnergyAction.setToolTip('Plot against Energy')
self.axesEnergyAction.setShortcut('Ctrl+Shift+e')
self.axesEnergyAction.triggered.connect(self.axesEnergy)
self.axesEnergyAction.setCheckable(True)
self.axesSemilogAction = QtGui.QAction('Semi-&log', self)
self.axesSemilogAction.setStatusTip('Plot the log of the y-axis')
self.axesSemilogAction.setToolTip('Plot the log of the y-axis')
self.axesSemilogAction.setShortcut('Ctrl+Shift+L')
self.axesSemilogAction.changed.connect(self.axesSemilog)
self.axesSemilogAction.setCheckable(True)
self.axesSemilogAction.setChecked(False)
group = QtGui.QActionGroup(self)
group.addAction(self.axesWavelengthAction)
group.addAction(self.axesEnergyAction)
self.gotoWavelengthAction = QtGui.QAction('&Go to wavelength', self)
self.gotoWavelengthAction.setStatusTip('Go to a wavelength')
self.gotoWavelengthAction.setToolTip('Go to a wavelength')
self.gotoWavelengthAction.setShortcut('Ctrl+G')
self.gotoWavelengthAction.triggered.connect(self.setWavelength)
self.startScanAction = QtGui.QAction('S&tart Scan', self)
self.startScanAction.setStatusTip('Start a scan')
self.startScanAction.setToolTip('Start a scan')
self.startScanAction.setShortcut('Ctrl+T')
self.startScanAction.triggered.connect(self.startScan)
self.abortScanAction = QtGui.QAction('A&bort Scan', self)
self.abortScanAction.setStatusTip('Abort the current scan')
self.abortScanAction.setToolTip('Abort the current scan')
self.abortScanAction.setShortcut('Ctrl+B')
self.abortScanAction.triggered.connect(self.abortScan)
self.abortScanAction.setEnabled(False)
self.configInstrumentsAction = QtGui.QAction('&Instruments', self)
self.configInstrumentsAction.setStatusTip('Configure the instruments')
self.configInstrumentsAction.setToolTip('Configure the instruments')
self.configInstrumentsAction.triggered.connect(self.configInstruments)
self.configSysResAction = QtGui.QAction('System &Response', self)
self.configSysResAction.setStatusTip('Configure the system response')
self.configSysResAction.setToolTip('Configure the system response')
self.configSysResAction.triggered.connect(self.configSysRes)
self.configLockinAction = QtGui.QAction('&Lock-in', self)
self.configLockinAction.setStatusTip(
'Configure the lock-in amplifier')
self.configLockinAction.setToolTip(
'Configure the lock-in amplifier')
self.configLockinAction.triggered.connect(
self.configLockin)
self.configDivertersAction = QtGui.QAction('&Diverters', self)
self.configDivertersAction.setStatusTip(
'Configure the diverters')
self.configDivertersAction.setToolTip('Configure the diverters')
self.configDivertersAction.triggered.connect(
self.configDiverters)
self.configGratingsAndFiltersAction = QtGui.QAction(
'&Gratings and Filters',
self)
self.configGratingsAndFiltersAction.setStatusTip(
'Configure the gratings and filters')
self.configGratingsAndFiltersAction.setToolTip(
'Configure the gratings and filters')
self.configGratingsAndFiltersAction.triggered.connect(
self.configGratingsAndFilters)
self.generateVeuszFileAction = QtGui.QAction('Generate &Veusz File',
self)
self.generateVeuszFileAction.setStatusTip(
'Generate a Veusz file')
self.generateVeuszFileAction.setToolTip(
'Generate a Veusz file')
self.generateVeuszFileAction.triggered.connect(
self.generateVeuszFile)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(self.openAction)
fileMenu.addAction(self.saveAction)
fileMenu.addAction(self.saveAsAction)
fileMenu.addAction(self.closeAction)
viewMenu = menubar.addMenu('&View')
viewMenu.addAction(self.viewSignal)
viewMenu.addAction(self.viewRawSignal)
viewMenu.addAction(self.viewPhase)
viewMenu.addSeparator().setText("Spectra")
viewMenu.addAction(self.viewClearPlotAction)
axesMenu = menubar.addMenu('A&xes')
axesMenu.addSeparator().setText("X Axis")
axesMenu.addAction(self.axesWavelengthAction)
axesMenu.addAction(self.axesEnergyAction)
axesMenu.addSeparator().setText("Y Axis")
axesMenu.addAction(self.axesSemilogAction)
self.axesSemilogAction.changed.connect(self.axesSemilog)
scanMenu = menubar.addMenu('&Scan')
scanMenu.addAction(self.gotoWavelengthAction)
scanMenu.addAction(self.startScanAction)
scanMenu.addAction(self.abortScanAction)
configMenu = menubar.addMenu('&Config')
configMenu.addAction(self.configInstrumentsAction)
configMenu.addAction(self.configSysResAction)
configMenu.addAction(self.configLockinAction)
configMenu.addAction(self.configDivertersAction)
configMenu.addAction(self.configGratingsAndFiltersAction)
toolsMenu = menubar.addMenu('&Tools')
toolsMenu.addAction(self.generateVeuszFileAction)
aboutMenu = menubar.addMenu('&About')
aboutMenu.addAction(self.aboutAction)
statusBar = self.statusBar()
self.statusLabel = QtGui.QLabel('Initializing...')
self.gratingLabel = QtGui.QLabel('Grating=?')
self.filterLabel = QtGui.QLabel('Filter=?')
self.wavelengthLabel = QtGui.QLabel('Wavelength=?')
self.signalLabel = QtGui.QLabel('Signal=?')
self.rawSignalLabel = QtGui.QLabel('Raw Signal=?')
self.phaseLabel = QtGui.QLabel('Phase=?')
statusBar.addWidget(self.statusLabel, stretch=1)
statusBar.addWidget(self.gratingLabel, stretch=1)
statusBar.addWidget(self.filterLabel, stretch=1)
statusBar.addWidget(self.wavelengthLabel, stretch=1)
statusBar.addWidget(self.signalLabel, stretch=1)
statusBar.addWidget(self.rawSignalLabel, stretch=1)
statusBar.addWidget(self.phaseLabel, stretch=1)
view = pg.GraphicsLayoutWidget()
self.setCentralWidget(view)
self.plot = SpectraPlotItem(xaxis='wavelength')
self.plot.setSignalEnabled(True)
self.plot.setRawSignalEnabled(False)
self.plot.setPhaseEnabled(False)
view.addItem(self.plot, 0, 0)
self.setCentralWidget(view)
self.setWindowTitle('SimplePL')
self.setMinimumSize(576, 432)
self.readWindowSettings()
    @QtCore.Slot(bool)
    def viewSignalToggled(self, b):
        """Show or hide the processed-signal curve on the plot."""
        if self.plot:
            self.plot.setSignalEnabled(b)
    @QtCore.Slot(bool)
    def viewRawSignalToggled(self, b):
        """Show or hide the raw-signal curve on the plot."""
        if self.plot:
            self.plot.setRawSignalEnabled(b)
    @QtCore.Slot(bool)
    def viewPhaseToggled(self, b):
        """Show or hide the phase curve on the plot."""
        if self.plot:
            self.plot.setPhaseEnabled(b)
    def clearPlot(self):
        """Remove all spectra from the plot."""
        self.plot.clear()
    def axesWavelength(self):
        """Switch the x axis to wavelength units."""
        self.plot.setXAxisView('wavelength')
    def axesEnergy(self):
        """Switch the x axis to energy units."""
        self.plot.setXAxisView('energy')
    def setWavelength(self):
        """Ask the user for a target wavelength and drive the spectrometer there."""
        wavelength = SetWavelengthDialog.getWavelength(
            spectrometer=self.spectrometer,
            wavelength=self._wavelength,
            parent=self)
        if wavelength is None:
            return  # dialog cancelled
        # Reuse the scanner slot for the go-to move so updateActions() treats
        # it like a running scan.
        self.scanner = GoToer(self.spectrometer, wavelength)
        self.scanner.statusChanged.connect(self.updateStatus)
        self.scanner.started.connect(self.updateActions)
        self.scanner.finished.connect(self.updateActions)
        self.scanner.sigException.connect(self.scannerException)
        self.scanner.start()
    def axesSemilog(self):
        """Toggle a logarithmic y axis based on the menu action's check state."""
        logMode = self.axesSemilogAction.isChecked()
        if self.plot:
            self.plot.setLogMode(None, logMode)
def updateActions(self):
spec = self._spectrometerInitilized
lockin = self._lockinInitilized
both = spec and lockin
scanning = bool(self.scanner) and self.scanner.isScanning()
notScanning = not scanning
all = both and notScanning
self.openAction.setEnabled(notScanning)
self.saveAction.setEnabled(not self._scanSaved and notScanning)
self.saveAsAction.setEnabled(notScanning and self.spectrum is not None)
self.gotoWavelengthAction.setEnabled(spec and notScanning)
self.startScanAction.setEnabled(all)
self.abortScanAction.setEnabled(scanning)
self.configInstrumentsAction.setEnabled(not both or notScanning)
self.configSysResAction.setEnabled(notScanning)
self.configLockinAction.setEnabled(lockin and notScanning)
self.configDivertersAction.setEnabled(spec and notScanning)
self.configGratingsAndFiltersAction.setEnabled(spec and notScanning)
    def startScan(self):
        """Prompt for scan parameters and launch a new scan thread.

        Offers to save any unsaved data first, optionally clears the plot,
        then creates a fresh ExpandingSpectrum that the Scanner fills in.
        """
        if self.scanner and self.scanner.isScanning():
            return  # a scan is already running
        if not self._scanSaved:
            self.savePrompt()  # Prompt the user to save the scan
        self._scanSaved = False
        # Get the scan parameters from the user
        params = StartScanDialog.getScanParameters(
            spectrometer=self.spectrometer,
            parent=self)
        if params is None:
            return  # cancel
        start, stop, step, delay = params
        # Remove the old spectrum from the plot, and add a new one
        if self.spectrum:
            result = QtGui.QMessageBox.question(self,
                                                'Clear plot?',
                                                'Do you want to clear the '
                                                'plot?',
                                                QtGui.QMessageBox.Yes,
                                                QtGui.QMessageBox.No)
            if result == QtGui.QMessageBox.Yes:
                self.clearPlot()
        self.spectrum = ExpandingSpectrum(self._sysresParser)
        self.plot.addSpectrum(self.spectrum)
        self.scanner = Scanner(self.spectrometer, self.lockin, self.spectrum,
                               start, stop, step, delay)
        self.scanner.statusChanged.connect(self.updateStatus)
        self.scanner.started.connect(self.updateActions)
        self.scanner.finished.connect(self.updateActions)
        self.scanner.sigException.connect(self.scannerException)
        self.scanner.start()
def abortScan(self):
if not self.scanner.isScanning():
self.updateActions()
return
self.updateStatus('Aborting scan...')
self.scanner.abort()
    def configDiverters(self):
        """Let the user configure the entrance/exit mirror diverters."""
        # Get the config parameters
        entranceMirror, exitMirror, accepted = (
            DivertersConfigDialog.getDivertersConfig(parent=self))
        if not accepted:
            return
        self.spectrometer.setEntranceMirror(entranceMirror)
        self.spectrometer.setExitMirror(exitMirror)
    def configInstruments(self):
        """Reconfigure instrument ports and restart both instrument threads."""
        # Get the ports
        ports = ConfigInstrumentsDialog.getConfig(parent=self)
        if ports is None:
            return  # dialog cancelled
        # Reset the status
        self.updateStatus('Reinitializing...')
        self._lockinInitilized = False
        self._spectrometerInitilized = False
        self.updateActions()
        # Restart the lockin and spectrometer.
        # Quit both threads first, then wait on both, so the shutdowns
        # overlap instead of running serially.
        if self.lockin:
            self.lockin.thread.quit()
        if self.spectrometer:
            self.spectrometer.thread.quit()
        if self.lockin:
            self.lockin.thread.wait()
        if self.spectrometer:
            self.spectrometer.thread.wait()
        self.initSpectrometer()
        self.initLockin()
def configSysRes(self):
sysResPath = self._settings.value('sysResPath', None)
sysResPath, _filter = QtGui.QFileDialog.getOpenFileName(parent=self,
caption='Open a system response file',
dir=sysResPath)
if not sysResPath:
return
self._settings.setValue('sysResPath', sysResPath)
self._sysresParser = SimplePLParser(None, sysResPath)
    def configLockin(self):
        """Let the user configure the lock-in amplifier settings."""
        # Get the config parameters
        timeConstantIndex, reserveModeIndex, inputLineFilterIndex, accepted = (
            LockinConfigDialog.getLockinConfig(self.lockin, parent=self))
        if not accepted:
            return
        self.lockin.setTimeConstantIndex(timeConstantIndex)
        self.lockin.setReserveModeIndex(reserveModeIndex)
        self.lockin.setInputLineFilterIndex(inputLineFilterIndex)
    def configGratingsAndFilters(self):
        """Open the advanced gratings/filters configuration dialog."""
        GratingsAndFiltersConfigDialog.getAdvancedConfig(self.spectrometer,
                                                         parent=self)
    def generateVeuszFile(self):
        """Open the modal dialog that exports data to a Veusz file."""
        GenerateVeuszFileDialog(self).exec_()
    def getSystemResponseFilePath(self):
        """Show a file dialog for a system-response file; return the path.

        Returns an empty value if the user cancels. Does not update the
        stored 'sysResPath' setting itself.
        """
        sysResPath = self._settings.value('sysResPath', None)
        sysResPath, _filter = QtGui.QFileDialog.getOpenFileName(parent=self,
                caption='Open a system response file',
                dir=sysResPath)
        return sysResPath
    def openFile(self):
        """Open a measured spectrum file and add it to the plot.

        If the file lacks a system-response-removed column, offers to load a
        system response file and reopen with it applied. Optionally clears
        the plot before adding the new spectrum.
        """
        dirpath = self._settings.value('last_directory', '')
        filepath, _filter = QtGui.QFileDialog.getOpenFileName(parent=self,
                caption='Open a PL spectrum file',
                dir=dirpath)
        if not filepath:
            return  # dialog cancelled
        dirpath, filename = os.path.split(filepath)
        self._settings.setValue('last_directory', dirpath)
        # self.setWindowTitle(u'SimplePL - {}'.format(filename))
        spectrum = MeasuredSpectrum.open(filepath)
        # Check if the system response removed is included.
        # If not, ask user to select a system response file.
        if not len(spectrum.getSignal()):
            result = QtGui.QMessageBox.question(self,
                                                'Provide system response?',
                                                'The selected file does not '
                                                'appear to have a system-'
                                                'response-removed column. '
                                                'Would you like to provide a '
                                                'system response?',
                                                QtGui.QMessageBox.Yes,
                                                QtGui.QMessageBox.No)
            if result == QtGui.QMessageBox.Yes:
                sysres_filepath = self.getSystemResponseFilePath()
                if sysres_filepath:
                    spectrum = MeasuredSpectrum.open(filepath, sysres_filepath)
        # remove the previous measured spectrum
        if self.spectrum:
            result = QtGui.QMessageBox.question(self,
                                                'Clear plot?',
                                                'Do you want to clear the '
                                                'plot?',
                                                QtGui.QMessageBox.Yes,
                                                QtGui.QMessageBox.No)
            if result == QtGui.QMessageBox.Yes:
                self.clearPlot()
        # plot the measured spectrum
        self.plot.addSpectrum(spectrum)
        self.spectrum = spectrum
        self.updateActions()
    def savePrompt(self):
        """Ask whether to save the current scan; save it if the user agrees."""
        reply = QtGui.QMessageBox.question(self, 'Save?',
                    'Do you want to save the current scan?',
                    QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                    QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            self.saveFile()
    def saveFile(self):
        """Save the current spectrum to a user-chosen tab-delimited file."""
        dirpath = self._settings.value('last_directory', '')
        filepath, _filter = QtGui.QFileDialog.getSaveFileName(parent=self,
                caption='Save the current spectrum',
                dir=dirpath,
                filter='Tab Delimited Text (*.txt)')
        if not filepath:
            return  # dialog cancelled
        dirpath, _filename = os.path.split(filepath)
        self._settings.setValue('last_directory', dirpath)
        self.spectrum.save(filepath)
        self._scanSaved = True
    def saveAsFile(self):
        """Alias for saveFile(); saveFile always prompts for a path anyway."""
        self.saveFile()
    def about(self):
        """Show the modal About dialog."""
        AboutDialog().exec_()
def moveCenter(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def moveTopLeft(self):
p = QtGui.QDesktopWidget().availableGeometry().topLeft()
self.move(p)
    def closeEvent(self, event):
        """Confirm quit, stop any scan, offer to save, and shut down threads."""
        reply = QtGui.QMessageBox.question(self, 'Quit?',
                    'Are you sure you want to quit?',
                    QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                    QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            if not self._scanSaved:
                self.abortScan()
                self.savePrompt()  # Prompt the user to save the scan
            # Quit both instrument threads, then wait on both, so the
            # shutdowns overlap.
            if self.spectrometer:
                self.spectrometer.thread.quit()
            if self.lockin:
                self.lockin.thread.quit()
            if self.spectrometer:
                self.spectrometer.thread.wait()
            if self.lockin:
                self.lockin.thread.wait()
            self.writeWindowSettings()
            event.accept()
        else:
            event.ignore()
    def writeWindowSettings(self):
        """Persist the window size and position to the settings store."""
        self._settings.setValue("MainWindow/size", self.size())
        self._settings.setValue("MainWindow/pos", self.pos())
    def readWindowSettings(self):
        """Restore window size/position; center with a default size if unset."""
        self.resize(self._settings.value("MainWindow/size",
                                         QtCore.QSize(1280, 800)))
        pos = self._settings.value("MainWindow/pos")
        if pos is None:
            self.moveCenter()  # default to centered
        else:
            self.move(pos)
|
I am trying to use the Jigsaw client API to make a POST/GET request,
where ourManager is the HttpManager instance.
Any hints as to what I am doing wrong would be welcome.
Next message: Jean-Philippe Orsini: "Bug report in the 2.1.0 release of jigsaw"
Previous message: Luc Saint-Elie: "Re: jigsaw under windows"
|
import torch
from torch.autograd.function import Function, InplaceFunction
from torch._thnn import type2backend
from . import _all_functions
class PReLU(Function):
    """Legacy (pre-0.2 style) autograd Function wrapping THNN's PReLU."""

    def forward(self, input, weight):
        self._backend = type2backend[type(input)]
        output = input.new()
        self.num_parameters = weight.numel()
        if self.num_parameters == 1:
            # A single shared weight is signalled to the backend as 0 —
            # presumably the THNN convention for "shared"; confirm against
            # the THNN PReLU source.
            self.num_parameters = 0
        self._backend.PReLU_updateOutput(
            self._backend.library_state,
            input,
            output,
            weight,
            self.num_parameters
        )
        self.save_for_backward(input, weight)
        return output

    def backward(self, grad_output):
        input, weight = self.saved_tensors
        # TODO: check if requires grad
        grad_input = input.new()
        self._backend.PReLU_updateGradInput(
            self._backend.library_state,
            input,
            grad_output,
            grad_input,
            weight,
            self.num_parameters
        )
        # Scratch buffers required by the THNN accGradParameters call.
        buf = weight.new()
        buf2 = weight.new()
        # TODO: this won't have to be zeroed in the future
        grad_weight = weight.new().resize_as_(weight).zero_()
        self._backend.PReLU_accGradParameters(
            self._backend.library_state,
            input,
            grad_output,
            grad_input,
            weight,
            grad_weight,
            buf,
            buf2,
            self.num_parameters,
            1
        )
        return grad_input, grad_weight
class RReLU(InplaceFunction):
    """Legacy autograd Function wrapping THNN's randomized leaky ReLU.

    In training mode the negative slope is sampled uniformly from
    [lower, upper] per element; the sampled noise is kept for backward.
    """

    def __init__(self, lower, upper, train, inplace=False):
        super(RReLU, self).__init__(inplace)
        self.lower = lower
        self.upper = upper
        self.train = train

    def forward(self, input):
        self._backend = type2backend[type(input)]
        output = input.new()
        # Per-element slopes sampled by the backend; reused in backward.
        self.noise = input.new()
        self._backend.RReLU_updateOutput(
            self._backend.library_state,
            input,
            output,
            self.noise,
            self.lower,
            self.upper,
            self.train,
            self.inplace,
            torch.default_generator if not input.is_cuda else 0
        )
        self.save_for_backward(input)
        return output

    def backward(self, grad_output):
        input, = self.saved_tensors
        # TODO: check if requires grad
        grad_input = input.new()
        self._backend.RReLU_updateGradInput(
            self._backend.library_state,
            input,
            grad_output,
            grad_input,
            self.noise,
            self.lower,
            self.upper,
            self.train,
            self.inplace
        )
        return grad_input
class Softmin(Function):
    """Legacy autograd Function computing softmin(x) = softmax(-x).

    Implemented by negating the input, running THNN's SoftMax, and negating
    the gradient on the way back.
    """

    def forward(self, input):
        self._backend = type2backend[type(input)]
        # Negated input; kept on self because backward needs it again.
        self.mininput = input.clone().mul(-1)
        output = input.new()
        self._backend.SoftMax_updateOutput(
            self._backend.library_state,
            self.mininput,
            output
        )
        self.save_for_backward(output)
        return output

    def backward(self, grad_output):
        output, = self.saved_tensors
        grad_input = grad_output.new()
        self._backend.SoftMax_updateGradInput(
            self._backend.library_state,
            self.mininput,
            grad_output,
            grad_input,
            output
        )
        # Undo the input negation from forward.
        return grad_input.mul(-1)
# Register the legacy autograd functions with the module-level registry.
_all_functions.append(PReLU)
_all_functions.append(RReLU)
_all_functions.append(Softmin)
|
This supposed juicy hook up tabloid fodder out of Taiwan entertainment is too cute not to report. One of Taiwan’s top tabloids Next Magazine has splashed all over its cover of last week’s edition the possible unexpected romance between veteran actress-producer Ruby Lin and top idol singer Show Luo. I have to get this out of the way first, regardless of whether this news report is true or not, the name of this potential coupling has all of Taiwan dying of laughter. Ruby’s name is Lin Xin Ru (Xin = heart) and Show’s nickname is Xiao Zhu (Zhu = pig), so Next Magazine has deemed this coupling the Pig Heart Romance (Zhu Xin Lian 豬心戀). Excuse me while I laugh with the rest of Taiwan for a minute here. Okay, back now to report on how Next Magazine caught this romance in action. Apparently the periodical was tipped off that long casual time friends Ruby and Show might have taken their friendship to the next level during a serendipitous opening in both of their dating lives after a push from mutual friend/matchmaker Shu Qi. LOL, Shu Qi has a lot of time on her hands so it seems. Anyhoo, this four month old romance has been hush hush with a friend of Show’s remarking that Show is very serious about her which means keeping this budding romance under wraps.
Next Magazine staged their own surveillance outside of Ruby’s luxe condo complex in Taipei and twice caught Show driving into the garage. The magazine noted that Show drove a different one of his many expensive sports cars the two times he visited the building, perhaps in an attempt to not be recognized by his drive. I have to point out that Next Magazine is not adding the disclaimer that Ruby lives in the same super duper expensive high rise condo that a lot of other Taiwan A-listers live in, including Vic Zhou and Cyndi Wang among others. Both Show and Ruby immediately issued denials of this noona-dongsaeng romance, and even funnier is their weibo convo about it. Show posted a shrugging picture asking the media to stop pairing him up with ladies. He then pointed out the hideousness of the Pig Heart Romance nickname, and asked if it was paired with a bowl of rice noodle soup (since pig heart is typically a Taiwanese soup ingredient). Ruby responded on weibo asking “shop owner, how much for a bowl?” (of pig heart romance, of course), to which Show then responded “One bowl is NT50! Hahaha!” I feel for Show, he’s pretty much gotten a reputation of a manwhore if all his rumored romances are to be believed, including but not limited to Jolin Tsai, Makiyo, Rainie Yang, Da S, Selina Jen, Tia Li, and Butterfly Chien. I don’t care if Ruby and Show are really dating or not, but I do love Ruby so much I pretty much nod in approval if she’s rumored to be dating someone young and hot because she damn wants to.
Yeah... that's what I thought too — that she was dating Wallace Huo.
I don’t believe a word of it. Show Luo seems like the type to chase naïve young things.
You took the words right outta my mouth. Love her and wouldn’t mind who she’s dating. Let it be a younger or older man.
Ahhhh….I’ve always hope he will romance Rainie.
LOL. Congratulations if they are! I’d never have imagined it though.
Dating a younger, hot guy…you go girl!
Get to sleep with many beautiful women and have people feel sorry for him.
Man, nice work if you can get it….
LOL the weibo post was hilarious! Thanks Ms. Koala I’m finally catching up with the drama/entertainment world outside of Korea.
But wasn't Show Luo rumored to be dating a model? Wow, there are so many romance rumors about Show Luo. It must be difficult being a celebrity with so many rumors coming out about this and that.
Their couple name and their Weibo cracks me up so bad, excuse me while I laugh! HAHAHA!
|
from flask import Flask, request, send_from_directory, jsonify
import rest.database as database
import os
app = Flask(__name__)
# Resolve the static-files root: the relative path differs depending on
# whether the server is started from the repo root or from this package.
if os.path.isdir('./src'):
    ROOT_DIR = './src/'
else:
    ROOT_DIR = '../../src/'
@app.route("/")
def hello():
return send_from_directory(ROOT_DIR, 'index.html')
@app.route('/js/<path:path>')
def send_js(path):
    """Serve JavaScript assets."""
    return send_from_directory(ROOT_DIR + 'js', path)
@app.route('/css/<path:path>')
def send_css(path):
    """Serve CSS assets."""
    return send_from_directory(ROOT_DIR + 'css', path)
@app.route('/index')
def send_main():
    """Serve the main page (same as '/')."""
    return send_from_directory(ROOT_DIR, 'index.html')
@app.route('/save')
def send_save():
    """Serve the save page."""
    return send_from_directory(ROOT_DIR, 'save.html')
@app.route('/selftest')
def send_test():
    """Serve the self-test page."""
    return send_from_directory(ROOT_DIR, 'selftest.html')
@app.route('/api/courses')
def get_courses():
    """Return all courses as JSON."""
    return jsonify(database.get_courses())
@app.route('/api/courses', methods=['POST'])
def post_course():
    """Create a course from the JSON request body; return the updated list."""
    data = request.json
    database.create_course(data)
    return jsonify(database.get_courses())
@app.route('/api/notes/<lectureid>')
def get_notes(lectureid):
    """Return the notes for one lecture as JSON."""
    return jsonify(database.get_notes(lectureid))
#
# @app.route('/api/notes/<lectureid>', methods=['POST'])
# def post_note(lectureid):
# data = request.json
# database.create_note(lectureid, data)
# return jsonify(database.get_notes())
@app.route('/api/debug')
def debug():
    """Dump the in-memory database to stdout and return the notes.

    NOTE(review): this reaches into `database.__courses` / `database.__notes`;
    confirm those attributes are actually exported under these exact names —
    double-underscore names are commonly intended as private.
    """
    print(database.__courses)
    print(database.__notes)
    return jsonify(database.__notes)
if __name__ == "__main__":
print(os.listdir(ROOT_DIR))
app.run()
|
Do you want to hire a trusted cleaning service in Queens Park, London W10?
Hire our dependable Cleaning Services company in Queens Park London London W10 for all your cleaning requirements; we offer top-notch Cleaning Services services at affordable prices.
Whether you want to book a one-off cleaning appointment or you need daily or weekly cleaning services in Queens Park London London W10, we can help you. Contact our experienced customer service team by phone or via email and we will offer you a free quotation. You can also book our Queens Park London London W10 cleaning services by utilising the booking form on our site. After you give us your specific cleaning requirements and confirm the price of the job with us, our Cleaning Services efficient cleaners will visit your address in Queens Park London London W10 to handle the job.
|
#!/usr/bin/env python
"""
This is a little demo of how to make clickable (and changeable) objects with
FloatCanvas
Also an example of constant size, rather than the usual zooming and panning
Developed as an answer to a question on the wxPython mailing list:
"get panel id while dragging across several panels'
April 5, 2012
"""
import random
import wx
## import the installed version
from wx.lib.floatcanvas import NavCanvas, FloatCanvas
## import a local version
#import sys
#sys.path.append("../")
#from floatcanvas import FloatCanvas as FC
# Palette of RGB fill colors cycled through when a square is clicked.
colors = [ (255, 0 , 0 ),
           (0 , 255, 0 ),
           (0 , 0, 255),
           (255, 255, 0 ),
           (255, 0, 255),
           (0 , 255, 255),
           ]
class DrawFrame(wx.Frame):
    """
    A frame used for the Demo
    """

    def __init__(self, *args, **kwargs):
        """Build the canvas and a 9x9 grid of clickable squares.

        Each square gets an invisible outline rectangle that is shown while
        the mouse hovers over it.
        """
        wx.Frame.__init__(self, *args, **kwargs)
        # Add the Canvas
        Canvas = FloatCanvas.FloatCanvas(self,
                                         size = (500,500),
                                         ProjectionFun = None,
                                         Debug = 0,
                                         BackgroundColor = "Black",
                                         )
        self.Canvas = Canvas
        self.Canvas.Bind(wx.EVT_SIZE, self.OnSize)
        # build the squares:
        w = 10   # square size
        dx = 14  # grid spacing
        for i in range(9):
            for j in range(9):
                Rect = Canvas.AddRectangle((i*dx, j*dx), (w, w), FillColor="White", LineStyle = None)
                Outline = Canvas.AddRectangle((i*dx, j*dx), (w, w),
                                              FillColor=None,
                                              LineWidth=4,
                                              LineColor='Red',
                                              LineStyle=None)
                # Remember the grid position and hover outline on the square.
                Rect.indexes = (i,j)
                Rect.outline = Outline
                Rect.Bind(FloatCanvas.EVT_FC_LEFT_DOWN, self.SquareHitLeft)
                Rect.Bind(FloatCanvas.EVT_FC_ENTER_OBJECT, self.SquareEnter)
                Rect.Bind(FloatCanvas.EVT_FC_LEAVE_OBJECT, self.SquareLeave)
        self.Show()
        Canvas.ZoomToBB()

    def SquareHitLeft(self, square):
        """Left-click handler: recolor the square randomly."""
        print("square hit:", square.indexes)
        # set a random color
        c = random.sample(colors, 1)[0]
        square.SetFillColor( c )
        self.Canvas.Draw(True)

    def SquareEnter(self, square):
        """Hover-enter handler: show the square's red outline."""
        print("entering square:", square.indexes)
        square.outline.SetLineStyle("Solid")
        self.Canvas.Draw(True)

    def SquareLeave(self, square):
        """Hover-leave handler: hide the square's outline."""
        print("leaving square:", square.indexes)
        square.outline.SetLineStyle(None)
        self.Canvas.Draw(True)

    def OnSize(self, event):
        """
        re-zooms the canvas to fit the window
        """
        print("in OnSize")
        self.Canvas.ZoomToBB()
        event.Skip()
# Run the demo only when executed as a script; the guard keeps the module
# importable (e.g. by docs tooling) without popping up a window.
if __name__ == "__main__":
    app = wx.App(False)
    F = DrawFrame(None, title="FloatCanvas Demo App", size=(700,700) )
    app.MainLoop()
|
An easy hike close to Sedona when time is an issue or you want a nice walk for exercise. Although it is close to homes in Mystic Hills Subdivision, you will cross the creek washes and see nice rock formations. The trailhead starts near Chapel of the Holy Cross. Not a trail for you, if you want a wilderness hike.
|
from collections import namedtuple
import datetime
import random
import hotmodel
ProcessOperation = namedtuple("ProcessOperation", [
"operation",
"act",
])
ProductOperation = namedtuple("ProductOperation", [
"operation",
"tm",
"workplace",
])
class Server(object):
"""
A mock server. Can answer questions about a process for an
(article, serial_number) and about operations done on the
same.
"""
ACTS = [
"Laser",
"Automatic SMT placement", "Manual SMT placement",
"AOI",
"THT placement", "Optical inspection",
"Selective soldering", "Wave", "Manual soldering",
]
def __init__(self, op_done_rate=90):
self.op_done_rate = op_done_rate
pass
def get_product_ops(self, article, serial_num,):
"""
Returns a list of operations done from the product process.
Randomly skips some operations, and add random dates and
workplaces.
"""
ret = []
dt0 = datetime.datetime.now() - datetime.timedelta(
random.randint(3, 5),
random.randint(0, 60*60*24),
)
proc = self.get_process(article, serial_num)
for operation in proc:
if random.randint(0, 100) > self.op_done_rate:
continue
ret.append(ProductOperation(
operation.operation,
dt0,
random.randint(1, 5),
))
dt0 += datetime.timedelta(0, random.randint(10, 14400))
return ret
def get_process(self, article, dummy_sn):
"""
Returns a list of operations (operation number, act) for this
article/sn. For the articles ending with an even number, returns
one set of operations, another set for all of the rest.
"""
if article[-1] in ("0", "1", "2", "3", ):
return [ # SMT both sides
ProcessOperation(op*10, self.ACTS[act])
for (op, act) in enumerate((0, 1, 2, 3, 1, 3,))
]
if article[-1] in ("4", "5", "6", ):
return [ # SMT one side and THT
ProcessOperation(op*10, self.ACTS[act])
for (op, act) in enumerate((0, 1, 3, 4, 7, 5, 8, 5,))
]
return [ # SMT one side, THT, selective soldering
ProcessOperation(op*10, self.ACTS[act])
for (op, act) in enumerate((0, 1, 3, 4, 6, 5,))
]
class ProductModel(hotmodel.HotObject):
    """
    Holds information about a product (article/serial number). When told
    a new (article, serial_number), fetches the information about it's
    process and performed operations from the server. Manages the selected
    operation in the list of performed operations and the selected
    operation in the process.
    """

    def __init__(self, server):
        """
        Set-up the read-only properties.
        """
        super(ProductModel, self).__init__("", None)
        self.server = server
        self.make_hot_property("article", str, True, None)
        self.make_hot_property("sn", int, True, None)
        self.make_hot_property(
            "process",
            hotmodel.TypedHotList,
            False,
            hotmodel.TypedHotList(ProcessOperation),
        )
        self.make_hot_property(
            "operations",
            hotmodel.TypedHotList,
            False,
            hotmodel.TypedHotList(ProductOperation),
        )
        self.make_hot_property("process_selection", int, True, None)
        self.make_hot_property("operation_selection", int, True, None)

    def set_product(self, article, sn):
        """
        Set the current product.

        Refetches process/operations from the server and clears both
        selections.
        """
        self.article = article
        self.sn = sn
        self.process = self.server.get_process(article, sn)
        self.operations = self.server.get_product_ops(article, sn)
        self.process_selection = None
        self.operation_selection = None

    def select_operation(self, index):
        """
        Set the selected operation in the list of performed operations.

        Fires a "select" event only when the selection actually changes.
        """
        if index == self.operation_selection:
            return
        self.operation_selection = index
        self._fire("select", index)

    def select_process_operation(self, index):
        """
        Set the selected operation in the process.

        Fires a "select" event only when the selection actually changes.
        """
        if index == self.process_selection:
            return
        self.process_selection = index
        self._fire("select", index)
# Demo listener: log which handler fired and for which model event.
# (Python 2 print statement — this module targets Python 2.)
def sample_handler(handler_name, model, fqname, event_name, key):
    print handler_name, "-->", fqname, event_name, key
if "__main__" == __name__:
MODEL = ProductModel(Server())
MAPPER = hotmodel.Mapper()
MAPPER.add_route("/process", "", lambda a,b,c,d: sample_handler("/process-HANDLER-1", a,b,c,d),)
MAPPER.add_route("", "reset", lambda a,b,c,d: sample_handler("!!RESET-handler-1", a,b,c,d),)
MAPPER.add_route("", "", lambda a,b,c,d: sample_handler("*-handler-1", a,b,c,d),)
MODEL.add_listener(MAPPER)
MODEL.set_product("AAAQA1", 1)
MODEL.set_product("AAAQA2", 2)
MODEL.select_operation(3)
MODEL.select_process_operation(1)
MODEL.select_process_operation(2)
MODEL.select_process_operation(2)
|
The Music & Function Room has already been host to many dance and musical performances, as well as lectures and music recitals. The Room boasts high windows that allow impressive natural light, as well as haunting features that give a natural atmosphere to any evening reception or performance. The Room can be set up in various ways; whether it is with rows of seating for formal presentations and recitals, or transformed for evening sit-down dinners that can be catered with a la carte dining. If you wish to have further information on our Music & Function Room, or to hire it out for your event, then please contact Pushkin House on +44 (0) 20 7269 9770.
|
import jmespath
import demistomock as demisto
from CommonServerPython import *
from JSONFeedApiModule import * # noqa: E402
from typing import Dict
# Default page size for the indicators stream.
DEFAULT_COUNT = 100
# Integration parameter name -> Intel471 API query-string name.
SEARCH_PARAMS = {
    'indicator': 'indicator',
    'from': 'from',
    'until': 'until',
    'threat_type': 'threatType',
    'malware_family': 'malwareFamily',
    'confidence': 'confidence',
    'count': 'count',
}
# Feed "indicatorType" values -> XSOAR indicator types.
FEED_INDICATOR_TYPES = {
    FeedIndicatorType.URL: FeedIndicatorType.URL,
    FeedIndicatorType.File: FeedIndicatorType.File,
    "ipv4": FeedIndicatorType.IP
}
FEED_URL = 'https://api.intel471.com/v1/indicators/stream?'
# Per-type mapping of XSOAR indicator fields -> flattened API response keys.
MAPPING = {
    FeedIndicatorType.File: {
        'threat_type': 'threattypes.threatcategory',
        'threat_data_family': 'malwarefamily',
        'indicator_data_file_md5': 'md5',
        'indicator_data_file_sha1': 'sha1',
        'indicator_data_file_sha256': 'sha256',
        'context_description': 'description',
        'indicator_data_file_download_url': 'downloadurl',
        'mitre_tactics': 'mitretactics',
        'relation_entity_b': 'threat_data_family'
    },
    FeedIndicatorType.URL: {'threat_type': 'threattypes.threatcategory',
                            'threat_data_family': 'malwarefamily',
                            'indicator_data_url': 'url',
                            'context_description': 'description',
                            'mitre_tactics': 'mitretactics',
                            'relation_entity_b': 'threat_data_family'
                            },
    "ipv4": {'threat_type': 'threattypes.threatcategory',
             'threat_data_family': 'malwarefamily',
             'indicator_data_address': 'ipaddress',
             'context_description': 'description',
             'mitre_tactics': 'mitretactics',
             'relation_entity_b': 'threat_data_family'
             }
}
# Which mapped field carries the indicator's value, per type.
INDICATOR_VALUE_FIELD = {FeedIndicatorType.File: 'indicator_data_file_sha256',
                         FeedIndicatorType.URL: 'indicator_data_url',
                         "ipv4": 'indicator_data_address'}
DEMISTO_VERSION = demisto.demistoVersion()
CONTENT_PACK = 'Intel471 Feed/2.0.1'
INTEGRATION = 'Intel471 Malware Indicator Feed'
USER_AGENT = f'XSOAR/{DEMISTO_VERSION["version"]}.{DEMISTO_VERSION["buildNumber"]} - {CONTENT_PACK} - {INTEGRATION}'
def _create_url(**kwargs):
    """Append the given query parameters to the feed stream URL."""
    query = "&".join(f"{name}={value}" for name, value in kwargs.items())
    return FEED_URL + query
def _build_url_parameter_dict(**kwargs):
    """
    Given a set of parameters, creates a dictionary with only searchable
    items (renamed to their API spellings) that can be used in the api.
    """
    return {
        SEARCH_PARAMS[name]: value
        for name, value in kwargs.items()
        if name in SEARCH_PARAMS
    }
def get_params_by_indicator_type(**kwargs):
    """Build one fully-parameterized feed URL per requested indicator type.

    Returns a dict mapping each configured indicator type to its stream URL.
    """
    indicators_url = {}
    params = _build_url_parameter_dict(**kwargs)
    params['count'] = int(params.get('count', DEFAULT_COUNT))
    indicator_types = argToList(kwargs.get('indicator_type'))
    # allows user to choose multiple indicator types at once.
    if 'All' in indicator_types:
        indicator_types = FEED_INDICATOR_TYPES
    for current_type in indicator_types:
        params['indicatorType'] = current_type
        indicators_url[current_type] = _create_url(**params)
    return indicators_url
def custom_build_iterator(client: Client, feed: Dict, limit: int = 0, **kwargs) -> List:
    """Fetch indicators page-by-page from the Intel471 stream endpoint.

    Resumes from the last fetch time stored in the integration context and
    persists the new end time when done.

    NOTE(review): the paging condition looks suspicious — with the default
    limit of 0, `len(result) < limit` is always False once results exist,
    so continuation rests entirely on the cursor comparison; confirm
    termination against the API's cursorNext semantics.
    """
    url = feed.get('url', client.url)
    fetch_time = feed.get('fetch_time')
    start_date, end_date = parse_date_range(fetch_time, utc=True, to_timestamp=True)
    integration_context = get_integration_context()
    last_fetch = integration_context.get(f"{feed.get('indicator_type')}_fetch_time")
    params = {'lastUpdatedFrom': last_fetch if last_fetch else start_date}
    result: List[Dict] = []
    should_continue = True
    while should_continue:
        r = requests.get(
            url=url,
            verify=client.verify,
            auth=client.auth,
            cert=client.cert,
            headers=client.headers,
            params=params,
            **kwargs
        )
        try:
            r.raise_for_status()
            data = r.json()
            current_result = jmespath.search(expression=feed.get('extractor'), data=data)
            if current_result:
                result = result + current_result
            # gets next page reference and handles paging.
            should_continue = len(result) < limit if result else True
            should_continue = should_continue or data.get('cursorNext') != params.get('cursor')
            params['cursor'] = data.get('cursorNext') if should_continue else ''
        except ValueError as VE:
            # Fixed typo in the user-facing error ("massage" -> "message").
            raise ValueError(f'Could not parse returned data to Json. \n\nError message: {VE}')
    # Remember where this run ended so the next run resumes from there.
    set_integration_context({f"{feed.get('indicator_type')}_fetch_time": str(end_date)})
    return result
def custom_build_relationships(feed_config: dict, mapping: dict, indicator_data: dict):
    """Build indicator relationships to the related malware family.

    Returns a list with one serialized EntityRelationship, or an empty list
    when the indicator has no related family. (The original implicitly
    returned None in that case, which breaks callers that iterate or
    concatenate the result.)
    """
    if indicator_data.get(mapping.get('relation_entity_b')):
        relationship = EntityRelationship(
            name=feed_config.get('relation_name'),
            entity_a=indicator_data.get('value'),
            entity_a_type=indicator_data.get('type'),
            entity_b=indicator_data.get(mapping.get('relation_entity_b')),
            entity_b_type=feed_config.get('relation_entity_b_type'),
        )
        return [relationship.to_indicator()]
    return []
def main():
    """Assemble the per-indicator-type feed configuration and run the feed."""
    params = {k: v for k, v in demisto.params().items() if v is not None}
    params['headers'] = {'user-agent': USER_AGENT}
    urls = get_params_by_indicator_type(**params)
    params['feed_name_to_config'] = {}
    for indicator_type in urls:
        params['feed_name_to_config'][indicator_type] = {
            'url': urls.get(indicator_type),
            'extractor': 'indicators[*].data',
            'indicator_type': FEED_INDICATOR_TYPES.get(indicator_type),
            'indicator': INDICATOR_VALUE_FIELD.get(indicator_type),
            'flat_json_with_prefix': True,
            'mapping': MAPPING.get(indicator_type),
            'custom_build_iterator': custom_build_iterator,
            'fetch_time': params.get('fetch_time', '7 days'),
            'relation_entity_b_type': 'Malware',
            'relation_name': EntityRelationship.Relationships.COMMUNICATES_WITH,
            'create_relations_function': custom_build_relationships,
        }
    feed_main(params, 'Intel471 Malware Indicators Feed', 'intel471-indicators')
# Standard XSOAR entry-point guard (matches script execution under py2/py3).
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
What is Highpower you ask?
High Power competition is shot with center fire rifles divided into two categories.
2 Match rifle, which consist of bolt guns or modified AR-15's with "iron" (non telescopic) match sights, like these examples below.
The courses of fire consist of four separate matches shot in three different shooting positions at three different distances.
The 80 round "National Match" course.
20 shots slow fire from "standing" at 200 yards.
20 shots rapid fire from "sitting" at 200 yards.
20 shots rapid fire from "prone" at 300 yards.
20 Shots slow fire from "prone" at 600 yards. The 50 round "National Match" course.
10 shots slow fire from "standing" at 200 yards.
10 shots rapid fire from "sitting" at 200 yards.
10 shots rapid fire from "prone" at 300 yards.
*** Note: At Central Jersey we fire the 80 round course for all 4th Sunday NRA matches, while the CMP John C. Garand matches fire the 50 round course. All positions can be shot at reduced range with the appropriate reduced targets, here at Central Jersey we fire at 200 and 300 yards only. Each shooter has the option of firing the entire match at 200 yards or of move back to 300 yards for the prone stages.
You start in Standing position, at the 200 yard line. Standing is fired "slow fire" loading one shot at a time. You are allowed one minute per shot and are also allowed two additional minutes for two "sighting" shots.
The 80 round course is 22 shots, two sighting shots and 20 shots for record in 22 minutes.
The 50 round course is 12 shots, two sighting shots and 10 shots for record in 12 minutes.
Next is Sitting position, also shot at the 200 yard line. Sitting position is shot "rapid fire", 10 shots in sixty seconds with a mandatory reload. With ammo/magazines on the ground (mat) or shooting stool not to be touched, with bolts remaining open & rifles out of the shoulder, awaiting the command to commence fire. In your firing position with targets up, at the command to commence fire, load with two or five rounds for service rifles or five rounds for match rifles. Fire your two or five rounds, reload (with the remaining eight or five rounds) & complete firing before the cease fire command within the allowed sixty seconds.
For the 80 round course you shoot two strings of 10 shots.
For the 50 round course you shoot one string of 10 shots.
The third match is shot from the "Prone" position at 200 or 300 yards. Just like "Sitting" it is shot "rapid fire" only now you have seventy seconds for ten rounds. As in Sitting you are allowed two minutes for two sighting shots.
Note: All positions can be shot at reduced range with the appropriate reduced targets, here at Central Jersey we fire at 200 and 300 yards only. Each shooter has the option of firing the entire match at 200 yards or of move back to 300 yards for the prone stages.
The final match is shot from the "prone" position, only now in "slow fire" from the 200 or 300 yard line.
You are allowed twenty two minutes for two "sighting" shots and twenty shots for record.
|
"""Scraper for D.C. Circuit of Appeals
CourtID: cadc
Court Short Name: cadc
Author: Andrei Chelaru
Reviewer: mlr
Date created: 18 July 2014
"""
from datetime import datetime, date
from juriscraper.OralArgumentSite import OralArgumentSite
class Site(OralArgumentSite):
    """Scraper for D.C. Circuit oral argument recordings.

    Builds a Domino view URL keyed on a 'YYYYMM' string for the current
    month; back-scraping iterates every month from 2007 through the
    current year.
    """
    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        d = date.today()
        # For pinning the scrape to a fixed month while debugging:
        #d = date(month=5, day=1, year=2014)
        self.url = 'http://www.cadc.uscourts.gov/recordings/recordings.nsf/DocsByRDate?OpenView&count=100&SKey={yearmo}'.format(
            yearmo=d.strftime('%Y%m')
        )
        # One 'YYYYMM' key per month, 2007..current year inclusive.
        self.back_scrape_iterable = ["%s%02d" % (year, month) for year in
                                     range(2007, d.year + 1) for month in
                                     range(1, 13)]
    def _get_download_urls(self):
        """Return the href of every recording link in the view body."""
        path = "id('ViewBody')//div[contains(concat(' ',@class,' '),' row-entry')]//@href"
        return list(self.html.xpath(path))
    def _get_case_names(self):
        """Return the case-name text from the second column of each row."""
        path = "id('ViewBody')//*[contains(concat(' ',@class,' '),' column-two')]/div[1]/text()"
        return list(self.html.xpath(path))
    def _get_case_dates(self):
        """Return a date object for each <date> element in the view."""
        path = "id('ViewBody')//date/text()"
        # List comprehension instead of map() for consistency with the other
        # getters and so a real list is returned on both Python 2 and 3.
        return [self._return_case_date(e) for e in self.html.xpath(path)]
    @staticmethod
    def _return_case_date(e):
        """Parse an 'MM/DD/YYYY' string (all whitespace ignored) to a date."""
        e = ''.join(e.split())
        return datetime.strptime(e, '%m/%d/%Y').date()
    def _get_docket_numbers(self):
        """Return the docket-number anchor text of each row."""
        path = "id('ViewBody')//*[contains(concat(' ',@class,' '),' row-entry')]//a//text()"
        return list(self.html.xpath(path))
    def _get_judges(self):
        """Return the judges line for each row, whitespace-normalized."""
        path = '//div[span[contains(., "Judges")]]/text()'
        return [' '.join(s.split()) for s in self.html.xpath(path)]
    def _download_backwards(self, yearmo):
        """Point self.url at the given 'YYYYMM' key and re-download."""
        self.url = 'http://www.cadc.uscourts.gov/recordings/recordings.nsf/DocsByRDate?OpenView&count=100&SKey={yearmo}'.format(
            yearmo=yearmo,
        )
        self.html = self._download()
|
Whether it’s a simple natural to artificial lawn conversion or a new build commercial landscaping project incorporating a water harvesting system, Bradleys offer a range of systems for landscaping solutions.
Artificial turf products for landscaping purposes have reached new highs over the last few years with products now more realistic in appearance than ever before.
Artificial turf lawns are now widespread and increasingly the choice for people looking for a perfect looking lawn all year round that requires little maintenance. With all the attributes of an attractive natural lawn, Bradleys ensure that the artificial turf products they use look fantastic day after day in all weather conditions.
All surfaces are 100% pet friendly with many pet owners now choosing artificial turf over natural turf to create a more manageable garden. Pets can make growing a good lawn problematic and trail soil through the house, which can be frustrating for owners who then opt for artificial turf lawn as an easy solution.
Collecting rainwater is a sustainable and environmentally responsible way to combat rising water costs. Using experience gained by working in other parts of the world, the team at Bradleys Surfacing Systems has developed and tested a water harvesting system, designed for domestic properties, schools and commercial buildings. A typical system would collect water via a synthetic turf lawn, store the water underground and make it available above ground via a pump. Rainwater from roofs and other sources can be linked to the system to increase the amount of water collected.
• Rainwater harvesting is a sustainable way to use water at home and reduces the cost of water bills.
• The system is completely out of sight, as the water is stored underground – unlike unsightly water butts which are an eyesore in small gardens.
• The system involves water attenuation, holding water locally is an effective way to help reduce flooding.
• Potential for system to be plumbed to supply water inside properties.
• The system has all the benefits of a normal synthetic lawn; no need to own a lawn mower, or mow the lawn, leaving you with a clean and perfect looking garden all year round.
Our helpful brochure provides more information and is available for download via this link. Alternatively, please use our online enquiry form to contact us and we will get back to you as soon as possible.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Auxiliary script used to send data between ports on guests.
@copyright: 2010 Red Hat, Inc.
@author: Jiri Zupka (jzupka@redhat.com)
@author: Lukas Doktor (ldoktor@redhat.com)
"""
import threading
from threading import Thread
import os, select, re, random, sys, array, stat
import fcntl, traceback, signal, time
DEBUGPATH = "/sys/kernel/debug"
SYSFSPATH = "/sys/class/virtio-ports/"
DEVPATH = "/dev/virtio-ports/"
exiting = False
class VirtioGuest:
    """
    Test tools of virtio_ports.

    Keeps a registry of opened port file descriptors and worker threads,
    and reports results as "PASS:"/"FAIL:" lines that the host side parses.
    """
    # Data-switching strategies used by loopback() / Switch.run().
    LOOP_NONE = 0
    LOOP_POLL = 1
    LOOP_SELECT = 2
    def __init__(self):
        # Map of device path -> open file descriptor.
        self.files = {}
        # Set to ask all Switch/Sender threads to stop.
        self.exit_thread = threading.Event()
        self.threads = []
        # Map of port name -> port property dict (see _get_port_status).
        self.ports = {}
        # Map of fd -> [expected poll mask, accumulated poll mask] for SIGIO.
        self.poll_fds = {}
        # Tri-state: True = install SIGIO handler, False = reset it,
        # None = no change requested (consumed by catching_signal()).
        self.catch_signal = None
        self.use_config = threading.Event()
    def _readfile(self, name):
        """
        Read file and return content as string

        @param name: Name of file
        @return: Content of file as string (empty string on open failure)
        """
        out = ""
        try:
            f = open(name, "r")
            out = f.read()
            f.close()
        except Exception:
            print "FAIL: Cannot open file %s" % (name)
        return out
    def _get_port_status(self, in_files=None):
        """
        Get info about ports from kernel debugfs.

        @param in_files: Array of input files.
        @return: Ports dictionary of port properties
        """
        ports = {}
        not_present_msg = "FAIL: There's no virtio-ports dir in debugfs"
        # Mount debugfs on demand; port metadata lives under it.
        if not os.path.ismount(DEBUGPATH):
            os.system('mount -t debugfs none %s' % (DEBUGPATH))
        try:
            if not os.path.isdir('%s/virtio-ports' % (DEBUGPATH)):
                print not_present_msg
        except Exception:
            print not_present_msg
        else:
            viop_names = os.listdir('%s/virtio-ports' % (DEBUGPATH))
            if in_files is not None:
                # Cross-check /dev and debugfs against the expected port list.
                dev_names = os.listdir('/dev')
                rep = re.compile(r"vport[0-9]p[0-9]+")
                dev_names = filter(lambda x: rep.match(x) is not None, dev_names)
                if len(dev_names) != len(in_files):
                    print ("FAIL: Not all ports were successfully initialized "
                           "in /dev, only %d from %d." % (len(dev_names),
                                                          len(in_files)))
                    return
                if len(viop_names) != len(in_files):
                    print ("FAIL: Not all ports were successfuly initialized "
                           "in debugfs, only %d from %d." % (len(viop_names),
                                                             len(in_files)))
                    return
            for name in viop_names:
                open_db_file = "%s/virtio-ports/%s" % (DEBUGPATH, name)
                f = open(open_db_file, 'r')
                port = {}
                file = []
                for line in iter(f):
                    file.append(line)
                try:
                    # Each debugfs line is "key: value".
                    for line in file:
                        m = re.match("(\S+): (\S+)", line)
                        port[m.group(1)] = m.group(2)
                    if port['is_console'] == "yes":
                        port["path"] = "/dev/hvc%s" % (port["console_vtermno"])
                        # Console works like a serialport
                    else:
                        port["path"] = "/dev/%s" % name
                    if not os.path.exists(port['path']):
                        print "FAIL: %s not exist" % port['path']
                    sysfspath = SYSFSPATH + name
                    if not os.path.isdir(sysfspath):
                        print "FAIL: %s not exist" % (sysfspath)
                    info_name = sysfspath + "/name"
                    port_name = self._readfile(info_name).strip()
                    if port_name != port["name"]:
                        print ("FAIL: Port info does not match "
                               "\n%s - %s\n%s - %s" %
                               (info_name , port_name,
                                "%s/virtio-ports/%s" % (DEBUGPATH, name),
                                port["name"]))
                    dev_ppath = DEVPATH + port_name
                    if not os.path.exists(dev_ppath):
                        print "FAIL: Symlink %s does not exist." % dev_ppath
                    # NOTE(review): double negative — this branch only fires
                    # when realpath(dev_ppath) == "/dev/name"; looks suspicious,
                    # confirm the intended check.
                    if not os.path.realpath(dev_ppath) != "/dev/name":
                        print "FAIL: Symlink %s is not correct." % dev_ppath
                except AttributeError:
                    # re.match returned None for a malformed line.
                    print ("Bad data on file %s:\n%s. " %
                           (open_db_file, "".join(file).strip()))
                    print "FAIL: Bad data on file %s." % open_db_file
                    return
                ports[port['name']] = port
                f.close()
        return ports
    def check_zero_sym(self):
        """
        Check if port /dev/vport0p0 was created.
        """
        symlink = "/dev/vport0p0"
        if os.path.exists(symlink):
            print "PASS: Symlink %s exists." % symlink
        else:
            print "FAIL: Symlink %s does not exist." % symlink
    def init(self, in_files):
        """
        Init and check port properties.
        """
        self.ports = self._get_port_status(in_files)
        if self.ports is None:
            return
        # Each in_files item is (port name, expected "is_console" value).
        for item in in_files:
            if (item[1] != self.ports[item[0]]["is_console"]):
                print self.ports
                print "FAIL: Host console is not like console on guest side\n"
                return
        print "PASS: Init and check virtioconsole files in system."
    class Switch(Thread):
        """
        Thread that sends data between ports.
        """
        def __init__ (self, in_files, out_files, event,
                      cachesize=1024, method=0):
            """
            @param in_files: Array of input files.
            @param out_files: Array of output files.
            @param method: Method of read/write access.
            @param cachesize: Block to receive and send.
            """
            Thread.__init__(self, name="Switch")
            self.in_files = in_files
            self.out_files = out_files
            self.exit_thread = event
            self.method = method
            self.cachesize = cachesize
        def _none_mode(self):
            """
            Read and write to device in blocking mode
            """
            data = ""
            while not self.exit_thread.isSet():
                data = ""
                for desc in self.in_files:
                    data += os.read(desc, self.cachesize)
                if data != "":
                    for desc in self.out_files:
                        os.write(desc, data)
        def _poll_mode(self):
            """
            Read and write to device in polling mode.
            """
            pi = select.poll()
            po = select.poll()
            for fd in self.in_files:
                pi.register(fd, select.POLLIN)
            for fd in self.out_files:
                po.register(fd, select.POLLOUT)
            while not self.exit_thread.isSet():
                data = ""
                t_out = self.out_files
                readyf = pi.poll(1.0)
                for i in readyf:
                    data += os.read(i[0], self.cachesize)
                if data != "":
                    # Wait until every output fd reports writable.
                    while ((len(t_out) != len(readyf)) and not
                           self.exit_thread.isSet()):
                        readyf = po.poll(1.0)
                    for desc in t_out:
                        os.write(desc, data)
        def _select_mode(self):
            """
            Read and write to device in selecting mode.
            """
            while not self.exit_thread.isSet():
                ret = select.select(self.in_files, [], [], 1.0)
                data = ""
                if ret[0] != []:
                    for desc in ret[0]:
                        data += os.read(desc, self.cachesize)
                if data != "":
                    ret = select.select([], self.out_files, [], 1.0)
                    # Wait until every output fd reports writable.
                    while ((len(self.out_files) != len(ret[1])) and not
                           self.exit_thread.isSet()):
                        ret = select.select([], self.out_files, [], 1.0)
                    for desc in ret[1]:
                        os.write(desc, data)
        def run(self):
            # Dispatch on the configured loop strategy.
            if (self.method == VirtioGuest.LOOP_POLL):
                self._poll_mode()
            elif (self.method == VirtioGuest.LOOP_SELECT):
                self._select_mode()
            else:
                self._none_mode()
    class Sender(Thread):
        """
        Creates a thread which sends random blocks of data to dst port.
        """
        def __init__(self, port, event, length):
            """
            @param port: Destination port
            @param length: Length of the random data block
            """
            Thread.__init__(self, name="Sender")
            self.port = port
            self.exit_thread = event
            self.data = array.array('L')
            # Integer division (Python 2); always send at least one item.
            for i in range(max(length / self.data.itemsize, 1)):
                self.data.append(random.randrange(sys.maxint))
        def run(self):
            while not self.exit_thread.isSet():
                os.write(self.port, self.data)
    def _open(self, in_files):
        """
        Open devices and return array of descriptors

        @param in_files: Files array
        @return: Array of descriptor
        """
        f = []
        for item in in_files:
            name = self.ports[item]["path"]
            if (name in self.files):
                f.append(self.files[name])
            else:
                try:
                    self.files[name] = os.open(name, os.O_RDWR)
                    # Consoles need raw mode so the tty layer doesn't
                    # mangle the test data.
                    if (self.ports[item]["is_console"] == "yes"):
                        print os.system("stty -F %s raw -echo" % (name))
                        print os.system("stty -F %s -a" % (name))
                    f.append(self.files[name])
                except Exception, inst:
                    print "FAIL: Failed to open file %s" % (name)
                    raise inst
        return f
    @staticmethod
    def pollmask_to_str(mask):
        """
        Convert poll mask to string

        @param mask: poll return mask
        """
        str = ""
        if (mask & select.POLLIN):
            str += "IN "
        if (mask & select.POLLPRI):
            str += "PRI IN "
        if (mask & select.POLLOUT):
            str += "OUT "
        if (mask & select.POLLERR):
            str += "ERR "
        if (mask & select.POLLHUP):
            str += "HUP "
        if (mask & select.POLLMSG):
            str += "MSG "
        return str
    def poll(self, port, expected, timeout=500):
        """
        Poll events from device and print them as text.

        @param port: Port name to poll.
        @param expected: Event mask that must be present for PASS.
        @param timeout: Poll timeout in milliseconds.
        """
        in_f = self._open([port])
        p = select.poll()
        p.register(in_f[0])
        mask = p.poll(timeout)
        maskstr = VirtioGuest.pollmask_to_str(mask[0][1])
        # PASS when all expected bits are present (extra bits allowed).
        if (mask[0][1] & expected) == expected:
            print "PASS: Events: " + maskstr
        else:
            emaskstr = VirtioGuest.pollmask_to_str(expected)
            print "FAIL: Events: " + maskstr + " Expected: " + emaskstr
    def lseek(self, port, pos, how):
        """
        Use lseek on the device. The device is unseekable so PASS is returned
        when lseek command fails and vice versa.

        @param port: Name of the port
        @param pos: Offset
        @param how: Relative offset os.SEEK_{SET,CUR,END}
        """
        fd = self._open([port])[0]
        try:
            os.lseek(fd, pos, how)
        except Exception, inst:
            # errno 29 == ESPIPE (illegal seek) — the expected failure.
            if inst.errno == 29:
                print "PASS: the lseek failed as expected"
            else:
                print inst
                print "FAIL: unknown error"
        else:
            print "FAIL: the lseek unexpectedly passed"
    def blocking(self, port, mode=False):
        """
        Set port function mode blocking/nonblocking

        @param port: port to set mode
        @param mode: False to set nonblock mode, True for block mode
        """
        fd = self._open([port])[0]
        try:
            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
            if not mode:
                fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
            else:
                fcntl.fcntl(fd, fcntl.F_SETFL, fl & ~os.O_NONBLOCK)
        except Exception, inst:
            print "FAIL: Setting (non)blocking mode: " + str(inst)
            return
        if mode:
            print "PASS: set to blocking mode"
        else:
            print "PASS: set to nonblocking mode"
    def __call__(self, sig, frame):
        """
        Call function. Used for signal handle.
        """
        # The instance itself is installed as the SIGIO handler (see
        # Daemon.run), so dispatch to sigio_handler here.
        if (sig == signal.SIGIO):
            self.sigio_handler(sig, frame)
    def sigio_handler(self, sig, frame):
        """
        Handler for sigio operation.

        @param sig: signal which call handler.
        @param frame: frame of caller
        """
        if self.poll_fds:
            p = select.poll()
            map(p.register, self.poll_fds.keys())
            masks = p.poll(1)
            print masks
            # Accumulate observed events per fd for later inspection.
            for mask in masks:
                self.poll_fds[mask[0]][1] |= mask[1]
    def get_sigio_poll_return(self, port):
        """
        Return PASS, FAIL and poll value in string format.

        @param port: Port to check poll information.
        """
        fd = self._open([port])[0]
        maskstr = VirtioGuest.pollmask_to_str(self.poll_fds[fd][1])
        # XOR != 0 means observed events differ from the expected mask.
        if (self.poll_fds[fd][0] ^ self.poll_fds[fd][1]):
            emaskstr = VirtioGuest.pollmask_to_str(self.poll_fds[fd][0])
            print "FAIL: Events: " + maskstr + " Expected: " + emaskstr
        else:
            print "PASS: Events: " + maskstr
        self.poll_fds[fd][1] = 0
    def set_pool_want_return(self, port, poll_value):
        """
        Set value to static variable.

        @param port: Port which should be set expected mask
        @param poll_value: Value to check sigio signal.
        """
        fd = self._open([port])[0]
        self.poll_fds[fd] = [poll_value, 0]
        print "PASS: Events: " + VirtioGuest.pollmask_to_str(poll_value)
    def catching_signal(self):
        """
        return: True if should set catch signal, False if ignore signal and
        none when configuration is not changed.
        """
        # One-shot read: the request is consumed (reset to None).
        ret = self.catch_signal
        self.catch_signal = None
        return ret
    def async(self, port, mode=True, exp_val=0):
        """
        Set port function mode async/sync.

        @param port: port which should be pooled.
        @param mode: False to set sync mode, True for sync mode.
        @param exp_val: Value which should be pooled.
        """
        # NOTE(review): 'async' is a reserved word on Python 3.7+;
        # this module targets Python 2.
        fd = self._open([port])[0]
        try:
            fcntl.fcntl(fd, fcntl.F_SETOWN, os.getpid())
            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
            self.use_config.clear()
            if mode:
                fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_ASYNC)
                self.poll_fds[fd] = [exp_val, 0]
                self.catch_signal = True
            else:
                del self.poll_fds[fd]
                fcntl.fcntl(fd, fcntl.F_SETFL, fl & ~os.O_ASYNC)
                self.catch_signal = False
            # Wake the main loop so it (un)installs the SIGIO handler,
            # then wait for it to confirm via use_config.
            os.kill(os.getpid(), signal.SIGUSR1)
            self.use_config.wait()
        except Exception, inst:
            print "FAIL: Setting (a)sync mode: " + str(inst)
            return
        if mode:
            print "PASS: Set to async mode"
        else:
            print "PASS: Set to sync mode"
    def close(self, file):
        """
        Close open port.

        @param file: File to close.
        """
        descriptor = None
        path = self.ports[file]["path"]
        if path is not None:
            if path in self.files.keys():
                descriptor = self.files[path]
                del self.files[path]
            if descriptor is not None:
                try:
                    os.close(descriptor)
                except Exception, inst:
                    print "FAIL: Closing the file: " + str(inst)
                    return
        print "PASS: Close"
    def open(self, in_file):
        """
        Direct open devices.

        @param in_file: Array of files.
        @return: Array of descriptors.
        """
        name = self.ports[in_file]["path"]
        try:
            self.files[name] = os.open(name, os.O_RDWR)
            if (self.ports[in_file]["is_console"] == "yes"):
                print os.system("stty -F %s raw -echo" % (name))
            print "PASS: Open all filles correctly."
        except Exception, inst:
            print "%s\nFAIL: Failed open file %s" % (str(inst), name)
    def loopback(self, in_files, out_files, cachesize=1024, mode=LOOP_NONE):
        """
        Start a switch thread.

        (There is a problem with multiple opens of a single file).

        @param in_files: Array of input files.
        @param out_files: Array of output files.
        @param cachesize: Cachesize.
        @param mode: Mode of switch.
        """
        self.ports = self._get_port_status()
        in_f = self._open(in_files)
        out_f = self._open(out_files)
        s = self.Switch(in_f, out_f, self.exit_thread, cachesize, mode)
        s.start()
        self.threads.append(s)
        print "PASS: Start switch"
    def exit_threads(self):
        """
        Function end all running data switch.
        """
        self.exit_thread.set()
        for th in self.threads:
            print "join"
            th.join()
        self.exit_thread.clear()
        del self.threads[:]
        # Close every fd we opened and forget them.
        for desc in self.files.itervalues():
            os.close(desc)
        self.files.clear()
        print "PASS: All threads finished"
    def die(self):
        """
        Quit consoleswitch.
        """
        self.exit_threads()
        exit()
    def send_loop_init(self, port, length):
        """
        Prepares the sender thread. Requires clean thread structure.
        """
        self.ports = self._get_port_status()
        in_f = self._open([port])
        self.threads.append(self.Sender(in_f[0], self.exit_thread, length))
        print "PASS: Sender prepare"
    def send_loop(self):
        """
        Start sender data transfer. Requires senderprepare run first.
        """
        self.threads[0].start()
        print "PASS: Sender start"
    def send(self, port, length=1, mode=True, is_static=False):
        """
        Send a data of some length

        @param port: Port to write data
        @param length: Length of data
        @param mode: True = loop mode, False = one shoot mode
        """
        in_f = self._open([port])
        data = ""
        writes = 0
        if not is_static:
            # Fresh random payload of exactly `length` bytes, sent once.
            while len(data) < length:
                data += "%c" % random.randrange(255)
            try:
                writes = os.write(in_f[0], data)
            except Exception, inst:
                print inst
        else:
            # Fixed 4 KiB block reused for the loop below.
            while len(data) < 4096:
                data += "%c" % random.randrange(255)
        if mode:
            while (writes < length):
                try:
                    writes += os.write(in_f[0], data)
                except Exception, inst:
                    print inst
        if writes >= length:
            print "PASS: Send data length %d" % writes
        else:
            print ("FAIL: Partial send: desired %d, transfered %d" %
                   (length, writes))
    def recv(self, port, length=1, buffer=1024, mode=True):
        """
        Recv a data of some length

        @param port: Port to write data
        @param length: Length of data
        @param mode: True = loop mode, False = one shoot mode
        """
        in_f = self._open([port])
        recvs = ""
        try:
            recvs = os.read(in_f[0], buffer)
        except Exception, inst:
            print inst
        if mode:
            while (len(recvs) < length):
                try:
                    recvs += os.read(in_f[0], buffer)
                except Exception, inst:
                    print inst
        if len(recvs) >= length:
            print "PASS: Recv data length %d" % len(recvs)
        else:
            print ("FAIL: Partial recv: desired %d, transfered %d" %
                   (length, len(recvs)))
    def clean_port(self, port, buffer=1024):
        """
        Drain and report up to *buffer* leftover bytes waiting on the port.

        @param port: Port to drain.
        @param buffer: Maximum number of bytes to read.
        """
        in_f = self._open([port])
        ret = select.select([in_f[0]], [], [], 1.0)
        buf = ""
        if ret[0]:
            buf = os.read(in_f[0], buffer)
        # Prints the message concatenated with the first 10 drained bytes.
        print ("PASS: Rest in socket: ") + str(buf[:10])
def is_alive():
    """
    Check is only main thread is alive and if guest react.
    """
    # Exactly 2 = the main thread plus the worker thread; anything more
    # means a Switch/Sender thread is still running.
    if threading.activeCount() == 2:
        print ("PASS: Guest is ok no thread alive")
    else:
        threads = ""
        for thread in threading.enumerate():
            threads += thread.name + ", "
        print ("FAIL: On guest run thread. Active thread:" + threads)
def compile():
    """
    Compile virtio_console_guest.py to speed up.

    Exits the process afterwards (the host re-runs the compiled file).
    """
    # NOTE(review): shadows the builtin compile(); kept because the host
    # side invokes this entry point by name (see main()).
    import py_compile
    py_compile.compile(sys.path[0] + "/virtio_console_guest.py")
    print "PASS: compile"
    sys.exit()
def guest_exit():
    """Request shutdown: worker and daemon loops exit when `exiting` is set."""
    global exiting
    exiting = True
def worker(virt):
    """
    Worker thread (infinite) loop of virtio_guest.

    Reads one command line at a time from stdin and exec()s it with
    `virt` in scope; the host drives the guest through this channel.
    """
    global exiting
    print "PASS: Daemon start."
    p = select.poll()
    p.register(sys.stdin.fileno())
    while not exiting:
        d = p.poll()
        if (d[0][1] == select.POLLIN):
            # `str` shadows the builtin here; kept as-is (code unchanged).
            str = raw_input()
            try:
                # Security note: exec of host-supplied text — acceptable only
                # because stdin is the trusted host<->guest test channel.
                exec str
            except Exception:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                print "On Guest exception from: \n" + "".join(
                    traceback.format_exception(exc_type,
                                               exc_value,
                                               exc_traceback))
                print "FAIL: Guest command exception."
        elif (d[0][1] & select.POLLHUP):
            # Peer closed the pipe; back off and keep polling.
            time.sleep(0.5)
def sigusr_handler(sig, frame):
    """No-op handler: SIGUSR1/SIGALRM only interrupt signal.pause() in Daemon.run."""
    pass
class Daemon:
    """
    Daemonize guest

    Double-forks into the background and rewires stdio onto the three
    FIFO paths so the host can talk to the daemonized guest.
    """
    def __init__(self, stdin, stdout, stderr):
        """
        Init daemon.

        @param stdin: path to stdin file.
        @param stdout: path to stdout file.
        @param stderr: path to stderr file.
        """
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
    @staticmethod
    def is_file_open(path):
        """
        Determine process which open file.

        @param path: Path to file.
        @return [[pid,mode], ... ].
        """
        opens = []
        pids = os.listdir('/proc')
        for pid in sorted(pids):
            # Skip non-numeric /proc entries.
            try:
                int(pid)
            except ValueError:
                continue
            fd_dir = os.path.join('/proc', pid, 'fd')
            try:
                for file in os.listdir(fd_dir):
                    try:
                        p = os.path.join(fd_dir, file)
                        link = os.readlink(os.path.join(fd_dir, file))
                        if link == path:
                            mode = os.lstat(p).st_mode
                            opens.append([pid, mode])
                    except OSError:
                        # fd vanished while we were scanning; ignore.
                        continue
            except OSError, e:
                # errno 2 (ENOENT): process exited between listdir and here.
                if e.errno == 2:
                    continue
                raise
        return opens
    def daemonize(self):
        """
        Run guest as a daemon.

        @return: True in the daemonized child, False in the original parent.
        """
        try:
            pid = os.fork()
            if pid > 0:
                # First parent returns to the caller.
                return False
        except OSError, e:
            sys.stderr.write("Daemonize failed: %s\n" % (e))
            sys.exit(1)
        os.chdir("/")
        os.setsid()
        os.umask(0)
        try:
            pid = os.fork()
            if pid > 0:
                # Second parent exits; grandchild survives with no tty.
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("Daemonize failed: %s\n" % (e))
            sys.exit(1)
        sys.stdout.flush()
        sys.stderr.flush()
        # Rewire stdio onto the FIFOs, unbuffered (bufsize 0).
        si = file(self.stdin,'r')
        so = file(self.stdout,'w')
        se = file(self.stderr,'w')
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
        sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
        return True
    def start(self):
        """
        Start the daemon

        @return: PID of daemon.
        """
        # Check for a pidfile to see if the daemon already runs
        openers = self.is_file_open(self.stdout)
        rundaemon = False
        if len(openers) > 0:
            # A writer on the stdout FIFO means a daemon is already running;
            # any other (stale) holders of the FIFO get killed.
            for i in openers:
                if i[1] & stat.S_IWUSR:
                    rundaemon = True
                    openers.remove(i)
            if len(openers) > 0:
                for i in openers:
                    os.kill(int(i[0]), 9)
        time.sleep(0.3)
        # Start the daemon
        if not rundaemon:
            if self.daemonize():
                self.run()
    def run(self):
        """
        Run guest main thread

        Spawns the worker thread and loops installing/removing the SIGIO
        handler whenever VirtioGuest.async requests it.
        """
        global exiting
        virt = VirtioGuest()
        slave = Thread(target=worker, args=(virt, ))
        slave.start()
        signal.signal(signal.SIGUSR1, sigusr_handler)
        signal.signal(signal.SIGALRM, sigusr_handler)
        while not exiting:
            # Wake at least once a second (SIGALRM) or on SIGUSR1.
            signal.alarm(1)
            signal.pause()
            catch = virt.catching_signal()
            if catch:
                # The VirtioGuest instance is callable and acts as handler.
                signal.signal(signal.SIGIO, virt)
            elif catch is False:
                signal.signal(signal.SIGIO, signal.SIG_DFL)
            if catch is not None:
                # Tell async() the handler change has been applied.
                virt.use_config.set()
        print "PASS: guest_exit"
        sys.exit(0)
def main():
    """
    Main function with infinite loop to catch signal from system.

    Creates the three FIFOs, starts (or reattaches to) the daemon, and
    relays bytes between its own stdio and the daemon's FIFOs until the
    daemon process disappears.
    """
    if (len(sys.argv) > 1) and (sys.argv[1] == "-c"):
        compile()
    stdin = "/tmp/guest_daemon_pi"
    stdout = "/tmp/guest_daemon_po"
    stderr = "/tmp/guest_daemon_pe"
    for f in [stdin, stdout, stderr]:
        try:
            os.mkfifo(f)
        except OSError, e:
            # errno 17 (EEXIST): FIFO already there, fine.
            # NOTE(review): other OSErrors are silently swallowed too —
            # confirm whether they should be re-raised.
            if e.errno == 17:
                pass
    daemon = Daemon(stdin,
                    stdout,
                    stderr)
    daemon.start()
    d_stdin = os.open(stdin, os.O_WRONLY)
    d_stdout = os.open(stdout, os.O_RDONLY)
    d_stderr = os.open(stderr, os.O_RDONLY)
    s_stdin = sys.stdin.fileno()
    s_stdout = sys.stdout.fileno()
    s_stderr = sys.stderr.fileno()
    # PID of the daemon = the other process holding the stdout FIFO open.
    pid = filter(lambda x: x[0] != str(os.getpid()),
                 daemon.is_file_open(stdout))[0][0]
    print "PASS: Start"
    while 1:
        ret = select.select([d_stderr,
                             d_stdout,
                             s_stdin],
                            [], [], 1.0)
        # Relay: our stdin -> daemon stdin; daemon stdout/stderr -> ours.
        if s_stdin in ret[0]:
            os.write(d_stdin,os.read(s_stdin, 1))
        if d_stdout in ret[0]:
            os.write(s_stdout,os.read(d_stdout, 1024))
        if d_stderr in ret[0]:
            os.write(s_stderr,os.read(d_stderr, 1024))
        # Exit once the daemon process is gone.
        if not os.path.exists("/proc/" + pid):
            sys.exit(0)
    os.close(d_stdin)
    os.close(d_stdout)
    os.close(d_stderr)
|
Here at the Recreational Authority of Roseville & Eastpointe, we strive to create opportunities for friends and neighbors to escape their daily routines and enjoy the simpler elements of life. We offer programs that support healthy lifestyles, promote leisure activity, athletics, educational pursuits and family-like support for the entire community. Our friendly staff is always available to answer questions and provide information about the wide array of programs available through the Authority.
Our business hours run from Monday to Friday – 8:30 a.m. to closing time.
If you choose to mail in your registration form, one form is required for each person signing up for each class, along with proper payment.
Program fees must be paid by check, money order, credit or debit M-F after 4:00 p.m. and all day on Saturdays and Sundays.
Checks must be made payable to the R.A.R.E.
The mission of the Recreational Authority of Roseville & Eastpointe is to provide a full range of quality recreational programs, facilities and family-focused events that will increase the overall quality of life for all residents.
When Roseville Community Schools AND Eastpointe Community Schools are closed due to inclement weather (i.e. “snow day”), all programs and sports practices affiliated with the Recreation Authority will be canceled. In addition, all scheduled Senior programs and drop-in activities will also be canceled.
The Recreation Authority prides itself on providing its guests clean and well-maintained facilities. In order to ensure this high standard, we will be shut down for programs and activities from: Saturday, August 17th thru Monday, September 2nd.
Our building will remain open to process registrations only, during the business hours of 8:30 a.m. - 4:00 p.m., Monday through Friday.
Please visit our Facebook page to get the latest news from the Recreation Authority. Our Facebook page is updated regularly and we encourage our Facebook followers to interact with our posts and share our news updates.
Through funding provided by the Roseville Optimist Club, the Recreation Authority offers activity scholarships for Authority Members in financial need.
We believe everyone should have the opportunity to enjoy quality recreational programming. Depending on the size of your family and income, you may qualify. All you need to do is complete an Activity Scholarship Application form. It's easy and confidential to apply. Applications are available at the Recreation Center. Children may receive one scholarship per calendar year (November 1st - October 31st).
The Right Program for You is a Call Away!
Discover the many recreation opportunities available within the local community today! Contact our offices and speak directly with one of our helpful staff members. There are programs available for every age.
|
import requests
from .session import Session
from .visualization import Visualization, VisualizationLocal
class Lightning(object):
    """Client for a Lightning visualization server.

    Handles host configuration, optional iPython notebook integration,
    optional local (serverless) mode, sessions, figure sizing, and
    basic HTTP authentication.
    """
    def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium'):
        if ipython:
            self.startup_message_ipython()
        else:
            self.startup_message()
        if local:
            self.enable_local()
        else:
            # Server mode: configure host/auth and verify connectivity.
            self.local_enabled = False
            self.set_host(host)
            self.auth = auth
            if auth is not None:
                if isinstance(auth, tuple):
                    self.set_basic_auth(auth[0], auth[1])
            status = self.check_status()
            if not status:
                raise ValueError("Could not access server")
        if ipython:
            self.enable_ipython()
            self.set_size(size)
        else:
            self.ipython_enabled = False
            self.set_size('full')
    def __repr__(self):
        # Shows host ('local' in local mode) and current session id, if any.
        s = 'Lightning\n'
        if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
            s += 'host: %s\n' % self.host
        if self.local_enabled:
            s += 'host: local\n'
        if hasattr(self, 'session') and self.session is not None:
            s += 'session: %s\n' % self.session.id
        return s
    def get_ipython_markup_link(self):
        """Return the URL of the iPython comm javascript on the server."""
        return '%s/js/ipython-comm.js' % self.host
    def enable_ipython(self, **kwargs):
        """
        Enable plotting in the iPython notebook.

        Once enabled, all lightning plots will be automatically produced
        within the iPython notebook. They will also be available on
        your lightning server within the current session.
        """
        # inspired by code powering similar functionality in mpld3
        # https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
        from IPython.core.getipython import get_ipython
        from IPython.display import display, Javascript, HTML
        self.ipython_enabled = True
        self.set_size('medium')
        ip = get_ipython()
        formatter = ip.display_formatter.formatters['text/html']
        if self.local_enabled:
            from lightning.visualization import VisualizationLocal
            js = VisualizationLocal.load_embed()
            display(HTML("<script>" + js + "</script>"))
            print('Running local mode, some functionality limited.\n')
            formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
        else:
            formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
            r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
            display(Javascript(r.text))
    def disable_ipython(self):
        """
        Disable plotting in the iPython notebook.

        After disabling, lightning plots will be produced in your lightning server,
        but will not appear in the notebook.
        """
        from IPython.core.getipython import get_ipython
        self.ipython_enabled = False
        ip = get_ipython()
        formatter = ip.display_formatter.formatters['text/html']
        formatter.type_printers.pop(Visualization, None)
        formatter.type_printers.pop(VisualizationLocal, None)
    def create_session(self, name=None):
        """
        Create a lightning session.

        Can create a session with the provided name, otherwise session name
        will be "Session No." with the number automatically generated.
        """
        self.session = Session.create(self, name=name)
        return self.session
    def use_session(self, session_id):
        """
        Use the specified lightning session.

        Specify a lightning session by id number. Check the number of an existing
        session in the attribute lightning.session.id.
        """
        self.session = Session(lgn=self, id=session_id)
        return self.session
    def enable_local(self):
        """
        Enable a local mode.

        Data is handled locally and embedded via templates.
        Does not require a running Lightning server.
        Useful for notebooks, and can be used offline.
        """
        self.local_enabled = True
    def disable_local(self):
        """
        Disable local mode.
        """
        self.local_enabled = False
    def set_basic_auth(self, username, password):
        """
        Set authentication.
        """
        from requests.auth import HTTPBasicAuth
        self.auth = HTTPBasicAuth(username, password)
        return self
    def set_host(self, host):
        """
        Set the host for a lightning server.

        Host can be local (e.g. http://localhost:3000), a heroku
        instance (e.g. http://lightning-test.herokuapp.com), or
        a independently hosted lightning server.
        """
        # Normalize away a single trailing slash.
        if host[-1] == '/':
            host = host[:-1]
        self.host = host
        return self
    def set_size(self, size='medium'):
        """
        Set a figure size using one of four options.

        Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
        and 'full' will use the entire width
        """
        if size not in ['small', 'medium', 'large', 'full']:
            raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
        self.size = size
        # Return self for chaining, consistent with set_host/set_basic_auth.
        return self
    def check_status(self):
        """
        Check the server for status
        """
        try:
            r = requests.get(self.host + '/status', auth=self.auth,
                             timeout=(10.0, 10.0))
            if not r.status_code == requests.codes.ok:
                print("Problem connecting to server at %s" % self.host)
                print("status code: %s" % r.status_code)
                return False
            else:
                print("Connected to server at %s" % self.host)
                return True
        except (requests.exceptions.ConnectionError,
                requests.exceptions.MissingSchema,
                requests.exceptions.InvalidSchema) as e:
            print("Problem connecting to server at %s" % self.host)
            print("error: %s" % e)
            return False
    def startup_message_ipython(self):
        """Show a rich startup banner in the notebook, or fall back to text."""
        import os
        import base64
        try:
            from IPython.display import display, HTML
            icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
            with open(icon, "rb") as imfile:
                im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
            t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
                "style='display: inline-block; padding-right: 10px'>" \
                "</img><span>Lightning initialized</span></div>" % im
            display(HTML(t))
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed; any display failure falls back to text.
            print("Lightning initialized")
    def startup_message(self):
        """Print a plain-text startup banner."""
        print("Lightning initialized")
|
With a beautiful photo-set the "teenage years" will be even more memorable. Alone or with friends, in my studio or outdoor with flattering natural light. I create unique portraits that capture the personality. Teenage portrait photography is also a nice gift for your graduating daughter or son, friend, nephew… Contact me for more information, or just visit my photo gift shop.
|
import os
import json
import urllib
from base64 import b64encode
# external libs
import requests
class Proxy(object):
    """Base class for simple GET proxies against third-party REST APIs.

    Subclasses set URL (the endpoint) and typically extend
    build_headers() with an Authorization header.
    """
    URL = None
    ACCESS_TOKEN = None
    def __init__(self):
        super(Proxy, self).__init__()
    def build_query(self, params):
        """URL-encode *params* (a dict) into a query string."""
        # Local import with fallback: urllib.urlencode exists only on
        # Python 2; on Python 3 it moved to urllib.parse.urlencode.
        try:
            from urllib import urlencode
        except ImportError:
            from urllib.parse import urlencode
        return urlencode(params)
    def build_headers(self):
        """Return the base request headers; subclasses add Authorization."""
        return {
            'Content-Type': 'application/json',
        }
    def get_url(self):
        """Return the endpoint URL for this proxy."""
        return self.URL
    def fetch(self, params):
        """GET get_url() with *params* encoded into the query string.

        Returns the requests Response object.
        """
        response = requests.get(
            url = '?'.join([self.get_url(), self.build_query(params)]),
            headers = self.build_headers()
        )
        return response
class YelpProxy(Proxy):
    """Proxy for the Yelp business-search API using OAuth2 bearer auth."""
    URL = 'https://api.yelp.com/v3/businesses/search'

    def get_access_token(self):
        '''
        Use environment credentials to obtain access token
        Tokens are cached so it takes only one request
        '''
        if self.ACCESS_TOKEN:
            return self.ACCESS_TOKEN
        token_url = 'https://api.yelp.com/oauth2/token'
        payload = {
            'grant_type' : 'client_credentials',
            'client_id' : os.environ['YELP_CLIENT_ID'],
            'client_secret' : os.environ['YELP_CLIENT_SECRET']
        }
        reply = requests.post(token_url, params=payload)
        self.ACCESS_TOKEN = reply.json()['access_token']
        return self.ACCESS_TOKEN

    def build_headers(self):
        """Extend the base headers with the cached bearer token."""
        headers = super(YelpProxy, self).build_headers()
        headers['Authorization'] = 'Bearer {}'.format(self.get_access_token())
        return headers
class TwitterProxy(Proxy):
    """Proxy for the Twitter search API using app-only (bearer token) auth."""
    URL = 'https://api.twitter.com/1.1/search/tweets.json'

    def get_access_token(self):
        """Exchange consumer key/secret for a bearer token; cached after the first call."""
        if not self.ACCESS_TOKEN:
            url = 'https://api.twitter.com/oauth2/token'
            # Encode to bytes before base64 and decode the result back to text:
            # the original passed a str to b64encode and concatenated the bytes
            # result onto 'Basic ', both of which raise TypeError on Python 3.
            # This form works identically on Python 2.
            raw = ':'.join([
                os.environ['TWITTER_CONSUMER_KEY'],
                os.environ['TWITTER_CONSUMER_SECRET']
            ]).encode('utf-8')
            credentials = b64encode(raw).decode('ascii')
            headers = {
                'Authorization': 'Basic ' + credentials,
                'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'
            }
            params = {
                'grant_type': 'client_credentials'
            }
            response = requests.post(url, params=params, headers=headers)
            message = response.json()
            self.ACCESS_TOKEN = message['access_token']
        return self.ACCESS_TOKEN

    def build_headers(self):
        """Extend the base headers with the bearer token."""
        headers = super(TwitterProxy, self).build_headers()
        access_token = self.get_access_token()
        headers.update({
            'Authorization': 'Bearer {}'.format(access_token)
        })
        return headers
class FoursquareProxy(Proxy):
    """Proxy for the Foursquare venue-search API (credentials go in the query string)."""
    URL = 'https://api.foursquare.com/v2/venues/search'

    def build_query(self, params):
        """Copy *params* and append client credentials plus API version before encoding."""
        merged = dict(params)
        merged['client_id'] = os.environ['FOURSQUARE_CLIENT_ID']
        merged['client_secret'] = os.environ['FOURSQUARE_CLIENT_SECRET']
        merged['v'] = '20170801'
        return super(FoursquareProxy, self).build_query(merged)
def base_handler(proxy):
    '''
    A simple catch - adding auth - request and return kind of proxy server
    The handle forward the entire query string to 3rd party API
    only adding authentication credentials if neccessary
    '''
    fetcher = proxy()

    # Depending on whether proxy integration is used, data should be in
    # serialized or plain dict form; statusCode, headers and body must all
    # be present in the response.
    def handler(event, context):
        cors_headers = {
            'Content-Type': 'application/json',
            'Access-Control-Allow-Origin': '*'
        }
        try:
            query = event['queryStringParameters']
            upstream = fetcher.fetch(query)
            return {
                'statusCode' : upstream.status_code,
                'body' : upstream.content,
                'headers' : cors_headers
            }
        except Exception as exc:
            print(exc)  # surface the cause in the logs
            return {
                'statusCode' : '400',
                'body' : 'Exception thrown while fetching data',
                'headers' : cors_headers
            }

    return handler
def make_proxy_handler(name):
    """
    Return a request handler wired to the named provider.

    *name* must be one of 'Twitter', 'Foursquare' or 'Yelp';
    any other value raises ValueError.
    """
    registry = {
        'Twitter' : TwitterProxy,
        'Foursquare' : FoursquareProxy,
        'Yelp' : YelpProxy
    }
    try:
        chosen = registry[name]
    except KeyError:
        raise ValueError('Invalid proxy: {}'.format(name))
    return base_handler(chosen)
|
Running Time: 1 hr. and 40 min.
This was the worst movie I have seen in a long time. I was somewhat enthused about seeing this movie because of last year’s Happy Death Day. I went into Happy Death Day expecting a terrible horror movie, but got an enthusiastic, funny, parody of horror movie tropes in a horror movie version of Groundhog Day. Blumhouse also produced Get Out and while I was not expecting this movie to Get Out I was expecting a certain level of quality.
Instead of something witty or interesting or horrifying or…anything we get something singularly and uniquely bad. Olivia Barron (Lucy Hale) is a college student that gets roped into going to Mexico on spring break. While there, she and her friends go up to an abandoned church where something bad has happened. They join in a game of truth or dare with a stranger and get sucked into its devilish world. The game becomes real and they either need to follow the rules of the game or die. The only character that is remotely funny or in any way redeeming is Ronnie and the rest of them are just boring as hell. He dies first.
The drama that results from the game is incredibly stupid. They share stupid secrets about who likes who, they sleep around, they come out of the closet, etc. You would think the teenage drama displayed by these *cough* intelligent college students *cough* would be dissipated by the fact that supernatural factors are compelling them to do it. It doesn’t. The story rapidly becomes preposterous with various subplots and the explanation for the game itself is hilariously bad. The ending is laughable.
Along the way it is possible to enjoy yourself by pondering how such a movie was created in the first place and to laugh at it uncontrollably. That will get you through the first half hour. After that, you’re on your own.
Truth or dare? The truth is you should skip it.
Running Time: 1 hr. and 41 min.
Isle of Dogs is a witty and visually engaging movie that is slightly outside of Wes Anderson’s wheelhouse. The city of Megasaki has a problem. The dogs in the city are overpopulated and carrying diseases that threaten to cross species. The mayor also has a thousand year old feud with dogs because of his clan’s preference for cats. In order to solve this conundrum the dogs are deported to Trash Island where they are expected to eke out an existence or die trying.
Atari, the estranged nephew of the current mayor, wants his dog bad. Spots was the first dog deported and he was Atari’s bodyguard and companion. He sets out in a prop plane to find his missing companion. The voice acting is great and the main group of dogs have the most hilarious roles in the movie. The group features Bryan Cranston, Edward Norton, Bill Murray, and Jeff Goldblum. Was that noise I just heard you knocking over furniture to go see it? Well, it should have been. Chief (Bryan Cranston) is a lifelong stray who is leery of people and the story is about his quest for survival. They undertake a journey to get back to the mainland and help Atari find his lost companion. Along the way they run into a variety of characters who are mostly interesting and thought out.
The movie does suffer from some flaws namely repetition. Some of the gags get recycled a little too quickly and are not nearly as interesting the second, third, or fourth time you see them. The movie is also a little too dark at times and lacking humor in those moments. Typically Wes Anderson movies are dark, but paired with levity. The levity is at times too light in Isle of Dogs.
It is a fun, interesting, original movie. See it.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.