text
stringlengths
29
850k
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils.translation import gettext_lazy as _

from autoslug.fields import AutoSlugField

from symposion.markdown_parser import parse
from symposion.proposals.models import ProposalBase, AdditionalSpeaker
from symposion.conference.models import Section
from symposion.speakers.models import Speaker


class Schedule(models.Model):
    """Schedule of slots for a single conference section."""

    section = models.OneToOneField(
        Section, on_delete=models.CASCADE, verbose_name=_("Section")
    )
    published = models.BooleanField(default=True, verbose_name=_("Published"))
    hidden = models.BooleanField(
        _("Hide schedule from overall conference view"), default=False
    )

    def __str__(self):
        return "%s Schedule" % self.section

    def first_date(self):
        """Return the date of the schedule's first day, or None if it has no days.

        Uses a single first() query instead of the original count()+first()
        pair, which hit the database twice.
        """
        first_day = self.day_set.first()
        return first_day.date if first_day is not None else None

    class Meta:
        ordering = ["section"]
        verbose_name = _("Schedule")
        verbose_name_plural = _("Schedules")


class Day(models.Model):
    """One calendar day of a schedule."""

    schedule = models.ForeignKey(
        Schedule, on_delete=models.CASCADE, verbose_name=_("Schedule")
    )
    date = models.DateField(verbose_name=_("Date"))

    def __str__(self):
        return "%s" % self.date

    class Meta:
        unique_together = [("schedule", "date")]
        ordering = ["date"]
        verbose_name = _("date")
        verbose_name_plural = _("dates")


class Room(models.Model):
    """A physical room that slots can be scheduled into."""

    schedule = models.ForeignKey(
        Schedule, on_delete=models.CASCADE, verbose_name=_("Schedule")
    )
    name = models.CharField(max_length=65, verbose_name=_("Name"))
    order = models.PositiveIntegerField(verbose_name=_("Order"))

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _("Room")
        verbose_name_plural = _("Rooms")


class SlotKind(models.Model):
    """
    A slot kind represents what kind a slot is. For example, a slot can be a
    break, lunch, or X-minute talk.
    """

    schedule = models.ForeignKey(
        Schedule, on_delete=models.CASCADE, verbose_name=_("schedule")
    )
    label = models.CharField(max_length=50, verbose_name=_("Label"))

    def __str__(self):
        return self.label

    class Meta:
        verbose_name = _("Slot kind")
        verbose_name_plural = _("Slot kinds")


class Slot(models.Model):
    """A time span on a given day that content (a presentation) can occupy."""

    day = models.ForeignKey(
        Day, on_delete=models.CASCADE, verbose_name=_("Day")
    )
    kind = models.ForeignKey(
        SlotKind, on_delete=models.CASCADE, verbose_name=_("Kind")
    )
    start = models.DateTimeField(verbose_name=_("Start"))
    end = models.DateTimeField(verbose_name=_("End"))
    content_override = models.TextField(
        blank=True, verbose_name=_("Content override")
    )
    content_override_html = models.TextField(blank=True, editable=False)

    def assign(self, content):
        """
        Assign the given content to this slot and if a previous slot content
        was given we need to unlink it to avoid integrity errors.
        """
        self.unassign()
        content.slot = self
        content.save()

    def unassign(self):
        """
        Unassign the associated content with this slot.
        """
        content = self.content
        if content and content.slot_id:
            content.slot = None
            content.save()

    @property
    def content(self):
        """
        Return the content this slot represents.

        @@@ hard-coded for presentation for now
        """
        try:
            return self.content_ptr
        except ObjectDoesNotExist:
            return None

    @property
    def length_in_minutes(self):
        return int(
            (self.end - self.start).total_seconds() / 60
        )

    @property
    def rooms(self):
        return Room.objects.filter(pk__in=self.slotroom_set.values("room"))

    def save(self, *args, **kwargs):
        # Keep the rendered HTML in sync with the markdown override on every save.
        self.content_override_html = parse(self.content_override)
        super(Slot, self).save(*args, **kwargs)

    def __str__(self):
        return "{!s} {!s} ({!s} - {!s}) {!s}".format(
            self.day,
            self.kind,
            self.start,
            self.end,
            " ".join(map(str, self.rooms)),
        )

    class Meta:
        ordering = ["day", "start", "end"]
        verbose_name = _("slot")
        verbose_name_plural = _("slots")


class SlotRoom(models.Model):
    """
    Links a slot with a room.
    """

    slot = models.ForeignKey(
        Slot, on_delete=models.CASCADE, verbose_name=_("Slot")
    )
    room = models.ForeignKey(
        Room, on_delete=models.CASCADE, verbose_name=_("Room")
    )

    def __str__(self):
        return "%s %s" % (self.room, self.slot)

    class Meta:
        unique_together = [("slot", "room")]
        ordering = ["slot", "room__order"]
        verbose_name = _("Slot room")
        verbose_name_plural = _("Slot rooms")


class Presentation(models.Model):
    """A scheduled presentation created from an accepted proposal."""

    slot = models.OneToOneField(
        Slot,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="content_ptr",
        verbose_name=_("Slot"),
    )
    title = models.CharField(max_length=100, verbose_name=_("Title"))
    slug = AutoSlugField(
        default="",
        editable=True,
        help_text=(
            "Slug that appears in presentation URLs. Automatically "
            "generated from the presentation's title. This field should "
            "not be edited after the schedule has been published."
        ),
        max_length=100,
        populate_from="title",
        unique=True)
    description = models.TextField(verbose_name=_("Description"))
    description_html = models.TextField(blank=True, editable=False)
    abstract = models.TextField(verbose_name=_("Abstract"))
    abstract_html = models.TextField(blank=True, editable=False)
    speaker = models.ForeignKey(
        Speaker,
        on_delete=models.CASCADE,
        related_name="presentations",
        verbose_name=_("Speaker"),
    )
    additional_speakers = models.ManyToManyField(
        Speaker,
        related_name="copresentations",
        blank=True,
        verbose_name=_("Additional speakers"),
    )
    cancelled = models.BooleanField(default=False, verbose_name=_("Cancelled"))
    proposal_base = models.OneToOneField(
        ProposalBase,
        on_delete=models.CASCADE,
        related_name="presentation",
        verbose_name=_("Proposal base"),
    )
    section = models.ForeignKey(
        Section,
        on_delete=models.CASCADE,
        related_name="presentations",
        verbose_name=_("Section"),
    )

    def save(self, *args, **kwargs):
        # Re-render markdown fields into their HTML mirrors on every save.
        self.description_html = parse(self.description)
        self.abstract_html = parse(self.abstract)
        return super(Presentation, self).save(*args, **kwargs)

    @property
    def number(self):
        return self.proposal.number

    @property
    def proposal(self):
        """Return the concrete proposal subclass instance, or None if unset."""
        if self.proposal_base_id is None:
            return None
        return ProposalBase.objects.get_subclass(pk=self.proposal_base_id)

    def speakers(self):
        """Yield the lead speaker followed by accepted additional speakers."""
        yield self.speaker
        accepted_status = AdditionalSpeaker.SPEAKING_STATUS_ACCEPTED
        speakers = self.additional_speakers.filter(
            additionalspeaker__status=accepted_status,
            additionalspeaker__proposalbase=self.proposal_base,
        )
        for speaker in speakers:
            yield speaker

    def __str__(self):
        return "#%s %s (%s)" % (self.number, self.title, self.speaker)

    class Meta:
        ordering = ["slot"]
        verbose_name = _("presentation")
        verbose_name_plural = _("presentations")
Contact us if you have any requirement for our weather stripping custom applications. We are always available to answer your questions. Call us today! We at Weather Stripping Guys in Roseburg, OR are waiting for your call.
#
# Old or obsolete code.
#
# -------------------------------------------------------------------------------------------------
# Kodi favorites launcher
# Do not use, AEL must not access favoruites.xml directly. Otherwise, addon will not be
# accepted in the Kodi official repository.
# -------------------------------------------------------------------------------------------------
class KodiLauncher(LauncherABC):
    """Launcher that starts a Kodi favourite. OBSOLETE: do not use (see note above)."""

    def launch(self):
        # A favourite is launched through Kodi itself: the "application" is xbmc.exe
        # and the favourite action string becomes the arguments.
        self.title = self.entity_data['m_name']
        self.application = FileName('xbmc.exe')
        self.arguments = self.entity_data['application']
        super(KodiLauncher, self).launch()

    def supports_launching_roms(self):
        return False

    def get_launcher_type(self):
        return LAUNCHER_KODI_FAVOURITES

    def get_launcher_type_name(self):
        return "Kodi favourites launcher"

    def change_application(self):
        """Let the user pick a different favourite; return True if it changed."""
        current_application = self.entity_data['application']
        dialog = KodiDictionaryDialog()
        selected_application = dialog.select('Select the favourite',
                                             self._get_kodi_favourites(),
                                             current_application)
        if selected_application is None or selected_application == current_application:
            return False
        self.entity_data['application'] = selected_application
        self.entity_data['original_favname'] = self._get_title_from_selected_favourite(
            selected_application, 'original_favname', self.entity_data)
        return True

    def get_edit_options(self):
        options = collections.OrderedDict()
        options['EDIT_METADATA'] = 'Edit Metadata ...'
        options['EDIT_ASSETS'] = 'Edit Assets/Artwork ...'
        options['SET_DEFAULT_ASSETS'] = 'Choose default Assets/Artwork ...'
        options['CHANGE_CATEGORY'] = 'Change Category'
        options['LAUNCHER_STATUS'] = 'Launcher status: {0}'.format(self.get_state())
        options['ADVANCED_MODS'] = 'Advanced Modifications ...'
        options['EXPORT_LAUNCHER'] = 'Export Launcher XML configuration ...'
        options['DELETE_LAUNCHER'] = 'Delete Launcher'
        return options

    def get_advanced_modification_options(self):
        log_debug('KodiLauncher::get_advanced_modification_options() Returning edit options')
        toggle_window_str = 'ON' if self.entity_data['toggle_window'] else 'OFF'
        non_blocking_str = 'ON' if self.entity_data['non_blocking'] else 'OFF'
        org_favname = self.entity_data['original_favname'] \
            if 'original_favname' in self.entity_data else 'unknown'
        options = super(KodiLauncher, self).get_advanced_modification_options()
        options['CHANGE_APPLICATION'] = "Change favourite: '{0}'".format(org_favname)
        options['TOGGLE_WINDOWED'] = "Toggle Kodi into windowed mode (now {0})".format(toggle_window_str)
        options['TOGGLE_NONBLOCKING'] = "Non-blocking launcher (now {0})".format(non_blocking_str)
        return options

    def _get_builder_wizard(self, wizard):
        # Chain of wizard dialogs that collect the launcher fields at creation time.
        wizard = DictionarySelectionWizardDialog('application', 'Select the favourite',
                                                 self._get_kodi_favourites(), wizard)
        wizard = DummyWizardDialog('s_icon', '', wizard, self._get_icon_from_selected_favourite)
        wizard = DummyWizardDialog('original_favname', '', wizard, self._get_title_from_selected_favourite)
        wizard = DummyWizardDialog('m_name', '', wizard, self._get_title_from_selected_favourite)
        wizard = KeyboardWizardDialog('m_name', 'Set the title of the launcher', wizard)
        wizard = SelectionWizardDialog('platform', 'Select the platform', AEL_platform_list, wizard)
        return wizard

    def _get_kodi_favourites(self):
        # Map favourite action string -> favourite display name.
        favourites = kodi_read_favourites()
        fav_options = {}
        for key in favourites:
            fav_options[key] = favourites[key][0]
        return fav_options

    def _get_icon_from_selected_favourite(self, input, item_key, launcher):
        fav_action = launcher['application']
        favourites = kodi_read_favourites()
        for key in favourites:
            if fav_action == key:
                return favourites[key][1]
        return 'DefaultProgram.png'

    def _get_title_from_selected_favourite(self, input, item_key, launcher):
        fav_action = launcher['application']
        favourites = kodi_read_favourites()
        for key in favourites:
            if fav_action == key:
                return favourites[key][0]
        return _get_title_from_app_path(input, launcher)

# -------------------------------------------------------------------------------------------------
# XmlDataContext should be a singleton and only used by the repository classes.
# This class holds the actual XML data and reads/writes that data.
# OBSOLETE CODE THAT WILL BE REMOVED.
# -------------------------------------------------------------------------------------------------
class XmlDataContext(object):
    def __init__(self, xml_file_path):
        log_debug('XmlDataContext::init() "{0}"'.format(xml_file_path.getPath()))
        self._xml_root = None
        self.repo_fname = xml_file_path

    def xml_exists(self):
        return self.repo_fname.exists()

    #
    # If the XML file does not exists (for example, when the addon is first execute) then
    # allow to setup initial data.
    #
    def set_xml_root(self, xml_repo_str):
        self._xml_root = fs_get_XML_root_from_str(xml_repo_str)

    # Lazy loading of xml data through property
    @property
    def xml_data(self):
        if self._xml_root is None:
            self._load_xml()
        return self._xml_root

    #
    # If there is any problem with the filesystem then the functions display an error
    # dialog and produce an Addon_Error exception.
    #
    def _load_xml(self):
        log_debug('XmlDataContext::_load_xml() Loading "{0}"'.format(self.repo_fname.getPath()))
        xml_repo_str = self.repo_fname.loadFileToStr()
        self._xml_root = fs_get_XML_root_from_str(xml_repo_str)

    def commit(self):
        log_info('XmlDataContext::commit() Saving "{0}"'.format(self.repo_fname.getPath()))
        xml_repo_str = fs_get_str_from_XML_root(self._xml_root)
        self.repo_fname.saveStrToFile(xml_repo_str)

    def get_nodes(self, tag):
        log_debug('XmlDataContext::get_nodes(): xpath query "{}"'.format(tag))
        return self.xml_data.findall(tag)

    def get_node(self, tag, id):
        query = "{}[id='{}']".format(tag, id)
        log_debug('XmlDataContext::get_node(): xpath query "{}"'.format(query))
        return self.xml_data.find(query)

    def get_nodes_by(self, tag, field, value):
        query = "{}[{}='{}']".format(tag, field, value)
        log_debug('XmlDataContext::get_nodes_by(): xpath query "{}"'.format(query))
        return self.xml_data.findall(query)

    # Creates/updates the XML node identified by tag and id with the given dictionary of data.
    def save_node(self, tag, id, updated_data):
        node_to_update = self.get_node(tag, id)
        if node_to_update is None:
            node_to_update = self.xml_data.makeelement(tag, {})
            self.xml_data.append(node_to_update)
        node_to_update.clear()
        for key in updated_data:
            updated_value = updated_data[key]
            # >> To simulate a list with XML allow multiple XML tags.
            # BUG FIX: the original tested isinstance(updated_data, list) -- the whole
            # dictionary, which is never a list -- so list values went through the scalar
            # branch. Test the value. Also create one fresh element per list entry;
            # re-appending a single mutated element would not produce multiple tags.
            # NOTE: unicode() is Python 2 only; this obsolete module targets Py2 Kodi.
            if isinstance(updated_value, list):
                for extra_value in updated_value:
                    element = self.xml_data.makeelement(key, {})
                    element.text = unicode(extra_value)
                    node_to_update.append(element)
            else:
                element = self.xml_data.makeelement(key, {})
                element.text = unicode(updated_value)
                node_to_update.append(element)

    def remove_node(self, tag, id):
        node_to_remove = self.get_node(tag, id)
        if node_to_remove is None:
            return
        self.xml_data.remove(node_to_remove)

# -------------------------------------------------------------------------------------------------
# --- Repository class for Category objects ---
# Arranges retrieving and storing of the categories from and into the XML data file.
# Creates Category objects with a reference to an instance of AELObjectFactory.
# OBSOLETE CODE THAT WILL BE REMOVED.
# -------------------------------------------------------------------------------------------------
class CategoryRepository(object):
    def __init__(self, data_context, obj_factory):
        self.data_context = data_context
        self.obj_factory = obj_factory
        # When AEL is executed for the first time categories.xml does not exists. In this case,
        # create an empty memory file to avoid concurrent writing problems (AEL maybe called
        # concurrently by skins). When the user creates a Category/Launcher then write
        # categories.xml to the filesystem.
        if not self.data_context.xml_exists():
            log_debug('CategoryRepository::init() Creating empty categories repository.')
            xml_repo_str = (
                '<?xml version="1.0" encoding="utf-8" standalone="yes"?>'
                '<advanced_emulator_launcher version="1">'
                '<control>'
                '<update_timestamp>0.0</update_timestamp>'
                '</control>'
                '</advanced_emulator_launcher>'
            )
            data_context.set_xml_root(xml_repo_str)

    # ---------------------------------------------------------------------------------------------
    # Data model used in the plugin
    # Internally all string in the data model are Unicode. They will be encoded to
    # UTF-8 when writing files.
    # ---------------------------------------------------------------------------------------------
    # These three functions create a new data structure for the given object and (very importantly)
    # fill the correct default values). These must match what is written/read from/to the XML files.
    # Tag name in the XML is the same as in the data dictionary.
    #
    def _parse_xml_to_dictionary(self, category_element):
        """Convert one <category> XML element into a plain data dictionary."""
        __debug_xml_parser = False
        category = {}
        # Parse child tags of category.
        for child in category_element:
            # By default read strings.
            raw_text = child.text if child.text is not None else ''
            raw_text = text_unescape_XML(raw_text)
            tag_name = child.tag
            if __debug_xml_parser:
                log_debug('{0} --> {1}'.format(tag_name, raw_text.encode('utf-8')))
            # Now transform data depending on tag name.
            if tag_name == 'finished':
                category[tag_name] = (raw_text == 'True')
            else:
                # Internal data is always stored as Unicode. ElementTree already outputs Unicode.
                category[tag_name] = raw_text
        return category

    # Finds a Category by ID in the database. ID may be a Virtual/Special category.
    # Returns a Category object instance or None if the category ID is not found in the DB.
    def find(self, category_id):
        if category_id == VCATEGORY_ADDONROOT_ID:
            # The addon-root virtual category is synthesised, never read from XML.
            category_dic = fs_new_category()
            category_dic['type'] = OBJ_CATEGORY_VIRTUAL
            category_dic['id'] = VCATEGORY_ADDONROOT_ID
            category_dic['m_name'] = 'Root category'
        else:
            category_element = self.data_context.get_node('category', category_id)
            if category_element is None:
                log_debug('Cannot find category with id {0}'.format(category_id))
                return None
            category_dic = self._parse_xml_to_dictionary(category_element)
        return self.obj_factory.create_from_dic(category_dic)

    # Returns a list with all the Category objects. Each list element is a Category instance.
def find_all(self): categories = [] category_elements = self.data_context.get_nodes('category') log_debug('Found {0} categories'.format(len(category_elements))) for category_element in category_elements: category_dic = self._parse_xml_to_dictionary(category_element) log_debug('Creating category instance for category {0}'.format(category_dic['id'])) category = self.obj_factory.create_from_dic(category_dic) categories.append(category) return categories def get_simple_list(self): category_list = {} category_elements = self.data_context.get_nodes('category') for category_element in category_elements: id = category_element.find('id').text name = category_element.find('m_name').text category_list[id] = name return category_list def count(self): return len(self.data_context.get_nodes('category')) def save(self, category): category_id = category.get_id() self.data_context.save_node('category', category_id, category.get_data_dic()) self.data_context.commit() def save_multiple(self, categories): for category in categories: category_id = category.get_id() self.data_context.save_node('category', category_id, category.get_data_dic()) self.data_context.commit() def delete(self, category): category_id = category.get_id() self.data_context.remove_node('category', category_id) self.data_context.commit() # ------------------------------------------------------------------------------------------------- # Repository class for Launchers objects. # Arranges retrieving and storing of the launchers from and into the xml data file. # OBSOLETE CODE THAT WILL BE REMOVE. # ------------------------------------------------------------------------------------------------- class LauncherRepository(object): def __init__(self, data_context, obj_factory): # Categories and Launchers share an XML repository file. If categories.xml does not # exists, the CategoryRepository() class initialises the XmlDataContext() with # empty data. 
self.data_context = data_context self.obj_factory = obj_factory def _parse_xml_to_dictionary(self, launcher_element): __debug_xml_parser = False # Sensible default values launcher = fs_new_launcher() if __debug_xml_parser: log_debug('Element has {0} child elements'.format(len(launcher_element))) # Parse child tags of launcher element for element_child in launcher_element: # >> By default read strings xml_text = element_child.text if element_child.text is not None else '' xml_text = text_unescape_XML(xml_text) xml_tag = element_child.tag if __debug_xml_parser: log_debug('{0} --> {1}'.format(xml_tag, xml_text.encode('utf-8'))) # >> Transform list() datatype if xml_tag == 'args_extra': launcher[xml_tag].append(xml_text) # >> Transform Bool datatype elif xml_tag == 'finished' or xml_tag == 'toggle_window' or xml_tag == 'non_blocking' or \ xml_tag == 'multidisc': launcher[xml_tag] = True if xml_text == 'True' else False # >> Transform Int datatype elif xml_tag == 'num_roms' or xml_tag == 'num_parents' or xml_tag == 'num_clones' or \ xml_tag == 'num_have' or xml_tag == 'num_miss' or xml_tag == 'num_unknown': launcher[xml_tag] = int(xml_text) # >> Transform Float datatype elif xml_tag == 'timestamp_launcher' or xml_tag == 'timestamp_report': launcher[xml_tag] = float(xml_text) else: launcher[xml_tag] = xml_text return launcher def find(self, launcher_id): if launcher_id in [VLAUNCHER_FAVOURITES_ID, VLAUNCHER_RECENT_ID, VLAUNCHER_MOST_PLAYED_ID]: launcher = self.launcher_factory.create_new(launcher_id) return launcher launcher_element = self.data_context.get_node('launcher', launcher_id) if launcher_element is None: log_debug('Launcher ID {} not found'.format(launcher_id)) return None launcher_dic = self._parse_xml_to_dictionary(launcher_element) launcher = self.obj_factory.create_from_dic(launcher_dic) return launcher def find_all_ids(self): launcher_ids = [] launcher_id_elements = self.data_context.get_nodes('launcher/id') for launcher_id_element in launcher_id_elements: 
launcher_ids.append(launcher_id_element.text()) return launcher_ids def find_all(self): launchers = [] launcher_elements = self.data_context.get_nodes('launcher') for launcher_element in launcher_elements: launcher_dic = self._parse_xml_to_dictionary(launcher_element) launcher = self.obj_factory.create_from_dic(launcher_dic) launchers.append(launcher) return launchers def find_by_launcher_type(self, launcher_type): launchers = [] launcher_elements = self.data_context.get_nodes_by('launcher', 'type', launcher_type ) for launcher_element in launcher_elements: launcher_dic = self._parse_xml_to_dictionary(launcher_element) launcher = self.obj_factory.create_from_dic(launcher_dic) launchers.append(launcher) return launchers def find_by_category(self, category_id): launchers = [] launcher_elements = self.data_context.get_nodes_by('launcher', 'categoryID', category_id ) if launcher_elements is None or len(launcher_elements) == 0: log_debug('No launchers found in category {0}'.format(category_id)) return launchers log_debug('{0} launchers found in category {1}'.format(len(launcher_elements), category_id)) for launcher_element in launcher_elements: launcher_dic = self._parse_xml_to_dictionary(launcher_element) launcher = self.obj_factory.create_from_dic(launcher_dic) launchers.append(launcher) return launchers def count(self): return len(self.data_context.get_nodes('launcher')) def save(self, launcher, update_launcher_timestamp = True): if update_launcher_timestamp: launcher.update_timestamp() launcher_id = launcher.get_id() launcher_data_dic = launcher.get_data_dic() self.data_context.save_node('launcher', launcher_id, launcher_data_dic) self.data_context.commit() def save_multiple(self, launchers, update_launcher_timestamp = True): for launcher in launchers: if update_launcher_timestamp: launcher.update_timestamp() launcher_id = launcher.get_id() launcher_data_dic = launcher.get_data_dic() self.data_context.save_node('launcher', launcher_id, launcher_data_dic) 
self.data_context.commit() def delete(self, launcher): launcher_id = launcher.get_id() self.data_context.remove_node('launcher', launcher_id) self.data_context.commit() # ################################################################################################# # ################################################################################################# # ROMsets # ################################################################################################# # ################################################################################################# class RomSetFactory(): def __init__(self, pluginDataDir): self.ROMS_DIR = pluginDataDir.pjoin('db_ROMs') self.FAV_JSON_FILE_PATH = pluginDataDir.pjoin('favourites.json') self.RECENT_PLAYED_FILE_PATH = pluginDataDir.pjoin('history.json') self.MOST_PLAYED_FILE_PATH = pluginDataDir.pjoin('most_played.json') self.COLLECTIONS_FILE_PATH = pluginDataDir.pjoin('collections.xml') self.COLLECTIONS_DIR = pluginDataDir.pjoin('db_Collections') self.VIRTUAL_CAT_TITLE_DIR = pluginDataDir.pjoin('db_title') self.VIRTUAL_CAT_YEARS_DIR = pluginDataDir.pjoin('db_years') self.VIRTUAL_CAT_GENRE_DIR = pluginDataDir.pjoin('db_genre') self.VIRTUAL_CAT_DEVELOPER_DIR = pluginDataDir.pjoin('db_developer') self.VIRTUAL_CAT_CATEGORY_DIR = pluginDataDir.pjoin('db_category') self.VIRTUAL_CAT_NPLAYERS_DIR = pluginDataDir.pjoin('db_nplayers') self.VIRTUAL_CAT_ESRB_DIR = pluginDataDir.pjoin('db_esrb') self.VIRTUAL_CAT_RATING_DIR = pluginDataDir.pjoin('db_rating') if not self.ROMS_DIR.exists(): self.ROMS_DIR.makedirs() if not self.VIRTUAL_CAT_TITLE_DIR.exists(): self.VIRTUAL_CAT_TITLE_DIR.makedirs() if not self.VIRTUAL_CAT_YEARS_DIR.exists(): self.VIRTUAL_CAT_YEARS_DIR.makedirs() if not self.VIRTUAL_CAT_GENRE_DIR.exists(): self.VIRTUAL_CAT_GENRE_DIR.makedirs() if not self.VIRTUAL_CAT_DEVELOPER_DIR.exists(): self.VIRTUAL_CAT_DEVELOPER_DIR.makedirs() if not self.VIRTUAL_CAT_CATEGORY_DIR.exists(): 
self.VIRTUAL_CAT_CATEGORY_DIR.makedirs() if not self.VIRTUAL_CAT_NPLAYERS_DIR.exists(): self.VIRTUAL_CAT_NPLAYERS_DIR.makedirs() if not self.VIRTUAL_CAT_ESRB_DIR.exists(): self.VIRTUAL_CAT_ESRB_DIR.makedirs() if not self.VIRTUAL_CAT_RATING_DIR.exists(): self.VIRTUAL_CAT_RATING_DIR.makedirs() if not self.COLLECTIONS_DIR.exists(): self.COLLECTIONS_DIR.makedirs() def create(self, categoryID, launcher_data): launcherID = launcher_data['id'] log_debug('romsetfactory.create(): categoryID={0}'.format(categoryID)) log_debug('romsetfactory.create(): launcherID={0}'.format(launcherID)) description = self.createDescription(categoryID) # --- ROM in Favourites --- if categoryID == VCATEGORY_FAVOURITES_ID and launcherID == VLAUNCHER_FAVOURITES_ID: return FavouritesRomSet(self.FAV_JSON_FILE_PATH, launcher_data, description) # --- ROM in Most played ROMs --- elif categoryID == VCATEGORY_MOST_PLAYED_ID and launcherID == VLAUNCHER_MOST_PLAYED_ID: return FavouritesRomSet(self.MOST_PLAYED_FILE_PATH, launcher_data, description) # --- ROM in Recently played ROMs list --- elif categoryID == VCATEGORY_RECENT_ID and launcherID == VLAUNCHER_RECENT_ID: return RecentlyPlayedRomSet(self.RECENT_PLAYED_FILE_PATH, launcher_data, description) # --- ROM in Collection --- elif categoryID == VCATEGORY_COLLECTIONS_ID: return CollectionRomSet(self.COLLECTIONS_FILE_PATH, launcher_data, self.COLLECTIONS_DIR, launcherID, description) # --- ROM in Virtual Launcher --- elif categoryID == VCATEGORY_TITLE_ID: log_info('RomSetFactory() loading ROM set Title Virtual Launcher ...') return VirtualLauncherRomSet(self.VIRTUAL_CAT_TITLE_DIR, launcher_data, launcherID, description) elif categoryID == VCATEGORY_YEARS_ID: log_info('RomSetFactory() loading ROM set Years Virtual Launcher ...') return VirtualLauncherRomSet(self.VIRTUAL_CAT_YEARS_DIR, launcher_data, launcherID, description) elif categoryID == VCATEGORY_GENRE_ID: log_info('RomSetFactory() loading ROM set Genre Virtual Launcher ...') return 
VirtualLauncherRomSet(self.VIRTUAL_CAT_GENRE_DIR, launcher_data, launcherID, description) elif categoryID == VCATEGORY_DEVELOPER_ID: log_info('RomSetFactory() loading ROM set Studio Virtual Launcher ...') return VirtualLauncherRomSet(self.VIRTUAL_CAT_DEVELOPER_DIR, launcher_data, launcherID, description) elif categoryID == VCATEGORY_NPLAYERS_ID: log_info('RomSetFactory() loading ROM set NPlayers Virtual Launcher ...') return VirtualLauncherRomSet(self.VIRTUAL_CAT_NPLAYERS_DIR, launcher_data, launcherID, description) elif categoryID == VCATEGORY_ESRB_ID: log_info('RomSetFactory() loading ROM set ESRB Virtual Launcher ...') return VirtualLauncherRomSet(self.VIRTUAL_CAT_ESRB_DIR, launcher_data, launcherID, description) elif categoryID == VCATEGORY_RATING_ID: log_info('RomSetFactory() loading ROM set Rating Virtual Launcher ...') return VirtualLauncherRomSet(self.VIRTUAL_CAT_RATING_DIR, launcher_data, launcherID, description) elif categoryID == VCATEGORY_CATEGORY_ID: return VirtualLauncherRomSet(self.VIRTUAL_CAT_CATEGORY_DIR, launcher_data, launcherID, description) elif categoryID == VCATEGORY_PCLONES_ID \ and 'launcher_display_mode' in launcher_data \ and launcher_data['launcher_display_mode'] != LAUNCHER_DMODE_FLAT: return PcloneRomSet(self.ROMS_DIR, launcher_data, description) log_info('RomSetFactory() loading standard romset...') return StandardRomSet(self.ROMS_DIR, launcher_data, description) def createDescription(self, categoryID): if categoryID == VCATEGORY_FAVOURITES_ID: return RomSetDescription('Favourite', 'Browse favourites') elif categoryID == VCATEGORY_MOST_PLAYED_ID: return RomSetDescription('Most Played ROM', 'Browse most played') elif categoryID == VCATEGORY_RECENT_ID: return RomSetDescription('Recently played ROM', 'Browse by recently played') elif categoryID == VCATEGORY_TITLE_ID: return RomSetDescription('Virtual Launcher Title', 'Browse by Title') elif categoryID == VCATEGORY_YEARS_ID: return RomSetDescription('Virtual Launcher Years', 'Browse by 
Year') elif categoryID == VCATEGORY_GENRE_ID: return RomSetDescription('Virtual Launcher Genre', 'Browse by Genre') elif categoryID == VCATEGORY_DEVELOPER_ID: return RomSetDescription('Virtual Launcher Studio','Browse by Studio') elif categoryID == VCATEGORY_NPLAYERS_ID: return RomSetDescription('Virtual Launcher NPlayers', 'Browse by Number of Players') elif categoryID == VCATEGORY_ESRB_ID: return RomSetDescription('Virtual Launcher ESRB', 'Browse by ESRB Rating') elif categoryID == VCATEGORY_RATING_ID: return RomSetDescription('Virtual Launcher Rating', 'Browse by User Rating') elif categoryID == VCATEGORY_CATEGORY_ID: return RomSetDescription('Virtual Launcher Category', 'Browse by Category') #if virtual_categoryID == VCATEGORY_TITLE_ID: # vcategory_db_filename = VCAT_TITLE_FILE_PATH # vcategory_name = 'Browse by Title' #elif virtual_categoryID == VCATEGORY_YEARS_ID: # vcategory_db_filename = VCAT_YEARS_FILE_PATH # vcategory_name = 'Browse by Year' #elif virtual_categoryID == VCATEGORY_GENRE_ID: # vcategory_db_filename = VCAT_GENRE_FILE_PATH # vcategory_name = 'Browse by Genre' #elif virtual_categoryID == VCATEGORY_STUDIO_ID: # vcategory_db_filename = VCAT_STUDIO_FILE_PATH # vcategory_name = 'Browse by Studio' #elif virtual_categoryID == VCATEGORY_NPLAYERS_ID: # vcategory_db_filename = VCAT_NPLAYERS_FILE_PATH # vcategory_name = 'Browse by Number of Players' #elif virtual_categoryID == VCATEGORY_ESRB_ID: # vcategory_db_filename = VCAT_ESRB_FILE_PATH # vcategory_name = 'Browse by ESRB Rating' #elif virtual_categoryID == VCATEGORY_RATING_ID: # vcategory_db_filename = VCAT_RATING_FILE_PATH # vcategory_name = 'Browse by User Rating' #elif virtual_categoryID == VCATEGORY_CATEGORY_ID: # vcategory_db_filename = VCAT_CATEGORY_FILE_PATH # vcategory_name = 'Browse by Category' return None class RomSetDescription(): def __init__(self, title, description, isRegularLauncher = False): self.title = title self.description = description self.isRegularLauncher = isRegularLauncher 
class RomSet():
    """Abstract interface for a set of ROM dictionaries persisted as JSON on disk.

    Subclasses implement the storage layout (single JSON file, PClone index,
    Favourites, Virtual Launchers, Collections, Recently Played).
    """
    __metaclass__ = abc.ABCMeta  # Python 2 style abstract base class

    def __init__(self, romsDir, launcher, description):
        # romsDir: FileName-like object (project type) pointing at the ROM
        #          database directory or file. Must support exists()/pjoin().
        # launcher: launcher dictionary, may be None for some subclasses.
        # description: RomSetDescription for this ROM set.
        self.romsDir = romsDir
        self.launcher = launcher
        self.description = description

    @abc.abstractmethod
    def romSetFileExists(self):
        """Return True if the backing database file exists on disk."""
        return False

    @abc.abstractmethod
    def loadRoms(self):
        """Return all ROMs as a dict keyed by ROM id, or None on failure."""
        return {}

    @abc.abstractmethod
    def loadRomsAsList(self):
        """Return all ROMs as a list, or None on failure."""
        return []

    @abc.abstractmethod
    def loadRom(self, romId):
        """Return the ROM dict with the given id, or None if not found."""
        return None

    @abc.abstractmethod
    def saveRoms(self, roms):
        """Persist the given ROMs to the backing database."""
        pass

    @abc.abstractmethod
    def clear(self):
        """Delete the backing ROM database."""
        pass


class StandardRomSet(RomSet):
    """ROM set stored in a single launcher JSON file.

    The file is '<roms_base_noext>.json' in flat display mode and
    '<roms_base_noext>_parents.json' in parent/clone display mode.
    """

    def __init__(self, romsDir, launcher, description):
        self.roms_base_noext = launcher['roms_base_noext'] if launcher is not None and 'roms_base_noext' in launcher else None
        self.view_mode = launcher['launcher_display_mode'] if launcher is not None and 'launcher_display_mode' in launcher else None

        if self.roms_base_noext is None:
            # No base name available: romsDir is taken to be the JSON file itself.
            self.repositoryFile = romsDir
        elif self.view_mode == LAUNCHER_DMODE_FLAT:
            self.repositoryFile = romsDir.pjoin(self.roms_base_noext + '.json')
        else:
            self.repositoryFile = romsDir.pjoin(self.roms_base_noext + '_parents.json')

        super(StandardRomSet, self).__init__(romsDir, launcher, description)

    def romSetFileExists(self):
        return self.repositoryFile.exists()

    def loadRoms(self):
        if not self.romSetFileExists():
            log_warning('Launcher "{0}" JSON not found.'.format(self.roms_base_noext))
            return None

        log_info('StandardRomSet() Loading ROMs in Launcher ...')
        # was disk_IO.fs_load_ROMs_JSON()
        roms = {}
        # --- Parse using json module ---
        # >> On Github issue #8 a user had an empty JSON file for ROMs. This raises
        #    exception exceptions.ValueError and launcher cannot be deleted. Deal
        #    with this exception so at least launcher can be rescanned.
        log_verb('StandardRomSet.loadRoms() FILE {0}'.format(self.repositoryFile.getPath()))
        try:
            roms = self.repositoryFile.readJson()
        except ValueError:
            # BUGFIX: original code referenced the undefined name 'roms_json_file'
            # here, raising a NameError instead of logging the corrupt JSON file.
            statinfo = self.repositoryFile.stat()
            log_error('StandardRomSet.loadRoms() ValueError exception in json.load() function')
            log_error('StandardRomSet.loadRoms() Dir {0}'.format(self.repositoryFile.getPath()))
            log_error('StandardRomSet.loadRoms() Size {0}'.format(statinfo.st_size))

        return roms

    def loadRomsAsList(self):
        roms_dict = self.loadRoms()
        if roms_dict is None:
            return None
        roms = []
        for key in roms_dict:
            roms.append(roms_dict[key])
        return roms

    def loadRom(self, romId):
        roms = self.loadRoms()
        if roms is None:
            log_error("StandardRomSet(): Could not load roms")
            return None

        # BUGFIX: original used roms[romId], which raised KeyError for a missing
        # id and made the "not found" branch below unreachable. Use get() so a
        # missing ROM is reported via the warning path as intended.
        romData = roms.get(romId)
        if romData is None:
            log_warning("StandardRomSet(): Rom with ID '{0}' not found".format(romId))
            return None

        return romData

    def saveRoms(self, roms):
        fs_write_ROMs_JSON(self.romsDir, self.launcher, roms)

    def clear(self):
        fs_unlink_ROMs_database(self.romsDir, self.launcher)


class PcloneRomSet(StandardRomSet):
    """ROM set backed by the Parent/Clone index JSON ('<base>_index_PClone.json')."""

    def __init__(self, romsDir, launcher, description):
        super(PcloneRomSet, self).__init__(romsDir, launcher, description)
        self.roms_base_noext = launcher['roms_base_noext'] if launcher is not None and 'roms_base_noext' in launcher else None
        self.repositoryFile = self.romsDir.pjoin(self.roms_base_noext + '_index_PClone.json')


class FavouritesRomSet(StandardRomSet):
    """ROM set for the Favourites database, which uses its own JSON serializers."""

    def loadRoms(self):
        log_info('FavouritesRomSet() Loading ROMs in Favourites ...')
        roms = fs_load_Favourites_JSON(self.repositoryFile)
        return roms

    def saveRoms(self, roms):
        log_info('FavouritesRomSet() Saving Favourites ROMs ...')
        fs_write_Favourites_JSON(self.repositoryFile, roms)


class VirtualLauncherRomSet(StandardRomSet):
    """ROM set for a virtual launcher; the database file is named after the launcher id."""

    def __init__(self, romsDir, launcher, launcherID, description):
        self.launcherID = launcherID
        super(VirtualLauncherRomSet, self).__init__(romsDir, launcher, description)

    def romSetFileExists(self):
        hashed_db_filename = self.romsDir.pjoin(self.launcherID + '.json')
        return hashed_db_filename.exists()

    def loadRoms(self):
        if not self.romSetFileExists():
            log_warning('VirtualCategory "{0}" JSON not found.'.format(self.launcherID))
            return None

        log_info('VirtualCategoryRomSet() Loading ROMs in Virtual Launcher ...')
        roms = fs_load_VCategory_ROMs_JSON(self.romsDir, self.launcherID)
        return roms

    def saveRoms(self, roms):
        # NOTE(review): writes with the Favourites serializer and passes romsDir
        # (not a per-launcher file) as the destination; looks suspicious but is
        # preserved as-is — confirm against fs_write_Favourites_JSON's signature.
        fs_write_Favourites_JSON(self.romsDir, roms)


class RecentlyPlayedRomSet(RomSet):
    """ROM set for the Recently Played list.

    On disk the ROMs are a JSON list; loadRoms() exposes them as an OrderedDict
    keyed by ROM id, preserving the on-disk order.
    """

    def romSetFileExists(self):
        return self.romsDir.exists()

    def loadRoms(self):
        log_info('RecentlyPlayedRomSet() Loading ROMs in Recently Played ROMs ...')
        romsList = self.loadRomsAsList()
        roms = collections.OrderedDict()
        for rom in romsList:
            roms[rom['id']] = rom
        return roms

    def loadRomsAsList(self):
        roms = fs_load_Collection_ROMs_JSON(self.romsDir)
        return roms

    def loadRom(self, romId):
        roms = self.loadRomsAsList()
        if roms is None:
            log_error("RecentlyPlayedRomSet(): Could not load roms")
            return None

        current_ROM_position = fs_collection_ROM_index_by_romID(romId, roms)
        if current_ROM_position < 0:
            kodi_dialog_OK('Collection ROM not found in list. This is a bug!')
            return None

        romData = roms[current_ROM_position]
        if romData is None:
            log_warning("RecentlyPlayedRomSet(): Rom with ID '{0}' not found".format(romId))
            return None

        return romData

    def saveRoms(self, roms):
        fs_write_Collection_ROMs_JSON(self.romsDir, roms)

    def clear(self):
        pass


class CollectionRomSet(RomSet):
    """ROM set for a ROM Collection.

    romsDir holds the collection index XML, which maps launcherID to the
    collection's JSON database ('<roms_base_noext>.json').
    """

    def __init__(self, romsDir, launcher, collection_dir, launcherID, description):
        self.collection_dir = collection_dir
        self.launcherID = launcherID
        super(CollectionRomSet, self).__init__(romsDir, launcher, description)

    def romSetFileExists(self):
        # Renamed local from 'collections' to avoid shadowing the collections module.
        (collection_index, update_timestamp) = fs_load_Collection_index_XML(self.romsDir)
        collection = collection_index[self.launcherID]
        # NOTE(review): this resolves the JSON under romsDir while loadRomsAsList
        # resolves it under collection_dir — confirm which is correct; preserved as-is.
        roms_json_file = self.romsDir.pjoin(collection['roms_base_noext'] + '.json')
        return roms_json_file.exists()

    def loadRomsAsList(self):
        (collection_index, update_timestamp) = fs_load_Collection_index_XML(self.romsDir)
        collection = collection_index[self.launcherID]
        roms_json_file = self.collection_dir.pjoin(collection['roms_base_noext'] + '.json')
        romsList = fs_load_Collection_ROMs_JSON(roms_json_file)
        return romsList

    # NOTE ROMs in a collection are stored as a list and ROMs in Favourites are stored as
    #      a dictionary. Convert the Collection list into an ordered dictionary and then
    #      converted back the ordered dictionary into a list before saving the collection.
    def loadRoms(self):
        log_info('CollectionRomSet() Loading ROMs in Collection ...')
        romsList = self.loadRomsAsList()
        roms = collections.OrderedDict()
        for rom in romsList:
            roms[rom['id']] = rom
        return roms

    def loadRom(self, romId):
        roms = self.loadRomsAsList()
        if roms is None:
            log_error("CollectionRomSet(): Could not load roms")
            return None

        current_ROM_position = fs_collection_ROM_index_by_romID(romId, roms)
        if current_ROM_position < 0:
            kodi_dialog_OK('Collection ROM not found in list. This is a bug!')
            # Return None explicitly, consistent with RecentlyPlayedRomSet.loadRom().
            return None

        romData = roms[current_ROM_position]
        if romData is None:
            log_warning("CollectionRomSet(): Rom with ID '{0}' not found".format(romId))
            return None

        return romData

    def saveRoms(self, roms):
        # >> Convert back the OrderedDict into a list and save Collection
        collection_rom_list = []
        for key in roms:
            collection_rom_list.append(roms[key])

        # BUGFIX: original code used the undefined name 'collection' here
        # (NameError on every save). Resolve the collection entry from the
        # index XML, exactly as the other methods of this class do.
        (collection_index, update_timestamp) = fs_load_Collection_index_XML(self.romsDir)
        collection = collection_index[self.launcherID]
        json_file_path = self.romsDir.pjoin(collection['roms_base_noext'] + '.json')
        fs_write_Collection_ROMs_JSON(json_file_path, collection_rom_list)

    def clear(self):
        pass
Let us take the complication out of ranking your website and services. When you love what you do, it’s easy to get good at it. And, the easier something is, the faster you can perform the tasks associated with it efficiently, and effectively. Don’t lose time and money trying to build your own website! The internet is a constantly changing and evolving landscape, so let JADE Computer handle the perpetual website construction “learning curve” while you do what YOU do best. Single page brochure websites start at $997.00 and go up from there depending on our clients’ needs for web hosting, number of web pages, competition and keyword research, website graphics, company logos, and consulting time. Some web services like this one and this one, take advantage of their clients’ lack of knowledge about the web to sell them cheap websites that seldom ever rank for anything. Every business owner wants to rank #1 for their product or service, but only those that put the time, effort, and money into their websites will rank well and have a client generating machine. You CAN do this yourself and significantly decrease development costs with an investment of about 1000 reading hours, or you can pay us for our time and expertise to build your web presence for you. Either way, there is no solution that is “FREE” or “Cheap” that works. The services listed below are only a partial list due to the secret (but ethical) nature of some methods and services we use to boost your website ranking. For more information just call us @ (330) 332-7984 or use our contact page. **NOTE: We reserve the right to decline new website clients that are in the same business vertical as another client who already holds a support subscription with us. It would be unethical to build competing web sites for 2 or more clients in the same business that all want to rank #1. Give us a call at the number above to see if we are available for work in your business category.
Every solution we offer for site improvement or development comes from a place of tested strategies. From local website design to internet marketing and advertising, it’s our goal to deliver a project that is both successful and cost-effective. We create professional website designs and custom e-commerce website designs that span from simple to complex depending on our clients requirements. When techy people talk about “website design” they are referring to the the layout of the website, in relation to the ease-of-use. Making a website complicated and hard to navigate is easy. It’s much harder to build a website that has a semantic, and logical layout of the information being presented, which makes it easy to use. A good CMS or Content Management System allows every day computer users to edit their website pages as easily as they would write and send an email. There are many content management systems available on the market today, some free and some with recurring licensing fees. At JADE Computer we prefer the popular and FREE website content manager called WordPress. Although there are many platforms a person can choose from when setting up their website, we prefer to use WordPress which is what over 25% of the entire internet is built on. Because of this, the code is maintained at a very high level (harder to hack), and the large number of users mean there’s a huge market for 3rd party vendors to build and extend WordPress in incredible ways. A great logo is an excellent way to establish public recognition of your businesses brand. Most companies have a logo already, but if you don’t, or your existing logo feels a little dated, let us help you re-design it. This can be a great way to re-package your company and service offerings. It can be hard to stand out in today’s crowded business marketplace. At JADE Computer Services, we take an individualized approach to our client’s needs and business goals. 
This includes ethical, tested SEO practices for small businesses that will deliver new leads. When utilizing a combination of our services such as Ohio SEO site design and paid search engine advertising (Google Adwords), we will also provide the website analytics software to help measure the success, traffic conversion, and customer value of each advertising campaign that is deployed.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for aggregate_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.core.framework import tensor_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test class AddNTest(test.TestCase): # AddN special-cases adding the first M inputs to make (N - M) divisible by 8, # after which it adds the remaining (N - M) tensors 8 at a time in a loop. # Test N in [1, 10] so we check each special-case from 1 to 9 and one # iteration of the loop. 
  # Upper bound on the number of summands tested; covers every special case
  # (1..9 inputs) plus one full iteration of the 8-at-a-time loop (see the
  # class comment above).
  _MAX_N = 10

  def _supported_types(self):
    """Return the list of dtypes AddN is tested with on this device.

    On GPU the integer types below int64 are omitted; on CPU all listed
    integer and floating/complex types are exercised.
    """
    if test.is_gpu_available():
      return [
          dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
          dtypes.complex128, dtypes.int64
      ]
    return [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
            dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64,
            dtypes.complex128]

  def _buildData(self, shape, dtype):
    """Return a random numpy array of the given shape and TF dtype."""
    data = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
    # For complex types, add an index-dependent imaginary component so we can
    # tell we got the right value.
    if dtype.is_complex:
      return data + 10j * data
    return data

  def testAddN(self):
    """add_n over 1.._MAX_N random (2, 2) tensors matches a numpy sum."""
    np.random.seed(12345)
    # NOTE(review): 'sess' is unused here (self.evaluate is used instead);
    # kept for symmetry with testUnknownShapes.
    with self.session(use_gpu=True) as sess:
      for dtype in self._supported_types():
        for count in range(1, self._MAX_N + 1):
          data = [self._buildData((2, 2), dtype) for _ in range(count)]
          actual = self.evaluate(math_ops.add_n(data))
          # Reference result: stack the inputs and reduce along the new axis.
          expected = np.sum(np.vstack(
              [np.expand_dims(d, 0) for d in data]), axis=0)
          # float16 accumulates more rounding error, so use a looser tolerance.
          tol = 5e-3 if dtype == dtypes.float16 else 5e-7
          self.assertAllClose(expected, actual, rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testUnknownShapes(self):
    """add_n works when the inputs are placeholders with unknown shapes."""
    np.random.seed(12345)
    with self.session(use_gpu=True) as sess:
      for dtype in self._supported_types():
        data = self._buildData((2, 2), dtype)
        for count in range(1, self._MAX_N + 1):
          # Placeholder with no static shape; the same value is fed 'count' times.
          data_ph = array_ops.placeholder(dtype=dtype)
          actual = sess.run(math_ops.add_n([data_ph] * count), {data_ph: data})
          expected = np.sum(np.vstack([np.expand_dims(data, 0)] * count),
                            axis=0)
          tol = 5e-3 if dtype == dtypes.float16 else 5e-7
          self.assertAllClose(expected, actual, rtol=tol, atol=tol)

  @test_util.run_deprecated_v1
  def testVariant(self):
    """add_n over DT_VARIANT scalars sums the wrapped int values."""

    def create_constant_variant(value):
      # Build a scalar variant tensor wrapping an int32 'value'.
      return constant_op.constant(
          tensor_pb2.TensorProto(
              dtype=dtypes.variant.as_datatype_enum,
              tensor_shape=tensor_shape.TensorShape([]).as_proto(),
              variant_val=[
                  tensor_pb2.VariantTensorDataProto(
                      # Match registration in variant_op_registry.cc
                      type_name=b"int",
                      metadata=np.array(value, dtype=np.int32).tobytes())
              ]))

    # TODO(ebrevdo): Re-enable use_gpu=True once non-DMA Variant
    # copying between CPU and GPU is supported.
    with self.session(use_gpu=False):
      num_tests = 127
      values = list(range(100))
      variant_consts = [create_constant_variant(x) for x in values]
      # For each test, sum a random-length selection of the variant constants.
      sum_count_indices = np.random.randint(1, 29, size=num_tests)
      sum_indices = [
          np.random.randint(100, size=count) for count in sum_count_indices]
      expected_sums = [np.sum(x) for x in sum_indices]
      variant_sums = [math_ops.add_n([variant_consts[i] for i in x])
                      for x in sum_indices]

      # We use as_string() to get the Variant DebugString for the
      # variant_sums; we know its value so we can check via string equality
      # here.
      #
      # Right now, non-numpy-compatible objects cannot be returned from a
      # session.run call; similarly, objects that can't be converted to
      # native numpy types cannot be passed to ops.convert_to_tensor.
      variant_sums_string = string_ops.as_string(variant_sums)
      self.assertAllEqual(
          variant_sums_string,
          ["Variant<type: int value: {}>".format(s).encode("utf-8")
           for s in expected_sums])


if __name__ == "__main__":
  test.main()
New feature saves time by incorporating all hotel systems and data directly within apaleo's PMS. One tab, and you’re done. It happens all too often that we start off the work day organized, and then somehow within a matter of hours (or sometimes even minutes!), we've ended up with dozens of tabs opened, with many systems running, and it takes tons of clicks and searching around to find the info we need when we need it. It's especially true for hoteliers, who work with many different systems throughout the day, and it is not pleasant. Open property management system, apaleo, already connects to any hotel application, but with its recently released apaleo One connection, data from all these pre-connected tools is available directly within apaleo's property management system. For example, take passport and ID scanning tool, Adria Scan. It recently used apaleo One to scan guest IDs and passports directly into apaleo's PMS. Just imagine…Paper forms. We love to hate them. Guests don't enjoy filling them out when ID or passport details are required upon check in. Front desk staff don't enjoy manually adding these details to their property management system. Even if the data can be scanned to reduce some of the manual work, front desk staff are left clicking between dozens of different systems to find the right information. With apaleo One, hotels use Adria Scan to scan guests' IDs or passport details and watch all the information populate directly within apaleo's PMS in seconds. No forms, no manual entry, no extra systems. Just collect a guest's passport, click to scan it, and you're done. "Using paper forms at check-in creates a clunky guest experience within the first few seconds of the guest arriving at the hotel. It also takes away time from your front desk staff. We wanted to fix that," said Andrea Stubbe, co-founder and product owner at apaleo. 
"What's also really exciting about this partnership is that all the data goes directly into apaleo's product using a new user interface integration. This reduces the amount of systems and browser tabs that the hotel needs to have open." Additional integrated partners, including Atomize, HotelAppz, and RoomPriceGenie are already building similar connections directly within apaleo's PMS, which will further reduce the amount of systems that have to be opened and clicked through at any given time. "Deep integrations add immense value for hotels, giving them the ability to use and understand guest data in real-time. Perhaps most importantly, it allows hotels to plug in the systems that are the best and most innovative in their space," said Rui Teixeira Guerra, CEO of HotelAppz. "apaleo One takes these integrations one step further by tightly connecting the systems so that hotels can see all important data in one screen rather than in multiple different browser tabs." Hotels looking to see live demos can stop by apaleo's innovation hub at ITB (reserve a spot, space is limited) or can schedule a meeting here to get to know apaleo better.
#!/usr/bin/env python
"""
Draws a transparent overlay of the forced alignment output over the input image.

Needs OpenFST bindings installed.
"""
import re
import os
import click
import unicodedata

from itertools import cycle
from collections import defaultdict  # NOTE(review): unused in the visible code
from PIL import Image, ImageDraw

# RGBA colors (50% alpha) cycled per character cut so adjacent polygons are
# visually distinguishable in the overlay.
cmap = cycle([(230, 25, 75, 127),
              (60, 180, 75, 127),
              (255, 225, 25, 127),
              (0, 130, 200, 127),
              (245, 130, 48, 127),
              (145, 30, 180, 127),
              (70, 240, 240, 127)])


def slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.
    """
    # NOTE(review): defined but not used in the visible portion of this script.
    value = unicodedata.normalize('NFKD', value)
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '-', value)
    return value


@click.command()
@click.option('-f', '--format-type', type=click.Choice(['xml', 'alto', 'page']), default='xml',
              help='Sets the input document format. In ALTO and PageXML mode all'
              'data is extracted from xml files containing both baselines, polygons, and a'
              'link to source images.')
@click.option('-i', '--model', default=None, show_default=True,
              type=click.Path(exists=True), help='Transcription model to use.')
@click.option('-o', '--output', type=click.Choice(['alto', 'pagexml', 'overlay']),
              show_default=True, default='overlay', help='Output mode. Either page or'
              ' alto for xml output, overlay for image overlays.')
@click.argument('files', nargs=-1)
def cli(format_type, model, output, files):
    """
    A script producing overlays of lines and regions from either ALTO or
    PageXML files or run a model to do the same.
    """
    # With no input files, print the command help and exit.
    if len(files) == 0:
        ctx = click.get_current_context()
        click.echo(ctx.get_help())
        ctx.exit()

    # Heavy imports deferred so '--help' stays fast.
    from PIL import Image, ImageDraw
    from kraken.lib import models, xml
    from kraken import align, serialization

    # Select the parser for the requested document format.
    if format_type == 'xml':
        fn = xml.parse_xml
    elif format_type == 'alto':
        # NOTE(review): 'parse_palto' looks like a typo for 'parse_alto' —
        # confirm against the kraken.lib.xml module before changing.
        fn = xml.parse_palto
    else:
        fn = xml.parse_page

    click.echo(f'Loading model {model}')
    net = models.load_any(model)

    for doc in files:
        click.echo(f'Processing {doc} ', nl=False)
        data = fn(doc)
        im = Image.open(data['image']).convert('RGBA')
        # Forced alignment of the transcription against the line images.
        records = align.forced_align(data, net)
        if output == 'overlay':
            # Draw each character cut as a semi-transparent polygon on a
            # separate layer, then composite it over the source image.
            tmp = Image.new('RGBA', im.size, (0, 0, 0, 0))
            draw = ImageDraw.Draw(tmp)
            for record in records:
                for pol in record.cuts:
                    c = next(cmap)
                    draw.polygon([tuple(x) for x in pol], fill=c, outline=c[:3])
            base_image = Image.alpha_composite(im, tmp)
            base_image.save(f'high_{os.path.basename(doc)}_algn.png')
        else:
            # XML output: serialize the aligned records with the chosen template.
            with open(f'{os.path.basename(doc)}_algn.xml', 'w') as fp:
                fp.write(serialization.serialize(records,
                                                 image_name=data['image'],
                                                 regions=data['regions'],
                                                 template=output))
        click.secho('\u2713', fg='green')


if __name__ == '__main__':
    cli()
JM Eagle’s Ultra Corr and Ultra Rib (corrugated) pipe products are suitable for sanitary sewer and storm drain applications. Ultra Rib sewer pipe, Ultra Corr sewer pipe and Ultra Rib storm drain pipe offer increased load-bearing and system capacity at a reasonable cost. They are unaffected by the fluids found in ordinary domestic sewage; sewer gasses and the sulfuric acid generated by the completion of the hydrogen sulfide cycle; and corrosive soils both alkaline and acidic. They resist abrasion, gouging and scouring far better than most common piping materials. The pipes’ interiors stay smooth over long years of service while maximizing system capacity, allowing for savings in pumping costs, as well as savings on the size of the pipe required. The light weight of the pipes reduces manpower required for installation. Gasketed Joints 8" - 27" Gasketed Joints 18" - 24" Gasketed Joints 24" - 36" JM Eagle™ realizes the growing demand for an effective all-out attack on water pollution, highlighting the need for improved collection systems. A modern system needs pipe with improved design for reserve strength and stiffness to increase load-bearing capacity—all within the framework of maximizing system capacity at a reasonable cost. JM Eagle™ Ultra Rib™ Sewer Pipe, Ultra Corr™ Sewer Pipe, and Ultra Rib™ Storm Drain pipe are designed to meet this need.
import parole
from parole.colornames import colors
from parole.display import interpolateRGB
import pygame, random

import sim_items
from util import *


class Dagger(sim_items.Weapon):
    """A small, fast, low-damage bladed weapon with a hit bonus.

    NOTE(review): the super().__init__ call below is purely positional; the
    argument meanings are taken from the inline comments and should be
    confirmed against sim_items.Weapon's signature before reordering anything.
    """

    def __init__(self):
        # Stat modifiers applied while this weapon is wielded: easier to hit
        # (+4) but damage skewed toward the low end (-4).
        bonuses = {
            'hitMod': +4,
            'damageSkewMod': -4,
        }
        # TODO: depth, materials, etc.
        sim_items.Weapon.__init__(self, 'dagger', # base name
                parole.map.AsciiTile('(', colors['Silver']), # symbol
                1, # weight
                800, # wield energy
                850, # attack energy
                30, # maximum damage. a wielder with avg stats will average half
                    # this much damage
                bonuses,
                'stab', # verb
                False, # startsVowel
                unidDescription="A small bladed weapon perhaps better suited "\
                "for a bar fight than dungeon crawling.",
                projectileDamage=25
                )

#========================================

# Class exported to the item system; presumably discovered by the game's item
# loader via this module-level name — confirm against the loader.
thingClass = Dagger
Best price koyo 61905-2rz bearing in Botswana are widely used in industrial drive, agriculture, compressors, motors and generators, construction, industrial fan, industrial transmission and other fields. Best price koyo 61905-2rz bearing in Botswana can ensure high quality for more than 80% raw material of Best price koyo 61905-2rz bearing in Botswana provided by the steel plant. Explorer series bearing of Best price koyo 61905-2rz bearing in Botswana are better than any brand bearings currently in bearing performance , lifetime, design, and the use of performance. Explorer series bearings of Best price koyo 61905-2rz bearing in Botswana have more advantages ,such as higher meet rated , lower noise, reducing warranty costs greatly, increasing running time of the machine.
# -*- coding: utf-8 -*- import os import time import string import datetime try: from hashlib import md5 except ImportError: from md5 import md5 class dict_to_object(dict): def __getattr__(self, key): try: return self[key] except: return '' def __setattr__(self, key, value): self[key] = value def is_int(s): for i in s: if i not in "1234567890": return False return True def isset(v): try: type (eval(v)) except: return False else: return True def check_str(cstr): return filter(lambda st: st not in " '\";()<>[]", cstr) def cut_str(cstr): pass def timestamp(): return time.time() def now(): return time.localtime() def micro_time(): return datetime.datetime.now() def format_time(tformat = "%Y-%m-%d %X", ttime = None): if not ttime: ttime = now() return time.strftime(tformat, ttime) def hash_md5(s): m = md5(s) m.digest() return m.hexdigest() def rand_name(s): if not isinstance(s, unicode): s = s.decode('utf-8') return hash_md5("%s-%f" % (s, timestamp())) def is_file_exist(filepath): return os.path.isfile(filepath) def file_real_path(filename, subdir = ''): folder = os.path.join(os.path.dirname(__file__), subdir) if not os.path.exists(folder): os.makedirs(folder) return '%s/%s' % (folder, filename) if __name__ == "__main__": print ''
I’ve been working on a customer quilt this past weekend. My customer did her version of the pattern Coneflower Garden by Barbara Persing and Mary Hoover. She decided she didn’t want the whole thing so she cropped it down and just made a portion of the pattern. This is the second pattern she has completed from this pattern company. The first was Summer Blooms and you can find the post here. Aren’t these beautiful patterns? I finished the secret project quilt top and now I am working on the quilting design. I have worked out about half of it still undecided about some of it. I may have to just load it on the frame and start and hope inspiration strikes as I am working through the quilting as this needs to be completed by next week. We are finally getting a little warmer weather here in Victoria BC this week. Our backyard is small and our townhouse unit faces North so we don’t get a lot of sun at the back of the house however we do get sun in the morning until late afternoon in the back yard. Cocoa loves the sun and will find whatever patch he can get his paws on. Here he is in the late afternoon lying in the last sliver of sunshine in the backyard. Well another week has gone by, where does the time go. I try to think back on my week and what I have been doing and it all seems to be a blur. The Victoria Quilt Guild had their semi-annual quilt show this past weekend. I went and had a look around on the Friday. The show was a good one. Lots of great eye candy and plenty of vendors to spend your money at if you wished. I am very proud of myself, I actually came away without spending any money at all. Why you might ask, because although there was some really good deals and lots of great things to buy, I simply don’t need anything quilt related at this time. I have a whole studio full of fabric, thread and notions some of which are still in the packaging. I did pop into the Library branch that was next door to the show venue and borrow 3 books on entrepreneurship.
Before the quilt show I quilted a quilt made by Susan McGregor that was to hang in the Bib n’ Tucker booth. The name of the quilt pattern is Summer Blooms by Barbara Persing and Mary Hoover. It turned out great and the pattern sold out at the Bib n’ Tucker booth quickly although I believe Gail is ordering more of the patterns in. I really enjoyed quilting this pattern it was a lot of fun and the big colourful blossoms were so cheery to work on. I may have to get this pattern and make this quilt up so I have it hanging in my studio. I am currently quilting a quilt top I have had hanging around for a while. It is one of my own designs made from some leftover blocks I had. Not one to waste things I turned them into a quilt. Saturday is quilting with my friends at the clubhouse here at my townhouse complex. Once a month about 14 women get together and enjoy catching up and sewing away. It is always a good time and I am looking forward to it.
import numpy
from orbkit.qcinfo import QCinfo
from orbkit.orbitals import AOClass, MOClass
from orbkit.units import aa_to_a0, ev_to_ha
from orbkit.display import display
from orbkit.tools import l_deg, lquant, get_atom_symbol
from importlib import import_module


def read_with_cclib(filename, cclib_parser=None, all_mo=False, spin=None,
                    **kwargs):
  '''Reads all information desired using cclib.

  **Parameters:**

    filename : str
      Specifies the filename for the input file.
    cclib_parser : str
      If itype is 'cclib', specifies the cclib.parser.
    all_mo : bool, optional
      If True, all molecular orbitals are returned.
    spin : {None, 'alpha', or 'beta'}, optional
      If not None, returns exclusively 'alpha' or 'beta' molecular orbitals.

  **Returns:**

    qc (class QCinfo) with attributes geo_spec, geo_info, ao_spec, mo_spec, etot :
        See :ref:`Central Variables` for details.
  '''
  #Maybe we actually don't need this
  #Can someone check if cclib can handle
  #file descriptors?
  assert isinstance(filename, str)

  # The parser name is mandatory: it selects which cclib parser module to load.
  if not isinstance(cclib_parser,str):
    raise IOError('cclib requires the specification of parser, e.g., ' +
                  'cclib_parser="Gaussian".')

  # Molpro output is known to be mis-parsed by cclib; point the user at the
  # molden route instead (this only warns, it does not abort).
  if cclib_parser == 'Molpro':
    display('\nThe Molpro basis set is not properly read by the cclib parser.')
    display('Please create a molden file with Molpro, i.e., ' +
            '\n\tput,molden,output.molden,NEW;\n')

  # Map the user-facing parser name to its cclib submodule name.
  parsedic = {'Gaussian': 'gaussianparser',
              'Gamess': 'gamessparser',
              'Orca': 'orcaparser'}
  module = import_module('cclib.parser.{}'.format(parsedic[cclib_parser]))
  # cclib exposes the parser classes as e.g. GAMESS/ORCA (upper case) but
  # Gaussian keeps its mixed-case name.
  if cclib_parser != 'Gaussian':
    cclib_parser = cclib_parser.upper()
  parser = getattr(module,cclib_parser)(filename)
  ccData = parser.parse()
  return convert_cclib(ccData, all_mo=all_mo, spin=spin)


def convert_cclib(ccData, all_mo=False, spin=None):
  '''Converts a ccData class created by cclib to an instance of
  orbkit's QCinfo class.

  **Parameters:**

    ccData : class
      Contains the input data created by cclib.
    all_mo : bool, optional
      If True, all molecular orbitals are returned.
    spin : {None, 'alpha', or 'beta'}, optional
      If not None, returns exclusively 'alpha' or 'beta' molecular orbitals.

  **Returns:**

    qc (class QCinfo) with attributes geo_spec, geo_info, ao_spec, mo_spec, etot :
        See :ref:`Central Variables` for details.
  '''
  # Initialize the variables
  qc = QCinfo()
  qc.ao_spec = AOClass([])
  qc.mo_spec = MOClass([])

  # Converting all information concerning atoms and geometry.
  # Only the first geometry is used; coordinates are converted from
  # Angstrom to Bohr (aa_to_a0).
  qc.geo_spec = ccData.atomcoords[0] * aa_to_a0
  for ii in range(ccData.natom):
    symbol = get_atom_symbol(atom=ccData.atomnos[ii])
    qc.geo_info.append([symbol,str(ii+1),str(ccData.atomnos[ii])])

  # Convert geo_info and geo_spec to numpy.ndarrays
  qc.format_geo()

  # Converting all information about atomic basis set.
  # gbasis[atom][shell] = (type, [(exponent, coefficient), ...])
  for ii in range(ccData.natom):
    for jj in range(len(ccData.gbasis[ii])):
      pnum = len(ccData.gbasis[ii][jj][1])  # number of primitives
      qc.ao_spec.append({'atom': ii,
                         'type': str(ccData.gbasis[ii][jj][0]).lower(),
                         'pnum': pnum,
                         'coeffs': numpy.zeros((pnum, 2))
                         })
      for kk in range(pnum):
        # column 0: exponent, column 1: contraction coefficient
        qc.ao_spec[-1]['coeffs'][kk][0] = ccData.gbasis[ii][jj][1][kk][0]
        qc.ao_spec[-1]['coeffs'][kk][1] = ccData.gbasis[ii][jj][1][kk][1]

  if hasattr(ccData,'aonames'): # Reconstruct exponents list for ao_spec
    # Spherical-harmonic AO labels contain '+'/'-' (e.g. d+1, d-2);
    # Cartesian labels do not.
    cartesian_basis = True
    for i in ccData.aonames:
      if '+' in i or '-' in i:
        cartesian_basis = False

    if not cartesian_basis:
      qc.ao_spec.spherical = True

    count = 0
    for i,ao in enumerate(qc.ao_spec):
      # degeneracy of this angular momentum in the chosen basis convention
      l = l_deg(lquant[ao['type']],cartesian_basis=cartesian_basis)
      if cartesian_basis:
        ao['lxlylz'] = []
      else:
        ao['lm'] = []
      for ll in range(l):
        if cartesian_basis:
          # Cartesian exponents (lx,ly,lz) read off from label letter counts,
          # e.g. 'xxy' -> (2,1,0).
          ao['lxlylz'].append((ccData.aonames[count].lower().count('x'),
                               ccData.aonames[count].lower().count('y'),
                               ccData.aonames[count].lower().count('z')))
        else:
          # Parse the magnetic quantum number m from labels such as 'd+1'.
          # Bare 'x'/'y'/'z' p-labels are mapped via 'yzx'.find -> m in
          # {-1, 0, +1}.
          m = ccData.aonames[count].lower().split('_')[-1]
          m = m.replace('+',' +').replace('-',' -').replace('s','s 0').split(' ')
          p = 'yzx'.find(m[0][-1])
          if p != -1:
            m = p - 1
          else:
            m = int(m[-1])
          ao['lm'].append((lquant[ao['type']],m))
        count += 1

  # Converting all information about molecular orbitals.
  # Total number of (non-core) electrons to distribute over the MOs.
  ele_num = numpy.sum(ccData.atomnos) - numpy.sum(ccData.coreelectrons) - ccData.charge
  ue = (ccData.mult-1)  # number of unpaired electrons (multiplicity - 1)
  # Check for natural orbitals and occupation numbers
  is_natorb = False
  if hasattr(ccData,'nocoeffs'):
    if not hasattr(ccData,'nooccnos'):
      raise IOError('There are natural orbital coefficients (`nocoeffs`) in the cclib' +
                    ' ccData, but no natural occupation numbers (`nooccnos`)!')
    is_natorb = True

  # One symmetry list -> restricted calculation; two -> alpha/beta sets.
  restricted = (len(ccData.mosyms) == 1)
  if spin is not None:
    if spin != 'alpha' and spin != 'beta':
      raise IOError('`spin=%s` is not a valid option' % spin)
    elif restricted:
      raise IOError('The keyword `spin` is only supported for unrestricted calculations.')
    else:
      # NOTE(review): this attribute access has no effect — it looks like it
      # was meant to set a spin-polarization flag on mo_spec; confirm against
      # MOClass's interface.
      qc.mo_spec.spinpola
      display('Converting only molecular orbitals of spin %s.' % spin)

  sym = {}
  if len(ccData.mosyms) == 1:
    add = ['']
    orb_sym = [None]
  else:
    # Suffixes distinguishing alpha/beta MOs in the symmetry labels.
    add = ['_a','_b']
    orb_sym = ['alpha','beta']

  nmo = ccData.nmo if hasattr(ccData,'nmo') else len(ccData.mocoeffs[0])
  for ii in range(nmo):
    for i,j in enumerate(add):
      # Running index per symmetry label, giving labels like '3.a1'.
      a = '%s%s' % (ccData.mosyms[i][ii],j)
      if a not in sym.keys():
        sym[a] = 1
      else:
        sym[a] += 1
      if is_natorb:
        occ_num = ccData.nooccnos[ii]
      elif not restricted:
        # Unrestricted: singly occupied up to (and including) the HOMO index.
        occ_num = 1.0 if ii <= ccData.homos[i] else 0.0
      elif ele_num > ue:
        # Restricted: doubly occupy while enough paired electrons remain.
        occ_num = 2.0
        ele_num -= 2.0
      elif ele_num > 0.0 and ele_num <= ue:
        # Then distribute the remaining unpaired electrons singly.
        occ_num = 1.0
        ele_num -= 1.0
        ue -= 1.0
      else:
        occ_num = 0.0
      qc.mo_spec.append({'coeffs': (ccData.nocoeffs if is_natorb else ccData.mocoeffs[i])[ii],
                         'energy': 0.0 if is_natorb else ccData.moenergies[i][ii]*ev_to_ha,
                         'occ_num': occ_num,
                         'sym': '%d.%s' %(sym[a],a)
                         })
      if orb_sym[i] is not None:
        qc.mo_spec[-1]['spin'] = orb_sym[i]
        # Drop MOs of the spin channel the caller did not ask for.
        if spin is not None and spin != orb_sym[i]:
          del qc.mo_spec[-1]

  # Use default order for atomic basis functions if aonames is not present
  if not hasattr(ccData,'aonames'):
    display('The attribute `aonames` is not present in the parsed data.')
    display('Using the default order of basis functions.')

    # Check which basis functions have been used: compare the MO coefficient
    # count against the Cartesian and spherical basis-function counts.
    c_cart = sum([l_deg(l=ao['type'], cartesian_basis=True) for ao in qc.ao_spec])
    c_sph = sum([l_deg(l=ao['type'], cartesian_basis=False) for ao in qc.ao_spec])

    c = qc.mo_spec.get_coeffs().shape[-1]
    if c != c_cart and c == c_sph: # Spherical basis
      qc.ao_spec.set_lm_dict(p=[0,1])
    elif c != c_cart:
      display('Warning: The basis set type does not match with pure spherical ' +
              'or pure Cartesian basis!')
      display('Please specify qc.ao_spec["lxlylz"] and/or qc.ao_spec["lm"] by your self.')

  # Are all MOs requested for the calculation?
  # Iterate backwards so deletions do not shift pending indices.
  if not all_mo:
    for i in range(len(qc.mo_spec))[::-1]:
      if qc.mo_spec[i]['occ_num'] < 0.0000001:
        del qc.mo_spec[i]

  qc.mo_spec.update()
  qc.ao_spec.update()
  return qc
Linda Sullivan has been in the sales business for more than two decades and has a unique and innovative flair for design. Extensive experience running a successful business, managing and motivating people, and working to tight deadlines places Linda in good stead for a career that leads the industry in volume sold and prices achieved. Linda’s professionalism, passion, integrity, sense of humour and love for her own local area of Dalkeith mean she is well placed to deliver her clients results well above market expectations. Responsible for some of the highest results in the western suburbs, there is no one more effective at delivering results that set new levels of excellence.
from ..Workbook import Workbook
import time
import numpy
import nose
import os
from datetime import datetime
from nose.tools import eq_
from .utils import get_output_path


def test_get_xml_data():
    """Values written to cells can be read back unchanged."""
    wb = Workbook()
    ws = wb.new_sheet("Test")
    ws[1][1].value = 1
    eq_(ws[1][1].value, 1)
    ws[1][3].value = 3
    eq_(ws[1][3].value, 3)


def test_save():
    """A sheet filled from a nested list saves without error."""
    ROWS = 65
    COLUMNS = 100
    wb = Workbook()
    testData = [[1] * COLUMNS] * ROWS
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the documented replacement for interval timing.
    stime = time.perf_counter()
    ws = wb.new_sheet("Test 1", data=testData)
    wb.save(get_output_path("test.xlsx"))
    #print("%s, %s, %s" % (ROWS, COLUMNS, time.perf_counter() - stime))


def test_formulas():
    """Formula strings and datetimes are accepted as cell values."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws[1][1].value = 1
    ws[1][2].value = 2
    ws[1][3].value = '=SUM(A1,B1)'
    ws[1][4].value = datetime.now()
    ws[1][5].value = datetime(1900,1,1,1,0,0)
    wb.save(get_output_path("formula-test.xlsx"))


def test_merge():
    """Merged cells share a single value; writing either updates both."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws[1][1].value = "asdf"
    ws.range("A1", "B1").merge()
    eq_(ws[1][2].value, ws[1][1].value)
    ws[1][2].value = "qwer"
    eq_(ws[1][2].value, ws[1][1].value)
    wb.save(get_output_path("merge-test.xlsx"))


def test_cell():
    """A1-style cell addressing maps to row/column indexing."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws.cell("C3").value = "test"
    eq_(ws[3][3].value, "test")


def test_range():
    """Assigning a nested list to a range fills it row-major."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws.range("B2", "D3").value = [[1, 2, 3], [4, 5, 6]]
    eq_(ws[2][2].value, 1)
    eq_(ws[2][3].value, 2)
    eq_(ws[2][4].value, 3)
    eq_(ws[3][2].value, 4)
    eq_(ws[3][3].value, 5)
    eq_(ws[3][4].value, 6)


def test_numpy_range():
    """A numpy array can be assigned to a range and saved."""
    wb = Workbook()
    ws = wb.new_sheet("test")
    ws.range("A1", "GN13").value = numpy.zeros((13,196))
    wb.save(get_output_path("numpy-range-test.xlsx"))


def test_none():
    """None cell values and styling coexist when saving."""
    testData = [[1,2,None]]
    wb = Workbook()
    ws = wb.new_sheet("Test 1", data=testData)
    ws[1][1].style.font.bold = True
    wb.save(get_output_path("none-test.xlsx"))


def test_number_precision():
    """Floats of increasing precision survive a save/load round trip.

    Requires xlrd to read the file back; skipped when unavailable.
    """
    try:
        import xlrd
    except ImportError:
        raise nose.SkipTest('xlrd not installed')

    filename = get_output_path('precision.xlsx')
    sheetname = 'Sheet1'

    nums = [
        1,
        1.2,
        1.23,
        1.234,
        1.2345,
        1.23456,
        1.234567,
        1.2345678,
        1.23456789,
        1.234567890,
        1.2345678901,
        1.23456789012,
        1.234567890123,
        1.2345678901234,
        1.23456789012345,
    ]

    write_workbook = Workbook()
    write_worksheet = write_workbook.new_sheet(sheetname)
    for index, value in enumerate(nums):
        write_worksheet[index + 1][1].value = value
    write_workbook.save(filename)

    read_workbook = xlrd.open_workbook(filename)
    read_worksheet = read_workbook.sheet_by_name(sheetname)
    for row_num in range(len(nums)):
        expected = nums[row_num]
        got = read_worksheet.cell(row_num, 0).value
        # BUG FIX: `expected`/`got` were computed but never compared, so the
        # test could not fail. Assert the round-tripped value matches.
        eq_(got, expected)

    # Clean up the generated file after the comparison loop.
    if os.path.exists(filename):
        os.remove(filename)
Nigeria is the hub for the West Africa sub region. Murtala Mohammed International Airport is the busiest airport in Nigeria, and most international flights come in through Murtala Mohammed. Other international airports include Nnamdi Azikiwe International Airport Abuja, Aminu Kano International Airport Kano and Port Harcourt International Airport. Most major international airlines fly into Nigeria, including British Airways, Virgin Atlantic, Lufthansa, KLM, Etihad, Air France, Emirates, Qatar Airways, Turkish Airlines, South African Airways, Kenya Airways, Ethiopian Airlines, etc. Or by sea, through the ports of Lagos (Apapa and Tin Can), Port Harcourt, Koko, Burutu, Warri, and Calabar. By road, you can come into Nigeria through several authorized border posts from Benin Republic, Niger Republic, and the Republic of Cameroon. Be it for conferences, business, sports, or other reasons, you can visit Nigeria anytime. However, there are two distinct seasons — the rainy and the dry. To all intents and purposes, the dry season, which begins generally late in October and runs into late February or early March in the south, and April in the north, is the best time to visit Nigeria for leisure. All visitors to Nigeria must carry a valid national passport or other internationally recognized travel documents endorsed for travel to Nigeria. Citizens of ECOWAS states are allowed to enter Nigeria without a visa, subject to their stay not exceeding three months. Requests for multiple entry visas will be granted without delay for other nationals provided they meet the requirements for entry into Nigeria. The above entry regulations are subject to change. Cholera and smallpox vaccinations are required for visitors entering Nigeria. Yellow fever vaccinations are required for arrivals from infected areas. Children under the age of one year are, however, exempted. No AIDS-free test certificates are required.
Visitors from Europe and North America should take precautionary steps against malaria fever, which is prevalent in the rainy season from May to October. Visitors to Nigeria are allowed four liters of spirits and 200 cigarettes duty free; personal effects such as cameras, watches, pens, lighters, and cosmetics are allowed duty free within reasonable quantity. Other goods, such as video equipment, are dutiable and visitors are expected to pay the duties on the spot. If in doubt, ask to see a senior customs officer for clearance. Illegal drugs of all descriptions are not allowed into Nigeria. You can roam with your mobile phone in Nigeria on most mobile networks. Confirm roaming charges with your network provider before your trip. If you plan to stay long in Nigeria, you can purchase a SIM card for one of the major Nigerian telecoms companies — MTN, Glo, Airtel and Etisalat — from major outlets. You will need a form of ID and an address in order to register your SIM and activate your line. As in many countries, medical services or hospital care are paid for by visitors themselves. If any travel insurance plan exists in your country of origin, please take one. Many international HMOs have offices or representatives in Nigeria, and hence are able to provide health insurance cover for policy holders travelling to Nigeria. Find out from your provider before your trip whether you have cover while visiting Nigeria. Private clinics and hospitals staffed by qualified practitioners exist, even in rural towns. Government-owned hospitals and teaching hospitals exist in major towns. The mains voltage in Nigeria is 220 volts. If you bring along any electrical equipment rated at 110 volts, remember to bring along a converter. Nigeria operates a metric system, though the Imperial measure is still commonly referred to. Cloth is sold in meters and yards, beef in kilograms, and petrol in liters. Distance is marked in kilometers, and temperature in centigrade.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Office MRUs Windows Registry plugin."""

import unittest

from plaso.lib import definitions
from plaso.parsers.winreg_plugins import officemru

from tests.parsers.winreg_plugins import test_lib


class OfficeMRUPluginTest(test_lib.RegistryPluginTestCase):
  """Tests for the Microsoft Office MRUs Windows Registry plugin."""

  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = officemru.OfficeMRUPlugin()

    # The plugin should match the File MRU and Place MRU keys of every
    # Office 14.0 application covered below.
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
        'Access\\File MRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
        'Access\\Place MRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
        'Excel\\File MRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
        'Excel\\Place MRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
        'PowerPoint\\File MRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
        'PowerPoint\\Place MRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
        'Word\\File MRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\'
        'Word\\Place MRU')
    self._AssertFiltersOnKeyPath(plugin, key_path)

    # An unrelated key path must not match.
    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')

  def testProcess(self):
    """Tests the Process function."""
    test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\14.0\\Word\\'
        'File MRU')

    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)

    plugin = officemru.OfficeMRUPlugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)

    # 5 MRU entry events plus 1 MRU list event from the fixture key.
    self.assertEqual(storage_writer.number_of_events, 6)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)

    events = list(storage_writer.GetEvents())

    expected_event_values = {
        'date_time': '2012-03-13 18:27:15.0898020',
        'data_type': 'windows:registry:office_mru_list',
        'entries': (
            'Item 1: [F00000000][T01CD0146EA1EADB0][O00000000]*'
            'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
            'SA-23E Mitchell-Hyundyne Starfury.docx '
            'Item 2: [F00000000][T01CD00921FC127F0][O00000000]*'
            'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\Earthforce '
            'SA-26 Thunderbolt Star Fury.docx '
            'Item 3: [F00000000][T01CD009208780140][O00000000]*'
            'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\StarFury.docx '
            'Item 4: [F00000000][T01CCFE0B22DA9EF0][O00000000]*'
            'C:\\Users\\nfury\\Documents\\VIBRANIUM.docx '
            'Item 5: [F00000000][T01CCFCBA595DFC30][O00000000]*'
            'C:\\Users\\nfury\\Documents\\ADAMANTIUM-Background.docx'),
        # This should just be the plugin name, as we're invoking it directly,
        # and not through the parser.
        'parser': plugin.NAME,
        'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN}

    self.CheckEventValues(storage_writer, events[5], expected_event_values)

    # Test OfficeMRUWindowsRegistryEvent.
    expected_value_string = (
        '[F00000000][T01CD0146EA1EADB0][O00000000]*'
        'C:\\Users\\nfury\\Documents\\StarFury\\StarFury\\'
        'SA-23E Mitchell-Hyundyne Starfury.docx')

    expected_event_values = {
        'date_time': '2012-03-13 18:27:15.0830000',
        'data_type': 'windows:registry:office_mru',
        'key_path': key_path,
        'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN,
        'value_string': expected_value_string}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)


if __name__ == '__main__':
  unittest.main()
This term the children will be discussing the NSPCC’s PANTS rules in our PSHE lessons. During these discussions we will aim to teach our pupils the following important safety skills without giving explicit information or telling scary stories. We will be teaching our pupils how to stay safe by learning the PANTS rules, which the NSPCC have developed to be like a Green Cross Code for staying safe from sexual abuse. • Always remember your body belongs to you. • Talk about secrets that upset you. If you would like to know more about the NSPCC’s campaign and learn how you can help keep your children safe in partnership with us, more information can be found at nspcc.org.uk/pants. If you have any questions or concerns please don’t hesitate to get in touch.
""" @name TemperatureConversion.py @file TemperatureConversion.py @author Ernest Yeung @date 20150913 @email ernestyalumni@gmail.com @brief I implement temperature conversion with symbolic computation in sympy @ref @details @copyright If you find this code useful, feel free to donate directly and easily at this direct PayPal link: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted which won't go through a 3rd. party such as indiegogo, kickstarter, patreon. Otherwise, I receive emails and messages on how all my (free) material on physics, math, and engineering have helped students with their studies, and I know what it's like to not have money as a student, but love physics (or math, sciences, etc.), so I am committed to keeping all my material open-source and free, whether or not sufficiently crowdfunded, under the open-source MIT license: feel free to copy, edit, paste, make your own versions, share, use as you wish. Peace out, never give up! -EY """ import sympy from sympy import Eq from sympy import Rational as Rat from sympy import symbols from sympy.solvers import solve T_F, T_C, T_K = symbols("T_F T_C T_K", real=True) FahrenheitCelsiusConversion = Eq(T_F, T_C * (Rat(9) / Rat(5)) + Rat(32)) KelvinCelsiusConversion = Eq(T_K, T_C + 273.15)
A BIRKENSTOCK premiere. For the first time ever, the Gizeh classic thong sandal is available in a vegan version. This means that no animal components whatsoever have been used during the manufacturing process. The vegan material composition is analyzed by independent testing laboratories. Vegan footwear that’s the height of fashion: the BIRKENSTOCK Gizeh classic thong sandal.
# This script is run by the PanDA pilot # Its purpose is to extract the VmPeak, its average and the rss value from the *.pmon.gz file(s), and # place these values in a file (VmPeak_values.txt) which in turn is read back by the pilot # VmPeak_values.txt has the format: # <VmPeak max>,<VmPeam max mean>,<rss mean> # Note that for a composite trf, there are multiple *.pmon.gz files. The values reported are the max values of all files (thus the 'max average') # Prerequisite: the ATLAS environment needs to have been setup before the script is run import os def processFiles(): """ Process the PerfMon files using PerfMonComps """ vmem_peak_max = 0 vmem_mean_max = 0 rss_mean_max = 0 # get list of all PerfMon files from glob import glob file_list = glob("*.pmon.gz") if file_list != []: # loop over all files for file_name in file_list: # process this file using PerfMonComps print "[Pilot VmPeak] Processing file: %s" % (file_name) info = PerfMonComps.PMonSD.parse(file_name) if info: vmem_peak = info[0]['special']['values']['vmem_peak'] vmem_mean = info[0]['special']['values']['vmem_mean'] rss_mean = info[0]['special']['values']['rss_mean'] print "[Pilot VmPeak] vmem_peak = %.1f, vmem_mean = %.1f, rss_mean = %.1f" % (vmem_peak, vmem_mean, rss_mean) if vmem_peak > vmem_peak_max: vmem_peak_max = vmem_peak if vmem_mean > vmem_mean_max: vmem_mean_max = vmem_mean if rss_mean > rss_mean_max: rss_mean_max = rss_mean else: print "!!WARNING!!1212!! 
PerfMonComps.PMonSD.parse returned None while parsing file %s" % (file_name) # convert to integers vmem_peak_max = int(vmem_peak_max) vmem_mean_max = int(vmem_mean_max) rss_mean_max = int(rss_mean_max) else: print "[Pilot VmPeak] Did not find any PerfMon log files" return vmem_peak_max, vmem_mean_max, rss_mean_max def dumpValues(vmem_peak_max, vmem_mean_max, rss_mean_max): """ Create the VmPeak_values.txt file""" file_name = os.path.join(os.getcwd(), "VmPeak_values.txt") print "[Pilot VmPeak] Creating file: %s" % (file_name) try: f = open(file_name, "w") except OSError, e: print "[Pilot VmPeak] Could not create %s" % (file_name) else: s = "%d,%d,%d" % (vmem_peak_max, vmem_mean_max, rss_mean_max) f.write(s) f.close() print "[Pilot VmPeak] Wrote values to file %s" % (file_name) # main function if __name__ == "__main__": try: import PerfMonComps.PMonSD except Exception, e: print "Failed to import PerfMonComps.PMonSD: %s" % (e) print "Aborting VmPeak script" else: vmem_peak_max, vmem_mean_max, rss_mean_max = processFiles() if vmem_peak_max == 0 and vmem_mean_max == 0 and rss_mean_max == 0: print "[Pilot VmPeak] All VmPeak and RSS values zero, will not create VmPeak values file" else: print "[Pilot VmPeak] vmem_peak_max = %d, vmem_mean_max = %d, rss_mean_max = %d" % (vmem_peak_max, vmem_mean_max, rss_mean_max) # create the VmPeak_values.txt file dumpValues(vmem_peak_max, vmem_mean_max, rss_mean_max) print "[Pilot VmPeak] Done"
Irish Separatism is a letter written by Arthur Conan Doyle, first published in The Times on 9 September 1921. Sir, — I do not think that it has been brought home to the Irishman that in case the separation proposition was adopted he would himself be a foreigner within that British Empire which Irish soldiers and Irish Colonists have helped to build. His status in Australia or Canada would be that of the Russian or the Slovak — a foreign immigrant, who was there on sufferance. It would equally affect his status within Great Britain, where new-comers at any rate would have to be nationalised and renounce their mother country before they could have the vote. I am sure that there are many of us who are of Irish extraction, and who have supported Home Rule, who would view with horror such a position. Might I suggest that a petition be drawn up expressing these sentiments, that it be signed by men and women of Irish extraction who have supported the Home Rule cause, and that it be then forwarded either to Mr. de Valera or to the Speaker of Dáil Éireann? This page was last edited on 22 July 2017, at 23:27.
""" Base association measure estimators. """ from numpy import mean, prod, triu, ones, dot, sum, maximum, all from scipy.special import binom from ite.cost.x_initialization import InitX from ite.cost.x_verification import VerOneDSubspaces, VerCompSubspaceDims from ite.shared import copula_transformation class BASpearman1(InitX, VerOneDSubspaces, VerCompSubspaceDims): """ Estimator of the first multivariate extension of Spearman's rho. Initialization is inherited from 'InitX', verification capabilities come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py', 'ite.cost.x_verification.py'). Examples -------- >>> import ite >>> co = ite.cost.BASpearman1() """ def estimation(self, y, ds=None): """ Estimate the first multivariate extension of Spearman's rho. Parameters ---------- y : (number of samples, dimension)-ndarray One row of y corresponds to one sample. ds : int vector, vector of ones ds[i] = 1 (for all i): the i^th subspace is one-dimensional. If ds is not given (ds=None), the vector of ones [ds = ones(y.shape[1],dtype='int')] is emulated inside the function. Returns ------- a : float Estimated first multivariate extension of Spearman's rho. References ---------- Friedrich Shmid, Rafael Schmidt, Thomas Blumentritt, Sandra Gaiser, and Martin Ruppert. Copula Theory and Its Applications, Chapter Copula based Measures of Multivariate Association. Lecture Notes in Statistics. Springer, 2010. Friedrich Schmid and Rafael Schmidt. Multivariate extensions of Spearman's rho and related statistics. Statistics & Probability Letters, 77:407-416, 2007. Roger B. Nelsen. Nonparametric measures of multivariate association. Lecture Notes-Monograph Series, Distributions with Fixed Marginals and Related Topics, 28:223-232, 1996. Edward F. Wolff. N-dimensional measures of dependence. Stochastica, 4:175-188, 1980. C. Spearman. The proof and measurement of association between two things. The American Journal of Psychology, 15:72-101, 1904. 
Examples -------- a = co.estimation(y,ds) """ if ds is None: # emulate 'ds = vector of ones' ds = ones(y.shape[1], dtype='int') # verification: self.verification_compatible_subspace_dimensions(y, ds) self.verification_one_dimensional_subspaces(ds) dim = y.shape[1] # dimension u = copula_transformation(y) h = (dim + 1) / (2**dim - (dim + 1)) # h_rho(dim) a = h * (2**dim * mean(prod(1 - u, axis=1)) - 1) return a class BASpearman2(InitX, VerOneDSubspaces, VerCompSubspaceDims): """ Estimator of the second multivariate extension of Spearman's rho. Initialization is inherited from 'InitX', verification capabilities come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py', 'ite.cost.x_verification.py'). Examples -------- >>> import ite >>> co = ite.cost.BASpearman2() """ def estimation(self, y, ds=None): """ Estimate the second multivariate extension of Spearman's rho. Parameters ---------- y : (number of samples, dimension)-ndarray One row of y corresponds to one sample. ds : int vector, vector of ones ds[i] = 1 (for all i): the i^th subspace is one-dimensional. If ds is not given (ds=None), the vector of ones [ds = ones(y.shape[1],dtype='int')] is emulated inside the function. Returns ------- a : float Estimated second multivariate extension of Spearman's rho. References ---------- Friedrich Shmid, Rafael Schmidt, Thomas Blumentritt, Sandra Gaiser, and Martin Ruppert. Copula Theory and Its Applications, Chapter Copula based Measures of Multivariate Association. Lecture Notes in Statistics. Springer, 2010. Friedrich Schmid and Rafael Schmidt. Multivariate extensions of Spearman's rho and related statistics. Statistics & Probability Letters, 77:407-416, 2007. Roger B. Nelsen. Nonparametric measures of multivariate association. Lecture Notes-Monograph Series, Distributions with Fixed Marginals and Related Topics, 28:223-232, 1996. Harry Joe. Multivariate concordance. Journal of Multivariate Analysis, 35:12-30, 1990. C. Spearman. 
The proof and measurement of association between two things. The American Journal of Psychology, 15:72-101, 1904. Examples -------- a = co.estimation(y,ds) """ if ds is None: # emulate 'ds = vector of ones' ds = ones(y.shape[1], dtype='int') # verification: self.verification_compatible_subspace_dimensions(y, ds) self.verification_one_dimensional_subspaces(ds) dim = y.shape[1] # dimension u = copula_transformation(y) h = (dim + 1) / (2**dim - (dim + 1)) # h_rho(dim) a = h * (2**dim * mean(prod(u, axis=1)) - 1) return a class BASpearman3(InitX, VerOneDSubspaces, VerCompSubspaceDims): """ Estimator of the third multivariate extension of Spearman's rho. Initialization is inherited from 'InitX', verification capabilities come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py', 'ite.cost.x_verification.py'). Examples -------- >>> import ite >>> co = ite.cost.BASpearman3() """ def estimation(self, y, ds=None): """ Estimate the third multivariate extension of Spearman's rho. Parameters ---------- y : (number of samples, dimension)-ndarray One row of y corresponds to one sample. ds : int vector, vector of ones ds[i] = 1 (for all i): the i^th subspace is one-dimensional. If ds is not given (ds=None), the vector of ones [ds = ones(y.shape[1],dtype='int')] is emulated inside the function. Returns ------- a : float Estimated third multivariate extension of Spearman's rho. References ---------- Friedrich Shmid, Rafael Schmidt, Thomas Blumentritt, Sandra Gaiser, and Martin Ruppert. Copula Theory and Its Applications, Chapter Copula based Measures of Multivariate Association. Lecture Notes in Statistics. Springer, 2010. Roger B. Nelsen. An Introduction to Copulas (Springer Series in Statistics). Springer, 2006. Roger B. Nelsen. Distributions with Given Marginals and Statistical Modelling, chapter Concordance and copulas: A survey, pages 169-178. Kluwer Academic Publishers, Dordrecht, 2002. C. Spearman. 
The proof and measurement of association between two things. The American Journal of Psychology, 15:72-101, 1904. Examples -------- a = co.estimation(y,ds) """ if ds is None: # emulate 'ds = vector of ones' ds = ones(y.shape[1], dtype='int') # verification: self.verification_compatible_subspace_dimensions(y, ds) self.verification_one_dimensional_subspaces(ds) dim = y.shape[1] # dimension u = copula_transformation(y) h = (dim + 1) / (2**dim - (dim + 1)) # h_rho(d) a1 = h * (2**dim * mean(prod(1 - u, axis=1)) - 1) a2 = h * (2**dim * mean(prod(u, axis=1)) - 1) a = (a1 + a2) / 2 return a class BASpearman4(InitX, VerOneDSubspaces, VerCompSubspaceDims): """ Estimator of the fourth multivariate extension of Spearman's rho. Initialization is inherited from 'InitX', verification capabilities come from 'VerOneDSubspaces' and 'VerCompSubspaceDims'; (see 'ite.cost.x_initialization.py', 'ite.cost.x_verification.py'). Examples -------- >>> import ite >>> co = ite.cost.BASpearman4() """ def estimation(self, y, ds=None): """ Estimate the fourth multivariate extension of Spearman's rho. Parameters ---------- y : (number of samples, dimension)-ndarray One row of y corresponds to one sample. ds : int vector, vector of ones ds[i] = 1 (for all i): the i^th subspace is one-dimensional. If ds is not given (ds=None), the vector of ones [ds = ones(y.shape[1],dtype='int')] is emulated inside the function. Returns ------- a : float Estimated fourth multivariate extension of Spearman's rho. References ---------- Friedrich Shmid, Rafael Schmidt, Thomas Blumentritt, Sandra Gaiser, and Martin Ruppert. Copula Theory and Its Applications, Chapter Copula based Measures of Multivariate Association. Lecture Notes in Statistics. Springer, 2010. Friedrich Schmid and Rafael Schmidt. Multivariate extensions of Spearman's rho and related statistics. Statistics & Probability Letters, 77:407-416, 2007. Maurice G. Kendall. Rank correlation methods. London, Griffin, 1970. C. Spearman. 
The proof and measurement of association between two things. The American Journal of Psychology, 15:72-101, 1904. Examples -------- a = co.estimation(y,ds) """ if ds is None: # emulate 'ds = vector of ones' ds = ones(y.shape[1], dtype='int') # verification: self.verification_compatible_subspace_dimensions(y, ds) self.verification_one_dimensional_subspaces(ds) num_of_samples, dim = y.shape # number of samples, dimension u = copula_transformation(y) m_triu = triu(ones((dim, dim)), 1) # upper triangular mask b = binom(dim, 2) a = 12 * sum(dot((1 - u).T, (1 - u)) * m_triu) /\ (b * num_of_samples) - 3 return a class BASpearmanCondLT(InitX, VerOneDSubspaces, VerCompSubspaceDims): """ Estimate multivariate conditional version of Spearman's rho. The measure weights the lower tail of the copula. Partial initialization comes from 'InitX'; verification capabilities are inherited from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py', 'ite.cost.x_verification.py'). """ def __init__(self, mult=True, p=0.5): """ Initialize the estimator. Parameters ---------- mult : bool, optional 'True': multiplicative constant relevant (needed) in the estimation. 'False': estimation up to 'proportionality'. (default is True) p : float, 0<p<=1, optional (default is 0.5) Examples -------- >>> import ite >>> co1 = ite.cost.BASpearmanCondLT() >>> co2 = ite.cost.BASpearmanCondLT(p=0.4) """ # initialize with 'InitX': super().__init__(mult=mult) # p: self.p = p def estimation(self, y, ds=None): """ Estimate multivariate conditional version of Spearman's rho. Parameters ---------- y : (number of samples, dimension)-ndarray One row of y corresponds to one sample. ds : int vector, vector of ones ds[i] = 1 (for all i): the i^th subspace is one-dimensional. If ds is not given (ds=None), the vector of ones [ds = ones(y.shape[1],dtype='int')] is emulated inside the function. Returns ------- a : float Estimated multivariate conditional version of Spearman's rho. 
References ---------- Friedrich Schmid and Rafael Schmidt. Multivariate conditional versions of Spearman's rho and related measures of tail dependence. Journal of Multivariate Analysis, 98:1123-1140, 2007. C. Spearman. The proof and measurement of association between two things. The American Journal of Psychology, 15:72-101, 1904. Examples -------- a = co.estimation(y,ds) """ if ds is None: # emulate 'ds = vector of ones' ds = ones(y.shape[1], dtype='int') # verification: self.verification_compatible_subspace_dimensions(y, ds) self.verification_one_dimensional_subspaces(ds) num_of_samples, dim = y.shape # number of samples, dimension u = copula_transformation(y) c1 = (self.p**2 / 2)**dim c2 = self.p**(dim + 1) / (dim + 1) a = (mean(prod(maximum(self.p - u, 0), axis=1)) - c1) / (c2 - c1) return a class BASpearmanCondUT(InitX, VerOneDSubspaces, VerCompSubspaceDims): """ Estimate multivariate conditional version of Spearman's rho. The measure weights the upper tail of the copula. Partial initialization comes from 'InitX'; verification capabilities are inherited from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py', 'ite.cost.x_verification.py'). """ def __init__(self, mult=True, p=0.5): """ Initialize the estimator. Parameters ---------- mult : bool, optional 'True': multiplicative constant relevant (needed) in the estimation. 'False': estimation up to 'proportionality'. (default is True) p : float, 0<p<=1, optional (default is 0.5) Examples -------- >>> import ite >>> co1 = ite.cost.BASpearmanCondUT() >>> co2 = ite.cost.BASpearmanCondUT(p=0.4) """ # initialize with 'InitX': super().__init__(mult=mult) # p: self.p = p def estimation(self, y, ds=None): """ Estimate multivariate conditional version of Spearman's rho. Parameters ---------- y : (number of samples, dimension)-ndarray One row of y corresponds to one sample. ds : int vector, vector of ones ds[i] = 1 (for all i): the i^th subspace is one-dimensional. 
If ds is not given (ds=None), the vector of ones [ds = ones(y.shape[1],dtype='int')] is emulated inside the function. Returns ------- a : float Estimated multivariate conditional version of Spearman's rho. References ---------- Friedrich Schmid and Rafael Schmidt. Multivariate conditional versions of Spearman's rho and related measures of tail dependence. Journal of Multivariate Analysis, 98:1123-1140, 2007. C. Spearman. The proof and measurement of association between two things. The American Journal of Psychology, 15:72-101, 1904. Examples -------- a = co.estimation(y,ds) """ if ds is None: # emulate 'ds = vector of ones' ds = ones(y.shape[1], dtype='int') # verification: self.verification_compatible_subspace_dimensions(y, ds) self.verification_one_dimensional_subspaces(ds) num_of_samples, dim = y.shape # number of samples, dimension u = copula_transformation(y) c = mean(prod(1 - maximum(u, 1 - self.p), axis=1)) c1 = (self.p * (2 - self.p) / 2)**dim c2 = self.p**dim * (dim + 1 - self.p * dim) / (dim + 1) a = (c - c1) / (c2 - c1) return a class BABlomqvist(InitX, VerOneDSubspaces, VerCompSubspaceDims): """ Estimator of the multivariate extension of Blomqvist's beta. Blomqvist's beta is also known as the medial correlation coefficient. Initialization is inherited from 'InitX', verification capabilities come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' ('ite.cost.x_classes.py'). Initialization is inherited from 'InitX', verification capabilities come from 'VerOneDSubspaces' and 'VerCompSubspaceDims' (see 'ite.cost.x_initialization.py', 'ite.cost.x_verification.py'). Examples -------- >>> import ite >>> co = ite.cost.BABlomqvist() """ def estimation(self, y, ds=None): """ Estimate multivariate extension of Blomqvist's beta. Parameters ---------- y : (number of samples, dimension)-ndarray One row of y corresponds to one sample. ds : int vector, vector of ones ds[i] = 1 (for all i): the i^th subspace is one-dimensional. 
If ds is not given (ds=None), the vector of ones [ds = ones(y.shape[1],dtype='int')] is emulated inside the function. Returns ------- a : float Estimated multivariate extension of Blomqvist's beta. References ---------- Friedrich Schmid, Rafael Schmidt, Thomas Blumentritt, Sandra Gaiser, and Martin Ruppert. Copula Theory and Its Applications, Chapter Copula based Measures of Multivariate Association. Lecture Notes in Statistics. Springer, 2010. (multidimensional case, len(ds)>=2) Manuel Ubeda-Flores. Multivariate versions of Blomqvist's beta and Spearman's footrule. Annals of the Institute of Statistical Mathematics, 57:781-788, 2005. Nils Blomqvist. On a measure of dependence between two random variables. The Annals of Mathematical Statistics, 21:593-600, 1950. (2D case, statistical properties) Frederick Mosteller. On some useful ''inefficient'' statistics. Annals of Mathematical Statistics, 17:377--408, 1946. (2D case, def) Examples -------- a = co.estimation(y,ds) """ if ds is None: # emulate 'ds = vector of ones' ds = ones(y.shape[1], dtype='int') # verification: self.verification_compatible_subspace_dimensions(y, ds) self.verification_one_dimensional_subspaces(ds) num_of_samples, dim = y.shape # number of samples, dimension u = copula_transformation(y) h = 2**(dim - 1) / (2**(dim - 1) - 1) # h(dim) c1 = mean(all(u <= 1/2, axis=1)) # C(1/2) c2 = mean(all(u > 1/2, axis=1)) # \bar{C}(1/2) a = h * (c1 + c2 - 2**(1 - dim)) return a
Twitter, soon to be TWTR on the New York Stock Exchange, is readying for its initial public offering (IPO) in a few weeks. As with many companies that have recently become public, there is a cloud of ambiguity looming over the heads of the public. There is much skepticism about the company’s ability to generate substantive returns for investors as Twitter’s profits until this point are perceived by many to be lacking. This is evident from a comparison of Twitter’s average monthly revenue per user at 73 cents with Facebook’s average monthly revenue per user of $1.60. Twitter recently acquired MoPub, a mobile advertising business, in an attempt to expand ad revenues. What investors are finding out, however, is that MoPub is still in its infancy and is currently dwarfed by its competitors in terms of revenues. In the past six months, the company has only generated $6.5 million in net revenue as compared to $45 million by its main competitor Millennial Media. Perhaps one remedy for the low revenue streams lies within Twitter’s recent action to access the debt market. It seems that Twitter is attempting to mimic other social media companies by obtaining a $1 billion credit line that could possibly be used to help fund other acquisitions. One of the other sources of uncertainty is the company’s lack of patents, according to a Friday Bloomberg article. Its prospectus said the company has issued nine U.S. patents. This compares with the more than 700 patents that Facebook had just before it reached its IPO. Twitter likes having its engineers able to own their own inventions, but analysts and investors are worried about the possible repercussions. Patents can help analysts quantify the value of technological advances and are important for investor confidence. Twitter maintains that a larger patent portfolio will only subject the company to potential litigation in the future.
Twitter implemented the Innovator’s Patent Agreement, or IPA, in order for the employees who pioneer inventions to maintain the ownership of them. This also means that Twitter cannot enter litigation without the consent of the inventor. The idea behind this agreement is to attract the best engineers with the allure of keeping all of their innovations under their own name. Many people argue that this method of dealing with intellectual property will hurt the company not only in the long run, but also in its IPO stage. Peter Adriaens, an entrepreneurship professor at the University of Michigan, said Twitter is aiming to raise $1 billion in the offering with a share price of roughly $20.62. “The more IP is protected, the less infringement opportunity there is, and therefore, would increase the valuation,” Adriaens said.
import xlwings as xw from xlwings import Workbook, Sheet, Range, Chart import time import datetime from subprocess import call import matplotlib.pyplot as plt # connect to the active workbook #wb = Workbook('run.xlsm') wb = Workbook.active() # log status Range('B8').value = 'running upload ...' file = 'Output/exposure_trade_Swap_20y.csv' # load data into arrays and cells x = [] y = [] z = [] line = 2 import csv with open(file) as csvfile: reader = csv.DictReader(csvfile, delimiter=',') Range('H1').value = 'Time' Range('I1').value = 'EPE' Range('J1').value = 'ENE' for row in reader: x.append(float(row['Time'])) y.append(float(row['EPE'])) z.append(float(row['ENE'])) Range('H' + str(line)).value = float(row['Time']) Range('I' + str(line)).value = float(row['EPE']) Range('J' + str(line)).value = float(row['ENE']) line = line + 1 # add chart cellrange = str("H1:J") + str(line) chart = xw.Chart.add(source_data=xw.Range(cellrange).table) chart.name = 'chart' chart.chart_type = xw.ChartType.xlLine chart.top = 200 chart.left = 0 chart.height = 250 chart.width = 350 chart.title = 'Exposure Evolution' chart.xlabel = 'Time / Years' chart.ylabel = 'Exposure' # log status ts = time.time() st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') Range('B8').value = st + " Upload completed" # add same plot again using matplotlib fig = plt.figure() ax = fig.add_subplot(111) ax.set_title("Exposure Evolution") ax.set_xlabel("Time / Years") ax.set_ylabel("Exposure") ax.plot(x,y, label="EPE") ax.plot(x,z, label="ENE") legend = ax.legend(loc="upper right") plot = xw.Plot(fig) plot.show('plot', left=xw.Range('A33').left, top=xw.Range('A33').top, width=350, height=250) # log status ts = time.time() st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') xw.Range('B8').value = st + " Upload completed"
Run your GUI tests in a headless X-server using Python and Xvfb. celery-tasktree is a module which helps to execute trees of celery tasks asynchronously in a particular order. Tasktree comes to the rescue when a number of tasks and dependencies grows and when naive callback-based approach becomes hard to understand and maintain. A secure cookie-based session engine for Django. Dagny is a Django adaptation of Ruby on Rails’s Resource-Oriented Architecture (a.k.a. ‘RESTful Rails’). Dagny makes it really easy to build resourceful web applications. A simple lighting talk timer in your browser. Plug and play continuous integration with django and jenkins. This tool provides a demonstration of the HTTPS stripping attacks that I presented at Black Hat DC 2009. It will transparently hijack HTTP traffic on a network, watch for HTTPS links and redirects, then map those links into either look-alike HTTP links or homograph-similar HTTPS links. A reusable Django application for extending managers and the querysets they produce. Needle is a tool for testing your CSS with Selenium and nose. Allows you to register a transforming map function with a Django QuerySet that will be executed only when the QuerySet itself has been evaluated. Simple PyPI server written in Django. Homebrew is the easiest and most flexible way to install the UNIX tools Apple didn't include with OS X. mod_macro is a third-party module to the Apache Http Server, distributed with a BSD-style license like Apache. It allows the definition and use of macros within apache runtime configuration files. Hg flow is a development model for Mercurial inspired by git-flow.
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not # use this file except in compliance with the License. A copy of the License # is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. """ Implements data iterators and I/O related functions for sequence-to-sequence models. """ import bisect import gzip import logging import pickle import random from collections import OrderedDict from typing import Dict, Iterator, Iterable, List, NamedTuple, Optional, Tuple import mxnet as mx import numpy as np from sockeye.utils import check_condition from . import config from . import constants as C logger = logging.getLogger(__name__) def define_buckets(max_seq_len: int, step=10) -> List[int]: """ Returns a list of integers defining bucket boundaries. Bucket boundaries are created according to the following policy: We generate buckets with a step size of step until the final bucket fits max_seq_len. We then limit that bucket to max_seq_len (difference between semi-final and final bucket may be less than step). :param max_seq_len: Maximum bucket size. :param step: Distance between buckets. :return: List of bucket sizes. """ buckets = [bucket_len for bucket_len in range(step, max_seq_len + step, step)] buckets[-1] = max_seq_len return buckets def define_parallel_buckets(max_seq_len_source: int, max_seq_len_target: int, bucket_width=10, length_ratio=1.0) -> List[Tuple[int, int]]: """ Returns (source, target) buckets up to (max_seq_len_source, max_seq_len_target). The longer side of the data uses steps of bucket_width while the shorter side uses steps scaled down by the average target/source length ratio. 
If one side reaches its max_seq_len before the other, width of extra buckets on that side is fixed to that max_seq_len. :param max_seq_len_source: Maximum source bucket size. :param max_seq_len_target: Maximum target bucket size. :param bucket_width: Width of buckets on longer side. :param length_ratio: Length ratio of data (target/source). """ source_step_size = bucket_width target_step_size = bucket_width if length_ratio >= 1.0: # target side is longer -> scale source source_step_size = max(1, int(bucket_width / length_ratio)) else: # source side is longer, -> scale target target_step_size = max(1, int(bucket_width * length_ratio)) source_buckets = define_buckets(max_seq_len_source, step=source_step_size) target_buckets = define_buckets(max_seq_len_target, step=target_step_size) # Extra buckets if len(source_buckets) < len(target_buckets): source_buckets += [source_buckets[-1] for _ in range(len(target_buckets) - len(source_buckets))] elif len(target_buckets) < len(source_buckets): target_buckets += [target_buckets[-1] for _ in range(len(source_buckets) - len(target_buckets))] # minimum bucket size is 2 (as we add BOS symbol to target side) source_buckets = [max(2, b) for b in source_buckets] target_buckets = [max(2, b) for b in target_buckets] parallel_buckets = list(zip(source_buckets, target_buckets)) # deduplicate for return return list(OrderedDict.fromkeys(parallel_buckets)) def get_bucket(seq_len: int, buckets: List[int]) -> Optional[int]: """ Given sequence length and a list of buckets, return corresponding bucket. :param seq_len: Sequence length. :param buckets: List of buckets. :return: Chosen bucket. 
""" bucket_idx = bisect.bisect_left(buckets, seq_len) if bucket_idx == len(buckets): return None return buckets[bucket_idx] def read_parallel_corpus(data_source: str, data_target: str, vocab_source: Dict[str, int], vocab_target: Dict[str, int]) -> Tuple[List[List[int]], List[List[int]]]: """ Loads source and target data, making sure they have the same length. :param data_source: Path to source training data. :param data_target: Path to target training data. :param vocab_source: Source vocabulary. :param vocab_target: Target vocabulary. :return: Tuple of (source sentences, target sentences). """ source_sentences = read_sentences(data_source, vocab_source, add_bos=False) target_sentences = read_sentences(data_target, vocab_target, add_bos=True) check_condition(len(source_sentences) == len(target_sentences), "Number of source sentences does not match number of target sentences") return source_sentences, target_sentences def get_training_data_iters(source: str, target: str, validation_source: str, validation_target: str, vocab_source: Dict[str, int], vocab_target: Dict[str, int], batch_size: int, fill_up: str, max_seq_len_source: int, max_seq_len_target: int, bucketing: bool, bucket_width: int) -> Tuple['ParallelBucketSentenceIter', 'ParallelBucketSentenceIter']: """ Returns data iterators for training and validation data. :param source: Path to source training data. :param target: Path to target training data. :param validation_source: Path to source validation data. :param validation_target: Path to target validation data. :param vocab_source: Source vocabulary. :param vocab_target: Target vocabulary. :param batch_size: Batch size. :param fill_up: Fill-up strategy for buckets. :param max_seq_len_source: Maximum source sequence length. :param max_seq_len_target: Maximum target sequence length. :param bucketing: Whether to use bucketing. :param bucket_width: Size of buckets. :return: Tuple of (training data iterator, validation data iterator). 
""" logger.info("Creating train data iterator") train_source_sentences, train_target_sentences = read_parallel_corpus(source, target, vocab_source, vocab_target) length_ratio = sum(len(t) / float(len(s)) for t, s in zip(train_target_sentences, train_source_sentences)) / len( train_target_sentences) logger.info("Average training target/source length ratio: %.2f", length_ratio) # define buckets buckets = define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width, length_ratio) if bucketing else [ (max_seq_len_source, max_seq_len_target)] train_iter = ParallelBucketSentenceIter(train_source_sentences, train_target_sentences, buckets, batch_size, vocab_target[C.EOS_SYMBOL], C.PAD_ID, vocab_target[C.UNK_SYMBOL], fill_up=fill_up) logger.info("Creating validation data iterator") val_source_sentences, val_target_sentences = read_parallel_corpus(validation_source, validation_target, vocab_source, vocab_target) val_iter = ParallelBucketSentenceIter(val_source_sentences, val_target_sentences, buckets, batch_size, vocab_target[C.EOS_SYMBOL], C.PAD_ID, vocab_target[C.UNK_SYMBOL], fill_up=fill_up) return train_iter, val_iter class DataConfig(config.Config): """ Stores data paths from training. """ def __init__(self, source: str, target: str, validation_source: str, validation_target: str, vocab_source: str, vocab_target: str) -> None: super().__init__() self.source = source self.target = target self.validation_source = validation_source self.validation_target = validation_target self.vocab_source = vocab_source self.vocab_target = vocab_target def smart_open(filename: str, mode="rt", ftype="auto", errors='replace'): """ Returns a file descriptor for filename with UTF-8 encoding. If mode is "rt", file is opened read-only. If ftype is "auto", uses gzip iff filename endswith .gz. If ftype is {"gzip","gz"}, uses gzip. Note: encoding error handling defaults to "replace" :param filename: The filename to open. :param mode: Reader mode. :param ftype: File type. 
If 'auto' checks filename suffix for gz to try gzip.open :param errors: Encoding error handling during reading. Defaults to 'replace' :return: File descriptor """ if ftype == 'gzip' or ftype == 'gz' or (ftype == 'auto' and filename.endswith(".gz")): return gzip.open(filename, mode=mode, encoding='utf-8', errors=errors) else: return open(filename, mode=mode, encoding='utf-8', errors=errors) def read_content(path: str, limit=None) -> Iterator[List[str]]: """ Returns a list of tokens for each line in path up to a limit. :param path: Path to files containing sentences. :param limit: How many lines to read from path. :return: Iterator over lists of words. """ with smart_open(path) as indata: for i, line in enumerate(indata): if limit is not None and i == limit: break yield list(get_tokens(line)) def get_tokens(line: str) -> Iterator[str]: """ Yields tokens from input string. :param line: Input string. :return: Iterator over tokens. """ for token in line.rstrip().split(): if len(token) > 0: yield token def tokens2ids(tokens: Iterable[str], vocab: Dict[str, int]) -> List[int]: """ Returns sequence of ids given a sequence of tokens and vocab. :param tokens: List of tokens. :param vocab: Vocabulary (containing UNK symbol). :return: List of word ids. """ return [vocab.get(w, vocab[C.UNK_SYMBOL]) for w in tokens] def read_sentences(path: str, vocab: Dict[str, int], add_bos=False, limit=None) -> List[List[int]]: """ Reads sentences from path and creates word id sentences. :param path: Path to read data from. :param vocab: Vocabulary mapping. :param add_bos: Whether to add Beginning-Of-Sentence (BOS) symbol. :param limit: Read limit. :return: List of integer sequences. 
""" assert C.UNK_SYMBOL in vocab assert C.UNK_SYMBOL in vocab assert vocab[C.PAD_SYMBOL] == C.PAD_ID assert C.BOS_SYMBOL in vocab assert C.EOS_SYMBOL in vocab sentences = [] for sentence_tokens in read_content(path, limit): sentence = tokens2ids(sentence_tokens, vocab) check_condition(sentence, "Empty sentence in file %s" % path) if add_bos: sentence.insert(0, vocab[C.BOS_SYMBOL]) sentences.append(sentence) logger.info("%d sentences loaded from '%s'", len(sentences), path) return sentences def get_default_bucket_key(buckets: List[Tuple[int, int]]) -> Tuple[int, int]: """ Returns the default bucket from a list of buckets, i.e. the largest bucket. :param buckets: List of buckets. :return: The largest bucket in the list. """ return max(buckets) def get_parallel_bucket(buckets: List[Tuple[int, int]], length_source: int, length_target: int) -> Optional[Tuple[int, Tuple[int, int]]]: """ Returns bucket index and bucket from a list of buckets, given source and target length. Returns (None, None) if no bucket fits. :param buckets: List of buckets. :param length_source: Length of source sequence. :param length_target: Length of target sequence. :return: Tuple of (bucket index, bucket), or (None, None) if not fitting. """ bucket = None, None for j, (source_bkt, target_bkt) in enumerate(buckets): if source_bkt >= length_source and target_bkt >= length_target: bucket = j, (source_bkt, target_bkt) break return bucket # TODO: consider more memory-efficient data reading (load from disk on demand) # TODO: consider using HDF5 format for language data class ParallelBucketSentenceIter(mx.io.DataIter): """ A Bucket sentence iterator for parallel data. Randomly shuffles the data after every call to reset(). Data is stored in NDArrays for each epoch for fast indexing during iteration. :param source_sentences: List of source sentences (integer-coded). :param target_sentences: List of target sentences (integer-coded). :param buckets: List of buckets. 
:param batch_size: Batch_size of generated data batches. Incomplete batches are discarded if fill_up == None, or filled up according to the fill_up strategy. :param fill_up: If not None, fill up bucket data to a multiple of batch_size to avoid discarding incomplete batches. for each bucket. If set to 'replicate', sample examples from the bucket and use them to fill up. :param eos_id: Word id for end-of-sentence. :param pad_id: Word id for padding symbols. :param unk_id: Word id for unknown symbols. :param dtype: Data type of generated NDArrays. """ def __init__(self, source_sentences: List[List[int]], target_sentences: List[List[int]], buckets: List[Tuple[int, int]], batch_size: int, eos_id: int, pad_id: int, unk_id: int, fill_up: Optional[str] = None, source_data_name=C.SOURCE_NAME, source_data_length_name=C.SOURCE_LENGTH_NAME, target_data_name=C.TARGET_NAME, label_name=C.TARGET_LABEL_NAME, dtype='float32'): super(ParallelBucketSentenceIter, self).__init__() self.buckets = list(buckets) self.buckets.sort() self.default_bucket_key = get_default_bucket_key(self.buckets) self.batch_size = batch_size self.eos_id = eos_id self.pad_id = pad_id self.unk_id = unk_id self.dtype = dtype self.source_data_name = source_data_name self.source_data_length_name = source_data_length_name self.target_data_name = target_data_name self.label_name = label_name self.fill_up = fill_up # TODO: consider avoiding explicitly creating length and label arrays to save host memory self.data_source = [[] for _ in self.buckets] self.data_length = [[] for _ in self.buckets] self.data_target = [[] for _ in self.buckets] self.data_label = [[] for _ in self.buckets] # assign sentence pairs to buckets self._assign_to_buckets(source_sentences, target_sentences) # convert to single numpy array for each bucket self._convert_to_array() self.provide_data = [ mx.io.DataDesc(name=source_data_name, shape=(batch_size, self.default_bucket_key[0]), layout=C.BATCH_MAJOR), 
mx.io.DataDesc(name=source_data_length_name, shape=(batch_size,), layout=C.BATCH_MAJOR), mx.io.DataDesc(name=target_data_name, shape=(batch_size, self.default_bucket_key[1]), layout=C.BATCH_MAJOR)] self.provide_label = [ mx.io.DataDesc(name=label_name, shape=(self.batch_size, self.default_bucket_key[1]), layout=C.BATCH_MAJOR)] self.data_names = [self.source_data_name, self.source_data_length_name, self.target_data_name] self.label_names = [self.label_name] # create index tuples (i,j) into buckets: i := bucket index ; j := row index of bucket array self.idx = [] for i, buck in enumerate(self.data_source): rest = len(buck) % batch_size if rest > 0: logger.info("Discarding %d samples from bucket %s due to incomplete batch", rest, self.buckets[i]) idxs = [(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)] self.idx.extend(idxs) self.curr_idx = 0 # holds NDArrays self.indices = [] # This will define how the data arrays will be organized self.nd_source = [] self.nd_length = [] self.nd_target = [] self.nd_label = [] self.reset() def _assign_to_buckets(self, source_sentences, target_sentences): ndiscard = 0 tokens_source = 0 tokens_target = 0 num_of_unks_source = 0 num_of_unks_target = 0 for source, target in zip(source_sentences, target_sentences): tokens_source += len(source) tokens_target += len(target) num_of_unks_source += source.count(self.unk_id) num_of_unks_target += target.count(self.unk_id) buck_idx, buck = get_parallel_bucket(self.buckets, len(source), len(target)) if buck is None: ndiscard += 1 continue buff_source = np.full((buck[0],), self.pad_id, dtype=self.dtype) buff_target = np.full((buck[1],), self.pad_id, dtype=self.dtype) buff_label = np.full((buck[1],), self.pad_id, dtype=self.dtype) buff_source[:len(source)] = source buff_target[:len(target)] = target buff_label[:len(target)] = target[1:] + [self.eos_id] self.data_source[buck_idx].append(buff_source) self.data_length[buck_idx].append(len(source)) 
        # (Tail of the bucket-filling routine that begins above this chunk:
        # append the current target/label buffers to their bucket.)
        self.data_target[buck_idx].append(buff_target)
        self.data_label[buck_idx].append(buff_label)

        # Log summary statistics of the loaded corpus.
        logger.info("Source words: %d", tokens_source)
        logger.info("Target words: %d", tokens_target)
        logger.info("Vocab coverage source: %.0f%%", (1 - num_of_unks_source / tokens_source) * 100)
        logger.info("Vocab coverage target: %.0f%%", (1 - num_of_unks_target / tokens_target) * 100)
        logger.info('Total: {0} samples in {1} buckets'.format(len(self.data_source), len(self.buckets)))
        nsamples = 0
        for bkt, buck in zip(self.buckets, self.data_length):
            logger.info("bucket of {0} : {1} samples".format(bkt, len(buck)))
            nsamples += len(buck)
        # Fail fast if every sentence pair was discarded for being too long.
        check_condition(nsamples > 0, "0 data points available in the data iterator. "
                                      "%d data points have been discarded because they "
                                      "didn't fit into any bucket. Consider increasing "
                                      "the --max-seq-len to fit your data." % ndiscard)
        logger.info("%d sentence pairs out of buckets", ndiscard)
        logger.info("fill up mode: %s", self.fill_up)
        logger.info("")

    def _convert_to_array(self):
        """
        Converts each bucket's python lists into numpy arrays and, when a
        bucket's size is not a multiple of the batch size, fills it up
        according to ``self.fill_up``:

        * ``'pad'``: not implemented.
        * ``'replicate'``: duplicates randomly chosen examples from the same
          bucket until the bucket size is a multiple of ``self.batch_size``.
        """
        for i in range(len(self.data_source)):
            self.data_source[i] = np.asarray(self.data_source[i], dtype=self.dtype)
            self.data_length[i] = np.asarray(self.data_length[i], dtype=self.dtype)
            self.data_target[i] = np.asarray(self.data_target[i], dtype=self.dtype)
            self.data_label[i] = np.asarray(self.data_label[i], dtype=self.dtype)

            n = len(self.data_source[i])
            if n % self.batch_size != 0:
                buck_shape = self.buckets[i]
                rest = self.batch_size - n % self.batch_size
                if self.fill_up == 'pad':
                    raise NotImplementedError
                elif self.fill_up == 'replicate':
                    logger.info(
                        "Replicating %d random examples from bucket %s to size it to multiple of batch size %d",
                        rest, buck_shape, self.batch_size)
                    # data_length is 1-D per example, hence no column slice.
                    random_indices = np.random.randint(self.data_source[i].shape[0], size=rest)
                    self.data_source[i] = np.concatenate((self.data_source[i], self.data_source[i][random_indices, :]), axis=0)
                    self.data_length[i] = np.concatenate((self.data_length[i], self.data_length[i][random_indices]), axis=0)
                    self.data_target[i] = np.concatenate((self.data_target[i], self.data_target[i][random_indices, :]), axis=0)
                    self.data_label[i] = np.concatenate((self.data_label[i], self.data_label[i][random_indices, :]), axis=0)

    def reset(self):
        """
        Resets and reshuffles the data.
        """
        self.curr_idx = 0
        # shuffle the (bucket, offset) batch index pairs
        random.shuffle(self.idx)

        # Rebuild the per-bucket NDArrays with a fresh within-bucket shuffle.
        self.nd_source = []
        self.nd_length = []
        self.nd_target = []
        self.nd_label = []
        self.indices = []
        for i in range(len(self.data_source)):
            # shuffle indices within each bucket
            self.indices.append(np.random.permutation(len(self.data_source[i])))
            self._append_ndarrays(i, self.indices[-1])

    def _append_ndarrays(self, bucket: int, shuffled_indices: np.array):
        """
        Appends the actual data, selected by the given indices, to the
        NDArrays of the appropriate bucket. Use when reshuffling the data.

        :param bucket: Current bucket.
        :param shuffled_indices: Indices indicating which data to select.
        """
        self.nd_source.append(mx.nd.array(self.data_source[bucket].take(shuffled_indices, axis=0), dtype=self.dtype))
        self.nd_length.append(mx.nd.array(self.data_length[bucket].take(shuffled_indices, axis=0), dtype=self.dtype))
        self.nd_target.append(mx.nd.array(self.data_target[bucket].take(shuffled_indices, axis=0), dtype=self.dtype))
        self.nd_label.append(mx.nd.array(self.data_label[bucket].take(shuffled_indices, axis=0), dtype=self.dtype))

    def iter_next(self) -> bool:
        """
        True if iterator can return another batch.
        """
        return self.curr_idx != len(self.idx)

    def next(self) -> mx.io.DataBatch:
        """
        Returns the next batch from the data iterator.

        :raises StopIteration: When all (bucket, offset) pairs are exhausted.
        """
        if not self.iter_next():
            raise StopIteration

        # i selects the bucket, j the offset of this batch within the bucket.
        i, j = self.idx[self.curr_idx]
        self.curr_idx += 1

        source = self.nd_source[i][j:j + self.batch_size]
        length = self.nd_length[i][j:j + self.batch_size]
        target = self.nd_target[i][j:j + self.batch_size]
        data = [source, length, target]
        label = [self.nd_label[i][j:j + self.batch_size]]

        provide_data = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR)
                        for n, x in zip(self.data_names, data)]
        provide_label = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR)
                         for n, x in zip(self.label_names, label)]

        # TODO: num pad examples is not set here if fillup strategy would be padding
        return mx.io.DataBatch(data, label,
                               pad=0, index=None, bucket_key=self.buckets[i],
                               provide_data=provide_data, provide_label=provide_label)

    def save_state(self, fname: str):
        """
        Saves the current state of iterator to a file, so that iteration can be
        continued. Note that the data is not saved, i.e. the iterator must be
        initialized with the same parameters as in the first call.

        :param fname: File name to save the information to.
        """
        with open(fname, "wb") as fp:
            pickle.dump(self.idx, fp)
            pickle.dump(self.curr_idx, fp)
            np.save(fp, self.indices)

    def load_state(self, fname: str):
        """
        Loads the state of the iterator from a file.

        :param fname: File name to load the information from.
        """
        with open(fname, "rb") as fp:
            self.idx = pickle.load(fp)
            self.curr_idx = pickle.load(fp)
            self.indices = np.load(fp)

        # Because of how checkpointing is done (pre-fetching the next batch in
        # each iteration), curr_idx should be always >= 1
        assert self.curr_idx >= 1
        # Right after loading the iterator state, next() should be called
        self.curr_idx -= 1

        # Rebuild the NDArrays using the restored (not reshuffled) indices.
        self.nd_source = []
        self.nd_length = []
        self.nd_target = []
        self.nd_label = []
        for i in range(len(self.data_source)):
            self._append_ndarrays(i, self.indices[i])
Slang has been around since the beginning of time. If it weren't for slang we wouldn't have the banter or that feeling of belonging to a place. Slang isn't just a unique spin on a word, but a call home. Also Read: The BRIT Awards Are Just Around The Corner, Check Out The Nominees Here!
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Tests for google.appengine.tools.devappserver2.python.runtime.""" import unittest import google import mox from google.appengine.ext.remote_api import remote_api_stub from google.appengine.tools.devappserver2 import runtime_config_pb2 from google.appengine.tools.devappserver2.python import runtime class SetupStubsTest(unittest.TestCase): def setUp(self): self.mox = mox.Mox() def tearDown(self): self.mox.UnsetStubs() def test_setup_stubs(self): self.mox.StubOutWithMock(remote_api_stub, 'ConfigureRemoteApi') remote_api_stub.ConfigureRemoteApi('app', '/', mox.IgnoreArg(), 'somehost:12345', use_remote_datastore=False) config = runtime_config_pb2.Config() config.app_id = 'app' config.api_host = 'somehost' config.api_port = 12345 self.mox.ReplayAll() runtime.setup_stubs(config) self.mox.VerifyAll() if __name__ == '__main__': unittest.main()
Ross's Law Marketing Blog: Zoolander-style lawyer head shots. Trust me - you must see these animated attorney biography photos. Lynn Hajek is a terrific litigator -- smart, funny, and tough (think Elle Woods from Legally Blonde without the naiveté). The website doesn't look much like 2013, but firms update their websites when it's important to them, and I'm sure they'll call Fishman Marketing when they're ready for a major overhaul (right, Lynn...?) Regardless, I've never seen a website with this type of head shot for the lawyer biographies. I especially like this one, because she starts with her back to the camera. When I first saw them, I actually clicked on each biography multiple times to watch each little animation -- which speaks to how compelling I found them. Visuals and videos (c) 2013 AHL law firm and Paramount Pictures.
#!/usr/bin/env python import unittest from day04 import parse_room, make_checksum, is_real_room, sum_of_sectors from day04 import decrypt_room_name, find_decrypted_room_name class TestIdentifiesRealRooms(unittest.TestCase): """ aaaaa-bbb-z-y-x-123[abxyz] is a real room because the most common letters are a (5), b (3), and then a tie between x, y, and z, which are listed alphabetically. a-b-c-d-e-f-g-h-987[abcde] is a real room because although the letters are all tied (1 of each), the first five are listed alphabetically. not-a-real-room-404[oarel] is a real room. totally-real-room-200[decoy] is not. """ cases = ( ('aaaaa-bbb-z-y-x-123[abxyz]', ('aaaaabbbzyx', 123, 'abxyz'), 'abxyz', True), ('a-b-c-d-e-f-g-h-987[abcde]', ('abcdefgh', 987, 'abcde'), 'abcde', True), ('not-a-real-room-404[oarel]', ('notarealroom', 404, 'oarel'), 'oarel', True), ('totally-real-room-200[decoy]', ('totallyrealroom', 200, 'decoy'), 'loart', False), ) def test_parses_rooms(self): for room, parsed, *_ in self.cases: self.assertEqual(parse_room(room), parsed) def test_calculates_checksums(self): for room, parsed, checksum, _ in self.cases: name, sector, _ = parse_room(room) self.assertEqual(make_checksum(name), checksum) def test_identifies_real_rooms(self): for room, *_, is_real in self.cases: name, sector, checksum = parse_room(room) self.assertEqual(is_real_room(name, checksum), is_real) def test_sums_valid_sectors(self): rooms = [x[0] for x in self.cases] self.assertEqual(sum_of_sectors(rooms), 1514) class TestDecryptingRoomNames(unittest.TestCase): cases = ( ('qzmt-zixmtkozy-ivhz-343[zimth]', 'veryencryptedname'), ) def test_decrypts_room_names(self): for room, expected_plaintext in self.cases: self.assertEqual(decrypt_room_name(room), expected_plaintext) def test_finds_decrypted_room_names(self): rooms = ['garbage-nope-123[lol]', 'haystack-57[wtf]'] + \ [x[0] for x in self.cases] self.assertEqual(find_decrypted_room_name('Very Encrypted', rooms), 343) if __name__ == '__main__': 
unittest.main()
Corks are in! Doing my happy dance! Another step closer to being ready for my Syrah bottling in a couple of weeks. It has been quite a process with selecting, designing, and keeping my fingers crossed that it will come out well. They had a long trip all the way from Portugal, which is one of the largest cork producers in the world. Cork is just another name for the bark of an oak tree known as the Cork Oak (the trees are so valuable it's illegal in Portugal to cut them down). The cork is harvested by stripping the bark off the tree. Good news - it doesn't hurt the tree, and in 9 years the bark is regrown and ready to be harvested again. They range from several cents to over a dollar per cork. The better the quality of the cork, the less chance of the wine being spoiled.
import numpy as np

"""
Defines external sources of propagating waves that can be injected into the
simulated system.

A Source stores its grid coordinates and, when applied, writes a fixed
excitation pattern into the grid. Currently only a spherical source pattern
is implemented.
"""


class Source:
    """This class is responsible for creating external sources of waves"""

    def __init__(self, type):
        self._type = type
        # Fixed injection point of the source within the grid.
        self.coordinates_x = 5
        self.coordinates_y = 5
        self._source = self._get_source_by_type(type)

    # TODO: make return valid value
    def _get_source_by_type(self, type):
        # Placeholder until per-type source objects exist.
        return 'Temp source value'

    def _create_spherical_source(self, grid, dimension):
        # TODO delete unnamed constants
        x = self.coordinates_x
        y = self.coordinates_y
        if dimension == 2 and len(grid[0][0][0]) == 3:
            # 2-D, 3-component cells: full amplitude at the centre cell,
            # half amplitude on all eight surrounding cells.
            grid[x][y][0] = np.array([10, 0, 0])
            neighbor_offsets = (
                (1, 0), (-1, 0), (0, 1), (0, -1),
                (-1, -1), (1, -1), (-1, 1), (1, 1),
            )
            for dx, dy in neighbor_offsets:
                grid[x + dx][y + dy][0] = np.array([5, 0, 0])
        elif dimension == 1:
            # 1-D case: a single two-component excitation at the centre.
            grid[x][0] = np.array([100, 20])
        else:
            # Fallback (e.g. 3-D / 5-component cells): centre cell only.
            grid[x][y][0] = np.array([10, 0, 0, 0, 0])
        return grid

    def update_source_in_grid(self, grid, dimension):
        """Write this source's excitation into *grid* and return the grid."""
        return self._create_spherical_source(grid, dimension)


class SourcesTypes:
    def __init__(self):
        pass
Hello! Thank you for reaching out to us. Please fill in your contact information and we will get back with you quickly.
#!/usr/bin/env python3

"""
Module providing forward and inverse Spherical Harmonic Transforms.

The algorithm followed is that given in the paper:

Zubair Khalid, Rodney A. Kennedy, Jason D. McEwen,
``An Optimal-Dimensionality Sampling Scheme on the Sphere with Fast Spherical
Harmonic Transforms'',
IEEE Transactions on Signal Processing, vol. 62, no. 17, pp. 4597-4610,
Sept.1, 2014

DOI: http://dx.doi.org/10.1109/TSP.2014.2337278
arXiv: http://arxiv.org/abs/1403.4661 [cs.IT]
"""

from __future__ import print_function, division

import numpy as np
import scipy.special as spl
import scipy.linalg as la


def _compute_P(thetas):
    """Computes all Pm, to be used for intermediate computations.

    P[m] holds 2*pi times the spherical harmonics of order m evaluated at
    phi=0 for all degrees l in [m, L), sampled at every theta.
    """
    L = thetas.size
    P = []  # List of all Pm's
    for m in range(L):
        ls = np.arange(m, L)
        Pm = spl.sph_harm(m, ls[np.newaxis, :], 0, thetas[:, np.newaxis])
        P.append(2 * np.pi * Pm)
    return P


def sht(f, thetas, phis, intermediates=None):
    """
    Computes the spherical harmonic transform of f, for the grid specified by
    thetas and phis. This grid must conform to a specific format.

    Currently, f can be at most two dimensional. The first dimension will be
    transformed.

    :param f: samples on the sphere, ordered per the optimal sampling scheme
        (ring k occupies indices k**2 .. (k+1)**2 - 1) -- assumed, per the
        indexing below; confirm against the sampling-scheme construction.
    :param thetas: ring colatitudes; thetas.size determines the band limit L.
    :param phis: per-sample azimuth angles, same indexing as f.
    :param intermediates: optional dict used to cache P and the LU factors
        across repeated calls (filled in on first use).
    :return: array of spherical harmonic coefficients, same shape as f,
        indexed flm[l**2 + l + m].
    """
    f = f.copy().astype(complex)  # Shouldn't corrupt the original
    L = thetas.size

    # Check intermediates for P, and compute it if absent
    if intermediates is None:       # Caller not using intermediates
        P = _compute_P(thetas)
    elif 'P' in intermediates:      # Caller provided P
        P = intermediates['P']
    else:                           # Caller wants P
        P = _compute_P(thetas)
        intermediates['P'] = P

    # Compute and store the LU factors of P[m]'s, so that computing the sht
    # multiple times is inexpensive
    if intermediates is None:
        Pm_factors = [la.lu_factor(P[m][m:, :]) for m in range(L)]
    elif 'Pm_factors' in intermediates:
        Pm_factors = intermediates['Pm_factors']
    else:
        Pm_factors = [la.lu_factor(P[m][m:, :]) for m in range(L)]
        intermediates['Pm_factors'] = Pm_factors

    # Initialize g: for L=4, it looks like this when complete:
    # 0  *  *  *  *  *  *
    # 0  1  *  *  *  * -1
    # 0  1  2  *  * -2 -1
    # 0  1  2  3 -3 -2 -1
    # The numbers here indicate the value of m. A * indicates an unused space.
    # The l'th row is the FFT of the ring corresponding to theta[l].
    # The m'th column (excluding the unused entries) is essentially gm.
    # Thus, gm is valid only when |m| <= l, and is best indexed from -m to m.
    g = np.zeros((L, 2 * L - 1) + f.shape[1:], dtype=complex)

    # Initialize result vector
    flm = np.zeros(f.shape, dtype=complex)

    # Work from the highest order m down, peeling its contribution off the
    # remaining (smaller) rings at each step.
    for m in reversed(range(L)):
        # Update g by computing gm
        # Perform (2m+1)-point FFT of the m'th phi-ring
        # The sampling of f is from -m to m, whereas for the FFT, we need it to
        # be from 0 to 2m+1. Hence the ifftshift.
        temp = np.fft.fft(np.fft.ifftshift(f[m**2:(m+1)**2], axes=0),
                          axis=0) * 2 * np.pi / (2*m+1)
        # Add this to the main matrix g
        g[m, :m+1] = temp[:m+1]
        g[m, (2*L-1-m):] = temp[m+1:]

        # Solve for fm and fm_neg
        fm = la.lu_solve(Pm_factors[m], g[m:, m])
        if m > 0:
            fm_neg = (-1)**m * la.lu_solve(Pm_factors[m], g[m:, -m])

        # Store results
        ls = np.arange(m, L)
        flm[ls**2 + ls + m] = fm
        if m > 0:
            flm[ls**2 + ls - m] = fm_neg

        # Compute gm for the *other* thetas
        gm = np.einsum('i...,ki->k...', fm, P[m][:m, :])
        if m > 0:
            gm_neg = np.einsum('i...,ki->k...', fm_neg, (-1)**m * P[m][:m, :])

        for k in range(m):  # Note: we won't enter this loop if m==0
            # Extend dimensions of phi for proper broadcasting with g
            ext_indices = ((slice(k**2, (k+1)**2),) +
                           (None,) * (len(f.shape) - 1))
            # Subtract order-m content from ring k so lower orders can be
            # solved on the remaining signal.
            f_tilde = ((np.exp(1j * m * phis[ext_indices]) * gm[[k]] +
                        np.exp(-1j * m * phis[ext_indices]) * gm_neg[[k]]) /
                       (2 * np.pi))
            f[k**2:(k+1)**2] -= f_tilde

    return flm


def isht(flm, thetas, phis, intermediates=None):
    """
    Computes the inverse spherical harmonic transform.

    :param flm: spherical harmonic coefficients, indexed flm[l**2 + l + m].
    :param thetas: ring colatitudes; thetas.size determines the band limit L.
    :param phis: per-sample azimuth angles, same indexing as the output.
    :param intermediates: optional dict used to cache P across calls.
    :return: complex samples on the sphere, same shape as flm.
    """
    L = thetas.size

    # Check intermediates for P, and compute it if absent
    if intermediates is None:       # Caller not using intermediates
        P = _compute_P(thetas)
    elif 'P' in intermediates:      # Caller provided P
        P = intermediates['P']
    else:                           # Caller wants P
        P = _compute_P(thetas)
        intermediates['P'] = P

    # Initialize return vector
    f = np.zeros(flm.shape, dtype=complex)

    # Accumulate each order m's contribution onto every ring.
    for m in range(L):
        ls = np.arange(m, L)
        gm = np.einsum('i...,ki->k...', flm[ls**2 + ls + m], P[m])
        gm_neg = np.einsum('i...,ki->k...', flm[ls**2 + ls - m],
                           (-1)**m * P[m])

        for k in range(L):
            # Extend dimensions of phi for proper broadcasting with g
            ext_indices = ((slice(k**2, (k+1)**2),) +
                           (None,) * (len(f.shape) - 1))
            if m == 0:
                # m == 0 would otherwise be double-counted by the +m/-m sum.
                f_tilde = gm[[k]] / (2 * np.pi)
            else:
                f_tilde = ((np.exp(-1j * m * phis[ext_indices]) * gm_neg[[k]] +
                            np.exp(1j * m * phis[ext_indices]) * gm[[k]]) /
                           (2 * np.pi))
            f[k**2:(k+1)**2] += f_tilde

    return f
It’s with warm greetings that I welcome you into the new year and thank you for all that you have made possible in 2018. Last year with your support, we transformed the lives of 3,225,033 people. As we reflect on these numbers, we remember that it was not that long ago in 2015 that we were at an inflection point at RestoringVision. We made a commitment to grow to help 1 million people in need access glasses and transform their lives through clear vision. That year, our donors and service delivery partners stepped up to the challenge and together, we grew to help 2.5 million people. Since then, we’ve continued to grow and we’re continuing to make a difference in people’s lives every day. None of this would be possible without your commitment, dedication, and contributions. From our donors, who provide glasses and support our work, to our more than 1,400 service delivery partners, who work with us to get glasses to the people who need them most, I thank you. Simultaneously, I ask you to keep the momentum going. I’m confident that together this year, we can make even greater strides in creating access to glasses for people living in some of the most impoverished communities around the world. As always, thank you for your partnership and support. RestoringVision has partnered with nine optical organizations and the Bangladesh Ministry of Health and Family Welfare for an innovative project titled the Clear Vision Collective (CvC). The goal of this project is to bring the gift of sight to people in need across Bangladesh. We anticipate that over 80% of the individuals we serve will be receiving their first ever pair of glasses through this program. Click here to learn more about this impactful initiative. We’re excited to announce that we’re offering two new supplemental reading glasses packages in an effort to better serve our partners who are serving a primarily younger or older population. 
These packages are the perfect add-on to your standard orders, helping you ensure that you have enough glasses for the specific needs of your trip. Our low strength supplemental package includes 100 pairs of glasses from +1.00 to +2.00 diopters. Our high strength supplemental package includes 100 pairs of glasses from +2.25 to +3.50 diopters. To order the new supplemental packages, click here. Some of our most popular glasses strengths are back in stock. You can once again order reading glasses in the powers +2.25, +3.00, and +3.50. We also have a limited supply of +4.00 reading glasses available to order. Visit our shopping cart to place your 2019 orders today! To read the full version of our newsletter, please click here. The year-end holiday season is a time for individuals and organizations to reflect upon the past 12 months and celebrate with family and friends. As families gather around the table and organizations host events and parties, this yearly reflection generates an added spark. It inspires many to help people in need around their communities and the world. On top of this, in order to qualify for a tax deduction during the impending tax season, companies and individuals are required to make their yearly charitable contributions prior to the turning of the calendar. As a result, most individual and corporate donations happen between #GivingTuesday, or as we put it here at RestoringVision, #GivingSightDay, and the end of the year, December 31st. For those who decide to partake in this year-end charitable giving season, it’s important to spend time researching the right nonprofit organization to donate to. We encourage companies and people to investigate and ensure their donations are going to charities and causes that are most important to them, their families or their organizations. Every single day, millions of people around the world are in some way touched by a nonprofit organization. 
Whether it’s a community-based program, a healthcare program, a food program, or an educational program, nonprofit organizations build vibrant and improving communities around the world through the power of donations. These nonprofit organizations support health, safety, and education in communities that might otherwise lack the capability to provide it. Nonprofits are the foundation of global health and wellness to protect our planet and future generations. While issues like hunger, AIDS, Ebola, and Malaria are rampant on the news and around the world, there are issues, such as poor vision, that have been neglected in the past, but are gaining attention in recent years, and for good reason. Imagine a world in which you didn’t have access to the tools you needed to properly see. Vision is one of our most important senses and as humans we are extremely reliant on our ability to see, which is evident in how complex our eyes are. But as you read this today, over 2 billion people worldwide suffer from an uncorrected vision impairment, and of those upwards of 544 million people only need reading glasses to make the correction but live in impoverished communities lacking access to these life-changing items. Health – Reading glasses make the vision correction needed for people with close range vision problems. Education – Both children and adults benefit from near vision correction, giving adults the ability to work, read, learn, and perform daily tasks without keeping children away from school. Adults can also enter continuing education and workforce training. Safety – Having clear & focused close-range vision is essential to safely completing hand held tasks and providing for one’s family, such as caring safely for a small child. Productivity – Vision correction leads to a 35% increase in productivity and 20% increase in wages. A simple pair of reading glasses gives people the ability to carry out their daily tasks, work, and lead productive lives. 
Quality of Life – Regaining the ability to see clearly boosts the morale of individuals and allows them to become contributing members of a family or community once again. Overall Community – By providing glasses to impoverished communities without access, it boosts their education rate, median income, economic output, and the overall health of that community. The good news is in 2018 there are plenty of ways to get involved during the holiday season, or anytime, and help restore vision to people in need around the world. Organizations like RestoringVision are dedicated to correcting vision and providing reading glasses to people in communities that otherwise lack access. You or your organization can join forces with us this giving season and help in three different, but effective ways. First, you or your organization can Become a Partner. RestoringVision has a strong network of partners who help in the distribution of glasses to impoverished communities around the globe. Our partners are the ones on the ground taking the glasses overseas and putting them on the faces of people in need. If you are a non-profit organization or mission group looking to make a difference in the world, becoming a partner with RestoringVision is a perfect fit. Second, you can Become a Sponsor. At RestoringVision we are lucky to be blessed with an amazing group of corporate sponsors. These sponsors not only help provide access to glasses for people in need, but also help us create access to health, education, work, and a better life for these same people. If you are a corporation embarking on a corporate social responsibility program, consider working with us. One for One Program: The sale of a unit of your product will result in a donation of a pair of glasses to someone in need. This is RestoringVision’s take on the classic buy one give one program. When a customer buys a product, our sponsors make a monetary donation to RestoringVision, which we then convert into a pair of glasses. 
Direct Giving Program: Our sponsors can make a direct cash donation that will immediately go to the purchase of glasses for disbursal. In-Kind Program: RestoringVision makes it easy for manufacturers, wholesalers, and retailer sponsors to donate their excess inventory of reading glasses or new sunglasses. Lastly, you can get involved by donating to our cause and helping RestoringVision empower lives by giving the gift of sight to people in need around the world. Make a tax-deductible donation today and every dollar you donate will be converted into glasses delivered to people in need. Remember, this holiday season while reflecting on the memories of the past year, carry on the tradition of giving. Whether you donate with your family or through your company or organization, consider helping those in need see and live better. There’s no better time to spread the spirit of the holiday season and make a lasting, positive, impact in people’s lives. The happiest of holidays to you. Another incredible year comes to a close. When I reflect on the accomplishments of this year and our tremendous progress towards reaching our goal of helping 20 million people see clearly by 2020, I have an overwhelming feeling of gratitude. Gratitude that RestoringVision has been able to celebrate 15 years of helping people in impoverished communities see and live better. Gratitude for the trust and hard work of our service delivery partners who take glasses to the field. Gratitude for the generosity of our sponsors and donors who support our work. Gratitude knowing that behind every pair of glasses we provide is someone whose life has been transformed through the gift of sight. We couldn’t have done this without you. As you celebrate with family and friends this holiday season, please take a moment to reflect on the good you have done to make the world a better place. 
Whether you took glasses to the field, you donated or told someone about RestoringVision and the work that we do, you made a difference. Cheers to you and best wishes for an amazing 2019! These last two months have been very special for RestoringVision. I celebrated our 15 year anniversary in Yucatan, Mexico, where we helped over 800 people in five villages see clearly again. The trip was especially meaningful to me as it was on a mission trip to Mexico when I first observed the significant need for reading glasses in impoverished communities. It was on that trip that I was inspired to create RestoringVision. It was also great to see everyone participating in World Sight Day last month. Thanks to your support, we were able to bring attention to the serious global problem of uncorrected near vision impairment that affects over 500 million people who lack access to a simple pair of reading glasses. In this month of thanks and giving, we are looking to help even more people by once again turning #GivingTuesday into #GivingSightDay with you. We hope that you will join us in raising funds and awareness. Together, we can work towards a world where every person who needs glasses has them. As always, thank you for your steadfast partnership and support. We couldn’t do it without you. Let’s Turn #GivingTuesday into #GivingSightDay! Mark your calendars: #GivingTuesday is coming up on November 27. For the third consecutive year, RestoringVision is turning #GivingTuesday into #GivingSightDay. Our goal is to raise $30,000 to transform 40,000 lives through the gift of sight. We hope you will join us in this celebration of giving! Donors and supporters: You don’t have to wait until Nov. 27 to help us reach our goal. You can click here to donate now. Thanks to National Vision, Inc., the first $10,000 raised will be doubled! Service Delivery Partners: We’ll once again have our exciting GSD promotion for you on Nov. 27. Be on the lookout for a special email with the details. 
In the meantime, be sure to add #GivingSightDay to your calendars. Poor vision is a global health problem that is just beginning to gain recognition. In his book “Clearly”, philanthropist and businessman James Chen shines light on the importance of access to glasses through testimonials of individuals affected by vision impairment. RestoringVision is grateful to James and Vision For A Nation for allowing us to share an excerpt of this impactful case study. To read it, please click here. For all of our service delivery partners, we have created a new manual with the most useful tips that have helped our partners to successfully dispense reading glasses. To view and download this document, please click here. Chen, J. (2017) Case study v Felicien. In Clearly. Biteback Publishing Ltd. Poor vision is one of the biggest unaddressed global health problems that recently has been gaining more and more attention. The extraordinary book Clearly by businessman and philanthropist James Chen addresses this health crisis through the personal stories of individuals affected by vision loss. The story of Felicien is particularly special to RestoringVision since it is about near vision impairment. Felicien is a father of seven children living in Kigali, Rwanda, who feared his deteriorating eyesight would cause him to lose his job. Thanks to the work of Vision For A Nation, Felicien learned that he just needed a simple pair of reading glasses to restore his vision and remain a productive member of his family and community. Through our network of non-profit partners, corporate sponsors, individual donors, and supporters, RestoringVision is proud to impact the lives of people like Felicien by providing new, high-quality reading glasses to people living in impoverished communities around the world. Our vision is a world where every person who needs glasses has them, and through the increased recognition of our cause, we believe we can make that vision a reality. 
Read Felicien’s full story on the slideshow above. Thank you to James Chen and Vision For A Nation for allowing us to share this impactful case study. To learn more about James Chen’s book Clearly, please click here.
#!/usr/bin/python

# Voxie script: extracts the current slice of a data set as a 2-D image,
# scans it for barcodes with zbar (normal and mirrored), and shows the
# results in a message dialog.

import sys
import dbus
import mmap
import numpy
import tempfile
import os
import zbar
import Image
import ImageDraw

import voxie

args = voxie.parser.parse_args()

instance = voxie.Voxie(args)

slice = instance.getSlice(args)

dataSet = slice.getDataSet()
data = dataSet.getFilteredData()

with instance.createClient() as client:
    # Current slice plane: origin (point) and orientation (rotation quaternion).
    plane = slice.dbus_properties.Get('de.uni_stuttgart.Voxie.Slice', 'Plane')
    origin = numpy.array(plane[0])
    orientation = plane[1]
    rotation = voxie.Rotation (orientation)

    size = numpy.array(data.get('Size'), dtype='uint64')
    dorigin = numpy.array(data.get('Origin'), dtype=numpy.double)
    dspacing = numpy.array(data.get('Spacing'), dtype=numpy.double)

    # Project all 8 corners of the volume's bounding box into plane
    # coordinates to find the 2-D extent the slice image must cover.
    posmin = posmax = None
    for corner in [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]:
        cpos = numpy.array(corner,dtype=numpy.double) * size
        cpos = rotation * (dorigin + dspacing * cpos)
        if posmin is None:
            posmin = cpos
        if posmax is None:
            posmax = cpos
        posmin = numpy.minimum (posmin, cpos)
        posmax = numpy.maximum (posmax, cpos)
        #print (cpos)

    #print (posmin)
    #print (posmax)

    # One pixel per smallest voxel spacing; keep the plane's z coordinate.
    pixSize = numpy.min (dspacing)
    pos0 = (posmin[0], posmin[1], (rotation.inverse * origin)[2])
    pos = rotation * pos0
    width = int ((posmax[0] - posmin[0]) / pixSize + 1)
    height = int ((posmax[1] - posmin[1]) / pixSize + 1)

    options = {}
    # NOTE(review): the first assignment is immediately overwritten; only
    # 'Linear' interpolation is actually used.
    options['Interpolation'] = 'NearestNeighbor'
    options['Interpolation'] = 'Linear'

    # Render the slice into a shared-memory image and copy it out.
    with instance.createImage(client, (width, height)) as image, image.getDataReadonly() as buffer:
        data.dbus.ExtractSlice (pos, orientation, (width, height), (pixSize, pixSize), image.path, options)
        image.dbus.UpdateBuffer({})
        data = numpy.array (buffer.array)

    scanner = zbar.ImageScanner()

    allData = ""

    def scanImage(image):
        """Scan a PIL image for barcodes; append results to allData."""
        global allData
        # 'Y800' = 8-bit greyscale, matching the .convert('L') below.
        zbarImage = zbar.Image(image.size[0], image.size[1], 'Y800', image.tobytes())
        scanner.scan (zbarImage)
        print (len(scanner.results))
        for result in scanner.results:
            data = '%s %s %s "%s"' % (result.type, result.quality, result.location, result.data)
            allData = allData + data + "\n"
            print (data)
    #        im2 = image.copy ().convert ('RGB')
    #        draw = ImageDraw.ImageDraw (im2)
    #        line = list(result.location)
    #        line.append (result.location[0])
    #        draw.line (line, fill='blue')
    #        im2.save ('/tmp/xx.png')

    #print (data.dtype)
    # Normalize the slice data to 0..255 greyscale for the scanner.
    data[numpy.isnan(data)] = 0
    data -= numpy.min (data)
    data /= numpy.max (data)
    data *= 255
    image = Image.fromarray(data).convert('L')
    #image.save ('/tmp/qq.png')

    # Scan both the image and its mirror (codes may be flipped in the slice).
    scanImage (image)
    image = image.transpose(Image.FLIP_LEFT_RIGHT)
    scanImage (image)

    import gtk
    # NOTE(review): "QT codes" looks like a typo for "QR codes" in this
    # user-visible string -- confirm before changing.
    md = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO, gtk.BUTTONS_CLOSE, "QT codes found:\n" + allData)
    md.run()
The Los Angeles Police Revolver & Athletic Club, Inc. The Challenge Cup/Baker to Vegas Relay (road race) was the vision of Officer Chuck Foote, then General Manager of the Los Angeles Police Revolver and Athletic Club, Inc. (LAPRAAC) and Officer Larry Moore, then LAPD Athletic Director. Its predecessor, the LAPD Metro Division’s Death Valley Relay, established the format still in use today. The 1st Challenge Cup Relay Start Line was located on California Highway 127 in front of Baker High School, Baker California. The Finish Line was on Nevada Highway 160 at the intersection of the Blue Diamond (cut off), 119 miles away (the Death Valley Relay was 119 miles), just 13 miles short of Las Vegas, Nevada. At the conclusion of the race, all nineteen (19) teams gathered at the Finish Line. What a sight! 400 people standing in the desert dirt, next to a camper, along the highway. The Awards Ceremony was conducted by the then Police Chief, Daryl F. Gates. The highway traffic was flying by, but we didn’t care. We were in our own world, doing our thing. LAPD Central Division was the overall winner that first year. In 1986 the Starting Line was moved 18 miles up the road from Baker and the Finish Line was moved to the Hacienda Hotel. The Hotel’s marquee read “Welcome LAPD Challenge Cup Relay - Finish Line.” It was Big Time, Las Vegas style. The Hacienda Hotel Convention Center was bursting at the seams where over 2,000 runners, volunteers, and spectators came to the Awards Ceremony. The race was in town just a few hours and already it had outgrown the Hacienda Hotel. Although the Finish Line remained at the Hacienda Hotel (until the Hotel’s demise in 1996) the Awards Ceremony has moved about town like a hobo looking for a home. In 1987, the Baker to Vegas Challenge Cup Relay (as it is called today) suffered its first setback when the race was halted at leg number 14 due to blizzard conditions on NV Highway 160 over Mountain Springs Pass. 
Needless to say, once the race was halted, many dejected people went home and only 2500 attended the Awards Ceremony. This was the first year of nine years that the Sahara Hotel would be the host hotel of the Awards Ceremony. The LAPD Training/Air Support Division won the race. They were ahead at the time the race was stopped and awarded the Short Course Victory. A Victory is a Victory, or so the Los Angeles County Sheriffs have been told (LASD was favored that year, 1987). The 1987 race is still talked about today as the most infamous victory. The cancellation of the event (mid-race) caused a trickle-down effect whereby many dejected participants returned home and did not attend the Awards Ceremony. Therefore, merchandise purchased, i.e.: T-shirts, sweatshirts, hats, etc., went unsold, causing the race to lose over $6,000. In 1987, the Los Angeles Police Revolver and Athletic Club stepped in and bailed out the event and now is the sole owner and host of the Baker to Vegas Challenge Cup Relay. As host, LAPRAAC has ensured the success of the race. Today it is regarded as the largest law enforcement event of its kind in the world with teams participating from Australia, England, Canada, and the Navajo Nation as well as throughout the United States. The Baker to Vegas Challenge Cup Relay is truly an international event, and is considered by many to be one of the most positive events offered to law enforcement officers today. It offers participants an opportunity to maintain a physical fitness program so as to help them better perform their duties. In relay form, teams will challenge the course leading from 26 miles North of Baker, California on Highway 127 to Shoshone; turn N/E on CA Highway 178, cross the State Line to Nevada on Highway 372 into Pahrump, NV; head S/E on NV Highway 160 and finish at the Las Vegas Hotel & Convention Center in Las Vegas, NV. There you have it. The story of the largest police foot pursuit of its kind in the world!
So help my Asics Tigers (Nike, Adidas, etc.)!
import sys
import argparse
from pprint import pformat, pprint

from .config import parse_args
from .deadman import App, willing_replicas


def indented_pprint(obj):
    """Pretty-print *obj* with every output line prefixed by an indent."""
    indented = ''.join(' ' + line for line in pformat(obj).splitlines(True))
    print(indented)


def show_cli(argv=sys.argv):
    """Entry point for the ``zgres show`` command line tool.

    Prints this node's own registration plus cluster-wide state as
    recorded in the DCS. Does nothing unless a [deadman] section with
    plugins is configured.
    """
    parser = argparse.ArgumentParser(description="Show zgres info")
    config = parse_args(parser, argv, config_file='deadman.ini')
    # Guard clause: without a configured deadman section there is no
    # cluster state to show.
    if not (config.has_section('deadman')
            and config['deadman'].get('plugins', '').strip()):
        return
    # HACK, we only need the plugins, really
    plugins = App(config)._plugins
    plugins.initialize()
    all_state = list(plugins.dcs_list_state())
    my_id = plugins.get_my_id()
    # Find this node's own entry, if it registered itself in the DCS.
    my_state = next(
        (state for node_id, state in all_state if node_id == my_id),
        None)
    print('My State:')
    print(' ID: {}'.format(my_id))
    if my_state is None:
        role = 'not registered in zookeeper'
    else:
        role = my_state.get('replication_role')
    print(' Replication role: {}'.format(role))
    print('Cluster:')
    print(' current master: {}'.format(plugins.dcs_get_lock_owner(name='master')))
    print(' database identifier: {}'.format(plugins.dcs_get_database_identifier()))
    print(' timeline: {}'.format(pformat(plugins.dcs_get_timeline())))
    # willing_replicas is removed!
    willing = list(willing_replicas(all_state))
    print('\nwilling replicas:')
    indented_pprint(willing)
    best_replicas = list(plugins.best_replicas(states=willing))
    print('\nbest replicas:')
    indented_pprint(best_replicas)
    print('\nall conn info:')
    indented_pprint(list(plugins.dcs_list_conn_info()))
    print('\nall state:')
    indented_pprint(all_state)
Why despite its recognized importance to organizations regarding the realization of their digital objectives is procurement not even on the radar screen? What can procurement professionals do to elevate its presence and influence in the emerging digital world? Join Procurement Insights’ Jon Hansen to answer these as well as other important questions regarding procurement’s digital reinvention along with the guest panel of industry thought leaders, Colin Cram & Rob Handfield who will talk about what must take place for you and your organization to digitally reinvent your future success. To know what procurement professionals can do to elevate its presence and influence in the emerging digital world. To understand why despite the recognized importance to organizations regarding the realization of their digital objectives is procurement not even on the radar? To get answers regarding procurement’s digital reinvention for future success.
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-13 16:48
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration for the ``adyengo`` app.

    Re-declares the ``choices`` (and related options) of several
    CharFields on the Notification and Session models. Choice changes do
    not alter the database schema itself; this migration only records
    the new model state. Do not edit the generated operations by hand —
    regenerate with ``makemigrations`` instead.
    """

    # Must be applied after the previous auto-generated adyengo migration.
    dependencies = [
        ('adyengo', '0017_auto_20161013_1848'),
    ]

    operations = [
        # Adyen payment method codes, e.g. 'mc' = Master Card,
        # 'ideal' = iDEAL. Optional on Notification (blank/null allowed).
        migrations.AlterField(
            model_name='notification',
            name='payment_method',
            field=models.CharField(blank=True, choices=[('mc', 'Master Card'), ('dotpay', 'Dotpay'), ('bankTransfer_DE', 'German Banktransfer'), ('giropay', 'GiroPay'), ('diners', 'Diners Club'), ('visa', 'Visa'), ('bcmc', 'Bancontact card'), ('bankTransfer_IBAN', 'International Bank Transfer (IBAN)'), ('directdebit_NL', 'Direct Debit (Netherlands)'), ('discover', 'Discover'), ('bankTransfer', 'All banktransfers'), ('maestro', 'Maestro'), ('sepadirectdebit', 'SEPA Direct Debit'), ('amex', 'Amex'), ('directEbanking', 'SofortUberweisung'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('ebanking_FI', 'Finnish E-Banking'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('paypal', 'PayPal')], max_length=50, null=True),
        ),
        # ISO 3166-1 alpha-2 country codes supported by the session.
        migrations.AlterField(
            model_name='session',
            name='country_code',
            field=models.CharField(choices=[('NL', 'Netherlands'), ('GB', 'United Kingdom'), ('DE', 'Germany'), ('BE', 'Belgium')], max_length=2),
        ),
        # Hosted Payment Page flavour; defaults to skipping the page.
        migrations.AlterField(
            model_name='session',
            name='page_type',
            field=models.CharField(choices=[('skip', 'Skip'), ('single', 'Single'), ('multiple', 'Multiple')], default='skip', max_length=15),
        ),
        # Adyen recurring-contract type ('RECURRING,ONECLICK' lets the
        # shopper choose).
        migrations.AlterField(
            model_name='session',
            name='recurring_contract',
            field=models.CharField(blank=True, choices=[('RECURRING,ONECLICK', 'Recurring and One click (user chooses)'), ('ONECLICK', 'One click'), ('RECURRING', 'Recurring')], max_length=50),
        ),
        # Whether the session goes through the API or the HPP flow.
        migrations.AlterField(
            model_name='session',
            name='session_type',
            field=models.CharField(choices=[('api_recurring', 'API Recurring'), ('hpp_recurring', 'HPP Recurring'), ('hpp_regular', 'HPP Regular')], max_length=25),
        ),
        # Locale shown to the shopper on the payment pages.
        migrations.AlterField(
            model_name='session',
            name='shopper_locale',
            field=models.CharField(blank=True, choices=[('de_DE', 'German (Germany)'), ('fr_BE', 'French (Belgium)'), ('nl_NL', 'Dutch (Holland)'), ('nl_BE', 'Dutch (Belgium)'), ('en_GB', 'English (United Kingdom)')], default='nl_NL', max_length=5),
        ),
        # Same payment-method code list as above, for the session's
        # explicit allow-list (required, no blank).
        migrations.AlterField(
            model_name='sessionallowedpaymentmethods',
            name='method',
            field=models.CharField(choices=[('mc', 'Master Card'), ('dotpay', 'Dotpay'), ('bankTransfer_DE', 'German Banktransfer'), ('giropay', 'GiroPay'), ('diners', 'Diners Club'), ('visa', 'Visa'), ('bcmc', 'Bancontact card'), ('bankTransfer_IBAN', 'International Bank Transfer (IBAN)'), ('directdebit_NL', 'Direct Debit (Netherlands)'), ('discover', 'Discover'), ('bankTransfer', 'All banktransfers'), ('maestro', 'Maestro'), ('sepadirectdebit', 'SEPA Direct Debit'), ('amex', 'Amex'), ('directEbanking', 'SofortUberweisung'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('ebanking_FI', 'Finnish E-Banking'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('paypal', 'PayPal')], max_length=50),
        ),
        # Same list again, for the session's block-list.
        migrations.AlterField(
            model_name='sessionblockedpaymentmethods',
            name='method',
            field=models.CharField(choices=[('mc', 'Master Card'), ('dotpay', 'Dotpay'), ('bankTransfer_DE', 'German Banktransfer'), ('giropay', 'GiroPay'), ('diners', 'Diners Club'), ('visa', 'Visa'), ('bcmc', 'Bancontact card'), ('bankTransfer_IBAN', 'International Bank Transfer (IBAN)'), ('directdebit_NL', 'Direct Debit (Netherlands)'), ('discover', 'Discover'), ('bankTransfer', 'All banktransfers'), ('maestro', 'Maestro'), ('sepadirectdebit', 'SEPA Direct Debit'), ('amex', 'Amex'), ('directEbanking', 'SofortUberweisung'), ('bankTransfer_NL', 'Dutch Banktransfer'), ('ebanking_FI', 'Finnish E-Banking'), ('ideal', 'iDEAL'), ('card', 'All debit and credit cards'), ('elv', 'ELV'), ('paypal', 'PayPal')], max_length=50),
        ),
    ]
The Lyme Borreliosis illness, more commonly known as Lyme disease, was first recognised in 1975. Mothers who lived in Lyme, Connecticut, noticed that their children had developed what they thought to be Rheumatoid Arthritis. Lyme disease is a bacterial illness caused by bacterium called Borrelia Burgdorferi. It is spread when infected ticks, often found on deer, bite the skin and feed on the blood, which gives a gateway for the bacterium to infect the body. Lyme disease affects people in the United Kingdom with an estimated 2,000 to 3,000 cases a year, whereas the United States of America has more than 16,000 cases annually. However, it is prevalent in other parts of the world with common bacteria such as Borrelia Afzelii and Borrelia Garinii affecting many people each year. Who does it affect and who can claim? Lyme disease is an endemic in northern hemisphere regions such as Canada, Europe, United Kingdom and the United States. 20-25% of Lyme disease cases occur while people are abroad. Simpson Miller LLP's travel solicitors can advise you if you have been affected whilst on a package holiday. Wear protective clothing – such as long sleeved shirts, long trousers, boots etc. Wherever you have contracted Lyme Borreliosis, our lawyers can advise you. If you have contracted the disease through no fault of your own, and you believe your tour operator is to blame get in touch today. Our blog Holiday Hotel Watch is dedicated to hotels we are investigating and monitoring for sickness epidemics and illnesses affecting UK holidaymakers, view today to see if your hotel is on our list.
import unittest

from labsys.app import create_app, db
from labsys.admissions.models import (
    Patient, Address, Admission, Symptom, ObservedSymptom, Method, Sample,
    InfluenzaExam, Vaccine, Hospitalization, UTIHospitalization,
    ClinicalEvolution,)

from . import mock


class TestModelsRelationships(unittest.TestCase):
    """Integration tests for the ORM relationships of the admissions models.

    Each test runs against a fresh database created from the 'testing'
    app configuration and dropped again in tearDown, so tests stay
    independent of each other.
    """

    def setUp(self):
        # Fresh application context + empty schema for every test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        self.client = self.app.test_client(use_cookies=True)

    def tearDown(self):
        # Discard the session and the schema so no state leaks between tests.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_patient_address_1to1(self):
        # Assigning a residence must back-populate residence.patient.
        patient = mock.patient()
        residence = mock.address()
        patient.residence = residence
        self.assertEqual(patient.residence.patient, patient)
        db.session.add(patient)
        db.session.commit()

    def test_patient_admission_1toM(self):
        # Appending to patient.admissions sets admission.patient ...
        patient = mock.patient()
        admission = mock.admission()
        patient.admissions.append(admission)
        self.assertEqual(admission.patient, patient)
        self.assertEqual(len(patient.admissions.all()), 1)
        # ... and assigning admission.patient populates the reverse side too.
        patient = mock.patient()
        admission = mock.admission()
        admission.patient = patient
        self.assertEqual(admission.patient, patient)
        self.assertEqual(len(patient.admissions.all()), 1)

    def test_admission_dated_event_1to1(self):
        '''
        Where dated event is a vaccine, hospitalization, utihospitalization
        or clinicalEvolution. That's why their constructor must be the same
        as MockDatedEvent.
        '''
        # Setup
        admission = mock.admission()
        vaccine = mock.dated_event(Vaccine)
        # Add to admission
        admission.vaccine = vaccine
        # Assert they are linked
        self.assertEqual(vaccine.admission.vaccine, vaccine)
        # Overrides previous vaccine (since it's one-to-one)
        vaccine2 = mock.dated_event(Vaccine)
        vaccine2.admission = admission
        # Assert it was replaced
        self.assertNotEqual(admission.vaccine, vaccine)
        self.assertEqual(admission.vaccine, vaccine2)
        # Ensures commit works; the orphaned first vaccine must not be
        # persisted (it never gets a primary key).
        db.session.add(admission)
        db.session.commit()
        self.assertEqual(vaccine2.id, 1)
        self.assertIsNone(vaccine.id)
        self.assertEqual(len(Admission.query.all()), 1)
        self.assertEqual(len(Vaccine.query.all()), 1)
        # Ensures cascade all, delete-orphan works: deleting the admission
        # must also delete its vaccine row.
        db.session.delete(admission)
        db.session.commit()
        self.assertEqual(len(Admission.query.all()), 0)
        self.assertEqual(len(Vaccine.query.all()), 0)

    def test_admission_symptoms_1toM(self):
        # Generate mock models: two observations attached to one admission.
        admission = mock.admission()
        obs_symptom0 = ObservedSymptom(
            observed=True,
            details='obs symptom details',
            admission=admission,
            symptom=Symptom(name='symptom1'),
        )
        obs_symptom1 = ObservedSymptom(
            observed=False,
            details='obs symptom details',
            admission=admission,
            symptom=Symptom(name='symptom2'),
        )
        # Assert relationship between is setup
        self.assertEqual(len(admission.symptoms), 2)
        self.assertEqual(obs_symptom0.admission, obs_symptom1.admission)
        self.assertEqual(admission.symptoms[0], obs_symptom0)
        self.assertEqual(admission.symptoms[1], obs_symptom1)
        # Assert they are correctly commited
        db.session.add(admission)
        db.session.commit()
        # Assert symptoms have the same admission_id
        self.assertEqual(obs_symptom0.admission_id, obs_symptom1.admission_id)
        # Assert cascade all, delete-orphan works: observations go away
        # with their admission.
        db.session.delete(admission)
        db.session.commit()
        self.assertEqual(len(Admission.query.all()), 0)
        self.assertEqual(len(ObservedSymptom.query.all()), 0)

    def test_syptom_observations_Mto1(self):
        # One symptom observed in two different admissions.
        symptom = Symptom(name='symptom')
        admission0 = mock.admission()
        admission1 = mock.admission()
        # id_lvrs_intern must be unique
        admission1.id_lvrs_intern += 'lvrs0002'
        # Generate mock models
        obs_symptom0 = ObservedSymptom(
            observed=True,
            details='obs symptom details',
            admission=admission0,
            symptom=symptom
        )
        obs_symptom1 = ObservedSymptom(
            observed=False,
            details='obs symptom details',
            admission=admission1,
            symptom=symptom,
        )
        # Assert relationship is correctly setup
        self.assertEqual(len(symptom.observations), 2)
        self.assertEqual(symptom.observations[0], obs_symptom0)
        # Collaterally, admission has relation with observed symptom
        self.assertEqual(admission0.symptoms[0], obs_symptom0)
        # Assert they are correctly commited
        db.session.add(symptom)
        db.session.commit()
        # Assert symptoms have the same admission_id
        self.assertEqual(obs_symptom0.symptom_id, symptom.id)
        # Assert cascade all, delete-orphan works
        db.session.delete(symptom)
        db.session.commit()
        self.assertEqual(len(Symptom.query.all()), 0)
        self.assertEqual(len(ObservedSymptom.query.all()), 0)
        # Collaterally, admission does not have the observed symptom
        self.assertEqual(len(admission0.symptoms), 0)
The Oakland A's are baseball's hottest team after a record-setting win streak that has made them favorites to win the World Series. Check out these facts on the Oakland Athletics. They've won 20 straight games in a row, thanks to great pitching, timely hitting and an easy schedule. Here's a closer look at the hottest team in baseball - the Oakland A's. The A's winning streak is the longest in baseball history since the Chicago Cubs won 21 in a row in 1935. That's 67 years ago - four years before games were shown on television! Oakland A's shortstop Miguel Tejada knocked in the game-winning run for Oakland in the team's 18th and 19th wins. So far during this streak, Miguel is hitting .372 and has become a favorite to win the American League MVP Award. Oakland is the third stop in the Athletics' history. The team was formed in Philadelphia in 1901, where they played until 1955, when they moved to Kansas City. In 1968, the team moved to Oakland and changed to the gold and green colored uniforms they now wear. The Oakland A's last won the World Series in 1989, when they defeated the San Francisco Giants in a series that was interrupted by an earthquake. The massive quake damaged San Francisco's Candlestick Park and the series was postponed for ten days. Once the two teams returned to firm ground, the A's beat their crosstown rivals in the series, 4-0. Best Feat in Baseball? Vote!
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'plantilla.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar

# Qt4/Qt5-string compatibility shims emitted by pyuic4.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_MainWindow(object):
    """UI definition for the coordinate/area calculation sheet window.

    Builds a header of label pairs (plan, surveyor, property, owner,
    location, date), a central area with a results table next to a
    matplotlib canvas + toolbar, a "Calcular" button, a summary row
    (angle sum, perimeter, area), plus menu/toolbar file actions.
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(1024, 600)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # --- Header row 1: "Plano de" / "Perito" -------------------------
        self.horizontalLayout_12 = QtGui.QHBoxLayout()
        self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout.addWidget(self.label)
        self.plane_of = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.plane_of.setFont(font)
        self.plane_of.setObjectName(_fromUtf8("plane_of"))
        self.horizontalLayout.addWidget(self.plane_of)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.horizontalLayout_12.addLayout(self.horizontalLayout)
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.label_5 = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.label_5.setFont(font)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.horizontalLayout_2.addWidget(self.label_5)
        self.perito = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.perito.setFont(font)
        self.perito.setObjectName(_fromUtf8("perito"))
        self.horizontalLayout_2.addWidget(self.perito)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.horizontalLayout_12.addLayout(self.horizontalLayout_2)
        self.verticalLayout.addLayout(self.horizontalLayout_12)
        # --- Header row 2: "Propiedad" / "Propietario" -------------------
        self.horizontalLayout_13 = QtGui.QHBoxLayout()
        self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.label_7 = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.label_7.setFont(font)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.horizontalLayout_3.addWidget(self.label_7)
        self.possession = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.possession.setFont(font)
        self.possession.setObjectName(_fromUtf8("possession"))
        self.horizontalLayout_3.addWidget(self.possession)
        spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem2)
        self.horizontalLayout_13.addLayout(self.horizontalLayout_3)
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        self.label_9 = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.label_9.setFont(font)
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.horizontalLayout_4.addWidget(self.label_9)
        self.homeowner = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.homeowner.setFont(font)
        self.homeowner.setObjectName(_fromUtf8("homeowner"))
        self.horizontalLayout_4.addWidget(self.homeowner)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem3)
        self.horizontalLayout_13.addLayout(self.horizontalLayout_4)
        self.verticalLayout.addLayout(self.horizontalLayout_13)
        # --- Header row 3: "Ubicación" / "Fecha" -------------------------
        self.horizontalLayout_14 = QtGui.QHBoxLayout()
        self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
        self.horizontalLayout_5 = QtGui.QHBoxLayout()
        self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
        self.label_3 = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout_5.addWidget(self.label_3)
        self.location = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.location.setFont(font)
        self.location.setObjectName(_fromUtf8("location"))
        self.horizontalLayout_5.addWidget(self.location)
        spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem4)
        self.horizontalLayout_14.addLayout(self.horizontalLayout_5)
        self.horizontalLayout_6 = QtGui.QHBoxLayout()
        self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
        self.label_11 = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.label_11.setFont(font)
        self.label_11.setObjectName(_fromUtf8("label_11"))
        self.horizontalLayout_6.addWidget(self.label_11)
        self.date = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.date.setFont(font)
        self.date.setObjectName(_fromUtf8("date"))
        self.horizontalLayout_6.addWidget(self.date)
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_6.addItem(spacerItem5)
        self.horizontalLayout_14.addLayout(self.horizontalLayout_6)
        self.verticalLayout.addLayout(self.horizontalLayout_14)
        # Table Result and graph
        self.figure = plt.figure(figsize=(3, 3))
        self.canvas = FigureCanvas(self.figure)
        self.toolbarGraph = NavigationToolbar(self.canvas, self.centralwidget)
        self.verticalLayoutGraph = QtGui.QVBoxLayout()
        self.verticalLayoutGraph.setObjectName(_fromUtf8("verticalGraph"))
        self.verticalLayoutGraph.addWidget(self.toolbarGraph)
        self.verticalLayoutGraph.addWidget(self.canvas)
        self.horizontalLayoutGraph = QtGui.QHBoxLayout()
        # BUG FIX: the generated code called setObjectName() on
        # verticalLayoutGraph here, clobbering its "verticalGraph" name and
        # leaving horizontalLayoutGraph unnamed. Name the correct layout.
        self.horizontalLayoutGraph.setObjectName(_fromUtf8("horizontalGraph"))
        self.tableResult = QtGui.QTableWidget(self.centralwidget)
        self.tableResult.setObjectName(_fromUtf8("tableResult"))
        self.tableResult.setColumnCount(0)
        self.tableResult.setRowCount(0)
        self.horizontalLayoutGraph.addWidget(self.tableResult)
        self.horizontalLayoutGraph.addLayout(self.verticalLayoutGraph)
        self.verticalLayout.addLayout(self.horizontalLayoutGraph)
        # self.verticalLayout.addWidget(self.tableResult)
        # --- "Calcular" button row ---------------------------------------
        self.horizontalLayout_7 = QtGui.QHBoxLayout()
        self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
        spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_7.addItem(spacerItem6)
        self.calcBtn = QtGui.QPushButton(self.centralwidget)
        self.calcBtn.setObjectName(_fromUtf8("calcBtn"))
        self.horizontalLayout_7.addWidget(self.calcBtn)
        spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_7.addItem(spacerItem7)
        self.verticalLayout.addLayout(self.horizontalLayout_7)
        # --- Summary row: angle sum / perimeter / area -------------------
        self.horizontalLayout_11 = QtGui.QHBoxLayout()
        self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
        self.horizontalLayout_8 = QtGui.QHBoxLayout()
        self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
        self.label_2 = QtGui.QLabel(self.centralwidget)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.horizontalLayout_8.addWidget(self.label_2)
        self.label_sumAngs = QtGui.QLabel(self.centralwidget)
        self.label_sumAngs.setObjectName(_fromUtf8("label_sumAngs"))
        self.horizontalLayout_8.addWidget(self.label_sumAngs)
        spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_8.addItem(spacerItem8)
        self.horizontalLayout_11.addLayout(self.horizontalLayout_8)
        self.horizontalLayout_9 = QtGui.QHBoxLayout()
        self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
        self.label_6 = QtGui.QLabel(self.centralwidget)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.horizontalLayout_9.addWidget(self.label_6)
        self.label_perim = QtGui.QLabel(self.centralwidget)
        self.label_perim.setObjectName(_fromUtf8("label_perim"))
        self.horizontalLayout_9.addWidget(self.label_perim)
        spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_9.addItem(spacerItem9)
        self.horizontalLayout_11.addLayout(self.horizontalLayout_9)
        self.horizontalLayout_10 = QtGui.QHBoxLayout()
        self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
        self.label_10 = QtGui.QLabel(self.centralwidget)
        self.label_10.setObjectName(_fromUtf8("label_10"))
        self.horizontalLayout_10.addWidget(self.label_10)
        self.label_area = QtGui.QLabel(self.centralwidget)
        self.label_area.setObjectName(_fromUtf8("label_area"))
        self.horizontalLayout_10.addWidget(self.label_area)
        spacerItem10 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_10.addItem(spacerItem10)
        self.horizontalLayout_11.addLayout(self.horizontalLayout_10)
        self.verticalLayout.addLayout(self.horizontalLayout_11)
        # NOTE(review): some widgets are raised more than once below; this
        # is harmless and kept exactly as generated to preserve z-order.
        self.tableResult.raise_()
        self.label.raise_()
        self.plane_of.raise_()
        self.label_3.raise_()
        self.location.raise_()
        self.label_5.raise_()
        self.perito.raise_()
        self.label_7.raise_()
        self.possession.raise_()
        self.label_9.raise_()
        self.homeowner.raise_()
        self.label_11.raise_()
        self.date.raise_()
        self.label_11.raise_()
        self.calcBtn.raise_()
        self.perito.raise_()
        self.label_3.raise_()
        self.label_2.raise_()
        self.label_sumAngs.raise_()
        self.label_6.raise_()
        self.label_perim.raise_()
        self.label_10.raise_()
        self.label_area.raise_()
        MainWindow.setCentralWidget(self.centralwidget)
        # --- Menu bar, status bar, toolbar and file actions --------------
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 738, 25))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuArchivo = QtGui.QMenu(self.menubar)
        self.menuArchivo.setObjectName(_fromUtf8("menuArchivo"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.toolBar = QtGui.QToolBar(MainWindow)
        self.toolBar.setObjectName(_fromUtf8("toolBar"))
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        self.actionNew = QtGui.QAction(MainWindow)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("icons/new-file.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionNew.setIcon(icon)
        self.actionNew.setObjectName(_fromUtf8("actionNew"))
        self.actionEdit = QtGui.QAction(MainWindow)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8("icons/edit-file.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionEdit.setIcon(icon1)
        self.actionEdit.setObjectName(_fromUtf8("actionEdit"))
        self.actionToPdf = QtGui.QAction(MainWindow)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8("icons/pdf.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionToPdf.setIcon(icon2)
        self.actionToPdf.setObjectName(_fromUtf8("actionToPdf"))
        self.actionSave = QtGui.QAction(MainWindow)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8("icons/floppy-128.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSave.setIcon(icon3)
        self.actionSave.setObjectName(_fromUtf8("actionSave"))
        self.actionSave_as = QtGui.QAction(MainWindow)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8("icons/save_as-128.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSave_as.setIcon(icon4)
        self.actionSave_as.setObjectName(_fromUtf8("actionSave_as"))
        self.actionOpen = QtGui.QAction(MainWindow)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(_fromUtf8("icons/open-file.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionOpen.setIcon(icon5)
        self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
        self.menuArchivo.addAction(self.actionNew)
        self.menuArchivo.addAction(self.actionOpen)
        self.menuArchivo.addAction(self.actionSave)
        self.menuArchivo.addAction(self.actionSave_as)
        self.menuArchivo.addAction(self.actionToPdf)
        self.menubar.addAction(self.menuArchivo.menuAction())
        self.toolBar.addAction(self.actionNew)
        self.toolBar.addAction(self.actionOpen)
        self.toolBar.addAction(self.actionSave)
        self.toolBar.addAction(self.actionEdit)
        self.toolBar.addAction(self.actionToPdf)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Install all user-visible (Spanish) strings, tooltips and shortcuts."""
        MainWindow.setWindowTitle(_translate("MainWindow", "Planilla de cálculo de coordenadas y superficie", None))
        self.label.setText(_translate("MainWindow", "Plano de:", None))
        self.plane_of.setText(_translate("MainWindow", "-", None))
        self.label_5.setText(_translate("MainWindow", "Perito:", None))
        self.perito.setText(_translate("MainWindow", "Ing. COPA, Rodi Alfredo", None))
        self.label_7.setText(_translate("MainWindow", "Propiedad:", None))
        self.possession.setText(_translate("MainWindow", "-", None))
        self.label_9.setText(_translate("MainWindow", "Propietario:", None))
        self.homeowner.setText(_translate("MainWindow", "-", None))
        self.label_3.setText(_translate("MainWindow", "Ubicación:", None))
        self.location.setText(_translate("MainWindow", "-", None))
        self.label_11.setText(_translate("MainWindow", "Fecha:", None))
        self.date.setText(_translate("MainWindow", "-", None))
        self.calcBtn.setText(_translate("MainWindow", "Calcular", None))
        self.label_2.setText(_translate("MainWindow", "Suma de ángulos:", None))
        self.label_sumAngs.setText(_translate("MainWindow", "-", None))
        self.label_6.setText(_translate("MainWindow", "Perímetro:", None))
        self.label_perim.setText(_translate("MainWindow", "-", None))
        self.label_10.setText(_translate("MainWindow", "Superficie:", None))
        self.label_area.setText(_translate("MainWindow", "-", None))
        self.menuArchivo.setTitle(_translate("MainWindow", "Archivo", None))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
        self.actionNew.setText(_translate("MainWindow", "Nuevo", None))
        self.actionNew.setToolTip(_translate("MainWindow", "Nuevo", None))
        self.actionNew.setShortcut(_translate("MainWindow", "Ctrl+N", None))
        self.actionEdit.setText(_translate("MainWindow", "Editar", None))
        self.actionEdit.setToolTip(_translate("MainWindow", "Editar", None))
        self.actionToPdf.setText(_translate("MainWindow", "Exportar a pdf", None))
        self.actionToPdf.setToolTip(_translate("MainWindow", "Exportar a pdf", None))
        self.actionSave.setText(_translate("MainWindow", "Guardar", None))
        self.actionSave.setToolTip(_translate("MainWindow", "Guardar", None))
        self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+G", None))
        self.actionSave_as.setText(_translate("MainWindow", "Guardar como", None))
        # Typo fix in user-visible tooltip: "Guardaar" -> "Guardar".
        self.actionSave_as.setToolTip(_translate("MainWindow", "Guardar archivo como", None))
        self.actionSave_as.setShortcut(_translate("MainWindow", "Ctrl+Shift+G", None))
        self.actionOpen.setText(_translate("MainWindow", "Abrir", None))
        self.actionOpen.setToolTip(_translate("MainWindow", "Abrir archivo", None))
        self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+O", None))

# import icons_rc
Ahead of protests against the use of 1080 poison planned for this weekend, experts have again set out the hard science around the pest-busting tool. Toxicologist Dr Belinda Cridge (BC) and AgResearch chair in reproduction and genomics Professor Neil Gemmell (NG), both of Otago University, and Auckland University conservation biologist Associate Professor James Russell (JR) fielded these questions from the NZ Science Media Centre. What are the persistent concerns people have about 1080? BC: As I understand it, the ongoing concerns are around non-target species toxicity and water contamination. Non-target species toxicity means that species that are not being targeted by the poison drop consume the poison and die. Common concerns centre on deaths concerning other native species, such as birds and fish, and hunted species such as deer and pigs. There is debate about how many of these deaths are directly caused by 1080 exposure as compared to other causes. 1080 is toxic to all species - as a toxicologist, actually everything is toxic if you are exposed to enough of it so 1080 isn't novel in this regard. However, birds and reptiles seem to have a degree of tolerance. In contrast, mammals are very susceptible to 1080 toxicity and so in New Zealand where all mammals, except sea lions and a bat species, are introduced, 1080 is an important pest control tool. The other concerns are around 1080 leaching into waterways and causing a range of effects to wildlife and humans. Scientifically, the understanding is that the original 1080 compound is broken down quickly in the environment and that 1080 doesn't persist in the environment or water like many other toxins. This makes it unlikely that it will accumulate in waterways and cause down-stream poisonings. Have there been any new developments in research into its effects over recent years? BC: Scientific work has slowed down on 1080 as most recent research efforts are focused on finding alternatives. 
Many people will be aware that New Zealand is fairly unique in its large-scale use of 1080; this is because we are in the privileged position of having few native mammals. Therefore, we are uniquely placed to use 1080 in pest control. Since 2014 only 400 articles on 1080 have been published worldwide in scientific journals, so around 100 per year. Many of these are case studies of poisonings - 1080 is used in other countries, just not to the same scale - or studies that refer to 1080 in comparison to other toxins. We understand the mechanism of toxicity of 1080 fairly well, and New Zealand scientists, in particular, have done a lot of work on the toxicity and environmental fate of this compound over many years. Are there any areas of uncertainty that more research would be helpful to resolve? BC: From my own interests, I would like to understand more about how 1080 is detoxified in the body as this may give us clues as to why dogs and kea have a unique sensitivity to the compound. But, this is because this is my area of expertise. I think overall we actually have a very good handle on what the toxin does and at what doses. Developments in targeted application using GPS have improved the overall safety of the compound as it is much less likely to be found in non-target areas. These types of technological advances are much more important for the ongoing use of 1080 and to improve its overall use and safety. With that said, I suspect the public will remain sceptical of 1080 due to its history of use in this country. This creates many issues that are not able to be resolved by scientific evidence alone. Toxin baits, like 1080, have played a major role in pest control in New Zealand - what other options are being investigated and how would these work? JR: Pest control to achieve biodiversity outcomes requires removing pests from the ecosystem.
Traditionally, the tools fall into the three classes of traps, toxins and biocontrol, and can be either lethal or non-lethal. Non-lethal tools are typically inefficient: They either can't scale up, are not cost-effective, or in some cases are even less humane than a quick death. Scientists are always refining all three tools so they are more humane, more pest-specific, more efficient and more cost effective. For example, new self-resetting traps have been developed, and sowing rates for 1080 have been reduced ten-fold. When deciding what pest control tool to use managers must optimise efficiency - does it kill enough pests to restore biodiversity? - humaneness - does it do so humanely? - and cost. Currently, aerially delivered 1080 is the optimal tool for mammalian predator pest control over most of New Zealand, costing $12-16 per hectare, being relatively humane, and achieving conservation goals. Looking to the future, scientists such as those working in the Biological Heritage National Science Challenge are investigating all three types of tools. They are developing new lures to make trapping more efficient, new toxins which are more humane and only harm the targeted pest species, and investigating the potential of genetic editing as a form of biocontrol. They are also considering the bioethics of predator control, so that the social, cultural and ethical issues of pest control are incorporated into decision making. Gene editing is one of the novel pest control tools with most potential, as it could be a non-lethal tool that allows pest populations to breed themselves humanely to extinction. However, such a novel tool would require New Zealanders accepting gene editing, and is yet to be even developed, let alone proven as efficient and cost-effective. What kind of timeframe might there be for new pest or predator control tools to be used in New Zealand? 
JR: New Zealand scientists, engineers and innovators are always making incremental improvements in pest control tools to achieve better biodiversity outcomes through more efficient, humane and cost-effective tools. These include modifying existing tools and inventing new variations of old tools. This work is happening every day and government, private landowners and community groups are already using the improved tools across the country. Entirely novel tools, particularly those in the area of biocontrol, require extensive laboratory development, testing and regulatory approval to demonstrate safety and efficacy before being trialled in the field. Gene editing as a pest control tool is likely to be over a decade away from deployment in New Zealand, if it is even proven to be viable. Our wildlife cannot wait until such new tools are developed and approved, and so until then, we must continue to use the most optimal tool we have, which is aerial 1080. What genetic tools might be useful for future pest or predator control? What kind of timeframe might we expect for these to be in use? NG: Pest control with current technologies over significant spatial scales is possible, but it's time-consuming and expensive. The best tool we currently have for large-scale pest control is 1080. It is aerially deployed, with precision, at the lowest amounts required to keep pest species in check so that our native birds and other species stand some chance of survival, and it rapidly breaks down in the environment. The case of 1080 use is well established and it works – where it is used our native species are recovering, where it is not they die, it really is that simple. While 1080 is the best tool we currently have in our ongoing battle with mammalian pests, we are constantly seeking improvements to our pest control toolkit. 
As a nation, we are currently investigating approaches that are more humane, species-specific, and that provide persistent control across large areas without the ongoing cost of deployment. Among this list of new tools, genetic control technologies are promising approaches that might help us meet that goal. The most direct approaches currently being explored are species-specific toxins. Essentially, we look for genetic or biochemical features unique to the species we wish to control so that we might identify toxins that will only work only on that species or its close relatives – think of it as a biochemical Achilles heel. To date, there are promising "species-specific" toxins identified for stoats and rats. However, it will likely take many years before such substances can be proven to be specific and approved for use in our environment. If we are serious about Predator Free 2050 and wish genetic technologies to be part of the solution we need to step up the conversations, increase our investment, and start planning out what the workflow for this project would look like over the next decade plus. If we start today we need months to plan, several years in the lab, and years for controlled field trials, before eventual deployment at landscape scales. It will take massive effort and years to achieve eradication of possums, rats and stoats - recent models on some hypothetical, ideal, gene drives suggest 20 years from deployment to eradication. This is the New Zealand version of the Space Race and we need commitment and resource aplenty if we are to achieve it. It can be done, but whether we have the resolve to resource this appropriately and see this through to completion remains uncertain.
#!/usr/bin/python
# Interactive ESC/motor calibration tool: drives a motor through an Adafruit
# PWM controller and lets the operator map RPM set-points to PWM pulse widths,
# saving each calibration point into a `shelve` database ('drone_vars').
import curses
import shelve
from Adafruit_PWM_Servo_Driver import PWM
import time
import numpy as np  # NOTE(review): unused in this script -- confirm before removing

# Initialise the PWM device using the default I2C address.
pwm = PWM(0x40, debug=False)

pulseMin = 2000   # Min pulse length out of 4096
pulseLow = 2500
pulseMax = 3047
pulseStop = 0     # Zero pulse stops the motor entirely
motorChannel = 0

pwm.setPWMFreq(400)                    # Set frequency to 400 Hz
pwm.setPWM(motorChannel, 0, pulseMin)  # Set to min (throttle down) so the ESC arms
time.sleep(2)                          # Wait for motors to be armed

drone_vars = shelve.open('drone_vars')

stdscr = curses.initscr()
curses.cbreak()
stdscr.keypad(1)

stdscr.addstr(0, 10, "Hit 'q' to quit, 'j' to go down and 'k' to go up")
stdscr.refresh()

key = ''


def close_safely():
    """Persist calibration data, stop the motor and restore the terminal."""
    drone_vars.close()
    pwm.setPWM(motorChannel, 0, pulseStop)
    curses.endwin()
    print('Stopping motor')


def show(row, label, cal_rpm, pwm_pulse):
    """Render one status line for the current calibration point."""
    stdscr.addstr(row, 20, label + ' rpm: ' + cal_rpm +
                  ' pulse: ' + str(pwm_pulse))


cal_index = 3     # Screen row; doubles as the RPM index (row * 1000 rpm)
pwm_pulse = 2200

try:
    while key != ord('q'):
        key = stdscr.getch()
        stdscr.refresh()
        cal_rpm = str(cal_index * 1000)
        if key == curses.KEY_LEFT:
            # Trim the pulse width down and apply it immediately.
            pwm_pulse = pwm_pulse - 1
            show(cal_index, 'cal', cal_rpm, pwm_pulse)
            pwm.setPWM(motorChannel, 0, pwm_pulse)
        elif key == curses.KEY_RIGHT:
            # Trim the pulse width up and apply it immediately.
            pwm_pulse = pwm_pulse + 1
            show(cal_index, 'cal', cal_rpm, pwm_pulse)
            pwm.setPWM(motorChannel, 0, pwm_pulse)
        elif key == ord('j'):
            # Save the current point, then move one calibration row down.
            show(cal_index, 'saved', cal_rpm, pwm_pulse)
            drone_vars[cal_rpm] = pwm_pulse
            cal_index = cal_index + 1
        elif key == ord('k'):
            # Save the current point, then move one calibration row up.
            show(cal_index, 'saved', cal_rpm, pwm_pulse)
            drone_vars[cal_rpm] = pwm_pulse
            cal_index = cal_index - 1
finally:
    # BUG FIX: close_safely() was previously called once after the loop and
    # once in an explicit `except KeyboardInterrupt` handler, so any OTHER
    # exception left the terminal in cbreak mode and the motor spinning.
    # A `finally` covers normal exit, Ctrl-C and unexpected errors alike.
    close_safely()
OMG Napoleon Dynamite is on Showtime. This is one of my favourite movies of all time, which got me thinking about my favourite films of all time. But if you asked me straight up, "What movie do you want to watch?", Footloose and Napoleon Dynamite would top the list.
#!/opt/anaconda2/bin/python
# -*- coding: utf-8 -*-

"""
################################################################################
#
#  Copyright (c) 2015 Wojciech Migda
#  All rights reserved
#  Distributed under the terms of the MIT license
#
################################################################################
#
#  Filename: cell_patches_kmeans.py
#
#  Description:
#      Cell patches from images (with KMeans)
#
#  Authors:
#       Wojciech Migda
#
################################################################################
#
#  History:
#  --------
#  Date         Who  Ticket     Description
#  ----------   ---  ---------  ------------------------------------------------
#  2015-12-20   wm              Initial version
#
################################################################################
"""

from __future__ import print_function


DEBUG = False

__all__ = []
__version__ = 0.1
__date__ = '2015-12-20'
__updated__ = '2015-12-20'


from sys import path as sys_path
# Local 'Pipe' checkout provides the `pipe` module used for the | pipelines.
sys_path.insert(0, './Pipe')
#from pipe import *
import pipe as P


def pois(im, num_peaks, footprint_radius=2.5, min_dist=8, thr_abs=0.7):
    # Find points of interest (local intensity maxima) per image layer.
    # Returns a list with one coordinate array per layer of `im`.
    # NOTE(review): assumes `im` is a (H, W, layers) float array -- confirm
    # against the upstream pipeline (`iexpand_dims(axis=2)` in `work`).
    from skimage.draw import circle

    FOOTPRINT_RADIUS = footprint_radius
    # Circular footprint rasterized into a 9x9 mask, centred at (4, 4).
    cxy = circle(4, 4, FOOTPRINT_RADIUS)

    from numpy import zeros
    cc = zeros((9, 9), dtype=int)
    cc[cxy] = 1

    from skimage.feature import peak_local_max

    MIN_DIST = min_dist
    THR_ABS = thr_abs

    coordinates = [
        peak_local_max(
            im[:, :, layer],
            min_distance=MIN_DIST,
            footprint=cc,
            threshold_abs=THR_ABS,
            num_peaks=num_peaks) for layer in range(im.shape[2])]

    return coordinates


@P.Pipe
def cluster(seq, nclust, window, with_polar):
    # Pipe stage: for each (image, pois) pair, cut square patches of size
    # `window` around each PoI and cluster them with (MiniBatch)KMeans,
    # yielding the cluster centres (the learned patch codebook).
    from numpy import where,array
    from skimagepipes import cart2polar_

    w2 = window / 2  # integer half-window (Python 2 integer division)

    for im, pois in seq:
        for layer in range(im.shape[2]):
            p = pois[layer]
            # Drop PoIs whose window would fall outside the image bounds.
            p = p[where(
                (p[:, 0] >= w2) &
                (p[:, 0] < (im.shape[0] - w2)) &
                (p[:, 1] >= w2) &
                (p[:, 1] < (im.shape[1] - w2))
                )]
            print(str(p.shape[0]) + " pois")

            # Flattened raw patches, one row per PoI.
            patches = array([im[cx - w2:cx + w2, cy - w2:cy + w2, layer].ravel()
                             for cx, cy in p])

            if with_polar:
                # Optionally recompute the patches in polar coordinates
                # (note: the cartesian patches above are discarded).
                patches = array([cart2polar_(im[cx - w2:cx + w2, cy - w2:cy + w2, layer]).ravel()
                                 for cx, cy in p])
                pass

            from sklearn.cluster import KMeans,MiniBatchKMeans
            #clf = KMeans(n_clusters=nclust, random_state=1, n_jobs=4)
            clf = MiniBatchKMeans(
                n_clusters=nclust,
                random_state=1,
                batch_size=5000)
            clf.fit(patches)

            VISUALIZE = False
            #VISUALIZE = True
            if VISUALIZE:
                # Debug view of the learned codebook patches.
                from matplotlib import pyplot as plt
                fig, ax = plt.subplots(1, nclust, figsize=(8, 3),
                                       sharex=True, sharey=True,
                                       subplot_kw={'adjustable':'box-forced'})
                for i in range(nclust):
                    ax[i].imshow(clf.cluster_centers_[i].reshape((window, window)),
                                 interpolation='nearest'
                                 #, cmap=plt.cm.gray
                                 )
                    ax[i].axis('off')
                    pass
                fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
                                    bottom=0.02, left=0.02, right=0.98)
                plt.show()
                pass

            yield clf.cluster_centers_
            pass
    return


def work(in_csv_file, out_csv_file, max_n_pois, npatches, patch_size, with_polar):
    # Full pipeline: read image ids from CSV, load the '-DX' PNGs, extract
    # the hematoxylin (H) layer, find PoIs, build a per-image patch codebook
    # via `cluster`, and save all codebooks stacked into one CSV.
    from pypipes import as_csv_rows,iformat,loopcount,itime,iattach
    from nppipes import itake,iexpand_dims
    from skimagepipes import as_image,as_float,equalize_hist,imshow,trim,rgb_as_hed
    from tcopipes import clean

    features = (
        in_csv_file
        | as_csv_rows
        #| P.skip(5)
        #| P.take(4)
        | itake(0)
        | P.tee
        | iformat('../../data/DX/{}-DX.png')
        | as_image
        | itime
        | loopcount
        | trim(0.2)
        | as_float
        | clean
        | rgb_as_hed
        | itake(0, axis=2)          # keep only the H (hematoxylin) channel
        | iexpand_dims(axis=2)      # restore a single-layer 3rd axis
        | equalize_hist
        #| imshow("H layer", cmap='gray')
        | iattach(pois, max_n_pois)
        | cluster(npatches, patch_size, with_polar)
        | P.as_list
    )
    #print(type(next(features, None)))

    from numpy import vstack
    from numpy import savetxt
    #print(vstack(features).shape)
    savetxt(out_csv_file, vstack(features), delimiter=',', fmt='%f')

    pass


def main(argv=None): # IGNORE:C0111
    '''Command line options.'''

    from sys import argv as Argv

    if argv is None:
        argv = Argv
        pass
    else:
        Argv.extend(argv)
        pass

    from os.path import basename
    program_name = basename(Argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version,
                                                     program_build_date)
    # NOTE: relies on the module docstring's second line for the short
    # description (the '####...' banner line).
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s

  Created by Wojciech Migda on %s.
  Copyright 2015 Wojciech Migda. All rights reserved.

  Licensed under the MIT License

  Distributed on an "AS IS" basis without warranties
  or conditions of any kind, either express or implied.

USAGE
''' % (program_shortdesc, str(__date__))

    try:
        from argparse import ArgumentParser
        from argparse import RawDescriptionHelpFormatter
        from argparse import FileType
        from sys import stdout,stdin

        # Setup argument parser
        parser = ArgumentParser(description=program_license,
                                formatter_class=RawDescriptionHelpFormatter)
        #parser.add_argument("-D", "--data-dir",
        #    type=str, action='store', dest="data_dir", required=True,
        #    help="directory with input CSV files, BMP 'train' and 'test' subfolders, and where H5 will be stored")
        parser.add_argument("-i", "--in-csv",
            action='store', dest="in_csv_file", default=stdin,
            type=FileType('r'),
            help="input CSV file name")
        parser.add_argument("-o", "--out-csv",
            action='store', dest="out_csv_file", default=stdout,
            type=FileType('w'),
            help="output CSV file name")
        parser.add_argument("-p", "--patch-size",
            type=int, default=16, action='store', dest="patch_size",
            help="size of square patch to build the codebook upon, in pixels")
        parser.add_argument("-C", "--num-patches",
            type=int, default=80, action='store', dest="npatches",
            help="number of patches per image")
        parser.add_argument("-N", "--max-pois",
            type=int, default=5000, action='store', dest="max_n_pois",
            help="max number of PoIs to collect (num_peaks of peak_local_max)")
        parser.add_argument("-P", "--with-polar",
            default=False, action='store_true', dest="with_polar",
            help="convert patches to polar coordinates")

        # Process arguments
        args = parser.parse_args()

        for k, v in args.__dict__.items():
            print(str(k) + ' => ' + str(v))
            pass

        work(args.in_csv_file,
             args.out_csv_file,
             args.max_n_pois,
             args.npatches,
             args.patch_size,
             args.with_polar)

        return 0
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
    except Exception as e:
        if DEBUG:
            raise(e)
            pass
        indent = len(program_name) * " "
        from sys import stderr
        stderr.write(program_name + ": " + repr(e) + "\n")
        stderr.write(indent + "  for help use --help")
        return 2
    pass


if __name__ == "__main__":
    if DEBUG:
        from sys import argv
        argv.append("--in-csv=../../data/training.csv")
        argv.append("--max-pois=5000")
        pass
    from sys import exit as Exit
    Exit(main())
    pass
Kosoko Adekunle (born 28th January), popularly and professionally known as Adekunle Gold, is a Nigerian professional graphics designer, music recording artiste, singer, songwriter and performer with a unique genre of music he calls Urban Highlife. Adekunle discovered his musical talent at a tender age; he started out singing in his church junior choir and wrote his first song at the age of 15. His first single Sade enjoyed massive airplay from radio stations in Nigeria; it was termed the best cover ever by music lovers and critics. The single won the Best Alternative Song at the 2015 Headies Award. Orente, his follow-up single, took its listeners through a tale of romance with great melodies. After the release of two successful singles he dropped another banger in the last quarter of the year 2015 titled Pick Up and it became a massive hit. After the release of Pick Up, it was a near herculean task for his fans to choose their favourite song. After numerous back to back hits, the Songster and his hardworking team released an album titled GOLD in July 2016 which was widely received in Nigeria and in the diaspora. With production credits from the likes of Oscar Herman Ackah, Pheelz, Seyikeyz, Masterkraft, Sleekamo, and mixing and mastering by Simi and Vtek, the much anticipated album did not fall short of the fans' expectations. Presently, Adekunle Gold is the rave of the moment as fans cannot stop pouring encomiums celebrating the artiste for making a great album at a time where the quality of music in Nigeria is being questioned. The GOLD album has a lot for everyone to relate to and is also the first album in Nigeria where all tracks have pictorial representation. The GOLD Album also made it to #7 on Billboard in the first week of release. This talented artiste has definitely won the hearts of many.
import os
import tempfile

__author__ = 'Brauni'

from pywps import Process, LiteralInput, ComplexInput, ComplexOutput, Format, get_format


class Buffer(Process):
    """PyWPS demo process: buffers every feature of a GML input layer.

    Takes a GML layer (`poly_in`) and a buffer distance (`buffer`), writes
    the buffered features to a GML file in the temp directory and returns
    it as the `buff_out` complex output.
    """

    def __init__(self):
        inputs = [ComplexInput('poly_in', 'Input1',
                               supported_formats=[get_format('GML')],
                               max_occurs='2'),
                  LiteralInput('buffer', 'Buffer', data_type='float')
                  ]
        outputs = [ComplexOutput('buff_out', 'Buffered',
                                 supported_formats=[get_format('GML')])]

        super(Buffer, self).__init__(
            self._handler,
            identifier='buffer',
            version='0.1',
            title="Brauni's 1st process",
            abstract='This process is the best ever being coded',
            profile='',
            metadata=['Process', '1st', 'Hilarious'],
            inputs=inputs,
            outputs=outputs,
            store_supported=True,
            status_supported=True
        )

    def _handler(self, request, response):
        """Buffer each input feature and stream progress back to the client.

        Raises whatever osgeo/ogr raises on unreadable input; progress is
        reported after each processed feature.
        """
        import time
        from osgeo import ogr

        inSource = ogr.Open(request.inputs['poly_in'][0].file)
        inLayer = inSource.GetLayer()
        out = inLayer.GetName()
        outPath = os.path.join(tempfile.gettempdir(), out)

        # create output file
        driver = ogr.GetDriverByName('GML')
        outSource = driver.CreateDataSource(
            outPath,
            ["XSISCHEMAURI=http://schemas.opengis.net/gml/2.1.2/feature.xsd"])
        outLayer = outSource.CreateLayer(out, None, ogr.wkbUnknown)

        # Hoisted out of the loop: the buffer distance is loop-invariant.
        bufferDistance = float(request.inputs['buffer'][0].data)

        # for each feature
        featureCount = inLayer.GetFeatureCount()
        index = 0

        while index < featureCount:
            # get the geometry
            inFeature = inLayer.GetNextFeature()
            inGeometry = inFeature.GetGeometryRef()

            # make the buffer
            buff = inGeometry.Buffer(bufferDistance)

            # create output feature to the file
            outFeature = ogr.Feature(feature_def=outLayer.GetLayerDefn())
            outFeature.SetGeometryDirectly(buff)
            outLayer.CreateFeature(outFeature)
            outFeature.Destroy()  # makes it crash when using debug

            index += 1
            time.sleep(1)  # making things little bit slower (demo)

            # BUG FIX: `index` was already incremented above, so the original
            # "%d" % (index + 1) over-reported by one and the percentage
            # exceeded 100 on the last feature.  `index` now equals the
            # number of completed features.
            response.update_status(
                "Calculating buffer for feature %d from %d" % (index, featureCount),
                100 * index / featureCount)

        response.outputs['buff_out'].data_format = get_format('GML')
        response.outputs['buff_out'].file = outPath
        return response
English Premier League club Queens Park Rangers yesterday stripped suspended midfielder Joey Barton of the club captaincy and fined him six weeks’ wages for disciplinary breaches. The west London club, in a statement, also warned Barton that he could be shown the door if he did not stay out of trouble. “The club has reached agreement with Barton that if he seriously breaches the club’s disciplinary procedures again, the club reserves the right to terminate his contract,” the statement said. Barton was handed a 12-match ban in May after being found guilty of violent conduct in QPR’s final match of last season against Manchester City and was also fined €93,000 last month.
#!/usr/bin/env python # -*- coding: utf-8 -*- # Scheduled Tweet Bot written in Python intended to run on a Raspberry Pi # (will work anywhere Python and the dependancies are installed though) # version: 0.9 import tweepy, sys import dbconnect import twitterfunctions from configuration import dbconfig getKeySecretQuery = ("SELECT CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET FROM Accounts WHERE user = 'default'") authcnx=dbconnect.dbconnect(dbconfig) authcursor=dbconnect.dbcursor(authcnx) gotKeySecretResult = authcursor.execute(getKeySecretQuery) KeySecretResult = authcursor.fetchall() for (CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET) in KeySecretResult : THE_CONSUMER_KEY = CONSUMER_KEY THE_CONSUMER_SECRET = CONSUMER_SECRET THE_ACCESS_KEY = ACCESS_KEY THE_ACCESS_SECRET = ACCESS_SECRET api = twitterfunctions.authenticatetwitter(THE_CONSUMER_KEY, THE_CONSUMER_SECRET, THE_ACCESS_KEY, THE_ACCESS_SECRET) def searchtweets(query, getcount): results = api.search(q=query,rpp=1,count=getcount) cnx = dbconnect.dbconnect(dbconfig) cursor = dbconnect.dbcursor(cnx) for result in results: tweet = result.text.encode('utf-8') user = result.user.screen_name.encode('utf-8') timesent = result.created_at tweetid = result.id_str.encode('utf-8') insertQuery = ('INSERT IGNORE INTO SearchedForTweets ' '(tweetid, username, tweetcontent, timesent, searchterm) ' 'VALUES ' '("%s", "%s", %r, "%s", "%s")' % (tweetid, user, tweet, timesent, query)) cursor.execute(insertQuery) cnx.commit() print user + " " + tweet query = str(sys.argv[1]) getcount = int(sys.argv[2]) searchtweets(query, getcount)
What zipcodes are in Banks, Pennsylvania? When was Banks, PA founded? A professional substitute making use of quality items is crucial to car safety and security as the windscreen gives architectural integrity to the automobile as well as plays a vital duty in a car's restriction system in case of an accident. Registered Business-- The Auto Glass Safety and security Council establishes specific safety standards that need to be adhered to throughout windshield installments to make sure a proper and also safe mount. Companies that have been verified via the AGSC had actually gone through a necessary evaluation, onsite examination and random audits to inspect their compliance with the standard. Licensed Specialists-- Even if the firm itself is confirmed, you should ask if the service technician mounting your windscreen has been certified with the Automobile Glass Security Council. The not-for-profit team trains as well as licenses service technicians to follow their methods which were created in partnership with the American National Criteria Institute. High quality Glass-- When initial calling the company that will handle your windshield substitute, ask what kind of glass they will certainly be making use of. Banks data in Carbon County: Population with number of households and other statistics. Replacing an automobile windshield is a costly event. A viable choice is fixing, which most insurance companies now approve in place of replacement. As a matter of fact, they also forgo the client's insurance deductible ought to he or she choose to fix rather than replace the automobile windshield. This makes monetary feeling to insurer also, because they save millions every year in this manner. By repairing instead of replacing an auto windscreen, the original windscreen maintains the honesty of its manufacturing facility seal. In short, every person victories. Windscreen fixing is a high margin specialized solution, making it a major sector. 
Brand-new vehicle windscreen fixing remedies include the revolutionary PRISM (pre-resin injection suspension technique) modern technology and also numerous methods that include infusing resins right into the cracks with or without vacuum cleaner. This procedure could take a few mins or last as long a hr, depending upon the size of the split. Makers make unbreakable vehicle windshield glass under extreme stress. In such situations, the automobile proprietor has no option but to replace the windshield. Unboxing the Aegis Quicksilver windshield repair kit. Windshield Replacement 2011 Ford Taurus Install Platinum Auto Glass New Jersey.
from google.appengine.ext.webapp import template
import webapp2
import json

from models.user import User
from models.page import Page
import time


class createPageBus(webapp2.RequestHandler):
    """API endpoint that creates a business page for the authenticated user.

    Responds with JSON: {'status': 'exists'} if the user already has a page
    with that title, otherwise {'status': 'OK', 'pages': [...]} with the
    user's updated page list.
    """

    def get(self):
        # Authenticate from either the session cookie or the access-token cookie.
        user = User.checkToken(self.request.cookies.get('session'))
        if self.request.cookies.get('our_token'):
            # the cookie that should contain the access token!
            user = User.checkToken(self.request.cookies.get('our_token'))
        if not user:
            self.error(403)
            self.response.write('No user - access denied')
            return

        # BUG FIX: the original did `page = None` and then assigned
        # `page.title = ...`, which raised AttributeError on every request.
        # Collect the request values in locals and pass them explicitly.
        title = self.request.get('title')
        name = self.request.get('name')
        address = self.request.get('address')
        details = self.request.get('details')
        emailBuss = self.request.get('emailBuss')

        if Page.checkIfPageExists(title, user.email):
            self.response.write(json.dumps({'status': 'exists'}))
            return

        page = Page.addPage(title, name, address, details, emailBuss, user.key)
        # Brief pause so the datastore write is visible to the query below
        # (GAE eventual consistency).
        time.sleep(0.5)

        pages = Page.getAllPages(user)
        self.response.write(json.dumps({"status": "OK", "pages": pages}))


app = webapp2.WSGIApplication([
    ('/api/createPageBus', createPageBus)
], debug=True)
NEW YORK, NY, AUGUST 21, 2017 - Innovest announced today that it has completed the acquisition of PDS Companies ("PDS"), a financial technology and services provider based out of Arlington, Texas that specializes in unique and hard-to-value assets (real estate, closely held companies, oil & gas, farm & ranch, notes and mortgages). The deal capitalizes on the continued growth in the alternative and unique asset market. In 2015, PWC released a report indicating that the market was expected to grow from $10 trillion to $18 trillion by 2020. This deal further cements Innovest's already significant footprint in this market. "The PDS team has a great deal of expertise in the highly complex but rapidly growing alternative asset market," said Glenn Schmidt, chief executive officer of Innovest. "In addition, PDS has a longstanding reputation for its commitment to outstanding customer service. The expertise of the staff and the loyal client relationships are both a big part of what we found attractive in the deal. We are looking forward to welcoming the PDS team to Innovest." PDS has been in business for over 30 years and operates several divisions including financial technology software, operational outsourcing, consulting services, and tax services. PDS serves a range of institution types including Ultra High Net Worth (UHNW), Family Office, and International Banks. PDS has created the only integrated platform in the country for the preservation, management, and oversight of unique and hard-to-value assets held in the fiduciary context. Glynn Broussard, chief executive officer of PDS, said, "Innovest's investment represents a significant opportunity for us. Innovest has long been ahead of the curve in the financial technology space and is a proven market leader. We are very pleased with the growth opportunities this presents us with and we look forward to working with the Innovest team." Broussard and other key management members of PDS will be staying on with Innovest.
The acquisition closed on August 18, 2017. Terms of the deal were not disclosed. Innovest is a leading provider of financial technology solutions delivered to forward-thinking trust, wealth management, and retirement professionals. Innovest's solutions empower its clients to acquire new customers, invest assets effectively, manage trust and investment portfolios efficiently, and flexibly report results to customers. Innovest has over $425 billion in assets under administration on its trust and wealth management platform, processes more than 4 million payments annually and provides fulfillment services for more than 10 million documents, including checks, advices, and tax forms each year. Innovest's wholly owned subsidiary, FinTech Securities, executes over 69 million equity shares annually. For more information about Innovest, visit www.innovestsystems.com. For more information about PDS, visit www.pdscompanies.com.
# CTK: Cherokee Toolkit
#
# Authors:
#      Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2009-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#

from Widget import Widget
from Container import Container
from consts import HTML_JS_BLOCK
from util import *

HTML = '<div id="%(id)s" %(props)s>%(content)s%(embedded_js)s</div>'


class Box (Container):
    """
    Widget for the base DIV element. All arguments are optional.

       Arguments:
           props: dictionary with properties for the DIV element,
               such as {'class': 'test', 'display': 'none'}

           content: if provided, it must be a CTK widget, or a
               list/tuple of CTK widgets

           embed_javascript: True|False. Disabled by default. If
               enabled, Javascript code associated to the widget will
               be rendered as part of the DIV definition instead of
               using a separate Javascript block.

       Examples:
          box1 = CTK.Box()
          box2 = CTK.Box({'class': 'test', 'id': 'test-box'},
                         CTK.RawHTML('This is a test box'))
    """
    def __init__ (self, props={}, content=None, embed_javascript=False):
        Container.__init__ (self)
        # The default dict is shared between calls; copy so it is never mutated.
        self.props = props.copy()
        self.embed_javascript = embed_javascript

        # Object ID
        if 'id' in self.props:
            self.id = self.props.pop('id')

        # Initial value
        if content:
            if isinstance (content, Widget):
                self += content
            elif type(content) in (list, tuple):
                # BUG FIX: was `(list, type)` -- `type` is the metaclass, so
                # a tuple of widgets was wrongly rejected with a TypeError.
                for o in content:
                    self += o
            else:
                raise TypeError ('Unknown type: "%s"' % (type(content)))

    def Render (self):
        """Render the DIV, optionally embedding the widget's JS inline."""
        render = Container.Render (self)

        if self.embed_javascript and render.js:
            js = HTML_JS_BLOCK % (render.js)
            render.js = ''
        else:
            js = ''

        props = {'id':          self.id,
                 'props':       props_to_str (self.props),
                 'content':     render.html,
                 'embedded_js': js}

        render.html = HTML % (props)
        return render
Bayle is a Senior Associate in Green Street's Advisory Group, which provides strategic advisory and valuation analysis for a variety of companies in the commercial real estate business. He joined Green Street in 2013 and spent his first two years as an analyst covering office REITs as part of Green Street's award-winning research team. Within Advisory, Bayle has been instrumental in engagements focused on company & portfolio valuation, corporate strategy, and REIT best practices. He earned a B.S. in Business Administration with a concentration in Real Estate Finance at the University of Southern California’s Marshall School of Business. Bayle is a Chartered Financial Analyst (CFA) charterholder.
""" Django settings for project project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '6=7fv&1yl9uuc-i1oi4gm5(*(vh+)@tgo7s3r&pmi@0z8y46ws' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'project.urls' WSGI_APPLICATION = 'project.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' # ------------------------------------------ # *** CUSTOM settings # ------------------------------------------ import os 
from os.path import join BASE_DIR = os.path.dirname(os.path.dirname(__file__)) DJANGO_APPS = INSTALLED_APPS + ( ) THIRD_PARTY_APPS = ( 'south', 'debug_toolbar', ) LOCAL_APPS = ( 'accounts', ) INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS TEMPLATE_DIRS = ( join(BASE_DIR, 'templates'), ) # Media files MEDIA_ROOT = join(BASE_DIR, 'public/media') MEDIA_URL = '/media/' # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_ROOT = join(BASE_DIR, 'public/static') STATIC_URL = '/static/' STATICFILES_DIRS = ( join(BASE_DIR, 'static'), ) # Sites framework SITE_ID = 1 # Email DEFAULT_FROM_EMAIL = 'project@example.com' EMAIL_SUBJECT_PREFIX = '[project] ' SERVER_EMAIL = 'project@example.com' # Authentication AUTH_USER_MODEL = 'accounts.Profile' # LOGIN_REDIRECT_URL = '/' LOGIN_URL = 'accounts:login' LOGOUT_URL = 'accounts:logout' if DEBUG: MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + ( 'debug_toolbar.middleware.DebugToolbarMiddleware', ) INTERNAL_IPS = ('127.0.0.1',) INTERCEPT_REDIRECTS = False
This Alto dining table and chairs set is all about natural simplicity. The table is constructed from 100% solid oak with a natural wax finish, which showcases the organic colours and patterns of the timbers. It has a minimalist shape, clean lines and a high-quality construction, which makes it a perfect fit for any modern home. The set is completed with six of our light and airy Upholstered Curve Back Chairs, with a beautiful plain beige fabric which keeps the set looking crisp and elegant. With free delivery to your dining room, now's the perfect time to give your home a stylish new look.
# coding=utf-8 # --------------------------------------------------------------- # Desenvolvedor: Arannã Sousa Santos # Mês: 12 # Ano: 2015 # Projeto: pagseguro_xml # e-mail: asousas@live.com # --------------------------------------------------------------- from .. import v2 class CONST(v2.CONST): URL_TRANSACAO_DETALHES_V3 = u'https://ws.{ambiente}pagseguro.uol.com.br/v3/transactions/{chave_transacao}?{parametros}' class ApiPagSeguroConsulta(v2.ApiPagSeguroConsulta): def __init__(self, ambiente=CONST.AMBIENTE.SANDBOX): super(ApiPagSeguroConsulta, self).__init__(ambiente=ambiente) self.__url_transacao_detalhes_v3 = CONST.URL_TRANSACAO_DETALHES_V3 def detalhes_v3(self, email, token, chave_transacao): from urllib import urlencode from classes import ClasseTransacaoDetalhes URL = self.__url_transacao_detalhes_v3.format( ambiente=CONST.AMBIENTE._resolve_[self.__ambiente], chave_transacao=chave_transacao, parametros=urlencode(dict(email=email, token=token)) ) # resposta pode conter a ClassXML de resposta ou uma mensagem de erro return self.__requisicao(URL, ClasseTransacaoDetalhes())
Cae Court Clinic aims to ensure the highest standard of medical care for our patients. To do this we keep records about you, your health and the care we have provided or plan to provide to you. Why issue a privacy notice? Cae Court Clinic recognises the importance of protecting personal and confidential information in all that we do and takes care to meet its legal and regulatory duties. This notice is one of the ways in which we can demonstrate our commitment to our values and being transparent and open. This notice also explains what rights you have to control how we use your information. What are we governed by? – If you see more than one healthcare professional in the clinic, they can all readily access the information they need to provide you with the best advice and treatment possible. – Where possible, when using information to inform future services and provision, non-identifiable information will be used. We may also share your information with your consent, and subject to strict sharing protocols, about how it will be used, with other Health and Social Care departments and the Police and Fire Services. If we have your current mobile telephone number, we may send you text notifications about appointments, information about treatments, cancellation of clinics and changes in service provision. Please ensure that we have your most up to date mobile telephone number for this to continue. (You can opt out of the text notification service at any time by contacting the practice). This clinic operates a software system on which clinic staff record information securely. This information can then be shared with other team members in the clinic so that everyone providing a service for you is fully informed about your relevant history. 
We are committed to protecting your privacy and will only use information collected lawfully in accordance with the Data Protection Act 1998, Article 8 of the Human Rights Act, the Common Law of Confidentiality and The General Data Protection Regulation. Everyone working in the clinic must use personal information in a secure and confidential way. To protect your confidentiality, we will not normally disclose any treatment information about you over the telephone unless we are sure that we are talking to you. We will only ever use or pass on your information if there is a genuine need to do so. We will not disclose information about you to third parties without your permission unless there are exceptional circumstances, such as when the law requires. All persons in the clinic sign a confidentiality agreement that explicitly makes clear their duties in relation to personal information and the consequences of breaching that duty. – You will need to make a request to the practice manager. – We may ask you to complete a request form to establish exactly what parts of your record you need. It is important that you tell us if any of your details such as your name, address, home telephone number or mobile telephone number have changed, or if any of your details such as date of birth are incorrect, in order for this to be amended. You have a responsibility to inform us of any changes so our records are kept accurate and up to date at all times.
#! /usr/bin/env python # # hcron-info.py # GPL--start # This file is part of hcron # Copyright (C) 2008-2010 Environment/Environnement Canada # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 # of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # GPL--end """Provide hcron related information. Simply print the fully qualified host name of the executing machine. """ # system imports import os import os.path import pwd import socket import sys # hcron imports from hcron.constants import * # constants PROG_NAME = os.path.basename(sys.argv[0]) def print_usage(PROG_NAME): print """\ usage: %s --allowed %s -es %s --fqdn Print hcron related information. Where: --allowed output "yes" if permitted to use hcron -es event statuses --fqdn fully qualified hostname""" % (PROG_NAME, PROG_NAME, PROG_NAME) def print_allowed(): try: userName = pwd.getpwuid(os.getuid()).pw_name userEventListsPath = "%s/%s" % (HCRON_EVENT_LISTS_DUMP_DIR, userName) if os.path.exists(userEventListsPath): print "yes" except Exception, detail: pass def print_fqdn(): try: print socket.getfqdn() except Exception, detail: print "Error: Could not determine the fully qualified host name." def print_user_event_status(): try: userName = pwd.getpwuid(os.getuid()).pw_name userEventListsPath = "%s/%s" % (HCRON_EVENT_LISTS_DUMP_DIR, userName) print open(userEventListsPath, "r").read(), except Exception, detail: print "Error: Could not read event status information." 
def main():
    """Dispatch on the first recognized command-line flag.

    With no arguments, print usage and exit with status -1; an
    unrecognized flag does the same. Each recognized flag performs its
    action and stops further argument processing.
    """
    remaining = sys.argv[1:]
    if not remaining:
        print_usage(PROG_NAME)
        sys.exit(-1)

    while remaining:
        flag = remaining.pop(0)
        if flag == "--allowed":
            print_allowed()
            break
        elif flag == "-es":
            print_user_event_status()
            break
        elif flag == "--fqdn":
            print_fqdn()
            break
        elif flag in ("-h", "--help"):
            print_usage(PROG_NAME)
            break
        else:
            print_usage(PROG_NAME)
            sys.exit(-1)

if __name__ == "__main__":
    main()
It hasn't been because of discount jerseys how Ubaldo Jimenez pitched. You can wear them anytime to show off your team coronary heart. A better Dline can easily cause pressure on private will let the rest of your defense, the linebackers and defensive backs, to have better coverage on the other teams when they are not blitzing, as well as some better pressure when blitzing. Better yet, be in the warmth and convenience of your own residence. After lengthy drives that pick up the game clock CSU must convert for touchdowns not field goals whenever they want november some games in for this year The Rams could only manage field goals in only two weeks total points but they did get into the final zone in week things amass double digits scoring there's finally someone all ambient temperature. Along with August-September, the favorites ignite, as winners win and losers drawback. Online shopping is the most convenient way to get you stuff: Steelers jerseys. A game could be as short as ninety minutes or as long as four hours. You'll receive an e-mail any time a new column is published. Following retirement, Lolich opened and operated a donut shop in the Detroit suburb of Lake Orion. An umpire who calls a smaller strike zone will force pitchers to yield more balls than can be hit. Your baseball picks either successful or unsuccessful outright. They're 3 of 5 on kicks from 30-39 yards, a distance from which most nfl kickers are automatic. On paper and ESPN it looks like a blowout so tons of folks that load standing on the Colts even as soon as the line is 10 or even more. New York Jets jerseys Louis Rams to clinch the NFC West title drew the most effective preliminary television rating for the final game of Sunday Night Football in the five years on NBC. He'll provide excellent depth needed on this team. One is strong enough to handle blockers and that can draw double teams to help the others to go through. Here is the best leg exercises to build and tone leg muscles. 
Analysis: Allen and Mays were very closely rated on our draft mother board. One of the more common questions of this topic the place does Ryan Fitzpatrick jersey the betting appeal to mlb games, what perform the numbers mean, like -120 or 110. So you have seen that your eczema flares up even more after working out at the gym. So pet needs something great to put to video game? That can take the pressure off the balls for this feet and spreads it across the actual whole bottom of your shoe. Children with constipation can use exactly the same organic remedies as adults, but in lower quantities. The Eagles wished to improve their pass defense and find another player who get the ball away.
from .utils import PyKEArgumentHelpFormatter
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from astropy.stats import LombScargle
from tqdm import tqdm
from . import kepio, kepmsg, kepkey, kepfit, kepfunc, kepstat


__all__ = ['keptrial']


def keptrial(infile, outfile=None, datacol='SAP_FLUX', errcol='SAP_FLUX_ERR',
             fmin=0.1, fmax=50, nfreq=10, method='ft', ntrials=1000,
             plot=False, overwrite=False, verbose=False,
             logfile='keptrial.log'):
    """
    keptrial -- Calculate best period and error estimate from time series

    ``keptrial`` measures the strongest period within the frequency range
    :math:`fmin` to :math:`fmax` and estimates 1-:math:`\sigma` error
    associated with that period. The error estimate is performed by
    constructing ntrial new light curves from the original data provided in
    datacol and adjusting each individual data point according to a random
    number generator and a shot noise model. While a shot noise model is not
    uniformly applicable to all Kepler targets it provides a useful 1st order
    estimate for most. A power spectrum is calculated for each light curve
    using a user-specified method and the highest peak in each power spectrum
    recorded. The distribution of peaks is fit by a normal function, the
    centroid is adopted as the best period and 1-standard deviation error is
    taken from the standard deviation. A confidence limit is recorded as the
    range within which all trial periods fall. While this is termed a '100%'
    confidence limit, this only refers to the total number of trials rather
    than formal confidence. The larger the number of **ntrials**, the more
    robust the result. The values of nfreq and ntrial have to be chosen
    carefully to avoid excessive run times. The values of **fmin**, **fmax**
    and **nfreq** have to be chosen carefully in order to provide a sensible
    measure of period and error. It is recommended that ``kepperiodogram`` be
    used to estimate the period and error before attempting to use
    ``keptrial``. An exercise of trial and error will most-likely be needed
    to choose a permutation of :math:`fmin`, :math:`fmax` and :math:`nfreq`
    that resolves the period distribution over a significant number of
    frequency bins. If requested, the distribution and normal fit are
    plotted. The plot updates after every ntrial iteration, partly to relieve
    boredom, and partly for the user to assess whether they are using the
    correct permutation of input parameters.

    Parameters
    ----------
    infile : str
        The name of a MAST standard format FITS file containing a Kepler
        light curve within the first data extension.
    outfile : str
        The name of the output FITS file with a new extension containing the
        results of a Monte Carlo period analysis.
    datacol : str
        The column name containing data stored within extension 1 of infile.
        This data is the input data for periodogram. Typically this name is
        SAP_FLUX (Simple Aperture Photometry fluxes), but any data column
        within extension 1 of the FITS file can be used provided it is
        coupled to an error column name using errcol.
    errcol : str
        The uncertainty data coupled to datacol. Typically this column is
        called SAP_FLUX_ERR.
    fmin : float [1/day]
        The minimum frequency on which each power spectrum will be
        calculated.
    fmax : float [1/day]
        The maximum frequency on which each power spectrum will be
        calculated.
    nfreq : int
        The number of uniform frequency steps between fmin and fmax over
        which the power spectrum will be calculated.
    method : str
        Choose a method for calculating the power spectrum. Currently, only
        'ft', a discrete Fourier transform, is available.
    ntrials : int
        The number of Monte Carlo trials required before calculating the
        best periods, period uncertainty and confidence in the measurement.
    plot : bool
        Plot the output window function?
    overwrite : bool
        Overwrite the output file?
    verbose : bool
        Print informative messages and warnings to the shell and logfile?
    logfile : str
        Name of the logfile containing error and warning messages.
    """
    if outfile is None:
        outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
    # log the call
    hashline = '--------------------------------------------------------------'
    kepmsg.log(logfile, hashline, verbose)
    call = ('KEPTRIAL -- '
            + ' infile={}'.format(infile)
            + ' outfile={}'.format(outfile)
            + ' datacol={}'.format(datacol)
            + ' errcol={}'.format(errcol)
            + ' fmin={}'.format(fmin)
            + ' fmax={}'.format(fmax)
            + ' nfreq={}'.format(nfreq)
            + ' method={}'.format(method)
            + ' ntrials={}'.format(ntrials)
            + ' plot={}'.format(plot)
            + ' overwrite={}'.format(overwrite)
            + ' verbose={}'.format(verbose)
            + ' logfile={}'.format(logfile))
    kepmsg.log(logfile, call+'\n', verbose)
    # start time
    kepmsg.clock('KEPTRIAL started at', logfile, verbose)
    # overwrite output file
    if overwrite:
        kepio.overwrite(outfile, logfile, verbose)
    # NOTE(review): presumably kepmsg.err raises/exits so execution does not
    # continue with an existing file — confirm.
    if kepio.fileexists(outfile):
        errmsg = 'ERROR -- KEPTRIAL: {} exists. Use --overwrite'.format(outfile)
        kepmsg.err(logfile, errmsg, verbose)
    # open input file
    instr = pyfits.open(infile, 'readonly')
    # fudge non-compliant FITS keywords with no values
    instr = kepkey.emptykeys(instr, infile, logfile, verbose)
    # input data
    try:
        barytime = instr[1].data.field('barytime')
    except:
        # fall back to the standard 'time' column when 'barytime' is absent
        barytime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
                                     verbose)
    signal = kepio.readfitscol(infile, instr[1].data, datacol, logfile,
                               verbose)
    err = kepio.readfitscol(infile, instr[1].data, errcol, logfile, verbose)
    # remove infinite data from time series
    try:
        nanclean = instr[1].header['NANCLEAN']
    except:
        incols = [barytime, signal, err]
        [barytime, signal, err] = kepstat.removeinfinlc(signal, incols)
    # frequency steps and Monte Carlo iterations
    deltaf = (fmax - fmin) / float(nfreq)
    freq, pmax, trial = [], [], []
    for i in tqdm(range(ntrials)):
        trial.append(i + 1)
        # adjust data within the error bars
        # NOTE(review): with the randarray call below commented out, every
        # trial analyses the identical, unperturbed signal, so all ntrials
        # iterations find the same peak — the Monte Carlo perturbation the
        # docstring describes is effectively disabled. Confirm whether it
        # should be restored.
        #work1 = kepstat.randarray(signal, err)
        # determine FT power
        fr = np.arange(fmin, fmax, deltaf)
        power = LombScargle(barytime, signal,
                            signal.max()-signal.min()).power(fr)
        # determine peak in FT
        pmax.append(-1.0e30)
        for j in range(len(fr)):
            if (power[j] > pmax[-1]):
                pmax[-1] = power[j]
                f1 = fr[j]
        freq.append(f1)
    # plot stop-motion histogram
    plt.figure()
    plt.clf()
    plt.axes([0.08, 0.08, 0.88, 0.89])
    plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
    plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
    n, bins, patches = plt.hist(freq, bins=nfreq, range=[fmin, fmax],
                                align='mid', rwidth=1, ec='#0000ff',
                                fc='#ffff00', lw=2)
    # fit normal distribution to histogram
    x = np.zeros(len(bins))
    for j in range(1, len(bins)):
        # bin centers for the Gaussian fit
        x[j] = (bins[j] + bins[j - 1]) / 2.
    # NOTE(review): ``i`` here is the last loop index (ntrials - 1) left over
    # from the trial loop above, used as the amplitude guess for the Gaussian
    # fit — verify this is intentional.
    pinit = np.array([float(i), freq[-1], deltaf])
    n = np.array(n, dtype='float32')
    coeffs, errors, covar, sigma, chi2, dof, fit, plotx, ploty = \
        kepfit.leastsquares(kepfunc.gauss, pinit, x[1:], n, None, logfile,
                            verbose)
    f = np.arange(fmin, fmax, (fmax - fmin) / 100.)
    fit = kepfunc.gauss(coeffs, f)
    plt.plot(f, fit, 'r-', linewidth=2)
    plt.xlabel(r'Frequency (1/d)', {'color' : 'k'})
    plt.ylabel('N', {'color' : 'k'})
    plt.xlim(fmin, fmax)
    plt.grid()
    # render plot
    if plot:
        plt.show()
    # period results: best frequency is the Gaussian centroid coeffs[1],
    # its width coeffs[2] gives the 1-sigma error
    p = 1.0 / coeffs[1]
    perr = p * coeffs[2] / coeffs[1]
    # confidence range: first and last non-empty histogram bins
    f1 = fmin; f2 = fmax
    gotbin = False
    for i in range(len(n)):
        if n[i] > 0 and not gotbin:
            f1 = bins[i]
            gotbin = True
    gotbin = False
    for i in range(len(n) - 1, 0, -1):
        if n[i] > 0 and not gotbin:
            f2 = bins[i + 1]
            gotbin = True
    powave, powstdev = np.mean(pmax), np.std(pmax)
    # print result
    print('          best period: %.10f days (%.7f min)' % (p, p * 1440.0))
    print(' 1-sigma period error: %.10f days (%.7f min)' % (perr,
                                                            perr * 1440.0))
    print('         search range: %.10f - %.10f days ' % (1.0 / fmax,
                                                          1.0 / fmin))
    print(' 100%% confidence range: %.10f - %.10f days ' % (1.0 / f2,
                                                            1.0 / f1))
    print('     number of trials: %d' % ntrials)
    print(' number of frequency bins: %d' % nfreq)
    # history keyword in output file
    kepkey.history(call, instr[0], outfile, logfile, verbose)
    ## write output file
    col1 = pyfits.Column(name='TRIAL', format='J', array=trial)
    col2 = pyfits.Column(name='FREQUENCY', format='E', unit='1/day',
                         array=freq)
    col3 = pyfits.Column(name='POWER', format='E', array=pmax)
    cols = pyfits.ColDefs([col1,col2,col3])
    instr.append(pyfits.BinTableHDU.from_columns(cols))
    try:
        instr[-1].header['EXTNAME'] = ('TRIALS', 'Extension name')
    except:
        raise KeyError("Could not write EXTNAME to the header of the output"
                       " file")
    try:
        instr[-1].header['SEARCHR1'] = (1.0 / fmax,
                                        'Search range lower bound (days)')
    except:
        raise KeyError("Could not write SEARCHR1 to the header of the output"
                       " file")
    try:
        instr[-1].header['SEARCHR2'] = (1.0 / fmin,
                                        'Search range upper bound (days)')
    except:
        raise KeyError("Could not write SEARCHR2 to the header of the output"
                       " file")
    try:
        instr[-1].header['NFREQ'] = (nfreq, 'Number of frequency bins')
    except:
        raise KeyError("Could not write NFREQ to the header of the output"
                       " file")
    try:
        instr[-1].header['PERIOD'] = (p, 'Best period (days)')
    except:
        raise KeyError("Could not write PERIOD to the header of the output"
                       " file")
    try:
        instr[-1].header['PERIODE'] = (perr, '1-sigma period error (days)')
    except:
        raise KeyError("Could not write PERIODE to the header of the output"
                       " file")
    try:
        instr[-1].header['CONFIDR1'] = (1.0 / f2,
                                        'Trial confidence lower bound (days)')
    except:
        raise KeyError("Could not write CONFIDR1 to the header of the output"
                       " file")
    try:
        instr[-1].header['CONFIDR2'] = (1.0 / f1,
                                        'Trial confidence upper bound (days)')
    except:
        raise KeyError("Could not write CONFIDR2 to the header of the output"
                       " file")
    try:
        instr[-1].header['NTRIALS'] = (ntrials, 'Number of trials')
    except:
        raise KeyError("Could not write NTRIALS to the header of the output"
                       " file")
    print("Writing output file {}...".format(outfile))
    instr.writeto(outfile)
    # close input file
    instr.close()
    ## end time
    kepmsg.clock('KEPTRAIL completed at', logfile, verbose)


def keptrial_main():
    """Command-line entry point: parse arguments and run keptrial."""
    import argparse
    parser = argparse.ArgumentParser(
             description=('Calculate best period and error estimate from'
                          ' Fourier transform'),
             formatter_class=PyKEArgumentHelpFormatter)
    parser.add_argument('infile', help='Name of input file', type=str)
    parser.add_argument('--outfile',
                        help=('Name of FITS file to output.'
                              ' If None, outfile is infile-keptrial.'),
                        default=None)
    parser.add_argument('--datacol', default='SAP_FLUX',
                        help='Name of data column', type=str)
    parser.add_argument('--errcol', default='SAP_FLUX_ERR',
                        help='Name of data error column', type=str)
    parser.add_argument('--fmin', default=0.1,
                        help='Minimum search frequency [1/day]', type=float)
    parser.add_argument('--fmax', default=50.,
                        help='Maximum search frequency [1/day]', type=float)
    parser.add_argument('--nfreq', default=100,
                        help='Number of frequency intervals', type=int)
    parser.add_argument('--method', default='ft',
                        help='Frequency search method', type=str,
                        choices=['ft'])
    parser.add_argument('--ntrials', default=1000,
                        help='Number of search trials', type=int)
    parser.add_argument('--plot', action='store_true', help='Plot result?')
    parser.add_argument('--overwrite', action='store_true',
                        help='Overwrite output file?')
    parser.add_argument('--verbose', action='store_true',
                        help='Write to a log file?')
    parser.add_argument('--logfile', '-l', help='Name of ascii log file',
                        default='keptrial.log', type=str)
    args = parser.parse_args()
    keptrial(args.infile, args.outfile, args.datacol, args.errcol, args.fmin,
             args.fmax, args.nfreq, args.method, args.ntrials, args.plot,
             args.overwrite, args.verbose, args.logfile)
In light of the New Brunswick government’s decision to appeal the landmark Comeau decision on interprovincial alcohol transportation, Conservative MP Dan Albas renewed a call Monday for the Liberal government to refer the ruling to the Supreme Court. Almost a month after a New Brunswick judge dismissed charges against 62-year old Gérard Comeau for importing 14 cases of beer and three bottles of liquor into the province from Québec, Brian Gallant’s Liberal government announced Friday that they’re appealing. They’re arguing the judge incorrectly interpreted section 121 of the Constitution Act. Section 121 states that “All Articles of the Growth, Produce, or Manufacture of any one of the Provinces shall, from and after the Union, be admitted free into each of the other Provinces”. But a 1921 ruling subsequently opened the door for restrictions. “Regrettably, last week the province announced they will be appealing this (Comeau) ruling,” Albas said Monday in the house of commons foyer, joined by deputy Conservative leader Denis Lebel. “Simply put, we are asking that the Liberal government elevate the Comeau case to the Supreme Court for constitutional clarification…We have a real opportunity here that wasn’t available before the Comeau decision,” he said. Though the Comeau decision only involved alcohol, Albas would like the Supreme Court to give a broader interpretation. Innovation, Science and Economic Development Minister Navdeep Bains has said he welcomes the Comeau decision, but has skirted questions about a Supreme Court reference. The New Brunswick government’s decision to appeal comes as the provinces and territories, with the encouragement of Bains, try to conclude negotiations for an enhanced Agreement on Internal Trade (AIT). The provinces and territories were supposed to conclude those talks by March 2016, a deadline established when the Conservatives were in power and James Moore was industry minister. 
There hasn’t been any indication since the missed deadline that a new agreement is imminent. Lebel suggested Monday that the New Brunswick appeal could be seen as evidence the political will isn’t there. “We think it (the Agreement on Internal Trade) would be the best way, but in the current context — with the judgment that just took place, and the appeal of that judgment — that demonstrates to us that the provinces might not be ready to go in that direction,” he said in French. Following question period, Bains acknowledged there can be a “mentality to protect your own jurisdiction” and a “revenue issue” for some provinces. But he continued to insist the AIT was the best approach. “I very much support the free flow of alcohol from one jurisdiction to the other, particularly amongst provinces and territories. I think it’s a very important step,” he said of the Comeau decision.
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2013 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import absolute_import, division, print_function, unicode_literals from guessit import UnicodeMixin, base_text_type, u from guessit.textutils import find_words from babelfish import Language import babelfish import re import logging from guessit.guess import Guess __all__ = ['Language', 'UNDETERMINED', 'search_language', 'guess_language'] log = logging.getLogger(__name__) UNDETERMINED = babelfish.Language('und') SYN = {('und', None): ['unknown', 'inconnu', 'unk', 'un'], ('ell', None): ['gr', 'greek'], ('spa', None): ['esp', 'español'], ('fra', None): ['français', 'vf', 'vff', 'vfi'], ('swe', None): ['se'], ('por', 'BR'): ['po', 'pb', 'pob', 'br', 'brazilian'], ('cat', None): ['català'], ('ces', None): ['cz'], ('ukr', None): ['ua'], ('zho', None): ['cn'], ('jpn', None): ['jp'], ('hrv', None): ['scr'], ('mul', None): ['multi', 'dl'], # http://scenelingo.wordpress.com/2009/03/24/what-does-dl-mean/ } class GuessitConverter(babelfish.LanguageReverseConverter): _with_country_regexp = re.compile('(.*)\((.*)\)') _with_country_regexp2 = re.compile('(.*)-(.*)') def __init__(self): self.guessit_exceptions = {} for (alpha3, country), synlist in SYN.items(): for syn 
in synlist: self.guessit_exceptions[syn.lower()] = (alpha3, country, None) @property def codes(self): return (babelfish.language_converters['alpha3b'].codes | babelfish.language_converters['alpha2'].codes | babelfish.language_converters['name'].codes | babelfish.language_converters['opensubtitles'].codes | babelfish.country_converters['name'].codes | frozenset(self.guessit_exceptions.keys())) def convert(self, alpha3, country=None, script=None): return str(babelfish.Language(alpha3, country, script)) def reverse(self, name): with_country = (GuessitConverter._with_country_regexp.match(name) or GuessitConverter._with_country_regexp2.match(name)) if with_country: lang = babelfish.Language.fromguessit(with_country.group(1).strip()) lang.country = babelfish.Country.fromguessit(with_country.group(2).strip()) return (lang.alpha3, lang.country.alpha2 if lang.country else None, lang.script or None) # exceptions come first, as they need to override a potential match # with any of the other guessers try: return self.guessit_exceptions[name.lower()] except KeyError: pass for conv in [babelfish.Language, babelfish.Language.fromalpha3b, babelfish.Language.fromalpha2, babelfish.Language.fromname, babelfish.Language.fromopensubtitles]: try: c = conv(name) return c.alpha3, c.country, c.script except (ValueError, babelfish.LanguageReverseError): pass raise babelfish.LanguageReverseError(name) babelfish.language_converters['guessit'] = GuessitConverter() COUNTRIES_SYN = {'ES': ['españa'], 'GB': ['UK'], 'BR': ['brazilian', 'bra'], # FIXME: this one is a bit of a stretch, not sure how to do # it properly, though... 
'MX': ['Latinoamérica', 'latin america'] } class GuessitCountryConverter(babelfish.CountryReverseConverter): def __init__(self): self.guessit_exceptions = {} for alpha2, synlist in COUNTRIES_SYN.items(): for syn in synlist: self.guessit_exceptions[syn.lower()] = alpha2 @property def codes(self): return (babelfish.country_converters['name'].codes | frozenset(babelfish.COUNTRIES.values()) | frozenset(self.guessit_exceptions.keys())) def convert(self, alpha2): return str(babelfish.Country(alpha2)) def reverse(self, name): # exceptions come first, as they need to override a potential match # with any of the other guessers try: return self.guessit_exceptions[name.lower()] except KeyError: pass try: return babelfish.Country(name.upper()).alpha2 except ValueError: pass for conv in [babelfish.Country.fromname]: try: return conv(name).alpha2 except babelfish.CountryReverseError: pass raise babelfish.CountryReverseError(name) babelfish.country_converters['guessit'] = GuessitCountryConverter() class Language(UnicodeMixin): """This class represents a human language. You can initialize it with pretty much anything, as it knows conversion from ISO-639 2-letter and 3-letter codes, English and French names. You can also distinguish languages for specific countries, such as Portuguese and Brazilian Portuguese. There are various properties on the language object that give you the representation of the language for a specific usage, such as .alpha3 to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles language code. 
>>> Language('fr') Language(French) >>> (Language('eng').english_name) == 'English' True >>> (Language('pt(br)').country.name) == 'BRAZIL' True >>> (Language('zz', strict=False).english_name) == 'Undetermined' True >>> (Language('pt(br)').opensubtitles) == 'pob' True """ def __init__(self, language, country=None, strict=False): language = u(language.strip().lower()) country = babelfish.Country(country.upper()) if country else None try: self.lang = babelfish.Language.fromguessit(language) # user given country overrides guessed one if country: self.lang.country = country except babelfish.LanguageReverseError: msg = 'The given string "%s" could not be identified as a language' % language if strict: raise ValueError(msg) log.debug(msg) self.lang = UNDETERMINED @property def country(self): return self.lang.country @property def alpha2(self): return self.lang.alpha2 @property def alpha3(self): return self.lang.alpha3 @property def alpha3term(self): return self.lang.alpha3b @property def english_name(self): return self.lang.name @property def opensubtitles(self): return self.lang.opensubtitles @property def tmdb(self): if self.country: return '%s-%s' % (self.alpha2, self.country.alpha2) return self.alpha2 def __hash__(self): return hash(self.lang) def __eq__(self, other): if isinstance(other, Language): # in Guessit, languages are considered equal if their main languages are equal return self.alpha3 == other.alpha3 if isinstance(other, base_text_type): try: return self == Language(other) except ValueError: return False return False def __ne__(self, other): return not self == other def __bool__(self): return self.lang != UNDETERMINED __nonzero__ = __bool__ def __unicode__(self): if self.lang.country: return '%s(%s)' % (self.english_name, self.country.alpha2) else: return self.english_name def __repr__(self): if self.lang.country: return 'Language(%s, country=%s)' % (self.english_name, self.lang.country) else: return 'Language(%s)' % self.english_name # list of common words 
# which could be interpreted as languages, but which are far too common to
# be able to say they represent a language in the middle of a string (where
# they most likely carry their common meaning)
LNG_COMMON_WORDS = frozenset([
    # english words
    'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
    'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
    'fry', 'cop', 'zen', 'gay', 'fat', 'one', 'cherokee', 'got', 'an', 'as',
    'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi',
    # french words
    'bas', 'de', 'le', 'son', 'ne', 'ca', 'ce', 'et', 'que',
    'mal', 'est', 'vol', 'or', 'mon', 'se',
    # spanish words
    'la', 'el', 'del', 'por', 'mar',
    # other
    'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
    'vi', 'ben', 'da', 'lt', 'ch',
    # new from babelfish
    'mkv', 'avi', 'dmd', 'the', 'dis', 'cut', 'stv', 'des', 'dia', 'and',
    'cab', 'sub', 'mia', 'rim', 'las', 'une', 'par', 'srt', 'ano', 'toy',
    'job', 'gag', 'reel', 'www', 'for', 'ayu', 'csi', 'ren', 'moi', 'sur',
    'fer', 'fun', 'two', 'big', 'psy', 'air',
    # release groups
    'bs'  # Bosnian
])

# Markers that flag a language word as describing a *subtitle* track
# rather than an audio track.
subtitle_prefixes = ['sub', 'subs', 'st', 'vost', 'subforced', 'fansub', 'hardsub']
subtitle_suffixes = ['subforced', 'fansub', 'hardsub']
# Prefixes that may decorate a plain (audio) language word.
lang_prefixes = ['true']


def find_possible_languages(string):
    """Find possible languages in the string.

    Each candidate word is stripped of known subtitle/language prefixes and
    suffixes, filtered against LNG_COMMON_WORDS, and resolved via Language().

    :return: list of tuple (property, Language, lang_word, word)
    """
    words = find_words(string)

    valid_words = []
    for word in words:
        lang_word = word.lower()
        key = 'language'
        for prefix in subtitle_prefixes:
            if lang_word.startswith(prefix):
                lang_word = lang_word[len(prefix):]
                key = 'subtitleLanguage'
        for suffix in subtitle_suffixes:
            if lang_word.endswith(suffix):
                # BUGFIX: remove the suffix from the *end* of the word.
                # The previous code did lang_word[:len(suffix)], which kept
                # the first len(suffix) characters instead (e.g. turning
                # 'engfansub' into 'engfan' rather than 'eng').
                lang_word = lang_word[:-len(suffix)]
                key = 'subtitleLanguage'
        for prefix in lang_prefixes:
            if lang_word.startswith(prefix):
                lang_word = lang_word[len(prefix):]
        if lang_word not in LNG_COMMON_WORDS:
            try:
                lang = Language(lang_word)
                # Keep language with alpha2 equivalent. Others are probably
                # an uncommon language.
                if lang == 'mul' or hasattr(lang, 'alpha2'):
                    valid_words.append((key, lang, lang_word, word))
            except babelfish.Error:
                pass
    return valid_words


def search_language(string, lang_filter=None):
    """Looks for language patterns, and if found return the language object,
    its group span and an associated confidence.

    you can specify a list of allowed languages using the lang_filter argument,
    as in lang_filter = [ 'fr', 'eng', 'spanish' ]

    >>> search_language('movie [en].avi')['language']
    Language(English)

    >>> search_language('the zen fat cat and the gay mad men got a new fan', lang_filter = ['en', 'fr', 'es'])

    """
    if lang_filter:
        lang_filter = set(babelfish.Language.fromguessit(lang) for lang in lang_filter)

    confidence = 1.0  # for all of them

    for prop, language, lang, word in find_possible_languages(string):
        pos = string.find(word)
        end = pos + len(word)

        if lang_filter and language not in lang_filter:
            continue

        # only allow those languages that have a 2-letter code, those that
        # don't are too esoteric and probably false matches
        #if language.lang not in lng3_to_lng2:
        #    continue

        # confidence depends on alpha2, alpha3, english name, ...
        if len(lang) == 2:
            confidence = 0.8
        elif len(lang) == 3:
            confidence = 0.9
        elif prop == 'subtitleLanguage':
            confidence = 0.6  # Subtitle prefix found with language
        else:
            # Note: we could either be really confident that we found a
            # language or assume that full language names are too
            # common words and lower their confidence accordingly
            confidence = 0.3  # going with the low-confidence route here

        return Guess({prop: language}, confidence=confidence, input=string, span=(pos, end))

    return None


def guess_language(text):  # pragma: no cover
    """Guess the language in which a body of text is written.

    This uses the external guess-language python module, and will fail and
    return Language(Undetermined) if it is not installed.
    """
    try:
        from guess_language import guessLanguage
        return babelfish.Language.fromguessit(guessLanguage(text))
    except ImportError:
        log.error('Cannot detect the language of the given text body, missing dependency: guess-language')
        log.error('Please install it from PyPI, by doing eg: pip install guess-language')
        return UNDETERMINED
Caring.com offers a free service to help families find senior care. To help you with your search, browse the 1 review below for home healthcare agencies in Tuckahoe. On average, consumers rate home health agencies in Tuckahoe 5.0 out of 5 stars. To speak with one of our Family Advisors about senior care options and costs in Tuckahoe, call (855) 863-8283.
import os, cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

# Demonstration of noise reduction through frame averaging: the same image is
# corrupted four times with independent zero-mean Gaussian noise, and the four
# noisy frames are averaged, which attenuates the noise but keeps the signal.

_projectDirectory = os.path.dirname(__file__)
_imagesDirectory = os.path.join(_projectDirectory, "images")

# Recursively collect every .jpg below the images directory.
# BUGFIX: paths must be joined with the directory currently being walked
# (_root); the previous code joined with the top-level _imagesDirectory,
# producing nonexistent paths for images living in subdirectories.
_images = []
for _root, _dirs, _files in os.walk(_imagesDirectory):
    for _file in _files:
        if _file.endswith(".jpg"):
            _images.append(os.path.join(_root, _file))

# Fail with a clear message instead of a cryptic cv2 error on an empty list.
if not _images:
    raise SystemExit("No .jpg images found under " + _imagesDirectory)

_imageIndex = 0
_imageTotal = len(_images)

# Load the first image and reduce it to a single grayscale channel.
_img = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED)
_img = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
_imgHeight, _imgWidth = _img.shape

_fig = plt.figure("Averaging Smoothing")
_gs = GridSpec(3, 4)

# All four noisy frames share the same noise statistics (previously four
# identical copies of these constants: _noiseMean1..4 / _stdDeviation1..4).
_noiseMean = 0.0
_stdDeviation = 0.5

# Rightmost column: the untouched original for comparison.
_fig1 = plt.subplot(_gs[0:3, 3])
_fig1.set_title("Original")
plt.imshow(_img, cmap="gray")

# Work on a float image normalized to [0, 1] so that noise addition does not
# wrap around the integer range.
_fimg = cv2.normalize(_img, np.empty((_imgHeight, _imgWidth)), 0.0, 1.0,
                      cv2.NORM_MINMAX, cv2.CV_32F)


def _noisy_frame(mean, std_deviation):
    """Return a copy of the normalized image with Gaussian noise of the given
    mean/standard deviation added, re-normalized back into [0, 1]."""
    noise = np.empty((_imgHeight, _imgWidth))
    cv2.randn(noise, mean, std_deviation)
    frame = _fimg + noise
    return cv2.normalize(frame, frame, 0.0, 1.0, cv2.NORM_MINMAX, cv2.CV_32F)


# Two leftmost columns: four independently corrupted frames (replaces four
# copy-pasted blocks; grid cells match the original layout).
_noisyFrames = []
for _cell in (_gs[0:1, 0], _gs[2:3, 0], _gs[0:1, 1], _gs[2:3, 1]):
    _sub = plt.subplot(_cell)
    _sub.set_title("Deviation: " + str(_stdDeviation))
    _frame = _noisy_frame(_noiseMean, _stdDeviation)
    _noisyFrames.append(_frame)
    plt.imshow(_frame, cmap="gray")

# Third column: the average of the noisy frames; averaging N independent
# noisy frames reduces the noise variance by a factor of N.
_fig6 = plt.subplot(_gs[0:3, 2])
_fig6.set_title("Averaging")
_faverage = sum(_noisyFrames) / len(_noisyFrames)
plt.imshow(_faverage, cmap="gray")

plt.tight_layout()
plt.show()
To keep all our members informed, we'll create pages like this one for the minutes of our meetings, and we'll change the title of each page to reflect the date and type of meeting held. This is also where we'll post information from city council meetings and Olive Branch Cemetery meetings.
# -*- coding: utf-8 -*- """ *************************************************************************** GdalAlgorithmRasterTest.py --------------------- Date : January 2016 Copyright : (C) 2016 by Matthias Kuhn Email : matthias@opengis.ch *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Matthias Kuhn' __date__ = 'January 2016' __copyright__ = '(C) 2016, Matthias Kuhn' import nose2 import os import shutil import tempfile from qgis.core import (QgsProcessingContext, QgsProcessingFeedback, QgsRectangle, QgsRasterLayer, QgsProject) from qgis.testing import (start_app, unittest) import AlgorithmsTestBase from processing.algs.gdal.GdalUtils import GdalUtils from processing.algs.gdal.AssignProjection import AssignProjection from processing.algs.gdal.ClipRasterByExtent import ClipRasterByExtent from processing.algs.gdal.ClipRasterByMask import ClipRasterByMask from processing.algs.gdal.ColorRelief import ColorRelief from processing.algs.gdal.GridAverage import GridAverage from processing.algs.gdal.GridDataMetrics import GridDataMetrics from processing.algs.gdal.GridInverseDistance import GridInverseDistance from processing.algs.gdal.GridInverseDistanceNearestNeighbor import GridInverseDistanceNearestNeighbor from processing.algs.gdal.GridLinear import GridLinear from processing.algs.gdal.GridNearestNeighbor import GridNearestNeighbor from processing.algs.gdal.gdal2tiles import gdal2tiles from processing.algs.gdal.gdalcalc import gdalcalc from processing.algs.gdal.gdaltindex import gdaltindex from processing.algs.gdal.contour import contour, contour_polygon from processing.algs.gdal.gdalinfo 
import gdalinfo from processing.algs.gdal.hillshade import hillshade from processing.algs.gdal.aspect import aspect from processing.algs.gdal.buildvrt import buildvrt from processing.algs.gdal.proximity import proximity from processing.algs.gdal.rasterize import rasterize from processing.algs.gdal.retile import retile from processing.algs.gdal.translate import translate from processing.algs.gdal.warp import warp from processing.algs.gdal.fillnodata import fillnodata from processing.algs.gdal.rearrange_bands import rearrange_bands from processing.algs.gdal.gdaladdo import gdaladdo from processing.algs.gdal.sieve import sieve from processing.algs.gdal.gdal2xyz import gdal2xyz from processing.algs.gdal.polygonize import polygonize from processing.algs.gdal.pansharp import pansharp from processing.algs.gdal.merge import merge from processing.algs.gdal.nearblack import nearblack from processing.algs.gdal.slope import slope from processing.algs.gdal.rasterize_over import rasterize_over from processing.algs.gdal.rasterize_over_fixed_value import rasterize_over_fixed_value from processing.algs.gdal.viewshed import viewshed testDataPath = os.path.join(os.path.dirname(__file__), 'testdata') class TestGdalRasterAlgorithms(unittest.TestCase, AlgorithmsTestBase.AlgorithmsTest): @classmethod def setUpClass(cls): start_app() from processing.core.Processing import Processing Processing.initialize() cls.cleanup_paths = [] @classmethod def tearDownClass(cls): for path in cls.cleanup_paths: shutil.rmtree(path) def test_definition_file(self): return 'gdal_algorithm_raster_tests.yaml' def testAssignProjection(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = AssignProjection() alg.initAlgorithm() # with target srs self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'CRS': 'EPSG:3111'}, context, feedback), ['gdal_edit.py', '-a_srs EPSG:3111 ' + source]) # with target using proj string custom_crs = 
'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'CRS': custom_crs}, context, feedback), ['gdal_edit.py', '-a_srs EPSG:20936 ' + source]) # with target using custom projection custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'CRS': custom_crs}, context, feedback), ['gdal_edit.py', '-a_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' + source]) # with non-EPSG crs code self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'CRS': 'POSTGIS:3111'}, context, feedback), ['gdal_edit.py', '-a_srs EPSG:3111 ' + source]) @unittest.skipIf(os.environ.get('TRAVIS', '') == 'true', 'gdal_edit.py: not found') def testRunAssignProjection(self): # Check that assign projection updates QgsRasterLayer info # GDAL Assign Projection is based on gdal_edit.py context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = AssignProjection() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: fake_dem = os.path.join(outdir, 'dem-fake-crs.tif') shutil.copy(source, fake_dem) self.assertTrue(os.path.exists(fake_dem)) rlayer = QgsRasterLayer(fake_dem, "Fake dem") self.assertTrue(rlayer.isValid()) self.assertEqual(rlayer.crs().authid(), 'EPSG:4326') project = QgsProject() project.setFileName(os.path.join(outdir, 'dem-fake-crs.qgs')) project.addMapLayer(rlayer) self.assertEqual(project.count(), 1) context.setProject(project) alg.run({'INPUT': fake_dem, 'CRS': 'EPSG:3111'}, context, feedback) self.assertEqual(rlayer.crs().authid(), 'EPSG:3111') def testGdalTranslate(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') translate_alg = translate() 
translate_alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # without NODATA value self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with None NODATA value self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'NODATA': None, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-a_nodata 9999.0 ' + '-of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-a_nodata 0.0 ' + '-of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value and custom data type self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'DATA_TYPE': 6, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-a_nodata 0.0 ' + '-ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with target srs self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'TARGET_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-a_srs EPSG:3111 ' + '-of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with target using proj string custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'TARGET_CRS': custom_crs, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-a_srs EPSG:20936 ' + '-of JPEG ' + source + ' ' + outdir + 
'/check.jpg']) # with target using custom projection custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'TARGET_CRS': custom_crs, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-a_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' + '-of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with non-EPSG crs code self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'TARGET_CRS': 'POSTGIS:3111', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-a_srs EPSG:3111 ' + '-of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with copy subdatasets self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'COPY_SUBDATASETS': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdal_translate', '-sds ' + '-of GTiff ' + source + ' ' + outdir + '/check.tif']) # additional parameters self.assertEqual( translate_alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-strict -unscale -epo', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-of JPEG -strict -unscale -epo ' + source + ' ' + outdir + '/check.jpg']) def testClipRasterByExtent(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = ClipRasterByExtent() alg.initAlgorithm() extent = QgsRectangle(1, 2, 3, 4) with tempfile.TemporaryDirectory() as outdir: # with no NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTENT': extent, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-projwin 0.0 0.0 0.0 0.0 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTENT': extent, 'NODATA': 9999, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), 
['gdal_translate', '-projwin 0.0 0.0 0.0 0.0 -a_nodata 9999.0 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTENT': extent, 'NODATA': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-projwin 0.0 0.0 0.0 0.0 -a_nodata 0.0 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value and custom data type self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTENT': extent, 'NODATA': 0, 'DATA_TYPE': 6, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-projwin 0.0 0.0 0.0 0.0 -a_nodata 0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with creation options self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTENT': extent, 'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9', 'DATA_TYPE': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-projwin 0.0 0.0 0.0 0.0 -of JPEG -co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' + source + ' ' + outdir + '/check.jpg']) # with additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTENT': extent, 'EXTRA': '-s_srs EPSG:4326 -tps -tr 0.1 0.1', 'DATA_TYPE': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_translate', '-projwin 0.0 0.0 0.0 0.0 -of JPEG -s_srs EPSG:4326 -tps -tr 0.1 0.1 ' + source + ' ' + outdir + '/check.jpg']) def testClipRasterByMask(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') mask = os.path.join(testDataPath, 'polys.gml') alg = ClipRasterByMask() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # with no NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MASK': mask, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-of JPEG -cutline ' + mask + ' -cl polys2 -crop_to_cutline ' + source + ' ' + outdir + '/check.jpg']) # 
with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MASK': mask, 'NODATA': 9999, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-of JPEG -cutline ' + mask + ' -cl polys2 -crop_to_cutline -dstnodata 9999.0 ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MASK': mask, 'NODATA': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-of JPEG -cutline ' + mask + ' -cl polys2 -crop_to_cutline -dstnodata 0.0 ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value and custom data type self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MASK': mask, 'NODATA': 0, 'DATA_TYPE': 6, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-ot Float32 -of JPEG -cutline ' + mask + ' -cl polys2 -crop_to_cutline -dstnodata 0.0 ' + source + ' ' + outdir + '/check.jpg']) # with creation options self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MASK': mask, 'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-of JPEG -cutline ' + mask + ' -cl polys2 -crop_to_cutline -co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' + source + ' ' + outdir + '/check.jpg']) # with multothreading and additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MASK': mask, 'MULTITHREADING': True, 'EXTRA': '-nosrcalpha -wm 2048 -nomd', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-of JPEG -cutline ' + mask + ' -cl polys2 -crop_to_cutline -multi -nosrcalpha -wm 2048 -nomd ' + source + ' ' + outdir + '/check.jpg']) def testContourPolygon(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = contour_polygon() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 
'FIELD_NAME_MIN': 'min', 'FIELD_NAME_MAX': 'max', 'INTERVAL': 5, 'OUTPUT': outdir + '/check.shp'}, context, feedback), ['gdal_contour', '-p -amax max -amin min -b 1 -i 5.0 -f "ESRI Shapefile" ' + source + ' ' + outdir + '/check.shp']) def testContour(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = contour() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # with no NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'FIELD_NAME': 'elev', 'INTERVAL': 5, 'OUTPUT': outdir + '/check.shp'}, context, feedback), ['gdal_contour', '-b 1 -a elev -i 5.0 -f "ESRI Shapefile" ' + source + ' ' + outdir + '/check.shp']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'FIELD_NAME': 'elev', 'INTERVAL': 5, 'NODATA': 9999, 'OUTPUT': outdir + '/check.shp'}, context, feedback), ['gdal_contour', '-b 1 -a elev -i 5.0 -snodata 9999.0 -f "ESRI Shapefile" ' + source + ' ' + outdir + '/check.shp']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'FIELD_NAME': 'elev', 'INTERVAL': 5, 'NODATA': 0, 'OUTPUT': outdir + '/check.gpkg'}, context, feedback), ['gdal_contour', '-b 1 -a elev -i 5.0 -snodata 0.0 -f "GPKG" ' + source + ' ' + outdir + '/check.gpkg']) # with CREATE_3D self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'CREATE_3D': True, 'OUTPUT': outdir + '/check.shp'}, context, feedback), ['gdal_contour', '-b 1 -a ELEV -i 10.0 -3d -f "ESRI Shapefile" ' + source + ' ' + outdir + '/check.shp']) # with IGNORE_NODATA and OFFSET self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'IGNORE_NODATA': True, 'OFFSET': 100, 'OUTPUT': outdir + '/check.shp'}, context, feedback), ['gdal_contour', '-b 1 -a ELEV -i 10.0 -inodata -off 100.0 -f "ESRI Shapefile" ' + source + ' ' + outdir + '/check.shp']) # with additional command line parameters self.assertEqual( 
alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'EXTRA': '-e 3 -amin MIN_H', 'OUTPUT': outdir + '/check.shp'}, context, feedback), ['gdal_contour', '-b 1 -a ELEV -i 10.0 -f "ESRI Shapefile" -e 3 -amin MIN_H ' + source + ' ' + outdir + '/check.shp']) # obsolete OPTIONS param self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'OPTIONS': '-fl 100 125 150 200', 'OUTPUT': outdir + '/check.shp'}, context, feedback), ['gdal_contour', '-b 1 -a ELEV -i 10.0 -f "ESRI Shapefile" -fl 100 125 150 200 ' + source + ' ' + outdir + '/check.shp']) def testGdal2Tiles(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = gdal2tiles() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # with no NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outdir + '/'}, context, feedback), ['gdal2tiles.py', '-p mercator -w all -r average ' + source + ' ' + outdir + '/']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': -9999, 'OUTPUT': outdir + '/'}, context, feedback), ['gdal2tiles.py', '-p mercator -w all -r average -a -9999.0 ' + source + ' ' + outdir + '/']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'OUTPUT': outdir + '/'}, context, feedback), ['gdal2tiles.py', '-p mercator -w all -r average -a 0.0 ' + source + ' ' + outdir + '/']) # with input srs self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/'}, context, feedback), ['gdal2tiles.py', '-p mercator -w all -r average -s EPSG:3111 ' + source + ' ' + outdir + '/']) # with target using proj string custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': custom_crs, 'OUTPUT': outdir + '/'}, context, feedback), 
['gdal2tiles.py', '-p mercator -w all -r average -s EPSG:20936 ' + source + ' ' + outdir + '/']) # with target using custom projection custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': custom_crs, 'OUTPUT': outdir + '/'}, context, feedback), ['gdal2tiles.py', '-p mercator -w all -r average -s "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' + source + ' ' + outdir + '/']) # with non-EPSG crs code self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': 'POSTGIS:3111', 'OUTPUT': outdir + '/'}, context, feedback), ['gdal2tiles.py', '-p mercator -w all -r average -s EPSG:3111 ' + source + ' ' + outdir + '/']) def testGdalCalc(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = gdalcalc() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: output = outdir + '/check.jpg' # default execution formula = 'A*2' # default formula self.assertEqual( alg.getConsoleCommands({'INPUT_A': source, 'BAND_A': 1, 'FORMULA': formula, 'OUTPUT': output}, context, feedback), ['gdal_calc.py', '--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --outfile {}'.format(formula, source, output)]) # check that formula is not escaped and formula is returned as it is formula = 'A * 2' # <--- add spaces in the formula self.assertEqual( alg.getConsoleCommands({'INPUT_A': source, 'BAND_A': 1, 'FORMULA': formula, 'OUTPUT': output}, context, feedback), ['gdal_calc.py', '--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --outfile {}'.format(formula, source, output)]) # additional creation options formula = 'A*2' self.assertEqual( alg.getConsoleCommands({'INPUT_A': source, 'BAND_A': 1, 'FORMULA': formula, 'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75', 'OUTPUT': output}, context, feedback), ['gdal_calc.py', 
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --co COMPRESS=JPEG --co JPEG_QUALITY=75 --outfile {}'.format(formula, source, output)]) # additional parameters formula = 'A*2' self.assertEqual( alg.getConsoleCommands({'INPUT_A': source, 'BAND_A': 1, 'FORMULA': formula, 'EXTRA': '--debug --quiet', 'OUTPUT': output}, context, feedback), ['gdal_calc.py', '--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --debug --quiet --outfile {}'.format(formula, source, output)]) def testGdalInfo(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = gdalinfo() alg.initAlgorithm() self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MIN_MAX': False, 'NOGCP': False, 'NO_METADATA': False, 'STATS': False}, context, feedback), ['gdalinfo', source]) source = os.path.join(testDataPath, 'raster with spaces.tif') self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MIN_MAX': False, 'NOGCP': False, 'NO_METADATA': False, 'STATS': False}, context, feedback), ['gdalinfo', '"' + source + '"']) self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MIN_MAX': True, 'NOGCP': False, 'NO_METADATA': False, 'STATS': False}, context, feedback), ['gdalinfo', '-mm "' + source + '"']) self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MIN_MAX': False, 'NOGCP': True, 'NO_METADATA': False, 'STATS': False}, context, feedback), ['gdalinfo', '-nogcp "' + source + '"']) self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MIN_MAX': False, 'NOGCP': False, 'NO_METADATA': True, 'STATS': False}, context, feedback), ['gdalinfo', '-nomd "' + source + '"']) self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MIN_MAX': False, 'NOGCP': False, 'NO_METADATA': False, 'STATS': True}, context, feedback), ['gdalinfo', '-stats "' + source + '"']) self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MIN_MAX': False, 'NOGCP': False, 'NO_METADATA': False, 'STATS': False, 'EXTRA': '-proj4 
-listmdd -checksum'}, context, feedback), ['gdalinfo', '-proj4 -listmdd -checksum "' + source + '"']) def testGdalTindex(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = gdaltindex() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: commands = alg.getConsoleCommands({'LAYERS': [source], 'OUTPUT': outdir + '/test.shp'}, context, feedback) self.assertEqual(len(commands), 2) self.assertEqual(commands[0], 'gdaltindex') self.assertIn('-tileindex location -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1]) self.assertIn('--optfile ', commands[1]) # with input srs commands = alg.getConsoleCommands({'LAYERS': [source], 'TARGET_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/test.shp'}, context, feedback) self.assertEqual(len(commands), 2) self.assertEqual(commands[0], 'gdaltindex') self.assertIn('-tileindex location -t_srs EPSG:3111 -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1]) self.assertIn('--optfile ', commands[1]) # with target using proj string custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' commands = alg.getConsoleCommands({'LAYERS': [source], 'TARGET_CRS': custom_crs, 'OUTPUT': outdir + '/test.shp'}, context, feedback) self.assertEqual(len(commands), 2) self.assertEqual(commands[0], 'gdaltindex') self.assertIn('-tileindex location -t_srs EPSG:20936 -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1]) self.assertIn('--optfile ', commands[1]) # with target using custom projection custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' commands = alg.getConsoleCommands({'LAYERS': [source], 'TARGET_CRS': custom_crs, 'OUTPUT': outdir + '/test.shp'}, context, feedback) self.assertEqual(len(commands), 2) self.assertEqual(commands[0], 'gdaltindex') self.assertIn('-tileindex location -t_srs "+proj=utm +zone=36 +south 
+a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1]) self.assertIn('--optfile ', commands[1]) # with non-EPSG crs code commands = alg.getConsoleCommands({'LAYERS': [source], 'TARGET_CRS': 'POSTGIS:3111', 'OUTPUT': outdir + '/test.shp'}, context, feedback) self.assertEqual(len(commands), 2) self.assertEqual(commands[0], 'gdaltindex') self.assertIn( '-tileindex location -t_srs EPSG:3111 -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1]) self.assertIn('--optfile ', commands[1]) def testGridAverage(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'points.gml') alg = GridAverage() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # with no NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-z_multiply 1.5 -outsize 1754 1394', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 
-of JPEG -z_multiply 1.5 -outsize 1754 1394 ' + source + ' ' + outdir + '/check.jpg'])

    def testGridDataMetrics(self):
        # Checks only the console command built by the "Grid (data metrics)"
        # wrapper around gdal_grid; nothing is actually executed.
        context = QgsProcessingContext()
        feedback = QgsProcessingFeedback()
        source = os.path.join(testDataPath, 'points.gml')
        alg = GridDataMetrics()
        alg.initAlgorithm()

        with tempfile.TemporaryDirectory() as outdir:
            # without NODATA value
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'OUTPUT': outdir + '/check.jpg'}, context, feedback),
                ['gdal_grid',
                 '-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
                 source + ' ' + outdir + '/check.jpg'])

            # with NODATA value
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'NODATA': 9999,
                                        'OUTPUT': outdir + '/check.jpg'}, context, feedback),
                ['gdal_grid',
                 '-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
                 source + ' ' + outdir + '/check.jpg'])

            # with "0" NODATA value (must not be dropped as falsy)
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'NODATA': 0,
                                        'OUTPUT': outdir + '/check.jpg'}, context, feedback),
                ['gdal_grid',
                 '-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
                 source + ' ' + outdir + '/check.jpg'])

            # non-default datametrics (METRIC index 4 maps to average_distance)
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'METRIC': 4,
                                        'OUTPUT': outdir + '/check.jpg'}, context, feedback),
                ['gdal_grid',
                 '-l points -a average_distance:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
                 source + ' ' + outdir + '/check.jpg'])

            # additional parameters (EXTRA is appended verbatim after the format)
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
                                        'OUTPUT': outdir + '/check.tif'}, context, feedback),
                ['gdal_grid',
                 '-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 ' +
                 '-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' +
                 source + ' ' + outdir + '/check.tif'])

    def testGridInverseDistance(self):
        context =
QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'points.gml') alg = GridInverseDistance() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # without NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-z_multiply 1.5 -outsize 1754 1394', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdal_grid', '-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 ' + '-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' + source + ' ' + outdir + '/check.tif']) def testGridInverseDistanceNearestNeighbour(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'points.gml') alg = GridInverseDistanceNearestNeighbor() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # without NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': 
source, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-z_multiply 1.5 -outsize 1754 1394', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdal_grid', '-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 ' + '-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' + source + ' ' + outdir + '/check.tif']) def testGridLinear(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'points.gml') alg = GridLinear() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # without NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a linear:radius=-1.0:nodata=9999.0 -ot Float32 -of 
JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-z_multiply 1.5 -outsize 1754 1394', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdal_grid', '-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of GTiff ' + '-z_multiply 1.5 -outsize 1754 1394 ' + source + ' ' + outdir + '/check.tif']) def testGridNearestNeighbour(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'points.gml') alg = GridNearestNeighbor() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # without NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=9999.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_grid', '-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-z_multiply 1.5 -outsize 1754 1394', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdal_grid', '-l points -a 
nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of GTiff ' + '-z_multiply 1.5 -outsize 1754 1394 ' + source + ' ' + outdir + '/check.tif']) def testHillshade(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = hillshade() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'Z_FACTOR': 5, 'SCALE': 2, 'AZIMUTH': 90, 'ALTITUDE': 20, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'hillshade ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0']) # paths with space source_with_space = os.path.join(testDataPath, 'raster with spaces.tif') self.assertEqual( alg.getConsoleCommands({'INPUT': source_with_space, 'BAND': 1, 'Z_FACTOR': 5, 'SCALE': 2, 'AZIMUTH': 90, 'ALTITUDE': 20, 'OUTPUT': outdir + '/check out.tif'}, context, feedback), ['gdaldem', 'hillshade ' + '"' + source_with_space + '" ' + '"{}/check out.tif" -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0'.format(outdir)]) # compute edges self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'Z_FACTOR': 5, 'SCALE': 2, 'AZIMUTH': 90, 'ALTITUDE': 20, 'COMPUTE_EDGES': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'hillshade ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -compute_edges']) # with ZEVENBERGEN self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'Z_FACTOR': 5, 'SCALE': 2, 'AZIMUTH': 90, 'ALTITUDE': 20, 'ZEVENBERGEN': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'hillshade ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -alg ZevenbergenThorne']) # with COMBINED self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'Z_FACTOR': 5, 'SCALE': 2, 'AZIMUTH': 90, 'ALTITUDE': 20, 'COMBINED': True, 'OUTPUT': 
outdir + '/check.tif'}, context, feedback), ['gdaldem', 'hillshade ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -combined']) # with multidirectional - "az" argument is not allowed! self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'Z_FACTOR': 5, 'SCALE': 2, 'AZIMUTH': 90, 'ALTITUDE': 20, 'MULTIDIRECTIONAL': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'hillshade ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -alt 20.0 -multidirectional']) # defaults with additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'EXTRA': '-q', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'hillshade ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -z 1.0 -s 1.0 -az 315.0 -alt 45.0 -q']) def testAspect(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = aspect() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'TRIG_ANGLE': False, 'ZERO_FLAT': False, 'COMPUTE_EDGES': False, 'ZEVENBERGEN': False, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'aspect ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1']) # paths with space source_with_space = os.path.join(testDataPath, 'raster with spaces.tif') self.assertEqual( alg.getConsoleCommands({'INPUT': source_with_space, 'BAND': 1, 'TRIG_ANGLE': False, 'ZERO_FLAT': False, 'COMPUTE_EDGES': False, 'ZEVENBERGEN': False, 'OUTPUT': outdir + '/check out.tif'}, context, feedback), ['gdaldem', 'aspect ' + '"' + source_with_space + '" ' + '"{}/check out.tif" -of GTiff -b 1'.format(outdir)]) # compute edges self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'TRIG_ANGLE': False, 'ZERO_FLAT': False, 'COMPUTE_EDGES': True, 'ZEVENBERGEN': False, 'OUTPUT': outdir + '/check.tif'}, context, 
feedback), ['gdaldem', 'aspect ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -compute_edges']) # with ZEVENBERGEN self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'TRIG_ANGLE': False, 'ZERO_FLAT': False, 'COMPUTE_EDGES': False, 'ZEVENBERGEN': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'aspect ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -alg ZevenbergenThorne']) # with ZERO_FLAT self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'TRIG_ANGLE': False, 'ZERO_FLAT': True, 'COMPUTE_EDGES': False, 'ZEVENBERGEN': False, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'aspect ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -zero_for_flat']) # with TRIG_ANGLE self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'TRIG_ANGLE': True, 'ZERO_FLAT': False, 'COMPUTE_EDGES': False, 'ZEVENBERGEN': False, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'aspect ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -trigonometric']) # with creation options self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'TRIG_ANGLE': False, 'ZERO_FLAT': False, 'COMPUTE_EDGES': False, 'ZEVENBERGEN': False, 'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'aspect ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -co COMPRESS=JPEG -co JPEG_QUALITY=75']) # with additional parameter self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'TRIG_ANGLE': False, 'ZERO_FLAT': False, 'COMPUTE_EDGES': False, 'ZEVENBERGEN': False, 'EXTRA': '-q', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'aspect ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -q']) def testSlope(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = slope() alg.initAlgorithm() with 
tempfile.TemporaryDirectory() as outdir: self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'slope ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -s 1.0']) # compute edges self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'COMPUTE_EDGES': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'slope ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -s 1.0 -compute_edges']) # with ZEVENBERGEN self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'ZEVENBERGEN': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'slope ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -s 1.0 -alg ZevenbergenThorne']) # custom ratio self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'SCALE': 2.0, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'slope ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -s 2.0']) # with creation options self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'slope ' + source + ' ' + outdir + '/check.tif -of GTiff -b 1 -s 1.0 -co COMPRESS=JPEG -co JPEG_QUALITY=75']) # with additional parameter self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'EXTRA': '-q', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdaldem', 'slope ' + source + ' ' + outdir + '/check.jpg -of JPEG -b 1 -s 1.0 -q']) def testColorRelief(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') colorTable = os.path.join(testDataPath, 'colors.txt') alg = ColorRelief() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'COLOR_TABLE': colorTable, 'OUTPUT': outdir + '/check.tif'}, 
context, feedback), ['gdaldem', 'color-relief ' + source + ' ' + colorTable + ' ' + outdir + '/check.tif -of GTiff -b 1']) # paths with space source_with_space = os.path.join(testDataPath, 'raster with spaces.tif') self.assertEqual( alg.getConsoleCommands({'INPUT': source_with_space, 'BAND': 1, 'COLOR_TABLE': colorTable, 'OUTPUT': outdir + '/check out.tif'}, context, feedback), ['gdaldem', 'color-relief ' + '"' + source_with_space + '" ' + colorTable + ' ' + '"{}/check out.tif" -of GTiff -b 1'.format(outdir)]) # compute edges self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'COLOR_TABLE': colorTable, 'COMPUTE_EDGES': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'color-relief ' + source + ' ' + colorTable + ' ' + outdir + '/check.tif -of GTiff -b 1 -compute_edges']) # with custom matching mode self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'COLOR_TABLE': colorTable, 'MATCH_MODE': 1, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'color-relief ' + source + ' ' + colorTable + ' ' + outdir + '/check.tif -of GTiff -b 1 -nearest_color_entry']) # with creation options self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'COLOR_TABLE': colorTable, 'MATCH_MODE': 1, 'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'color-relief ' + source + ' ' + colorTable + ' ' + outdir + '/check.tif -of GTiff -b 1 -nearest_color_entry -co COMPRESS=JPEG -co JPEG_QUALITY=75']) # with additional parameter self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'COLOR_TABLE': colorTable, 'EXTRA': '-alpha -q', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdaldem', 'color-relief ' + source + ' ' + colorTable + ' ' + outdir + '/check.tif -of GTiff -b 1 -alpha -q']) def testProximity(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg 
= proximity() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # without NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_proximity.py', '-srcband 1 -distunits PIXEL -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'BAND': 2, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_proximity.py', '-srcband 2 -distunits PIXEL -nodata 9999.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'BAND': 1, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_proximity.py', '-srcband 1 -distunits PIXEL -nodata 0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'EXTRA': '-dstband 2 -values 3,4,12', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_proximity.py', '-srcband 1 -distunits PIXEL -ot Float32 -of JPEG -dstband 2 -values 3,4,12 ' + source + ' ' + outdir + '/check.jpg']) def testRasterize(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'polys.gml') alg = rasterize() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # with no NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'FIELD': 'id', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_rasterize', '-l polys2 -a id -ts 0.0 0.0 -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'FIELD': 'id', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdal_rasterize', '-l polys2 -a id -ts 0.0 0.0 -a_nodata 9999.0 -ot Float32 -of JPEG ' + source + 
' ' + outdir + '/check.jpg'])

            # with "0" INIT value (must not be dropped as falsy)
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'INIT': 0,
                                        'FIELD': 'id',
                                        'OUTPUT': outdir + '/check.jpg'}, context, feedback),
                ['gdal_rasterize',
                 '-l polys2 -a id -ts 0.0 0.0 -init 0.0 -ot Float32 -of JPEG ' +
                 source + ' ' + outdir + '/check.jpg'])

            # with "0" NODATA value
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'NODATA': 0,
                                        'FIELD': 'id',
                                        'OUTPUT': outdir + '/check.jpg'}, context, feedback),
                ['gdal_rasterize',
                 '-l polys2 -a id -ts 0.0 0.0 -a_nodata 0.0 -ot Float32 -of JPEG ' +
                 source + ' ' + outdir + '/check.jpg'])

            # extra command-line switches are appended verbatim
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': source,
                                        'FIELD': 'id',
                                        'EXTRA': '-at -add',
                                        'OUTPUT': outdir + '/check.jpg'}, context, feedback),
                ['gdal_rasterize',
                 '-l polys2 -a id -ts 0.0 0.0 -ot Float32 -of JPEG -at -add ' +
                 source + ' ' + outdir + '/check.jpg'])

    def testRasterizeOver(self):
        # Command built by the "rasterize over" wrapper: the vector's 'id'
        # attribute is burned into the existing raster (no separate OUTPUT;
        # the raster path is the final argument).
        context = QgsProcessingContext()
        feedback = QgsProcessingFeedback()
        raster = os.path.join(testDataPath, 'dem.tif')
        vector = os.path.join(testDataPath, 'polys.gml')

        alg = rasterize_over()
        alg.initAlgorithm()

        # NOTE(review): outdir is not used by the assertions below — the
        # algorithm writes into the input raster, not a temp output.
        with tempfile.TemporaryDirectory() as outdir:
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': vector,
                                        'FIELD': 'id',
                                        'INPUT_RASTER': raster}, context, feedback),
                ['gdal_rasterize',
                 '-l polys2 -a id ' + vector + ' ' + raster])

            # with the ADD option enabled (-add switch emitted)
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': vector,
                                        'FIELD': 'id',
                                        'ADD': True,
                                        'INPUT_RASTER': raster}, context, feedback),
                ['gdal_rasterize',
                 '-l polys2 -a id -add ' + vector + ' ' + raster])

            # extra command-line switches are appended verbatim
            self.assertEqual(
                alg.getConsoleCommands({'INPUT': vector,
                                        'FIELD': 'id',
                                        'EXTRA': '-i',
                                        'INPUT_RASTER': raster}, context, feedback),
                ['gdal_rasterize',
                 '-l polys2 -a id -i ' + vector + ' ' + raster])

    def testRasterizeOverFixed(self):
        context = QgsProcessingContext()
        feedback = QgsProcessingFeedback()
        raster = os.path.join(testDataPath, 'dem.tif')
        vector = os.path.join(testDataPath, 'polys.gml')

        alg = rasterize_over_fixed_value()
        alg.initAlgorithm()

        with
tempfile.TemporaryDirectory() as outdir: self.assertEqual( alg.getConsoleCommands({'INPUT': vector, 'BURN': 100, 'INPUT_RASTER': raster}, context, feedback), ['gdal_rasterize', '-l polys2 -burn 100.0 ' + vector + ' ' + raster]) self.assertEqual( alg.getConsoleCommands({'INPUT': vector, 'BURN': 100, 'ADD': True, 'INPUT_RASTER': raster}, context, feedback), ['gdal_rasterize', '-l polys2 -burn 100.0 -add ' + vector + ' ' + raster]) self.assertEqual( alg.getConsoleCommands({'INPUT': vector, 'BURN': 100, 'EXTRA': '-i', 'INPUT_RASTER': raster}, context, feedback), ['gdal_rasterize', '-l polys2 -burn 100.0 -i ' + vector + ' ' + raster]) def testRetile(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = retile() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: self.assertEqual( alg.getConsoleCommands({'INPUT': [source], 'OUTPUT': outdir}, context, feedback), ['gdal_retile.py', '-ps 256 256 -overlap 0 -levels 1 -r near -ot Float32 -targetDir {} '.format(outdir) + source]) # with input srs self.assertEqual( alg.getConsoleCommands({'INPUT': [source], 'SOURCE_CRS': 'EPSG:3111', 'OUTPUT': outdir}, context, feedback), ['gdal_retile.py', '-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:3111 -r near -ot Float32 -targetDir {} {}'.format(outdir, source) ]) # with target using proj string custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( alg.getConsoleCommands({'INPUT': [source], 'SOURCE_CRS': custom_crs, 'OUTPUT': outdir}, context, feedback), ['gdal_retile.py', '-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:20936 -r near -ot Float32 -targetDir {} {}'.format(outdir, source) ]) # with target using custom projection custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( alg.getConsoleCommands({'INPUT': [source], 
'SOURCE_CRS': custom_crs, 'OUTPUT': outdir}, context, feedback), ['gdal_retile.py', '-ps 256 256 -overlap 0 -levels 1 -s_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -r near -ot Float32 -targetDir {} {}'.format(outdir, source) ]) # with non-EPSG crs code self.assertEqual( alg.getConsoleCommands({'INPUT': [source], 'SOURCE_CRS': 'POSTGIS:3111', 'OUTPUT': outdir}, context, feedback), ['gdal_retile.py', '-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:3111 -r near -ot Float32 -targetDir {} {}'.format(outdir, source) ]) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': [source], 'EXTRA': '-v -tileIndex tindex.shp', 'OUTPUT': outdir}, context, feedback), ['gdal_retile.py', '-ps 256 256 -overlap 0 -levels 1 -r near -ot Float32 -v -tileIndex tindex.shp -targetDir {} '.format(outdir) + source]) def testWarp(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = warp() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # with no NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with None NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': None, 'SOURCE_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 9999, 'SOURCE_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -dstnodata 9999.0 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 
'SOURCE_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -dstnodata 0.0 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with "0" NODATA value and custom data type self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NODATA': 0, 'DATA_TYPE': 6, 'SOURCE_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -dstnodata 0.0 -r near -ot Float32 -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with target using EPSG self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': 'EPSG:3111', 'TARGET_CRS': 'EPSG:4326', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -t_srs EPSG:4326 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with target using proj string custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': custom_crs, 'TARGET_CRS': custom_crs, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:20936 -t_srs EPSG:20936 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with target using custom projection custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': custom_crs, 'TARGET_CRS': custom_crs, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -t_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with target using custom projection and user-defined extent custom_crs2 = 'proj4: +proj=longlat +a=6378388 +b=6356912 +no_defs' self.assertEqual( 
alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': custom_crs2, 'TARGET_CRS': custom_crs2, 'TARGET_EXTENT': '18.67,18.70,45.78,45.81', 'TARGET_EXTENT_CRS': custom_crs2, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['gdalwarp', '-s_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -t_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -r near -te 18.67 45.78 18.7 45.81 -te_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -of GTiff ' + source + ' ' + outdir + '/check.tif']) # with non-EPSG crs code self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': 'POSTGIS:3111', 'TARGET_CRS': 'POSTGIS:3111', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -t_srs EPSG:3111 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with target resolution with None value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': 'EPSG:3111', 'TARGET_RESOLUTION': None, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # test target resolution with a valid value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': 'EPSG:3111', 'TARGET_RESOLUTION': 10.0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -tr 10.0 10.0 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # test target resolution with a value of zero, to be ignored self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'SOURCE_CRS': 'EPSG:3111', 'TARGET_RESOLUTION': 0.0, 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-s_srs EPSG:3111 -r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) # with additional command-line parameter self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-dstalpha', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-r near -of JPEG -dstalpha ' + source + ' ' + outdir + '/check.jpg']) 
self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-dstalpha -srcnodata -9999', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-r near -of JPEG -dstalpha -srcnodata -9999 ' + source + ' ' + outdir + '/check.jpg']) self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-dstalpha -srcnodata "-9999 -8888"', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-r near -of JPEG -dstalpha -srcnodata "-9999 -8888" ' + source + ' ' + outdir + '/check.jpg']) self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '', 'OUTPUT': outdir + '/check.jpg'}, context, feedback), ['gdalwarp', '-r near -of JPEG ' + source + ' ' + outdir + '/check.jpg']) def testMerge(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = [os.path.join(testDataPath, 'dem1.tif'), os.path.join(testDataPath, 'dem1.tif')] alg = merge() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # this algorithm creates temporary text file with input layers # so we strip its path, leaving only filename cmd = alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outdir + '/check.tif'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):] self.assertEqual(cmd, ['gdal_merge.py', '-ot Float32 -of GTiff ' + '-o ' + outdir + '/check.tif ' + '--optfile mergeInputFiles.txt']) # separate cmd = alg.getConsoleCommands({'INPUT': source, 'SEPARATE': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):] self.assertEqual(cmd, ['gdal_merge.py', '-separate -ot Float32 -of GTiff ' + '-o ' + outdir + '/check.tif ' + '--optfile mergeInputFiles.txt']) # assign nodata cmd = alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-tap -ps 0.1 0.1', 'OUTPUT': outdir + '/check.tif'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):] 
self.assertEqual(cmd, ['gdal_merge.py', '-ot Float32 -of GTiff -tap -ps 0.1 0.1 ' + '-o ' + outdir + '/check.tif ' + '--optfile mergeInputFiles.txt']) # additional parameters cmd = alg.getConsoleCommands({'INPUT': source, 'NODATA_OUTPUT': -9999, 'OUTPUT': outdir + '/check.tif'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):] self.assertEqual(cmd, ['gdal_merge.py', '-a_nodata -9999 -ot Float32 -of GTiff ' + '-o ' + outdir + '/check.tif ' + '--optfile mergeInputFiles.txt']) def testNearblack(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = nearblack() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # defaults self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['nearblack', source + ' -of GTiff -o ' + outdir + '/check.tif ' + '-near 15']) # search white pixels self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'WHITE': True, 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['nearblack', source + ' -of GTiff -o ' + outdir + '/check.tif ' + '-near 15 -white']) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-nb 5 -setalpha', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['nearblack', source + ' -of GTiff -o ' + outdir + '/check.tif ' + '-near 15 -nb 5 -setalpha']) # additional parameters and creation options self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75', 'EXTRA': '-nb 5 -setalpha', 'OUTPUT': outdir + '/check.tif'}, context, feedback), ['nearblack', source + ' -of GTiff -o ' + outdir + '/check.tif ' + '-near 15 -co COMPRESS=JPEG -co JPEG_QUALITY=75 -nb 5 -setalpha']) def testRearrangeBands(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') with 
tempfile.TemporaryDirectory() as outdir: outsource = outdir + '/check.tif' alg = rearrange_bands() alg.initAlgorithm() # single band self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BANDS': 1, 'OUTPUT': outsource}, context, feedback), ['gdal_translate', '-b 1 ' + '-of GTiff ' + source + ' ' + outsource]) # three bands, re-ordered self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BANDS': [3, 2, 1], 'OUTPUT': outsource}, context, feedback), ['gdal_translate', '-b 3 -b 2 -b 1 ' + '-of GTiff ' + source + ' ' + outsource]) # three bands, re-ordered with custom data type self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BANDS': [3, 2, 1], 'DATA_TYPE': 6, 'OUTPUT': outsource}, context, feedback), ['gdal_translate', '-b 3 -b 2 -b 1 ' + '-ot Float32 -of GTiff ' + source + ' ' + outsource]) def testFillnodata(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') mask = os.path.join(testDataPath, 'raster.tif') with tempfile.TemporaryDirectory() as outdir: outsource = outdir + '/check.tif' alg = fillnodata() alg.initAlgorithm() # with mask value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'DISTANCE': 10, 'ITERATIONS': 0, 'MASK_LAYER': mask, 'NO_MASK': False, 'OUTPUT': outsource}, context, feedback), ['gdal_fillnodata.py', '-md 10 -b 1 -mask ' + mask + ' -of GTiff ' + source + ' ' + outsource]) # without mask value self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'DISTANCE': 10, 'ITERATIONS': 0, 'NO_MASK': False, 'OUTPUT': outsource}, context, feedback), ['gdal_fillnodata.py', '-md 10 -b 1 ' + '-of GTiff ' + source + ' ' + outsource]) # nomask true self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'DISTANCE': 10, 'ITERATIONS': 0, 'NO_MASK': True, 'OUTPUT': outsource}, context, feedback), ['gdal_fillnodata.py', '-md 10 -b 1 -nomask ' + '-of GTiff ' + source + ' ' + outsource]) # creation options self.assertEqual( 
alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75', 'OUTPUT': outsource}, context, feedback), ['gdal_fillnodata.py', '-md 10 -b 1 -of GTiff -co COMPRESS=JPEG -co JPEG_QUALITY=75 ' + source + ' ' + outsource]) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'EXTRA': '-q', 'OUTPUT': outsource}, context, feedback), ['gdal_fillnodata.py', '-md 10 -b 1 -of GTiff -q ' + source + ' ' + outsource]) def testGdalAddo(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') with tempfile.TemporaryDirectory() as outdir: alg = gdaladdo() alg.initAlgorithm() # defaults self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'LEVELS': '2 4 8 16', 'CLEAN': False, 'RESAMPLING': 0, 'FORMAT': 0}, context, feedback), ['gdaladdo', source + ' ' + '-r nearest 2 4 8 16']) # with "clean" option self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'LEVELS': '2 4 8 16', 'CLEAN': True, 'RESAMPLING': 0, 'FORMAT': 0}, context, feedback), ['gdaladdo', source + ' ' + '-r nearest -clean 2 4 8 16']) # ovr format self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'LEVELS': '2 4 8 16', 'CLEAN': False, 'RESAMPLING': 0, 'FORMAT': 1}, context, feedback), ['gdaladdo', source + ' ' + '-r nearest -ro 2 4 8 16']) # Erdas format self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'LEVELS': '2 4 8 16', 'CLEAN': False, 'RESAMPLING': 0, 'FORMAT': 2}, context, feedback), ['gdaladdo', source + ' ' + '-r nearest --config USE_RRD YES 2 4 8 16']) # custom resampling method format self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'LEVELS': '2 4 8 16', 'CLEAN': False, 'RESAMPLING': 4, 'FORMAT': 0}, context, feedback), ['gdaladdo', source + ' ' + '-r cubicspline 2 4 8 16']) # more levels self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'LEVELS': '2 4 8 16 32 64', 'CLEAN': False, 'RESAMPLING': 0, 'FORMAT': 0}, context, 
feedback), ['gdaladdo', source + ' ' + '-r nearest 2 4 8 16 32 64']) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'LEVELS': '2 4 8 16', 'CLEAN': False, 'EXTRA': '--config COMPRESS_OVERVIEW JPEG'}, context, feedback), ['gdaladdo', source + ' ' + '--config COMPRESS_OVERVIEW JPEG 2 4 8 16']) if GdalUtils.version() >= 230000: # without levels self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'CLEAN': False}, context, feedback), ['gdaladdo', source]) # without advanced params self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'LEVELS': '2 4 8 16', 'CLEAN': False}, context, feedback), ['gdaladdo', source + ' ' + '2 4 8 16']) def testSieve(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') mask = os.path.join(testDataPath, 'raster.tif') with tempfile.TemporaryDirectory() as outdir: outsource = outdir + '/check.tif' alg = sieve() alg.initAlgorithm() # defaults self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'OUTPUT': outsource}, context, feedback), ['gdal_sieve.py', '-st 10 -4 -of GTiff ' + source + ' ' + outsource]) # Eight connectedness and custom threshold self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'THRESHOLD': 16, 'EIGHT_CONNECTEDNESS': True, 'OUTPUT': outsource}, context, feedback), ['gdal_sieve.py', '-st 16 -8 -of GTiff ' + source + ' ' + outsource]) # without default mask layer self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'NO_MASK': True, 'OUTPUT': outsource}, context, feedback), ['gdal_sieve.py', '-st 10 -4 -nomask -of GTiff ' + source + ' ' + outsource]) # defaults with external validity mask self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'MASK_LAYER': mask, 'OUTPUT': outsource}, context, feedback), ['gdal_sieve.py', '-st 10 -4 -mask ' + mask + ' -of GTiff ' + source + ' ' + outsource]) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'EXTRA': '-q', 
'OUTPUT': outsource}, context, feedback), ['gdal_sieve.py', '-st 10 -4 -of GTiff -q ' + source + ' ' + outsource]) def testGdal2Xyz(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') with tempfile.TemporaryDirectory() as outdir: outsource = outdir + '/check.csv' alg = gdal2xyz() alg.initAlgorithm() # defaults self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'CSV': False, 'OUTPUT': outsource}, context, feedback), ['gdal2xyz.py', '-band 1 ' + source + ' ' + outsource]) # csv output self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'CSV': True, 'OUTPUT': outsource}, context, feedback), ['gdal2xyz.py', '-band 1 -csv ' + source + ' ' + outsource]) def testGdalPolygonize(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') with tempfile.TemporaryDirectory() as outdir: outsource = outdir + '/check.shp' alg = polygonize() alg.initAlgorithm() # defaults self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'FIELD': 'DN', 'EIGHT_CONNECTEDNESS': False, 'OUTPUT': outsource}, context, feedback), ['gdal_polygonize.py', source + ' ' + outsource + ' ' + '-b 1 -f "ESRI Shapefile" check DN' ]) self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'FIELD': 'VAL', 'EIGHT_CONNECTEDNESS': False, 'OUTPUT': outsource}, context, feedback), ['gdal_polygonize.py', source + ' ' + outsource + ' ' + '-b 1 -f "ESRI Shapefile" check VAL' ]) # 8 connectedness self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'FIELD': 'DN', 'EIGHT_CONNECTEDNESS': True, 'OUTPUT': outsource}, context, feedback), ['gdal_polygonize.py', source + ' ' + outsource + ' ' + '-8 -b 1 -f "ESRI Shapefile" check DN' ]) # custom output format outsource = outdir + '/check.gpkg' self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'FIELD': 'DN', 'EIGHT_CONNECTEDNESS': False, 'OUTPUT': 
outsource}, context, feedback), ['gdal_polygonize.py', source + ' ' + outsource + ' ' + '-b 1 -f "GPKG" check DN' ]) # additional parameters self.assertEqual( alg.getConsoleCommands({'INPUT': source, 'BAND': 1, 'FIELD': 'DN', 'EXTRA': '-nomask -q', 'OUTPUT': outsource}, context, feedback), ['gdal_polygonize.py', source + ' ' + outsource + ' ' + '-b 1 -f "GPKG" -nomask -q check DN' ]) def testGdalPansharpen(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() panchrom = os.path.join(testDataPath, 'dem.tif') spectral = os.path.join(testDataPath, 'raster.tif') with tempfile.TemporaryDirectory() as outdir: outsource = outdir + '/out.tif' alg = pansharp() alg.initAlgorithm() # defaults self.assertEqual( alg.getConsoleCommands({'SPECTRAL': spectral, 'PANCHROMATIC': panchrom, 'OUTPUT': outsource}, context, feedback), ['gdal_pansharpen.py', panchrom + ' ' + spectral + ' ' + outsource + ' ' + '-r cubic -of GTiff' ]) # custom resampling self.assertEqual( alg.getConsoleCommands({'SPECTRAL': spectral, 'PANCHROMATIC': panchrom, 'RESAMPLING': 4, 'OUTPUT': outsource}, context, feedback), ['gdal_pansharpen.py', panchrom + ' ' + spectral + ' ' + outsource + ' ' + '-r lanczos -of GTiff' ]) # additional parameters self.assertEqual( alg.getConsoleCommands({'SPECTRAL': spectral, 'PANCHROMATIC': panchrom, 'EXTRA': '-bitdepth 12 -threads ALL_CPUS', 'OUTPUT': outsource}, context, feedback), ['gdal_pansharpen.py', panchrom + ' ' + spectral + ' ' + outsource + ' ' + '-r cubic -of GTiff -bitdepth 12 -threads ALL_CPUS' ]) def testGdalViewshed(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() dem = os.path.join(testDataPath, 'dem.tif') with tempfile.TemporaryDirectory() as outdir: outsource = outdir + '/out.tif' alg = viewshed() alg.initAlgorithm() # defaults self.assertEqual( alg.getConsoleCommands({'INPUT': dem, 'BAND': 1, 'OBSERVER': '18.67274,45.80599', 'OUTPUT': outsource}, context, feedback), ['gdal_viewshed', '-b 1 -ox 18.67274 -oy 45.80599 
-oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' + dem + ' ' + outsource ]) self.assertEqual( alg.getConsoleCommands({'INPUT': dem, 'BAND': 2, 'OBSERVER': '18.67274,45.80599', 'OBSERVER_HEIGHT': 1.8, 'TARGET_HEIGHT': 20, 'MAX_DISTANCE': 1000, 'OUTPUT': outsource}, context, feedback), ['gdal_viewshed', '-b 2 -ox 18.67274 -oy 45.80599 -oz 1.8 -tz 20.0 -md 1000.0 -f GTiff ' + dem + ' ' + outsource ]) self.assertEqual( alg.getConsoleCommands({'INPUT': dem, 'BAND': 1, 'OBSERVER': '18.67274,45.80599', 'EXTRA': '-a_nodata=-9999 -cc 0.2', 'OUTPUT': outsource}, context, feedback), ['gdal_viewshed', '-b 1 -ox 18.67274 -oy 45.80599 -oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' + '-a_nodata=-9999 -cc 0.2 ' + dem + ' ' + outsource ]) self.assertEqual( alg.getConsoleCommands({'INPUT': dem, 'BAND': 1, 'OBSERVER': '18.67274,45.80599', 'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9', 'OUTPUT': outsource}, context, feedback), ['gdal_viewshed', '-b 1 -ox 18.67274 -oy 45.80599 -oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' + '-co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' + dem + ' ' + outsource ]) def testBuildVrt(self): context = QgsProcessingContext() feedback = QgsProcessingFeedback() source = os.path.join(testDataPath, 'dem.tif') alg = buildvrt() alg.initAlgorithm() with tempfile.TemporaryDirectory() as outdir: # defaults cmd = alg.getConsoleCommands({'INPUT': [source], 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -r nearest ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) # custom resolution cmd = alg.getConsoleCommands({'INPUT': [source], 'RESOLUTION': 2, 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution lowest -separate -r nearest ' + '-input_file_list 
buildvrtInputFiles.txt ' + outdir + '/check.vrt']) # single layer cmd = alg.getConsoleCommands({'INPUT': [source], 'SEPARATE': False, 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -r nearest ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) # projection difference cmd = alg.getConsoleCommands({'INPUT': [source], 'PROJ_DIFFERENCE': True, 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -allow_projection_difference -r nearest ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) # add alpha band cmd = alg.getConsoleCommands({'INPUT': [source], 'ADD_ALPHA': True, 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -addalpha -r nearest ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) # assign CRS cmd = alg.getConsoleCommands({'INPUT': [source], 'ASSIGN_CRS': 'EPSG:3111', 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -a_srs EPSG:3111 -r nearest ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs' cmd = alg.getConsoleCommands({'INPUT': [source], 'ASSIGN_CRS': custom_crs, 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + 
t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -a_srs EPSG:20936 -r nearest ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) # source NODATA cmd = alg.getConsoleCommands({'INPUT': [source], 'SRC_NODATA': '-9999', 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -r nearest -srcnodata "-9999" ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) cmd = alg.getConsoleCommands({'INPUT': [source], 'SRC_NODATA': '-9999 9999', 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -r nearest -srcnodata "-9999 9999" ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) cmd = alg.getConsoleCommands({'INPUT': [source], 'SRC_NODATA': '', 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -r nearest ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) # additional parameters cmd = alg.getConsoleCommands({'INPUT': [source], 'EXTRA': '-overwrite -optim RASTER -vrtnodata -9999', 'OUTPUT': outdir + '/check.vrt'}, context, feedback) t = cmd[1] cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):] self.assertEqual(cmd, ['gdalbuildvrt', '-resolution average -separate -r nearest -overwrite -optim RASTER -vrtnodata -9999 ' + '-input_file_list buildvrtInputFiles.txt ' + outdir + '/check.vrt']) if __name__ == '__main__': nose2.main()
Click here to save up to 5% on the Nikon ProStaff 3-9x40mm! Rated 4.8 out of 5 stars from over 282 customers! Click the image for reviews and discounts! Life and death situations don’t have second chances so anyone would want to have the highest possibility to survive by obtaining the best equipment and knowledge. Hunting games and law enforcement operations are the same. To improve your chances of victory, get a load of the benefits offered by Nikon ProStaff 3-9 x 40mm Black Matte Riflescope and experience guaranteed high accuracy shooting. Let me tackle below the different points you should know about this model and determine whether it’s a good fit for your taste. After reading our Nikon ProStaff 3-9x40mm Review you will know if it is the right scope for you. Among rifling enthusiasts, a lot of beginners don’t see the importance of having a riflescope. Some consider it to be a waste of investment since a rifle can function properly even without it. However, the most experienced hunters would definitely say otherwise. It might just be a small article, but it is capable of turning an amateur into a sharpshooter with the right handling. The Nikon ProStaff 3-9 x 40mm Black Matte Riflescope (BDC) is a scope made for short range to middle range target distances. It belongs to the low-priced scope group but can easily compete with expensive types with its useful features. It is a great choice for shooting under heavy cover and stealth operations. The highlight of this product is its reticle set-up that employs a high technology mechanism. The overall effect of this feature is easier scope navigation and magnification. It has see through ballistic circles allowing precise long range target shooting. You can also manually decide to drop the bullet up to 600 yards and set it to “dead on” on any type of focus. For short range shooting, the crosshairs itself serve as the target mark. 
The simplified way of pinpointing an object makes it a very useful tool for commando missions that don’t have a specific event flow. For a low cost scope, this manufacturer is very generous when it comes to improving individual functions. The ProStaff’s multiple coated lenses, for instance, are a good example of high-end optic engineering, enabling users to see clearly regardless of whether it’s day or night. The set-up for this particular model can allow up to 98% of light to reach your eyes. This is the reason why it is suitable for heavy cover shooting locations. The scope’s zero-reset turret adjustment feature makes it quick and convenient to use. The spring-loaded knob makes this possible. You can easily reset to zero and re-engage anytime the situation calls for it. Minor adjustments to improve shooting performance and precision can be easily done because of the uncomplicated design of the turret controls. The 40mm objective lens in itself is a very good feature for an affordable multi-purpose scope. Combined with its zoom capability that can go as high as 9 times, focusing on a long range target is as easy as looking through a microscope. To give you an idea, you only need to multiply the diameter of the objective lenses by the zoom level to obtain the clarity of the image you are seeing. A 40mm diameter paired with 10x magnification means that you can see the target 400x closer. It would be even more amazing if you use the 12x option. The scope’s zoom range is good for people who like to explore different target ranges and want to save on purchasing different rifle sets by getting a multi-purpose one. The model is essentially cheap for what it offers. This is a great saving option since it can work with many types of rifles and different ranges. The lenses allow clear and vivid focus images without complicated controls.
The overall usability of the product is very high because the controls are simple and can be understood immediately, even by beginners. You spend less time making minute adjustments to position and clarity. The adjustable BDC reticle provides a flexible target search and easier landscape tracking. It is lightweight, which reduces the chance of body soreness from long periods of use and weight support. Although it is lightweight compared to others, it has a very bulky structure. It can be awkward to use for those who are used to compact riflescopes. Moreover, it doesn’t match well with smaller rifle guns due to its size. It doesn’t come with a mounting ring. You can easily buy the ring from any gun hardware store so it is not much of a problem. The Nikon ProStaff 3-9 x 40mm Black Matte best performs with an AR 15 rifle. Based on user experience, it also does well with a REM 700 in .308 and the Ruger American .22LR. It has a clear lens that makes it a good match for a Tikka T3, or a pair of Remington 700 bdl 30-06 and H&R (NEF). The scope comes in a standard size so it would fit most types of rifles. In order to get the best out of this advanced riflescope, you need similarly proficient handling skills. Once you get enough experience and develop the right instincts to productively traverse different shooting terrains, this particular scope model will definitely return the favor by giving you the most accurate target focus. Improve your shooting precision overnight by getting a riflescope that’s best suited for you. A versatile kind like the Nikon ProStaff 3-9 x 40mm Black Matte Riflescope (BDC) brings you not just accuracy but also helpful features that don’t demand much physical prowess. Protect your finances and body with an affordable scope that’s made to make shooting as convenient and easy as ever. Overall our Nikon ProStaff 3-9x40mm Review is rated 4.8 out of 5 stars.
#!/usr/bin/env python
"""A small GTK editor for DokuWiki sites, talking to the wiki over XML-RPC."""
import gtk
import pango
from kiwi.ui.gadgets import quit_if_last
from kiwi.ui.delegates import GladeDelegate
from kiwi.ui.objectlist import ObjectList, Column, ObjectTree
from xmlrpclib import ServerProxy
from urllib import urlencode
import gtkmozembed
import gtksourceview
#import gtksourceview2 as gtksourceview
#import gtkhtml2
#import simplebrowser
from buffer import DokuwikiBuffer

# Standard Cancel/OK button layout shared by all modal dialogs below.
dialog_buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                  gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)


class ModalDialog(gtk.Dialog):
    """A modal dialog pre-wired with Cancel/OK buttons."""

    def __init__(self, title):
        gtk.Dialog.__init__(self, title=title, flags=gtk.DIALOG_MODAL,
                            buttons=dialog_buttons)


# wrappers for kiwi treeview widgets
class Section(object):
    """A wiki namespace node displayed in the page tree."""

    def __init__(self, name, id=None):
        self.name = name
        if id:
            self.id = id
        else:
            self.id = name


class DictWrapper(object):
    """Expose a dict's keys as attributes, as kiwi Column accessors expect."""

    def __init__(self, obj, id=None):
        self._obj = obj
        if id:
            self.name = id

    def __getattr__(self, name):
        # Narrowed from a bare except: only a missing key means "no such
        # attribute"; any other error should propagate.
        try:
            return self._obj[name]
        except KeyError:
            raise AttributeError(name)


# funtion to setup some simple style tags
def setup_tags(table):
    """Populate *table* with heading (h1-h6), bold and italic text tags."""
    for i, tag in enumerate(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']):
        tag_h1 = gtk.TextTag(tag)
        tag_h1.set_property('size-points', 20 - i * 2)  # h1 biggest, h6 smallest
        tag_h1.set_property('weight', 700)
        table.add(tag_h1)
    tag_bold = gtk.TextTag('bold')
    tag_bold.set_property('weight', 700)
    table.add(tag_bold)
    tag_italic = gtk.TextTag('italic')
    tag_italic.set_property('style', pango.STYLE_ITALIC)
    table.add(tag_italic)


# setup the tag table
#table = gtk.TextTagTable()
table = gtksourceview.SourceTagTable()
setup_tags(table)


# main application classes
class DokuwikiView(GladeDelegate):
    """ A dokuwiki editor window """

    def __init__(self):
        GladeDelegate.__init__(self, gladefile="pydoku",
                               delete_handler=self.quit_if_last)
        self.setup_wikitree()
        self.setup_attachments()
        self.setup_side()
        self.setup_sourceview()
        self.setup_htmlview()
        # Keep references to the notebook pages so view toggles can
        # remove/re-insert them later.
        self.page_edit = self.view.notebook1.get_nth_page(0)
        self.page_view = self.view.notebook1.get_nth_page(1)
        self.page_attach = self.view.notebook1.get_nth_page(2)
        self.show_all()

    def quit_if_last(self, *args):
        self.htmlview.destroy()  # for some reason has to be deleted explicitly
        GladeDelegate.quit_if_last(self)

    # general interface functions
    def post(self, text):
        """Show *text* in the status bar."""
        id = self.view.statusbar.get_context_id("zap")
        self.view.statusbar.push(id, text)

    # setup functions
    def setup_side(self):
        """Build the side panel: version log and backlinks lists."""
        columns = ['user', 'sum', 'type', 'version', 'ip']
        columns = [Column(s) for s in columns]
        self.versionlist = ObjectList(columns)
        self.view.side_vbox.pack_start(gtk.Label('Version Log:'), False, False)
        self.view.side_vbox.add(self.versionlist)
        self.view.side_vbox.pack_start(gtk.Label('BackLinks:'), False, False)
        self.backlinks = ObjectList([Column('name')])
        self.view.side_vbox.add(self.backlinks)

    def setup_attachments(self):
        """Build the attachment list shown on the attach page."""
        columns = ['id', 'size', 'lastModified', 'writable', 'isimg', 'perms']
        columns = [Column(s) for s in columns]
        self.attachmentlist = ObjectList(columns)
        self.view.attachments_vbox.add(self.attachmentlist)

    def setup_wikitree(self):
        """Build the page tree and hook up selection handling."""
        columns = ['name', 'id', 'lastModified', 'perms', 'size']
        columns = [Column(s) for s in columns]
        self.objectlist = ObjectTree(columns)
        self.objectlist.connect("selection-changed", self.selected)
        self.view.vbox2.add(self.objectlist)

    def setup_htmlview(self):
        """Embed a Mozilla widget for the rendered-HTML page view."""
        self.htmlview = gtkmozembed.MozEmbed()
        self.view.html_scrolledwindow.add(self.htmlview)
        self.htmlview.realize()
        self.htmlview.show()

    def setup_sourceview(self):
        """Create the wiki-text editor with clipboard accelerators."""
        self.buffer = DokuwikiBuffer(table)
        self.editor = gtksourceview.SourceView(self.buffer)
        accel_group = gtk.AccelGroup()
        self.get_toplevel().add_accel_group(accel_group)
        self.editor.add_accelerator("paste-clipboard", accel_group,
                                    ord('v'), gtk.gdk.CONTROL_MASK, 0)
        self.editor.add_accelerator("copy-clipboard", accel_group,
                                    ord('c'), gtk.gdk.CONTROL_MASK, 0)
        self.editor.add_accelerator("cut-clipboard", accel_group,
                                    ord('x'), gtk.gdk.CONTROL_MASK, 0)
        #self.editor = gtk.TextView(self.buffer)
        self.editor.set_left_margin(5)
        self.editor.set_right_margin(5)
        self.editor.set_wrap_mode(gtk.WRAP_WORD_CHAR)
        self.view.scrolledwindow1.add(self.editor)

    # dokuwiki operations
    def get_version(self):
        """Fetch the remote wiki version and display it."""
        version = self._rpc.dokuwiki.getVersion()
        self.view.version.set_text(version)

    def get_pagelist(self):
        """Reload the complete page tree from the wiki."""
        pages = self._rpc.wiki.getAllPages()
        self._sections = {}
        self.objectlist.clear()
        for page in pages:
            self.add_page(page)
        self.view.new_page.set_sensitive(True)
        self.view.delete_page.set_sensitive(True)

    def get_attachments(self, ns):
        """Show the attachments of namespace *ns*."""
        attachments = self._rpc.wiki.getAttachments(ns, {})
        attachments = [DictWrapper(s) for s in attachments]
        self.attachmentlist.add_list(attachments)

    def get_backlinks(self, pagename):
        """Show the pages linking to *pagename*."""
        backlinks = self._rpc.wiki.getBackLinks(pagename)
        backlinks = [Section(s) for s in backlinks]
        self.backlinks.add_list(backlinks)

    def get_versions(self, pagename):
        """Show the edit history of *pagename*."""
        versionlist = self._rpc.wiki.getPageVersions(pagename, 0)
        versionlist = [DictWrapper(s) for s in versionlist]
        self.versionlist.add_list(versionlist)

    def get_htmlview(self, pagename):
        """Render *pagename* as HTML in the embedded browser."""
        text = self._rpc.wiki.getPageHTML(pagename)
        # NOTE(review): other widgets are reached via self.view.X — confirm
        # that self.url (rather than self.view.url) resolves here.
        self.htmlview.render_data(text, len(text), self.url.get_text(),
                                  'text/html')
        # XXX following is for gtkhtml (not used)
        #self.document.clear()
        #self.document.open_stream('text/html')
        #self.document.write_stream(text)
        #self.document.close_stream()

    def put_page(self, text, summary, minor):
        """Save *text* as the current page with an optional commit summary."""
        pars = {}
        if summary:
            pars['sum'] = summary
        if minor:
            pars['minor'] = minor
        self._rpc.wiki.putPage(self.current, text, pars)
        # A brand-new page must also appear in the local tree.
        if self.current not in self._sections:
            self.add_page({"id": self.current})

    # put a page into the page tree
    def add_page(self, page):
        """Insert *page* (a dict with an "id" key) into the tree, creating
        intermediate namespace nodes as needed."""
        name = page["id"]
        path = name.split(":")
        prev = None
        for i, pathm in enumerate(path):
            if i == len(path) - 1:  # a page
                new = DictWrapper(page, pathm)
                self._sections[name] = new
                self.objectlist.append(prev, new, False)
            else:  # a namespace
                part_path = ":".join(path[:i + 1])
                if part_path not in self._sections:
                    new = Section(pathm, part_path)
                    self._sections[part_path] = new
                    self.objectlist.append(prev, new, False)
                else:
                    new = self._sections[part_path]
            prev = new

    # page selected callback
    def selected(self, widget, obj):
        """Load the newly selected page (or namespace attachments)."""
        if not obj:
            return
        if isinstance(obj, Section):
            self.get_attachments(obj.id)
        if not isinstance(obj, DictWrapper):
            return
        text = self._rpc.wiki.getPage(obj.id)
        self.current = obj.id
        self.buffer.add_text(text)
        self.get_htmlview(self.current)
        self.get_backlinks(obj.id)
        self.get_versions(obj.id)

    # kiwi interface callbacks
    def on_view_edit__toggled(self, widget):
        if widget.get_active():
            self.notebook1.insert_page(self.page_edit, gtk.Label('edit'), 0)
        else:
            self.notebook1.remove_page(self.notebook1.page_num(self.page_edit))

    def on_view_view__toggled(self, widget):
        if widget.get_active():
            self.notebook1.insert_page(self.page_view, gtk.Label('view'), 1)
        else:
            self.notebook1.remove_page(self.notebook1.page_num(self.page_view))

    def on_view_attachments__toggled(self, widget):
        if widget.get_active():
            self.notebook1.insert_page(self.page_attach, gtk.Label('attach'))
        else:
            self.notebook1.remove_page(self.notebook1.page_num(self.page_attach))

    def on_view_extra__toggled(self, widget):
        # NOTE(review): _prevpos is only set when the panel is hidden; if the
        # toggle can start inactive and be activated first, this would raise.
        if widget.get_active():
            self.backlinks.show()
            self.versionlist.show()
            self.view.hpaned2.set_position(self._prevpos)
        else:
            self.backlinks.hide()
            self.versionlist.hide()
            self._prevpos = self.view.hpaned2.get_position()
            self.view.hpaned2.set_position(self.view.hpaned2.allocation.width)

    def on_button_list__clicked(self, *args):
        """Ask for credentials, connect to the wiki and load the page list."""
        self.post("Connecting...")
        dialog = ModalDialog("User Details")
        # prepare
        widgets = {}
        items = ["user", "password"]
        for i, item in enumerate(items):
            widgets[item] = gtk.Entry()
            if i == 1:
                widgets[item].set_visibility(False)  # mask the password
            hbox = gtk.HBox()
            hbox.pack_start(gtk.Label(item + ': '))
            hbox.add(widgets[item])
            dialog.vbox.add(hbox)
        dialog.show_all()
        # run
        response = dialog.run()
        user = widgets['user'].get_text()
        password = widgets['password'].get_text()
        dialog.destroy()
        if not response == gtk.RESPONSE_ACCEPT:
            return
        # following commented line is for gtkhtml (not used)
        #simplebrowser.currentUrl = self.view.url.get_text()
        # handle response
        params = urlencode({'u': user, 'p': password})
        fullurl = self.view.url.get_text() + "/lib/exe/xmlrpc.php?" + params
        self._rpc = ServerProxy(fullurl)
        try:
            self.get_version()
        except Exception:
            # Bug fix: bail out instead of fetching pages over a dead proxy.
            self.post("Failure to connect")
            return
        self.get_pagelist()
        self.post("Connected")

    def on_delete_page__clicked(self, *args):
        dialog = ModalDialog("Are you sure?")
        response = dialog.run()
        if response == gtk.RESPONSE_ACCEPT:
            value = self._sections[self.current]
            self.objectlist.remove(value)
            # DokuWiki deletes a page when saved with empty content.
            self._rpc.wiki.putPage(self.current, "", {})
            self.current = None
        # Destroy unconditionally so the dialog is not leaked on cancel.
        dialog.destroy()

    def on_new_page__clicked(self, *args):
        dialog = ModalDialog("Name for the new page")
        text_w = gtk.Entry()
        text_w.show()
        dialog.vbox.add(text_w)
        response = dialog.run()
        if response == gtk.RESPONSE_ACCEPT:
            text = text_w.get_text()
            if text:
                # The page is only created remotely on the next save.
                self.current = text
        dialog.destroy()

    def on_button_h1__clicked(self, *args):
        self.buffer.set_style('h1')

    def on_button_h2__clicked(self, *args):
        self.buffer.set_style('h2')

    def on_button_h3__clicked(self, *args):
        self.buffer.set_style('h3')

    def on_button_h4__clicked(self, *args):
        self.buffer.set_style('h4')

    def on_button_h5__clicked(self, *args):
        self.buffer.set_style('h5')

    def on_button_h6__clicked(self, *args):
        self.buffer.set_style('h6')

    def on_button_bold__clicked(self, *args):
        self.buffer.set_style('bold')

    def on_button_italic__clicked(self, *args):
        self.buffer.set_style('italic')

    def on_button_clear_style__clicked(self, *args):
        self.buffer.clear_style()

    def on_button_save__clicked(self, *args):
        """Ask for a commit message and push the buffer to the wiki."""
        self.post("Saving...")
        dialog = ModalDialog("Commit message")
        entry = gtk.Entry()
        minor = gtk.CheckButton("Minor")
        dialog.vbox.add(gtk.Label("Your attention to detail\nIs greatly appreciated"))
        dialog.vbox.add(entry)
        dialog.vbox.add(minor)
        dialog.show_all()
        response = dialog.run()
        if response == gtk.RESPONSE_ACCEPT:
            text = self.buffer.process_text()
            self.put_page(text, entry.get_text(), minor.get_active())
            self.get_htmlview(self.current)
            self.get_versions(self.current)
            self.post("Saved")
        dialog.destroy()

    # unused stuff
    def request_url(self, document, url, stream):
        f = simplebrowser.open_url(url)
        stream.write(f.read())

    def setup_htmlview_gtkhtml(self):  # XXX not used now
        self.document = gtkhtml2.Document()
        self.document.connect('request_url', self.request_url)
        self.htmlview = gtkhtml2.View()
        self.htmlview.set_document(self.document)

    def setup_sourceview_gtksourceview(self):  # XXX not used now
        self.buffer = gtksourceview.Buffer(table)
        self.editor = gtksourceview.View(self.buffer)
        if True:
            self.editor.set_show_line_numbers(True)
            lm = gtksourceview.LanguageManager()
            self.editor.set_indent_on_tab(True)
            self.editor.set_indent_width(4)
            self.editor.set_property("auto-indent", True)
            self.editor.set_property("highlight-current-line", True)
            self.editor.set_insert_spaces_instead_of_tabs(True)
            lang = lm.get_language("python")
            self.buffer.set_language(lang)
            self.buffer.set_highlight_syntax(True)


if __name__ == "__main__":
    app = DokuwikiView()
    app.show()
    gtk.main()
St Fagans was probably inhabited in prehistoric times, as a number of axe-heads and a spear head have been found in the Plymouth Woods. According to Geoffrey of Monmouth, Ffagan, after whom the village is named, and his companion, Dyfan, brought Christianity to Britain and “purged away the paganism of well-nigh the whole island”. This high praise is based on a very dubious tale of the 2nd century, though a church, dedicated to St Fagan, once stood in the grounds of the castle. Peter le Sore came into the possession of St Fagans after the Norman Conquest and built a motte and bailey castle to control the crossing of the River Ely. The le Sores held the estate for over 200 years before it passed through marriage to the le Vele family from Gloucestershire. In 1475, again because of a matrimonial alliance, David Mathew of Radyr became lord of the manor. At that time the original castle was already crumbling. Nothing of it now remains but one relic from those mediaeval days is the holy well of St Fagan, situated between the two lower fishponds in the Museum of Welsh Life. According to Richard Symonds, who visited the site with Charles I in 1645, people came to drink at the well as a cure for epilepsy and, “ after they have drank of it they relate their health ever since”. Dr. John Gibbon bought the manor in 1560 and began to build the castle which is now part of the museum. He sold the property, still only half built, to his brother-in-law, Nicholas Herbert, who lacked the funds to complete the work. His son, William, desperately needed money to join Raleigh’s ill-fated expedition to South America in 1616 and accepted an offer for the estate from Sir Edward Lewis of The Van. This family, which could trace its ancestry back to the lords of Senghenydd, was one of the most influential in Glamorgan. Different branches of the family acquired estates in the Taff and Rhymney valleys, Llanishen, Whitchurch, Radyr, Penmark and St Fagans. 
Their wealth was probably the reason Charles I met the gentlemen of Glamorgan at St Fagans in 1645 in the hope of reviving his flagging fortunes. Usually St Fagans was a sleepy, agricultural community, similar to others in the Vale of Glamorgan, but on 8 May 1648 this tranquillity was brutally disturbed. The Civil War, apparently at an end two years earlier, flared up again as Charles I plotted to regain his power. A motley crew of disillusioned Roundheads, who had fought with Parliament earlier in the war, joined forces with diehard Royalists against Cromwell’s Model Army. In South Wales, the Royalists, led by Major-general Rowland Laugharne, planned to seize Cardiff Castle as the first stage in setting the West Country ablaze. His army was 8,000 strong but many of them were no more than bewildered farmhands, armed with pikes and pitchforks. They assembled at St Nicholas but, on hearing that Cromwell was hastening to Wales, Laugharne marched on Cardiff. When Colonel Thomas Horton forestalled him by mounting a heavy guard at Ely Bridge, Laugharne was forced to look for an alternative route through St Fagans. Monday 8 May was a raw, unpleasant morning when the armies clashed on farmland to the north of the castle. Despite superiority in numbers, the Royalists were no match for Horton’s hardened, professional army. Within two hours, Laugharne’s makeshift forces were put to flight, relentlessly pursued by Horton’s cavalry. From St Fagans alone, 65 men were killed and the harvest that year was gathered in by their widows. The River Ely was said to run red with blood and, though the battle is rarely mentioned in English history books, it was the greatest and most significant clash of the Civil War in Wales. The castle dominated the village of St Fagans which at that time consisted of little more than the green, the church, the mill near the bridge, and a few cottages and farm houses. 
The church was dedicated to St Mary and its history can be traced back to the 12th century. Improvements in the 14th century produced a nave and chancel which are superb examples of mediaeval work. A restoration was carried out in 1860 and one of the stained glass windows from that period depicts the ministry of St Fagan. In 1730 the property passed to Other Windsor, the Third Earl of Plymouth, following his marriage to Elizabeth Lewis. During the 18th century, the new owners were usually absentee landlords and this was a time of stagnation, both for the castle and for the village. The situation changed in 1852 when Robert Windsor-Clive, heir to the estate, chose to live at St Fagans with his new bride, Mary. He died seven years later but Lady Windsor continued to live at the castle, and in 1868-69 carried out an extensive restoration. Considerable improvements were also made in the village which one visitor described as, “one of the prettiest and cleanest little villages in the Vale”. St Fagans was virtually a model estate, where new homes were built for its workers, old cottages were refurbished and a national school was built in a Tudor style. By contrast, the Plymouth Arms was rebuilt in 1895 in a Jacobean style. To ensure that there was no unruly behaviour, a former butler from the castle was installed as landlord. He kept a strict regime, making it clear to customers that two pints of beer was their limit. A paternal system existed until well into the 20th century. The parish was virtually self sufficient with one pub, one butcher, one shoemaker and one shop, one of each being sufficient for the village’s needs. Most of the people in the village were employed on the estate. There was a great occasion in 1878 when Lady Mary’s son came of age. A special train brought 400 of the family’s Glamorgan tenants to St Fagans for a banquet at the castle, followed by a fireworks display and other entertainment.
The festivities went on for two days, in which 300 chickens, four tons of meat and 25 hogsheads of ale were consumed. So much wine was drunk that extra supplies were required. During the 20th century, the Plymouth family continued Lady Mary’s paternal role at St Fagans, using the castle as a summer residence. More than 50 staff descended on the castle when it was used to entertain important visitors. Queen Mary stayed there in 1938 and a few years earlier the Prince of Wales had been among the guests. Before World War One, a banqueting hall for 40 people was built in the grounds which, during that war, became a military hospital. Another notable house in St Fagans, just off Michaelston Road, was The Court. This became the chosen residence of two families connected with the world of racing. When Lord Glanely, one of the foremost shipping magnates in Cardiff, lived there after World War One, his horses won every classic race including the Derby. After Glanely retired to Newmarket, the Llewellyn family lived at The Court. Harry Llewellyn was a keen horseman and finished second in the Grand National of 1936. His greatest moment came in 1952, when he was the captain of the equestrian team that won Britain’s only gold medal in the Olympic Games at Helsinki. In 1947 the Earl of Plymouth and his mother donated St Fagans Castle and its grounds to the National Museum of Wales as a site for a folk museum. Since that time, the Museum of Welsh Life has become one of the top tourist sites in the country, as buildings of all kinds have been acquired from every part of Wales. Among those rebuilt in the grounds are old farm houses, miners’ cottages, a chapel, a miners’ institute, a postwar “prefab” and even the “house of the future”. Visitors can see craftsmen, such as the cooper or the blacksmith, practise their traditional crafts in an authentic setting. There are also three indoor museums and the castle itself, overlooking lovely gardens and parkland, is a great attraction. 
When the Earl of Plymouth made a gift of 45 acres of woodland between the new Ely housing estate and the river in 1922, it became a popular place for picnics and ramblers. Sadly, the Plymouth Woods have gained a notoriety for crime and vandalism in recent years. St Fagans itself has remained a charming village since becoming a part of Cardiff in 1974. Housing development has been restricted to the areas around St Fagans Drive and The Court and, in terms of population, the suburb is the smallest in Cardiff. There are few more delightful ways of spending a summer’s afternoon than to watch a game of cricket at the picturesque ground of the local club, the home of a team which a few years ago won the National Village Cricket Final at Lord’s.
# Copyright 2012, 2013 Canonical Ltd.  This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).

"""Test forms."""

from __future__ import (
    absolute_import,
    print_function,
    unicode_literals,
    )

# Shadow the built-in `str` so accidental use fails fast: with
# unicode_literals active, this module should only handle unicode text.
str = None

__metaclass__ = type
__all__ = []

import json

from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import validate_email
from django.http import QueryDict
from maasserver.enum import (
    ARCHITECTURE,
    ARCHITECTURE_CHOICES,
    NODE_AFTER_COMMISSIONING_ACTION_CHOICES,
    NODE_STATUS,
    NODEGROUP_STATUS,
    NODEGROUPINTERFACE_MANAGEMENT,
    )
from maasserver.forms import (
    AdminNodeForm,
    AdminNodeWithMACAddressesForm,
    BulkNodeActionForm,
    CommissioningScriptForm,
    ConfigForm,
    DownloadProgressForm,
    EditUserForm,
    get_action_form,
    get_node_create_form,
    get_node_edit_form,
    initialize_node_group,
    INTERFACES_VALIDATION_ERROR_MESSAGE,
    MACAddressForm,
    NewUserCreationForm,
    NodeActionForm,
    NodeForm,
    NodeGroupEdit,
    NodeGroupInterfaceForm,
    NodeGroupWithInterfacesForm,
    NodeWithMACAddressesForm,
    ProfileForm,
    remove_None_values,
    UnconstrainedMultipleChoiceField,
    ValidatorMultipleChoiceField,
    )
from maasserver.models import (
    Config,
    MACAddress,
    Node,
    NodeGroup,
    NodeGroupInterface,
    )
from maasserver.models.config import DEFAULT_CONFIG
from maasserver.node_action import (
    Commission,
    Delete,
    StartNode,
    StopNode,
    UseCurtin,
    )
from maasserver.testing import reload_object
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from metadataserver.models import CommissioningScript
from netaddr import IPNetwork
from provisioningserver.enum import POWER_TYPE_CHOICES
from testtools.matchers import (
    AllMatch,
    Equals,
    MatchesRegex,
    MatchesStructure,
    )


class TestHelpers(MAASServerTestCase):
    """Tests for the `initialize_node_group` helper."""

    def test_initialize_node_group_leaves_nodegroup_reference_intact(self):
        preselected_nodegroup = factory.make_node_group()
        node = factory.make_node(nodegroup=preselected_nodegroup)
        initialize_node_group(node)
        self.assertEqual(preselected_nodegroup, node.nodegroup)

    def test_initialize_node_group_initializes_nodegroup_to_form_value(self):
        node = Node(
            NODE_STATUS.DECLARED,
            architecture=factory.getRandomEnum(ARCHITECTURE))
        nodegroup = factory.make_node_group()
        initialize_node_group(node, nodegroup)
        self.assertEqual(nodegroup, node.nodegroup)

    def test_initialize_node_group_defaults_to_master(self):
        node = Node(
            NODE_STATUS.DECLARED,
            architecture=factory.getRandomEnum(ARCHITECTURE))
        initialize_node_group(node)
        self.assertEqual(NodeGroup.objects.ensure_master(), node.nodegroup)


class NodeWithMACAddressesFormTest(MAASServerTestCase):
    """Tests for `NodeWithMACAddressesForm`."""

    def get_QueryDict(self, params):
        """Convert a plain dict into a Django `QueryDict`.

        List values are installed with `setlist` so multi-valued form
        fields (e.g. mac_addresses) round-trip correctly.
        """
        query_dict = QueryDict('', mutable=True)
        for k, v in params.items():
            if isinstance(v, list):
                query_dict.setlist(k, v)
            else:
                query_dict[k] = v
        return query_dict

    def make_params(self, mac_addresses=None, architecture=None,
                    hostname=None, nodegroup=None):
        """Build form parameters, randomising any value not supplied."""
        if mac_addresses is None:
            mac_addresses = [factory.getRandomMACAddress()]
        if architecture is None:
            architecture = factory.getRandomEnum(ARCHITECTURE)
        if hostname is None:
            hostname = factory.make_name('hostname')
        params = {
            'mac_addresses': mac_addresses,
            'architecture': architecture,
            'hostname': hostname,
        }
        if nodegroup is not None:
            params['nodegroup'] = nodegroup
        return self.get_QueryDict(params)

    def test_NodeWithMACAddressesForm_valid(self):
        architecture = factory.getRandomEnum(ARCHITECTURE)
        form = NodeWithMACAddressesForm(
            self.make_params(
                mac_addresses=['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'],
                architecture=architecture))
        self.assertTrue(form.is_valid())
        self.assertEqual(
            ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f'],
            form.cleaned_data['mac_addresses'])
        self.assertEqual(architecture, form.cleaned_data['architecture'])

    def test_NodeWithMACAddressesForm_simple_invalid(self):
        # If the form only has one (invalid) MAC address field to validate,
        # the error message in form.errors['mac_addresses'] is the
        # message from the field's validation error.
        form = NodeWithMACAddressesForm(
            self.make_params(mac_addresses=['invalid']))
        self.assertFalse(form.is_valid())
        self.assertEqual(['mac_addresses'], list(form.errors))
        self.assertEqual(
            ['Enter a valid MAC address (e.g. AA:BB:CC:DD:EE:FF).'],
            form.errors['mac_addresses'])

    def test_NodeWithMACAddressesForm_multiple_invalid(self):
        # If the form has multiple MAC address fields to validate,
        # if one or more fields are invalid, a single error message is
        # present in form.errors['mac_addresses'] after validation.
        form = NodeWithMACAddressesForm(
            self.make_params(mac_addresses=['invalid_1', 'invalid_2']))
        self.assertFalse(form.is_valid())
        self.assertEqual(['mac_addresses'], list(form.errors))
        self.assertEqual(
            ['One or more MAC addresses is invalid.'],
            form.errors['mac_addresses'])

    def test_NodeWithMACAddressesForm_empty(self):
        # Empty values in the list of MAC addresses are simply ignored.
        form = NodeWithMACAddressesForm(
            self.make_params(
                mac_addresses=[factory.getRandomMACAddress(), '']))
        self.assertTrue(form.is_valid())

    def test_NodeWithMACAddressesForm_save(self):
        macs = ['aa:bb:cc:dd:ee:ff', '9a:bb:c3:33:e5:7f']
        form = NodeWithMACAddressesForm(self.make_params(mac_addresses=macs))
        node = form.save()
        self.assertIsNotNone(node.id)  # The node is persisted.
        self.assertSequenceEqual(
            macs,
            [mac.mac_address for mac in node.macaddress_set.all()])

    def test_includes_nodegroup_field_for_new_node(self):
        self.assertIn(
            'nodegroup',
            NodeWithMACAddressesForm(self.make_params()).fields)

    def test_does_not_include_nodegroup_field_for_existing_node(self):
        params = self.make_params()
        node = factory.make_node()
        self.assertNotIn(
            'nodegroup',
            NodeWithMACAddressesForm(params, instance=node).fields)

    def test_sets_nodegroup_to_master_by_default(self):
        self.assertEqual(
            NodeGroup.objects.ensure_master(),
            NodeWithMACAddressesForm(self.make_params()).save().nodegroup)

    def test_leaves_nodegroup_alone_if_unset_on_existing_node(self):
        # Selecting a node group for a node is only supported on new
        # nodes.  You can't change it later.
        original_nodegroup = factory.make_node_group()
        node = factory.make_node(nodegroup=original_nodegroup)
        factory.make_node_group(network=IPNetwork("192.168.1.0/24"))
        form = NodeWithMACAddressesForm(
            self.make_params(nodegroup='192.168.1.0'), instance=node)
        form.save()
        self.assertEqual(original_nodegroup, reload_object(node).nodegroup)

    def test_form_without_hostname_generates_hostname(self):
        form = NodeWithMACAddressesForm(self.make_params(hostname=''))
        node = form.save()
        self.assertTrue(len(node.hostname) > 0)

    def test_form_with_ip_based_hostname_generates_hostname(self):
        ip_based_hostname = '192-168-12-10.domain'
        form = NodeWithMACAddressesForm(
            self.make_params(hostname=ip_based_hostname))
        node = form.save()
        self.assertNotEqual(ip_based_hostname, node.hostname)


class TestOptionForm(ConfigForm):
    # Fixture form: fields that are NOT known config settings.
    field1 = forms.CharField(label="Field 1", max_length=10)
    field2 = forms.BooleanField(label="Field 2", required=False)


class TestValidOptionForm(ConfigForm):
    # Fixture form: `maas_name` is a known config setting.
    maas_name = forms.CharField(label="Field 1", max_length=10)


class ConfigFormTest(MAASServerTestCase):
    """Tests for `ConfigForm` using the fixture forms above."""

    def test_form_valid_saves_into_db(self):
        value = factory.getRandomString(10)
        form = TestValidOptionForm({'maas_name': value})
        result = form.save()
        self.assertTrue(result)
        self.assertEqual(value, Config.objects.get_config('maas_name'))

    def test_form_rejects_unknown_settings(self):
        value = factory.getRandomString(10)
        value2 = factory.getRandomString(10)
        form = TestOptionForm({'field1': value, 'field2': value2})
        valid = form.is_valid()
        self.assertFalse(valid)
        self.assertIn('field1', form._errors)
        self.assertIn('field2', form._errors)

    def test_form_invalid_does_not_save_into_db(self):
        value_too_long = factory.getRandomString(20)
        form = TestOptionForm({'field1': value_too_long, 'field2': False})
        result = form.save()
        self.assertFalse(result)
        self.assertIn('field1', form._errors)
        self.assertIsNone(Config.objects.get_config('field1'))
        self.assertIsNone(Config.objects.get_config('field2'))

    def test_form_loads_initial_values(self):
        value = factory.getRandomString()
        Config.objects.set_config('field1', value)
        form = TestOptionForm()
        self.assertItemsEqual(['field1'], form.initial)
        self.assertEqual(value, form.initial['field1'])

    def test_form_loads_initial_values_from_default_value(self):
        value = factory.getRandomString()
        DEFAULT_CONFIG['field1'] = value
        form = TestOptionForm()
        self.assertItemsEqual(['field1'], form.initial)
        self.assertEqual(value, form.initial['field1'])


class NodeEditForms(MAASServerTestCase):
    """Tests for `NodeForm`, `AdminNodeForm` and related helpers."""

    def test_NodeForm_contains_limited_set_of_fields(self):
        form = NodeForm()
        self.assertEqual(
            [
                'hostname',
                'after_commissioning_action',
                'architecture',
                'distro_series',
                'nodegroup',
            ],
            list(form.fields))

    def test_NodeForm_changes_node(self):
        node = factory.make_node()
        hostname = factory.getRandomString()
        after_commissioning_action = factory.getRandomChoice(
            NODE_AFTER_COMMISSIONING_ACTION_CHOICES)
        form = NodeForm(
            data={
                'hostname': hostname,
                'after_commissioning_action': after_commissioning_action,
                'architecture': factory.getRandomChoice(ARCHITECTURE_CHOICES),
            },
            instance=node)
        form.save()
        self.assertEqual(hostname, node.hostname)
        self.assertEqual(
            after_commissioning_action, node.after_commissioning_action)

    def test_AdminNodeForm_contains_limited_set_of_fields(self):
        node = factory.make_node()
        form = AdminNodeForm(instance=node)
        self.assertEqual(
            [
                'hostname',
                'after_commissioning_action',
                'architecture',
                'distro_series',
                'power_type',
                'power_parameters',
                'cpu_count',
                'memory',
                'storage',
                'zone',
            ],
            list(form.fields))

    def test_AdminNodeForm_changes_node(self):
        node = factory.make_node()
        zone = factory.make_zone()
        hostname = factory.getRandomString()
        after_commissioning_action = factory.getRandomChoice(
            NODE_AFTER_COMMISSIONING_ACTION_CHOICES)
        power_type = factory.getRandomChoice(POWER_TYPE_CHOICES)
        form = AdminNodeForm(
            data={
                'hostname': hostname,
                'after_commissioning_action': after_commissioning_action,
                'power_type': power_type,
                'architecture': factory.getRandomChoice(ARCHITECTURE_CHOICES),
                'zone': zone.name,
            },
            instance=node)
        form.save()
        self.assertEqual(
            (
                node.hostname,
                node.after_commissioning_action,
                node.power_type,
                node.zone,
            ),
            (hostname, after_commissioning_action, power_type, zone))

    def test_AdminNodeForm_refuses_to_update_hostname_on_allocated_node(self):
        old_name = factory.make_name('old-hostname')
        new_name = factory.make_name('new-hostname')
        node = factory.make_node(
            hostname=old_name, status=NODE_STATUS.ALLOCATED)
        form = AdminNodeForm(
            data={
                'hostname': new_name,
                'architecture': node.architecture,
            },
            instance=node)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            ["Can't change hostname to %s: node is in use." % new_name],
            form._errors['hostname'])

    def test_AdminNodeForm_accepts_unchanged_hostname_on_allocated_node(self):
        old_name = factory.make_name('old-hostname')
        node = factory.make_node(
            hostname=old_name, status=NODE_STATUS.ALLOCATED)
        form = AdminNodeForm(
            data={
                'hostname': old_name,
                'architecture': node.architecture,
            },
            instance=node)
        self.assertTrue(form.is_valid(), form._errors)
        form.save()
        self.assertEqual(old_name, reload_object(node).hostname)

    def test_remove_None_values_removes_None_values_in_dict(self):
        random_input = factory.getRandomString()
        self.assertEqual(
            {random_input: random_input},
            remove_None_values({
                random_input: random_input,
                factory.getRandomString(): None
            }))

    def test_remove_None_values_leaves_empty_dict_untouched(self):
        self.assertEqual({}, remove_None_values({}))

    def test_AdminNodeForm_changes_node_with_skip_check(self):
        node = factory.make_node()
        hostname = factory.getRandomString()
        after_commissioning_action = factory.getRandomChoice(
            NODE_AFTER_COMMISSIONING_ACTION_CHOICES)
        power_type = factory.getRandomChoice(POWER_TYPE_CHOICES)
        power_parameters_field = factory.getRandomString()
        form = AdminNodeForm(
            data={
                'hostname': hostname,
                'after_commissioning_action': after_commissioning_action,
                'architecture': factory.getRandomChoice(ARCHITECTURE_CHOICES),
                'power_type': power_type,
                'power_parameters_field': power_parameters_field,
                'power_parameters_skip_check': True,
            },
            instance=node)
        form.save()
        self.assertEqual(
            (hostname, after_commissioning_action, power_type,
             {'field': power_parameters_field}),
            (node.hostname, node.after_commissioning_action, node.power_type,
             node.power_parameters))

    def test_AdminForm_does_not_permit_nodegroup_change(self):
        # We had to make Node.nodegroup editable to get Django to
        # validate it as non-blankable, but that doesn't mean that we
        # actually want to allow people to edit it through API or UI.
        old_nodegroup = factory.make_node_group()
        node = factory.make_node(nodegroup=old_nodegroup)
        new_nodegroup = factory.make_node_group()
        form = AdminNodeForm(data={'nodegroup': new_nodegroup}, instance=node)
        self.assertRaises(ValueError, form.save)

    def test_get_node_edit_form_returns_NodeForm_if_non_admin(self):
        user = factory.make_user()
        self.assertEqual(NodeForm, get_node_edit_form(user))

    def test_get_node_edit_form_returns_APIAdminNodeEdit_if_admin(self):
        admin = factory.make_admin()
        self.assertEqual(AdminNodeForm, get_node_edit_form(admin))

    def test_get_node_create_form_if_non_admin(self):
        user = factory.make_user()
        self.assertEqual(
            NodeWithMACAddressesForm, get_node_create_form(user))

    def test_get_node_create_form_if_admin(self):
        admin = factory.make_admin()
        self.assertEqual(
            AdminNodeWithMACAddressesForm, get_node_create_form(admin))


class TestNodeActionForm(MAASServerTestCase):
    """Tests for `get_action_form` / `NodeActionForm`."""

    def test_get_action_form_creates_form_class_with_attributes(self):
        user = factory.make_admin()
        form_class = get_action_form(user)
        self.assertEqual(user, form_class.user)

    def test_get_action_form_creates_form_class(self):
        user = factory.make_admin()
        node = factory.make_node(status=NODE_STATUS.DECLARED)
        form = get_action_form(user)(node)
        self.assertIsInstance(form, NodeActionForm)
        self.assertEqual(node, form.node)

    def test_get_action_form_for_admin(self):
        admin = factory.make_admin()
        node = factory.make_node(status=NODE_STATUS.DECLARED)
        node.use_traditional_installer()
        form = get_action_form(admin)(node)
        self.assertItemsEqual(
            [Commission.name, Delete.name, UseCurtin.name],
            form.actions)

    def test_get_action_form_for_user(self):
        user = factory.make_user()
        node = factory.make_node(status=NODE_STATUS.DECLARED)
        form = get_action_form(user)(node)
        self.assertIsInstance(form, NodeActionForm)
        self.assertEqual(node, form.node)
        self.assertItemsEqual({}, form.actions)

    def test_save_performs_requested_action(self):
        admin = factory.make_admin()
        node = factory.make_node(status=NODE_STATUS.DECLARED)
        form = get_action_form(admin)(
            node, {NodeActionForm.input_name: Commission.name})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(NODE_STATUS.COMMISSIONING, node.status)

    def test_rejects_disallowed_action(self):
        user = factory.make_user()
        node = factory.make_node(status=NODE_STATUS.DECLARED)
        form = get_action_form(user)(
            node, {NodeActionForm.input_name: Commission.name})
        self.assertFalse(form.is_valid())
        self.assertEquals(
            {'action': ['Not a permitted action: %s.' % Commission.name]},
            form._errors)

    def test_rejects_unknown_action(self):
        user = factory.make_user()
        node = factory.make_node(status=NODE_STATUS.DECLARED)
        action = factory.getRandomString()
        form = get_action_form(user)(
            node, {NodeActionForm.input_name: action})
        self.assertFalse(form.is_valid())
        self.assertIn(
            "is not one of the available choices.",
            form._errors['action'][0])


class TestUniqueEmailForms(MAASServerTestCase):
    """Tests for unique-email validation in `ProfileForm` and friends."""

    def assertFormFailsValidationBecauseEmailNotUnique(self, form):
        self.assertFalse(form.is_valid())
        self.assertIn('email', form._errors)
        self.assertEquals(1, len(form._errors['email']))
        # Cope with 'Email' and 'E-mail' in error message.
        self.assertThat(
            form._errors['email'][0],
            MatchesRegex(
                r'User with this E-{0,1}mail address already exists.'))

    def test_ProfileForm_fails_validation_if_email_taken(self):
        another_email = '%s@example.com' % factory.getRandomString()
        factory.make_user(email=another_email)
        email = '%s@example.com' % factory.getRandomString()
        user = factory.make_user(email=email)
        form = ProfileForm(instance=user, data={'email': another_email})
        self.assertFormFailsValidationBecauseEmailNotUnique(form)

    def test_ProfileForm_validates_if_email_unchanged(self):
        email = '%s@example.com' % factory.getRandomString()
        user = factory.make_user(email=email)
        form = ProfileForm(instance=user, data={'email': email})
        self.assertTrue(form.is_valid())

    def test_NewUserCreationForm_fails_validation_if_email_taken(self):
        email = '%s@example.com' % factory.getRandomString()
        username = factory.getRandomString()
        password = factory.getRandomString()
        factory.make_user(email=email)
        form = NewUserCreationForm(
            {
                'email': email,
                'username': username,
                'password1': password,
                'password2': password,
            })
        self.assertFormFailsValidationBecauseEmailNotUnique(form)

    def test_EditUserForm_fails_validation_if_email_taken(self):
        another_email = '%s@example.com' % factory.getRandomString()
        factory.make_user(email=another_email)
        email = '%s@example.com' % factory.getRandomString()
        user = factory.make_user(email=email)
        form = EditUserForm(instance=user, data={'email': another_email})
        self.assertFormFailsValidationBecauseEmailNotUnique(form)

    def test_EditUserForm_validates_if_email_unchanged(self):
        email = '%s@example.com' % factory.getRandomString()
        user = factory.make_user(email=email)
        form = EditUserForm(
            instance=user,
            data={
                'email': email,
                'username': factory.getRandomString(),
            })
        self.assertTrue(form.is_valid())


class TestNewUserCreationForm(MAASServerTestCase):
    """Tests for `NewUserCreationForm`."""

    def test_saves_to_db_by_default(self):
        password = factory.make_name('password')
        params = {
            'email': '%s@example.com' % factory.getRandomString(),
            'username': factory.make_name('user'),
            'password1': password,
            'password2': password,
        }
        form = NewUserCreationForm(params)
        form.save()
        self.assertIsNotNone(User.objects.get(username=params['username']))

    def test_does_not_save_to_db_if_commit_is_False(self):
        password = factory.make_name('password')
        params = {
            'email': '%s@example.com' % factory.getRandomString(),
            'username': factory.make_name('user'),
            'password1': password,
            'password2': password,
        }
        form = NewUserCreationForm(params)
        form.save(commit=False)
        self.assertItemsEqual(
            [], User.objects.filter(username=params['username']))

    def test_fields_order(self):
        form = NewUserCreationForm()
        self.assertEqual(
            ['username', 'last_name', 'email', 'password1', 'password2',
             'is_superuser'],
            list(form.fields))


class TestMACAddressForm(MAASServerTestCase):
    """Tests for `MACAddressForm`."""

    def test_MACAddressForm_creates_mac_address(self):
        node = factory.make_node()
        mac = factory.getRandomMACAddress()
        form = MACAddressForm(node=node, data={'mac_address': mac})
        form.save()
        self.assertTrue(
            MACAddress.objects.filter(node=node, mac_address=mac).exists())

    def test_saves_to_db_by_default(self):
        node = factory.make_node()
        mac = factory.getRandomMACAddress()
        form = MACAddressForm(node=node, data={'mac_address': mac})
        form.save()
        self.assertEqual(
            mac, MACAddress.objects.get(mac_address=mac).mac_address)

    def test_does_not_save_to_db_if_commit_is_False(self):
        node = factory.make_node()
        mac = factory.getRandomMACAddress()
        form = MACAddressForm(node=node, data={'mac_address': mac})
        form.save(commit=False)
        self.assertItemsEqual([], MACAddress.objects.filter(mac_address=mac))

    def test_MACAddressForm_displays_error_message_if_mac_already_used(self):
        mac = factory.getRandomMACAddress()
        node = factory.make_mac_address(address=mac)
        node = factory.make_node()
        form = MACAddressForm(node=node, data={'mac_address': mac})
        self.assertFalse(form.is_valid())
        self.assertEquals(
            {'mac_address': ['This MAC address is already registered.']},
            form._errors)
        self.assertFalse(
            MACAddress.objects.filter(node=node, mac_address=mac).exists())


def make_interface_settings():
    """Create a dict of arbitrary interface configuration parameters."""
    network = factory.getRandomNetwork()
    return {
        'ip': factory.getRandomIPInNetwork(network),
        'interface': factory.make_name('interface'),
        'subnet_mask': unicode(network.netmask),
        'broadcast_ip': unicode(network.broadcast),
        'router_ip': factory.getRandomIPInNetwork(network),
        'ip_range_low': factory.getRandomIPInNetwork(network),
        'ip_range_high': factory.getRandomIPInNetwork(network),
        'management': factory.getRandomEnum(NODEGROUPINTERFACE_MANAGEMENT),
    }


# Interface fields that may legitimately be omitted/None.
nullable_fields = [
    'subnet_mask', 'broadcast_ip', 'router_ip', 'ip_range_low',
    'ip_range_high']


class TestNodeGroupInterfaceForm(MAASServerTestCase):
    """Tests for `NodeGroupInterfaceForm`."""

    def test_NodeGroupInterfaceForm_validates_parameters(self):
        form = NodeGroupInterfaceForm(data={'ip': factory.getRandomString()})
        self.assertFalse(form.is_valid())
        self.assertEquals(
            {'ip': ['Enter a valid IPv4 or IPv6 address.']}, form._errors)

    def test_NodeGroupInterfaceForm_can_save_fields_being_None(self):
        settings = make_interface_settings()
        settings['management'] = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
        for field_name in nullable_fields:
            del settings[field_name]
        nodegroup = factory.make_node_group()
        form = NodeGroupInterfaceForm(
            data=settings, instance=NodeGroupInterface(nodegroup=nodegroup))
        interface = form.save()
        field_values = [
            getattr(interface, field_name) for field_name in nullable_fields]
        self.assertThat(field_values, AllMatch(Equals('')))


class TestNodeGroupWithInterfacesForm(MAASServerTestCase):
    """Tests for `NodeGroupWithInterfacesForm`."""

    def test_creates_pending_nodegroup(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        form = NodeGroupWithInterfacesForm(
            data={'name': name, 'uuid': uuid})
        self.assertTrue(form.is_valid(), form._errors)
        nodegroup = form.save()
        self.assertEqual(
            (uuid, name, NODEGROUP_STATUS.PENDING, 0),
            (
                nodegroup.uuid,
                nodegroup.name,
                nodegroup.status,
                nodegroup.nodegroupinterface_set.count(),
            ))

    def test_creates_nodegroup_with_status(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        form = NodeGroupWithInterfacesForm(
            status=NODEGROUP_STATUS.ACCEPTED,
            data={'name': name, 'uuid': uuid})
        self.assertTrue(form.is_valid(), form._errors)
        nodegroup = form.save()
        self.assertEqual(NODEGROUP_STATUS.ACCEPTED, nodegroup.status)

    def test_validates_parameters(self):
        name = factory.make_name('name')
        too_long_uuid = 'test' * 30
        form = NodeGroupWithInterfacesForm(
            data={'name': name, 'uuid': too_long_uuid})
        self.assertFalse(form.is_valid())
        self.assertEquals(
            {'uuid': ['Ensure this value has at most 36 characters (it has 120).']},
            form._errors)

    def test_rejects_invalid_json_interfaces(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        invalid_interfaces = factory.make_name('invalid_json_interfaces')
        form = NodeGroupWithInterfacesForm(
            data={
                'name': name, 'uuid': uuid,
                'interfaces': invalid_interfaces})
        self.assertFalse(form.is_valid())
        self.assertEquals(
            {'interfaces': ['Invalid json value.']},
            form._errors)

    def test_rejects_invalid_list_interfaces(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        invalid_interfaces = json.dumps('invalid interface list')
        form = NodeGroupWithInterfacesForm(
            data={
                'name': name, 'uuid': uuid,
                'interfaces': invalid_interfaces})
        self.assertFalse(form.is_valid())
        self.assertEquals(
            {'interfaces': [INTERFACES_VALIDATION_ERROR_MESSAGE]},
            form._errors)

    def test_rejects_invalid_interface(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        interface = make_interface_settings()
        # Make the interface invalid.
        interface['ip_range_high'] = 'invalid IP address'
        interfaces = json.dumps([interface])
        form = NodeGroupWithInterfacesForm(
            data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
        self.assertFalse(form.is_valid())
        self.assertIn(
            "Enter a valid IPv4 or IPv6 address",
            form._errors['interfaces'][0])

    def test_creates_interface_from_params(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        interface = make_interface_settings()
        interfaces = json.dumps([interface])
        form = NodeGroupWithInterfacesForm(
            data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
        self.assertTrue(form.is_valid(), form._errors)
        form.save()
        nodegroup = NodeGroup.objects.get(uuid=uuid)
        self.assertThat(
            nodegroup.nodegroupinterface_set.all()[0],
            MatchesStructure.byEquality(**interface))

    def test_checks_presence_of_other_managed_interfaces(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        interfaces = []
        for index in range(2):
            interface = make_interface_settings()
            interface['management'] = factory.getRandomEnum(
                NODEGROUPINTERFACE_MANAGEMENT,
                but_not=(NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED, ))
            interfaces.append(interface)
        interfaces = json.dumps(interfaces)
        form = NodeGroupWithInterfacesForm(
            data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
        self.assertFalse(form.is_valid())
        self.assertIn(
            "Only one managed interface can be configured for this cluster",
            form._errors['interfaces'][0])

    def test_creates_multiple_interfaces(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        interface1 = make_interface_settings()
        # Only one interface at most can be 'managed'.
        interface2 = make_interface_settings()
        interface2['management'] = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
        interfaces = json.dumps([interface1, interface2])
        form = NodeGroupWithInterfacesForm(
            data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
        self.assertTrue(form.is_valid(), form._errors)
        form.save()
        nodegroup = NodeGroup.objects.get(uuid=uuid)
        self.assertEqual(2, nodegroup.nodegroupinterface_set.count())

    def test_populates_cluster_name_default(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        form = NodeGroupWithInterfacesForm(
            status=NODEGROUP_STATUS.ACCEPTED,
            data={'name': name, 'uuid': uuid})
        self.assertTrue(form.is_valid(), form._errors)
        nodegroup = form.save()
        self.assertIn(uuid, nodegroup.cluster_name)

    def test_populates_cluster_name(self):
        cluster_name = factory.make_name('cluster_name')
        uuid = factory.getRandomUUID()
        form = NodeGroupWithInterfacesForm(
            status=NODEGROUP_STATUS.ACCEPTED,
            data={'cluster_name': cluster_name, 'uuid': uuid})
        self.assertTrue(form.is_valid(), form._errors)
        nodegroup = form.save()
        self.assertEqual(cluster_name, nodegroup.cluster_name)

    def test_creates_unmanaged_interfaces(self):
        name = factory.make_name('name')
        uuid = factory.getRandomUUID()
        interface = make_interface_settings()
        del interface['management']
        interfaces = json.dumps([interface])
        form = NodeGroupWithInterfacesForm(
            data={'name': name, 'uuid': uuid, 'interfaces': interfaces})
        self.assertTrue(form.is_valid(), form._errors)
        form.save()
        uuid_nodegroup = NodeGroup.objects.get(uuid=uuid)
        self.assertEqual(
            [NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED],
            [
                nodegroup.management
                for nodegroup in uuid_nodegroup.nodegroupinterface_set.all()
            ])


class TestNodeGroupEdit(MAASServerTestCase):
    """Tests for `NodeGroupEdit`."""

    def make_form_data(self, nodegroup):
        """Create `NodeGroupEdit` form data based on `nodegroup`."""
        return {
            'name': nodegroup.name,
            'cluster_name': nodegroup.cluster_name,
            'status': nodegroup.status,
        }

    def test_changes_name(self):
        nodegroup = factory.make_node_group(
            name=factory.make_name('old-name'))
        new_name = factory.make_name('new-name')
        data = self.make_form_data(nodegroup)
        data['name'] = new_name
        form = NodeGroupEdit(instance=nodegroup, data=data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(new_name, reload_object(nodegroup).name)

    def test_refuses_name_change_if_dns_managed_and_nodes_in_use(self):
        nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
        data = self.make_form_data(nodegroup)
        data['name'] = factory.make_name('new-name')
        form = NodeGroupEdit(instance=nodegroup, data=data)
        self.assertFalse(form.is_valid())

    def test_accepts_unchanged_name(self):
        nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
        original_name = nodegroup.name
        form = NodeGroupEdit(
            instance=nodegroup, data=self.make_form_data(nodegroup))
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(original_name, reload_object(nodegroup).name)

    def test_accepts_omitted_name(self):
        nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
        original_name = nodegroup.name
        data = self.make_form_data(nodegroup)
        del data['name']
        form = NodeGroupEdit(instance=nodegroup, data=data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(original_name, reload_object(nodegroup).name)

    def test_accepts_name_change_if_nodegroup_not_accepted(self):
        nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
        nodegroup.status = NODEGROUP_STATUS.PENDING
        data = self.make_form_data(nodegroup)
        data['name'] = factory.make_name('new-name')
        form = NodeGroupEdit(instance=nodegroup, data=data)
        self.assertTrue(form.is_valid())

    def test_accepts_name_change_if_dns_managed_but_no_nodes_in_use(self):
        nodegroup, node = factory.make_unrenamable_nodegroup_with_node()
        node.status = NODE_STATUS.READY
        node.save()
        data = self.make_form_data(nodegroup)
        data['name'] = factory.make_name('new-name')
        form = NodeGroupEdit(instance=nodegroup, data=data)
        self.assertTrue(form.is_valid())
        form.save()
self.assertEqual(data['name'], reload_object(nodegroup).name) def test_accepts_name_change_if_nodes_in_use_but_dns_not_managed(self): nodegroup, node = factory.make_unrenamable_nodegroup_with_node() interface = nodegroup.get_managed_interface() interface.management = NODEGROUPINTERFACE_MANAGEMENT.DHCP interface.save() data = self.make_form_data(nodegroup) data['name'] = factory.make_name('new-name') form = NodeGroupEdit(instance=nodegroup, data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(data['name'], reload_object(nodegroup).name) def test_accepts_name_change_if_nodegroup_has_no_interface(self): nodegroup, node = factory.make_unrenamable_nodegroup_with_node() NodeGroupInterface.objects.filter(nodegroup=nodegroup).delete() data = self.make_form_data(nodegroup) data['name'] = factory.make_name('new-name') form = NodeGroupEdit(instance=nodegroup, data=data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(data['name'], reload_object(nodegroup).name) class TestCommissioningScriptForm(MAASServerTestCase): def test_creates_commissioning_script(self): content = factory.getRandomString().encode('ascii') name = factory.make_name('filename') uploaded_file = SimpleUploadedFile(content=content, name=name) form = CommissioningScriptForm(files={'content': uploaded_file}) self.assertTrue(form.is_valid(), form._errors) form.save() new_script = CommissioningScript.objects.get(name=name) self.assertThat( new_script, MatchesStructure.byEquality(name=name, content=content)) def test_raises_if_duplicated_name(self): content = factory.getRandomString().encode('ascii') name = factory.make_name('filename') factory.make_commissioning_script(name=name) uploaded_file = SimpleUploadedFile(content=content, name=name) form = CommissioningScriptForm(files={'content': uploaded_file}) self.assertEqual( (False, {'content': ["A script with that name already exists."]}), (form.is_valid(), form._errors)) def test_rejects_whitespace_in_name(self): name = 
factory.make_name('with space') content = factory.getRandomString().encode('ascii') uploaded_file = SimpleUploadedFile(content=content, name=name) form = CommissioningScriptForm(files={'content': uploaded_file}) self.assertFalse(form.is_valid()) self.assertEqual( ["Name contains disallowed characters (e.g. space or quotes)."], form._errors['content']) def test_rejects_quotes_in_name(self): name = factory.make_name("l'horreur") content = factory.getRandomString().encode('ascii') uploaded_file = SimpleUploadedFile(content=content, name=name) form = CommissioningScriptForm(files={'content': uploaded_file}) self.assertFalse(form.is_valid()) self.assertEqual( ["Name contains disallowed characters (e.g. space or quotes)."], form._errors['content']) class TestUnconstrainedMultipleChoiceField(MAASServerTestCase): def test_accepts_list(self): value = ['a', 'b'] instance = UnconstrainedMultipleChoiceField() self.assertEqual(value, instance.clean(value)) class TestValidatorMultipleChoiceField(MAASServerTestCase): def test_field_validates_valid_data(self): value = ['test@example.com', 'me@example.com'] field = ValidatorMultipleChoiceField(validator=validate_email) self.assertEqual(value, field.clean(value)) def test_field_uses_validator(self): value = ['test@example.com', 'invalid-email'] field = ValidatorMultipleChoiceField(validator=validate_email) error = self.assertRaises(ValidationError, field.clean, value) self.assertEquals(['Enter a valid email address.'], error.messages) class TestBulkNodeActionForm(MAASServerTestCase): def test_performs_action(self): node1 = factory.make_node() node2 = factory.make_node() node3 = factory.make_node() system_id_to_delete = [node1.system_id, node2.system_id] form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action=Delete.name, system_id=system_id_to_delete)) self.assertTrue(form.is_valid(), form._errors) done, not_actionable, not_permitted = form.save() existing_nodes = list(Node.objects.filter( 
system_id__in=system_id_to_delete)) node3_system_id = reload_object(node3).system_id self.assertEqual( [2, 0, 0], [done, not_actionable, not_permitted]) self.assertEqual( [[], node3.system_id], [existing_nodes, node3_system_id]) def test_first_action_is_empty(self): form = BulkNodeActionForm(user=factory.make_admin()) action = form.fields['action'] default_action = action.choices[0][0] required = action.required # The default action is the empty string (i.e. no action) # and it's a required field. self.assertEqual(('', True), (default_action, required)) def test_gives_stat_when_not_applicable(self): node1 = factory.make_node(status=NODE_STATUS.DECLARED) node2 = factory.make_node(status=NODE_STATUS.FAILED_TESTS) system_id_for_action = [node1.system_id, node2.system_id] form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action=StartNode.name, system_id=system_id_for_action)) self.assertTrue(form.is_valid(), form._errors) done, not_actionable, not_permitted = form.save() self.assertEqual( [0, 2, 0], [done, not_actionable, not_permitted]) def test_gives_stat_when_no_permission(self): user = factory.make_user() node = factory.make_node( status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) system_id_for_action = [node.system_id] form = BulkNodeActionForm( user=user, data=dict( action=StopNode.name, system_id=system_id_for_action)) self.assertTrue(form.is_valid(), form._errors) done, not_actionable, not_permitted = form.save() self.assertEqual( [0, 0, 1], [done, not_actionable, not_permitted]) def test_gives_stat_when_action_is_inhibited(self): node = factory.make_node( status=NODE_STATUS.ALLOCATED, owner=factory.make_user()) form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action=Delete.name, system_id=[node.system_id])) self.assertTrue(form.is_valid(), form._errors) done, not_actionable, not_permitted = form.save() self.assertEqual( [0, 1, 0], [done, not_actionable, not_permitted]) def test_rejects_empty_system_ids(self): form = 
BulkNodeActionForm( user=factory.make_admin(), data=dict(action=Delete.name, system_id=[])) self.assertFalse(form.is_valid(), form._errors) self.assertEqual( ["No node selected."], form._errors['system_id']) def test_rejects_invalid_system_ids(self): node = factory.make_node() system_id_to_delete = [node.system_id, "wrong-system_id"] form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action=Delete.name, system_id=system_id_to_delete)) self.assertFalse(form.is_valid(), form._errors) self.assertEqual( ["Some of the given system ids are invalid system ids."], form._errors['system_id']) def test_rejects_if_no_action(self): form = BulkNodeActionForm( user=factory.make_admin(), data=dict(system_id=[factory.make_node().system_id])) self.assertFalse(form.is_valid(), form._errors) def test_rejects_if_invalid_action(self): form = BulkNodeActionForm( user=factory.make_admin(), data=dict( action="invalid-action", system_id=[factory.make_node().system_id])) self.assertFalse(form.is_valid(), form._errors) class TestDownloadProgressForm(MAASServerTestCase): def test_updates_instance(self): progress = factory.make_download_progress_incomplete(size=None) new_bytes_downloaded = progress.bytes_downloaded + 1 size = progress.bytes_downloaded + 2 error = factory.getRandomString() form = DownloadProgressForm( data={ 'size': size, 'bytes_downloaded': new_bytes_downloaded, 'error': error, }, instance=progress) new_progress = form.save() progress = reload_object(progress) self.assertEqual(progress, new_progress) self.assertEqual(size, progress.size) self.assertEqual(new_bytes_downloaded, progress.bytes_downloaded) self.assertEqual(error, progress.error) def test_rejects_unknown_ongoing_download(self): form = DownloadProgressForm( data={'bytes_downloaded': 1}, instance=None) self.assertFalse(form.is_valid()) def test_get_download_returns_ongoing_download(self): progress = factory.make_download_progress_incomplete() self.assertEqual( progress, DownloadProgressForm.get_download( 
progress.nodegroup, progress.filename, progress.bytes_downloaded + 1)) def test_get_download_recognises_start_of_new_download(self): nodegroup = factory.make_node_group() filename = factory.getRandomString() progress = DownloadProgressForm.get_download(nodegroup, filename, None) self.assertIsNotNone(progress) self.assertEqual(nodegroup, progress.nodegroup) self.assertEqual(filename, progress.filename) self.assertIsNone(progress.bytes_downloaded) def test_get_download_returns_none_for_unknown_ongoing_download(self): self.assertIsNone( DownloadProgressForm.get_download( factory.make_node_group(), factory.getRandomString(), 1))
Every year, when the Hamptons International Film Festival rolls into town, filmmakers and producers do their best to get their offerings to stand out from the dozens and dozens of other movies competing for audience and industry attention. And every year, for the benefit of—and to appeal to—East End audiences, if not industry insiders, a few films get to capitalize on the gravitational pull of a solid link to the local community. For the 2009 installment of the festival, “Paper Man,” featuring Jeff Daniels, Ryan Reynolds, Emma Stone, Kieran Culkin and Lisa Kudrow, has a clear edge in the local connection category: exterior shooting for the film was done exclusively in Montauk just about a year ago. “Paper Man,” written and directed by the wife-and-husband team of Michele Mulroney and Kieran Mulroney, tells the story of failed writer Richard Dunn, played by Mr. Daniels, who rents a cottage in Montauk in an effort to finish his latest novel and possibly save his troubled marriage. His mission is complicated by the ubiquitous presence of Captain Excellent, played by Mr. Reynolds, an imaginary friend who has been his shadow since childhood. Of course, as with every film, the story of the making of “Paper Man” goes back a lot further than last year’s shooting schedule in Montauk. Ms. Mulroney recounted in a telephone interview from her home in Los Angeles last week that she and her husband, who is the brother of actor Dermot Mulroney, have been writing partners for 12 years now. In 2004, she said, they had taken a draft of their script for “Paper Man” to the Sundance Film Institute’s writing lab and, after working on it for a bit, they arrived at a conclusion. One of the biggest hurdles to clear in getting the movie made, beyond securing financing, was casting the lead and the supporting actors. Once shooting started, exteriors in Montauk and interiors in Nyack, New York, Ms. 
Mulroney and her husband started to reap the rewards of the successful casting process, and she had high praise for all of the actors. After casting, the next critical choice was tied to the real estate mantra: location, location, location. The two writers’ story was originally set, at least on the page, in Cape Cod. But when the writer-directors started scouting on the Cape, they quickly discovered that nothing had the texture they were looking for. So they decided to start looking on Long Island and agreed to hold off on re-writes until after a new location had been picked.
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-14 07:22
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Relax several optional Account text fields.

    Each listed field becomes a blank-able CharField defaulting to the
    empty string; the default is applied once and not kept in the schema
    (``preserve_default=False``).
    """

    dependencies = [
        ('accounts', '0011_auto_20160714_1521'),
    ]

    # (field name, max_length) for every Account field altered the same way.
    operations = [
        migrations.AlterField(
            model_name='account',
            name=field_name,
            field=models.CharField(blank=True, default='', max_length=length),
            preserve_default=False,
        )
        for field_name, length in [
            ('college', 30),
            ('entry_year', 4),
            ('phone', 11),
            ('qq', 20),
        ]
    ]
The instalover.com domain name is for sale. Inquire now. Twelve monthly payments of only $499.75 each. Start using the domain today.
'''
#=============================================================================
#     FileName: scale.py
#         Desc:
#       Author: jlpeng
#        Email: jlpeng1201@gmail.com
#     HomePage:
#      Created: 2013-09-23 20:43:19
#   LastChange: 2014-04-09 10:49:21
#      History:
#=============================================================================
Min-max scaling of per-atom descriptor files.

NOTE: Python 2 only (`has_key`, `iterkeys`, `xrange`, `print >>` syntax).

Input file format (produced by `calcDescriptor.py`): one molecule-name line,
followed by tab-indented lines of the form
    \t<atom_id>:<atom_type>:<v1,v2,...>
'''
import sys
from getopt import getopt
from copy import deepcopy

def generate_parameter(infile):
    '''
    Compute per-atom-type min/max scaling parameters from `infile`.

    parameter
    =========
    infile: string, generated by `calcDescriptor.py`

    return
    ======
    para: dict {atom_type:[[min...],[max...]], ...}
    '''
    descriptors = {}  #{atom_type:[[values...],[values...],...], ...}
    inf = open(infile,"r")
    line = inf.readline()
    while line != "":
        # Skip the molecule-name line; collect its tab-indented atom lines.
        line = inf.readline()
        while line.startswith("\t"):
            line = line.strip().split(":")
            atom_type = line[1]
            values = map(float,line[2].split(","))
            if not descriptors.has_key(atom_type):
                descriptors[atom_type] = []
            descriptors[atom_type].append(values)
            line = inf.readline()
    inf.close()
    # Fold all value rows of each atom type into column-wise [mins, maxs].
    para = {}  #{atom_type:[[min...],[max...]],...}
    for atom_type in descriptors.iterkeys():
        para[atom_type] = [deepcopy(descriptors[atom_type][0]),deepcopy(descriptors[atom_type][0])]
        for i in xrange(1,len(descriptors[atom_type])):
            for j in xrange(len(descriptors[atom_type][i])):
                if descriptors[atom_type][i][j] < para[atom_type][0][j]:
                    para[atom_type][0][j] = descriptors[atom_type][i][j]
                if descriptors[atom_type][i][j] > para[atom_type][1][j]:
                    para[atom_type][1][j] = descriptors[atom_type][i][j]
    return para

def save_parameter(para,outfile):
    '''
    Write scaling parameters to `outfile`.

    parameter
    =========
    para: dict, {atom_type:[[min...],[max...]], ...}
    outfile: string, where to save parameters

    parameters will be saved as follows:
    atom_type
    \\tmin max
    \\t...
    atom_type
    \\tmin max
    \\t...
    ...
    '''
    outf = open(outfile,"w")
    for key in para.iterkeys():
        print >>outf, key
        for i in xrange(len(para[key][0])):
            # %.16g keeps full double precision for a lossless round-trip.
            print >>outf, "\t%.16g %.16g"%(para[key][0][i],para[key][1][i])
    outf.close()

def read_parameter(infile):
    '''
    to read scaling parameters from `infile`
    (inverse of `save_parameter`; exits on duplicate atom types)
    '''
    para = {}  # {atom_type:[[min...],[max...]],...}
    inf = open(infile,"r")
    line = inf.readline()
    while line != "":
        atom_type = line.strip()
        if para.has_key(atom_type):
            print >>sys.stderr, "Error: more than one set of scalling parameters found for atom type",atom_type
            inf.close()
            sys.exit(1)
        para[atom_type] = [[],[]]
        line = inf.readline()
        while line.startswith("\t"):
            line = line.split()
            para[atom_type][0].append(float(line[0]))
            para[atom_type][1].append(float(line[1]))
            line = inf.readline()
    inf.close()
    return para

def scale(orig_value, min_, max_):
    # Min-max scale to [0,1]; a constant column (min == max) maps to 0.
    if min_ == max_:
        #return orig_value
        return 0.
    else:
        return 1.*(orig_value-min_)/(max_-min_)

def runScale(para, infile, outfile, verbose):
    '''
    to scale `infile` according to para, and scaled values
    will be saved in `outfile`

    Exits with status 1 if an atom type has no parameters or the
    descriptor count does not match the parameter count.
    '''
    inf = open(infile,"r")
    outf = open(outfile,"w")
    line = inf.readline()
    while line != "":
        # Molecule-name line is copied through unchanged.
        outf.write(line)
        name = line.split()[0]
        line = inf.readline()
        while line.startswith("\t"):
            line = line.strip().split(":")
            if not para.has_key(line[1]):
                print >>sys.stderr,"Error: Can't find scalling parameters for atom type",line[1]
                inf.close()
                outf.close()
                sys.exit(1)
            min_max = para[line[1]]
            orig_values = line[2].split(",")
            if len(min_max[0]) != len(orig_values):
                print >>sys.stderr, "Error: different number of descriptors found for atom type",line[1]
                print >>sys.stderr, "    suppose to be %d, but found %d"%(len(min_max[0]),len(orig_values))
                inf.close()
                outf.close()
                sys.exit(1)
            # First value starts the output line; the rest are comma-joined.
            scaled_value = scale(float(orig_values[0]),min_max[0][0],min_max[1][0])
            if verbose and (scaled_value<=-0.5 or scaled_value>=1.5):
                print "Warning:",name,line[0],line[1],"1",scaled_value
            outf.write("\t%s:%s:%.6g"%(line[0],line[1],scaled_value))
            for i in xrange(1,len(orig_values)):
                scaled_value = scale(float(orig_values[i]),min_max[0][i],min_max[1][i])
                if verbose and (scaled_value<=-0.5 or scaled_value>=1.5):
                    print "Warning:",name,line[0],line[1],i+1,scaled_value
                outf.write(",%.6g"%scaled_value)
            outf.write("\n")
            line = inf.readline()
    inf.close()
    outf.close()

def main(argv=sys.argv):
    # NOTE(review): this arity check counts argv including the program name,
    # so combining `-s`, `-r` and `--verbose` together is rejected here even
    # though the option loop below would accept it — confirm intended usage.
    if len(argv)!=5 and len(argv)!=6:
        print "\nUsage:"
        print "  %s [options] infile outfile"%argv[0]
        print "\nOptions:"
        print "  -s save_filename: save scaling parameters"
        print "  -r restore_filename: restore scaling parameters"
        print "  --verbose: if given, display those with scaled value <=-0.5 or >=1.5"
        print "\nAttention:"
        print "  . if `-s` is given, `infile` will be scalled to (-1,1),"
        print "    and parameters will be saved in `save_filename`"
        print "  . if `-r` is given, scaling `infile` using `restore_filename` instead."
        print ""
        sys.exit(1)
    options,args = getopt(argv[1:],"s:r:",["verbose"])
    if len(args) != 2:
        print "Error: invalid number of arguments"
        sys.exit(1)
    save_file = None
    load_file = None
    verbose = False
    for opt,value in options:
        if opt == "-s":
            save_file = value
        elif opt == "-r":
            load_file = value
        elif opt == "--verbose":
            verbose = True
        else:
            print "Error: invalid option ",opt
            sys.exit(1)
    # `-s`: derive parameters from the input itself, then save them.
    if save_file is not None:
        para = generate_parameter(args[0])
        save_parameter(para,save_file)
    # `-r`: reuse previously saved parameters instead.
    if load_file is not None:
        para = read_parameter(load_file)
    runScale(para,args[0],args[1],verbose)

# NOTE(review): runs on import as well as when executed; consider an
# `if __name__ == "__main__":` guard.
main()
It would hardly be a surprise if you fell for the charm of the delicate leafy-vines design on this doormat. The mats offer the double benefits of elegance and durability. We allow you to personalize your mat with your monogram, a feature that makes these doormats well suited for gifting. The coir surface is very effective at scraping dirt and debris off shoes, so none of it gets inside the house. The vinyl backing is sturdy, making the mats crack-resistant, durable and long-lasting.
from django import forms
from django.utils.translation import ugettext
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property

from apps.products.models import Product

from attributes.models import (
    Attribute, AttributeValue, AttributeOption)


class FilterForm(forms.Form):
    """Faceted-filter form: one multi-checkbox field per filterable
    attribute of the given category."""

    def __init__(self, category, *args, **kwargs):
        # Only visible, filterable attributes attached to this category.
        self._attributes = Attribute\
            .objects\
            .visible()\
            .for_filter()\
            .for_categories([category])

        super().__init__(*args, **kwargs)

        for attr in self._attributes:
            self.fields[attr.full_slug] = forms.MultipleChoiceField(
                widget=forms.CheckboxSelectMultiple,
                label=attr.name,
                required=False)

    def set_options(self, entries):
        """Populate each field's choices with the options actually used by
        `entries`; fields with no applicable options are removed."""
        if not entries:
            self.fields = {}
            return

        choices = {attr.id: [] for attr in self._attributes}

        attr_values = AttributeValue.objects.filter(
            attr__in=self._attributes, entry__in=entries
        ).values_list('id', flat=True)

        options = AttributeOption\
            .objects\
            .filter(attr_values__in=attr_values)\
            .order_by('name')\
            .distinct()

        for option in options:
            choices[option.attr_id].append((option.id, option, ))

        for attr in self._attributes:
            if choices[attr.id]:
                self.fields[attr.full_slug].choices = choices[attr.id]
            else:
                del self.fields[attr.full_slug]

    def get_value_ids(self):
        """Return the selected option ids across all attribute fields,
        read from the raw submitted data."""
        ids = []
        for attr in self._attributes:
            ids += self.data.getlist(attr.full_slug)
        return ids

    def _get_available_options(self):
        # NOTE(review): this method looks dead/broken — `self._entries` is
        # never assigned anywhere in this form, and the lookups use
        # `attribute__in` / `value_option` / `attribute_id` while the rest
        # of this module uses `attr__in` / `attr_id`. Verify against
        # callers before relying on it.
        added_options = []
        options = {attr.pk: [] for attr in self._attributes}

        attr_values = AttributeValue.objects.filter(
            attribute__in=self._attributes, entry__in=self._entries
        ).select_related('value_option')

        for value in attr_values:
            option = value.value_option
            if option not in added_options:
                added_options.append(option)
                options[value.attribute_id].append(option)
        return options


class AttributesForm(forms.ModelForm):
    """Product model form that adds one dynamic field per attribute and
    persists the values via `commit()`."""

    def __init__(
            self, data=None, files=None, instance=None, initial=None,
            **kwargs):
        # When editing an existing product, preload its attribute values.
        if instance and instance.pk:
            initial = self._get_initial_data(instance)

        super().__init__(
            data=data, files=files, instance=instance, initial=initial,
            **kwargs)

        for attr in self._attributes:
            fields = attr.build_form_fields()
            self.fields.update(fields)

    def _get_initial_data(self, instance):
        """Map each attribute's full_slug to the instance's stored value
        (None when the instance has no value for that attribute)."""
        initial = {}
        values = {
            v.attr.full_slug: v.get_value()
            for v in AttributeValue.objects.filter(
                attr__in=self._attributes, entry=instance
            )
        }
        for attr in self._attributes:
            initial[attr.full_slug] = values.get(attr.full_slug)
        return initial

    def clean(self):
        data = self.cleaned_data
        for attr in self._attributes:
            if attr.has_options:
                # A free-text "new option" input wins over the select:
                # create (or reuse) the option and use it as the value.
                new_option = data.get(attr.get_option_form_field_name())
                if new_option:
                    option, c = attr.options.get_or_create(name=new_option)
                    data[attr.full_slug] = option
            if not data.get(attr.full_slug) and attr.is_required:
                raise ValidationError({
                    attr.full_slug: ugettext('{} is required').format(
                        attr.name)
                })
        return data

    def commit(self, instance):
        """Persist cleaned attribute values onto `instance`, restricted to
        the attributes of the instance's category. Returns `instance`."""
        for attr in Attribute.objects.for_categories([instance.category]):
            if attr.full_slug in self.cleaned_data:
                value = self.cleaned_data[attr.full_slug]
                attr.save_value(instance, value)
        return instance

    @cached_property
    def _attributes(self):
        # NOTE(review): builds fields for *all* attributes, while commit()
        # filters by the instance's category — confirm this asymmetry is
        # intentional.
        return list(Attribute.objects.all())

    class Media:
        js = ('attrs/form.js', )

    class Meta:
        model = Product
        fields = ['id']
GLENN ROSS JOHNSON is a Professor Emeritus who served in various positions at Texas A&M University, where he worked for 30 years. Earlier in his career, he served as a teacher, reading consultant, assistant principal, and principal in public schools in Euclid, Ohio, and Clayton, Missouri. He has a BS in Education from Kent State University, an MA in Education from Ohio State University, and an EdD from Columbia University Teachers College. Johnson’s more than 50 publications have focused primarily on instructional strategies, including First Steps to Excellence in College Teaching. After serving as Professor and Head of the Department of Educational Curriculum and Instruction, Johnson, at the request of the President and Vice President of Texas A&M University, established and directed The Center for Teaching Excellence. The Center provided publications, seminars, workshops, and services to faculty members and teaching assistants. While at Texas A&M University, Johnson developed a College Teaching Program that enabled adults holding bachelor’s degrees and master’s degrees in such fields as English, mathematics, physics, biology, business, and allied health to pursue a PhD in Educational Curriculum and Instruction. The program offered courses dealing with college teaching and curriculum, student personnel services, issues in higher education, and administration. Almost all of the students completing the program pursued dissertation research involving teaching in their subject area. Some of the graduates became college and university department chairs, associate deans, deans, and presidents.
# Simple script to test read button state (pressed / not pressed) for TFT buttons # Buttons are located on pins 12, 16 and 18 # BCM (Broadcom SOC channel) numbers are 18, 23 and 24 import os import sys import RPi.GPIO as GPIO # import needed modules lib_path = os.path.abspath(os.path.join('..')) sys.path.append(lib_path) from gpio.PushButtonTracker import PushButton GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) # buttonUp = 18 # buttonMiddle = 23 # buttonDown = 24 # # GPIO.setup(buttonUp, GPIO.IN, GPIO.PUD_UP) # GPIO.setup(buttonMiddle, GPIO.IN, GPIO.PUD_UP) # GPIO.setup(buttonDown, GPIO.IN, GPIO.PUD_UP) upBtn = PushButton("Up", 18, GPIO.PUD_UP) downBtn = PushButton("Down", 24, GPIO.PUD_UP) middleBtn = PushButton("Middle", 23, GPIO.PUD_UP) buttons = [upBtn, middleBtn, downBtn] try: while True: if downBtn.doubleClicked(): print "EXIT" GPIO.cleanup() sys.exit(0) for button in buttons: if button.clicked(): print button.name if button.doubleClicked(): print "Double click " + button.name except KeyboardInterrupt: # trap a CTRL+C keyboard interrupt GPIO.cleanup() # resets all GPIO ports used by this program
The ancient history of the name MacGivern was found in the allfamilycrests.com archives. The MacGivern coat of arms came into existence centuries ago. The process of creating coats of arms (also often called family crests) began in the eleventh century, although a form of proto-heraldry may have existed in some countries before then. The new art of heraldry made it possible for families, and even individual family members, to have their very own coat of arms — one shared by all MacGivern descendants.
# encoding: utf-8
"""
interface.py

Created by Thomas Mangin on 2015-03-31.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""

import os
import socket
from struct import pack
from struct import unpack
from struct import calcsize
from collections import namedtuple

from exabgp.netlink import NetLinkError
from exabgp.netlink.sequence import Sequence
from exabgp.netlink.attributes import Attributes

try:
    getattr(socket,'AF_NETLINK')
except AttributeError:
    raise ImportError('This module only works on unix version with netlink support')


class NetLinkMessage (object):
    """Low-level framing for NETLINK_ROUTE requests/replies.

    Provides `encode`/`decode` for the netlink message header and `send`,
    a generator yielding the payload of each reply message until the
    kernel signals NLMSG_DONE.
    """
    _IGNORE_SEQ_FAULTS = True

    NETLINK_ROUTE = 0

    format = namedtuple('Message','type flags seq pid data')

    pid = os.getpid()
    # One shared raw netlink socket for the whole process.
    netlink = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, NETLINK_ROUTE)

    class Header (object):
        # linux/netlink.h: length, type, flags, sequence, pid
        PACK = 'IHHII'
        LEN = calcsize(PACK)

    class Command (object):
        NLMSG_NOOP = 0x01
        NLMSG_ERROR = 0x02
        NLMSG_DONE = 0x03
        NLMSG_OVERRUN = 0x04

    class Flags (object):
        NLM_F_REQUEST = 0x01  # It is query message.
        NLM_F_MULTI = 0x02    # Multipart message, terminated by NLMSG_DONE
        NLM_F_ACK = 0x04      # Reply with ack, with zero or error code
        NLM_F_ECHO = 0x08     # Echo this query

        # Modifiers to GET query
        NLM_F_ROOT = 0x100    # specify tree root
        NLM_F_MATCH = 0x200   # return all matching
        NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH
        NLM_F_ATOMIC = 0x400  # atomic GET

        # Modifiers to NEW query
        NLM_F_REPLACE = 0x100  # Override existing
        NLM_F_EXCL = 0x200     # Do not touch, if it exists
        NLM_F_CREATE = 0x400   # Create, if it does not exist
        NLM_F_APPEND = 0x800   # Add to end of list

    # Reply message types that indicate a failure.
    errors = {
        Command.NLMSG_ERROR: 'netlink error',
        Command.NLMSG_OVERRUN: 'netlink overrun',
    }

    @classmethod
    def encode (cls, dtype, seq, flags, body, attributes):
        """Return one wire-format netlink message: header + body + attrs."""
        attrs = Attributes.encode(attributes)
        length = cls.Header.LEN + len(attrs) + len(body)
        return pack(cls.Header.PACK, length, dtype, flags, seq, cls.pid) + body + attrs

    @classmethod
    def decode (cls, data):
        """Yield one `format` namedtuple per netlink message in `data`."""
        while data:
            length, ntype, flags, seq, pid = unpack(cls.Header.PACK, data[:cls.Header.LEN])
            if len(data) < length:
                raise NetLinkError("Buffer underrun")
            yield cls.format(ntype, flags, seq, pid, data[cls.Header.LEN:length])
            data = data[length:]

    @classmethod
    def send (cls, dtype, hflags, family=socket.AF_UNSPEC):
        """Send a request of type `dtype` and yield each reply payload.

        Stops when the kernel sends NLMSG_DONE; raises NetLinkError on
        error replies or (unless _IGNORE_SEQ_FAULTS) sequence mismatches.
        """
        sequence = Sequence()

        message = cls.encode(
            dtype,
            sequence,
            hflags,
            pack('Bxxx', family),
            {}
        )

        cls.netlink.send(message)

        while True:
            data = cls.netlink.recv(640000)
            for mtype, flags, seq, pid, payload in cls.decode(data):
                if seq != sequence:
                    if cls._IGNORE_SEQ_FAULTS:
                        continue
                    raise NetLinkError("netlink seq mismatch")
                if mtype == NetLinkMessage.Command.NLMSG_DONE:
                    # End of a multipart reply. Plain `return` instead of
                    # `raise StopIteration()`: PEP 479 turns an explicit
                    # StopIteration inside a generator into RuntimeError.
                    return
                elif mtype in cls.errors:
                    # BUGFIX: the *reply* type (mtype) selects the error;
                    # the old code tested `dtype in cls.errors` while
                    # indexing with mtype, so error replies were missed
                    # and a matching dtype could raise KeyError.
                    raise NetLinkError(cls.errors[mtype])
                else:
                    yield payload

    # def change (self, dtype, family=socket.AF_UNSPEC):
    # 	for _ in self.send(dtype, self.Flags.NLM_F_REQUEST | self.Flags.NLM_F_CREATE,family):
    # 		yield _


class InfoMessage (object):
    """Base class for typed rtnetlink payloads.

    Subclasses define `format` (a namedtuple) and `Header` (a fixed-size
    struct preceding the attribute list).
    """
    # to be defined by the subclasses
    format = namedtuple('Parent', 'to be subclassed')

    # to be defined by the subclasses
    class Header (object):
        PACK = ''
        LEN = 0

    @classmethod
    def decode (cls, data):
        """Unpack the fixed header and append the decoded attribute dict."""
        extracted = list(unpack(cls.Header.PACK, data[:cls.Header.LEN]))
        attributes = Attributes.decode(data[cls.Header.LEN:])
        extracted.append(dict(attributes))
        return cls.format(*extracted)

    @classmethod
    def extract (cls, atype, flags=NetLinkMessage.Flags.NLM_F_REQUEST | NetLinkMessage.Flags.NLM_F_DUMP, family=socket.AF_UNSPEC):
        """Dump all kernel objects of `atype`, yielding decoded records."""
        for data in NetLinkMessage.send(atype, flags, family):
            yield cls.decode(data)
This living room had great bones and beautiful furnishings, but it lacked cohesion and suffered at the hands of inappropriate accessorizing! AFTER: A few favorite family photos were featured together in one spot, the old silk flowers were put in a donation bag, and pretty pillows were taken out of storage. BEFORE: The mantel and coffee table were in need of decluttering and rearranging. The conversation seating arrangement also needed work! AFTER: Just by reworking what was already there and bringing a few things from elsewhere in this home, the room became magazine worthy.
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

import bpy
import blf

from mathutils import Color, Vector, Matrix, Quaternion, Euler

from .bpy_inspect import BlRna
from .utils_python import DummyObject
from .utils_gl import cgl

#============================================================================#

# Note: making a similar wrapper for Operator.report is impossible,
# since Blender only shows the report from the currently executing operator.

# ===== MESSAGEBOX ===== #
# Registered lazily (only if no other addon already registered it), since
# several addons may ship this same shared operator.
# No class docstring on purpose: Blender would use it as the operator
# description/tooltip.
if not hasattr(bpy.types, "WM_OT_messagebox"):
    class WM_OT_messagebox(bpy.types.Operator):
        bl_idname = "wm.messagebox"

        # "Attention!" is quite generic caption that suits
        # most of the situations when "OK" button is desirable.
        # bl_label isn't really changeable at runtime
        # (changing it causes some memory errors)
        bl_label = "Attention!"

        # We can't pass arguments through normal means,
        # since in this case a "Reset" button would appear
        args = {}

        # If we don't define execute(), there would be
        # an additional label "*Redo unsupported*"
        def execute(self, context):
            return {'FINISHED'}

        def invoke(self, context, event):
            # Pull the "arguments" out of the class-level dict filled
            # by messagebox() below.
            text = self.args.get("text", "")
            self.icon = self.args.get("icon", 'NONE')
            if (not text) and (self.icon == 'NONE'):
                return {'CANCELLED'}

            border_w = 8*2
            icon_w = (0 if (self.icon == 'NONE') else 16)
            w_incr = border_w + icon_w  # NOTE(review): w_incr is computed but never used

            width = self.args.get("width", 300) - border_w

            # Word-wrap the text into self.lines; split_text returns the
            # widest resulting line, which defines the final popup width.
            self.lines = []
            max_x = cgl.text.split_text(width, icon_w, 0, text, self.lines, font=0)
            width = max_x + border_w

            self.spacing = self.args.get("spacing", 0.5)
            self.spacing = max(self.spacing, 0.0)

            wm = context.window_manager

            confirm = self.args.get("confirm", False)

            if confirm:
                # Dialog variant: has an "OK" button (purely cosmetic).
                return wm.invoke_props_dialog(self, width)
            else:
                return wm.invoke_popup(self, width)

        def draw(self, context):
            layout = self.layout

            col = layout.column()
            col.scale_y = 0.5 * (1.0 + self.spacing * 0.5)

            icon = self.icon

            # Only the first line carries the icon; subsequent lines get a
            # leading space so the text columns line up.
            for line in self.lines:
                if icon != 'NONE': line = " "+line
                col.label(text=line, icon=icon)
                icon = 'NONE'

    bpy.utils.register_class(WM_OT_messagebox) # REGISTER

def messagebox(text, icon='NONE', width=300, confirm=False, spacing=0.5):
    """
    Display a non-blocking message box with the given text and icon.

    text -- the messagebox's text
    icon -- the icon (displayed at the start of the text)
        Defaults to 'NONE' (no icon).
    width -- the messagebox's max width
        Defaults to 300 pixels.
    confirm -- whether to display "OK" button (this is purely
        cosmetical, as the message box is non-blocking).
        Defaults to False.
    spacing -- relative distance between the lines
        Defaults to 0.5.
    """
    # Arguments are smuggled through a class-level dict because adding real
    # operator properties would make Blender show a "Reset" button.
    WM_OT_messagebox = bpy.types.WM_OT_messagebox
    WM_OT_messagebox.args["text"] = text
    WM_OT_messagebox.args["icon"] = icon
    WM_OT_messagebox.args["width"] = width
    WM_OT_messagebox.args["spacing"] = spacing
    WM_OT_messagebox.args["confirm"] = confirm
    bpy.ops.wm.messagebox('INVOKE_DEFAULT')

#============================================================================#

# Note:
# if item is property group instance and item["pi"] = 3.14,
# in UI it should be displayed like this: layout.prop(item, '["pi"]')

# ===== NESTED LAYOUT ===== #
class NestedLayout:
    """
    Utility for writing more structured UI drawing code.
    Attention: layout properties are propagated to sublayouts!

    Example:

    def draw(self, context):
        layout = NestedLayout(self.layout, self.bl_idname)

        exit_layout = True

        # You can use both the standard way:

        sublayout = layout.split()
        sublayout.label("label A")
        sublayout.label("label B")

        # And the structured way:

        with layout:
            layout.label("label 1")
            if exit_layout: layout.exit()
            layout.label("label 2") # won't be executed

        with layout.row(True)["main"]:
            layout.label("label 3")
            with layout.row(True)(enabled=False):
                layout.label("label 4")
                if exit_layout: layout.exit("main")
                layout.label("label 5") # won't be executed
            layout.label("label 6") # won't be executed

        with layout.fold("Foldable micro-panel", "box"):
            if layout.folded: layout.exit()
            layout.label("label 7")
            with layout.fold("Foldable 2"):
                layout.label("label 8") # not drawn if folded
    """

    # Names of UILayout methods that create sub-layouts; calls to these are
    # intercepted so the result is wrapped in a NestedLayout too.
    _sub_names = {"row", "column", "column_flow", "box", "split", "menu_pie"}

    # Attribute values assumed for a fresh layout; used both to propagate
    # settings to sublayouts and to answer attribute reads on dummy layouts.
    _default_attrs = dict(
        active = True,
        alert = False,
        alignment = 'EXPAND',
        enabled = True,
        operator_context = 'INVOKE_DEFAULT',
        scale_x = 1.0,
        scale_y = 1.0,
    )

    def __new__(cls, layout, idname="", parent=None):
        """
        Wrap the layout in a NestedLayout.
        To avoid interference with other panels' foldable
        containers, supply panel's bl_idname as the idname.
        """
        if isinstance(layout, cls) and (layout._idname == idname): return layout

        self = object.__new__(cls)
        self._idname = idname
        self._parent = parent
        self._layout = layout  # None means a "dummy" (non-drawing) layout
        self._stack = [self]
        self._attrs = dict(self._default_attrs)
        self._tag = None

        # propagate settings to sublayouts
        if parent: self(**parent._stack[-1]._attrs)

        return self

    def __getattr__(self, name):
        # Attribute access is delegated to the innermost layout on the stack.
        layout = self._stack[-1]._layout
        if not layout:
            # This is the dummy layout; imitate normal layout
            # behavior without actually drawing anything.
            if name in self._sub_names:
                return (lambda *args, **kwargs: NestedLayout(None, self._idname, self))
            else:
                return self._attrs.get(name, self._dummy_callable)

        if name in self._sub_names:
            # Wrap sub-layout factories so their results are NestedLayouts too.
            func = getattr(layout, name)
            return (lambda *args, **kwargs: NestedLayout(func(*args, **kwargs), self._idname, self))
        else:
            return getattr(layout, name)

    def __setattr__(self, name, value):
        # Underscore-prefixed names are NestedLayout internals; everything
        # else is recorded (for propagation) and forwarded to the UILayout.
        if name.startswith("_"):
            self.__dict__[name] = value
        else:
            wrapper = self._stack[-1]
            wrapper._attrs[name] = value
            if wrapper._layout: setattr(wrapper._layout, name, value)

    def __call__(self, **kwargs):
        """Batch-set layout attributes."""
        wrapper = self._stack[-1]
        wrapper._attrs.update(kwargs)
        layout = wrapper._layout
        if layout:
            for k, v in kwargs.items():
                setattr(layout, k, v)
        return self

    @staticmethod
    def _dummy_callable(*args, **kwargs):
        # Any method called on a dummy layout silently returns a dummy object.
        return NestedLayout._dummy_obj
    _dummy_obj = DummyObject()

    # ===== FOLD (currently very hacky) ===== #
    # Each foldable micropanel needs to store its fold-status
    # as a Bool property (in order to be clickable in the UI)
    # somewhere where it would be saved with .blend, but won't
    # be affected by most of the other things (i.e., in Screen).
    # At first I thought to implement such storage with
    # nested dictionaries, but currently layout.prop() does
    # not recognize ID-property dictionaries as a valid input.
    class FoldPG(bpy.types.PropertyGroup):
        def update(self, context):
            pass # just indicates that the widget needs to be force-updated
        value = bpy.props.BoolProperty(description="Fold/unfold", update=update, name="")
    bpy.utils.register_class(FoldPG) # REGISTER

    # make up some name that's unlikely to be used by normal addons
    folds_keyname = "dairin0d_ui_utils_NestedLayout_ui_folds"
    setattr(bpy.types.Screen, folds_keyname, bpy.props.CollectionProperty(type=FoldPG)) # REGISTER

    folded = False # stores folded status from the latest fold() call

    def fold(self, text, container=None, folded=False, key=None):
        """
        Create a foldable container.
        text -- the container's title/label
        container -- a sequence (type_of_container, arg1, ..., argN)
            where type_of_container is one of {"row", "column",
            "column_flow", "box", "split"}; arg1..argN are the
            arguments of the corresponding container function.
            If you supply just the type_of_container, it would be
            interpreted as (type_of_container,).
        folded -- whether the container should be folded by default.
            Default value is False.
        key -- the container's unique identifier within the panel.
            If not specified, the container's title will be used
            in its place.
        """
        data_path = "%s:%s" % (self._idname, key or text)
        folds = getattr(bpy.context.screen, self.folds_keyname)

        try:
            this_fold = folds[data_path]
        except KeyError:
            # First time this fold is drawn: create its persistent state.
            this_fold = folds.add()
            this_fold.name = data_path
            this_fold.value = folded

        is_fold = this_fold.value
        icon = ('DOWNARROW_HLT' if not is_fold else 'RIGHTARROW')

        # make the necessary container...
        if not container:
            container_args = ()
            container = "column"
        elif isinstance(container, str):
            container_args = ()
        else:
            container_args = container[1:]
            container = container[0]
        res = getattr(self, container)(*container_args)

        with res.row(True)(alignment='LEFT'):
            res.prop(this_fold, "value", text=text, icon=icon, emboss=False, toggle=True)

        # make fold-status accessible to the calling code
        self.__dict__["folded"] = is_fold

        # If folded, return dummy layout
        if is_fold: return NestedLayout(None, self._idname, self)

        return res

    # ===== BUTTON (currently very hacky) ===== #
    _button_registrator = None

    def button(self, callback, *args, tooltip=None, **kwargs):
        """Draw a dynamic button. Callback and tooltip are expected to be stable."""
        registrator = self._button_registrator
        op_idname = (registrator.get(callback, tooltip) if registrator else None)
        # Until the callback's operator is registered (see ButtonRegistrator),
        # fall back to a do-nothing dummy operator.
        if not op_idname: op_idname = "wm.dynamic_button_dummy"
        return self.operator(op_idname, *args, **kwargs)

    # ===== NESTED CONTEXT MANAGEMENT ===== #
    class ExitSublayout(Exception):
        # Control-flow exception used by exit(); caught in __exit__.
        def __init__(self, tag=None):
            self.tag = tag

    @classmethod
    def exit(cls, tag=None):
        """Jump out of current (or marked with the given tag) layout's context."""
        raise cls.ExitSublayout(tag)

    def __getitem__(self, tag):
        """Mark this layout with the tag"""
        self._tag = tag
        return self

    def __enter__(self):
        # Only nested (context-managed) layouts are stored in stack
        parent = self._parent
        if parent: parent._stack.append(self)

    def __exit__(self, type, value, traceback):
        # Only nested (context-managed) layouts are stored in stack
        parent = self._parent
        if parent: parent._stack.pop()

        if type == self.ExitSublayout:
            # Is this the layout the exit() was requested for?
            # Yes: suppress the exception. No: let it propagate to the parent.
            return (value.tag is None) or (value.tag == self._tag)

# Shared dummy operator used while a dynamic button's real operator is not
# registered yet (see NestedLayout.button / ButtonRegistrator.get).
if not hasattr(bpy.types, "WM_OT_dynamic_button_dummy"):
    class WM_OT_dynamic_button_dummy(bpy.types.Operator):
        bl_idname = "wm.dynamic_button_dummy"
        bl_label = " "
        bl_description = ""
        bl_options = {'INTERNAL'}
        arg = bpy.props.StringProperty()
        def execute(self, context):
            return {'CANCELLED'}
        def invoke(self, context, event):
            return {'CANCELLED'}
    bpy.utils.register_class(WM_OT_dynamic_button_dummy)

class DynamicButton:
    """A dynamically generated operator that forwards execute/invoke to a
    user-supplied callback. Instances are pooled by ButtonRegistrator."""

    def __init__(self, id):
        self.age = 0  # draw-cycles since last use; pruned when > max_age
        self.id = id

    def register(self, btn_info):
        """Build and register the operator class for (data_path, callback, tooltip)."""
        data_path, callback, tooltip = btn_info

        if not callback:
            # Tooltip-only button: does nothing when clicked.
            def execute(self, context):
                return {'CANCELLED'}
            def invoke(self, context, event):
                return {'CANCELLED'}
        elif data_path:
            # Bound method of a PropertyGroup: we must not hold a reference to
            # the instance, so re-resolve it from its RNA path on every call.
            full_path_resolve = BlRna.full_path_resolve
            def execute(self, context):
                _self = full_path_resolve(data_path)
                return ({'CANCELLED'} if callback(_self, context, None, self.arg) is False else {'FINISHED'})
            def invoke(self, context, event):
                _self = full_path_resolve(data_path)
                return ({'CANCELLED'} if callback(_self, context, event, self.arg) is False else {'FINISHED'})
        else:
            def execute(self, context):
                return ({'CANCELLED'} if callback(context, None, self.arg) is False else {'FINISHED'})
            def invoke(self, context, event):
                return ({'CANCELLED'} if callback(context, event, self.arg) is False else {'FINISHED'})

        self.op_idname = "wm.dynamic_button_%s" % self.id
        self.op_class = type("WM_OT_dynamic_button_%s" % self.id, (bpy.types.Operator,), dict(
            bl_idname = self.op_idname,
            bl_label = "",
            bl_description = tooltip,
            bl_options = {'INTERNAL'},
            arg = bpy.props.StringProperty(),
            execute = execute,
            invoke = invoke,
        ))
        bpy.utils.register_class(self.op_class)

    def unregister(self):
        bpy.utils.unregister_class(self.op_class)

class ButtonRegistrator:
    """Pool of DynamicButtons keyed by (data_path, callback, tooltip).

    Registration/unregistration cannot happen during UI drawing, so get()
    only queues work; update() must be called outside of draw to apply it.
    """

    max_age = 2  # draw-cycles a button may stay unused before being pruned

    def __init__(self):
        self.update_counter = 0
        self.layout_counter = 0
        self.free_ids = []  # recycled DynamicButton ids
        self.to_register = set()
        self.to_unregister = set()
        self.registered = {}

    def register_button(self, btn_info):
        # Reuse a freed id when possible to keep operator names compact.
        if self.free_ids:
            btn_id = self.free_ids.pop()
        else:
            btn_id = len(self.registered)
        btn = DynamicButton(btn_id)
        btn.register(btn_info)
        self.registered[btn_info] = btn

    def unregister_button(self, btn_info):
        btn = self.registered.pop(btn_info)
        self.free_ids.append(btn.id)
        btn.unregister()

    def update(self):
        """Apply queued (un)registrations; call outside of UI drawing."""
        if self.to_unregister:
            for btn_info in self.to_unregister:
                self.unregister_button(btn_info)
            self.to_unregister.clear()
        if self.to_register:
            for btn_info in self.to_register:
                self.register_button(btn_info)
            self.to_register.clear()
        self.update_counter += 1

    def increment_age(self):
        # Called once per draw-cycle; queues stale buttons for removal.
        for btn_info, btn in self.registered.items():
            btn.age += 1
            if btn.age > self.max_age:
                self.to_unregister.add(btn_info)

    def get(self, callback, tooltip):
        """Return the operator idname for this callback, or None (and queue
        the button for registration on the next update())."""
        if self.layout_counter != self.update_counter:
            self.layout_counter = self.update_counter
            self.increment_age()

        if not callback:
            if not tooltip: return None
            btn_info = (None, None, tooltip)
        else:
            if tooltip is None:
                tooltip = (callback.__doc__ or "") # __doc__ can be None

            callback_self = getattr(callback, "__self__", None)
            if isinstance(callback_self, bpy.types.PropertyGroup):
                # we cannot keep reference to this object, only the data path
                full_path = BlRna.full_path(callback_self)
                btn_info = (full_path, callback.__func__, tooltip)
            else:
                btn_info = (None, callback, tooltip)

        btn = self.registered.get(btn_info)
        if btn:
            btn.age = 0
            return btn.op_idname

        # Not registered yet: queue it; caller falls back to the dummy button
        # for this draw-cycle (returns None implicitly).
        self.to_register.add(btn_info)

#============================================================================#

# TODO: put all these into BlUI class?
def tag_redraw(arg=None):
    """A utility function to tag redraw of arbitrary UI units.

    arg -- a WindowManager, Window, Screen, or any object with its own
        tag_redraw() (Region, Area, RenderEngine).
        Defaults to the whole window manager (redraw everything).
    """
    if arg is None:
        arg = bpy.context.window_manager
    elif isinstance(arg, bpy.types.Window):
        arg = arg.screen

    if isinstance(arg, bpy.types.Screen):
        for area in arg.areas:
            area.tag_redraw()
    elif isinstance(arg, bpy.types.WindowManager):
        for window in arg.windows:
            for area in window.screen.areas:
                area.tag_redraw()
    else: # Region, Area, RenderEngine
        arg.tag_redraw()

def calc_region_rect(area, r, overlap=True):
    """Return (position, size) of region r as two Vectors, in window coords.

    When overlap is False and r is the WINDOW region, the rect is shrunk to
    exclude the overlapping TOOLS/UI side panels.
    """
    # Note: there may be more than one region of the same type (e.g. in quadview)
    if (not overlap) and (r.type == 'WINDOW'):
        x0, y0, x1, y1 = r.x, r.y, r.x+r.width, r.y+r.height
        ox0, oy0, ox1, oy1 = x0, y0, x1, y1
        for r in area.regions:
            if r.type == 'TOOLS':
                ox0 = r.x + r.width  # tools panel overlaps the left edge
            elif r.type == 'UI':
                ox1 = r.x  # UI panel overlaps the right edge
        x0, y0, x1, y1 = max(x0, ox0), max(y0, oy0), min(x1, ox1), min(y1, oy1)
        return (Vector((x0, y0)), Vector((x1-x0, y1-y0)))
    else:
        return (Vector((r.x, r.y)), Vector((r.width, r.height)))

def point_in_rect(p, r):
    """True if point p lies inside rect-like object r (x, y, width, height)."""
    return ((p[0] >= r.x) and (p[0] < r.x + r.width) and (p[1] >= r.y) and (p[1] < r.y + r.height))

def rv3d_from_region(area, region):
    """Return the RegionView3D for a VIEW_3D region (quadview-aware), or None."""
    if (area.type != 'VIEW_3D') or (region.type != 'WINDOW'): return None

    space_data = area.spaces.active
    try:
        quadviews = space_data.region_quadviews
    except AttributeError:
        quadviews = None # old API

    if not quadviews: return space_data.region_3d

    # In quadview, pick the quadrant by comparing this region's position
    # against the other WINDOW regions.
    x_id = 0
    y_id = 0
    for r in area.regions:
        if (r.type == 'WINDOW') and (r != region):
            if r.x < region.x: x_id = 1
            if r.y < region.y: y_id = 1

    # 0: bottom left (Front Ortho)
    # 1: top left (Top Ortho)
    # 2: bottom right (Right Ortho)
    # 3: top right (User Persp)
    return quadviews[y_id | (x_id << 1)]

# areas can't overlap, but regions can
def ui_contexts_under_coord(x, y, window=None):
    """Yield context-override dicts for every region under window coords (x, y)."""
    point = int(x), int(y)
    if not window: window = bpy.context.window
    screen = window.screen
    scene = screen.scene
    tool_settings = scene.tool_settings
    for area in screen.areas:
        if point_in_rect(point, area):
            space_data = area.spaces.active
            for region in area.regions:
                if point_in_rect(point, region):
                    yield dict(window=window, screen=screen,
                        area=area, space_data=space_data, region=region,
                        region_data=rv3d_from_region(area, region),
                        scene=scene, tool_settings=tool_settings)
            break  # areas can't overlap, so only one area can match

def ui_context_under_coord(x, y, index=0, window=None):
    """Return the index-th context-override dict under (x, y).

    If fewer than index+1 regions match, the last one found is returned
    (or None when nothing matches).
    """
    ui_context = None
    for i, ui_context in enumerate(ui_contexts_under_coord(x, y, window)):
        if i == index: return ui_context
    return ui_context

def find_ui_area(area_type, region_type='WINDOW', window=None):
    """Return a context-override dict for the first area of the given type,
    or None (implicitly) when no such area exists in the window's screen."""
    if not window: window = bpy.context.window
    screen = window.screen
    scene = screen.scene
    tool_settings = scene.tool_settings
    for area in screen.areas:
        if area.type == area_type:
            space_data = area.spaces.active
            region = None
            # keep the LAST region of the requested type
            for _region in area.regions:
                if _region.type == region_type: region = _region
            return dict(window=window, screen=screen,
                area=area, space_data=space_data, region=region,
                region_data=rv3d_from_region(area, region),
                scene=scene, tool_settings=tool_settings)

def ui_hierarchy(ui_obj):
    """Return the (window, area, region) chain that contains ui_obj.

    Missing levels are None; returns None (implicitly) for unknown types.
    """
    if isinstance(ui_obj, bpy.types.Window):
        return (ui_obj, None, None)
    elif isinstance(ui_obj, bpy.types.Area):
        wm = bpy.context.window_manager
        for window in wm.windows:
            for area in window.screen.areas:
                if area == ui_obj: return (window, area, None)
    elif isinstance(ui_obj, bpy.types.Region):
        wm = bpy.context.window_manager
        for window in wm.windows:
            for area in window.screen.areas:
                for region in area.regions:
                    if region == ui_obj: return (window, area, region)

# TODO: relative coords?
def convert_ui_coord(area, region, xy, src, dst, vector=True):
    """Translate a 2D point between the 'WINDOW', 'AREA' and 'REGION' frames.

    area, region -- UI objects whose (x, y) origins define the frames
    xy -- the point to convert
    src, dst -- names of the source and destination frames
    vector -- return a mathutils Vector when True, else an (int, int) tuple

    Unrecognized frame names leave the point unchanged.
    """
    x, y = xy
    # Origin of each frame, expressed in window coordinates; converting is
    # just "to window, then from window".
    frames = {
        'WINDOW': (0, 0),
        'AREA': (area.x, area.y),
        'REGION': (region.x, region.y),
    }
    if (src != dst) and (src in frames) and (dst in frames):
        src_ox, src_oy = frames[src]
        dst_ox, dst_oy = frames[dst]
        x += src_ox - dst_ox
        y += src_oy - dst_oy
    return (Vector((x, y)) if vector else (int(x), int(y)))

#============================================================================#
I was searching for a Property and found this listing (MLS® #14057462). Please send me more information regarding 2127 E Mitchell Street, Arlington, TX, 76010-3149. Thank you! I'd like to request a showing of 2127 E Mitchell Street, Arlington, TX, 76010-3149 (MLS® #14057462). Thank you!
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):
    """Create the core music models (Album, Artist, Composer, Genre,
    Playlist, PlaylistTrack, Track) and their relations.

    Auto-generated by Django makemigrations; the schema defined here is part
    of the migration history and must not be edited retroactively.
    """

    dependencies = [
        # Requires the (swappable) user model and cloudplayer.File to exist.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('cloudplayer', '0002_file_play_count'),
    ]

    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Artist',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Composer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Playlist',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Through-model for the Playlist<->Track M2M; carries ordering.
        migrations.CreateModel(
            name='PlaylistTrack',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.PositiveSmallIntegerField(default=0)),
                ('playlist', models.ForeignKey(to='music.Playlist')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Central model: metadata FKs are nullable, file/user are required.
        migrations.CreateModel(
            name='Track',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255)),
                ('rating', models.FloatField(default=0.0)),
                ('track_number', models.PositiveSmallIntegerField(default=0)),
                ('of_tracks', models.PositiveSmallIntegerField(default=0)),
                ('disk_number', models.PositiveSmallIntegerField(default=0)),
                ('of_disks', models.PositiveSmallIntegerField(default=0)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
                ('last_played', models.DateTimeField(default=None, null=True)),
                ('play_count', models.PositiveIntegerField(default=0)),
                ('album', models.ForeignKey(default=None, to='music.Album', null=True)),
                ('artist', models.ForeignKey(default=None, to='music.Artist', null=True)),
                ('composer', models.ForeignKey(default=None, to='music.Composer', null=True)),
                ('file', models.ForeignKey(to='cloudplayer.File')),
                ('genre', models.ForeignKey(default=None, to='music.Genre', null=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # FK/M2M fields added after both sides exist (Track is created above).
        migrations.AddField(
            model_name='playlisttrack',
            name='track',
            field=models.ForeignKey(to='music.Track'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='playlist',
            name='tracks',
            field=models.ManyToManyField(to='music.Track', through='music.PlaylistTrack'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='playlist',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
Honored by having been recently dubbed the “Professor of Soul” by the legend Randy Muller, I started my career in the music industry at the tender age of 2 years old. My earliest musical memories are of listening to the Beatles and early Motown influences; with songs like “Sweet Soul Music” by Arthur Conley, I was hooked. Back in the day Sands in Luton was the place to be, the home of Soul for celebrity artists and the venue for my first professional gig. Rapidly progressing to hosting my own Soul Night, I moved on to Scamps in Hemel Hempstead, which attracted phenomenal dancers Tommy Mack and Clive Clarke, including celebrity footballer Laurie Cunningham. I worked for NBC in the 90s, working in the news department. Rapid promotion ensued and before I knew it I was presenting several different shows per week of different genres. I have an extensive resume of artists who I have had the pleasure to interview throughout my years in the industry including Roy Ayers, Mai Thai, Randy Muller, Eban Brown from the Stylistics, Gerald Albright, Phil Phearon, Freddie Jackson, Michael Lovesmith, DD Bridgewater, to name just a few. I have enjoyed more recently working for local charity radio at Secklow Sounds in Milton Keynes, assisting with the production and presentation of their very successful Sunday Soul Show, which was very well received by our public. I am now delighted to become part of the Sound Fusion Radio team. With their ethos and mine in tune with each other, I am looking forward to building a strong healthy relationship with my new found family at Sound Fusion Radio.
"""Application package initialization for the quillt Flask app.

Creates the Flask app, wires up extensions (Bootstrap, SQLAlchemy, Misaka
markdown with a custom renderer, Admin), configures mail/file logging for
non-debug runs, and seeds development data at import time.
"""
# NOTE(review): the "flask.ext.*" namespace is deprecated (removed in newer
# Flask); these would need to become e.g. "flask_bootstrap" — confirm the
# targeted Flask version before upgrading.
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.sqlalchemy import SQLAlchemy
import os
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from flask.ext.admin import Admin
from config import basedir, ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD
from flask_misaka import Misaka
from app.renderers import QuilltRenderer

app = Flask(__name__)
app.config.from_object('config')
Bootstrap(app)
db = SQLAlchemy(app)
m = Misaka(app, QuilltRenderer())
# Smoke-test of the custom renderer; prints on every import.
print(m.render("This is a test (r) [[test]]"))

# Login/OpenID support is currently disabled.
#lm = LoginManager()
#lm.init_app(app)
#lm.login_view = 'login'
#oid = OpenID(app, os.path.join(basedir, 'tmp'))

# Production (non-debug) logging: email errors to the admins...
if not app.debug:
    import logging
    from logging.handlers import SMTPHandler
    credentials = None
    if MAIL_USERNAME or MAIL_PASSWORD:
        credentials = (MAIL_USERNAME, MAIL_PASSWORD)
    mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT), 'no-reply@' + MAIL_SERVER, ADMINS, 'microblog failure', credentials)
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)

# ...and keep a rotating file log of INFO and above.
# NOTE(review): duplicate "if not app.debug" guard — could be merged with the
# block above (kept separate here, presumably from the originating tutorial).
if not app.debug:
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('tmp/microblog.log', 'a', 1 * 1024 * 1024, 10)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('microblog startup')

# Imported at the bottom on purpose: views/models import "app" from this
# module, so importing them earlier would create a circular import.
from app import views, models

admin = Admin(app, name = "AppAdmin")
#admin.add_view(views.AdminView(models.Post, db.session))

# Testing
# NOTE(review): development seed data, created as a side effect of import;
# this hits the database on every startup — should be gated or moved to a
# management command before production use.
if not models.User.query.all():
    u = models.User(nickname='john', email='john@example.com', role=models.ROLE_USER)
    db.session.add(u)
    db.session.commit()
if not models.Quillt.query.all():
    q1 = models.Quillt(title='Quillt 1', user_id = models.User.query.first().id)
    q2 = models.Quillt(title='Quillt 2', user_id = models.User.query.first().id)
    db.session.add(q1)
    db.session.add(q2)
    db.session.commit()

text = """# The Beginning #

It began in the beginning. In the _beginnning_. It was the beginning when it began. I suppose I'll start where it starts, at the end. The end is always where it starts of course. Even the end starts at the end. The question is, where does the end _end_?

He told me, ``Hello, son.''

``What?'' I said. I was pretty dumb about these things.

[[Go to the store]]
[[Leave town]]
[[also]]

[A custom link](thisisacustomlink.com "TITLE CUSTOM")
"""

if not models.Passage.query.all():
    p1 = models.Passage(title='A great passage', body=text, quillt_id = models.Quillt.query.get(1).id)
    db.session.add(p1)
    db.session.commit()
This 8 oz. marking chalk comes in an easy applicator bottle designed to quickly fill chalk reels and markers. The high visibility chalk is ideal for making a chalk line on wood, concrete, wallboard and metal.
""" Selectors for List Component The list component is used in the mobile to show the list of tasks and the list of patients """ from selenium.webdriver.common.by import By LIST_CONTAINER = (By.CLASS_NAME, 'tasklist') LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li a') UNKNOWN_CLINICAL_RISK_LIST_ITEM = \ (By.CSS_SELECTOR, '.tasklist li a.level-not-set') LOW_CLINICAL_RISK_LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li a.level-one') MEDIUM_CLINICAL_RISK_LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li a.level-two') HIGH_CLINICAL_RISK_LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li a.level-three') STATUS_LIST_ITEM = (By.CSS_SELECTOR, '.tasklist li.status-alert') STATUS_LIST_ITEM_FLAG = \ (By.CSS_SELECTOR, '.tasklist li.status-alert .status-flag') LIST_ITEM_DATA_ROW = (By.CSS_SELECTOR, '.tasklist li a .task-meta') LIST_ITEM_DATA_LEFT = (By.CSS_SELECTOR, '.tasklist li a .task-meta .task-left') LIST_ITEM_DATA_RIGHT = \ (By.CSS_SELECTOR, '.tasklist li a .task-meta .task-right') LIST_ITEM_DATA_INFO = (By.CSS_SELECTOR, '.task-meta .taskInfo') LIST_ITEM_DATA_NAME = (By.CSS_SELECTOR, '.task-meta .task-left strong')
Implied land values in the Makati CBD appreciated by 2.0% in 1Q 2013, resulting in an average price of PHP304,230 per sqm or an accommodation value of PHP19,015 per sq m. Ortigas land values grew at a relatively marginal rate of 1.7%, pegged at an average of PHP138,395 per sqm. In Bonifacio Global City, land values sustained double-digit growth annually, currently with an average accommodation value of PHP24,690 per sq m. Land values in both Makati and BGC are expected to grow between 8 and 9% in 2Q 2014, while a 6.4% increase is estimated in Ortigas Center. Average premium secondary capital values in the Makati CBD slightly edged out that of BGC but remain essentially at par with a minimal difference of PHP1,500 per sq m. Premium three-bedroom units in the Makati CBD and BGC were priced at PHP128,730 and 127,575 per sq m, respectively. Similar to premium rents, a 7 – 8% growth in secondary prices is expected in both districts by 2Q 2014. Meanwhile, in Rockwell Center, the average capital value was PHP132,770 per sqm, and is expected to improve by 8% in the next twelve months.
#! /usr/bin/env python
'''monitor a task in realtime

This should use the HTTP interface. It currently doesn't
'''
title = "make-magic stat"

from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import GObject

import sys
import traceback

import lib.magic

# Deal with keyboard interrupts gracefully
def exc_handler(exc, val, tb):
    """Custom excepthook: quit the GTK main loop on Ctrl-C, report anything else."""
    if exc is not KeyboardInterrupt:
        traceback.print_exception(exc, val, tb)
        sys.exit()
    else:
        gtk.main_quit()
sys.excepthook = exc_handler

class ShowStuff(object):
    """Scrollable window that shows a list of (key, value, color) entries,
    each as a framed label, and polls update_stuff() on a timer."""

    def __init__(self, stuff, update_interval=1000):
        """stuff -- iterable of (key, value, color) tuples (color may be None)
        update_interval -- polling period in milliseconds
        """
        self.window = window = gtk.Window()
        window.set_title(title)
        window.connect('destroy', lambda win: gtk.main_quit())
        window.set_border_width(5)
        window.resize(500, 300)

        # window > scrolled window > viewport > vbox of framed labels
        sw = gtk.ScrolledWindow()
        window.add(sw)
        vp = gtk.Viewport()
        sw.add(vp)
        vbox = self.vbox = gtk.VBox(spacing=0)
        vp.add(vbox)
        #window.add(vbox)

        # Widgets indexed by entry key so update_stuff() can refresh them.
        self.labels = {}
        self.frames = {}
        self.ebox = {}
        for k, v, col in stuff:
            eb = gtk.EventBox()  # EventBox is needed to set a background color
            f = gtk.Frame(label=k)
            l = gtk.Label(v)
            l.set_alignment(0, .5)
            #l.set_justification(gtk.Justification.LEFT)
            f.add(l)
            eb.add(f)
            self.labels[k] = l
            self.frames[k] = f
            self.ebox[k] = eb
            vbox.pack_start(eb, True, True, 0)
            if col: self.set_color(k, col)

        window.show_all()
        GObject.timeout_add(update_interval, self.update_stuff)

    def set_color(self, k, col):
        """Set the background color of entry k (col is an X11/hex color name)."""
        self.ebox[k].modify_bg(gtk.StateType.NORMAL, gdk.color_parse(col))

    def update_stuff(self):
        """Timer callback; subclasses override. Return True to keep the timer."""
        # was: `print "update timer"` — Python 2 print statement; the
        # function form works under both Python 2 and 3.
        print("update timer")
        return True

class MonitorTask(ShowStuff):
    """Show the items of a make-magic task and refresh them periodically."""

    def __init__(self, uuid, interval=1000):
        self.magic = lib.magic.Magic()
        self.uuid = uuid
        ShowStuff.__init__(self, self.get_task_tuples(), interval)
        self.window.set_title("make-magic task: " + uuid)

    def get_task_tuples(self):
        """Yield (name, description, color) for the ready-to-run summary
        followed by every item of the task (green when COMPLETE)."""
        rtor = self.magic.ready_to_run(self.uuid)
        rtor = ", ".join(item['name'] for item in rtor)
        yield (("Ready to run", rtor, "lightblue"))
        task = self.magic.get_task(self.uuid)
        for item in task['items']:
            color = "green" if item['state'] == 'COMPLETE' else None
            item.pop('depends', None)  # too noisy to display
            desc = ', '.join(str(k) + ": " + str(v) for k, v in item.items())
            yield (item['name'], desc, color)

    def update_stuff(self):
        """Refresh colors and label texts from the server; keep the timer alive."""
        for k, v, col in self.get_task_tuples():
            if col: self.set_color(k, col)
            if self.labels[k].get_text() != v:
                self.labels[k].set_text(v)
        return True

def monitor_task(uuid):
    """Open a monitor window for the given task uuid and run the GTK loop."""
    mt = MonitorTask(uuid, 100)
    gtk.main()

if __name__ == "__main__":
    monitor_task(sys.argv[1])
With the introduction of our latest valve gate technology, Ultra Helix™, it is now possible to direct gate parts with gate vestige so clean it is often unmeasurable. This level of gate quality lasts for millions of cycles – longer than any other valve gate currently available. Ultra Helix also simplifies cold half machining requirements to integrate the hot runner with the mold. Using Ultra Helix together with UltraSync-E, Husky’s market leading servo valve stem actuation, is key to enabling the best performance from Ultra Helix. This combination of technologies also makes it possible to run for over five million cycles without having to replace any moving parts, and eliminates the use of compressed air, which minimizes the energy requirements and cost of valve gate molding. Ultra Helix incorporates a new heater design that provides the performance of an integrated heater with the maintainability benefits of Husky’s removable heater. The consistency and performance of these heaters decouples short shot balance from nozzle heater performance, ensuring that you never have to worry about balance changes when replacing a heater. This new level of heater consistency can significantly reduce mold qualification time and cost. Ultra Helix delivers performance that no other valve gate can beat, with one-of-a-kind, leading-edge hot runner technology. With Ultra Helix Valve Gates, you will invest once and mold worry-free, perfect parts for millions of cycles.
import paho.mqtt.client as mqtt


# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    # client.subscribe("DEBUG/pythonscript")
    client.subscribe("homie/5ccf7f2c12d1/desiredTemp/degrees/set")


# The callback for when a PUBLISH message is received from the server.
# Re-publishes any non-retained message on the same topic with the retain
# flag set, so late subscribers still receive the last value.  The topic is
# unsubscribed while re-publishing so we don't handle our own message again.
def on_message(client, userdata, msg):
    if msg.retain:
        print("message already has retain flag")
        return
    print("adding retain to: " + msg.topic + " " + str(msg.payload))
    client.unsubscribe(msg.topic)
    client.publish(msg.topic, payload=msg.payload, qos=0, retain=True)
    client.subscribe(msg.topic)


client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message

client.connect("192.168.1.25", 1883, 60)

# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
Harvester 68 is a premium blend hydraulic oil containing anti-wear to improve resistance against premature wear. Harvester 68 maintains its viscosity even under extreme operating conditions. Harvester 68 is compatible with most pumps, valves and seal materials. Like all hydraulic oils containing zinc anti-wear additives, it is not suitable for use with pumps having silver plated parts.
#! /usr/bin/env python
'''Extract git commit history into a CSV file.

Each row contains: summary, full message, sha, files changed, total lines
changed, insertions, deletions.  Iteration starts at --from and stops either
after --limit commits or when the --to commit is reached.
'''
import argparse
import csv

import git

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Extract git history information.')
    # 'from' is a Python keyword, so the value is stored as 'from_'.
    parser.add_argument('-f', '--from', dest='from_', help='from revno')
    parser.add_argument('-t', '--to', help='to revno')
    parser.add_argument('-l', '--limit', help='max number of commits')
    parser.add_argument('-p', '--project', help='project directory')
    # Same dest as --project: this is an alias, so give it its own help text.
    parser.add_argument('-r', '--git-repository', dest='project',
                        help='git repository directory (alias for --project)')
    parser.add_argument('-c', '--csv', help='csv file name')
    args = parser.parse_args()

    # An output file and a repository are both required.
    if not args.csv or not args.project:
        parser.print_help()
        exit(1)
    # A stopping condition is required: either an end commit or a limit.
    if not args.to and not args.limit:
        parser.print_help()
        exit(1)

    with open(args.csv, 'w') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"',
                               doublequote=True)
        repo = git.Repo(args.project)
        if args.to:
            # Resolve --to to a full sha so the hexsha comparison below works
            # even when an abbreviated revision was given.
            args.to = repo.commit(args.to).hexsha
        if args.limit:
            iter_ = repo.iter_commits(args.from_, max_count=args.limit,
                                      no_merges=True)
        else:
            iter_ = repo.iter_commits(args.from_, no_merges=True)
        for commit in iter_:
            if commit.hexsha == args.to:
                break
            # NOTE(review): .encode() yields bytes; under Python 3 the csv
            # writer would emit their repr (b'...').  This looks like a
            # Python 2 script -- confirm before running under Python 3.
            summary = commit.summary.encode('utf-8')
            message = commit.message.encode('utf-8')
            stats = commit.stats.total
            csvwriter.writerow((summary, message, commit.hexsha,
                                stats['files'], stats['lines'],
                                stats['insertions'], stats['deletions']))
The Automate 2019 Show and Conference is hosting its first Automation Works! day to help exhibitors identify potential employees to combat an unprecedented workforce shortfall and skills gap in manufacturing. Activities on April 11, the last day of the event, will include theater sessions focused on working in automation, culminating in a networking session from 12:15 p.m. - 1:30 p.m., for job seekers to connect with exhibitors who are specifically looking for new employees. Registration for the show floor and the Automation Works! day activities is free. According to a recent study by Deloitte and The Manufacturing Institute, more than 2.4 million jobs are expected to remain unfilled in the manufacturing space by 2028. In addition, 80 percent of manufacturers report a shortage of qualified applicants for skilled production positions, and the shortage could cost U.S. manufacturers 11 percent of their annual earnings. Manufacturing executives reported an average of 94 days to recruit engineering and research employees and 70 days to recruit skilled production workers.