text
stringlengths
29
850k
"""Script for picking certain number of sampels. """ import argparse import time import logging import collections import tensorflow as tf parser = argparse.ArgumentParser( description="Eval sample picker for BERT.") parser.add_argument( '--input_tfrecord', type=str, default='', help='Input tfrecord path') parser.add_argument( '--output_tfrecord', type=str, default='', help='Output tfrecord path') parser.add_argument( '--num_examples_to_pick', type=int, default=10000, help='Number of examples to pick') parser.add_argument( '--max_seq_length', type=int, default=512, help='The maximum number of tokens within a sequence.') parser.add_argument( '--max_predictions_per_seq', type=int, default=76, help='The maximum number of predictions within a sequence.') args = parser.parse_args() max_seq_length = args.max_seq_length max_predictions_per_seq = args.max_predictions_per_seq logging.basicConfig(level=logging.INFO) def decode_record(record): """Decodes a record to a TensorFlow example.""" name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. 
for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def create_int_feature(values): feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return feature def create_float_feature(values): feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) return feature if __name__ == '__main__': tic = time.time() tf.enable_eager_execution() d = tf.data.TFRecordDataset(args.input_tfrecord) num_examples = 0 records = [] for record in d: records.append(record) num_examples += 1 writer = tf.python_io.TFRecordWriter(args.output_tfrecord) i = 0 pick_ratio = num_examples / args.num_examples_to_pick num_examples_picked = 0 for i in range(args.num_examples_to_pick): example = decode_record(records[int(i * pick_ratio)]) features = collections.OrderedDict() features["input_ids"] = create_int_feature( example["input_ids"].numpy()) features["input_mask"] = create_int_feature( example["input_mask"].numpy()) features["segment_ids"] = create_int_feature( example["segment_ids"].numpy()) features["masked_lm_positions"] = create_int_feature( example["masked_lm_positions"].numpy()) features["masked_lm_ids"] = create_int_feature( example["masked_lm_ids"].numpy()) features["masked_lm_weights"] = create_float_feature( example["masked_lm_weights"].numpy()) features["next_sentence_labels"] = create_int_feature( example["next_sentence_labels"].numpy()) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) num_examples_picked += 1 writer.close() toc = time.time() logging.info("Picked %d examples out of %d samples in %.2f sec", num_examples_picked, num_examples, toc - tic)
In a record verdict, a Floyd County man was recently awarded $8.6 million in damages by a Floyd County Superior Court jury. Brinson Askew Berry’s Bob Berry served as the local attorney in the case, partnering with lead attorney Michael Werner of The Werner Law Firm. While riding a motorcycle, plaintiff William “Cleve” Corley was struck by a Floyd County Emergency Services truck. The opposing parties agreed that the driver of the Emergency Services truck was at fault; however, they could not agree on an amount of damages, resulting in a superior court trial. The $8.6 million verdict is one of the highest civil verdicts ever awarded in the Floyd County judicial circuit.
# Copyright (c) 2012 The Khronos Group Inc. # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. # See Core.Logic.FJudgementContext for the information # of the 'context' parameter. # This sample judging object does the following: # # JudgeBaseline: just verifies that the standard steps did not crash. # JudgeSuperior: also verifies that the validation steps are not in error. # JudgeExemplary: same as intermediate badge. # We import an assistant script that includes the common verifications # methods. The assistant buffers its checks, so that running them again # does not incurs an unnecessary performance hint. 
import sys, string, os
from xml.dom import minidom, Node
from datetime import datetime, timedelta
from Core.Common.FUtils import FindXmlChild, GetXmlContent, ParseDate
from StandardDataSets.scripts import JudgeAssistant

# Please feed your node list here:
tagLst = ['library_visual_scenes', 'visual_scene', 'asset', 'created']
attrName = ''
attrVal = ''
dataToCheck = ''


class SimpleJudgingObject:
    """Judge verifying that the <created> timestamp inside the visual_scene
    asset survives an import/export round trip unchanged."""

    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        self.status_baseline = False
        self.status_superior = False
        self.status_exemplary = False
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def CheckDate(self, context):
        """Compares the <created> time of the test input file against the
        exported file.

        Returns True when both match exactly, False when they differ, and
        None when either value could not be read (also logged as FAILED).
        """
        # Get the <created> time for the input file
        root = minidom.parse(context.GetInputFilename()).documentElement
        inputCreatedDate = ParseDate(GetXmlContent(FindXmlChild(
            root, "library_visual_scenes", "visual_scene", "asset", "created")))
        if inputCreatedDate is None:
            context.Log("FAILED: Couldn't read <created> value from test input file.")
            return None

        # Get the output file
        outputFilenames = context.GetStepOutputFilenames("Export")
        if len(outputFilenames) == 0:
            context.Log("FAILED: There are no export steps.")
            return None

        # Get the <created> time for the output file
        root = minidom.parse(outputFilenames[0]).documentElement
        outputCreatedDate = ParseDate(GetXmlContent(FindXmlChild(
            root, "library_visual_scenes", "visual_scene", "asset", "created")))
        if outputCreatedDate is None:
            context.Log("FAILED: Couldn't read <created> value from the exported file.")
            return None

        # Any nonzero difference means the exporter altered the timestamp.
        if (outputCreatedDate - inputCreatedDate) != timedelta(0):
            context.Log("FAILED: <created> is not preserved.")
            context.Log("The original <created> time is " + str(inputCreatedDate))
            context.Log("The exported <created> time is " + str(outputCreatedDate))
            return False

        context.Log("PASSED: <created> element is preserved.")
        return True

    def JudgeBaseline(self, context):
        # No step should crash.
        self.__assistant.CheckCrashes(context)
        # Import/export/validate must exist and pass, while Render must only exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
        self.status_baseline = self.__assistant.GetResults()
        return self.status_baseline

    # To pass intermediate you need to pass basic; this object could also
    # include additional tests specific to the intermediate badge.
    def JudgeSuperior(self, context):
        self.status_superior = self.status_baseline
        return self.status_superior

    # To pass advanced you need to pass intermediate; this object could also
    # include additional tests specific to the advanced badge.
    def JudgeExemplary(self, context):
        # if superior fails, no point in further checking
        if (self.status_superior == False):
            self.status_exemplary = self.status_superior
            return self.status_exemplary
        self.status_exemplary = self.CheckDate(context)
        return self.status_exemplary


# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck)
Spirals of red and cream dots on an orange background reminiscent of the Australian outback desert in a beautiful Aboriginal art design! Medium weight cotton fabric can be used for dressmaking, quilting, light home decor projects, bags and more! Walkabout Spirals on Orange is from UK designer Beth Studley’s unique and artistic Walkabout range for Makower UK’s Henley Studio Collection. This bright, interesting design is just what you need for distinctive dressmaking and creative quilting projects. Give a modern flair to your home decor plans with this truly beautiful fabric. Just picture the gorgeous cushions you could create!
from pyconstraints import Problem, is_nil, BruteForceSolver

__all__ = ['compute_schedules', 'TimeRange', 'Scheduler']


class TimeRange(object):
    "Represents a time range to be restricted."

    def __init__(self, start, end, dow):
        self.start = start
        self.end = end
        self.days_of_week = dow

    def __repr__(self):
        return "<TimeRange: %r to %r on %r>" % (
            self.start, self.end, self.days_of_week
        )

    def days_conflict(self, days):
        "Returns True if any of this range's days appears in the given days."
        for day in self.days_of_week:
            if day in days:
                return True
        return False

    def __contains__(self, period):
        """Membership test for a (days, start, end) tuple: True when the
        period shares a day with this range AND its time interval overlaps.
        """
        days, start, end = period
        # Intervals overlap when any endpoint of one lies inside the other.
        return self.days_conflict(days) and (
            self.start <= start <= self.end or
            start <= self.start <= end or
            self.start <= end <= self.end or
            start <= self.end <= end
        )

    def conflicts_with(self, section):
        "Returns True if the given section conflicts with this time range."
        for p in section.periods:
            t = (p.int_days, p.start, p.end)
            if t in self:
                return True
        return False


def section_constraint(section1, section2):
    """Default pairwise constraint: two sections are compatible when either
    is nil or their meeting times do not clash."""
    if is_nil(section1) or is_nil(section2):
        return True
    return not section1.conflicts_with(section2)


class Scheduler(object):
    """High-level API that wraps the course scheduling feature.

    ``free_sections_only``: bool. Determines if only the available sections
    should be used from the courses provided. Defaults to True.
    ``problem``: Optional problem instance to provide. If None, the default
    one is created.
    ``constraint``: Optional pairwise section constraint; defaults to
    :func:`section_constraint`.
    """
    def __init__(self, free_sections_only=True, problem=None, constraint=None):
        self.p = Problem()
        if problem is not None:
            self.p = problem
        self.free_sections_only = free_sections_only
        self.section_constraint = constraint or section_constraint
        self.clear_excluded_times()

    def clear_excluded_times(self):
        """Clears all previously set excluded times."""
        self._excluded_times = []
        return self

    def exclude_time(self, start, end, days):
        """Adds an excluded time by start, end times and the days.

        ``start`` and ``end`` are in military integer times (e.g. - 1200
        1430).
        ``days`` is a collection of integers or strings of fully-spelt,
        lowercased days of the week.
        """
        self._excluded_times.append(TimeRange(start, end, days))
        return self

    def exclude_times(self, *tuples):
        """Adds multiple excluded times by tuple of (start, end, days) or by
        TimeRange instance.

        ``start`` and ``end`` are in military integer times (e.g. - 1200
        1430).
        ``days`` is a collection of integers or strings of fully-spelt,
        lowercased days of the week.
        """
        for item in tuples:
            if isinstance(item, TimeRange):
                self._excluded_times.append(item)
            else:
                self.exclude_time(*item)
        return self

    def find_schedules(self, courses=None, return_generator=False):
        """Returns all the possible course combinations. Assumes no
        duplicate courses.

        ``return_generator``: If True, returns a generator instead of
        collection. Generators are friendlier to your memory and save
        computation time if not all solutions are used.
        """
        self.p.reset()
        self.create_variables(courses)
        self.create_constraints(courses)
        if return_generator:
            return self.p.iter_solutions()
        return self.p.get_solutions()

    # internal methods -- can be overriden for custom use.
    def get_sections(self, course):
        """Internal use. Returns the sections to use for the solver for a
        given course.
        """
        return course.available_sections if self.free_sections_only else course.sections

    def time_conflict(self, schedule):
        """Internal use. Constraint predicate: returns True when the given
        section is acceptable — i.e. it is nil or does not fall inside any
        excluded time range. Returns False on a conflict.
        """
        if is_nil(schedule):
            return True
        for timerange in self._excluded_times:
            if timerange.conflicts_with(schedule):
                return False
        return True

    def create_variables(self, courses):
        """Internal use. Creates all variables in the problem instance for
        the given courses.

        If given a dict of {course: sections}, will use the provided
        sections.
        """
        has_sections = isinstance(courses, dict)
        for course in courses:
            self.p.add_variable(
                course,
                courses.get(course, []) if has_sections
                else self.get_sections(course))

    def create_constraints(self, courses):
        """Internal use. Creates all constraints in the problem instance for
        the given courses.
        """
        for i, course1 in enumerate(courses):
            # Pairwise compatibility, each unordered pair added once (i > j).
            for j, course2 in enumerate(courses):
                if i <= j:
                    continue
                self.p.add_constraint(self.section_constraint,
                                      [course1, course2])
            # Each course must also avoid the excluded time ranges.
            self.p.add_constraint(self.time_conflict, [course1])


def compute_schedules(courses=None, excluded_times=(), free_sections_only=True,
                      problem=None, return_generator=False,
                      section_constraint=None):
    """
    Returns all possible schedules for the given courses.
    """
    s = Scheduler(free_sections_only, problem, constraint=section_constraint)
    s.exclude_times(*tuple(excluded_times))
    return s.find_schedules(courses, return_generator)
No matter your biking experience, take a closer look at cycling for transportation, health, and sport through video, photos, demonstration, and discussion. Travel the world with Dean Peterson, the coach of the Marian University Cycling Team, as he shares stories from West and East Africa, Cuba, France, and the United States. Walk away with a deeper appreciation of the physical, environmental, and social benefits the bicycle brings to the health of our communities. Presented by Marian University Cycling Team and Zipp Speed Weaponry. Questions? Call 317-955-6340 or email dpeterson@marian.edu.
# setup.py for msaf: builds the C/C++ "cc" segmentation extension and
# installs the Python package.
from setuptools import setup, Extension, find_packages
import glob
import imp
import sys
import numpy.distutils.misc_util

# Load the version module directly from its file so the full msaf package
# (and its heavy dependencies) is not imported at build time.
version = imp.load_source('msaf.version', 'msaf/version.py')

# Compile the CC algorithm
# Platform-specific compiler/linker flags: pthreads + LAPACK/BLAS on Linux,
# the Accelerate framework on macOS.  Other platforms get empty flag strings.
extra_compile_flags = ""
extra_linker_flags = ""
if "linux" in sys.platform:
    extra_compile_flags = "-std=c++11 -DUSE_PTHREADS"
    extra_linker_flags = "-llapack -lblas -lm"
elif "darwin" in sys.platform:
    extra_compile_flags = "-DUSE_PTHREADS"
    extra_linker_flags = "-framework Accelerate"

cc_path = "msaf/algorithms/cc/"
# NOTE(review): each flag string below is passed as a single list element,
# so e.g. "-std=c++11 -DUSE_PTHREADS" reaches the compiler as one argument —
# verify the build toolchain accepts space-joined flags.
cc_segmenter = Extension(cc_path + "cc_segmenter",
                         sources=[cc_path + "base/Pitch.cpp",
                                  cc_path + "dsp/chromagram/Chromagram.cpp",
                                  cc_path + "dsp/chromagram/ConstantQ.cpp",
                                  cc_path + "dsp/mfcc/MFCC.cpp",
                                  cc_path + "dsp/onsets/DetectionFunction.cpp",
                                  cc_path + "dsp/phasevocoder/PhaseVocoder.cpp",
                                  cc_path + "dsp/rateconversion/Decimator.cpp",
                                  cc_path + "dsp/segmentation/cluster_melt.c",
                                  cc_path + "dsp/segmentation/ClusterMeltSegmenter.cpp",
                                  cc_path + "dsp/segmentation/cluster_segmenter.c",
                                  cc_path + "dsp/segmentation/Segmenter.cpp",
                                  cc_path + "dsp/transforms/FFT.cpp",
                                  cc_path + "hmm/hmm.c",
                                  cc_path + "maths/Correlation.cpp",
                                  cc_path + "maths/CosineDistance.cpp",
                                  cc_path + "maths/MathUtilities.cpp",
                                  cc_path + "maths/pca/pca.c",
                                  cc_path + "main.cpp"
                                  ],
                         include_dirs=[cc_path + "dsp/segmentation",
                                       cc_path,
                                       cc_path + "include"],
                         libraries=["stdc++"],
                         extra_compile_args=[extra_compile_flags],
                         extra_link_args=[extra_linker_flags],
                         language="c++")

# MSAF configuration
setup(
    name='msaf',
    version=version.version,
    description='Python module to discover the structure of music files',
    author='Oriol Nieto',
    author_email='oriol@nyu.edu',
    url='https://github.com/urinieto/msaf',
    download_url='https://github.com/urinieto/msaf/releases',
    packages=find_packages(),
    # Ship the pre-trained OLDA models with the package.
    package_data={'msaf': ['algorithms/olda/models/*.npy']},
    data_files=[('msaf/algorithms/olda/models',
                 glob.glob('msaf/algorithms/olda/models/*.npy'))],
    # Adjacent string literals are concatenated into one long_description.
    long_description="""A python module to segment audio into
all its """ """different large-scale sections and label them based on their """
    """acoustic similarity""",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6"
    ],
    keywords='audio music sound',
    license='GPL',
    install_requires=[
        'audioread',
        'cvxopt',
        'decorator',
        'enum34',
        'future',
        'jams >= 0.3.0',
        'joblib',
        'librosa >= 0.6.0',
        'mir_eval',
        'matplotlib >= 1.5',
        'numpy >= 1.8.0',
        'pandas',
        'scikit-learn >= 0.17.0',
        'scipy >= 0.13.0',
        'seaborn',
        # For notebook example (but everyone should have this :-))
        'vmo >= 0.3.3'
    ],
    extras_require={
        'resample': 'scikits.samplerate>=0.3'
    },
    ext_modules=[cc_segmenter],
    include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs()
)
Home Colorado Plateau Archives [A bison meant to be cross-bred with Galloway cattle]. [A bison meant to be cross-bred with Galloway cattle]. Title [A bison meant to be cross-bred with Galloway cattle]. Post a Comment for [A bison meant to be cross-bred with Galloway cattle].
# Purpose: entity module
# Created: 11.03.2011
# Copyright (C) 2011, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <mozman@gmx.at>"

from .tags import cast_tag_value, DXFTag, DXFStructureError


class DXFNamespace(object):
    """ Provides the dxf namespace for GenericWrapper. """
    __slots__ = ('_setter', '_getter')

    def __init__(self, wrapper):
        # DXFNamespace.__setattr__ can not set _getter and _setter,
        # so bypass it via the base-class __setattr__.
        super(DXFNamespace, self).__setattr__('_getter', wrapper.get_dxf_attrib)
        super(DXFNamespace, self).__setattr__('_setter', wrapper.set_dxf_attrib)

    def __getattr__(self, attrib):
        """Returns value of DXF attribute *attrib*.
        usage: value = GenericWrapper.dxf.attrib
        """
        return self._getter(attrib)

    def __setattr__(self, attrib, value):
        """Set DXF attribute *attrib* to *value*.
        usage: GenericWrapper.dxf.attrib = value
        """
        self._setter(attrib, value)


# noinspection PyUnresolvedReferences
class GenericWrapper(object):
    """Base wrapper around a DXF entity's tag list.

    Subclasses supply TEMPLATE (a cloneable tag prototype) and DXFATTRIBS
    (mapping of attribute name -> descriptor with .code, .subclass, .xtype).
    """
    TEMPLATE = None
    DXFATTRIBS = {}

    def __init__(self, tags):
        self.tags = tags
        # all DXF attributes are accessible by the dxf attribute,
        # like entity.dxf.handle
        self.dxf = DXFNamespace(self)

    @classmethod
    def new(cls, handle, dxfattribs=None, dxffactory=None):
        """Creates a new entity from cls.TEMPLATE with the given handle and
        optional dict of DXF attributes.  Raises NotImplementedError when
        the subclass defines no TEMPLATE."""
        if cls.TEMPLATE is None:
            raise NotImplementedError("new() for type %s not implemented." % cls.__name__)
        entity = cls(cls.TEMPLATE.clone())
        entity.dxf.handle = handle
        if dxfattribs is not None:
            entity.update_dxf_attribs(dxfattribs)
        entity.post_new_hook()
        return entity

    def post_new_hook(self):
        # Hook for subclasses to run extra initialization after new().
        pass

    def dxftype(self):
        # The entity type is the value of the first no-class tag.
        return self.tags.noclass[0].value

    def has_dxf_attrib(self, key):
        # True when this entity type declares the attribute (it may still
        # be absent from the actual tags).
        return key in self.DXFATTRIBS

    def get_dxf_attrib(self, key, default=ValueError):
        """Returns the value of DXF attribute *key*.

        Raises AttributeError for an undeclared key.  When the declared
        attribute is missing from the tags, raises ValueError unless a
        *default* other than the ValueError sentinel was supplied.
        """
        try:
            dxfattr = self.DXFATTRIBS[key]
            return self._get_dxf_attrib(dxfattr)
        except KeyError:
            raise AttributeError(key)
        except ValueError:
            # ValueError doubles as the "no default given" sentinel.
            if default is ValueError:
                raise ValueError("DXFAttrib '%s' does not exist."
                                 % key)
            else:
                return default

    def set_dxf_attrib(self, key, value):
        """Sets DXF attribute *key* to *value*; AttributeError for an
        undeclared key."""
        try:
            dxfattr = self.DXFATTRIBS[key]
        except KeyError:
            raise AttributeError(key)
        # no subclass is subclass index 0
        subclasstags = self.tags.subclasses[dxfattr.subclass]
        if dxfattr.xtype is not None:
            # Extended (point) types span several consecutive tags.
            tags = DXFExtendedPointType(subclasstags)
            tags.set_value(dxfattr.code, dxfattr.xtype, value)
        else:
            self._set_tag(subclasstags, dxfattr.code, value)

    def clone_dxf_attribs(self):
        """Returns a dict of all DXF attributes actually present in the
        tags; declared-but-absent attributes are silently skipped."""
        dxfattribs = {}
        for key in self.DXFATTRIBS.keys():
            try:
                dxfattribs[key] = self.get_dxf_attrib(key)
            except ValueError:
                pass
        return dxfattribs

    def update_dxf_attribs(self, dxfattribs):
        # Bulk-set attributes from a dict.
        for key, value in dxfattribs.items():
            self.set_dxf_attrib(key, value)

    def _get_dxf_attrib(self, dxfattr):
        # no subclass is subclass index 0
        subclass_tags = self.tags.subclasses[dxfattr.subclass]
        if dxfattr.xtype is not None:
            tags = DXFExtendedPointType(subclass_tags)
            return tags.get_value(dxfattr.code, dxfattr.xtype)
        else:
            return subclass_tags.get_value(dxfattr.code)

    def _get_extended_type(self, code, xtype):
        tags = DXFExtendedPointType(self.tags)
        return tags.get_value(code, xtype)

    def _set_extended_type(self, code, xtype, value):
        tags = DXFExtendedPointType(self.tags)
        return tags.set_value(code, xtype, value)

    @staticmethod
    def _set_tag(tags, code, value):
        # Casts the value to the proper tag type before storing it.
        tags.set_first(code, cast_tag_value(code, value))


class DXFExtendedPointType(object):
    """Reads/writes 2D/3D point values stored as consecutive tags with
    group codes code, code+10, code+20 (x, y, z)."""

    def __init__(self, tags):
        self.tags = tags

    def get_value(self, code, xtype):
        """Returns the point for *code* according to *xtype*
        ('Point2D', 'Point3D' or the flexible 'Point2D/3D')."""
        if xtype == 'Point2D':
            return self._get_fix_point(code, axis=2)
        elif xtype == 'Point3D':
            return self._get_fix_point(code, axis=3)
        elif xtype == 'Point2D/3D':
            return self._get_flexible_point(code)
        else:
            raise TypeError('Unknown extended type: %s' % xtype)

    def _get_fix_point(self, code, axis):
        # Point with an exact, required axis count.
        point = self._get_point(code)
        if len(point) != axis:
            raise DXFStructureError('Invalid axis count for code: %d' % code)
        return point

    def _get_point(self, code):
        # Collect up to 3 consecutive tags whose codes follow the
        # code, code+10, code+20 pattern starting at the first tag
        # with group code *code*.
        index = self._point_index(code)
        return tuple(
            (tag.value for x, tag in
             enumerate(self.tags[index:index + 3])
             if tag.code == code + x * 10)
        )

    def _point_index(self, code):
        return self.tags.tag_index(code)

    def _get_flexible_point(self, code):
        # Flexible points may have 2 or 3 axes.
        point = self._get_point(code)
        if len(point) in (2, 3):
            return point
        else:
            raise DXFStructureError('Invalid axis count for code: %d' % code)

    def set_value(self, code, xtype, value):
        """Writes *value* as the point for *code* according to *xtype*."""
        def set_point(code, axis):
            # Both the new value and the stored point must have the
            # required axis count.
            if len(value) != axis:
                raise ValueError('%d axis required' % axis)
            if self._count_axis(code) != axis:
                raise DXFStructureError('Invalid axis count for code: %d' % code)
            self._set_point(code, value)

        if xtype == 'Point2D':
            set_point(code, axis=2)
        elif xtype == 'Point3D':
            set_point(code, axis=3)
        elif xtype == 'Point2D/3D':
            self._set_flexible_point(code, value)
        else:
            raise TypeError('Unknown extended type: %s' % xtype)

    def _set_point(self, code, value):
        # Overwrites the existing coordinate tags in place; the stored
        # tag codes must already match the expected pattern.
        def set_tag(index, tag):
            if self.tags[index].code == tag.code:
                self.tags[index] = tag
            else:
                raise DXFStructureError('DXF coordinate error')

        index = self._point_index(code)
        for x, coord in enumerate(value):
            set_tag(index + x, DXFTag(code + x * 10, float(coord)))

    def _set_flexible_point(self, code, value):
        # Adjusts the stored axis count (2 <-> 3) before writing the value.
        def append_axis():
            index = self._point_index(code)
            self.tags.insert(index + 2, DXFTag(code + 20, 0.0))

        def remove_axis():
            index = self._point_index(code)
            self.tags.pop(index + 2)

        new_axis = len(value)
        if new_axis not in (2, 3):
            raise ValueError("2D or 3D point required (tuple).")
        old_axis = self._count_axis(code)
        if old_axis > 1:
            if new_axis == 2 and old_axis == 3:
                remove_axis()
            elif new_axis == 3 and old_axis == 2:
                append_axis()
        else:
            raise DXFStructureError("Invalid axis count of point.")
        self._set_point(code, value)

    def _count_axis(self, code):
        return len(self._get_point(code))
Gray Contact Lenses are colored lenses that are popular for use in costumes, special effects, or for cosmetic purposes. Shop for cosmetic gray lenses for daily, weekly, monthly and yearly wear. Or even for gray special effects lenses for a more dramatic look. They work to make your eyes appear larger and more attractive. You’ll be more alluring than ever in these circle lenses. Find your Gray Contact Lenses on iEyeBeauty! Searching for some gray contact lenses for that big night out? Want high quality but low prices? Look no further than our huge range of gray colored contact lenses. With designs you won't find elsewhere, you're sure to find your perfect pair here at Colored Contacts! Ideal for transforming your look in the blink of an eye, these gray eye contacts can change the color of most natural eye tones. From soft, natural gray contact lenses to steely, dark gray shades, we've got a little something for every taste. So whether you're making the move from brown eyes to gray or blue to gray eyes, you can be sure to achieve a flawless result. There are plenty of occasions you might want gray eyes for, and our range reflects that. Maybe you’re looking for a ghoulish effect for a horrifying Halloween party, or perhaps you want to go gray permanently? Whatever it is, Colored Contacts has got your back. Wondering how many different styles of gray style lenses there are? You can browse a huge range of different silvery styles which could be worked into your everyday outfit, or even into a totally terrifying costume. Perfect for adding a touch of personality to all manner of everyday looks as well as Halloween, cosplay and fancy dress occasions, our range of cheap colored contact lenses will ensure that all eyes are on you. Don’t be fooled by our competitive prices. The quality of our lenses has not been compromised. 
All our lenses are approved for shipping in the US by FDA regulations, so you are guaranteed to get a pair of contact lenses that is both high-quality and safe to use. For extra advice on using cosmetic contact lenses, we recommend visiting you opticians or eye doctors. We are also here to give some assistance, with our handy guides and blog posts on Colored Contacts. Non prescription grey contact lenses are the perfect way to create a new look if you don’t need prescription lenses. Our costume styles can help you to transform into your favorite anime character or movie star. Only a handful of people have this rare eye color so in order to closely match your character for a cosplay you are going to need a little help. Fashion lenses are the easy, hassle free way to achieve this! Here at Colored Contacts, you can find everything you need to use your dark gray contact lenses. From lens solution to a lens case or kit, with tools to help you insert your lens on the go, you can be sure to keep your lenses clean and ready to use. Are you ready to take your lenses out for the day? Carry your lens kit around with you and you can discreetly place your lenses back in the case and clean them with lens solution. We have several variations in natural gray contact lens so you can get the exact shade you have always dreamed about. If you are going for something very bright that will completely intensify your gaze, try our one-day range of light gray contact lenses. These include our single tone, tri tone, and mystic grey colors.While there are many other options, our most popular styles fall into three categories. The Gray Tri Tone Color Lenses are without a doubt one of our most realistic styles. These lenses come in both daily and 90 day variants so you can choose the duration to suit your needs. The tri tone grey contact lenses feature a fairly dark grey iris. The key is in the details with this style. The inner ring of the iris is a subtle shade of brown for a natural glow. 
This has the effect of adding depth to your lens. The single gray colored lenses can lack the depth compared to those made with extra color flecks. If you have looked at a natural iris you will notice it is composed of many different colors which is exactly why our tri tone lenses look so realistic. The outer ring or limbal ring is very dark which also enhances this 3D effect. In comparison our two tone natural grey contacts feature a similarly dark limbal ring but do not have the brown color towards the center. These lenses are perfect for those wanting to achieve pure grey eyes. If you are looking to create a flawless makeup style then these could be the lenses to pair with your look. The smoky gray contact lenses shade is sure to draw everyone’s attention towards your eyes so make sure your eye shadow is looking the best. Our third style is the one tone lenses. These feature one solid light gray contact lenses shade with small dark color flecks for realism. The entire iris is the same even tone so is sure to result in a bright eyed effect. These single tone lenses are so light in shade they can look like silver grey contact lenses under certain lights. If you are creating a fantasy costume or just want to add a mystical style to your everyday look then you won’t be able to help falling in love with these light gray contacts. If you like our selection of gray cosmetic contact lenses, then you should check out our full range of contact lenses! With a huge variety of different colors, patterns and finishes available in 1 day, 30 day, 90 day or 1 year options, you can experiment with your look for less. When selecting your gray color lens it is important to consider which duration you would like to try. If you opt for a 90 day or 1 year option you will need to read up on maintaining and caring for your lenses as eye safety is of the utmost importance. In order to do this you will need extra lens solution handy to keep your lenses fresh and sterile. 
If you love gadgets and tools then be sure to pick up one of our lens kits. The handy tweezers and sealable case will help you to maintain your new lenses. The duration period starts from the first time you use your lenses so be sure to take note of the date as it is important to only use lenses for as long as stated on the packaging. If you want to skip the hassle of cleaning your lenses after every use then take a look at our daily lenses. These one use lenses can be worn once then thrown away. Even though they are single use lenses it is important to prepare them properly in lens solution. These hassle free lenses are perfect if you are looking to try out your first pair of contact lenses. There are certain things that we value when it comes to sterling gray contacts at Colored Contacts. One of these is the quality of our lenses.The high-quality manufacture of our products means that the pigmentation used to color the lenses looks bold and impressive, without becoming too unrealistic. A question we are frequently asked is what will gray contact lenses on brown eyes look like? Head over to our section dedicated to showing you the best lenses for brown eyes to find the brightest colors. Results can vary for gray contacts on dark brown eyes however you should see some variation in color with all our lenses. The difference is some of our lenses are designed to completely change the color of your iris while others are patterned to add color flecks to your natural tone. Now we have talked about the styles and durations of lenses it is time for you to choose which to try first. Remember you can always come back for more! Make sure to always follow safety advice and listen to your optometrist or eye doctor. Once you are confident in using gray contact lenses you will be trying out an array of other shades so enjoy exploring our natural and costume styles for your next fancy dress, Halloween or New Year’s party. 
You can even switch up your color as part of your normal casual style, we frequently change other parts of our appearance so why not our natural eye color too?
from lightfm import LightFM import pickle import db_utils import scipy import numpy as np import numpy.linalg as la from nltk.tokenize import RegexpTokenizer from nltk.corpus import stopwords regex_tokenizer = RegexpTokenizer(r'\w+') eng_stopwords = stopwords.words('english') NUM_COMPONENTS = 30 NUM_EPOCHS = 20 MODEL_LOCATION = 'lightfm_model.pickle' def get_interactions(engine): num_playlists = db_utils.playlist_max_id(engine) num_songs = db_utils.song_max_id(engine) interactions = scipy.sparse.lil_matrix((num_playlists+1, num_songs+1)) plist_records = db_utils.get_playlist_interactions(engine) for r in plist_records: interaction_value = 2 if r.vote else 1 interactions[r.playlist_id, r.song_id] = interaction_value return interactions def get_audio_analysis_features(engine): features = db_utils.song_audio_features(engine) num_songs = db_utils.song_max_id(engine) feature_mat = scipy.sparse.lil_matrix((num_songs+1, 4)) for s in features: pitch = s.pi or 0 harmonic = s.h or 0 percussive = s.pe or 0 temp = s.t or 0 feature_mat[s.id] = np.array([pitch, harmonic, percussive, temp]) return feature_mat def artist_matrix(engine): ''' Returns matrix of shape (num_songs, num_artists) ''' songs = db_utils.song_artists(engine) num_songs = db_utils.song_max_id(engine) artists = set(s.artist for s in songs) artist_indices = {s: i for i, s in enumerate(artists)} artist_mat = scipy.sparse.lil_matrix((num_songs+1, len(artists))) for s in songs: artist_mat[s.id, artist_indices[s.artist]] = 1 return artist_mat def get_item_features(engine): ''' - Resultant matrix is of shape: (num_songs, num_features) - Matrix can be indexed as (song_id, feature_idx) ''' sentiments = db_utils.song_sentiments(engine) num_songs = db_utils.song_max_id(engine) item_features = scipy.sparse.lil_matrix((num_songs+1, 3)) for s in sentiments: pos = s.pos or 0 neu = s.neu or 0 neg = s.neg or 0 sent_arr = np.array([pos, neu, neg]) norm = la.norm(sent_arr) if norm > 0: item_features[s.id] = sent_arr / norm 
keywords = keyword_sparse_matrix(engine) artists = artist_matrix(engine) audio = get_audio_analysis_features(engine) results = scipy.sparse.hstack([item_features, keywords, artists, audio]) return results def train_model(engine): ''' interactions is of: shape: (num_users, num_items) format: 1 if positive interaction, -1 if negative interaciton item_features is of: shape: (num_items, num_features) format: [pos_sent, neu_sent, neg_sent] ''' model = load_model() interactions = get_interactions(engine) item_features = get_item_features(engine) model.fit(interactions, item_features=item_features, epochs=NUM_EPOCHS) dump_model(model) return model def get_recommendations(engine, playlist_id): model = train_model(engine) item_features = get_item_features(engine) num_items = item_features.shape[0] predictions = model.predict(playlist_id, np.arange(num_items), item_features=item_features) return [int(i) for i in np.argsort(-predictions)] def keyword_sparse_matrix(engine): keyword_list = list(db_utils.all_keywords(engine)) keyword_dict = {} curr_idx = 0 for k in keyword_list: if k.word not in keyword_dict: keyword_dict[k.word] = curr_idx curr_idx += 1 num_songs = db_utils.song_max_id(engine) keyword_mat = scipy.sparse.lil_matrix((num_songs + 1, curr_idx + 1)) for k in keyword_list: keyword_mat[k.song_id, keyword_dict[k.word]] = k.weight # Normalize rows for r in range(keyword_mat.shape[0]): norm = la.norm(keyword_mat.getrow(r).todense()) if norm > 0: keyword_mat[r] = keyword_mat.getrow(r) / norm return keyword_mat def load_model(): ''' Loads LightFM model from file Returns empty model if no pickled model found ''' try: with open(MODEL_LOCATION, 'rb') as f: return pickle.load(f) except IOError: return LightFM(loss='warp', no_components=NUM_COMPONENTS) def dump_model(model): ''' Saves LightFM model to file ''' with open(MODEL_LOCATION, 'wb') as f: pickle.dump(model, f) def word_tokenize_no_punct(sent): tokens = regex_tokenizer.tokenize(sent) return [w.lower() for w in tokens if 
w.lower() not in eng_stopwords and len(w) > 1] def songs_to_vocab(songs): vocab = set() for s in songs: if not s.lyrics: continue sent_vocab = set(word_tokenize_no_punct(s.lyrics)) vocab |= sent_vocab return list(vocab) def tf(mat, doc, term): s = np.sum(mat.getrow(doc).todense()) if s != 0: return mat[doc, term] / float(s) return 0 def idf(mat, term): s = mat.getcol(term).nnz if s != 0: return mat.shape[0] / float(s) return 0 def extract_keywords(engine): ''' - Constructs a TFIDF of all lyrics of all songs - Extracts the most meaningful keywords of each song - Updates the keyword table accordingly ''' songs = db_utils.song_lyrics(engine) # lyrics = [s.lyrics if s.lyrics else "" for s in songs] # tfidf = TfidfVectorizer(stop_words='english', # max_df=0.7) # tfidf_mat = tfidf.fit_transform(lyrics).toarray() vocab = songs_to_vocab(songs) w_indices = {k: idx for idx, k in enumerate(vocab)} # Construct document term frequency matrix # matrix # (word_idx, doc_idx) => word_doc_count matrix = scipy.sparse.lil_matrix((len(songs), len(vocab))) for i, s in enumerate(songs): if not s.lyrics: continue for w in word_tokenize_no_punct(s.lyrics): matrix[i, w_indices[w]] += 1 # Calculate tfidf score for each term # tfidf # (word_idx, doc_idx) => word_doc_tfidf_score tfidf = scipy.sparse.lil_matrix((len(songs), len(vocab))) nzx, nzy = matrix.nonzero() # Only conerned w/ nonzero term entries for i in range(len(nzx)): doc_idx, term_idx = nzx[i], nzy[i] term_freq = tf(matrix, doc_idx, term_idx) inv_doc_freq = idf(matrix, term_idx) tfidf[doc_idx, term_idx] = term_freq * inv_doc_freq print "Calculated TFIDF for all songs." 
# Do insertion for keywords of all songs for i in range(len(songs)): print "Inserting keywords ({}/{})".format(i, len(songs)) # Sort tfidf score descending, find 10 most relevant words max_indices = (-tfidf.getrow(i).toarray()[0]).argsort()[:10] song_id = songs[i].id # Delete old keywords db_utils.delete_song_keywords(engine, song_id) for term_idx in max_indices: if tfidf[i, term_idx] == 0: continue kw_str = vocab[int(term_idx)] kw_weight = tfidf[i, term_idx] # Do insertion into database db_utils.add_song_keyword(engine, song_id, kw_str, float(kw_weight)) def similar_songs(engine, song_id, num_results=5): ''' - Returns song most similar to the given song using cosine similarity ''' features = get_item_features(engine) sample_v = np.array(features.getrow(song_id).todense()) sample_norm = la.norm(sample_v) cos_diffs = [] for i in range(features.shape[0]): test_v = features.getrow(i).todense().T norm = sample_norm * la.norm(test_v) cos_diffs.append(np.dot(sample_v, test_v) / norm if norm != 0 else 0) most_similar = np.argsort(-np.array(cos_diffs)) similar_ids = [int(i) for i in most_similar if i != song_id][:5] return similar_ids
We’re back. Finally. Stay tuned for our new home.
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Driver for the DAD decompiler: wraps Dalvik methods/classes/whole dex
files and turns their bytecode into Java-like source (or a JSON AST)."""

import sys
sys.path.append('./')

import logging
import struct
from collections import defaultdict

import tools.modified.androguard.core.androconf as androconf
import tools.modified.androguard.decompiler.dad.util as util
from tools.modified.androguard.core.analysis import analysis
from tools.modified.androguard.core.bytecodes import apk, dvm
from tools.modified.androguard.decompiler.dad.ast import (
    JSONWriter, parse_descriptor, literal_string, literal_null, literal_int,
    literal_long, literal_float, literal_double, literal_bool,
    literal_hex_int, dummy)
from tools.modified.androguard.decompiler.dad.control_flow import identify_structures
from tools.modified.androguard.decompiler.dad.dataflow import (
    build_def_use, place_declarations, dead_code_elimination,
    register_propagation, split_variables)
from tools.modified.androguard.decompiler.dad.graph import construct, simplify, split_if_nodes
from tools.modified.androguard.decompiler.dad.instruction import Param, ThisParam
from tools.modified.androguard.decompiler.dad.writer import Writer
from tools.modified.androguard.util import read


def auto_vm(filename):
    """Return a DalvikVMFormat/DalvikOdexVMFormat for an APK, DEX or DEY
    file, or None when the file is not a recognized Android format."""
    ret = androconf.is_android(filename)
    if ret == 'APK':
        return dvm.DalvikVMFormat(apk.APK(filename).get_dex())
    elif ret == 'DEX':
        return dvm.DalvikVMFormat(read(filename))
    elif ret == 'DEY':
        return dvm.DalvikOdexVMFormat(read(filename))
    return None


# No separate DvField class currently
def get_field_ast(field):
    """Build the AST dictionary for a single class field, including its
    initial-value expression when one is present."""
    triple = field.get_class_name()[1:-1], field.get_name(), field.get_descriptor()

    expr = None
    if field.init_value:
        val = field.init_value.value
        expr = dummy(str(val))

        if val is not None:
            if field.get_descriptor() == 'Ljava/lang/String;':
                expr = literal_string(val)
            elif field.proto == 'B':
                # single byte: unpack as signed char and render as hex
                expr = literal_hex_int(struct.unpack('<b', val)[0])

    return {
        'triple': triple,
        'type': parse_descriptor(field.get_descriptor()),
        'flags': util.get_access_field(field.get_access_flags()),
        'expr': expr,
    }


class DvMethod(object):
    """Wraps one analyzed Dalvik method and drives its decompilation."""

    def __init__(self, methanalysis):
        method = methanalysis.get_method()
        self.method = method
        self.start_block = next(methanalysis.get_basic_blocks().get(), None)
        self.cls_name = method.get_class_name()
        self.name = method.get_name()
        self.lparams = []          # registers holding `this` + parameters
        self.var_to_name = defaultdict()
        self.writer = None         # set by process() when doAST is False
        self.graph = None
        self.ast = None            # set by process() when doAST is True

        self.access = util.get_access_method(method.get_access_flags())

        desc = method.get_descriptor()
        self.type = desc.split(')')[-1]
        self.params_type = util.get_params_type(desc)
        self.triple = method.get_triple()

        self.exceptions = methanalysis.exceptions.exceptions

        code = method.get_code()
        if code is None:
            logger.debug('No code : %s %s', self.name, self.cls_name)
        else:
            # Parameters live in the highest registers; `this` (for
            # non-static methods) comes first.
            start = code.registers_size - code.ins_size
            if 'static' not in self.access:
                self.var_to_name[start] = ThisParam(start, self.cls_name)
                self.lparams.append(start)
                start += 1
            num_param = 0
            for ptype in self.params_type:
                param = start + num_param
                self.lparams.append(param)
                self.var_to_name[param] = Param(param, ptype)
                # wide types (long/double) consume two registers
                num_param += util.get_type_size(ptype)
        if not __debug__:
            from tools.modified.androguard.core import bytecode
            bytecode.method2png('/tmp/dad/graphs/%s#%s.png' % \
                (self.cls_name.split('/')[-1][:-1], self.name), methanalysis)

    def process(self, doAST=False):
        """Run the full decompilation pipeline on this method; populates
        either self.ast (doAST=True) or self.writer (doAST=False)."""
        logger.debug('METHOD : %s', self.name)

        # Native methods... no blocks.
        if self.start_block is None:
            logger.debug('Native Method.')
            if doAST:
                self.ast = JSONWriter(None, self).get_ast()
            else:
                self.writer = Writer(None, self)
                self.writer.write_method()
            return

        graph = construct(self.start_block, self.var_to_name, self.exceptions)
        self.graph = graph

        if not __debug__:
            util.create_png(self.cls_name, self.name, graph, '/tmp/dad/blocks')

        # Dataflow passes; the order matters (SSA-like renaming first,
        # then DCE and copy propagation over the resulting chains).
        use_defs, def_uses = build_def_use(graph, self.lparams)
        split_variables(graph, self.var_to_name, def_uses, use_defs)
        dead_code_elimination(graph, def_uses, use_defs)
        register_propagation(graph, def_uses, use_defs)

        place_declarations(graph, self.var_to_name, def_uses, use_defs)
        del def_uses, use_defs
        # After the DCE pass, some nodes may be empty, so we can simplify the
        # graph to delete these nodes.
        # We start by restructuring the graph by spliting the conditional nodes
        # into a pre-header and a header part.
        split_if_nodes(graph)
        # We then simplify the graph by merging multiple statement nodes into
        # a single statement node when possible. This also delete empty nodes.
        simplify(graph)
        graph.compute_rpo()

        if not __debug__:
            util.create_png(self.cls_name, self.name, graph,
                            '/tmp/dad/pre-structured')

        identify_structures(graph, graph.immediate_dominators())

        if not __debug__:
            util.create_png(self.cls_name, self.name, graph,
                            '/tmp/dad/structured')

        if doAST:
            self.ast = JSONWriter(graph, self).get_ast()
        else:
            self.writer = Writer(graph, self)
            self.writer.write_method()

    def get_ast(self):
        # None until process(doAST=True) has run
        return self.ast

    def show_source(self):
        print self.get_source()

    def get_source(self):
        # Empty string until process(doAST=False) has run
        if self.writer:
            return '%s' % self.writer
        return ''

    def get_source_ext(self):
        if self.writer:
            return self.writer.str_ext()
        return []

    def __repr__(self):
        #return 'Method %s' % self.name
        return 'class DvMethod(object): %s' % self.name


class DvClass(object):
    """Wraps one Dalvik class; holds its methods (lazily promoted to
    DvMethod), fields and nested subclasses."""

    def __init__(self, dvclass, vma):
        name = dvclass.get_name()
        if name.find('/') > 0:
            pckg, name = name.rsplit('/', 1)
        else:
            pckg, name = '', name
        self.package = pckg[1:].replace('/', '.')
        self.name = name[:-1]

        self.vma = vma
        self.methods = dvclass.get_methods()
        self.fields = dvclass.get_fields()
        self.subclasses = {}
        self.code = []
        self.inner = False

        access = dvclass.get_access_flags()
        # If interface we remove the class and abstract keywords
        if 0x200 & access:
            prototype = '%s %s'
            if access & 0x400:
                access -= 0x400
        else:
            prototype = '%s class %s'

        self.access = util.get_access_class(access)
        self.prototype = prototype % (' '.join(self.access), self.name)

        self.interfaces = dvclass.get_interfaces()
        self.superclass = dvclass.get_superclassname()
        self.thisclass = dvclass.get_name()

        logger.info('Class : %s', self.name)
        logger.info('Methods added :')
        for meth in self.methods:
            logger.info('%s (%s, %s)', meth.get_method_idx(), self.name,
                        meth.name)
        logger.info('')

    def add_subclass(self, innername, dvclass):
        self.subclasses[innername] = dvclass
        dvclass.inner = True

    def get_methods(self):
        return self.methods

    def process_method(self, num, doAST=False):
        """Decompile the num-th method, promoting it to DvMethod on first
        use and releasing its raw instructions afterwards."""
        method = self.methods[num]
        if not isinstance(method, DvMethod):
            method.set_instructions([i for i in method.get_instructions()])
            self.methods[num] = DvMethod(self.vma.get_method(method))
            self.methods[num].process(doAST=doAST)
            # free the raw instruction list once decompiled
            method.set_instructions([])
        else:
            method.process(doAST=doAST)

    def process(self, doAST=False):
        """Decompile every subclass and method; failures are logged and
        skipped so one bad method does not abort the class."""
        for klass in self.subclasses.values():
            klass.process(doAST=doAST)
        for i in range(len(self.methods)):
            try:
                self.process_method(i, doAST=doAST)
            except Exception as e:
                logger.debug(
                    'Error decompiling method %s: %s', self.methods[i], e)

    def get_ast(self):
        """Return the JSON-serializable AST of the class (only methods
        already processed with doAST=True are included)."""
        fields = [get_field_ast(f) for f in self.fields]
        methods = [m.get_ast() for m in self.methods if m.ast is not None]
        isInterface = 'interface' in self.access
        return {
            'rawname': self.thisclass[1:-1],
            'name': parse_descriptor(self.thisclass),
            'super': parse_descriptor(self.superclass),
            'flags': self.access,
            'isInterface': isInterface,
            'interfaces': map(parse_descriptor, self.interfaces),
            'fields': fields,
            'methods': methods,
        }

    def get_source(self):
        """Render the decompiled class as a single Java-like source string."""
        source = []
        if not self.inner and self.package:
            source.append('package %s;\n' % self.package)

        superclass, prototype = self.superclass, self.prototype
        if superclass is not None and superclass != 'Ljava/lang/Object;':
            superclass = superclass[1:-1].replace('/', '.')
            prototype += ' extends %s' % superclass
        if len(self.interfaces) > 0:
            prototype += ' implements %s' % ', '.join(
                [n[1:-1].replace('/', '.') for n in self.interfaces])

        source.append('%s {\n' % prototype)
        for field in self.fields:
            name = field.get_name()
            access = util.get_access_field(field.get_access_flags())
            f_type = util.get_type(field.get_descriptor())
            source.append('    ')
            if access:
                source.append(' '.join(access))
                source.append(' ')
            if field.init_value:
                value = field.init_value.value
                if f_type == 'String':
                    value = '"%s"' % value
                elif field.proto == 'B':
                    value = '0x%x' % struct.unpack('b', value)[0]
                source.append('%s %s = %s;\n' % (f_type, name, value))
            else:
                source.append('%s %s;\n' % (f_type, name))

        for klass in self.subclasses.values():
            source.append(klass.get_source())

        for method in self.methods:
            if isinstance(method, DvMethod):
                source.append(method.get_source())
        source.append('}\n')
        return ''.join(source)

    def get_source_ext(self):
        """Render the class as a list of tagged tokens (used by GUIs to
        colorize / hyperlink the decompiled output)."""
        source = []
        if not self.inner and self.package:
            source.append(
                ('PACKAGE', [('PACKAGE_START', 'package '),
                             ('NAME_PACKAGE', '%s' % self.package),
                             ('PACKAGE_END', ';\n')]))
        list_proto = []
        list_proto.append(
            ('PROTOTYPE_ACCESS', '%s class ' % ' '.join(self.access)))
        list_proto.append(('NAME_PROTOTYPE', '%s' % self.name, self.package))
        superclass = self.superclass
        if superclass is not None and superclass != 'Ljava/lang/Object;':
            superclass = superclass[1:-1].replace('/', '.')
            list_proto.append(('EXTEND', ' extends '))
            list_proto.append(('NAME_SUPERCLASS', '%s' % superclass))

        if len(self.interfaces) > 0:
            list_proto.append(('IMPLEMENTS', ' implements '))
            for i, interface in enumerate(self.interfaces):
                if i != 0:
                    list_proto.append(('COMMA', ', '))
                list_proto.append(
                    ('NAME_INTERFACE', interface[1:-1].replace('/', '.')))
        list_proto.append(('PROTOTYPE_END', ' {\n'))
        source.append(("PROTOTYPE", list_proto))

        for field in self.fields:
            field_access_flags = field.get_access_flags()
            access = [util.ACCESS_FLAGS_FIELDS[flag] for flag in
                      util.ACCESS_FLAGS_FIELDS if flag & field_access_flags]
            f_type = util.get_type(field.get_descriptor())
            name = field.get_name()
            if access:
                access_str = ' %s ' % ' '.join(access)
            else:
                access_str = ' '
            source.append(
                ('FIELD', [('FIELD_ACCESS', access_str),
                           ('FIELD_TYPE', '%s' % f_type),
                           ('SPACE', ' '),
                           ('NAME_FIELD', '%s' % name, f_type, field),
                           ('FIELD_END', ';\n')]))

        #TODO: call get_source_ext for each subclass?
        for klass in self.subclasses.values():
            source.append((klass, klass.get_source()))

        for method in self.methods:
            if isinstance(method, DvMethod):
                source.append(("METHOD", method.get_source_ext()))
        source.append(("CLASS_END", [('CLASS_END', '}\n')]))
        return source

    def show_source(self):
        print self.get_source()

    def __repr__(self):
        if not self.subclasses:
            return 'Class(%s)' % self.name
        return 'Class(%s) -- Subclasses(%s)' % (self.name, self.subclasses)


class DvMachine(object):
    """Top-level entry point: loads an APK/DEX file, analyzes it, and gives
    access to its classes for decompilation."""

    def __init__(self, name):
        vm = auto_vm(name)
        if vm is None:
            raise ValueError('Format not recognised: %s' % name)
        self.vma = analysis.uVMAnalysis(vm)
        self.classes = dict((dvclass.get_name(), dvclass)
                            for dvclass in vm.get_classes())
        #util.merge_inner(self.classes)

    def get_classes(self):
        return self.classes.keys()

    def get_class(self, class_name):
        """Return the DvClass whose name contains class_name, promoting the
        raw class object to a DvClass on first access; None if not found."""
        for name, klass in self.classes.iteritems():
            if class_name in name:
                if isinstance(klass, DvClass):
                    return klass
                dvclass = self.classes[name] = DvClass(klass, self.vma)
                return dvclass

    def process(self):
        """Decompile every class in the file (lazily wrapping each one)."""
        for name, klass in self.classes.iteritems():
            logger.info('Processing class: %s', name)
            if isinstance(klass, DvClass):
                klass.process()
            else:
                dvclass = self.classes[name] = DvClass(klass, self.vma)
                dvclass.process()

    def show_source(self):
        for klass in self.classes.values():
            klass.show_source()

    def process_and_show(self):
        """Decompile and print every class, in sorted name order."""
        for name, klass in sorted(self.classes.iteritems()):
            logger.info('Processing class: %s', name)
            if not isinstance(klass, DvClass):
                klass = DvClass(klass, self.vma)
            klass.process()
            klass.show_source()


logger = logging.getLogger('dad')
# Deeply nested methods can exceed the default recursion limit during
# graph restructuring.
sys.setrecursionlimit(5000)


def main():
    """Interactive CLI: load a file, list its classes and decompile the
    class/method chosen by the user ('*' decompiles everything)."""
    # logger.setLevel(logging.DEBUG) for debugging output
    # comment the line to disable the logging.
    logger.setLevel(logging.INFO)
    console_hdlr = logging.StreamHandler(sys.stdout)
    console_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logger.addHandler(console_hdlr)

    default_file = 'examples/android/TestsAndroguard/bin/TestActivity.apk'
    if len(sys.argv) > 1:
        machine = DvMachine(sys.argv[1])
    else:
        machine = DvMachine(default_file)

    logger.info('========================')
    logger.info('Classes:')
    for class_name in sorted(machine.get_classes()):
        logger.info(' %s', class_name)
    logger.info('========================')

    cls_name = raw_input('Choose a class: ')
    if cls_name == '*':
        machine.process_and_show()
    else:
        cls = machine.get_class(cls_name)
        if cls is None:
            logger.error('%s not found.', cls_name)
        else:
            logger.info('======================')
            for i, method in enumerate(cls.get_methods()):
                logger.info('%d: %s', i, method.name)
            logger.info('======================')
            meth = raw_input('Method: ')
            if meth == '*':
                logger.info('CLASS = %s', cls)
                cls.process()
            else:
                cls.process_method(int(meth))
            logger.info('Source:')
            logger.info('===========================')
            cls.show_source()


if __name__ == '__main__':
    main()
A golden retriever named Charlie is blind due to glaucoma, but gets around with the help of a furry friend. With so many stories of crime, hate, and division ruling the recent news cycles, some positive news really goes a long way. The story behind a sweet photo of a golden retriever named Charlie has been winning over the internet. Charlie is 11 years old and had to have both his eyes removed due to an eye condition. Nevertheless, he’s been given the friend of a lifetime that helps him get around. Charlie has his own “seeing eye” puppy named Maverick, according to Today. Maverick, a golden retriever himself, is only 4 months old. He may be young, but he has a big job to do. He’s not only Charlie’s best friend, but acts as his set of eyes. Charlie and Maverick’s owners, Adam and Chelsea Stipe, had to make the difficult decision to have Charlie’s left eye removed in 2016. He suffered from glaucoma, a condition that affects the optic nerve and can cause a lot of pain as it progresses. Shortly after losing his left eye, the dog had to lose his right eye due to the same condition, leaving him completely blind. Maverick was welcomed into the Stipe family this past January. In the beginning, Adam and Chelsea weren’t sure how well the dogs would co-exist together. However, after a bit of an adjustment, they became good friends. Maverick seems to be aware of Charlie’s condition at some level and walks alongside him to assist him as he moves around their family home in Mooresville, North Carolina. The pup has helped Charlie get exercise and enjoy himself again, something he couldn’t do as easily after his surgery. Chelsea says that the puppy also helps Charlie out in an adorable way if he has trouble during playtime. The Stipe family created an Instagram page dedicated to the adventures that Maverick and Charlie have together. They simply wanted to document the sweet bond the dogs share and bring a little bit of light into the lives of others. 
They had no idea how many people the account would touch. “We love how positive the community is and how happy (the dogs) are making everyone,” Chelsea said.
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'certified'}

DOCUMENTATION = '''
module: ec2_vpc_nacl
short_description: create and delete Network ACLs.
description:
  - Read the AWS documentation for Network ACLS
    U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
version_added: "2.2"
options:
  name:
    description:
      - Tagged name identifying a network ACL.
      - One and only one of the I(name) or I(nacl_id) is required.
    required: false
  nacl_id:
    description:
      - NACL id identifying a network ACL.
      - One and only one of the I(name) or I(nacl_id) is required.
    required: false
    version_added: "2.4"
  vpc_id:
    description:
      - VPC id of the requesting VPC.
      - Required when state present.
    required: false
  subnets:
    description:
      - The list of subnets that should be associated with the network ACL.
      - Must be specified as a list
      - Each subnet can be specified as subnet ID, or its tagged name.
    required: false
  egress:
    description:
      - A list of rules for outgoing traffic.
      - Each rule must be specified as a list.
    required: false
  ingress:
    description:
      - List of rules for incoming traffic.
      - Each rule must be specified as a list.
    required: false
  tags:
    description:
      - Dictionary of tags to look for and apply when creating a network ACL.
    required: false
  state:
    description:
      - Creates or modifies an existing NACL
      - Deletes a NACL and reassociates subnets to the default NACL
    required: false
    choices: ['present', 'absent']
    default: present
author: Mike Mochan(@mmochan)
extends_documentation_fragment: aws
requirements: [ botocore, boto3, json ]
'''

EXAMPLES = '''

# Complete example to create and delete a network ACL
# that allows SSH, HTTP and ICMP in, and all traffic out.
- name: "Create and associate production DMZ network ACL with DMZ subnets"
  ec2_vpc_nacl:
    vpc_id: vpc-12345678
    name: prod-dmz-nacl
    region: ap-southeast-2
    subnets: ['prod-dmz-1', 'prod-dmz-2']
    tags:
      CostCode: CC1234
      Project: phoenix
      Description: production DMZ
    ingress: [
        # rule no, protocol, allow/deny, cidr, icmp_code, icmp_type,
        # port from, port to
        [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22],
        [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80],
        [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8],
    ]
    egress: [
        [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
    ]
    state: 'present'

- name: "Remove the ingress and egress rules - defaults to deny all"
  ec2_vpc_nacl:
    vpc_id: vpc-12345678
    name: prod-dmz-nacl
    region: ap-southeast-2
    subnets:
      - prod-dmz-1
      - prod-dmz-2
    tags:
      CostCode: CC1234
      Project: phoenix
      Description: production DMZ
    state: present

- name: "Remove the NACL subnet associations and tags"
  ec2_vpc_nacl:
    vpc_id: 'vpc-12345678'
    name: prod-dmz-nacl
    region: ap-southeast-2
    state: present

- name: "Delete nacl and subnet associations"
  ec2_vpc_nacl:
    vpc_id: vpc-12345678
    name: prod-dmz-nacl
    state: absent

- name: "Delete nacl by its id"
  ec2_vpc_nacl:
    nacl_id: acl-33b4ee5b
    state: absent
'''
RETURN = '''
task:
  description: The result of the create, or delete action.
  returned: success
  type: dictionary
'''

try:
    import botocore
    import boto3
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info

# Common fields for the default rule that is contained within every VPC NACL.
DEFAULT_RULE_FIELDS = {
    'RuleNumber': 32767,
    'RuleAction': 'deny',
    'CidrBlock': '0.0.0.0/0',
    'Protocol': '-1'
}

DEFAULT_INGRESS = dict(list(DEFAULT_RULE_FIELDS.items()) + [('Egress', False)])
DEFAULT_EGRESS = dict(list(DEFAULT_RULE_FIELDS.items()) + [('Egress', True)])

# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }


# Utility methods
def icmp_present(entry):
    """True when a rule entry carries ICMP type/code fields.

    NOTE(review): `and` binds tighter than `or`, so this also returns True
    for ANY entry whose protocol field equals 1, regardless of length —
    confirm this precedence is intentional.
    """
    if len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1:
        return True


def load_tags(module):
    """Build the boto3 tag list from module params; a 'Name' tag derived
    from the `name` param is always included."""
    tags = []
    if module.params.get('tags'):
        for name, value in module.params.get('tags').items():
            tags.append({'Key': name, 'Value': str(value)})
        tags.append({'Key': "Name", 'Value': module.params.get('name')})
    else:
        tags.append({'Key': "Name", 'Value': module.params.get('name')})
    return tags


def subnets_removed(nacl_id, subnets, client, module):
    """Subnet ids currently associated with the NACL but absent from the
    requested `subnets` list."""
    results = find_acl_by_id(nacl_id, client, module)
    associations = results['NetworkAcls'][0]['Associations']
    subnet_ids = [assoc['SubnetId'] for assoc in associations]
    return [subnet for subnet in subnet_ids if subnet not in subnets]


def subnets_added(nacl_id, subnets, client, module):
    """Requested subnet ids that are not yet associated with the NACL."""
    results = find_acl_by_id(nacl_id, client, module)
    associations = results['NetworkAcls'][0]['Associations']
    subnet_ids = [assoc['SubnetId'] for assoc in associations]
    return [subnet for subnet in subnets if subnet not in subnet_ids]


def subnets_changed(nacl, client, module):
    """Reconcile the NACL's subnet associations with the requested list;
    returns True when any association was added or moved back to the
    VPC's default NACL."""
    changed = False
    vpc_id = module.params.get('vpc_id')
    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
    subnets = subnets_to_associate(nacl, client, module)
    if not subnets:
        # No subnets requested: hand any current associations back to the
        # default NACL.
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
        subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
        if subnets:
            replace_network_acl_association(default_nacl_id, subnets,
                                            client, module)
            changed = True
            return changed
        changed = False
        return changed
    subs_added = subnets_added(nacl_id, subnets, client, module)
    if subs_added:
        replace_network_acl_association(nacl_id, subs_added, client, module)
        changed = True
    subs_removed = subnets_removed(nacl_id, subnets, client, module)
    if subs_removed:
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
        replace_network_acl_association(default_nacl_id, subs_removed,
                                        client, module)
        changed = True
    return changed


def nacls_changed(nacl, client, module):
    """Reconcile the NACL's ingress/egress entries (minus the immutable
    default deny-all rules) with the requested rules."""
    changed = False
    params = dict()
    params['egress'] = module.params.get('egress')
    params['ingress'] = module.params.get('ingress')

    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
    nacl = describe_network_acl(client, module)
    entries = nacl['NetworkAcls'][0]['Entries']
    tmp_egress = [entry for entry in entries if entry['Egress'] is True and DEFAULT_EGRESS != entry]
    tmp_ingress = [entry for entry in entries if entry['Egress'] is False]
    egress = [rule for rule in tmp_egress if DEFAULT_EGRESS != rule]
    ingress = [rule for rule in tmp_ingress if DEFAULT_INGRESS != rule]
    if rules_changed(egress, params['egress'], True, nacl_id, client, module):
        changed = True
    if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
        changed = True
    return changed


def tags_changed(nacl_id, client, module):
    """Compare the NACL's current tags with the requested ones and rewrite
    them (delete + create) when they differ."""
    changed = False
    tags = dict()
    if module.params.get('tags'):
        tags = module.params.get('tags')
    tags['Name'] = module.params.get('name')
    nacl = find_acl_by_id(nacl_id, client, module)
    if nacl['NetworkAcls']:
        # Flatten both tag sets to sorted key/value lists for comparison.
        nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
        nacl_tags = [item for sublist in nacl_values for item in sublist]
        tag_values = [[key, str(value)] for key, value in tags.items()]
        tags = [item for sublist in tag_values for item in sublist]
        if sorted(nacl_tags) == sorted(tags):
            changed = False
            return changed
        else:
            delete_tags(nacl_id, client, module)
            create_tags(nacl_id, client, module)
            changed = True
            return changed
    return changed


def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
    """Diff requested rules against current AWS rules; delete stale entries
    and create missing ones. Returns True when anything changed."""
    changed = False
    rules = list()
    for entry in param_rules:
        rules.append(process_rule_entry(entry, Egress))
    if rules == aws_rules:
        return changed
    else:
        removed_rules = [x for x in aws_rules if x not in rules]
        if removed_rules:
            params = dict()
            for rule in removed_rules:
                params['NetworkAclId'] = nacl_id
                params['RuleNumber'] = rule['RuleNumber']
                params['Egress'] = Egress
                delete_network_acl_entry(params, client, module)
            changed = True
        added_rules = [x for x in rules if x not in aws_rules]
        if added_rules:
            for rule in added_rules:
                rule['NetworkAclId'] = nacl_id
                create_network_acl_entry(rule, client, module)
            changed = True
    return changed


def process_rule_entry(entry, Egress):
    """Convert a module rule list
    [num, proto, action, cidr, icmp_code, icmp_type, port_from, port_to]
    into boto3 create_network_acl_entry kwargs.

    NOTE(review): the port check is truthiness-based, so a rule whose
    from/to ports are both 0 gets no PortRange — confirm intended.
    """
    params = dict()
    params['RuleNumber'] = entry[0]
    params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
    params['RuleAction'] = entry[2]
    params['Egress'] = Egress
    params['CidrBlock'] = entry[3]
    if icmp_present(entry):
        params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
    else:
        if entry[6] or entry[7]:
            params['PortRange'] = {"From": entry[6], 'To': entry[7]}
    return params


def restore_default_associations(assoc_ids, default_nacl_id, client, module):
    """Point each given association back at the VPC's default NACL."""
    if assoc_ids:
        params = dict()
        params['NetworkAclId'] = default_nacl_id[0]
        for assoc_id in assoc_ids:
            params['AssociationId'] = assoc_id
            restore_default_acl_association(params, client, module)
        return True


def construct_acl_entries(nacl, client, module):
    """Create all requested ingress and egress entries on a new NACL."""
    for entry in module.params.get('ingress'):
        params = process_rule_entry(entry, Egress=False)
        params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
        create_network_acl_entry(params, client, module)
    for rule in module.params.get('egress'):
        params = process_rule_entry(rule, Egress=True)
        params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
        create_network_acl_entry(params, client, module)


# Module invocations
def setup_network_acl(client, module):
    """Create the NACL (with tags, associations and rules) when missing,
    otherwise reconcile subnets, rules and tags. Returns (changed, nacl_id)."""
    changed = False
    nacl = describe_network_acl(client, module)
    if not nacl['NetworkAcls']:
        nacl = create_network_acl(module.params.get('vpc_id'), client, module)
        nacl_id = nacl['NetworkAcl']['NetworkAclId']
        create_tags(nacl_id, client, module)
        subnets = subnets_to_associate(nacl, client, module)
        replace_network_acl_association(nacl_id, subnets, client, module)
        construct_acl_entries(nacl, client, module)
        changed = True
        return(changed, nacl['NetworkAcl']['NetworkAclId'])
    else:
        changed = False
        nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
        subnet_result = subnets_changed(nacl, client, module)
        nacl_result = nacls_changed(nacl, client, module)
        tag_result = tags_changed(nacl_id, client, module)
        if subnet_result is True or nacl_result is True or tag_result is True:
            changed = True
            return(changed, nacl_id)
        return (changed, nacl_id)


def remove_network_acl(client, module):
    """Delete the NACL after re-pointing its subnet associations to the
    VPC's default NACL. Returns (changed, result_dict)."""
    changed = False
    result = dict()
    nacl = describe_network_acl(client, module)
    if nacl['NetworkAcls']:
        nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
        vpc_id = nacl['NetworkAcls'][0]['VpcId']
        associations = nacl['NetworkAcls'][0]['Associations']
        assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
        if not default_nacl_id:
            result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
            return changed, result
        if restore_default_associations(assoc_ids, default_nacl_id, client, module):
            delete_network_acl(nacl_id, client, module)
            changed = True
            result[nacl_id] = "Successfully deleted"
            return changed, result
        if not assoc_ids:
            delete_network_acl(nacl_id, client, module)
            changed = True
            result[nacl_id] = "Successfully deleted"
            return changed, result
    return changed, result


# Boto3 client methods
def create_network_acl(vpc_id, client, module):
    """Create a NACL in the VPC (stubbed with a fake id in check mode)."""
    try:
        if module.check_mode:
            nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000"))
        else:
            nacl = client.create_network_acl(VpcId=vpc_id)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return nacl


def create_network_acl_entry(params, client, module):
    try:
        if not module.check_mode:
            client.create_network_acl_entry(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))


def create_tags(nacl_id, client, module):
    """Replace the NACL's tags: existing tags are deleted first."""
    try:
        delete_tags(nacl_id, client, module)
        if not module.check_mode:
            client.create_tags(Resources=[nacl_id], Tags=load_tags(module))
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))


def delete_network_acl(nacl_id, client, module):
    try:
        if not module.check_mode:
            client.delete_network_acl(NetworkAclId=nacl_id)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))


def delete_network_acl_entry(params, client, module):
    try:
        if not module.check_mode:
            client.delete_network_acl_entry(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))


def delete_tags(nacl_id, client, module):
    try:
        if not module.check_mode:
            client.delete_tags(Resources=[nacl_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))


def describe_acl_associations(subnets, client, module):
    """Association ids of the given subnets on their current NACL."""
    if not subnets:
        return []
    try:
        results = client.describe_network_acls(Filters=[
            {'Name': 'association.subnet-id', 'Values': subnets}
        ])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    associations = results['NetworkAcls'][0]['Associations']
    return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets]


def describe_network_acl(client, module):
    """Look up the NACL by `nacl_id` when given, else by its Name tag."""
    try:
        if module.params.get('nacl_id'):
            nacl = client.describe_network_acls(Filters=[
                {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]}
            ])
        else:
            nacl = client.describe_network_acls(Filters=[
                {'Name': 'tag:Name', 'Values': [module.params.get('name')]}
            ])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return nacl


def find_acl_by_id(nacl_id, client, module):
    try:
        return client.describe_network_acls(NetworkAclIds=[nacl_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))


def find_default_vpc_nacl(vpc_id, client, module):
    """Return a (possibly empty) list with the VPC's default NACL id."""
    try:
        response = client.describe_network_acls(Filters=[
            {'Name': 'vpc-id', 'Values': [vpc_id]}])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    nacls = response['NetworkAcls']
    return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True]


def find_subnet_ids_by_nacl_id(nacl_id, client, module):
    try:
        results = client.describe_network_acls(Filters=[
            {'Name': 'association.network-acl-id', 'Values': [nacl_id]}
        ])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    if results['NetworkAcls']:
        associations = results['NetworkAcls'][0]['Associations']
        return [s['SubnetId'] for s in associations if s['SubnetId']]
    else:
        return []


def replace_network_acl_association(nacl_id, subnets, client, module):
    """Re-point every association of the given subnets at nacl_id."""
    params = dict()
    params['NetworkAclId'] = nacl_id
    for association in describe_acl_associations(subnets, client, module):
        params['AssociationId'] = association
        try:
            if not module.check_mode:
                client.replace_network_acl_association(**params)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))


def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
    params = dict()
    for entry in entries:
        params = entry
        params['NetworkAclId'] = nacl_id
        try:
            if not module.check_mode:
                client.replace_network_acl_entry(**params)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))


def restore_default_acl_association(params, client, module):
    try:
        if not module.check_mode:
            client.replace_network_acl_association(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))


def subnets_to_associate(nacl, client, module):
    """Resolve the `subnets` param (ids or Name tags) to subnet records.

    NOTE(review): this chunk appears truncated here — the trailing `return`
    carries no value in the visible source.
    """
    params = list(module.params.get('subnets'))
    if not params:
        return []
    if params[0].startswith("subnet-"):
        try:
            subnets = client.describe_subnets(Filters=[
                {'Name': 'subnet-id', 'Values': params}])
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
    else:
        try:
            subnets = client.describe_subnets(Filters=[
                {'Name': 'tag:Name', 'Values': params}])
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
    return
[s['SubnetId'] for s in subnets['Subnets'] if s['SubnetId']] def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( vpc_id=dict(), name=dict(), nacl_id=dict(), subnets=dict(required=False, type='list', default=list()), tags=dict(required=False, type='dict'), ingress=dict(required=False, type='list', default=list()), egress=dict(required=False, type='list', default=list(),), state=dict(default='present', choices=['present', 'absent']), ), ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=[['name', 'nacl_id']], required_if=[['state', 'present', ['vpc_id']]]) if not HAS_BOTO3: module.fail_json(msg='json, botocore and boto3 are required.') state = module.params.get('state').lower() try: region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs) except botocore.exceptions.NoCredentialsError as e: module.fail_json(msg="Can't authorize connection - %s" % str(e)) invocations = { "present": setup_network_acl, "absent": remove_network_acl } (changed, results) = invocations[state](client, module) module.exit_json(changed=changed, nacl_id=results) if __name__ == '__main__': main()
Sterling had the pleasure of speaking with Nicole Fee, Development Manager, and Michanne Bohanon, Volunteer and 2018 East Bay March for Babies Ambassador, for March of Dimes. March of Dimes leads the fight for the health of all moms and babies. By lobbying for policies to protect them, working to radically improve the healthcare they receive, pioneering research to find solutions to the toughest problems and empowering families with the knowledge and tools to have healthier pregnancies, March of Dimes builds on an 80+ year legacy of impact and innovation to support every mom and every baby.
# -*- coding: utf-8 -*-
from cities_light.models import City
from configuration.models import ProcessNumber, PrincipalInvestigator
from dal import autocomplete
from .models import ScientificMission, Route
from django import forms
from django.utils.translation import ugettext_lazy as _
from person.models import Person
from helpers.forms.date_range import DateInput

# Reimbursement categories used by AnnexSevenForm (value, display label).
annex_seven_choices = ((0, '----------------'),
                       (1, 'transporte aéreo'),
                       (2, 'transporte terrestre'),
                       (3, 'seguro saúde'))


class ProcessField(forms.CharField):
    """CharField that passes the raw process-number string through untouched.

    All three hooks are deliberate no-ops beyond the parent's required-field
    handling, so the value is neither coerced nor normalized.
    """

    def to_python(self, value):
        # Return the raw input unchanged (no coercion).
        return value

    def validate(self, value):
        """Delegate to the parent's handling of required fields, etc."""
        super(ProcessField, self).validate(value)

    def clean(self, value):
        # Skip the usual to_python/validate/run_validators pipeline on purpose.
        return value


class RouteForm(forms.ModelForm):
    """Form for a travel Route with autocompleted origin/destination cities."""

    class Meta:
        model = Route
        fields = ('origin_city', 'destination_city', 'departure', 'arrival')
        widgets = {
            'origin_city': autocomplete.ModelSelect2(url='city_autocomplete'),
            'destination_city': autocomplete.ModelSelect2(url='city_autocomplete'),
        }

    class Media:
        css = {
            'all': ('/static/css/inline_autocomplete.css',)
        }


class ScientificMissionForm(forms.ModelForm):
    """Full ScientificMission form with an autocompleted destination city."""

    class Meta:
        model = ScientificMission
        fields = '__all__'
        widgets = {
            'destination_city': autocomplete.ModelSelect2(url='city_autocomplete'),
        }
        localized_fields = ('amount_paid',)


class AnnexSixForm(forms.Form):
    """Annex VI: a value plus an optional date range."""
    # process = ProcessNumber.get_solo()
    value = forms.DecimalField(label=_('Value'), max_digits=10, decimal_places=2, required=True)
    start_date = forms.DateField(label=_('Start date'), widget=DateInput, required=False)
    end_date = forms.DateField(label=_('End date'), widget=DateInput, required=False)
    # process = ProcessField(label=_('Process'), widget=forms.TextInput(attrs={'placeholder': process.process_number}))

    class Media:
        css = {
            'all': ('/static/css/inline_autocomplete.css',)
        }

    def clean(self):
        cleaned_data = super(AnnexSixForm, self).clean()
        # BUG FIX: Django's Form.clean() contract requires returning the
        # cleaned data; the previous version dropped it (and only bound two
        # unused locals), which breaks cleaned_data access downstream.
        return cleaned_data


class AnnexSevenForm(forms.Form):
    """Annex VII: reimbursement request against FAPESP or the PI."""
    # The provider choices are computed once at import time; when no
    # principal investigator is configured, only FAPESP is offered.
    try:
        principal_investigator = PrincipalInvestigator.get_solo()
        name = principal_investigator.name
    except Exception:  # narrowed from a bare except: still best-effort lookup
        name = None

    if name:
        CHOICES = (
            ('1', 'FAPESP'),
            ('2', name),
        )
    else:
        CHOICES = (
            ('1', 'FAPESP'),
        )

    # process = ProcessNumber.get_solo()
    choice = forms.ChoiceField(label=_('Provider'), choices=CHOICES, required=True)
    start_date = forms.DateField(label=_('Start date'), widget=DateInput, required=False)
    end_date = forms.DateField(label=_('End date'), widget=DateInput, required=False)
    stretch = forms.CharField(label=_('Stretch'), required=True)
    reimbursement = forms.ChoiceField(label=_('Reimbursement'), choices=annex_seven_choices, required=True)
    person = forms.ModelChoiceField(label=_('Person'), queryset=Person.objects.all(),
                                    empty_label="----------", required=True)
    value = forms.DecimalField(label=_('Value'), max_digits=10, decimal_places=2, required=True)
    # process = ProcessField(label=_('Process'), widget=forms.TextInput(
    #     attrs={'placeholder': process.process_number}))


class AnnexNineForm(forms.Form):
    """Annex IX: a service job, its provider, and the amount charged."""
    # process = ProcessNumber.get_solo()
    job = forms.CharField(label=_('Job'), required=True)
    person = forms.ModelChoiceField(label=_('Service provider'), queryset=Person.objects.all(),
                                    empty_label="----------", required=True)
    note = forms.BooleanField(label=_('Note'), initial=True, required=False)
    value = forms.DecimalField(label=_('Value'), max_digits=10, decimal_places=2, required=True)
    # process = ProcessField(label=_('Process'), widget=forms.TextInput(
    #     attrs={'placeholder': process.process_number}))
The untimely assassination of beloved rapper Nipsey Hussle sent shock waves through the hip-hop community. The Victory Lap rapper was gunned down in front of his Marathon store Sunday night. He was reportedly shot six times and later pronounced dead at a local hospital. Hussle, a member of the Crips gang, used his platform to promote positivity, change in the community and educate the youth about financial freedom. Hussle had two children, one with actress Lauren London. Before his death, he was seen posing with fans in the parking lot of his Marathon store. In his last tweet, which was posted just moments before he was shot, he talked about finding the blessing in having strong enemies. Sad to see you go Nip, I’m only finding peace in knowing you are blessed. As you know, I was honored to share in such an amazing year with you musically. We spoke abt how they let the “real ones” in the building and how the Grammys “got it right” this time. I would gauge everyone’s music credibility based on if they mentioned you. “Don’t pair me or compare me, unless the said party is a rare breed...” - self You definitely are a rare breed and I thank you for being genuine to me and in your mission to empower your people...Always love.
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""type-providers create command."""

from googlecloudsdk.api_lib.deployment_manager import dm_labels
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.deployment_manager import dm_beta_base
from googlecloudsdk.command_lib.deployment_manager import dm_write
from googlecloudsdk.command_lib.deployment_manager import flags
from googlecloudsdk.command_lib.deployment_manager import type_providers
from googlecloudsdk.command_lib.util import labels_util
from googlecloudsdk.core import log


# NOTE(review): `async` is used as an identifier here and below; that is a
# reserved keyword from Python 3.7 on, so this module is Python-2-only as
# written. Renaming would require changing the shared flags/log API too.
def LogResource(request, async):
  """Log the created type provider (callback passed to dm_write.Execute)."""
  log.CreatedResource(request.typeProvider.name,
                      kind='type_provider',
                      async=async)


@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Create(base.CreateCommand):
  """Create a type provider.

  This command inserts (creates) a new type provider based on a provided
  configuration file.
  """

  detailed_help = {
      'DESCRIPTION': '{description}',
      'EXAMPLES': """\
          To create a new type provider, run:

            $ {command} my-type-provider --api-options-file=my-options.yaml --descriptor-url <descriptor URL> --description "My type."
          """,
  }

  @staticmethod
  def Args(parser):
    """Args is called by calliope to gather arguments for this command.

    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
    """
    flags.AddAsyncFlag(parser)
    type_providers.AddTypeProviderNameFlag(parser)
    type_providers.AddApiOptionsFileFlag(parser)
    type_providers.AddDescriptionFlag(parser)
    type_providers.AddDescriptorUrlFlag(parser)
    labels_util.AddCreateLabelsFlags(parser)

  def Run(self, args):
    """Run 'type-providers create'.

    Args:
      args: argparse.Namespace, The arguments that this command was
          invoked with.

    Raises:
      HttpException: An http error response was received while executing api
          request.
    """
    messages = dm_beta_base.GetMessages()
    # Resolve the provider name against the typeProviders collection so we
    # get both the project and the short provider name back.
    type_provider_ref = dm_beta_base.GetResources().Parse(
        args.provider_name,
        collection='deploymentmanager.typeProviders')
    update_labels_dict = labels_util.GetUpdateLabelsDictFromArgs(args)
    # Creation starts from an empty label list; only --labels are applied.
    labels = dm_labels.UpdateLabels([],
                                    messages.TypeProviderLabelEntry,
                                    update_labels=update_labels_dict)
    type_provider = messages.TypeProvider(
        name=type_provider_ref.typeProvider,
        description=args.description,
        descriptorUrl=args.descriptor_url,
        labels=labels)
    # Merge options from --api-options-file into the provider message.
    type_providers.AddOptions(args.api_options_file, type_provider)

    request = messages.DeploymentmanagerTypeProvidersInsertRequest(
        project=type_provider_ref.project,
        typeProvider=type_provider)

    dm_write.Execute(request,
                     args.async,
                     dm_beta_base.GetClient().typeProviders.Insert,
                     LogResource)
AboutClick here to learn more about the community. EventsThe schedule and a list of all the events we sponsor. SocialKeep in touch with your locals! Forums and telegram link. TelegramThis will open a link to join our Telegram chat! CharityDoing our best for the less fortunate: humanitarian aid, animal care, and LGBTQ health. Currently TFS is providing charity to Mazzoni Center. To visit Mazzoni's website, CLICK HERE. The Furst State Foundation is in the process of working to become incorporated under 501(c)(3). All photos used in our slideshows can be found on The Furst State Flickr page linked at the top right. "TFS" is an abbreviation for "The Furst State" and is commonly used by the community. Copyright © The Furst State Foundation. All rights reserved.
from django.conf import settings
from django.db import models
from django.db.utils import IntegrityError

# NOTE(review): `__unicode__` and ForeignKey-without-on_delete below indicate
# this targets Python 2 / Django < 2.0 — confirm before upgrading.


class DataHubLegacyUser(models.Model):
    """DataHub's old User model. Replaced by the Django User model."""
    id = models.AutoField(primary_key=True)
    email = models.CharField(max_length=100, unique=True)
    username = models.CharField(max_length=50, unique=True)
    f_name = models.CharField(max_length=50, null=True)
    l_name = models.CharField(max_length=50, null=True)
    password = models.CharField(max_length=50)
    active = models.BooleanField(default=False)

    def __unicode__(self):
        return self.username

    class Meta:
        db_table = "datahub_legacy_users"


class Card(models.Model):
    """A saved, optionally public query attached to a repo."""
    id = models.AutoField(primary_key=True)
    timestamp = models.DateTimeField(auto_now=True)
    repo_base = models.CharField(max_length=50)
    repo_name = models.CharField(max_length=50)
    card_name = models.CharField(max_length=50)
    public = models.BooleanField(default=False)
    query = models.TextField()

    def __unicode__(self):
        return 'card: %s.%s %s' % (
            self.repo_base, self.repo_name, self.card_name)

    class Meta:
        db_table = "cards"
        # A card name is unique within one repo of one base.
        unique_together = ('repo_base', 'repo_name', 'card_name')


class Annotation(models.Model):
    """Free-form annotation text keyed by a unique URL path."""
    id = models.AutoField(primary_key=True)
    timestamp = models.DateTimeField(auto_now=True)
    url_path = models.CharField(max_length=500, unique=True)
    annotation_text = models.TextField()

    def __unicode__(self):
        return self.url_path

    class Meta:
        db_table = "annotations"


# Thrift Apps
class App(models.Model):
    """A registered (Thrift) application and its access token."""
    id = models.AutoField(primary_key=True)
    timestamp = models.DateTimeField(auto_now=True)
    app_id = models.CharField(max_length=100, unique=True)
    app_name = models.CharField(max_length=100)
    app_token = models.CharField(max_length=500)
    # Either the legacy user or the current Django user may own an app.
    legacy_user = models.ForeignKey('DataHubLegacyUser', null=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)

    def __unicode__(self):
        return self.app_name

    class Meta:
        db_table = "apps"


class Collaborator(models.Model):
    """Grant of repo permissions to exactly one user OR one app."""
    id = models.AutoField(primary_key=True)
    timestamp = models.DateTimeField(auto_now=True)

    # user is the person permission is being granted to.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
    app = models.ForeignKey('App', null=True)

    repo_name = models.TextField()
    repo_base = models.TextField()
    permission = models.TextField()  # e.g. 'SELECT, UPDATE, INSERT'
    file_permission = models.TextField()  # e.g. 'read, write'
    license_id = models.IntegerField(default=-1)

    def __unicode__(self):
        if self.user:
            c = self.user
        elif self.app:
            c = self.app
        else:
            c = ''
        return "{base}.{repo}/{collaborator}".format(
            base=self.repo_base, repo=self.repo_name, collaborator=c)

    def save(self, *args, **kwargs):
        # Enforce the XOR invariant at save time: exactly one of user/app.
        if bool(self.user) == bool(self.app):
            raise IntegrityError(
                "Collaborator objects must have an associated user or app, "
                "not both or neither.")
        super(Collaborator, self).save(*args, **kwargs)

    class Meta:
        db_table = "collaborators"
        unique_together = ('repo_name', 'repo_base', 'user', 'app')


class LicenseView(models.Model):
    """A license-restricted SQL view generated over a repo table."""
    id = models.AutoField(primary_key=True)
    start_date = models.DateTimeField(auto_now=True)
    end_date = models.DateTimeField(auto_now=True)

    # view_sql is the sql used to generate the view from the table
    view_sql = models.TextField()
    repo_name = models.TextField()
    repo_base = models.TextField()
    table = models.TextField()
    license_id = models.IntegerField()

    class Meta:
        db_table = "license_views"
        unique_together = ('repo_name', 'repo_base', 'table', 'license_id')

    def __unicode__(self):
        return """
            Base: {base}\n
            Repo: {repo}\n
            Table: {table}\n
            Viewsql: {view_sql}\n
            LicenseID: {license_id} \n
            ID: {id}\n
            """.format(
            base=self.repo_base,
            repo=self.repo_name,
            table=self.table,
            view_sql=self.view_sql,
            license_id=self.license_id,
            id=self.id)
With a Dieci system, you can easily build very high quality audio systems that are install-friendly. The woofers use the proprietary “V” cone membrane, ensuring the best performance. The DT 16 neodymium tweeter provides wide, detailed high frequency response. The in-line hi/lo pass passive crossovers, and the rich accessory kit provided with these systems increase the installation possibilities.
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 16:05
# Auto-generated migration: renames city/address fields to the Google
# geocoding vocabulary (administrative_area / formatted_address), drops
# zip_code, and adds structured address-component fields.
#
# NOTE(review): latitude, longitude and street_number are declared with
# blank=True and default=None but WITHOUT null=True — saving the default
# would violate the NOT NULL column constraint. Confirm against the model
# definitions before relying on these defaults.
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('crm', '0004_auto_20161023_1143'),
    ]

    operations = [
        migrations.RenameField(
            model_name='inboundcontactaddress',
            old_name='city',
            new_name='administrative_area',
        ),
        migrations.RenameField(
            model_name='inboundcontactaddress',
            old_name='address',
            new_name='formatted_address',
        ),
        migrations.RenameField(
            model_name='individualaddress',
            old_name='city',
            new_name='administrative_area',
        ),
        migrations.RenameField(
            model_name='individualaddress',
            old_name='address',
            new_name='formatted_address',
        ),
        migrations.RemoveField(
            model_name='inboundcontactaddress',
            name='zip_code',
        ),
        migrations.RemoveField(
            model_name='individualaddress',
            name='zip_code',
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='county',
            field=models.CharField(blank=True, max_length=127),
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='final_type',
            field=models.CharField(blank=True, max_length=32),
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='latitude',
            field=models.FloatField(blank=True, default=None),
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='locality',
            field=models.CharField(blank=True, max_length=127),
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='longitude',
            field=models.FloatField(blank=True, default=None),
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='postal_code',
            field=models.CharField(blank=True, max_length=16),
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='postal_code_suffix',
            field=models.CharField(blank=True, max_length=16),
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='route',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name='inboundcontactaddress',
            name='street_number',
            field=models.IntegerField(blank=True, default=None),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='county',
            field=models.CharField(blank=True, max_length=127),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='final_type',
            field=models.CharField(blank=True, max_length=32),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='latitude',
            field=models.FloatField(blank=True, default=None),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='locality',
            field=models.CharField(blank=True, max_length=127),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='longitude',
            field=models.FloatField(blank=True, default=None),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='postal_code',
            field=models.CharField(blank=True, max_length=16),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='postal_code_suffix',
            field=models.CharField(blank=True, max_length=16),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='route',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name='individualaddress',
            name='street_number',
            field=models.IntegerField(blank=True, default=None),
        ),
    ]
A £2000 Golden Hello* is just one of the ways we will reward you when you join Castle Lodge as a Registered Nurse. Castle Lodge is a specialist independent hospital, supporting 15 patients with a variety of mental health needs. As a Registered Nurse (RMN) at a Barchester independent mental health hospital, you’ll use your compassion and experience to deliver the quality care and support we’re known for. Working within a multi-disciplinary team, you’ll provide care and support to patients with complex and sometimes challenging behaviours. You’ll help improve patients’ quality of life by ensuring their needs are met and that each person is always treated with dignity and respect. That means you’ll take the time to listen to our patients, and provide and supervise the delivery of excellent nursing care. As a Registered Nurse (RMN) at Barchester, you can enjoy the freedom and autonomy you need to make a real difference. You’ll need to have current NMC registration to join us as a Registered Nurse (RMN). We’ll look for an up-to-date knowledge of clinical practices together with a strong understanding of CQC requirements and frameworks. Your proactive approach will mean you’re ready to make the most of the personally-tailored training programme we’ll develop for you. Dedicated and compassionate, you should also have experience of producing well-developed care plans, undertaking risk assessments and working as part of a clinical team.
""" Adapt the binary search algorithm so that instead of outputting whether a specific value was found, it outputs whether a value within an interval (specified by you) was found. Write the pseudocode and code and give the time complexity of the algorithm using the Big O notation.""" def binarySearch(lower, upper, alist): """ take in a lower bound, upper bound, and a list as input. then you have the range of numbers to search for. binary search after but the search element is a range of numbers. """ bounds = range(lower, upper + 1) first = 0 last = len(alist) - 1 found = False while first <= last and not found: midpoint = (first + last) // 2 if alist[midpoint] in bounds: found = True else: if bounds[0] > alist[midpoint]: first = midpoint + 1 else: last = midpoint - 1 return found """ take lower bound, upper bound and list as inputs set search start point, end point while the list is not empty and element is not found set a new midpoint using the start point and end point if the midpoint is in bounds element is found else if the smallest element is greater than the midpoint set lower bound to the current midpoint else set upper bound to the current midpoint return whether or not an element matching was found O(N) """
After working some time experiencing the small, modern apartments of Berlin, designer and illustrator Karin Hearn craved a similarly simple living room, with creative facilities, in her hometown of Perth, Australia. However, finding something affordable and practical in the metropolis was a challenge. The large, low windows let the outside in and blur the boundaries of the little house, creating a bigger sense of space. Right here, a friend’s dog, Xolo, gives the space a test run, “to make sure every design choice worked with the style and mindset of the home,” Weir says.
#!/usr/bin/python import sys from argparse import ArgumentParser from collections import Counter import matplotlib.pyplot as plt from lib.graph import HypRG def main(): parser = ArgumentParser(description='Emulate hyperbolic random graph', epilog='Returns only edges (without isolated vertices!)') parser.add_argument('n', type=int, help='number of vertices') parser.add_argument('--alpha', type=float, help='alpha', default=1.) parser.add_argument('-C', type=float, help='C', default=0.) parser.add_argument('-f', help='outfile') parser.add_argument('-s', '--seed', help='random seed', type=int) args = parser.parse_args() if args.f: out_f = open(args.f, 'w') else: out_f = sys.stdout n = args.n alpha = args.alpha C = args.C seed = 0 if args.seed is None else args.seed g = HypRG(n, alpha=alpha, C=C, seed=seed) for e in g.edges(): e_fmt = [] for v in e: e_fmt.append("{0:.3f},{1:.3f}".format(*v)) out_f.write(' '.join(e_fmt) + '\n') if __name__ == '__main__': main()
FDA publishes "Footnotes", a bi-monthly magazine which details up and coming events (local and international), items of interest to folk dancers, and members' contributions. If you wish to receive a complete printed copy of "Footnotes" posted to you, subscriptions are AU$30 per annum (OR choosing emailed copies will discount subscription membership to AU$25 per annum). Download and print out this membership form (click here) to send it with a cheque for membership subscription to the FDA Treasurer (the form has details). Membership also entitles you to discount entry to FDA functions. Please note that these are all archive copies of the original printed version, which are never updated, so they may contain broken links to web sites or e-mail addresses. Please refer to our links page or the most recent issue of Footnotes for current links. Also, each issue contains a variety of illustrations or photos, so they may take a little bit longer than a normal web page to load once clicked.
#!/usr/bin/env python
################################################################################
#    GIPS: Geospatial Image Processing System
#
#    AUTHOR: Matthew Hanson
#    EMAIL:  matt.a.hanson@gmail.com
#
#    Copyright (C) 2014-2018 Applied Geosolutions
#
#    This program is free software; you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation; either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>
################################################################################
import sys
import os
from datetime import datetime as dt
import traceback  # NOTE(review): imported but unused in this module — confirm before removing
import numpy
from copy import deepcopy
from collections import defaultdict

import gippy
from gips.tiles import Tiles
from gips.utils import VerboseOut, Colors
from gips import utils
from gips.mapreduce import MapReduce
from . import dbinv, orm


class Inventory(object):
    """ Base class for inventories """
    # Color cycle used to distinguish sensors in terminal output; indexed by
    # the sensor's position in the sorted sensor_set (see color()).
    _colors = [Colors.PURPLE, Colors.RED, Colors.GREEN, Colors.BLUE, Colors.YELLOW]

    def __init__(self):
        pass

    def __getitem__(self, date):
        """ Indexing operator for class """
        # Subclasses populate self.data as {date: Tiles-or-Data object}.
        return self.data[date]

    def __len__(self):
        """ Length of inventory (# of dates) """
        return len(self.dates)

    def get_subset(self, dates):
        """ Return subset of inventory """
        # Deep-copies the whole inventory, then drops dates not requested.
        inv = deepcopy(self)
        for d in inv.dates:
            if d not in dates:
                del inv.data[d]
        return inv

    @property
    def sensor_set(self):
        """ Sorted list of all sensors seen on any date in the inventory """
        sset = set()
        for date in self.dates:
            sset.update(self.data[date].sensor_set)
        return sorted(sset)

    @property
    def dates(self):
        """ Get sorted list of dates """
        return sorted(self.data.keys())

    @property
    def numfiles(self):
        """ Total number of files in inventory """
        return sum([len(dat) for dat in self.data.values()])

    @property
    def datestr(self):
        """ Human-readable summary of the date range, e.g. '3 dates (a - b)' """
        return '%s dates (%s - %s)' % (len(self.dates), self.dates[0], self.dates[-1])

    def color(self, sensor):
        """ Return color for sensor """
        return self._colors[list(self.sensor_set).index(sensor)]

    def pprint(self, md=False, size=False):
        """ Print the inventory

        md:    if True print dates as month-day, otherwise day-of-year
        size:  if True also report total on-disk size of the archive data
        """
        if len(self.data) == 0:
            print 'No matching files in inventory'
            return
        # Header comes from any one date's object (all share the same format).
        self.data[self.data.keys()[0]].pprint_asset_header()
        dformat = '%m-%d' if md else '%j'
        oldyear = 0
        formatstr = '{:<12}\n'
        colors = {k: self.color(k) for k in self.sensor_set}
        for date in self.dates:
            # if new year then write out the year
            if date.year != oldyear:
                sys.stdout.write(Colors.BOLD + formatstr.format(date.year) + Colors.OFF)
            self.data[date].pprint(dformat, colors)
            oldyear = date.year
        if self.numfiles != 0:
            VerboseOut("\n\n%s files on %s dates" % (self.numfiles, len(self.dates)), 1)
        if size:
            # For every tile on every date, gather product filenames plus
            # asset filenames, then stat each file to total the bytes.
            filelist_gen = (
                tile.filenames.values() + [a.filename for a in tile.assets.values()]
                for tiles in self.data.values() for tile in tiles.tiles.values()
            )
            total_size = sum(
                sum(os.stat(f).st_size for f in fl) for fl in filelist_gen
            )
            # NOTE(review): self.spatial is set by DataInventory, not by this
            # base class — pprint(size=True) presumably is only valid there.
            sitename = self.spatial.sitename
            if sitename == 'tiles':
                sitename += str(self.spatial.tiles)
            print('{} includes {:.0f} Mebibytes of local gips archive data'
                  .format(sitename, total_size / 2 ** 20))


class ProjectInventory(Inventory):
    """ Inventory of project directory (collection of Data class) """

    def __init__(self, projdir='', products=[]):
        """ Create inventory of a GIPS project directory

        projdir:   path to the project directory to scan
        products:  products of interest; defaults to all products discovered
        """
        # NOTE(review): mutable default argument `products=[]` — safe only as
        # long as it is never mutated in place; it is rebound below, not mutated.
        self.projdir = os.path.abspath(projdir)
        if not os.path.exists(self.projdir):
            raise Exception('Directory %s does not exist!' % self.projdir)

        self.data = {}
        product_set = set()
        sensor_set = set()
        with utils.error_handler("Project directory error for " + self.projdir):
            # can't import Data at module scope due to circular dependencies
            from gips.data.core import Data
            for dat in Data.discover(self.projdir):
                self.data[dat.date] = dat
                # All products and sensors used across all dates
                product_set = product_set.union(dat.product_set)
                sensor_set = sensor_set.union(dat.sensor_set)
            if not products:
                products = list(product_set)
            self.requested_products = products
            self.sensors = sensor_set

    def products(self, date=None):
        """ Intersection of available products and requested products for this date """
        if date is not None:
            return set(self.data[date].products).intersection(set(self.requested_products))
        else:
            # No date given: return the intersection for every date as a dict.
            products = {}
            for date in self.dates:
                products[date] = set(self.data[date].products).intersection(set(self.requested_products))
            return products

    def new_image(self, filename, dtype=gippy.GDT_Byte, numbands=1, nodata=None):
        """ Create new image with the same template as the files in project """
        # Use the first date's first requested product as the geometry template.
        img = gippy.GeoImage(self.data[self.dates[0]].open(self.requested_products[0]))
        imgout = gippy.GeoImage(filename, img, dtype, numbands)
        img = None  # release the template image handle promptly
        if nodata is not None:
            imgout.SetNoData(nodata)
        return imgout

    def data_size(self):
        """ Get 'shape' of inventory: #products x rows x columns """
        img = gippy.GeoImage(self.data[self.dates[0]].open(self.requested_products[0]))
        sz = (len(self.requested_products), img.YSize(), img.XSize())
        return sz

    def get_data(self, dates=None, products=None, chunk=None):
        """ Read all files as time series, stacking all products

        chunk: [x, y, width, height] window to read; required (indexed below).
        Returns a numpy array of shape (#dates * #products, height, width)
        with nodata pixels replaced by NaN.
        """
        # TODO - change to absolute dates
        if dates is None:
            dates = self.dates
        days = numpy.array([int(d.strftime('%j')) for d in dates])
        imgarr = []
        if products is None:
            products = self.requested_products
        for p in products:
            gimg = self.get_timeseries(p, dates=dates)
            # TODO - move numpy.squeeze into swig interface file?
            ch = gippy.Recti(chunk[0], chunk[1], chunk[2], chunk[3])
            arr = numpy.squeeze(gimg.TimeSeries(days.astype('float64'), ch))
            arr[arr == gimg[0].NoDataValue()] = numpy.nan
            if len(days) == 1:
                # squeeze dropped the time axis for a single date; restore it
                dims = arr.shape
                arr = arr.reshape(1, dims[0], dims[1])
            imgarr.append(arr)
        data = numpy.vstack(tuple(imgarr))
        return data

    def get_location(self):
        # this is a terrible hack to get the name of the feature associated with the inventory
        data = self.data[self.dates[0]]
        location = os.path.split(os.path.split(data.filenames.values()[0])[0])[1]
        return location

    def get_timeseries(self, product='', dates=None):
        """ Read all files as time series """
        if dates is None:
            dates = self.dates
        # TODO - multiple sensors
        filenames = [self.data[date][product] for date in dates]
        img = gippy.GeoImage(filenames)
        return img

    def map_reduce(self, func, numbands=1, products=None, readfunc=None, nchunks=100, **kwargs):
        """ Apply func to inventory to generate an image with numdim output bands """
        if products is None:
            products = self.requested_products
        if readfunc is None:
            # default reader: pull the requested products for a given chunk window
            readfunc = lambda x: self.get_data(products=products, chunk=x)
        inshape = self.data_size()
        outshape = [numbands, inshape[1], inshape[2]]
        mr = MapReduce(inshape, outshape, readfunc, func, **kwargs)
        mr.run(nchunks=nchunks)
        return mr.assemble()


class DataInventory(Inventory):
    """ Manager class for data inventories (collection of Tiles class) """

    def __init__(self, dataclass, spatial, temporal, products=None,
                 fetch=False, update=False, **kwargs):
        """ Create a new inventory
        :dataclass: The Data class to use (e.g., LandsatData, ModisData)
        :spatial: The SpatialExtent requested
        :temporal: The temporal extent requested
        :products: List of requested products of interest
        :fetch: bool indicated if missing data should be downloaded
        """
        VerboseOut('Retrieving inventory for site %s for date range %s' % (spatial.sitename, temporal), 2)

        self.dataclass = dataclass
        Repository = dataclass.Asset.Repository
        self.spatial = spatial
        self.temporal = temporal
        self.products = dataclass.RequestedProducts(products)
        self.update = update

        if fetch:
            # command-line arguments could have lists, which lru_cache chokes
            # one due to being unhashable. Also tiles is passed in, which
            # conflicts with the explicit tiles argument.
            fetch_kwargs = {k: v for (k, v) in utils.prune_unhashable(kwargs).items() if k != 'tiles'}
            archived_assets = dataclass.fetch(self.products.base, self.spatial.tiles,
                                              self.temporal, self.update, **fetch_kwargs)

            if orm.use_orm():
                # save metadata about the fetched assets in the database
                driver = dataclass.name.lower()
                for a in archived_assets:
                    dbinv.update_or_add_asset(
                        asset=a.asset, sensor=a.sensor, tile=a.tile, date=a.date,
                        name=a.archived_filename, driver=driver)
                    # if the new asset comes with any "free" products, save that info:
                    for (prod_type, fp) in a.products.items():
                        dbinv.update_or_add_product(
                            product=prod_type, sensor=a.sensor, tile=a.tile, date=a.date,
                            name=fp, driver=driver)

        # Build up the inventory: One Tiles object per date. Each contains one Data object. Each
        # of those contain one or more Asset objects.
        self.data = {}
        dates = self.temporal.prune_dates(spatial.available_dates)
        if orm.use_orm():
            # populate the object tree under the DataInventory (Tiles, Data, Asset) by querying the
            # DB quick-like then assigning things we iterate: The DB is a flat table of data; we
            # have to hierarchy-ize it. Do this by setting up a temporary collection of objects
            # for populating Data instances: the collection is basically a simple version of the
            # complicated hierarchy that GIPS constructs on its own:
            # collection = {
            #     (tile, date): {'a': [asset, asset, asset],
            #                    'p': [product, product, product]},
            #     (tile, date): {'a': [asset, asset, asset],
            #                    'p': [product, product, product]},
            # }
            collection = defaultdict(lambda: {'a': [], 'p': []})

            def add_to_collection(date, tile, kind, item):
                key = (date, str(tile))  # str() to avoid possible unicode trouble
                collection[key][kind].append(item)

            search_criteria = {  # same for both Assets and Products
                'driver': Repository.name.lower(),
                'tile__in': spatial.tiles,
                'date__in': dates,
            }
            for p in dbinv.product_search(**search_criteria).order_by('date', 'tile'):
                add_to_collection(p.date, p.tile, 'p', str(p.name))
            for a in dbinv.asset_search(**search_criteria).order_by('date', 'tile'):
                add_to_collection(a.date, a.tile, 'a', str(a.name))

            # the collection is now complete so use it to populate the GIPS object hierarchy
            for k, v in collection.items():
                (date, tile) = k
                # find or else make a Tiles object
                if date not in self.data:
                    self.data[date] = Tiles(dataclass, spatial, date, self.products, **kwargs)
                tiles_obj = self.data[date]
                # add a Data object (should not be in tiles_obj.tiles already)
                assert tile not in tiles_obj.tiles  # sanity check
                data_obj = dataclass(tile, date, search=False)
                # add assets and products
                [data_obj.add_asset(dataclass.Asset(a)) for a in v['a']]
                data_obj.ParseAndAddFiles(v['p'])
                # add the new Data object to the Tiles object if it checks out
                if data_obj.valid and data_obj.filter(**kwargs):
                    tiles_obj.tiles[tile] = data_obj
            return

        # Perform filesystem search since user wants that. Data object instantiation results
        # in filesystem search (thanks to search=True).
        self.data = {}  # clear out data dict in case it has partial results
        for date in dates:
            tiles_obj = Tiles(dataclass, spatial, date, self.products, **kwargs)
            for t in spatial.tiles:
                data_obj = dataclass(t, date, search=True)
                if data_obj.valid and data_obj.filter(**kwargs):
                    tiles_obj.tiles[t] = data_obj
            if len(tiles_obj) > 0:
                self.data[date] = tiles_obj

    @property
    def sensor_set(self):
        """ The set of all sensors used in this inventory """
        # Overrides the base property: reports the dataclass's full sensor
        # catalog, not only sensors present in self.data.
        return sorted(self.dataclass.Asset._sensors.keys())

    def process(self, *args, **kwargs):
        """ Process assets into requested products """
        # TODO - some check on if any processing was done
        start = dt.now()
        VerboseOut('Processing [%s] on %s dates (%s files)' % (self.products, len(self.dates), self.numfiles), 3)
        if len(self.products.standard) > 0:
            for date in self.dates:
                # continuable=True: one date failing does not abort the rest
                with utils.error_handler(continuable=True):
                    self.data[date].process(*args, **kwargs)
        if len(self.products.composite) > 0:
            self.dataclass.process_composites(self, self.products.composite, **kwargs)
        VerboseOut('Processing completed in %s' % (dt.now() - start), 2)

    def mosaic(self, datadir='./', tree=False, **kwargs):
        """ Create project files for data in inventory

        tree: if True write each date into its own YYYYDDD subdirectory
        """
        # make sure products have been processed first
        self.process(overwrite=False)
        start = dt.now()
        VerboseOut('Creating mosaic project %s' % datadir, 2)
        VerboseOut(' Dates: %s' % self.datestr)
        VerboseOut(' Products: %s' % self.products)

        dout = datadir
        for d in self.dates:
            if tree:
                dout = os.path.join(datadir, d.strftime('%Y%j'))
            self.data[d].mosaic(dout, **kwargs)
        VerboseOut('Completed mosaic project in %s' % (dt.now() - start), 2)

    # def warptiles(self):
    #     """ Just copy or warp all tiles in the inventory """

    def pprint(self, **kwargs):
        """ Print inventory """
        print
        if self.spatial.site is not None:
            print Colors.BOLD + 'Asset Coverage for site %s' % (self.spatial.sitename) + Colors.OFF
            self.spatial.print_tile_coverage()
            print
        else:
            # constructor makes it safe to assume there is only one tile when
            # self.spatial.site is None, but raise an error anyway just in case
            if len(self.spatial.tiles) > 1:
                raise RuntimeError('Expected 1 tile but got ' + repr(self.spatial.tiles))
            print Colors.BOLD + 'Asset Holdings for tile ' + self.spatial.tiles[0] + Colors.OFF
        super(DataInventory, self).pprint(**kwargs)
        print Colors.BOLD + '\nSENSORS' + Colors.OFF
        _sensors = self.dataclass.Asset._sensors
        for key in sorted(self.sensor_set):
            if key in _sensors:
                desc = _sensors[key]['description']
                scode = key + ': ' if key != '' else ''
            else:
                desc = ''
                scode = key
            print self.color(key) + '%s%s' % (scode, desc) + Colors.OFF
My child has cancer; can there be complications with fifth disease? Possibly. If your child is on an active treatment protocol that affects the immune system, then the development of any infection can be serious. Your child might get more sick from the disease than someone else who has a normal immune system and is not battling cancer. Maybe. Kids get a rash, mild fever, and/or mild cold symptoms. Adults get a rash, some joint aches, and/or joint swelling. Infected persons, with or without symptoms, mostly recover without complications and have immunity against re-infection. Complications can occur in persons with immunosuppression, such as some cancer patients, and in patients with chronic anemia. I have had a habit of picking at my nail beds since my childhood. Is this some kind of disease? Does it lead to cancer? What does an American without health insurance do if his child has cancer or another serious disease? I was diagnosed with celiac disease at 13, though it should have been caught when I was a baby; I went 13 years eating gluten without knowing. Am I at higher risk for cancer, etc.? Please explain why cancer is considered a lifestyle disease. Are men more prone than women to lethal diseases like cancer? Is there a way to help children with cancer or other diseases? Why are diseases like cancer more common in people in some families? Why are drugs to cure cancer and other diseases extraordinarily expensive? For what length of time can a person have cancer or a disease and not know it?
#coding:utf-8 def cnenlen(s): if type(s) is str: s = s.decode('utf-8', 'ignore') return len(s.encode('gb18030', 'ignore')) // 2 def cnencut(s, length): ts = type(s) if ts is str: s = s.decode('utf-8', 'ignore') s = s.encode('gb18030', 'ignore')[:length*2].decode('gb18030', 'ignore') if ts is str: s = s.encode('utf-8', 'ignore') return s def cnenoverflow(s, length): txt = cnencut(s , length) if txt != s: txt = '%s ...' % txt.rstrip() has_more = True else: has_more = False return txt, has_more def txt_rsrtip(txt): return '\n'.join( map( str.rstrip, txt.replace('\r\n', '\n')\ .replace('\r', '\n').rstrip('\n ')\ .split('\n') ) ) def make_tag_list(tag_txt): _tag_list = txt_rsrtip(tag_txt).split('\n') result = [] for i in _tag_list: tag = i.strip() if not tag: continue if tag not in result: result.append(tag) return result if __name__ == '__main__': pass print repr(txt_rsrtip('b\r\nx'))
The 0693 2-Piece Sectional Sofa with Right-Arm-Facing Chaise by McCreary Modern at AHFA in the area. Product availability may vary. Contact us for the most current availability on this product.
from __future__ import unicode_literals, absolute_import

import random
import time

from .base import Command
from utils.colorize import colorize, Colors
from player.player import Player


class Play(Command):
    """Command that starts playback of a genre, or resumes a paused track."""
    name = 'play'
    pattern = 'play {genre}'
    example = ('play chillout', 'p jazz',)
    description = 'Use this command to play genres and resume paused track.'

    @staticmethod
    def handle(self, *args):
        """Handle the 'play' command.

        NOTE(review): declared @staticmethod yet takes an explicit `self` —
        presumably the framework passes the command/session object in
        manually; confirm against Command's dispatch code.

        args[0], when present, is the genre name; with no argument, resumes
        a paused player or picks a random genre.
        """
        arg = args[0] if args else ''
        if not arg:
            # No genre given: resume if paused, otherwise fall through to a
            # random genre pick.
            if self.player and self.player.is_paused:
                self.player.play()
                return self.INDENT + colorize(Colors.BLUE, '\u25B6 ' + self.client.active_station['name'])
            self.stdout_print(self.INDENT + colorize(Colors.GRAY, 'Pick random genre...'))
            arg = random.choice([genre.get('title', '') for genre in self.client.genres])
        genre = self.client.search_genre(arg)
        genre_id = genre.get('id') if genre else None
        if genre_id is None:
            return self.INDENT + colorize(Colors.RED, 'Genre ') + arg + colorize(Colors.RED, ' not found.')
        self.stdout_print(self.INDENT + colorize(Colors.GREEN, 'Tuning in...'))
        self.stdout_print(self.INDENT + colorize(Colors.GREEN, 'Starting genre: ') + genre.get('title', ''))
        # Try up to 3 stations; for each, poll for up to ~5 seconds until the
        # player reports it is actually playing.
        num_of_tries = 0
        while num_of_tries < 3:
            num_of_tries += 1
            stream = self.client.get_stream(genre_id, renew_active_station=True)
            if not stream:
                return self.INDENT + colorize(Colors.RED, 'No active stations found... Please, try another genre.')
            if self.player:
                # stop any currently-playing stream before replacing the player
                self.player.stop()
            self.player = Player(stream)
            self.player.play()
            num_of_checks = 0
            while num_of_checks < 5:
                num_of_checks += 1
                time.sleep(1)
                if self.player.is_playing:
                    return self.INDENT + colorize(Colors.BLUE, '\u25B6 ' + self.client.active_station['name'])
        return self.INDENT + colorize(Colors.RED, 'No active stations found... Please, try another genre.')


class P(Play):
    """Short alias for Play ('p'); hidden from the main help listing."""
    name = 'p'
    pattern = 'p {genre}'
    example = ('p chillout', 'play jazz',)
    show_in_main_help = False
I'm collecting some aspects of my research about the regiments of the Swabian district. It is interesting to see that there is no information about the troubles the command had with the ordinary soldiers. I found some information in a more recent chronicle of the early 19th century. But now I'm reading the chronicle of a certain Georg David Blinzig, who started to write his chronicle in 1740. Although Blinzig was not a leading figure of the state, his notes are very interesting. It's very likely that he saw the end of a part of the rebellion of Lorch with his own eyes. Secondly, the contingent of Hall played a major role in the uprising of the Swabian troops! When the war broke out, it seems that there was no opposition in the imperial town of Hall against the duties owed to the empire and the emperor. It wasn't difficult to find enough peasants to serve in the contingent of Schwäbisch Hall. Very soon all men of the company of infantry and the half company of dragoons were ready to march. They had to march to join the other parts of the Swabian troops. The Regiment of Baden-Durlach made the assembly at Durlach. This happened in May 1757. To be continued if there is some interest. The same document mentions the equipment of the coachmen (Fuhrknechte). It's interesting that they all had something like a uniform: blue coats, blue waistcoats, sabres and belts.
""" Naive registry that is just a subclass of a python dictionary. It is meant to be used to store objects and retrieve them when needed. The registry is recreated on each app launch and is best suited to store some dynamic or short-term data. Storing an object should be performed by using the `add` function, and retrieving it by using the `get` function. Examples: Register a function under a function name:: from ramses import registry @registry.add def foo(): print 'In foo' assert registry.get('foo') is foo Register a function under a different name:: from ramses import registry @registry.add('bar') def foo(): print 'In foo' assert registry.get('bar') is foo Register an arbitrary object:: from ramses import registry myvar = 'my awesome var' registry.add('my_stored_var', myvar) assert registry.get('my_stored_var') == myvar Register and get an object by namespace:: from ramses import registry myvar = 'my awesome var' registry.add('Foo.my_stored_var', myvar) assert registry.mget('Foo') == {'my_stored_var': myvar} """ import six class Registry(dict): pass registry = Registry() def add(*args): def decorator(function): registry[name] = function return function if len(args) == 1 and six.callable(args[0]): function = args[0] name = function.__name__ return decorator(function) elif len(args) == 2: registry[args[0]] = args[1] else: name = args[0] return decorator def get(name): try: return registry[name] except KeyError: raise KeyError( "Object named '{}' is not registered in ramses " "registry".format(name)) def mget(namespace): namespace = namespace.lower() + '.' data = {} for key, val in registry.items(): key = key.lower() if not key.startswith(namespace): continue clean_key = key.split(namespace)[-1] data[clean_key] = val return data
a thousand years piano sheet guys music woods harp center pop books easy pdf. a thousand years piano sheet love it sf and image tagged with part 2 music free pdf. a thousand years piano sheet music notation score download easy. a thousand years piano sheet love it and image tagged with music free pdf. a thousand years piano sheet guys music digital part 2 pdf. a thousand years piano sheet music advanced free. a thousand years piano sheet music score hard. a thousand years piano sheet music letters best chords with. a thousand years piano sheet by music advanced level with letters. a thousand years piano sheet music notes keyboard roblox. a thousand years piano sheet easy music free. a thousand years piano sheet zoom music free. a thousand years piano sheet harp column music pdf easy. a thousand years piano sheet music digital free pdf easy. a thousand years piano sheet for easy music free. a thousand years piano sheet music sax violin and pdf. a thousand years piano sheet by ea arr ocher guys music free. a thousand years piano sheet roblox. a thousand years piano sheet music download free pdf. a thousand years piano sheet music free pdf. a thousand years piano sheet by part 2 music pdf. a thousand years piano sheet music and violin free. a thousand years piano sheet easy music free pdf. a thousand years piano sheet music the guys for beginners. a thousand years piano sheet music violin for pdf easy. a thousand years piano sheet music christina perri easy pdf. a thousand years piano sheet screenshot 1 music download.
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils to train DistilBERT
adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import json
import logging
import os
import socket

import git
import numpy as np
import torch

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """ Log commit info. """
    # Searches upward from the CWD for the enclosing git repository and dumps
    # its id, HEAD sha, and active branch to <folder_path>/git_log.json.
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """ Handle single and multi-GPU / multi-node. """
    # Mutates `params` in place, filling in: local_rank, global_rank,
    # world_size, n_nodes, node_id, n_gpu_per_node, is_master, multi_gpu,
    # multi_node (and master_port in the CPU-only case).
    if params.n_gpu <= 0:
        # CPU-only run: degenerate single-process topology.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        # Distributed launch: topology comes from env vars set by the
        # launcher (torch.distributed.launch or similar) — WORLD_SIZE,
        # N_GPU_NODE, RANK, plus N_NODES/NODE_RANK for cross-checking.
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """ Set the random seed. """
    # Seeds numpy, CPU torch, and (when GPUs are in use) all CUDA devices.
    # NOTE: Python's own `random` module is not seeded here.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
We need to know your dog(s) as well as you do! Please answer the following questions so that we have a better understanding on the behavior of your dog. Please provide references for your dog. This person should be familiar with your dogs behavior around stranger(s) and other animals. Please include any current Woofers’ clients who have referred you to us! 1) Owner certifies the accuracy of all information conveyed about the owner’s pet(s) to Woofers . 2) Owner authorizes the Facility to obtain medical and vaccination records for the pet(s) listed above from the veterinarian listed above. Owner further authorizes said veterinarian to provide these records to the Facility. 3) Owner agrees not to bring pet(s) to Facility if pet(s) exhibit any signs of illness that may be harmful to the other pets in attendance, such as: vomiting, diarrhea, coughing, extreme lethargy, etc. without first obtaining approval from the Facility. 4) Facility shall exercise reasonable care for owner’s pet(s) while it/they is/are in the Facility’s custody. The owner recognizes the potential risks (such as illness and/or injury) involved with pet daycare, boarding and grooming. The Facility does not allow any aggressive pets for boarding, daycare or grooming. 5) Facility may change the level or type of daycare/boarding or possibly remove the pet(s) from group play, if Facility believes it necessary to ensure the safety of the owner’s pet(s), other pets or its employees. Dogs must be in attendance within any 30 day period in order to ensure the ability to attend daycare. 6) Owner represents to the Facility that owner is over 18 years of age; that all information provided about the owner is accurate; and that all credit card information provided is accurate. 7) Owner agrees to the Policies and Procedures posted in the Facility’s lobby and on its website on check-in date. The Facility reserves the right to adjust said Policies and Procedures without prior notice. 
8) Owner gives the Facility consent to act in the owner’s behalf to obtain veterinarian services if owner’s pet(s) become(s) ill or require(s) the attention of a veterinarian. The owner agrees that the Facility, in its sole discretion, may engage veterinarian services, administer medicine, or give other requisite attention to the pet(s); that all expenses incurred shall be paid by the owner; and agrees to indemnify and hold the Facility and its employees harmless from said expenses. 9) Owner agrees to pay all charges incurred for: special services requested veterinary costs as stated above, and failure to comply with the Facility’s Cancellation Policy. 10) Facility shall have the right to refuse to release owner’s pet(s) until owner has paid all charges due to the Facility. The Facility is hereby granted a lien on the owner’s pet(s) for any and all unpaid charges resulting from boarding, day care or any other service provided by the Facility. 11) Owner assumes any and all liability and expenses for injuries inflict upon any human or other pet(s) by owners pet(s) while in the Facility. Owner agrees to indemnify and hold the Facility and its employees harmless from any and all liability and expenses incurred as a result of said injuries. 12) Facility has the right to take photographs and/or videos of owner’s pet(s) while in the Facility’s care. The Facility may post or reproduce any and all photographs/videos taken (as well as pet(s) name) on or in, without limitation, the Facility’s website, social media sites or promotional materials without becoming liable to the owner (or the owner’s pet(s)) for any royalty payment. Owner hereby releases, covenants not to sue, and forever discharges the facility of and from any and all claims, demands, rights, and causes of action of whatever kind or nature. Further, owner agrees that no photography or video may be taken by the owner while in the Facility without approval from Facility’s Corporate Offices. 
13) Owner agrees that the Facility’s liability shall not exceed the current chattel value of a pet of the same species delivered to the Facility. 14) This Agreement contains the entire agreement between the parties and shall be binding on the heirs, administrators, personal representatives and assigns of the owner and the Facility. Any action by owner which is in breach of the terms and conditions of this Agreement. 16) Any controversy or claim relating to this Agreement shall be settled by arbitration in accordance with the rules of the American Arbitration Association. Judgment upon the award rendered by an arbitrator may be entered in any court having jurisdiction thereof. The arbitrator shall, as part of his/her award, determine an award to the prevailing party of the costs of such arbitration and reasonable attorney’s fees of the prevailing party. 17) This is an Agreement between Woofers and/or Moxie Dog, L.L.C. (referred to as “Facility”) and the pet(s) owner whose signature appears below (referred to as “owner”). © 2019 Woofers Pet Stuff All rights reserved. Site Design by GoodDog Marketing.
"""Defines the functions necessary to delete a file from a workspace""" from __future__ import unicode_literals import logging import os import sys from error.exceptions import ScaleError, get_error_by_exception logger = logging.getLogger(__name__) GENERAL_FAIL_EXIT_CODE = 1 def delete_files(files, volume_path, broker): """Deletes the given files within a workspace. :param files: List of named tuples containing path and ID of the file to delete. :type files: [collections.namedtuple] :param volume_path: Absolute path to the local container location onto which the volume file system was mounted, None if this broker does not use a container volume :type volume_path: string :param broker: The storage broker :type broker: `storage.brokers.broker.Broker` """ logger.info('Deleting %i files', len(files)) try: broker.delete_files(volume_path=volume_path, files=files, update_model=False) except ScaleError as err: err.log() sys.exit(err.exit_code) except Exception as ex: exit_code = GENERAL_FAIL_EXIT_CODE err = get_error_by_exception(ex.__class__.__name__) if err: err.log() exit_code = err.exit_code else: logger.exception('Error performing delete_files steps') sys.exit(exit_code) return
Growing up in France in the 1800’s, Frederic Ozanam was a French scholar who loved to learn and also learned to love the Catholic faith in a time when being Catholic was not popular. Although not necessarily persecuted, Catholic beliefs were not promoted much. After finding others who shared his belief, he became a great writer of the Church, publishing many works, including several about the works of the historical Christian church. With the support of some friends, he founded the Charitable Society of St. Vincent de Paul. They devoted their time and resources to the service of the poor. Although Frederic Ozanam died at the young age of 40, his words and works still have an effect on the Church today. If Frederic Ozanam’s name sounds familiar, it is because we have a local charitable organization that collects household items for the poor! So clean out those closets and donate them to an organization that began in the 1800’s!
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This module provides classes to manage the threads

Classes:
    SpiderThread, the thread a spider worker runs in
    Scheduler, manages all the threads

Date: 2014/10/29 17:23:06
"""

import os
import logging
import threading

import htmlsaver
import utils
import errors
import interface
import spider


class SpiderThread(threading.Thread, interface.Worker):
    """Daemon thread that runs a spider worker on a fixed interval.

    Args:
        spider, the spider worker
        crawl_interval, the work interval of the spider, in seconds
        args, arguments passed to threading.Thread
        kwargs, positional arguments passed to threading.Thread
    """
    def __init__(self, spider, crawl_interval, *args, **kwargs):
        self.spider = spider
        self.stopped = threading.Event()
        self.crawl_interval = crawl_interval
        super(SpiderThread, self).__init__(*args, **kwargs)
        # Daemon thread: a hung spider cannot keep the process alive on exit
        self.daemon = True

    def run(self):
        """Run the spider once per crawl_interval until stopped."""
        while True:
            if self.spider.stopped:
                logging.info('spider thread will be exiting by spider')
                break
            # Event.wait() returns False on timeout (a normal tick) and True
            # when terminate() set the event, i.e. an external stop request.
            if not self.stopped.wait(self.crawl_interval):
                self.spider.run()
            else:
                # Ask the spider to stop; the check at the top of the loop
                # breaks out once the spider reports itself stopped.
                self.spider.stop()
                logging.info('spider thread will be exiting by thread')

    def terminate(self):
        """Signal this thread to stop after its current wait."""
        self.stopped.set()
        logging.info('spider thread will be exiting by terminate')


class Scheduler(object):
    """Manage all the spider threads and the shared URL queue / saver.

    Args:
        arg_parser, the argument parser providing spider options
    """
    def __init__(self, arg_parser):
        self.thread_num = arg_parser.get_spider_option('thread_count', int)
        self.max_level = arg_parser.get_spider_option('max_depth', int)
        # NOTE(review): `dir` is the builtin, passed here as the option
        # converter — presumably get_spider_option treats it as a marker for
        # directory options; confirm against its implementation.
        self.output_dir = arg_parser.get_spider_option('output_directory', dir)
        self.target_url_regx = arg_parser.get_spider_option('target_url', str)
        self.crawl_interval = arg_parser.get_spider_option('crawl_interval', float)
        self.urls_cache = utils.UrlQueue(self.thread_num, self.max_level)
        self.saver = htmlsaver.HtmlSaver(self.output_dir)
        self.workers = [ ]
        self._put_level_zero_urls(arg_parser.get_spider_option('url_list_files', str))

    def _put_level_zero_urls(self, urls_file):
        """Seed the URL queue with level-0 URLs read from urls_file.

        Raises:
            errors.ArgumentFileError, if the file does not exist
            errors.QueueEmptyError, if no URL could be queued
        """
        if not os.path.exists(urls_file):
            raise errors.ArgumentFileError(
                'file does not exist: {0}'.format(urls_file))
        try:
            with open(urls_file, 'r') as infile:
                for url in infile:
                    self.urls_cache.put(utils.UrlLevelTuple(url=url.strip(), level=0))
        except IOError as e:
            # Bug fix: this previously logged the undefined name `f`, which
            # raised a NameError whenever the IOError path was taken.
            logging.warning('file can not read: %s', urls_file)
        if self.urls_cache.empty():
            raise errors.QueueEmptyError('no urls at first')

    def init(self):
        """initial method to prepare the environment"""
        for i in range(self.thread_num):
            worker = spider.Spider(self.urls_cache, self.saver, self.target_url_regx)
            self.workers.append(SpiderThread(worker, self.crawl_interval))

    def execute(self):
        """start all threads and run"""
        self.saver.start()
        for worker in self.workers:
            worker.start()
        # Poll workers until all have finished; iterate over a copy so the
        # list can be mutated while looping.
        while self.workers:
            for worker in self.workers[:]:
                worker.join(self.crawl_interval)
                if not worker.is_alive():
                    self.workers.remove(worker)
                    logging.info('worker thread is removed: %d', worker.ident)
        self.saver.terminate()
        logging.info('all worker thread exited, exit now')

    def terminate(self):
        """Signal every worker thread to stop."""
        for worker in self.workers:
            worker.terminate()


if __name__ == '__main__':
    # NOTE(review): Scheduler requires an arg_parser argument, so running
    # this module directly raises TypeError — confirm the intended entry
    # point before relying on this.
    Scheduler()
Chicago-based worship community, Renew Movement, has a heart to see revival sweep our nation and they carry out their mission of leading people into encounters with God by joining forces with church communities around the country hosting worship and prayer nights. These nights are designed to gather the multi-ethnic, multi-denominational church in hopes of restoring unity, reconciliation and peace for the mission of the kingdom. This week, we are excited to feature their debut single, "Psalm 91" as our Indie Showcase. The song offers a firm declaration of our trust in God with lyrics birthed straight out of scripture. Renew Movement Director Greg Armstrong shares, “We want to write and release songs that encourage the church to look to the strength of God. We desire lyrics and melodies that set people's heart towards trusting in the One who loves us unconditionally.” "Psalm 91" is the first single from Renew Movement's debut EP, Renew Live, which releases later this Spring. Download your own copy of this incredible new song today on iTunes!
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Example usage of the TriggerDagRunOperator. This example holds 2 DAGs: 1. 1st DAG (example_trigger_controller_dag) holds a TriggerDagRunOperator, which will trigger the 2nd DAG 2. 2nd DAG (example_trigger_target_dag) which will be triggered by the TriggerDagRunOperator in the 1st DAG """ from airflow import DAG from airflow.operators.trigger_dagrun import TriggerDagRunOperator from airflow.utils.dates import days_ago dag = DAG( dag_id="example_trigger_controller_dag", default_args={"owner": "airflow"}, start_date=days_ago(2), schedule_interval="@once", tags=['example'], ) trigger = TriggerDagRunOperator( task_id="test_trigger_dagrun", trigger_dag_id="example_trigger_target_dag", # Ensure this equals the dag_id of the DAG to trigger conf={"message": "Hello World"}, dag=dag, )
Don’t Ever Ever Use GoDaddy… They Do Some good, But Mostly Fraud? Ages ago I helped a friend make a website. Today I got a renewal notification. Apparently I’ve been paying for his site for years without noticing it. I called to shut it down, and they told me tough luck. Nope, I’m required to keep paying for it just because. Just to be clear, GoDaddy is a fraud organization on par with AT&T or AOL back in the day, where they’d make it impossible for you to stop getting billed. Same thing. As surely as AOL and AT&T were legit ten years ago, so is GoDaddy legitimate now, which is to say “barely at all,” best I can tell. You signed up, sure, but they make it exceptionally difficult to stop giving them money, even (and especially) for services you no longer use. That’s how they make their money. You know those Super Bowl ads aren’t free, right? If everyone used their services to the extent promised, they’d go broke. Most of us just use a fraction, and that’s fine, but when we want to cancel services, we should be allowed to. It took five tries calling in to reach an actual person, at which point I was told that even though my name is on the account, I couldn’t take my credit card off of it. Mind you, the site has been dead for at least three to four years. There’s literally nothing there. I asked which credit card was on file. Nope, can’t even access that. “If you figure out which one, you can file a dispute with your credit card to contest it…” What? That’s not how business is done. Their solution to billing me for something I don’t want and won’t pay for is to wait until they charge me for it, then argue with my bank that it was bogus from the beginning. That’s impossible, since I authorized it, even if it was YEARS ago. And they know that damn well. There’s literally no way to stop giving them money, even for domains and accounts you don’t use. No. No, my friend.
I talked with Isaiah Ratford, who was extremely polite and helpful, courteous even in the face of my escalating frustration, but GoDaddy.com is a worthless pile of crap, with whom no one should ever do business, period. Sure, they may sue me for defamation or some such nonsense, but it’s wholly without merit. The attention would be more likely to attract a class action suit against them for their horrible practices than anything, and I have nothing to take.
#python
# -*- coding: cp1252 -*-
######################################################################
#
# Royal Render Render script for Houdini
# Author: Royal Render, Holger Schoenberger, Binary Alchemy
# Version v 7.0.11
# Copyright (c) Holger Schoenberger - Binary Alchemy
#
######################################################################

# NOTE: this is a Python 2 script (old-style `except X, e` syntax). The
# `hou` module is not imported here — it is expected to already be in
# scope because the script runs inside Houdini's hython interpreter.

import sys
import traceback

def formatExceptionInfo(maxTBlevel=5):
    # Return (exception name, exception args, formatted traceback lines)
    # for the exception currently being handled, limiting the traceback
    # to maxTBlevel frames.
    cla, exc, trbk = sys.exc_info()
    excName = cla.__name__
    try:
        excArgs = exc.__dict__["args"]
    except KeyError:
        excArgs = "<no args>"
    excTb = traceback.format_tb(trbk, maxTBlevel)
    return (excName, excArgs, excTb)

try:
    print( "RR - start " )
    # Required: "-frames <start> <end> <step>"
    idx = sys.argv.index( "-frames" )
    seqStart = int(sys.argv[ idx + 1 ])
    seqEnd = int(sys.argv[ idx + 2 ])
    seqStep = int(sys.argv[ idx + 3 ])
    # Optional output overrides: base name, extension, frame padding
    if "-fileName" not in sys.argv:
        fileName = None
    else:
        idx = sys.argv.index( "-fileName" )
        fileName = sys.argv[ idx + 1 ]
    if "-fileExt" not in sys.argv:
        fileExt = None
    else:
        idx = sys.argv.index( "-fileExt" )
        fileExt = sys.argv[ idx + 1 ]
    if "-filePadding" not in sys.argv:
        filePadding = None
    else:
        idx = sys.argv.index( "-filePadding" )
        filePadding = int(sys.argv[ idx + 1 ])
    # Optional render resolution override: "-res <width> <height>"
    imgRes = ()
    if "-res" in sys.argv:
        idx = sys.argv.index( "-res" )
        width = int( sys.argv[ idx + 1 ] )
        height = int( sys.argv[ idx + 2 ] )
        imgRes = (width,height)
    # Required: path of the ROP/driver node to render
    idx = sys.argv.index( "-driver" )
    driver = sys.argv[ idx + 1 ]
    # The scene file is always the last command-line argument
    inputFile = sys.argv[ len(sys.argv) - 1 ]
    try:
        # Second argument True: ignore load warnings where possible
        hou.hipFile.load( inputFile, True )
    except hou.LoadWarning, e:
        print( "Error: LoadWarning (probably wrong houdini version)")
        print( e)
    rop = hou.node( driver )
    if rop == None:
        print( "Error: Driver node \"" + driver + "\" does not exist" )
    else:
        if "-threads" in sys.argv:
            idx = sys.argv.index( "-threads" )
            threadCount = sys.argv[ idx + 1 ]
            # Arnold ROPs use their own thread parameters, so only set the
            # mantra-style vm_* parms for non-arnold drivers.
            if (rop.type().name() != "arnold"):
                try:
                    # Disable "use max threads" before forcing a fixed count
                    usemaxthread=rop.parm('vm_usemaxthreads')
                    usemaxthread.set(0)
                    threadCount = int(threadCount)
                    threadCountParam=rop.parm('vm_threadcount')
                    threadCountParam.set(threadCount)
                # NOTE(review): catching hou.LoadWarning here looks too
                # narrow — a missing parm raises AttributeError instead;
                # confirm the intended failure mode.
                except hou.LoadWarning, e:
                    print( "Error: Unable to set thread count")
                    print( e)
        # NOTE(review): the loop advances by 1 even though seqStep was
        # parsed above; seqStep is only forwarded to rop.render() per
        # single-frame range (fr, fr, seqStep). Confirm whether stepped
        # sequences are expected to skip frames here.
        for fr in range( seqStart, seqEnd + 1 ):
            print( "Rendering Frame #" + str(fr) +" ...")
            if fileName != None:
                if filePadding != None:
                    # Zero-pad the frame number to filePadding digits
                    pad = "%0*d" % (filePadding,fr)
                    filenameComplete = fileName+pad+fileExt
                else:
                    filenameComplete = fileName+fileExt
            else:
                # No override: the ROP's own output path is used
                filenameComplete = None
            print( filenameComplete )
            # Render exactly this frame; imgRes/filename are optional overrides
            rop.render( (fr,fr,seqStep), imgRes, filenameComplete, fileExt )
            print( "Frame Rendered #" + str(fr) )
except hou.OperationFailed, e:
    print( "Error: OperationFailed")
    print( e)
    print( formatExceptionInfo())
except:
    print( "Error: Error executing script")
    print( formatExceptionInfo())
How are we going to deal with North Korea? and More! « Triggered by a banana peel… no joke! Americans are paying more in taxes than on food and clothing. Hmm… that doesn’t seem right! Maj. Gen. Paul E. Vallely (ret) joins Joe to discuss what’s really happening in the WH, Middle East and Europe. The General discusses what’s going on with ISIS, the Taliban and more. The Deep State continues to be a problem for Trump. It may take YEARS to drain the swamp. Some of the Generals advising Trump don’t understand the Caliphate and radical Islam. The transgender issue is social engineering. It doesn’t belong in the military. The Legislative Branch is broken. We can’t get anything done. The Judicial system is broken. Too many activist judges. FEMA is doing an outstanding job under Trump’s leadership and Gov. Abbott is doing a terrific job. What North Korea is doing is an act of war. At some point we’ll have to respond. And his regime will be done in 60 minutes!
# -*- coding: utf-8 -*-

import abc

import numpy as np


class ACorruptor(metaclass=abc.ABCMeta):
    """Interface for corruptors that turn positive facts into negative examples."""

    @abc.abstractmethod
    def __call__(self, steps, entities):
        raise NotImplementedError


class SimpleCorruptor(ACorruptor):
    """Corrupts the subject (or object) column of each fact with sampled entity indices."""

    def __init__(self, index_generator=None, candidate_indices=None, corrupt_objects=False):
        self.index_generator = index_generator
        self.candidate_indices = candidate_indices
        self.corrupt_objects = corrupt_objects

    def __call__(self, steps, entities):
        """
        Generates sets of negative examples, by corrupting the facts (walks) provided as input.

        :param steps: [nb_samples, m] matrix containing the walk relation indices.
        :param entities: [nb_samples, 2] matrix containing subject and object indices.
        :return: ([nb_samples, 1], [nb_samples, 2]) pair containing sets of negative examples.
        """
        nb_samples = steps.shape[0]

        # Which entity column to overwrite: 0 = subjects, 1 = objects.
        column = 1 if self.corrupt_objects else 0

        # Work on a copy so the caller's entity matrix is left intact.
        corrupted_entities = np.copy(entities)
        corrupted_entities[:, column] = self.index_generator(
            nb_samples, self.candidate_indices)

        # Relation indices are returned unchanged; corrupting those is the
        # job of SimpleRelationCorruptor.
        return steps, corrupted_entities


class SimpleRelationCorruptor(ACorruptor):
    """Corrupts the relation index of each fact with sampled relation indices."""

    def __init__(self, index_generator=None, candidate_indices=None):
        self.index_generator = index_generator
        self.candidate_indices = candidate_indices

    def __call__(self, steps, entities):
        """
        Generates sets of negative examples, by corrupting the facts (walks) provided as input.

        :param steps: [nb_samples, m] matrix containing the walk relation indices.
        :param entities: [nb_samples, 2] matrix containing subject and object indices.
        :return: ([nb_samples, 1], [nb_samples, 2]) pair containing sets of negative examples.
        """
        nb_samples = steps.shape[0]

        # Overwrite the first relation column on a copy of the steps matrix.
        corrupted_steps = np.copy(steps)
        corrupted_steps[:, 0] = self.index_generator(
            nb_samples, self.candidate_indices)

        # Entities are returned unchanged.
        return corrupted_steps, entities
Great customer care leads to long-term, happy customers; it’s a proven fact. A large part of providing support these days centers around getting ahead of issues — and effectively solving them — before they can turn into problems. One way to do this is with a system of proactive customer support. By building and implementing ways to anticipate needs, questions and potential issues ahead of time, you’ll reduce your customer’s effort along with your number of inquiries. 1. Maintain a robust knowledge base and FAQ. Creating a support knowledge base that provides a fully holistic view of your product or service, along with any other issues that may arise, is the first step to implementing a proactive support system. Beyond establishing a system of best practices for your customer service team, having an easy-to-read and informative FAQ will allow your customers to become experts on your product/service and seek help themselves when needed. 2. Make it easy to contact you. At the end of the day, if your customer needs one-on-one help, it should be clear and simple how to reach someone knowledgable. It’s important to provide a clear path of customer contact from anywhere on your website, knowledge base articles and other communications. Whether their preferred route is email, chat or phone support, do your best to assist them via any potential path! Zingtree is a powerful tool for building custom interactive decision tree troubleshooters – these allow users to follow a path of questions and answers leading to a final most-likely solution, and are especially helpful for more technical issues or questions. By enabling your customers to self-help in a hands-on way, you’ll empower them and develop stronger loyalty in the long-term! With a robust knowledge base of information, easy communication paths and customized troubleshooter decision trees for technical issues, customers will find it simple (and even pleasant!) to find the answers they are seeking. 
Start Your Free Zingtree Trial!
# # Relativistic MCMC method # # This file is part of PINTS. # Copyright (c) 2017-2019, University of Oxford. # For licensing information, see the LICENSE file distributed with the PINTS # software package. # from __future__ import absolute_import, division from __future__ import print_function, unicode_literals import pints import numpy as np class RelativisticMCMC(pints.SingleChainMCMC): r""" Implements Relativistic Monte Carlo as described in [1]_. Uses a physical analogy of a particle moving across a landscape under Hamiltonian dynamics to aid efficient exploration of parameter space. Introduces an auxilary variable -- the momentum (``p_i``) of a particle moving in dimension ``i`` of negative log posterior space -- which supplements the position (``q_i``) of the particle in parameter space. The particle's motion is dictated by solutions to Hamilton's equations, .. math:: dq_i/dt &= \partial H/\partial p_i\\ dp_i/dt &= - \partial H/\partial q_i. The Hamiltonian is given by, .. math:: H(q,p) &= U(q) + KE(p)\\ &= -\text{log}(p(q|X)p(q)) + mc^2 (\Sigma_{i=1}^{d} p_i^2 / (mc^2) + 1)^{0.5} where ``d`` is the dimensionality of model, ``m`` is the scalar 'mass' given to each particle (chosen to be 1 as default) and ``c`` is the speed of light (chosen to be 10 by default). To numerically integrate Hamilton's equations, it is essential to use a sympletic discretisation routine, of which the most typical approach is the leapfrog method, .. math:: p_i(t + \epsilon/2) &= p_i(t) - (\epsilon/2) d U(q_i(t))/dq_i\\ q_i(t + \epsilon) &= q_i(t) + \epsilon M^{-1}(p_i(t + \epsilon/2)) p_i(t + \epsilon/2)\\ p_i(t + \epsilon) &= p_i(t + \epsilon/2) - (\epsilon/2) d U(q_i(t + \epsilon))/dq_i where relativistic mass (a scalar) is, .. math:: M(p) = m (\Sigma_{i=1}^{d} p_i^2 / (mc^2) + 1)^{0.5} In particular, the algorithm we implement follows eqs. in section 2.1 of [1]_. Extends :class:`SingleChainMCMC`. References ---------- .. [1] "Relativistic Monte Carlo". 
Xiaoyu Lu, Valerio Perrone, Leonard Hasenclever, Yee Whye Teh, Sebastian J. Vollmer, 2017, Proceedings of Machine Learning Research. """ def __init__(self, x0, sigma0=None): super(RelativisticMCMC, self).__init__(x0, sigma0) # Set initial state self._running = False self._ready_for_tell = False # Current point in the Markov chain self._current = None # Aka current_q in the chapter self._current_energy = None # Aka U(current_q) = -log_pdf self._current_gradient = None self._current_momentum = None # Aka current_p # Current point in the leapfrog iterations self._momentum = None # Aka p in the chapter self._position = None # Aka q in the chapter self._gradient = None # Aka grad_U(q) in the chapter # Iterations, acceptance monitoring, and leapfrog iterations self._mcmc_iteration = 0 self._mcmc_acceptance = 0 self._frog_iteration = 0 # Default number of leapfrog iterations self._n_frog_iterations = 20 # Default integration step size for leapfrog algorithm self._epsilon = 0.1 self._step_size = None self._mass = 1 self._c = 10 self.set_leapfrog_step_size(np.diag(self._sigma0)) # Divergence checking # Create a vector of divergent iterations self._divergent = np.asarray([], dtype='int') # Default threshold for Hamiltonian divergences # (currently set to match Stan) self._hamiltonian_threshold = 10**3 def ask(self): """ See :meth:`SingleChainMCMC.ask()`. """ # Check ask/tell pattern if self._ready_for_tell: raise RuntimeError('Ask() called when expecting call to tell().') # Initialise on first call if not self._running: self._running = True self._mc2 = self._mass * self._c**2 # Notes: # Ask is responsible for updating the position, which is the point # returned to the user # Tell is then responsible for updating the momentum, which uses the # gradient at this new point # The MCMC step happens in tell, and does not require any new # information (it uses the log_pdf and gradient of the final point # in the leapfrog run). 
# Very first iteration if self._current is None: # Ask for the pdf and gradient of x0 self._ready_for_tell = True return np.array(self._x0, copy=True) # First iteration of a run of leapfrog iterations if self._frog_iteration == 0: # Sample random momentum for current point using identity cov self._current_momentum = np.random.multivariate_normal( np.zeros(self._n_parameters), np.eye(self._n_parameters)) # First leapfrog position is the current sample in the chain self._position = np.array(self._current, copy=True) self._gradient = np.array(self._current_gradient, copy=True) self._momentum = np.array(self._current_momentum, copy=True) # Perform a half-step before starting iteration 0 below self._momentum -= self._scaled_epsilon * self._gradient * 0.5 # Perform a leapfrog step for the position squared = np.sum(np.array(self._momentum)**2) relativistic_mass = self._mass * np.sqrt(squared / self._mc2 + 1) self._position += ( self._scaled_epsilon * self._momentum / relativistic_mass) # Ask for the pdf and gradient of the current leapfrog position # Using this, the leapfrog step for the momentum is performed in tell() self._ready_for_tell = True return np.array(self._position, copy=True) def current_log_pdf(self): """ See :meth:`SingleChainMCMC.current_log_pdf()`. """ return -self._current_energy def divergent_iterations(self): """ Returns the iteration number of any divergent iterations """ return self._divergent def epsilon(self): """ Returns epsilon used in leapfrog algorithm """ return self._epsilon def hamiltonian_threshold(self): """ Returns threshold difference in Hamiltonian value from one iteration to next which determines whether an iteration is divergent. """ return self._hamiltonian_threshold def leapfrog_steps(self): """ Returns the number of leapfrog steps to carry out for each iteration. """ return self._n_frog_iterations def leapfrog_step_size(self): """ Returns the step size for the leapfrog algorithm. 
""" return self._step_size def _log_init(self, logger): """ See :meth:`Loggable._log_init()`. """ logger.add_float('Accept.') def _log_write(self, logger): """ See :meth:`Loggable._log_write()`. """ logger.log(self._mcmc_acceptance) def _kinetic_energy(self, momentum): """ Kinetic energy of relativistic particle, which is defined in [1]_. """ squared = np.sum(np.array(momentum)**2) return self._mc2 * (squared / self._mc2 + 1)**0.5 def mass(self): """ Returns ``mass`` which is the rest mass of particle. """ return self._mass def n_hyper_parameters(self): """ See :meth:`TunableMethod.n_hyper_parameters()`. """ return 4 def name(self): """ See :meth:`pints.MCMCSampler.name()`. """ return 'Relativistic MCMC' def needs_sensitivities(self): """ See :meth:`pints.MCMCSampler.needs_sensitivities()`. """ return True def scaled_epsilon(self): """ Returns scaled epsilon used in leapfrog algorithm """ return self._scaled_epsilon def _set_scaled_epsilon(self): """ Rescales epsilon along the dimensions of step_size """ self._scaled_epsilon = np.zeros(self._n_parameters) for i in range(self._n_parameters): self._scaled_epsilon[i] = self._epsilon * self._step_size[i] def set_epsilon(self, epsilon): """ Sets epsilon for the leapfrog algorithm """ epsilon = float(epsilon) if epsilon <= 0: raise ValueError('epsilon must be positive for leapfrog algorithm') self._epsilon = epsilon self._set_scaled_epsilon() def set_hamiltonian_threshold(self, hamiltonian_threshold): """ Sets threshold difference in Hamiltonian value from one iteration to next which determines whether an iteration is divergent. """ if hamiltonian_threshold < 0: raise ValueError('Threshold for divergent iterations must be ' + 'non-negative.') self._hamiltonian_threshold = hamiltonian_threshold def set_hyper_parameters(self, x): """ The hyper-parameter vector is ``[leapfrog_steps, leapfrog_step_size, mass, c]``. See :meth:`TunableMethod.set_hyper_parameters()`. 
""" self.set_leapfrog_steps(x[0]) self.set_leapfrog_step_size(x[1]) self.set_mass(x[2]) self.set_speed_of_light(x[3]) def set_leapfrog_steps(self, steps): """ Sets the number of leapfrog steps to carry out for each iteration. """ steps = int(steps) if steps < 1: raise ValueError('Number of steps must exceed 0.') self._n_frog_iterations = steps def set_leapfrog_step_size(self, step_size): """ Sets the step size for the leapfrog algorithm. """ a = np.atleast_1d(step_size) if len(a[a < 0]) > 0: raise ValueError( 'Step size for leapfrog algorithm must' + 'be greater than zero.' ) if len(a) == 1: step_size = np.repeat(step_size, self._n_parameters) elif not len(step_size) == self._n_parameters: raise ValueError( 'Step size should either be of length 1 or equal to the' + 'number of parameters' ) self._step_size = step_size self._set_scaled_epsilon() def set_mass(self, mass): """ Sets scalar mass. """ if isinstance(mass, list): raise ValueError('Mass must be scalar.') if mass <= 0: raise ValueError('Mass must be positive.') self._mass = mass def set_speed_of_light(self, c): """ Sets `speed of light`. """ if c <= 0: raise ValueError('Speed of light must be positive.') self._c = c def speed_of_light(self): """ Returns `speed of light`. """ return self._c def tell(self, reply): """ See :meth:`pints.SingleChainMCMC.tell()`. """ if not self._ready_for_tell: raise RuntimeError('Tell called before proposal was set.') self._ready_for_tell = False # Unpack reply energy, gradient = reply # Check reply, copy gradient energy = float(energy) gradient = pints.vector(gradient) assert(gradient.shape == (self._n_parameters, )) # Energy = -log_pdf, so flip both signs! 
energy = -energy gradient = -gradient # Very first call if self._current is None: # Check first point is somewhere sensible if not np.isfinite(energy): raise ValueError( 'Initial point for MCMC must have finite logpdf.') # Set current sample, energy, and gradient self._current = self._x0 self._current_energy = energy self._current_gradient = gradient # Increase iteration count self._mcmc_iteration += 1 # Mark current as read-only, so it can be safely returned self._current.setflags(write=False) # Return first point in chain return self._current # Set gradient of current leapfrog position self._gradient = gradient # Update the leapfrog iteration count self._frog_iteration += 1 # Not the last iteration? Then perform a leapfrog step and return if self._frog_iteration < self._n_frog_iterations: self._momentum -= self._scaled_epsilon * self._gradient # Return None to indicate there is no new sample for the chain return None # Final leapfrog iteration: only do half a step self._momentum -= self._scaled_epsilon * self._gradient * 0.5 # Before starting accept/reject procedure, check if the leapfrog # procedure has led to a finite momentum and logpdf. If not, reject. accept = 0 if np.isfinite(energy) and np.all(np.isfinite(self._momentum)): # Evaluate potential and kinetic energies at start and end of # leapfrog trajectory current_U = self._current_energy current_K = self._kinetic_energy(self._current_momentum) proposed_U = energy proposed_K = self._kinetic_energy(self._momentum) # Check for divergent iterations by testing whether the # Hamiltonian difference is above a threshold div = proposed_U + proposed_K - (self._current_energy + current_K) if np.abs(div) > self._hamiltonian_threshold: # pragma: no cover self._divergent = np.append( self._divergent, self._mcmc_iteration) self._momentum = self._position = self._gradient = None self._frog_iteration = 0 # Update MCMC iteration count self._mcmc_iteration += 1 # Update acceptance rate (only used for output!) 
self._mcmc_acceptance = ( (self._mcmc_iteration * self._mcmc_acceptance + accept) / (self._mcmc_iteration + 1)) self._current.setflags(write=False) return self._current # Accept/reject else: r = np.exp(current_U - proposed_U + current_K - proposed_K) if np.random.uniform(0, 1) < r: accept = 1 self._current = self._position self._current_energy = energy self._current_gradient = gradient # Mark current as read-only, so it can be safely returned self._current.setflags(write=False) # Reset leapfrog mechanism self._momentum = self._position = self._gradient = None self._frog_iteration = 0 # Update MCMC iteration count self._mcmc_iteration += 1 # Update acceptance rate (only used for output!) self._mcmc_acceptance = ( (self._mcmc_iteration * self._mcmc_acceptance + accept) / (self._mcmc_iteration + 1)) # Return current position as next sample in the chain return self._current
We have an exciting opportunity to join our team as a Clinical Ancillary Systems Analyst. In this role, the successful candidate is an independent-contributor IT professional, applying technical expertise in one or more IT disciplines. Selects, customizes, configures, installs and supports packages. Understands the work processes of assigned business areas. Collects user requirements and translates them into well-engineered, tested, and deployed business application systems. Responsibilities include the analysis, design, development, testing, installation, and maintenance of information systems. Receives general direction; work in progress is reviewed routinely. Responsible for the design and development of IT systems. Develops design and functional specifications, produces deliverables related to the project(s) assigned and assists in post-implementation support and system enhancements. Responsible for selecting appropriate C.A.S.E. tools to develop systems and software. Responsible for gathering, compiling and synthesizing information with regard to technology processes or systems. Independently designs technical solutions for modules of a project, or to resolve most problems. Selects appropriate work procedures or approaches to address technical challenges. To qualify, you must have a BA/BS degree or equivalent; the role typically requires 4 or more years of experience.
# -*- coding: utf-8 -*-

# Copyright(C) 2010-2012 Romain Bignon, Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

from weboob.browser.pages import HTMLPage
from weboob.browser.elements import ListElement, ItemElement, method
from weboob.browser.filters.standard import CleanText, Regexp, Field, Filter, debug
from weboob.capabilities.gauge import GaugeMeasure, GaugeSensor
from weboob.capabilities.base import NotAvailable


class Split(Filter):
    # Filter that parses a sensor's "label: value unit" text and returns
    # either the numeric value or the unit, depending on `mode`.
    def __init__(self, selector, mode):
        super(Split, self).__init__(selector)
        # mode 0 -> return the numeric value, mode 1 -> return the unit
        self.mode = mode

    @debug()
    def filter(self, txt):
        # Temperature lines look like "Temperatur: 12.3°..." (German label);
        # the unit is fixed to °C and the value sits between ':' and '°'.
        if u"Temperatur" in txt:
            value = txt.split(': ')[1].split(u'°')[0]
            unit = u'°C'
        else:
            # Other sensors: "<label>: <value> <unit>"
            value = txt.split(':')[-1].split()[0]
            unit = txt.split(':')[-1].split()[1]
            # The page truncates "W/m²" to "W/m"; restore the full unit
            if unit == u"W/m":
                unit = u"W/m²"
        try:
            value = float(value)
        except ValueError:
            # Non-numeric reading: report the value as unavailable
            value = NotAvailable
        # Return value (mode 0) or unit (mode 1)
        return [value, unit][self.mode]


class StartPage(HTMLPage):
    @method
    class get_sensors_list(ListElement):
        # Each sensor is rendered in a centered paragraph
        item_xpath = '//p[@align="center"]'

        class item(ItemElement):
            klass = GaugeSensor

            # Sensor name: text before the (possibly space-padded) colon
            obj_name = Regexp(CleanText('.'), '(.*?) {0,}: .*', "\\1")
            # Stable id: "dd-<name>" with spaces/punctuation stripped
            obj_id = CleanText(Regexp(Field('name'), '(.*)', "dd-\\1"), " .():")
            obj_gaugeid = u"wetter"
            # This source provides no forecast data
            obj_forecast = NotAvailable
            obj_unit = Split(CleanText('.'), 1)

            def obj_lastvalue(self):
                # Build the latest measurement from the same paragraph text
                lastvalue = GaugeMeasure()
                lastvalue.level = Split(CleanText('.'), 0)(self)
                lastvalue.alarm = NotAvailable
                return lastvalue
Performing a quite remarkable recovery against Japan, the second seeds, on the opening day of play in the Women’s Team event at the 2018 Asian Games in Jakarta, Indonesia, the contest which in effect secured first place in the group and resigned Japan to runners up spot; DPR Korea continued their impressive run of form on the second day of action, Monday 17th August. The no.7 seeds, fielding once again the trio comprising Cha Hyo Sim, Kim Song I and Kim Nam Hae; they beat Chinese Taipei, the no.5 seeds, by three matches to one to book a semi-final place and moreover be assured of a medal; there are two bronze medals. Cheng I-Ching gave Chinese Taipei the ideal start; she beat Cha Hyo Sim in four games (11-5, 11-8, 8-11, 13-11) but that was to prove her team’s only success. Chen Szu-Yu suffered at the hands of Kim Song I (11-4, 11-8, 11-5), before in a full distance contest, decided by the minimal two point margin, Cheng Hsien-Tu experienced defeat at the hands of Kim Nam Hae (13-11, 9-11, 7-11, 11-8, 11-9). DPR Korea in the ascendancy, matters concluded with the defensive skills of Kim Song I proving too secure for Cheng I-Ching; a straight games verdict (11-4, 11-9, 11-5) was order of the day. Success contrary to seeding, in the remaining quarter-final contests, it was as per seeding. China, the top seeds, selecting Wang Manyu, Chen Meng and Zhu Yuling beat the Japanese outfit comprising Miyu Kato, Minami Ando and Miyu Maeda by three matches to nil. A comprehensive win but Wang Manyu was somewhat self-critical. Similarly, the no.3 seeds, Korea Republic’s Suh Hyowon, Jeon Jihee and Yang Haeun recorded a three-one success against the no.6 seeds, Singapore’s Feng Tianwei, Yu Mengyu and Lin Ye; the same result as Hong Kong, the no.4 seeds, posted against India, the no.8 seeds. Hong Kong selected Lee Ho Ching, Doo Hoi Kem and Minnie Soo Wai Yam; for India the line-up read Manika Batra, Ayhika Mukherjee and Mouma Das. The one winner for India was Manika Batra. 
She beat Lee Ho Ching in the opening match of the fixture (11-9, 11-9, 5-11, 11-6); the backbone of success for Hong Kong was Doo Hoi Kem; she accounted for both Ayhika Mukherjee (12-14, 11-4, 12-10, 11-8) and Manika Batra (11-8, 11-8, 13-11). Meanwhile, for Singapore, the only success came in the opening match of the engagement when Feng Tianwei overcame Suh Hyowon (9-11, 11-6, 11-9, 11-7); the guiding hand for Korea Republic was Jeon Jihee. She prevailed against both Yu Mengyu (9-11, 12-10, 11-9, 13-11) and Feng Tianwei (11-8, 11-8, 11-8). Disappointment but Yu Mengyu is well aware of the trials and tribulations of the international arena. Likewise Feng Tianwei was philosophical, having earlier in the day being called to duty in the concluding group stage contest against Vietnam. In the penultimate round, China meets Korea Republic, Hong Kong faces DPR Korea; both semi-finals and finals will be played on Wednesday 28th August.
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Beta API stub implementation.""" import threading from grpc._links import invocation from grpc.framework.core import implementations as _core_implementations from grpc.framework.crust import implementations as _crust_implementations from grpc.framework.foundation import logging_pool from grpc.framework.interfaces.links import utilities _DEFAULT_POOL_SIZE = 6 class _AutoIntermediary(object): def __init__(self, up, down, delegate): self._lock = threading.Lock() self._up = up self._down = down self._in_context = False self._delegate = delegate def __getattr__(self, attr): with self._lock: if self._delegate is None: raise AttributeError('No useful attributes out of context!') else: return getattr(self._delegate, attr) def __enter__(self): with self._lock: if self._in_context: raise ValueError('Already in context!') elif self._delegate is None: self._delegate = self._up() self._in_context = True return self def __exit__(self, exc_type, exc_val, exc_tb): with self._lock: if not self._in_context: raise ValueError('Not in context!') self._down() self._in_context = False self._delegate = None return False def __del__(self): with self._lock: if self._delegate is not None: self._down() self._delegate = None class _StubAssemblyManager(object): def __init__( self, thread_pool, thread_pool_size, end_link, grpc_link, stub_creator): self._thread_pool = thread_pool self._pool_size = thread_pool_size self._end_link = end_link self._grpc_link = grpc_link self._stub_creator = stub_creator self._own_pool = None def up(self): if self._thread_pool is None: self._own_pool = logging_pool.pool( _DEFAULT_POOL_SIZE if self._pool_size is None else self._pool_size) assembly_pool = self._own_pool else: assembly_pool = self._thread_pool self._end_link.join_link(self._grpc_link) self._grpc_link.join_link(self._end_link) self._end_link.start() self._grpc_link.start() return self._stub_creator(self._end_link, assembly_pool) def down(self): self._end_link.stop(0).wait() self._grpc_link.stop() 
self._end_link.join_link(utilities.NULL_LINK) self._grpc_link.join_link(utilities.NULL_LINK) if self._own_pool is not None: self._own_pool.shutdown(wait=True) self._own_pool = None def _assemble( channel, host, metadata_transformer, request_serializers, response_deserializers, thread_pool, thread_pool_size, stub_creator): end_link = _core_implementations.invocation_end_link() grpc_link = invocation.invocation_link( channel, host, metadata_transformer, request_serializers, response_deserializers) stub_assembly_manager = _StubAssemblyManager( thread_pool, thread_pool_size, end_link, grpc_link, stub_creator) stub = stub_assembly_manager.up() return _AutoIntermediary( stub_assembly_manager.up, stub_assembly_manager.down, stub) def _dynamic_stub_creator(service, cardinalities): def create_dynamic_stub(end_link, invocation_pool): return _crust_implementations.dynamic_stub( end_link, service, cardinalities, invocation_pool) return create_dynamic_stub def generic_stub( channel, host, metadata_transformer, request_serializers, response_deserializers, thread_pool, thread_pool_size): return _assemble( channel, host, metadata_transformer, request_serializers, response_deserializers, thread_pool, thread_pool_size, _crust_implementations.generic_stub) def dynamic_stub( channel, host, service, cardinalities, metadata_transformer, request_serializers, response_deserializers, thread_pool, thread_pool_size): return _assemble( channel, host, metadata_transformer, request_serializers, response_deserializers, thread_pool, thread_pool_size, _dynamic_stub_creator(service, cardinalities))
– Who is the company founder? What was your motivation when you/they decided to start the company? The company was founded by us: Max Lykasov and Slava Oganesyan. We realized we make an outstanding product which can be sold on a global level. Besides we wanted to give young CG artists in our country an opportunity to show their worth as we saw a lot of potential in them. As experience has shown, we made the right decision. – What difficulties did you face in the early years of the company? What about today? The major challenge we faced at the first stage of communication with clients was the level of trust of Western companies to companies from Eastern Europe. Fortunately, this problem is solved after the first project is completed. Most companies become our loyal customers once they received our work. – How was it to get clients back in the early days, and how is it now? What do you do to get clients? Some interesting marketing tricks? We have a well-established sales system that has proven to be highly effective. For obvious reasons, we can not share more information about it. – What is the best client like? It is not easy to define the best client, but as practice shows, large architectural studios are easier to work with as they have better tuned work processes. – What do you think about clients requesting unreasonable deadlines? In our opinion, there is no such thing as unreasonable deadline. It is more about some clients getting into nonstandard situations whom we always help to find a way out. We would never work again with the clients who do not appreciate our efforts. For now we haven’t had a situation in which we would have had to deliberately refuse a project. – How much time do you usually spend working on a project? It depends on a certain project. Some occupy 2-3 days, others take up to a month or longer. – How do you calculate/estimate the project price? What amount/rate does your price range start from? 
Our company estimates the cost based on the time required for project completion. It is difficult to assign a lower bound of the project price, but so far we haven’t done a project which would cost less than 500$. Certainly we work according to our client’s requirement specification, at the same time we possess our own style that is visible in all the works. For us the larger the project is the more interesting it is for us. – If you have your own unique and recognizable visual style, how would you describe it? It is quite hard to describe our visual style. We try our best to produce high-quality photo realistic images. There is an example of a real requirement specification on our website according to which we ask our clients to provide us with project information. Yet naturally each customer is unique and we can change the approach to gathering information to fit a certain project. – They say we learn from losses more than from victories. Can you share some “failure stories”? There was a situation when, due to heavy workload, we didn’t pay enough attention to our major loyal customer and nearly lost him. We drew a good lesson from it. – What country do your major clients come from? Most our clients are located in the USA, but the largest one comes from Canada. – How do you divide the work load among the team? Does one artist take a project end-to-end, or do you spilt according to specialization, e.g., modeling, post-production, etc. We assign one artist for each project. In case it’s a challenging project the assigned artist has a team to help him. – If you are looking for an artists to join your studio, what qualities and knowledge are you looking for the most? We aim at hiring high-level professionals or people that show a lot of potential for professional growth. Our company performs the major part of work in 3D then we apply post-work, which occupies less of our time. – How would you rate your studio’s 3D expertise on a 0 to 10 scale? 
What areas would you like to have more knowledge about? We would rate our expertise in static 3D visualization and virtual reality 9, in animation and animated virtual reality 7. but we are actively developing our skills in this direction. – Do you take part in non-commercial projects and, if yes, what is your motivation in doing so? Once we produced a visualization of a sports complex for a newly established hockey club in Georgia because we wanted to contribute to its prompt development. – Do you attend 3D visualization conferences? No, basically we learn new tendencies from various web-resources and on our own. – Do you have any other company activities with teammates besides work? Our team is young and active with the average age of 25 years old. To keep up the team spirit we practice various sports activities. – What is your greatest work? What was the most challenging project you have worked on? It’s rather difficult to select just one greatest work, but we would give prominence to our project STALINKA. As a rule and unfortunately the most complex and interesting projects fall under NDA, so we can not disclose them. – What can you tell us about your techniques in developing a visualization? Do you provide any training for newcomers? Our main technique is to render an object as close as possible to reality. One of our key objectives – a continuous development of our team of artists. – Do you want to expand, becoming a larger studio with more and more artists? Or would you prefer to “stay small”? Our studio has an intention to expand by attracting high-level professionals. Our country has a large number of high-level professionals, but our goal is to make it more recognizable on the market of 3D visualization. – What can you tell about your competitors? Do you have any in your country/worldwide? Unfortunately, in our country there is no direct competition in the sphere of 3D visualization. 
As to the whole world, we are constantly following a number of companies whose success truly brings us delight and inspires us to go further. – Are you affected by the market overwhelmed with low quality/price offers and a lot of software existing that allows one to create simple 3D renders without any knowledge? Do you think such a job as a 3D artist will still be needed in future? The product we offer is of high quality that non-professionals are unable to replicate, so we are confident that our work will always be of steady demand. – What are your long-term goals? For the coming year we are planning to expand our company up to 50 employees.
import os.path as op

base_dir = '/scr/fluidspace/pkerp/projects/genbank'

from pyspark.sql import *
sqlContext = SQLContext(sc)

# get the gene_id -> pubmed mapping
# FIX: the original filters used '!==', which is not a Python operator and
# raises a SyntaxError; the inequality operator is '!='.
gene2pubmed = (sc.textFile(op.join(base_dir, "data/gene2pubmed"))
               .filter(lambda x: x[0] != '#')   # drop header/comment lines
               .map(lambda x: x.split('\t'))
               .map(lambda p: {'taxid': int(p[0]),
                               'geneid': int(p[1]),
                               'pmid': int(p[2]),
                               'count': 1}))

#schemaGene2Pubmed = sqlContext.inferSchema(gene2pubmed)
#schemaGene2Pubmed.registerTempTable("gene2pubmed")

# gene -> refseq mapping: genomic coordinates per accession/assembly
gene2refseq = (sc.textFile(op.join(base_dir, "data/gene2refseq"))
               .filter(lambda x: x[0] != '#')
               .map(lambda x: x.split('\t'))
               .map(lambda p: {'taxid': int(p[0]),
                               'geneid': int(p[1]),
                               'start_pos': p[9],
                               'end_pos': p[10],
                               'nucleotide_accession': p[7],
                               'orientation': p[11],
                               'assembly': p[12]}))

# per-gene descriptive metadata (symbol, description, name)
gene_info = (sc.textFile(op.join(base_dir, "data/gene_info"))
             .filter(lambda x: x[0] != '#')
             .map(lambda x: x.split('\t'))
             .map(lambda x: {'taxid': int(x[0]),
                             'geneid': int(x[1]),
                             'description': x[8],
                             'symbol': x[2],
                             'name': x[11]}))

# key gene_info by (taxid, geneid) so it can be joined below
gene_info_keyed = gene_info.map(lambda x: ((x['taxid'], x['geneid']), x))

#schemaGene2Refseq = sqlContext.inferSchema(gene2refseq)
#schemaGene2Refseq.registerTempTable("gene2refseq")

# get the most popular genes
#gene_pubmed = sqlContext.sql("select taxid, geneid, count(*) as cnt from gene2pubmed where taxid = 9606 group by geneid, taxid order by cnt desc")
#gene_pubmed.take(10)
#filtered_refseq = sqlContext.sql("select * from gene2refseq where assembly like '%GRCh38%'")
#filtered_refseq.take(10)

# filter for human (taxid 9606) genes, keyed by (taxid, geneid)
human_gene_pubmed = (gene2pubmed.filter(lambda x: x['taxid'] == 9606)
                     .map(lambda x: ((x['taxid'], x['geneid']), x)))


def reduce_count(r1, r2):
    '''
    A reduce function that simply counts the number of elements in the table.

    Note: mutates and returns r1; r2 is only read.

    @param r1: A Row
    @param r2: A Row
    @return: A new Row, equal to the first Row with a summed count.
    '''
    #print >>sys.stderr, "r1:", r1
    r1['count'] += r2['count']
    return r1


# count how many references each id has
# ((taxid, geneid), row)
counted_human_gene_pubmed = (human_gene_pubmed.reduceByKey(reduce_count))
counted_human_gene_pubmed.take(1)


def merge_two_dicts(x, y):
    '''Given two dicts, merge them into a new dict as a shallow copy.'''
    z = x.copy()
    z.update(y)
    return z


# filter the refseq genes to those in the human GRCh38 assembly, on
# full chromosomes only (NC_ accessions)
# ((taxid, geneid), row)
human_refseq = (gene2refseq.filter(lambda x: x['assembly'].find('GRCh38') >= 0)
                .filter(lambda x: x['nucleotide_accession'].find('NC_') >= 0)
                .map(lambda x: ((x['taxid'], x['geneid']), x)))

human_refseq_info = (human_refseq.join(gene_info_keyed)
                     .map(lambda x: (x[0], merge_two_dicts(x[1][0], x[1][1]))))
# join (K,V) and (K,W) -> (K, (V,W)) pairs
# map (K,(V,W)) -> (K,W)

# join the genes with reference counts with the refseq information
human_refseq_pubmed = (counted_human_gene_pubmed.join(human_refseq)
                       .map(lambda x: ((x[1][0]['count'], x[0][0], x[0][1]), x[1][1])))
#.map(lambda x: x['start_end_pos'] = (x['nucleotide_accession'], x['orientation'], x['start_pos'], x['end_pos']))


def consolidate_start_and_end(r):
    '''
    Consolidate the start and end rows from a row.

    :param r: (key, {'start_pos': 1000, 'end_pos': 1010})
    :return: (key, {'start_end_pos': set((1000, 1010))}
    '''
    r[1]['start_end_pos'] = set([(r[1]['nucleotide_accession'],
                                  r[1]['orientation'],
                                  int(r[1]['start_pos']),
                                  int(r[1]['end_pos']))])
    return (r[0], r[1])


def reduce_by_start_end_pos(r1, r2):
    '''
    Reduce all of the rows by their start / send positions.

    :param r: {'start_end_pos': set((1000, 1010))}
    '''
    #print >>sys.stderr, "r1:", r1
    r1['start_end_pos'] = r1['start_end_pos'].union(r2['start_end_pos'])
    return r1


reduced_human_refseq_pubmed = (human_refseq_pubmed.map(consolidate_start_and_end)
                               .reduceByKey(reduce_by_start_end_pos))
# FIX: sortByKey returns a new RDD rather than sorting in place; the original
# discarded the result, so the "by popularity" output was never sorted.
reduced_human_refseq_pubmed = reduced_human_refseq_pubmed.sortByKey(ascending=False)
reduced_human_refseq_pubmed.take(1)

# take every (chr, orientation, start, end) tuple from the set and create one
# big list out of it
# then convert it all to TSV strings
flattened_human_refseq_pubmed = (reduced_human_refseq_pubmed.flatMap(lambda x: [[x[0][0]] + list(y) for y in x[1]['start_end_pos']])
                                 .map(lambda x: "\t".join(map(str, x))))

flattened_human_refseq_pubmed.saveAsTextFile('/scr/fluidspace/pkerp/projects/goomba/output/genes_by_popularity')

'''
gene_pubmed = sqlContext.sql("select geneid, start_pos, count(*) as cnt from gene_starts group by geneid, start_pos order by cnt desc")
gene_pubmed.take(1)
gene_starts = sqlContext.sql('select gene2refseq.geneid, start_pos, pmid from gene2pubmed, gene2refseq where gene2pubmed.geneid = gene2refseq.geneid')
gene_starts.registerTempTable('gene_starts')
genes_sorted = sqlContext.sql("select tax_id, GeneID, count(*) as cnt from gene2refseq order by cnt desc")
gene_pubmed.registerTempTable('gene_pubmed')
gene_starts = sqlContext.sql('select gene2refseq.geneid, start_pos from gene2pubmed, gene2refseq where gene2pubmed.geneid = gene2refseq.geneid')
result.take(1)
gene_info = (sc.textFile(op.join(base_dir, "data/gene_info"))
    .filter(lambda x: x[0] !== '#')
    .map(lambda x: x.split('\t'))
    .map(lambda p: Row(tax_id=int(p[0]), GeneID=int(p[1]), Symbol=p[2], LocusTag=p[3], Synonyms=p[4], dbXrefs=p[5], chromosome=p[6], map_location=p[7], description=p[8], type_of_gene=p[9], Symbol_from_nomenclature_authority=p[10], Full_name_from_nomenclature_authority=p[11], Nomenclature_status=p[12], Other_designations=p[13], Modification_date=p[14])))
'''
President George W. Bush and the U.S. Congress registered record-low approval ratings in a Reuters/Zogby poll released on Wednesday, and a new monthly index measuring the mood of Americans dipped slightly on deepening worries about the economy. Only 29 percent of Americans gave Bush a positive grade for his job performance, below his worst Zogby poll mark of 30 percent in March. A paltry 11 percent rated Congress positively, beating the previous low of 14 percent in July. The Reuters/Zogby Index, a new measure of the mood of the country, dropped from 100 to 98.8 in the last month on worries about the economy and fears of a recession, pollster John Zogby said. "Since the last time we polled we have had the mortgage crisis, and we are hearing the recession word a whole lot more than we've heard it in the past," Zogby said.
#!/bin/python
"""Write a single-tetrahedron unstructured grid to test.vtu and render it."""
import vtk
import vtk.util.colors

# Tetrahedron vertices: the origin plus the three unit axis points.
tet_points = vtk.vtkPoints()
for coord in ((0, 0, 0), (0, 1, 0), (1, 0, 0), (0, 0, 1)):
    tet_points.InsertNextPoint(*coord)

grid = vtk.vtkUnstructuredGrid()
grid.SetPoints(tet_points)
# A single tetrahedral cell built from the four points above.
grid.InsertNextCell(vtk.VTK_TETRA, 4, [0, 1, 2, 3])

# Persist the grid as an XML unstructured-grid (.vtu) file.
vtu_writer = vtk.vtkXMLUnstructuredGridWriter()
vtu_writer.SetFileName('test.vtu')
vtu_writer.SetInputData(grid)
vtu_writer.Update()
vtu_writer.Write()

# Standard VTK display pipeline: mapper -> actor -> renderer -> window.
grid_mapper = vtk.vtkDataSetMapper()
grid_mapper.SetInputData(grid)

grid_actor = vtk.vtkActor()
grid_actor.SetMapper(grid_mapper)
grid_actor.GetProperty().SetColor(vtk.util.colors.banana)

scene = vtk.vtkRenderer()
window = vtk.vtkRenderWindow()
window.AddRenderer(scene)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(window)

scene.AddActor(grid_actor)
scene.SetBackground(0.3, 0.6, 0.3)

interactor.Initialize()
#scene.ResetCamera()
#scene.GetActiveCamera().Zoom(1.5)
window.Render()
interactor.Start()
Lichter Law Firm takes seriously its commitment to give back to the communities it serves. Our attorneys have been actively involved for years with civic, legal, religious and service organizations that truly make a meaningful difference for our clients, colleagues and community. A number of our firm members have received awards, been honored at community dinners or been the subject of articles appearing in local and national news media in connection with their community service. Mayra co-founded the Creutzfeldt-Jakob Disease Foundation, www.cjdfoundation.org a registered section 501 (c)(3) non-profit organization which she ran and served as President for approximately ten years. During this time, the Foundation gained national and international recognition when Mad Cow Disease (the bovine form of this prion disease) became worldwide news. Mayra worked with scientists, health care professionals, government agencies, professional associations, the media and family members of patients to obtain, produce and disseminate information on Creutzfeldt-Jakob Disease and other related prion diseases. She has advocated numerous times on Capitol Hill to members of Congress for more surveillance, reporting and research funding, and hosted the first international conference of scientists, media and families. Mayra has also co-presented a lecture with the Center For Disease Control (“CDC”) at the Mayo Clinic. Mayra remains very active in the Foundation as the Chairman of the Board of Directors. The Foundation works with the Centers for Disease Control and Prevention, from which it receives a yearly grant, collaborates closely with the National Prion Disease Pathology Surveillance Center, and has helped establish the CJD International Support Alliance which includes 14 member countries. 
Currently, the Foundation awards numerous research grants (over $1 million between 2014-18) and hosts an annual conference which attracts internationally renowned scientists, researchers and government representatives who meet with families affected by prion diseases. For her tireless work on the Foundation, Mayra received the Health Care Heroes Award from the Greater Miami Chamber of Commerce, the 2018 Florence Kranitz Leadership Award from the Foundation, and she and her work have been featured in the New York Times, USA Today, The Miami Herald and Good Housekeeping magazine.
"""Manage Treadmill app manifest. """ import logging import urllib.request import urllib.parse import urllib.error import click from treadmill import cli from treadmill import restclient from treadmill import context _LOGGER = logging.getLogger(__name__) _STATE_FORMATTER = cli.make_formatter(cli.InstanceStatePrettyFormatter) _ENDPOINT_FORMATTER = cli.make_formatter(cli.EndpointPrettyFormatter) _APP_FORMATTER = cli.make_formatter(cli.AppPrettyFormatter) def _show_state(apis, match, finished): """Show cell state.""" url = '/state/' query = [] if match: query.append(('match', match)) if finished: query.append(('finished', '1')) if query: url += '?' + '&'.join( [urllib.parse.urlencode([param]) for param in query] ) response = restclient.get(apis, url) cli.out(_STATE_FORMATTER(response.json())) def _show_list(apis, match, states, finished=False): """Show list of instnces in given state.""" url = '/state/' query = [] if match: query.append(('match', match)) if finished: query.append(('finished', '1')) if query: url += '?' 
+ '&'.join( [urllib.parse.urlencode([param]) for param in query] ) response = restclient.get(apis, url) names = [item['name'] for item in response.json() if item['state'] in states] for name in names: print(name) def _show_endpoints(apis, pattern, endpoint, proto): """Show cell endpoints.""" url = '/endpoint/%s' % urllib.parse.quote(pattern) if endpoint: if proto: url += '/' + proto else: url += '/*' url += '/' + endpoint response = restclient.get(apis, url) endpoints = [{ 'name': end['name'], 'proto': end['proto'], 'endpoint': end['endpoint'], 'hostport': '{0}:{1}'.format(end['host'], end['port']) } for end in response.json()] cli.out(_ENDPOINT_FORMATTER(endpoints)) def _show_instance(apis, instance_id): """Show instance manifest.""" url = '/instance/%s' % urllib.parse.quote(instance_id) response = restclient.get(apis, url) cli.out(_APP_FORMATTER(response.json())) def init(): """Return top level command handler.""" ctx = {} @click.group() @click.option('--cell', required=True, envvar='TREADMILL_CELL', callback=cli.handle_context_opt, expose_value=False) @click.option('--api', required=False, help='API url to use.', metavar='URL', envvar='TREADMILL_STATEAPI') def show(api): """Show state of scheduled applications.""" ctx['api'] = api @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') @click.option('--finished', is_flag=True, default=False, help='Show finished instances.') def state(match, finished): """Show state of Treadmill scheduled instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_state(apis, match, finished) @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def pending(match): """Show pending instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['pending']) @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def running(match): """Show running 
instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['running']) @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def finished(match): """Show finished instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['finished'], finished=True) @show.command() @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def scheduled(match): """Show scheduled instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['running', 'scheduled']) @show.command(name='all') @cli.ON_REST_EXCEPTIONS @click.option('--match', help='Application name pattern match') def _all(match): """Show scheduled instances.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_list(apis, match, ['pending', 'running', 'scheduled']) @show.command() @cli.ON_REST_EXCEPTIONS @click.argument('pattern') @click.argument('endpoint', required=False) @click.argument('proto', required=False) def endpoints(pattern, endpoint, proto): """Show application endpoints.""" apis = context.GLOBAL.state_api(ctx['api']) return _show_endpoints(apis, pattern, endpoint, proto) @show.command() @cli.ON_REST_EXCEPTIONS @click.argument('instance_id') def instance(instance_id): """Show scheduled instance manifest.""" apis = context.GLOBAL.cell_api(ctx['api']) return _show_instance(apis, instance_id) del _all del running del scheduled del pending del finished del instance del state del endpoints return show
Our motto is to provide our customers with high quality, low maintenance and cost effective products and solutions. We are ISO 9001:2008 certified & all our products are CE Marked. With satisfied customer base across India, Singapore, Middle East, Thailand, Indonesia & Bhutan we are committed to supply the best in class products & after sales support.
#!/usr/bin/env python # -*- coding: ascii -*- # # Copyright 2011 # Andr\xe9 Malo or his licensors, as applicable # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" ===================== Javascript Minifier ===================== Javascript Minifier based on `jsmin.c by Douglas Crockford`_\. This module is a re-implementation based on the semantics of jsmin.c. Usually it produces the same results. It differs in the following ways: - there is no error detection: unterminated string, regex and comment literals are treated as regular javascript code and minified as such. - Control characters inside string and regex literals are left untouched; they are not converted to spaces (nor to \n) - Newline characters are not allowed inside string and regex literals, except for line continuations in string literals (ECMA-5). - rjsmin does not handle streams, but only complete strings. (However, the module provides a "streamy" interface). Besides the list above it differs from direct python ports of jsmin.c in speed. Since most parts of the logic are handled by the regex engine it's way faster than the original python port by Baruch Even. The speed factor varies between about 6 and 55 depending on input and python version (it gets faster the more compressed the input already is). Compared to the speed-refactored python port by Dave St.Germain the performance gain is less dramatic but still between 1.2 and 7. See the docs/BENCHMARKS file for details. 
rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even
more. Both python 2 and python 3 are supported.

.. _jsmin.c by Douglas Crockford:
   http://www.crockford.com/javascript/jsmin.c
"""
__author__ = "Andr\xe9 Malo"
# Python 2/3 compat: on py2 the byte string above has a .decode method and is
# decoded from latin-1; on py3 getattr falls back to the identity lambda.
__author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = '1.0.1'
__all__ = ['jsmin', 'jsmin_for_posers']

import re as _re

from webassets.six.moves import map
from webassets.six.moves import zip


def _make_jsmin(extended=True, python_only=False):
    """
    Generate JS minifier based on `jsmin.c by Douglas Crockford`_

    .. _jsmin.c by Douglas Crockford:
       http://www.crockford.com/javascript/jsmin.c

    :Parameters:
      `extended` : ``bool``
        Extended Regexps? (using lookahead and lookbehind). This is faster,
        because it can be optimized way more. The regexps used with `extended`
        being false are only left here to allow easier porting to platforms
        without extended regex features (and for my own reference...)

      `python_only` : ``bool``
        Use only the python variant. If true, the c extension is not even
        tried to be loaded.

    :Return: Minifier
    :Rtype: ``callable``
    """
    # pylint: disable = R0912, R0914, W0612
    if not python_only:
        # Prefer the compiled C extension when available; it implements the
        # same interface and short-circuits everything below.
        try:
            import _rjsmin
        except ImportError:
            pass
        else:
            return _rjsmin.jsmin
    try:
        xrange
    except NameError:
        xrange = range  # pylint: disable = W0622

    # --- Regex building blocks composed into the big substitution regexes ---
    # ASCII control chars and space, except \n (\012) and \r (\015).
    space_chars = r'[\000-\011\013\014\016-\040]'

    line_comment = r'(?://[^\r\n]*)'
    space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
    # \047 is the single quote character.
    string1 = \
        r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)'
    string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")'
    strings = r'(?:%s|%s)' % (string1, string2)

    charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])'
    nospecial = r'[^/\\\[\r\n]'
    if extended:
        regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % (
            nospecial, charclass, nospecial
        )
    else:
        regex = (
            r'(?:/(?:[^*/\\\r\n\[]|%s|\\[^\r\n])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)'
        )
        regex = regex % (charclass, nospecial, charclass, nospecial)

    # Characters after which a '/' starts a regex literal, not division.
    pre_regex = r'[(,=:\[!&|?{};\r\n]'

    space = r'(?:%s|%s)' % (space_chars, space_comment)
    newline = r'(?:%s?[\r\n])' % line_comment

    def fix_charclass(result):
        """ Fixup string of chars to fit into a regex char class """
        # Move a literal '-' to the end so it is not read as a range.
        pos = result.find('-')
        if pos >= 0:
            result = r'%s%s-' % (result[:pos], result[pos + 1:])

        def sequentize(string):
            """
            Notate consecutive characters as sequence
            (1-4 instead of 1234)
            """
            first, last, result = None, None, []
            for char in map(ord, string):
                if last is None:
                    first = last = char
                elif last + 1 == char:
                    last = char
                else:
                    result.append((first, last))
                    first = last = char
            if last is not None:
                result.append((first, last))
            return ''.join(['%s%s%s' % (
                chr(first),
                last > first + 1 and '-' or '',
                last != first and chr(last) or ''
            ) for first, last in result])

        return _re.sub(
            r'([\000-\040\047])',  # for better portability
            lambda m: '\\%03o' % ord(m.group(1)),
            (sequentize(result)
             .replace('\\', '\\\\')
             .replace('[', '\\[')
             .replace(']', '\\]')
             )
        )

    def id_literal_(what):
        """ Make id_literal like char class """
        match = _re.compile(what).match
        result = ''.join([
            chr(c) for c in range(127) if not match(chr(c))
        ])
        return '[^%s]' % fix_charclass(result)

    def not_id_literal_(keep):
        """ Make negated id_literal like char class """
        match = _re.compile(id_literal_(keep)).match
        result = ''.join([
            chr(c) for c in range(127) if not match(chr(c))
        ])
        return r'[%s]' % fix_charclass(result)

    if extended:
        id_literal = id_literal_(r'[a-zA-Z0-9_$]')
        id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(+-]')
        id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')

        # One big alternation; each capturing group maps to one branch of
        # space_subber below.
        space_sub = _re.compile((
            r'([^\047"/\000-\040]+)'
            r'|(%(strings)s[^\047"/\000-\040]*)'
            r'|(?:(?<=%(pre_regex)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))'
            r'|(?<=%(id_literal_close)s)'
            r'%(space)s*(?:(%(newline)s)%(space)s*)+'
            r'(?=%(id_literal_open)s)'
            r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)'
            r'|%(space)s+'
            r'|(?:%(newline)s%(space)s*)+'
        ) % locals()).sub

        def space_subber(match):
            """ Substitution callback """
            # pylint: disable = C0321
            groups = match.groups()
            if groups[0]: return groups[0]
            elif groups[1]: return groups[1]
            elif groups[2]: return groups[2]
            elif groups[3]: return '\n'
            elif groups[4]: return ' '
            return ''

        def jsmin(script):  # pylint: disable = W0621
            r"""
            Minify javascript based on `jsmin.c by Douglas Crockford`_\.

            Instead of parsing the stream char by char, it uses a regular
            expression approach which minifies the whole script with one big
            substitution regex.

            .. _jsmin.c by Douglas Crockford:
               http://www.crockford.com/javascript/jsmin.c

            :Parameters:
              `script` : ``str``
                Script to minify

            :Return: Minified script
            :Rtype: ``str``
            """
            return space_sub(space_subber, '\n%s\n' % script).strip()

    else:
        not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]')
        not_id_literal_open = not_id_literal_(r'[a-zA-Z0-9_${\[(+-]')
        not_id_literal_close = not_id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]')

        # Pass 1: normalize spaces/newlines and strip comments.
        space_norm_sub = _re.compile((
            r'(%(strings)s)'
            r'|(?:(%(pre_regex)s)%(space)s*(%(regex)s))'
            r'|(%(space)s)+'
            r'|(?:(%(newline)s)%(space)s*)+'
        ) % locals()).sub

        def space_norm_subber(match):
            """ Substitution callback """
            # pylint: disable = C0321
            groups = match.groups()
            if groups[0]: return groups[0]
            elif groups[1]: return groups[1].replace('\r', '\n') + groups[2]
            elif groups[3]: return ' '
            elif groups[4]: return '\n'

        # Pass 2: remove spaces before certain tokens.
        space_sub1 = _re.compile((
            r'[\040\n]?(%(strings)s|%(pre_regex)s%(regex)s)'
            r'|\040(%(not_id_literal)s)'
            r'|\n(%(not_id_literal_open)s)'
        ) % locals()).sub

        def space_subber1(match):
            """ Substitution callback """
            groups = match.groups()
            return groups[0] or groups[1] or groups[2]

        # Pass 3: remove spaces after certain tokens.
        space_sub2 = _re.compile((
            r'(%(strings)s)\040?'
            r'|(%(pre_regex)s%(regex)s)[\040\n]?'
            r'|(%(not_id_literal)s)\040'
            r'|(%(not_id_literal_close)s)\n'
        ) % locals()).sub

        def space_subber2(match):
            """ Substitution callback """
            groups = match.groups()
            return groups[0] or groups[1] or groups[2] or groups[3]

        def jsmin(script):
            r"""
            Minify javascript based on `jsmin.c by Douglas Crockford`_\.

            Instead of parsing the stream char by char, it uses a regular
            expression approach. The script is minified with three passes:

            normalization
                Control character are mapped to spaces, spaces and newlines
                are squeezed and comments are stripped.
            space removal 1
                Spaces before certain tokens are removed
            space removal 2
                Spaces after certain tokens are remove

            .. _jsmin.c by Douglas Crockford:
               http://www.crockford.com/javascript/jsmin.c

            :Parameters:
              `script` : ``str``
                Script to minify

            :Return: Minified script
            :Rtype: ``str``
            """
            return space_sub2(
                space_subber2,
                space_sub1(
                    space_subber1,
                    space_norm_sub(space_norm_subber, '\n%s\n' % script)
                )
            ).strip()

    return jsmin


jsmin = _make_jsmin()


def jsmin_for_posers(script):
    r"""
    Minify javascript based on `jsmin.c by Douglas Crockford`_\.

    Instead of parsing the stream char by char, it uses a regular
    expression approach which minifies the whole script with one big
    substitution regex.

    .. _jsmin.c by Douglas Crockford:
       http://www.crockford.com/javascript/jsmin.c

    :Warning: This function is the digest of a _make_jsmin() call. It just
              utilizes the resulting regex. It's just for fun here and may
              vanish any time. Use the `jsmin` function instead.

    :Parameters:
      `script` : ``str``
        Script to minify

    :Return: Minified script
    :Rtype: ``str``
    """
    def subber(match):
        """ Substitution callback """
        groups = match.groups()
        return (
            groups[0] or
            groups[1] or
            groups[2] or
            (groups[3] and '\n') or
            (groups[4] and ' ') or
            ''
        )

    # The pattern below is the pre-expanded form of the extended regex built
    # by _make_jsmin(); treat it as generated output, not hand-editable.
    return _re.sub(
        r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?'
        r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|'
        r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]'
        r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/'
        r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*'
        r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*'
        r'))|(?<=[^\000-!#%&(*,./:-@\[\\^`{|~])(?:[\000-\011\013\014\016-\04'
        r'0]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n'
        r']))(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)'
        r'*/))*)+(?=[^\000-#%-\047)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-'
        r'^`{-~-])((?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*'
        r']*\*+)*/)))+(?=[^\000-#%-,./:-@\[-^`{-~-])|(?:[\000-\011\013\014\0'
        r'16-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)'
        r'?[\r\n])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]'
        r'*\*+)*/))*)+',
        subber, '\n%s\n' % script
    ).strip()


if __name__ == '__main__':
    import sys as _sys
    _sys.stdout.write(jsmin(_sys.stdin.read()))
The Delta Protection Commission (Commission) is committed to national recognition of the Delta as a diverse, accessible, modern recreation and tourism destination. The Commission has compiled the 2015 Inventory of Recreation Facilities in the Sacramento-San Joaquin Delta which represents the current recreation resources in the Legal Delta and its adjacent areas. The Inventory is a reference for the Commission, State and local agencies, and recreation providers in planning, developing, and promoting the Delta as a unique recreation and tourism destination. The Davis-Dolwig Act is a 47-year-old state law that specifies that the state, not water ratepayers, should fund the recreation component of the State Water Project (SWP). The budget proposes a number of statutory reforms to the act, in part to provide a dedicated funding source for its implementation. We find that the Governor’s proposal does not address a number of major problems with the implementation of the act and that the administration’s approach improperly limits the Legislature’s oversight role. We also find that, over many years, the Department of Water Resources has been allocating costs to the state under Davis-Dolwig that are significantly in excess of the direct costs to SWP for recreation. In our report, we offer the Legislature a package of statutory reforms to address problems that we have identified with the implementation of Davis-Dolwig. These include clarifying the role of public funding for recreation in SWP. We also recommend that the state evaluate the potential to divest itself of SWP reservoirs that are used mainly for recreation. Research on the social and natural processes that sustain the unique values of the Delta as an evolving place is sparse and sporadic. We found seven research projects and no established research programs directly aimed at developing an understanding of the processes supporting the Delta as an evolving place.
Far more research occurs on other Delta topics, such as water flows, contaminants and nutrients, and at-risk species. Nor did we find evidence that research in these other areas is identifying natural processes that could protect and enhance the values of the changing Delta. We therefore recommend an expanded, sustained commitment to research on the unique cultural, recreational, natural resource, and agricultural values of the Delta as an evolving place. The level of effort could resemble that in other Delta subject areas. Priority should be given to research that supports achieving the coequal goals in a manner that protects and enhances these unique values.
# Generated by Django 2.2.1 on 2019-05-08 12:18

from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import pazar.utils.html
import pazar.webapp.models.mixins
import uuid


class Migration(migrations.Migration):
    # Initial schema for the webapp application: a custom User model plus the
    # Image, ProductCategory, TextHash and Listing tables. Auto-generated by
    # `makemigrations`; do not edit field definitions by hand.

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # UUID primary key instead of Django's default auto-increment id.
                ('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('public_key', models.TextField()),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'ordering': ('last_name', 'first_name'),
            },
            bases=(models.Model, pazar.webapp.models.mixins.BaseMixin),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('thumbnail', models.BinaryField()),
                ('filename', models.CharField(db_index=True, max_length=1024)),
                ('name', models.CharField(db_index=True, max_length=255)),
                ('file_hash', models.CharField(db_index=True, max_length=74)),
                ('_tags', models.CharField(db_index=True, max_length=1024)),
            ],
            options={
                'ordering': ('name', 'filename'),
            },
            bases=(models.Model, pazar.webapp.models.mixins.BaseMixin),
        ),
        migrations.CreateModel(
            name='ProductCategory',
            fields=[
                ('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.CharField(db_index=True, max_length=1024)),
                ('product_type', models.PositiveSmallIntegerField(choices=[(1, 'physical'), (2, 'digital'), (3, 'service')], default=1)),
                ('description', models.TextField()),
            ],
            bases=(models.Model, pazar.webapp.models.mixins.BaseMixin),
        ),
        migrations.CreateModel(
            name='TextHash',
            fields=[
                # Content-addressed storage: the checksum of the text is the key.
                ('checksum', models.CharField(editable=False, max_length=64, primary_key=True, serialize=False, verbose_name='Content Checksum')),
                ('_text', models.BinaryField(default=b'')),
            ],
            options={
                'unique_together': {('checksum', '_text')},
            },
            bases=(models.Model, pazar.webapp.models.mixins.BaseMixin),
        ),
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.CharField(db_index=True, max_length=1024)),
                ('_page_text', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='webapp.TextHash')),
                ('product_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='webapp.ProductCategory')),
                ('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            bases=(models.Model, pazar.webapp.models.mixins.BaseMixin, pazar.utils.html.RstMixin),
        ),
    ]
No luxurious home upgrade is complete without the enhancements of modern technology. High-tech and intelligent devices lend ease to everyday activities and enrich your ability to enjoy the comforts of home. With new products joining this category at an exponential pace, there’s a way to boost the smart function of every room, even the bathroom. There truly is no limit to the home features that can work harder, faster and smarter—even when it comes to the toilet. If the notion of an intelligent toilet seems extreme to your senses, you may not be alone. In fact, according to a recent survey conducted by KOHLER, about two-thirds of Americans would choose a toilet that would give them a cleaner, fresher feeling, and three in five think washing with water would give them a cleaner feel than toilet paper alone. An overall fresher feeling and features like a deodorizer, night light and heated seat have strong appeal, according to the same survey, and those features are all offered in options like the KOHLER Veil, an ultimate, one-piece intelligent toilet with integrated cleansing functionality that provides optimum hygiene and individual comfort. From personal cleansing to an LED nightlight to hands-free opening, closing and flushing—all of which are easy to control on a touchscreen LCD remote control—the toilet brings unexpected options to your bathroom. Explore the available models and learn more about intelligent toilets at kohler.com/intelligenttoilets. A jetted tub is just the beginning when it comes to creating a spa-like atmosphere. Today’s options let you trick out your tub with everything from bubbles and heat to sound and color. Advanced hydrotherapy options use air or sound to massage, soothe, invigorate or relax your body and your mind—all controlled at the touch of a button. Don’t forget the heated back to keep you warm even longer.
# Copyright (c) 2016 Rocky Bernstein # Copyright (c) 2000-2002 by hartmut Goebel <hartmut@goebel.noris.de> from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG from uncompyle6.parser import PythonParserSingle from uncompyle6.parsers.parse21 import Python21Parser class Python15Parser(Python21Parser): def __init__(self, debug_parser=PARSER_DEFAULT_DEBUG): super(Python15Parser, self).__init__(debug_parser) self.customized = {} def p_import15(self, args): """ importstmt ::= filler IMPORT_NAME STORE_FAST importstmt ::= filler IMPORT_NAME STORE_NAME importfrom ::= filler IMPORT_NAME importlist importfrom ::= filler filler IMPORT_NAME importlist POP_TOP importlist ::= importlist IMPORT_FROM importlist ::= IMPORT_FROM """ class Python15ParserSingle(Python21Parser, PythonParserSingle): pass if __name__ == '__main__': # Check grammar p = Python15Parser() p.checkGrammar() p.dumpGrammar() # local variables: # tab-width: 4
« I be Africa Man Original! NaijaMade… Coming to a store near you! I finally attended the 2008 Nigerian day parade in NYC and despite the rain, there was a pretty good turnout. I had a great time helping a friend publicize her new social networking website. It was nice to see all the Naijas in full effect strutting their green and whites all day. I took advantage of the celebrations to launch NaijaMade: NaijaMan’s T-Shirt line. Many thanks to everyone who bought a t-shirt that day and encouraged me to take this dream to the next level. Send me a message if you’d like to preorder. Update: 04/28/09. The NaijaMade.com v1.0 launched today. Currently carrying just the Naija Day parade selection. We are working on the summer line and will update the online offering shortly! This entry was posted on October 2, 2008 at 3:55 pm and is filed under Naija Dreams, Random. You can follow any responses to this entry through the RSS 2.0 feed. You can leave a response, or trackback from your own site.
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.

import datetime
import operator
from hashlib import md5
from collections import OrderedDict

from translate.filters.decorators import Category
from translate.storage import base

from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import F
from django.template.defaultfilters import truncatechars
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.http import urlquote

from pootle.core.contextmanagers import update_data_after
from pootle.core.delegate import data_tool, format_syncers, format_updaters
from pootle.core.log import (
    TRANSLATION_ADDED, TRANSLATION_CHANGED, TRANSLATION_DELETED,
    UNIT_ADDED, UNIT_DELETED, UNIT_OBSOLETE, UNIT_RESURRECTED,
    STORE_ADDED, STORE_DELETED, STORE_OBSOLETE,
    MUTE_QUALITYCHECK, UNMUTE_QUALITYCHECK,
    action_log, store_log)
from pootle.core.mixins import CachedTreeItem
from pootle.core.models import Revision
from pootle.core.search import SearchBroker
from pootle.core.signals import update_data
from pootle.core.storage import PootleFileSystemStorage
from pootle.core.url_helpers import (
    get_editor_filter, split_pootle_path, to_tp_relative_path)
from pootle.core.utils import dateformat
from pootle.core.utils.aggregate import max_column
from pootle.core.utils.multistring import PLURAL_PLACEHOLDER, SEPARATOR
from pootle.core.utils.timezone import datetime_min, make_aware
from pootle.i18n.gettext import ugettext_lazy as _
from pootle_format.models import Format
from pootle_misc.checks import check_names
from pootle_misc.util import import_func
from pootle_statistics.models import (Submission, SubmissionFields,
                                      SubmissionTypes)

from .constants import (
    DEFAULT_PRIORITY, FUZZY, NEW, OBSOLETE, POOTLE_WINS, TRANSLATED,
    UNTRANSLATED)
from .fields import MultiStringField, TranslationStoreField
from .managers import StoreManager, SuggestionManager, UnitManager
from .store.deserialize import StoreDeserialization
from .store.serialize import StoreSerialization
from .util import SuggestionStates, vfolders_installed


# Module-wide search broker, created lazily by get_tm_broker().
TM_BROKER = None


def get_tm_broker():
    """Return the shared SearchBroker instance, creating it on first use."""
    global TM_BROKER
    if TM_BROKER is None:
        TM_BROKER = SearchBroker()
    return TM_BROKER


# # # # # # # # Quality Check # # # # # # #

class QualityCheck(models.Model):
    """Database cache of results of qualitychecks on unit."""

    # Identifier of the check that produced this result (see check_names).
    name = models.CharField(max_length=64, db_index=True)
    unit = models.ForeignKey("pootle_store.Unit", db_index=True)
    category = models.IntegerField(null=False, default=Category.NO_CATEGORY)
    message = models.TextField()
    # True when a reviewer has muted this check as a false positive.
    false_positive = models.BooleanField(default=False, db_index=True)

    def __unicode__(self):
        return self.name

    @property
    def display_name(self):
        # Fall back to the raw check name when no display name is registered.
        return check_names.get(self.name, self.name)

    @classmethod
    def delete_unknown_checks(cls):
        """Remove cached results for checks that are no longer defined."""
        unknown_checks = QualityCheck.objects \
            .exclude(name__in=check_names.keys())
        unknown_checks.delete()


# # # # # # # # # Suggestion # # # # # # # #

class Suggestion(models.Model, base.TranslationUnit):
    """Suggested translation for a :cls:`~pootle_store.models.Unit`, provided
    by users or automatically generated after a merge.
""" target_f = MultiStringField() target_hash = models.CharField(max_length=32, db_index=True) unit = models.ForeignKey('pootle_store.Unit') user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, related_name='suggestions', db_index=True) reviewer = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, related_name='reviews', db_index=True) translator_comment_f = models.TextField(null=True, blank=True) state_choices = [ (SuggestionStates.PENDING, _('Pending')), (SuggestionStates.ACCEPTED, _('Accepted')), (SuggestionStates.REJECTED, _('Rejected')), ] state = models.CharField(max_length=16, default=SuggestionStates.PENDING, null=False, choices=state_choices, db_index=True) creation_time = models.DateTimeField(db_index=True, null=True) review_time = models.DateTimeField(null=True, db_index=True) objects = SuggestionManager() # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # # @property def _target(self): return self.target_f @_target.setter def _target(self, value): self.target_f = value self._set_hash() @property def _source(self): return self.unit._source @property def translator_comment(self, value): return self.translator_comment_f @translator_comment.setter def translator_comment(self, value): self.translator_comment_f = value self._set_hash() # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # # def __unicode__(self): return unicode(self.target) def _set_hash(self): string = self.translator_comment_f if string: string = self.target_f + SEPARATOR + string else: string = self.target_f self.target_hash = md5(string.encode("utf-8")).hexdigest() # # # # # # # # Unit # # # # # # # # # # wordcount_f = import_func(settings.POOTLE_WORDCOUNT_FUNC) def count_words(strings): wordcount = 0 for string in strings: wordcount += wordcount_f(string) return wordcount def stringcount(string): try: return len(string.strings) except AttributeError: return 1 class Unit(models.Model, base.TranslationUnit): store = 
models.ForeignKey("pootle_store.Store", db_index=True) index = models.IntegerField(db_index=True) unitid = models.TextField(editable=False) unitid_hash = models.CharField(max_length=32, db_index=True, editable=False) source_f = MultiStringField(null=True) source_hash = models.CharField(max_length=32, db_index=True, editable=False) source_wordcount = models.SmallIntegerField(default=0, editable=False) source_length = models.SmallIntegerField(db_index=True, default=0, editable=False) target_f = MultiStringField(null=True, blank=True) target_wordcount = models.SmallIntegerField(default=0, editable=False) target_length = models.SmallIntegerField(db_index=True, default=0, editable=False) developer_comment = models.TextField(null=True, blank=True) translator_comment = models.TextField(null=True, blank=True) locations = models.TextField(null=True, editable=False) context = models.TextField(null=True, editable=False) state = models.IntegerField(null=False, default=UNTRANSLATED, db_index=True) revision = models.IntegerField(null=False, default=0, db_index=True, blank=True) # Metadata creation_time = models.DateTimeField(auto_now_add=True, db_index=True, editable=False, null=True) mtime = models.DateTimeField(auto_now=True, db_index=True, editable=False) # unit translator submitted_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, db_index=True, related_name='submitted') submitted_on = models.DateTimeField(db_index=True, null=True) commented_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, db_index=True, related_name='commented') commented_on = models.DateTimeField(db_index=True, null=True) # reviewer: who has accepted suggestion or removed FUZZY # None if translation has been submitted by approved translator reviewed_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, db_index=True, related_name='reviewed') reviewed_on = models.DateTimeField(db_index=True, null=True) objects = UnitManager() simple_objects = models.Manager() class Meta(object): 
        unique_together = (
            ('store', 'unitid_hash'),
            ("store", "state", "index", "unitid_hash"))
        get_latest_by = 'mtime'
        index_together = [
            ["store", "index"],
            ["store", "revision"],
            ["store", "mtime"],
            ["store", "state"]]

    # # # # # # # # # # # # # #  Properties # # # # # # # # # # # # # # # # #

    @property
    def _source(self):
        return self.source_f

    @_source.setter
    def _source(self, value):
        # Flag the change so save() refreshes hash/length/wordcount.
        self.source_f = value
        self._source_updated = True

    @property
    def _target(self):
        return self.target_f

    @_target.setter
    def _target(self, value):
        self.target_f = value
        self._target_updated = True

    # # # # # # # # # # # # # Class & static methods # # # # # # # # # # # # #

    @classmethod
    def max_revision(cls):
        """Returns the max revision number across all units."""
        return max_column(cls.objects.all(), 'revision', 0)

    # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #

    def __unicode__(self):
        # FIXME: consider using unit id instead?
        return unicode(self.source)

    def __str__(self):
        return str(self.convert())

    def __init__(self, *args, **kwargs):
        super(Unit, self).__init__(*args, **kwargs)
        # Dirty-tracking flags consumed (and reset) by save().
        self._rich_source = None
        self._source_updated = False
        self._rich_target = None
        self._target_updated = False
        self._state_updated = False
        self._comment_updated = False
        self._auto_translated = False
        self._encoding = 'UTF-8'

    def delete(self, *args, **kwargs):
        # Record the deletion in the action log before removing the row.
        action_log(user='system', action=UNIT_DELETED,
                   lang=self.store.translation_project.language.code,
                   unit=self.id, translation='',
                   path=self.store.pootle_path)
        super(Unit, self).delete(*args, **kwargs)

    def save(self, *args, **kwargs):
        """Persist the unit, maintaining derived fields, revision, review
        metadata, action log entries and quality checks.

        Update flags can come either from the dirty-tracking attributes set
        by the property setters or be forced via keyword arguments.
        """
        created = self.id is None
        source_updated = kwargs.pop("source_updated", None) or self._source_updated
        target_updated = kwargs.pop("target_updated", None) or self._target_updated
        state_updated = kwargs.pop("state_updated", None) or self._state_updated
        auto_translated = (
            kwargs.pop("auto_translated", None)
            or self._auto_translated)
        comment_updated = (
            kwargs.pop("comment_updated", None)
            or self._comment_updated)
        action = kwargs.pop("action", None) or getattr(self, "_save_action", None)

        if not hasattr(self, '_log_user'):
            # Attribute changes to the system user unless a caller set one.
            User = get_user_model()
            self._log_user = User.objects.get_system_user()
        user = kwargs.pop("user", self._log_user)

        if created:
            action = UNIT_ADDED

        if source_updated:
            # update source related fields
            self.source_hash = md5(self.source_f.encode("utf-8")).hexdigest()
            self.source_length = len(self.source_f)
            self.update_wordcount(auto_translate=True)

        if target_updated:
            # update target related fields
            self.target_wordcount = count_words(self.target_f.strings)
            self.target_length = len(self.target_f)
            if filter(None, self.target_f.strings):
                if self.state == UNTRANSLATED:
                    self.state = TRANSLATED
                    action = action or TRANSLATION_ADDED
                else:
                    action = action or TRANSLATION_CHANGED
            else:
                action = TRANSLATION_DELETED
                # if it was TRANSLATED then set to UNTRANSLATED
                if self.state > FUZZY:
                    self.state = UNTRANSLATED

        # Updating unit from the .po file set its revision property to
        # a new value (the same for all units during its store updated)
        # since that change doesn't require further sync but note that
        # auto_translated units require further sync
        revision = kwargs.pop('revision', None)
        if revision is not None and not auto_translated:
            self.revision = revision
        elif target_updated or state_updated or comment_updated:
            self.revision = Revision.incr()

        if not created and action:
            action_log(
                user=self._log_user, action=action,
                lang=self.store.translation_project.language.code,
                unit=self.id, translation=self.target_f,
                path=self.store.pootle_path)

        was_fuzzy = (
            state_updated and self.state == TRANSLATED
            and action == TRANSLATION_CHANGED
            and not target_updated)
        if was_fuzzy:
            # set reviewer data if FUZZY has been removed only and
            # translation hasn't been updated
            self.reviewed_on = timezone.now()
            self.reviewed_by = self._log_user
        elif self.state == FUZZY:
            # clear reviewer data if unit has been marked as FUZZY
            self.reviewed_on = None
            self.reviewed_by = None
        elif self.state == UNTRANSLATED:
            # clear reviewer and translator data if translation
            # has been deleted
            self.reviewed_on = None
            self.reviewed_by = None
            self.submitted_by = None
            self.submitted_on = None

        super(Unit, self).save(*args, **kwargs)

        if action and action == UNIT_ADDED:
            # Log creation after save so self.id is populated.
            action_log(
                user=self._log_user, action=action,
                lang=self.store.translation_project.language.code,
                unit=self.id, translation=self.target_f,
                path=self.store.pootle_path)
            self.add_initial_submission(user=user)

        if source_updated or target_updated:
            if not (created and self.state == UNTRANSLATED):
                self.update_qualitychecks()
            if self.istranslated():
                self.update_tmserver()

        # done processing source/target update remove flag
        self._source_updated = False
        self._target_updated = False
        self._state_updated = False
        self._comment_updated = False
        self._auto_translated = False

        update_data.send(
            self.store.__class__, instance=self.store)

    def get_absolute_url(self):
        return self.store.get_absolute_url()

    def get_translate_url(self):
        return (
            "%s%s"
            % (self.store.get_translate_url(),
               '#unit=%s' % unicode(self.id)))

    def get_search_locations_url(self):
        (proj_code, dir_path,
         filename) = split_pootle_path(self.store.pootle_path)[1:]

        return u''.join([
            reverse('pootle-project-translate',
                    args=[proj_code, dir_path, filename]),
            get_editor_filter(search=self.locations, sfields='locations'),
        ])

    def get_screenshot_url(self):
        # Returns None implicitly when no prefix is configured.
        prefix = self.store.translation_project.\
            project.screenshot_search_prefix
        if prefix:
            return prefix + urlquote(self.source_f)

    def is_accessible_by(self, user):
        """Returns `True` if the current unit is accessible by `user`."""
        if user.is_superuser:
            return True

        # Imported here, not at module level -- presumably to avoid an
        # import cycle with pootle_project; confirm before moving it up.
        from pootle_project.models import Project
        user_projects = Project.accessible_by_user(user)
        return self.store.translation_project.project.code in user_projects

    def add_initial_submission(self, user=None):
        # Record a creation submission for units that arrive already
        # translated or fuzzy.
        if self.istranslated() or self.isfuzzy():
            Submission.objects.create(
                creation_time=self.creation_time,
                translation_project=self.store.translation_project,
                submitter=user or self._log_user,
                unit=self,
                store=self.store,
                type=SubmissionTypes.UNIT_CREATE,
                field=SubmissionFields.TARGET,
                new_value=self.target,
            )

    @cached_property
    def unit_syncer(self):
        return self.store.syncer.unit_sync_class(self)

    def convert(self, unitclass=None):
        """Convert to a unit of type :param:`unitclass` retaining as much
        information from the database as the target format can support."""
        return self.unit_syncer.convert(unitclass)

    def sync(self, unit):
        """Sync in file unit with translations from the DB."""
        changed = False

        if not self.isobsolete() and unit.isobsolete():
            unit.resurrect()
            changed = True

        if unit.target != self.target:
            if unit.hasplural():
                # Pad missing plural forms with empty strings before
                # comparing/assigning.
                nplurals = self.store.translation_project.language.nplurals
                target_plurals = len(self.target.strings)
                strings = self.target.strings
                if target_plurals < nplurals:
                    strings.extend([u'']*(nplurals - target_plurals))
                if unit.target.strings != strings:
                    unit.target = strings
                    changed = True
            else:
                unit.target = self.target
                changed = True

        self_notes = self.getnotes(origin="translator")
        unit_notes = unit.getnotes(origin="translator")
        if unit_notes != (self_notes or ''):
            if self_notes != '':
                unit.addnote(self_notes, origin="translator",
                             position="replace")
            else:
                unit.removenotes()
            changed = True

        if unit.isfuzzy() != self.isfuzzy():
            unit.markfuzzy(self.isfuzzy())
            changed = True

        if self.isobsolete() and not unit.isobsolete():
            unit.makeobsolete()
            changed = True

        return changed

    def update(self, unit, user=None):
        """Update in-DB translation from the given :param:`unit`.

        :param user: User to attribute updates to.
        :rtype: bool
        :return: True if the new :param:`unit` differs from the current unit.
            Two units differ when any of the fields differ (source, target,
            translator/developer comments, locations, context, status...).
""" changed = False if user is None: User = get_user_model() user = User.objects.get_system_user() update_source = ( self.source != unit.source or (len(self.source.strings) != stringcount(unit.source)) or (self.hasplural() != unit.hasplural())) if update_source: if unit.hasplural() and len(unit.source.strings) == 1: self.source = [unit.source, PLURAL_PLACEHOLDER] else: self.source = unit.source changed = True update_target = ( self.target != unit.target or (len(self.target.strings) != stringcount(unit.target))) if update_target: notempty = filter(None, self.target_f.strings) self.target = unit.target self.submitted_by = user self.submitted_on = timezone.now() if filter(None, self.target_f.strings) or notempty: # FIXME: we need to do this cause we discard nplurals for empty # plurals changed = True notes = unit.getnotes(origin="developer") if (self.developer_comment != notes and (self.developer_comment or notes)): self.developer_comment = notes or None changed = True notes = unit.getnotes(origin="translator") if (self.translator_comment != notes and (self.translator_comment or notes)): self.translator_comment = notes or None changed = True self._comment_updated = True locations = "\n".join(unit.getlocations()) if self.locations != locations and (self.locations or locations): self.locations = locations or None changed = True context = unit.getcontext() if self.context != unit.getcontext() and (self.context or context): self.context = context or None changed = True if self.isfuzzy() != unit.isfuzzy(): self.markfuzzy(unit.isfuzzy()) changed = True if self.isobsolete() != unit.isobsolete(): if unit.isobsolete(): self.makeobsolete() else: self.resurrect(unit.isfuzzy()) changed = True if self.unitid != unit.getid(): self.unitid = unicode(unit.getid()) or unicode(unit.source) self.unitid_hash = md5(self.unitid.encode("utf-8")).hexdigest() changed = True return changed def update_wordcount(self, auto_translate=False): """Updates the source wordcount for a unit. 
:param auto_translate: when set to `True`, it will copy the source string into the target field. """ self.source_wordcount = count_words(self.source_f.strings) if self.source_wordcount == 0: # We can't set the actual wordcount to zero since the unit # will essentially disappear from statistics thus for such # units set word count to 1 self.source_wordcount = 1 if (auto_translate and not bool(filter(None, self.target_f.strings))): # auto-translate untranslated strings self.target = self.source self.state = FUZZY self._auto_translated = True def update_qualitychecks(self, keep_false_positives=False): """Run quality checks and store result in the database. :param keep_false_positives: when set to `False`, it will activate (unmute) any existing false positive checks. :return: `True` if quality checks were updated or `False` if they left unchanged. """ unmute_list = [] result = False checks = self.qualitycheck_set.all() existing = {} for check in checks.values('name', 'false_positive', 'id'): existing[check['name']] = { 'false_positive': check['false_positive'], 'id': check['id'], } # no checks if unit is untranslated if not self.target: if existing: self.qualitycheck_set.all().delete() return True return False checker = self.store.translation_project.checker qc_failures = checker.run_filters(self, categorised=True) checks_to_add = [] for name in qc_failures.iterkeys(): if name in existing: # keep false-positive checks if check is active if (existing[name]['false_positive'] and not keep_false_positives): unmute_list.append(name) del existing[name] continue message = qc_failures[name]['message'] category = qc_failures[name]['category'] checks_to_add.append( QualityCheck( unit=self, name=name, message=message, category=category)) result = True if checks_to_add: self.qualitycheck_set.bulk_create(checks_to_add) if not keep_false_positives and unmute_list: self.qualitycheck_set.filter(name__in=unmute_list) \ .update(false_positive=False) # delete inactive checks if existing: 
self.qualitycheck_set.filter(name__in=existing).delete() changed = result or bool(unmute_list) or bool(existing) return changed def get_qualitychecks(self): return self.qualitycheck_set.all() def get_critical_qualitychecks(self): return self.get_qualitychecks().filter(category=Category.CRITICAL) def get_active_critical_qualitychecks(self): return self.get_active_qualitychecks().filter( category=Category.CRITICAL) def get_warning_qualitychecks(self): return self.get_qualitychecks().exclude(category=Category.CRITICAL) def get_active_qualitychecks(self): return self.qualitycheck_set.filter(false_positive=False) # # # # # # # # # # # Related Submissions # # # # # # # # # # # # def get_edits(self): return self.submission_set.get_unit_edits() def get_comments(self): return self.submission_set.get_unit_comments() def get_state_changes(self): return self.submission_set.get_unit_state_changes() def get_suggestion_reviews(self): return self.submission_set.get_unit_suggestion_reviews() # # # # # # # # # # # TranslationUnit # # # # # # # # # # # # # # def update_tmserver(self): obj = { 'id': self.id, # 'revision' must be an integer for statistical queries to work 'revision': self.revision, 'project': self.store.translation_project.project.fullname, 'path': self.store.pootle_path, 'source': self.source, 'target': self.target, 'username': '', 'fullname': '', 'email_md5': '', } if self.submitted_on: obj.update({ 'iso_submitted_on': self.submitted_on.isoformat(), 'display_submitted_on': dateformat.format(self.submitted_on), }) if self.submitted_by: obj.update({ 'username': self.submitted_by.username, 'fullname': self.submitted_by.full_name, 'email_md5': md5(self.submitted_by.email).hexdigest(), }) get_tm_broker().update(self.store.translation_project.language.code, obj) def get_tm_suggestions(self): return get_tm_broker().search(self) # # # # # # # # # # # TranslationUnit # # # # # # # # # # # # # # def getnotes(self, origin=None): if origin is None: notes = '' if 
self.translator_comment is not None: notes += self.translator_comment if self.developer_comment is not None: notes += self.developer_comment return notes elif origin == "translator": return self.translator_comment or '' elif origin in ["programmer", "developer", "source code"]: return self.developer_comment or '' else: raise ValueError("Comment type not valid") def addnote(self, text, origin=None, position="append"): if not (text and text.strip()): return if origin in ["programmer", "developer", "source code"]: self.developer_comment = text else: self.translator_comment = text def getid(self): return self.unitid def setid(self, value): self.unitid = value self.unitid_hash = md5(self.unitid.encode("utf-8")).hexdigest() def getlocations(self): if self.locations is None: return [] return filter(None, self.locations.split('\n')) def addlocation(self, location): if self.locations is None: self.locations = '' self.locations += location + "\n" def getcontext(self): return self.context def setcontext(self, value): self.context = value def isfuzzy(self): return self.state == FUZZY def markfuzzy(self, value=True): if self.state <= OBSOLETE: return if value != (self.state == FUZZY): # when Unit toggles its FUZZY state the number of translated words # also changes self._state_updated = True # that's additional check # but leave old value in case _save_action is set if not hasattr(self, '_save_action'): self._save_action = TRANSLATION_CHANGED if value: self.state = FUZZY elif self.state <= FUZZY: if filter(None, self.target_f.strings): self.state = TRANSLATED else: self.state = UNTRANSLATED # that's additional check # but leave old value in case _save_action is set if not hasattr(self, '_save_action'): self._save_action = TRANSLATION_DELETED def hasplural(self): return (self.source is not None and (len(self.source.strings) > 1 or hasattr(self.source, "plural") and self.source.plural)) def isobsolete(self): return self.state == OBSOLETE def makeobsolete(self): if self.state > 
OBSOLETE: # when Unit becomes obsolete the cache flags should be updated self._state_updated = True self._save_action = UNIT_OBSOLETE self.state = OBSOLETE self.index = 0 def resurrect(self, is_fuzzy=False): if self.state > OBSOLETE: return if filter(None, self.target_f.strings): # when Unit toggles its OBSOLETE state the number of translated # words or fuzzy words also changes if is_fuzzy: self.state = FUZZY else: self.state = TRANSLATED else: self.state = UNTRANSLATED self.update_qualitychecks(keep_false_positives=True) self._state_updated = True self._save_action = UNIT_RESURRECTED def istranslated(self): return self.state >= TRANSLATED # # # # # # # # # # # Suggestions # # # # # # # # # # # # # # # # # def get_suggestions(self): return self.suggestion_set.pending().select_related('user').all() def has_critical_checks(self): return self.qualitycheck_set.filter( category=Category.CRITICAL, ).exists() def toggle_qualitycheck(self, check_id, false_positive, user): check = self.qualitycheck_set.get(id=check_id) if check.false_positive == false_positive: return check.false_positive = false_positive check.save() self._log_user = user if false_positive: self._save_action = MUTE_QUALITYCHECK else: self._save_action = UNMUTE_QUALITYCHECK # create submission if false_positive: sub_type = SubmissionTypes.MUTE_CHECK else: sub_type = SubmissionTypes.UNMUTE_CHECK sub = Submission(creation_time=timezone.now(), translation_project=self.store.translation_project, submitter=user, field=SubmissionFields.NONE, unit=self, store=self.store, type=sub_type, quality_check=check) sub.save() # update timestamp # log user action self.save() def get_terminology(self): """get terminology suggestions""" matcher = self.store.translation_project.gettermmatcher() if matcher is None: return [] return matcher.matches(self.source) def get_last_updated_info(self): return { "display_datetime": dateformat.format(self.creation_time), "iso_datetime": self.creation_time.isoformat(), "creation_time": 
int(dateformat.format(self.creation_time, 'U')), "unit_source": truncatechars(self, 50), "unit_url": self.get_translate_url(), } # # # # # # # # # # # Store # # # # # # # # # # # # # # def validate_no_slashes(value): if '/' in value: raise ValidationError('Store name cannot contain "/" characters') if '\\' in value: raise ValidationError('Store name cannot contain "\\" characters') # Needed to alter storage location in tests fs = PootleFileSystemStorage() class Store(models.Model, CachedTreeItem, base.TranslationStore): """A model representing a translation store (i.e. a PO or XLIFF file).""" UnitClass = Unit Name = "Model Store" is_dir = False file = TranslationStoreField(max_length=255, storage=fs, db_index=True, null=False, editable=False) parent = models.ForeignKey('pootle_app.Directory', related_name='child_stores', db_index=True, editable=False) translation_project_fk = 'pootle_translationproject.TranslationProject' translation_project = models.ForeignKey(translation_project_fk, related_name='stores', db_index=True, editable=False) filetype = models.ForeignKey( Format, related_name='stores', null=True, blank=True, db_index=True) is_template = models.BooleanField(default=False) # any changes to the `pootle_path` field may require updating the schema # see migration 0007_case_sensitive_schema.py pootle_path = models.CharField(max_length=255, null=False, unique=True, db_index=True, verbose_name=_("Path")) tp_path = models.CharField( max_length=255, null=True, blank=True, db_index=True, verbose_name=_("Path")) # any changes to the `name` field may require updating the schema # see migration 0007_case_sensitive_schema.py name = models.CharField(max_length=128, null=False, editable=False, validators=[validate_no_slashes]) file_mtime = models.DateTimeField(default=datetime_min) state = models.IntegerField(null=False, default=NEW, editable=False, db_index=True) creation_time = models.DateTimeField(auto_now_add=True, db_index=True, editable=False, null=True) 
last_sync_revision = models.IntegerField(db_index=True, null=True, blank=True) obsolete = models.BooleanField(default=False) # this is calculated from virtualfolders if installed and linked priority = models.FloatField( db_index=True, default=1, validators=[MinValueValidator(0)]) objects = StoreManager() simple_objects = models.Manager() class Meta(object): ordering = ['pootle_path'] index_together = [ ["translation_project", "is_template"], ["translation_project", "pootle_path", "is_template", "filetype"]] unique_together = ( ('parent', 'name'), ("obsolete", "translation_project", "tp_path")) # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # # @property def code(self): return self.name.replace('.', '-') @property def tp(self): return self.translation_project @property def real_path(self): return self.file.name @property def has_terminology(self): """is this a project specific terminology store?""" # TODO: Consider if this should check if the store belongs to a # terminology project. Probably not, in case this might be called over # several files in a project. return self.name.startswith('pootle-terminology') @property def units(self): return self.unit_set.filter(state__gt=OBSOLETE).order_by('index') @units.setter def units(self, value): """Null setter to avoid tracebacks if :meth:`TranslationStore.__init__` is called. """ pass # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # # @cached_property def path(self): """Returns just the path part omitting language and project codes. If the `pootle_path` of a :cls:`Store` object `store` is `/af/project/dir1/dir2/file.po`, `store.path` will return `dir1/dir2/file.po`. 
""" return to_tp_relative_path(self.pootle_path) def __init__(self, *args, **kwargs): super(Store, self).__init__(*args, **kwargs) def __unicode__(self): return unicode(self.pootle_path) def __str__(self): return str(self.syncer.convert()) def save(self, *args, **kwargs): created = not self.id self.pootle_path = self.parent.pootle_path + self.name self.tp_path = self.parent.tp_path + self.name # Force validation of fields. self.full_clean() super(Store, self).save(*args, **kwargs) if created: store_log(user='system', action=STORE_ADDED, path=self.pootle_path, store=self.id) def delete(self, *args, **kwargs): store_log(user='system', action=STORE_DELETED, path=self.pootle_path, store=self.id) lang = self.translation_project.language.code for unit in self.unit_set.iterator(): action_log(user='system', action=UNIT_DELETED, lang=lang, unit=unit.id, translation='', path=self.pootle_path) super(Store, self).delete(*args, **kwargs) def calculate_priority(self): if not vfolders_installed(): return DEFAULT_PRIORITY from virtualfolder.models import VirtualFolder vfolders = VirtualFolder.objects priority = ( vfolders.filter(stores=self) .aggregate(priority=models.Max("priority"))["priority"]) if priority is None: return DEFAULT_PRIORITY return priority def set_priority(self, priority=None): priority = ( self.calculate_priority() if priority is None else priority) if priority != self.priority: Store.objects.filter(pk=self.pk).update(priority=priority) def makeobsolete(self): """Make this store and all its units obsolete.""" store_log(user='system', action=STORE_OBSOLETE, path=self.pootle_path, store=self.id) lang = self.translation_project.language.code unit_query = self.unit_set.filter(state__gt=OBSOLETE) unit_ids = unit_query.values_list('id', flat=True) for unit_id in unit_ids: action_log(user='system', action=UNIT_OBSOLETE, lang=lang, unit=unit_id, translation='', path=self.pootle_path) unit_query.update(state=OBSOLETE, index=0) self.obsolete = True self.save() def 
get_absolute_url(self): return reverse( 'pootle-tp-store-browse', args=split_pootle_path(self.pootle_path)) def get_translate_url(self, **kwargs): return u''.join( [reverse("pootle-tp-store-translate", args=split_pootle_path(self.pootle_path)), get_editor_filter(**kwargs)]) def findid_bulk(self, ids, unit_set=None): chunks = 200 for i in xrange(0, len(ids), chunks): units = (unit_set or self.unit_set).filter(id__in=ids[i:i+chunks]) for unit in units.iterator(): yield unit def get_file_mtime(self): disk_mtime = datetime.datetime.fromtimestamp(self.file.getpomtime()[0]) # set microsecond to 0 for comparing with a time value without # microseconds disk_mtime = make_aware(disk_mtime.replace(microsecond=0)) return disk_mtime def update_index(self, start, delta): with update_data_after(self): Unit.objects.filter(store_id=self.id, index__gte=start).update( index=operator.add(F('index'), delta)) def mark_units_obsolete(self, uids_to_obsolete, update_revision=None): """Marks a bulk of units as obsolete. :param uids_to_obsolete: UIDs of the units to be marked as obsolete. :return: The number of units marked as obsolete. 
""" obsoleted = 0 for unit in self.findid_bulk(uids_to_obsolete): # Use the same (parent) object since units will # accumulate the list of cache attributes to clear # in the parent Store object unit.store = self if not unit.isobsolete(): unit.makeobsolete() unit.save(revision=update_revision) obsoleted += 1 return obsoleted @cached_property def data_tool(self): return data_tool.get(self.__class__)(self) @cached_property def updater(self): updaters = format_updaters.gather() updater_class = ( updaters.get(self.filetype.name) or updaters.get("default")) return updater_class(self) @cached_property def syncer(self): syncers = format_syncers.gather() syncer_class = ( syncers.get(self.filetype.name) or syncers.get("default")) return syncer_class(self) def record_submissions(self, unit, old_target, old_state, current_time, user, submission_type=None, **kwargs): """Records all applicable submissions for `unit`. EXTREME HAZARD: this relies on implicit `._<field>_updated` members being available in `unit`. Let's look into replacing such members with something saner (#3895). 
""" state_updated = kwargs.get("state_updated") or unit._state_updated target_updated = kwargs.get("target_updated") or unit._target_updated comment_updated = kwargs.get("comment_updated") or unit._comment_updated create_subs = OrderedDict() if state_updated: create_subs[SubmissionFields.STATE] = [ old_state, unit.state] if target_updated: create_subs[SubmissionFields.TARGET] = [ old_target, unit.target_f] if comment_updated: create_subs[SubmissionFields.COMMENT] = [ '', unit.translator_comment or ''] if submission_type is None: submission_type = SubmissionTypes.SYSTEM subs_created = [] for field in create_subs: subs_created.append( Submission( creation_time=current_time, translation_project_id=self.translation_project_id, submitter=user, unit=unit, store_id=self.id, field=field, type=submission_type, old_value=create_subs[field][0], new_value=create_subs[field][1])) if subs_created: unit.submission_set.add(*subs_created, bulk=False) def update(self, store, user=None, store_revision=None, submission_type=None, resolve_conflict=POOTLE_WINS, allow_add_and_obsolete=True): """Update DB with units from a ttk Store. :param store: a source `Store` instance from TTK. :param store_revision: revision at which the source `Store` was last synced. :param user: User to attribute updates to. :param submission_type: Submission type of saved updates. 
:param allow_add_and_obsolete: allow to add new units and make obsolete existing units """ self.updater.update( store, user=user, store_revision=store_revision, submission_type=submission_type, resolve_conflict=resolve_conflict, allow_add_and_obsolete=allow_add_and_obsolete) def deserialize(self, data): return StoreDeserialization(self).deserialize(data) def serialize(self): return StoreSerialization(self).serialize() def sync(self, update_structure=False, conservative=True, user=None, skip_missing=False, only_newer=True): """Sync file with translations from DB.""" if skip_missing and not self.file.exists(): return self.syncer.sync( update_structure=update_structure, conservative=conservative, user=user, only_newer=only_newer) # # # # # # # # # # # # TranslationStore # # # # # # # # # # # # # suggestions_in_format = True def max_index(self): """Largest unit index""" return max_column(self.unit_set.all(), 'index', -1) def addunit(self, unit, index=None, user=None, update_revision=None): if index is None: index = self.max_index() + 1 newunit = self.UnitClass(store=self, index=index) newunit.update(unit, user=user) if self.id: newunit.save(revision=update_revision, user=user) return newunit def findunits(self, source, obsolete=False): if not obsolete and hasattr(self, "sourceindex"): return super(Store, self).findunits(source) # find using hash instead of index source_hash = md5(source.encode("utf-8")).hexdigest() units = self.unit_set.filter(source_hash=source_hash) if obsolete: units = units.filter(state=OBSOLETE) else: units = units.filter(state__gt=OBSOLETE) if units.count(): return units def findunit(self, source, obsolete=False): units = self.findunits(source, obsolete) if units: return units[0] def findid(self, id): if hasattr(self, "id_index"): return self.id_index.get(id, None) unitid_hash = md5(id.encode("utf-8")).hexdigest() try: return self.unit_set.get(unitid_hash=unitid_hash) except Unit.DoesNotExist: return None def header(self): # FIXME: we should 
store some metadata in db if self.file and hasattr(self.file.store, 'header'): return self.file.store.header() def get_max_unit_revision(self): return max_column(self.unit_set.all(), 'revision', 0) # # # TreeItem def get_parents(self): if self.parent.is_translationproject(): return [self.translation_project] return [self.parent] # # # /TreeItem # # # # # # # # # # # # # # # # Translation # # # # # # # # # # # # # # #
Come along to the first Open Meeting of the Edinburgh Zine Library. Learn more about the zines, the zine library and how to get involved. The meeting will be held on Monday 21st August from 6-7.30pm. We’ll be meeting in the George Washington Browne Room in Edinburgh Central Library. There’ll be a short presentation (15 minutes) about the Zine Library Project, followed by a drop-in and a chance to chat to us about volunteering and other ways to get involved. Bring your own zines if you want to submit them, bring yourselves. You don’t need any experience, just an interest in zines, archiving, diy cultures, social history or libraries. The George Washington Browne room is located on the mezzanine adjacent to the music library. If you’re unsure how to find it – ask the lovely people at the library reception, they’ll be more than happy to help! If you have any questions about the accessibility of the meeting or the meeting room please just ask. We are in the process of checking wheelchair accessibility and will keep the page updated – if you do use a wheelchair or other mobility aid please just let us know and we can easily accommodate you. Although we only accept volunteers and members over the age of 18, if you have children you are welcome to bring them to this first meeting. If you are unable to attend the meeting (we know festival time can be hectic, and that unfamiliar group meetings don’t suit everyone) but want to be involved please send us an email: edinburghzinelibrary@gmail.com or a message on facebook and we can arrange individual meetings, or work to find a way for you to be involved that works for you!
"""empty message Revision ID: 4ad33f99723a Revises: 743a0a1b5bc9 Create Date: 2017-08-17 14:36:49.229000 """ # revision identifiers, used by Alembic. revision = '4ad33f99723a' down_revision = '743a0a1b5bc9' from alembic import op import sqlalchemy as sa import sqlalchemy_utils def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('bit_facebook_daily_ad_insights_impression_device', sa.Column('id', sa.Integer(), nullable=False), sa.Column('account_id', sa.String(length=255), nullable=True), sa.Column('campaign_id', sa.String(length=255), nullable=True), sa.Column('adset_id', sa.String(length=255), nullable=True), sa.Column('campaign_name', sa.String(length=255), nullable=True), sa.Column('spend', sa.Numeric(), nullable=True), sa.Column('cost_per_unique_click', sa.Numeric(), nullable=True), sa.Column('unique_clicks', sa.Integer(), nullable=True), sa.Column('unique_impressions', sa.Integer(), nullable=True), sa.Column('unique_social_clicks', sa.Integer(), nullable=True), sa.Column('unique_social_impressions', sa.Integer(), nullable=True), sa.Column('website_clicks', sa.Integer(), nullable=True), sa.Column('date_start', sa.DateTime(), nullable=True), sa.Column('date_stop', sa.DateTime(), nullable=True), sa.Column('impression_device', sa.String(length=255), nullable=True), sa.Column('ad_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['ad_id'], ['bit_facebook_ad.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_bit_facebook_daily_ad_insights_impression_device_impression_device'), 'bit_facebook_daily_ad_insights_impression_device', ['impression_device'], unique=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! 
### op.drop_index(op.f('ix_bit_facebook_daily_ad_insights_impression_device_impression_device'), table_name='bit_facebook_daily_ad_insights_impression_device') op.drop_table('bit_facebook_daily_ad_insights_impression_device') # ### end Alembic commands ###
The little brother to the Tortuga Setout, the Setout Divide from Tortuga is a full-featured bag for the style-casual modern minimal traveler. Great harness system — really comfortable and well built, it keeps your load comfortable. Lots of organization — great tech pouch, laptop compartment and quick access pockets. Makes getting in and out in transit quite easy. Expandable capacity — it expands and contracts to make the bag look a little tighter when it’s not fully packed out. Ultimately a great bag to use in travel and just the right size for some of us.
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: friendObj.py
#
# An object that mirrors the friends table in the database
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
#
import copy  # NOTE(review): unused here; kept in case importers rely on it


class friendObj(object):
    """In-memory mirror of one row of the ``friends`` database table.

    ``rid`` is the database row id (``None`` until the row is saved);
    the remaining attributes pair a user with a friend by screen name,
    remote service id and local database id.
    """

    def __init__(self):
        self.rid = None            # database row id; None when unsaved
        self.user = u""            # user screen name
        self.friend = u""          # friend screen name
        self.user_id = 0           # remote service id of the user
        self.friend_id = 0         # remote service id of the friend
        self.user_local_id = 0     # local DB id of the user
        self.friend_local_id = 0   # local DB id of the friend

    def to_dict(self):
        """Return the row as a dict; ``rid`` is included only when set.

        The ``is not None`` guard makes the comparison safe on Python 3,
        where ``None > 0`` raises TypeError.
        """
        rec = {}
        if self.rid is not None and self.rid > 0:
            rec['rid'] = self.rid
        rec['user'] = self.user
        rec['friend'] = self.friend
        rec['user_id'] = self.user_id
        rec['friend_id'] = self.friend_id
        rec['user_local_id'] = self.user_local_id
        rec['friend_local_id'] = self.friend_local_id
        return rec

    def from_dict(self, rec):
        """Build and return a NEW friendObj from ``rec``; ``self`` is untouched.

        Accepts the output of :meth:`to_dict`. Returns an empty object when
        ``rec`` is falsy.
        """
        nobj = friendObj()
        if rec:
            # Restore the row id when present so a to_dict/from_dict round
            # trip preserves it (it was previously dropped, unlike clone()).
            nobj.rid = rec.get('rid')
            nobj.user = rec['user']
            nobj.friend = rec['friend']
            nobj.user_id = rec['user_id']
            nobj.friend_id = rec['friend_id']
            nobj.user_local_id = rec['user_local_id']
            nobj.friend_local_id = rec['friend_local_id']
        return nobj

    def clone(self):
        """Return a deep, field-by-field copy of this object."""
        nobj = friendObj()
        # Same None-safe guard as to_dict(): only propagate a real row id.
        if self.rid is not None and self.rid > 0:
            nobj.rid = self.rid
        nobj.user = self.user
        nobj.friend = self.friend
        nobj.user_id = self.user_id
        nobj.friend_id = self.friend_id
        nobj.user_local_id = self.user_local_id
        nobj.friend_local_id = self.friend_local_id
        return nobj

    def __repr__(self):
        return "<friendObj('%s','%s','%s','%s','%s','%s','%s')>"%(str(self.rid),str(self.user),str(self.friend),str(self.user_id),str(self.friend_id),str(self.user_local_id),str(self.friend_local_id))
With our in-depth local knowledge of Bishop-Burton, we deliver flowers that come with a guaranteed smile throughout the area via our same day flower delivery service from our trusted local florist, or the very next day with the assistance of our expert overnight flower couriers. Having been sending flowers since 1947, you could say we're well versed in organising the most delightful flowers to Bishop-Burton - put your faith in our expert local florists for a last minute flower delivery whatever the reason or season! If you know where you are sending flowers, we can also deliver to the surrounding areas.
import glob
import os
import tqdm
import numpy as np
import shutil

from bird import loader as l

# Dataset root: expects train/<class>/*.wav; held-out files are moved
# into valid/<class>/.
source_dir = "/disk/martinsson-spring17/birdClef2016Subset"
classes = os.listdir(os.path.join(source_dir, "train"))
percentage_validation_sampels = 0.10

progress = tqdm.tqdm(range(len(classes)))

# Pair each class with its segment files, then group segments
# (presumably those cut from the same original recording — see
# bird.loader.group_segments) so a recording never straddles the split.
class_segmentss = []
for c in classes:
    wav_pattern = os.path.join(source_dir, "train", c, "*.wav")
    class_segmentss.append((c, glob.glob(wav_pattern)))

unique_sampels = []
for c, class_segments in class_segmentss:
    unique_sampels.append((c, l.group_segments(class_segments)))

total_samples = sum(len(segs) for (c, segs) in unique_sampels)
print("Found ", total_samples, " unique sampels")

# zip with the tqdm iterator so the progress bar advances one tick per class.
for (c, segments), p in zip(unique_sampels, progress):
    # Hold out ~10% of this class's samples, rounded up.
    nb_validation_samples = int(
        np.ceil(len(segments) * percentage_validation_sampels))

    valid_class_path = os.path.join(source_dir, "valid", c)
    if not os.path.exists(valid_class_path):
        os.makedirs(valid_class_path)

    # Draw sample indices without replacement, then move every segment of
    # each chosen sample into the validation folder.
    chosen_indices = np.random.choice(range(len(segments)),
                                      nb_validation_samples, replace=False)
    for i in chosen_indices:
        for segment in segments[i]:
            shutil.move(segment, valid_class_path)
Herbalife24 is the first comprehensive performance nutrition line empowering athletes 24-hours a day. We have redefined industry standards to help you train, recover and perform like never before – with all the nutritional support you need as an athlete. This five-product line is customisable so you can determine your day-to-day needs based on activity levels and training demands. You can customise daily product usage based on your specific performance and recovery needs.
# colored-buttondemo-1.py import sys sys.path.append("../..") from wax import * COLORS = ['orange', 'chartreuse', 'papayawhip', 'dark blue', 'gold', 'red', 'yellow green', 'snow', 'hotpink', 'cadet blue'] # stick a custom event in Button def MyOnClick(self, event): print 'U clicked the button with label', `self.GetLabel()` Button.OnClick = MyOnClick class MainFrame(Frame): def Body(self): self.AddComponent(Button(self, "one"), stretch=1) self.AddComponent(Button(self, "two"), expand=1, stretch=1) self.AddComponent(Button(self, "three"), stretch=1) # adding a panel, using a class class Panel1(Panel): def Body(self): self.AddComponent(Button(self, "AAA"), stretch=1) self.AddComponent(Button(self, "BBB"), expand=1, stretch=1) self.AddComponent(Button(self, "CCC"), stretch=1) panel1 = Panel1(self, direction="HORIZONTAL") panel1.Pack() self.AddComponent(panel1, stretch=1) # adding two nested panels panel2 = Panel(self, direction="H") panel2.AddComponent(Button(panel2, "DD"), expand=1, stretch=1) panel2.AddComponent(Button(panel2, "EE"), expand=1, stretch=1) panel3 = Panel(panel2, direction="V") panel3.AddComponent(Button(panel3, "999"), stretch=1) b = Button(panel3, "888") panel3.AddComponent(b, expand=1, stretch=1) panel3.Pack() panel2.AddComponent(panel3, stretch=1) panel2.Pack() self.AddComponent(panel2, expand=1, stretch=1) self.Pack() # override event for this button def my_event(event): print "Wahey!" b.OnClick = my_event # color these buttons, using the GetAllChildren() method (new as of # 0.2.7) all_buttons = [widget for widget in self.GetAllChildren() if isinstance(widget, Button)] for color, button in zip(COLORS, all_buttons): button.SetBackgroundColor(color) app = Application(MainFrame, direction='vertical', title="Test test...") app.MainLoop()
Today is great for inspiring positive change in your life with many positive aspects and the Moon meeting with Jupiter in Virgo. A balance between personal needs and career must be maintained for optimal quality of life. Today is great for creating a schedule that helps you find that balance and prioritize your time. Getting to any destination requires that you choose the right road to get there. You can wander, but eventually you need to get back on the path. If you've recently changed careers, left your job, or are thinking of making a larger change of focus for the goals you have for your life, this is a great time to think more about what you want to do to build a stronger foundation for yourself. Ultimately, doing what you really want will bring you great happiness. Being productive on anything that is emotionally satisfying will create much better results than grunt work will now. Taking a step back and assessing how effective your direction has been to reach your desired outcomes will help you to get rid of inefficiencies that need to be purged from the process now. Where are you spending too much time or not enough to develop your skills? No matter what you do, it is your work ethic that brings the most value. Your resourcefulness in accessing the information you need gives you more power. With Jupiter in focus, traveling with people who are close, such as family or a significant other, can make anywhere feel like home, while also giving you a bigger perspective on life. Road trips to small, cozy towns not far from home can bring conversation or new information that inspires a philosophical change in how you approach your larger goals. Positive actions will have positive results today. Enthusiasm and potential can override your worries. Pay attention to your energy levels, and put your energy towards the positive. A focus on health and well-being is best today. A steam bath is good for purging toxins. The end of the day is best for food, family or romance.
Enjoy bonding with those closest to you from the heart. Earth signs (Taurus, Virgo and Capricorn) and Water signs (Cancer, Scorpio and Pisces) are generally well blessed today.
from django.utils.translation import ugettext_lazy as _

from horizon import tabs

import openstack_dashboard.models as fogbow_models

# Shorthand aliases for the Fogbow OCCI-style constants used below.
COMPUTE_TERM = fogbow_models.FogbowConstants.COMPUTE_TERM
STATE_TERM = fogbow_models.FogbowConstants.STATE_TERM
# NOTE(review): "SHH" looks like a typo for "SSH", but the name comes from
# FogbowConstants and must match it -- do not rename here.
SHH_PUBLIC_KEY_TERM = fogbow_models.FogbowConstants.SHH_PUBLIC_KEY_TERM
CONSOLE_VNC_TERM = fogbow_models.FogbowConstants.CONSOLE_VNC_TERM
MEMORY_TERM = fogbow_models.FogbowConstants.MEMORY_TERM
CORES_TERM = fogbow_models.FogbowConstants.CORES_TERM
IMAGE_SCHEME = fogbow_models.FogbowConstants.IMAGE_SCHEME
EXTRA_PORT_SCHEME = fogbow_models.FogbowConstants.EXTRA_PORT_SCHEME


class InstanceDetailTabInstancePanel(tabs.Tab):
    """Horizon tab that renders the details of one Fogbow compute instance."""

    name = _("Instance details")
    slug = "instance_details"
    template_name = ("fogbow/instance/_detail_instance.html")

    def get_context_data(self, request):
        """Fetch the instance from the Fogbow endpoint and build the template context.

        Falls back to '-' placeholder values for every field when the
        response cannot be parsed, so the template always renders.
        """
        instanceId = self.tab_group.kwargs['instance_id']
        response = fogbow_models.doRequest('get', COMPUTE_TERM + instanceId,
                                           None, request)

        instance = None
        try:
            instance = getInstancePerResponse(instanceId, response)
        except Exception:
            instance = {'instanceId': '-', 'state': '-', 'sshPublic': '-',
                        'extra': '-', 'memory': '-', 'cores': '-',
                        'image': '-', 'extraPorts': '-'}
        return {'instance': instance}


def getInstancePerResponse(instanceId, response):
    """Parse a raw Fogbow compute response body into a detail dict.

    Each line of the response text is matched against the known attribute
    terms; unmatched attributes keep the '-' placeholder.
    """
    if instanceId == 'null':
        instanceId = '-'

    instanceDetails = response.text.split('\n')
    # console_vnc is initialised but never populated below; CONSOLE_VNC_TERM is
    # currently unused -- kept for interface stability with the original code.
    state, sshPublic, console_vnc, memory, cores, image, extraPort = \
        '-', '-', '-', '-', '-', '-', '-'
    for detail in instanceDetails:
        if STATE_TERM in detail:
            state = normalizeAttributes(detail, STATE_TERM)
        elif SHH_PUBLIC_KEY_TERM in detail:
            sshPublic = normalizeAttributes(detail, SHH_PUBLIC_KEY_TERM)
        elif MEMORY_TERM in detail:
            memory = normalizeAttributes(detail, MEMORY_TERM)
        elif CORES_TERM in detail:
            cores = normalizeAttributes(detail, CORES_TERM)
        elif IMAGE_SCHEME in detail:
            image = getFeatureInCategoryPerScheme('title', detail)
        elif EXTRA_PORT_SCHEME in detail:
            extraPort = normalizeAttributes(detail, EXTRA_PORT_SCHEME)

    return {'instanceId': instanceId, 'state': state, 'sshPublic': sshPublic,
            'extra': instanceDetails, 'memory': memory, 'cores': cores,
            'image': image, 'extraPorts': extraPort}


def normalizeAttributes(propertie, term):
    """Return the value part of an ``attr=value`` line, stripped of '=' and quotes.

    Returns '' when the term is not followed by a value.
    """
    try:
        return propertie.split(term)[1].replace('=', '').replace('"', '')
    # Was a bare ``except:`` -- narrowed so KeyboardInterrupt/SystemExit
    # are no longer swallowed.
    except Exception:
        return ''


def getFeatureInCategoryPerScheme(featureName, features):
    """Extract a named feature (e.g. 'title') from a ';'-separated category line."""
    try:
        features = features.split(';')
        for feature in features:
            if featureName in feature:
                return feature.replace(featureName + '=', '') \
                              .replace('"', '').replace('Image:', '') \
                              .replace(' image', '')
        return ''
    except Exception:
        return '-'


class InstanceDetailTabGroupInstancePanel(tabs.TabGroup):
    """Tab group containing the single instance-details tab."""

    slug = "instance_details"
    tabs = (InstanceDetailTabInstancePanel,)
First stop was Magic 106.7 FM in Boston. After the usual questions about the new film, questions turned to Bond 25. Craig also called into the 'Todd N Tyler' radio show this morning. As well as confirming that Sean is his favorite 007, although he has a soft spot for Roger and 'Live And Let Die' is one of his favorite films, Craig was also quizzed about Bond 25. Q: You've signed up for two more Bonds, correct? Craig also appeared on 93.3 WMMR in Philadelphia and was asked the same question right at the end of the interview. Daniel Craig will also be the headline guest on 'The Late Show with Stephen Colbert' tonight on CBS television in the US.
import random
# BUG FIX: the original did ``from datetime import datetime`` and then called
# ``datetime.timedelta(...)`` in sim_appt, which raises AttributeError at
# runtime because ``datetime`` is the class, not the module. Import timedelta
# explicitly; ``datetime`` is kept for backward compatibility of the module
# namespace.
from datetime import datetime, timedelta

from django.db import transaction
from django.utils.timezone import now

from .models import *


def csv_for_appt(appt):
    """Render an appointment's surveys as a CSV string.

    Columns are Time/Bike/Direction followed by one column per
    organization metric.
    """
    out = ''
    # Header row: fixed columns, then one per metric.
    out += "Time,Bike,Direction,"
    for m in appt.organization.organizationmetrics_set.all():
        out += "%s," % m.metric.name
    out = out[:-1] + "\n"
    # One detail row per survey; trailing comma stripped from each row.
    for s in appt.survey_set.all():
        out += "%s,%s,%s," % (s.created, s.is_bicycle, s.direction)
        for sv in s.surveyvalue_set.all():
            out += "%s," % sv.value.stored_value
        out = out[:-1] + "\n"
    return out


def stats_for_appt(appt):
    """Aggregate per-metric and per-minute counts for an appointment.

    Returns a dict with keys 'total', 'metrics' (value counts per reportable
    metric, with key -1 for direction) and 'minutes' (survey counts per
    minute offset from actual_start).
    """
    stat = {}
    stat['total'] = appt.survey_set.all().count()
    metrics = {}
    # Renamed from ``min`` to avoid shadowing the builtin.
    minute_counts = {}
    # Pre-seed every minute of the session with zero so gaps show up.
    for i in range(0, ((appt.actual_end - appt.actual_start).seconds / 60)):
        minute_counts[i] = 0
    # Key -1 is reserved for the synthetic "direction" metric.
    metrics[-1] = {'name': 'direction', 'stats': {}}
    # List of reportable metrics.
    for m in appt.organization.organizationmetrics_set.filter(report=True):
        metrics[m.metric.id] = {'name': m.metric.name, 'stats': {}}
    # Value counts across all recorded info.
    for s in appt.survey_set.all():
        # Direction tallies.
        try:
            metrics[-1]['stats'][s.direction] += 1
        except KeyError:
            metrics[-1]['stats'][s.direction] = 1
        minutes_in = (s.recorded_at - appt.actual_start).seconds / 60
        try:
            minute_counts[minutes_in] += 1
        except KeyError:
            minute_counts[minutes_in] = 1
        for sv in s.surveyvalue_set.select_related().all():
            # Skip values whose metric is not reportable.
            if sv.metric.id not in metrics.keys():
                continue
            try:
                metrics[sv.metric.id]['stats'][sv.value.display_value] += 1
            except KeyError:
                metrics[sv.metric.id]['stats'][sv.value.display_value] = 1
    # Debug output kept from the original (parenthesised for py2/py3 parsing).
    print(minute_counts)
    stat['metrics'] = metrics
    stat['minutes'] = minute_counts
    return stat


def sim_appt(appt, avg_time=25):
    """Populate an appointment with random simulated survey data.

    Clears existing data, then creates surveys spaced by random intervals
    (uniform in [0, 2*avg_time] seconds) until the organization's session
    length is exceeded. Runs inside a single DB transaction.
    """
    with transaction.atomic():
        # Clear existing data.
        appt.reset()
        start = now()
        total_time = 0
        while True:
            sec = random.randint(0, avg_time * 2)
            total_time += sec
            # FIX: was ``datetime.timedelta(seconds=total_time)`` which fails
            # because ``datetime`` here is the class, not the module.
            t = start + timedelta(seconds=total_time)
            s = Survey.objects.create(appointment=appt, recorded_at=t)
            for m in appt.organization.organizationmetrics_set.all():
                metric = m.metric
                if metric.value_set.system_name == 'direction':
                    val = random.choice(list(appt.location.directions()))
                else:
                    val = random.choice(list(m.metric.value_set.value_set.all()))
                # TODO handle defaults
                has_def = m.metric.value_set.value_set.filter(is_default=True).count()
                sv = SurveyValue.objects.create(survey=s, metric=metric, value=val)
            # TODO Add events
            if total_time > appt.organization.session_length * 60:
                break
        appt.actual_start = start
        appt.actual_end = start + timedelta(seconds=total_time)
        appt.time_taken = total_time
        appt.save()


def get_appts_choices(theOrg, theYear=None):
    """Build form-choice tuples for an organization's appointments.

    Optionally restricted to appointments whose scheduled_start falls in
    theYear. Always prepends the default and download-all pseudo-choices.
    """
    all_appts_choices = [('default', '--Pick--'),
                         ('ALL', 'Download All Appointments')]
    if theYear is not None:
        all_appts_choices += [
            (a['id'], (str(a['id']) + ' - ' + str(a['location__name'])))
            for a in Appointment.objects.filter(
                scheduled_start__year=theYear,
                organization=Organization.objects.get(slug=theOrg)
            ).order_by('id').values('id', 'location__name')
        ]
    else:
        all_appts_choices += [
            (a['id'], (str(a['id']) + ' - ' + str(a['location__name'])))
            for a in Appointment.objects.filter(
                organization=Organization.objects.get(slug=theOrg)
            ).order_by('id').values('id', 'location__name')
        ]
    # For the count-year drop-down, pull all unique start_date years for the
    # appointments in the DB. To accommodate potential DB incompatibilities
    # with Django's distinct() (only PostgreSQL supports it on fields), the
    # unique-year filtering is done manually by callers.
    return all_appts_choices
Looking for design assistant to aid in material and product selection. Our firm does primarily Kitchen Design but works on all types of projects that involve custom cabinetry and millwork. We will train on product specific items and kitchen design basics and software. Looking for full or part time with some scheduling flexibility.
from frasco import Feature, action, execute_action, command, current_app, import_string, signal, copy_extra_feature_options, has_app_context
from celery import Celery
from celery.bin.worker import worker as celery_worker
from celery.bin.beat import beat as celery_beat
from celery.schedules import crontab


def pack_task_args(data):
    """Recursively converts every object with a __taskdump__() method into a
    JSON-serializable ``{"$taskobj": [class_path, state]}`` marker so it can
    survive the task queue's serializer. Lists, tuples and dicts are
    traversed; everything else is returned unchanged.
    """
    if hasattr(data, "__taskdump__"):
        cls, state = data.__taskdump__()
        if not cls:
            # Fall back to the object's own import path when __taskdump__
            # does not name a class.
            cls = data.__class__.__module__ + "." + data.__class__.__name__
        return {"$taskobj": [cls, state]}
    if isinstance(data, (list, tuple)):
        # NOTE: tuples are converted to lists by this round-trip.
        lst = []
        for item in data:
            lst.append(pack_task_args(item))
        return lst
    if isinstance(data, dict):
        dct = {}
        for k, v in data.iteritems():
            dct[k] = pack_task_args(v)
        return dct
    return data


def unpack_task_args(data):
    """Inverse of pack_task_args(): traverses data and restores objects that
    were dumped as ``{"$taskobj": [class_path, state]}`` markers by importing
    the class and calling its __taskload__() with the saved state.
    """
    if isinstance(data, (list, tuple)):
        lst = []
        for item in data:
            lst.append(unpack_task_args(item))
        return lst
    if isinstance(data, dict):
        if "$taskobj" in data:
            cls = import_string(data["$taskobj"][0])
            return cls.__taskload__(data["$taskobj"][1])
        else:
            dct = {}
            for k, v in data.iteritems():
                dct[k] = unpack_task_args(v)
            return dct
    return data


def run_action(name, **kwargs):
    """Instantiates and executes an action from current_app.

    This is the actual function registered as the celery task
    ("frasco_run_action"): it unpacks the serialized kwargs, optionally
    restores the enqueuing user's context (via the reserved '_current_user'
    kwarg), and fires the before/after task signals around execution.
    """
    kwargs = unpack_task_args(kwargs)
    current_user = None
    if '_current_user' in kwargs:
        current_user = kwargs.pop('_current_user')
        current_app.features.users.start_user_context(current_user)
    try:
        current_app.features.tasks.before_task_event.send(name=name)
        action = current_app.actions[name](kwargs)
        rv = execute_action(action)
        current_app.features.tasks.after_task_event.send(name=name)
    finally:
        # Always tear down the impersonated user context, even on failure.
        if current_user:
            current_app.features.users.stop_user_context()
    return rv


class TasksFeature(Feature):
    """Enqueue tasks to process them in the background.

    Wraps a Celery app configured from the feature options (broker defaults
    to the redis feature's URL, or a local redis). Exposes ``enqueue`` as a
    frasco action plus ``worker``/``scheduler`` CLI commands.
    """
    name = "tasks"
    command_group = False
    defaults = {"broker_url": None,
                "result_backend": None,
                "accept_content": ['json', 'msgpack', 'yaml'],
                "task_serializer": "json",
                "result_serializer": "json",
                "schedule": {},
                "delay_if_models_transaction": False,
                "run_beat_with_worker": True}

    # Signals fired around task execution and on enqueue.
    before_task_event = signal("before_task")
    after_task_event = signal("after_task")
    task_enqueued_event = signal("task_enqueued")

    def init_app(self, app):
        """Builds the Celery app, installs an app-context-aware Task base
        class, registers the scheduled actions and the frasco processes.
        """
        self.app = app
        broker = self.options["broker_url"]
        backend = self.options["result_backend"]
        if not broker:
            # Prefer the redis feature's connection if available.
            if app.features.exists("redis"):
                broker = app.features.redis.options["url"]
            else:
                broker = "redis://localhost"
        if not backend:
            backend = broker

        self.celery = Celery(__name__, broker=broker, backend=backend)
        self.celery.conf["CELERY_ACCEPT_CONTENT"] = self.options["accept_content"]
        self.celery.conf["CELERY_TASK_SERIALIZER"] = self.options["task_serializer"]
        self.celery.conf["CELERY_RESULT_SERIALIZER"] = self.options["result_serializer"]
        self.celery.conf["CELERYBEAT_SCHEDULE_FILENAME"] = ".celerybeat-schedule"
        # Forward any extra CELERY_* feature options straight into celery conf.
        copy_extra_feature_options(self, self.celery.conf, "CELERY_")

        TaskBase = self.celery.Task
        class ContextTask(TaskBase):
            # Task base class that guarantees a Flask app context when the
            # task body runs.
            abstract = True
            def __call__(self, *args, **kwargs):
                if has_app_context():
                    # useful for testing if running tasks synchronously
                    return TaskBase.__call__(self, *args, **kwargs)
                else:
                    with app.app_context():
                        return TaskBase.__call__(self, *args, **kwargs)
        self.celery.Task = ContextTask

        self.celery.conf["CELERYBEAT_SCHEDULE"] = {}
        if self.options["schedule"]:
            for action, schedule in self.options["schedule"].iteritems():
                self.schedule_action(action, schedule)

        # Single shared celery task through which all frasco actions run.
        self.run_action_task = self.celery.task(name="frasco_run_action")(run_action)

        app.processes.append(("worker", ["frasco", "worker"]))
        if not self.options['run_beat_with_worker']:
            app.processes.append(("scheduler", ["frasco", "scheduler"]))

    def add_task(self, func, **kwargs):
        """Registers func as a plain celery task and returns it."""
        return self.celery.task(**kwargs)(func)

    def send_task(self, *args, **kwargs):
        """Proxies to Celery.send_task() (enqueue by task name)."""
        return self.celery.send_task(*args, **kwargs)

    def schedule_task(self, schedule_name, name, schedule, **kwargs):
        """Adds an entry to the beat schedule.

        ``schedule`` may be a dict of crontab() kwargs or a space-separated
        crontab string ("min hour day month weekday").
        """
        if isinstance(schedule, dict):
            schedule = crontab(**schedule)
        elif isinstance(schedule, str):
            schedule = crontab(*schedule.split(" "))
        self.celery.conf["CELERYBEAT_SCHEDULE"][schedule_name] = dict(
            task=name, schedule=schedule, **kwargs)

    def schedule_action(self, action, schedule, name=None):
        """Schedules a frasco action to be run periodically via run_action."""
        if not name:
            name = "scheduled_%s" % action
        self.schedule_task(name, "frasco_run_action", schedule, args=(action,))

    @action(default_option="action")
    def enqueue(self, action, **kwargs):
        """Queues an action for background execution.

        When a models transaction is open and delay_if_models_transaction is
        set, the enqueue itself is deferred until the transaction commits.
        The current logged-in user (if any) is captured so the task runs in
        their context.
        """
        if current_app.features.exists('models') and current_app.features.models.delayed_tx_calls.top is not None \
            and self.options['delay_if_models_transaction']:
            current_app.features.models.delayed_tx_calls.call(self.enqueue, (action,), kwargs)
            return
        if current_app.features.exists('users') and current_app.features.users.logged_in():
            kwargs.setdefault('_current_user', current_app.features.users.current)
        result = self.run_action_task.apply_async(args=(action,), kwargs=pack_task_args(kwargs))
        self.task_enqueued_event.send(self, action=action, result=result)
        return result

    def get_result(self, id):
        """Returns the AsyncResult for a previously enqueued action."""
        return self.run_action_task.AsyncResult(id)

    @command(with_reloader=True, with_app_ctx=False)
    def worker(self, hostname=None):
        """Runs a celery worker (embedding beat when configured and a
        schedule exists). Concurrency is forced to 1 in debug mode.
        """
        options = {'hostname': hostname, 'beat': False}
        if self.options['run_beat_with_worker'] and self.celery.conf["CELERYBEAT_SCHEDULE"]:
            options['beat'] = True
        if self.app.debug:
            options['concurrency'] = 1
        w = celery_worker(self.celery)
        w.run(**options)

    @command(with_reloader=True, with_app_ctx=False)
    def scheduler(self):
        """Runs a standalone celery beat scheduler."""
        b = celery_beat(self.celery)
        b.run()
The Commerce Schools Educational Enrichment Foundation continued its annual grant awards by funding projects totaling over $57,000. The Foundation has provided over $357,000 for projects to enhance opportunities for CISD students. Foundation board members traveled to each campus notifying teachers "Publisher's Clearinghouse-style" that their projects had received the go ahead for funding. Participating CSEEF board members were Dr. Jack and Beverly Pirkey, Loretta Kibler, Donna Spinato, Jerry Keeble, Janet Duncan and Patti Doster.
import os
import csv
import sys
import re
import importlib
import networkx as nx

# settings -- hard-coded local paths; the second inputFile assignment
# deliberately overrides the first.
curDir = 'E:/GitHub/bioinformatics-algorithms-1/week9'
#curDir = 'D:/Copy/Coursera/Bioinformatics Algorithms (part-I)/MyPrograms/week9'
inputFile = './data/5.LongestSharedSubstring-2.txt'
inputFile = 'C:/Users/Ashis/Downloads/dataset_296_5.txt'
outputFile = './results/5.LongestSharedSubstring.txt'

# set current directory
os.chdir(curDir)

## read input: two genomes, one per line, each terminated with '$'
## so every suffix ends at a unique leaf in the suffix tree.
with open(inputFile) as f:
    inputs = f.readlines()

genome1 = inputs[0].strip() + '$'
genome2 = inputs[1].strip() + '$'


## function to find the longest common prefix of two strings
def longestCommonPrefix(str1, str2):
    n = min([len(str1), len(str2)])
    i = 0
    while i < n and str1[i] == str2[i]:
        i += 1
    prefix = str1[0:i]
    return prefix


## function to build a suffix tree from a genome.
## Edges store 'labelIdx' = [start, end) indices into the genome instead of
## the label text itself; leaves store 'startIdx', the suffix's start offset.
## NOTE(review): uses the networkx 1.x API (edges_iter, dict attr argument
## to add_edge) -- will not run unchanged on networkx >= 2.0.
def suffixTree(genome):
    ## build suffix tree
    g = nx.DiGraph()
    g.add_node(1)  # add root with id 1

    # two required helpers: outgoing neighbors whose edge label starts with
    # the given character, and a fresh node id.
    neighborsWithLabelPrefix = lambda node, prefix: [e[1] for e in g.edges_iter(node, data=True) if genome[e[2]['labelIdx'][0]] == prefix]
    getNewNode = lambda : len(g.nodes())+1

    #print(longestCommonPrefix('abc','ab'))

    genomeLen = len(genome)
    # Insert every suffix genome[idx:] into the tree one at a time.
    for idx in range(genomeLen):
        # traverse as long as the suffix matches existing edge labels
        curNode = 1
        i = idx
        while(i < genomeLen):
            # find the edge starting with the suffix's next character
            nextNode = neighborsWithLabelPrefix(curNode, genome[i])

            # if there is no edge with the first prefix character,
            # it must be a new edge with the rest of the string.
            if len(nextNode) == 0:
                newNode = getNewNode()
                g.add_edge(curNode, newNode, {'labelIdx':[i,genomeLen]})
                g.node[newNode]['startIdx'] = idx
                break

            # get the edge label
            nextNode = nextNode[0]
            edgeLabelIndices = g.edge[curNode][nextNode]['labelIdx']
            edgeLabel = genome[edgeLabelIndices[0]:edgeLabelIndices[1]]
            edgeLabelLen = len(edgeLabel)

            # if the rest of the string starts with edgeLabel,
            # move to the next node
            if genome[i:i+edgeLabelLen] == edgeLabel:
                curNode = nextNode
                i += edgeLabelLen
            else:
                # edgeLabel matches only partially: split the edge at the
                # end of the common prefix.
                prefix = longestCommonPrefix(genome[i:i+edgeLabelLen], edgeLabel)
                prefixLen = len(prefix)

                # create two new nodes: one intermediate (the split point),
                # another leaf for the unmatched remainder of the suffix
                intermediateNode = getNewNode()
                unmatchedNode = intermediateNode + 1

                # remove existing edge from curNode to nextNode
                g.remove_edge(curNode, nextNode)
                # add edge from curNode to intermediateNode (the shared prefix)
                g.add_edge(curNode, intermediateNode, {'labelIdx':(edgeLabelIndices[0],edgeLabelIndices[0]+prefixLen)})
                # add edge from intermediateNode to nextNode (old label's tail)
                g.add_edge(intermediateNode, nextNode, {'labelIdx':(edgeLabelIndices[0]+prefixLen, edgeLabelIndices[1])})
                # add edge from intermediateNode to unmatchedNode (suffix tail)
                g.add_edge(intermediateNode, unmatchedNode, {'labelIdx':(i+prefixLen, genomeLen)})
                g.node[unmatchedNode]['startIdx'] = idx
                break
    return g


## build the two suffix trees, one per genome
g1 = suffixTree(genome1)
g2 = suffixTree(genome2)

## helper: outgoing neighbors of `node` in tree `g` whose edge label starts
## with `prefix` (same as the inner lambda in suffixTree, but parameterized
## by genome/tree)
neighborsWithLabelPrefix = lambda genome, g, node, prefix:\
                            [e[1] for e in g.edges_iter(node, data=True) \
                                if genome[e[2]['labelIdx'][0]] == prefix]

## helper: materialize the label text of the edge startNode->endNode
edgeLabelInSuffixTree = lambda genome, g, startNode, endNode:\
                            genome[g.edge[startNode][endNode]['labelIdx'][0]:\
                                   g.edge[startNode][endNode]['labelIdx'][1]]


## Walks both suffix trees simultaneously from (root1, root2), following
## pairs of edges with matching labels, and returns the longest string
## spelled out by a common path.
def longestSharedSubstring(root1, root2):
    longestSubstr = ''

    ## create matched edge pairs: edges out of root1 paired with the edge
    ## out of root2 that starts with the same character (if any)
    n1 = nx.neighbors(g1, root1)
    if len(n1) == 0:
        return longestSubstr

    edges1 = [(edgeLabelInSuffixTree(genome1, g1, root1, node), node) for node in n1]
    edgePairs = []
    for e in edges1:
        n2 = neighborsWithLabelPrefix(genome2, g2, root2, e[0][0])
        if len(n2)>0:
            e2 = (edgeLabelInSuffixTree(genome2, g2, root2, n2[0]), n2[0])
            edgePairs += [(e,e2)]
    if len(edgePairs) == 0:
        return longestSubstr

    ## traverse each edge pair, extending the match while the labels agree,
    ## and keep the longest substring found
    for ep in edgePairs:
        ## find substr for this pair
        substr = ''
        cur1 = root1
        cur2 = root2
        next1 = ep[0][1]
        next2 = ep[1][1]
        edge1 = ep[0][0]
        edge2 = ep[1][0]
        while True:
            if edge1 == edge2:
                # NOTE(review): this assignment discards any prefix already
                # accumulated in `substr` during earlier iterations of this
                # while loop -- looks like it should be `substr += ...`;
                # verify against expected results before changing.
                substr = edge1 + longestSharedSubstring(next1, next2)
                break

            # labels differ: extend by their common prefix, then advance the
            # tree whose edge was fully consumed
            prefix = longestCommonPrefix(edge1, edge2)
            substr += prefix

            if len(edge1) < len(edge2):
                edge2 = edge2[len(prefix):]
                cur1 = next1
                next1 = neighborsWithLabelPrefix(genome1, g1, cur1, edge2[0])
                if len(next1) == 0:
                    break
                next1 = next1[0]
                edge1 = edgeLabelInSuffixTree(genome1, g1, cur1, next1)
            else:
                edge1 = edge1[len(prefix):]
                cur2 = next2
                next2 = neighborsWithLabelPrefix(genome2, g2, cur2, edge1[0])
                if len(next2) == 0:
                    break
                next2 = next2[0]
                edge2 = edgeLabelInSuffixTree(genome2, g2, cur2, next2)

        # update longest substring seen so far
        if len(substr) > len(longestSubstr):
            longestSubstr = substr

    return longestSubstr


## find longest shared substring starting from both roots (node 1)
lss = longestSharedSubstring(1,1)
print(lss)

## output
with open(outputFile, "w") as f:
    f.writelines(lss)

print('done.')
Campo Verde’s Valedictorian and Salutatorian are the student(s) who have the highest grade point average from the classes required to fulfill their 22 credit graduation requirement. This process was put into policy when students decided not to take classes like music, art or PE, because the un-weighted grade would hurt their chances to become Valedictorian. By using the current method of determining Val/Sal, students are not punished for pursuing their passion in un-weighted coursework. This may or may not mean the #1 ranked student in class will be the valedictorian. At the beginning of the senior year, the students ranked in the 5% of their senior class are called into a meeting, handed a Val/Sal worksheet, and explained the process for choosing the Val/Sal in the spring. Senior students who were called up in the fall are once again called to a meeting the spring after third quarter grades have posted. It is these grades that are used in calculating the last semester grade point average for graduation requirements. Students are encouraged to work with their parents and counselor in selecting the most competitive courses from their transcript.
import sys,sqlite3,os from Bio import SeqIO if __name__ == "__main__": if len(sys.argv) != 3: print "python get_family_order_distributions.py db outfile [working dir]" sys.exit(0) conn = sqlite3.connect(sys.argv[1]) c = conn.cursor() try: os.chdir(sys.argv[3]) except IndexError: pass # init variables to store processed data genes = {} allo = [] for i in os.listdir("."): # for every final phlawd output file, open it, create an empty dict to store info if i[-9:] == "FINAL.aln": print i infile = open(i,"r") genes[i] = {} # for every sequence in this file for j in SeqIO.parse(infile,"fasta"): # get left and right values for the otu, if we can't find it in the db, use zeroes sql = "SELECT left_value,right_value from taxonomy where ncbi_id = "+str(j.id)+";" c.execute(sql) left = 0 right = 0 for h in c: left = h[0] right = h[1] # get the family for this otu sql = "SELECT name from taxonomy where left_value < "+str(left)+" and right_value > "+ \ str(right)+" and node_rank = 'family' and name_class = 'scientific name';" c.execute(sql) # if we can't find a family (wtf?) substitute this otu id. apparently for some # ncbi taxa, no family has been assigned (e.g. Foetidia, ncbi_id = 79568) nm = "" for h in c: nm = str(h[0]) # print nm if len(nm) == 0: nm = j.id # if we haven't seen this family/unassigned otu id yet, # record it, set the count to zero if nm not in allo: allo.append(nm) if nm not in genes[i]: genes[i][nm] = 0 genes[i][nm] += 1 # done with this gene infile.close() # done counting records conn.close() outfile = open(sys.argv[2],"w") # build/write the header line (names of families/unassigned otus) st = "" for i in allo: st += "\t"+i outfile.write(st+"\n") # write gene name, then family/otu counts for each gene for i in genes: outfile.write(i) for j in allo: if j in genes[i]: outfile.write("\t"+str(genes[i][j])) else: outfile.write("\t0") outfile.write("\n") # done outfile.close()
Beneath the scorching Carribean sun, Hanz Hölzl's black jeep drives onto the main road, only to be noisily smashed off the street by a speeding bus. Thomas Roth's "Falco - Verdammt, wir leben noch" (translated: "Falco - dammit, we're still alive") begins and ends with Falco's fatal car crash. It's not a particularly original story arc being told here, but give it credit for a marginally new point of view: A waitress, played by Grace Jones (!), retells the final hours of his life - on the parking lot in front of her bar. And that's where things really take off. In chronological order, Hölzl's musical childhood and youth are shown, followed by his first performances with Austrian band Hallucination Company and shock rockers Drahdiwaberl. Next, we see the creation of the stage persona Falco, which eventually lands him the hit "Rock Me Amadeaus" and the top spot on the US Billboard charts. The movie attempts to show how Falco and Hölzl gradually become one and the same, and how the popstar tries to escape the mounting pressure to succeed, aided by cocaine, pills and lots of whiskey. The essence of it all is that behind the arrogant jerk there was a sensitive but complicated person, who in a very simple way was in search of support and love - a home, wife and kid(s). However, his relationship to Jacqueline (Patricia Aulitzky is playing Falco's lover, whose real-life name was Isabella) doesn't work out, and after eight years of standing in as "best dad in the world" Hölzl discovers to his detriment that their daughter isn't in fact his. So far, no revelations here. The movie is based on biographical facts, but the details are pure conjecture. In other words: The dialogues, scenes and other small matters such as his childhood friendship with the character Billy are completely made up. Which wouldn't actually be a bad thing if they had approached the matter with some ingenuity. 
But Falco's womanizing, drug escapades, ups and downs are reduced to unexpressive scenelets: The boy Hans standing in front of a brothel as a kid, then the man Falco doing the same - we get it, he didn't object to prostitutes. But does one really have to be that blaringly obvious about it? Dialogues come across as laboured and clumsy, for instance when Hans' Mom says: "See? Practicing really payed off". Particularly underwhelming are the "in concert" scenes, which are musically sound (leading man Rubey sings everything himself, and the filmmakers cooperated with Falco's original producers Rob and Ferdi Bolland to rerecord all songs), but more reminiscent of a Karaoke performance than an actual rock concert. Considering they had a budget of 4 million Euros, this seems somewhat disappointing. But if you manage to lower your expectations, the cast could set you up for a nice surprise along with the soundtrack. One could have done a lot worse in selecting someone to play Falco. Manuel Rubey does a good job - especially if you consider that he's an acting novice (aside from roles in the TV series "Tatort" and "SOKO Kitzbühel"). He's way better known to Ö3 (note: Austrian pop radio) listeners as the frontman of the band Mondscheiner. One can tell that he spent a lot of time in front of a mirror to portray The Falcon as authentically arrogant as he possibly could. But of course, there is only one Falco, and the makeup department could have tried a little harder to turn the young, boyish Rubey into a 40 year old human trainwreck ... On the other hand, neither Robert Stadlober, who didn't think he was up to the role, or - much worse - Manuel Ortega would have been right for the part. * "This ain't awesome, but crappy" *** "He's pretty awesome - but the imitation can never outdo the original"
import pygame from os.path import join as path_join from vector import Vector from entity import Entity from constants import COLOR_BLACK class Bullet(Entity): """ Make the bullets independant""" SPEED = 500 IMAGE_FILENAME = path_join('assets', "images", "laser.png") def __init__(self, world, flip=False, location=None, direction=None): sprite = pygame.image.load(Bullet.IMAGE_FILENAME).convert() sprite.set_colorkey(COLOR_BLACK) super(Bullet, self).__init__( world, 'Bullet', sprite, flip=flip, speed=Bullet.SPEED, location=location ) self.direction = direction def process(self, time_passed): if not self.get_destination(): x, y = self.get_location().x, self.get_location().y if self.direction == 'up': y = 0 - self.get_height() elif self.direction == 'down': y = self.world.get_world_limits()[1] + self.get_height() elif self.direction == 'left': x = 0 - self.get_width() elif self.direction == 'right': x = self.world.get_world_limits()[0] + self.get_width() self.set_destination(Vector(x, y)) super(Bullet, self).process(time_passed)
When President David Granger visited the Ruimveldt operations of the Brass Aluminium and Cast Iron Foundry more than a year ago he made a point of underscoring the importance of innovation in industry in pursuit of adapting to the needs of the Guyanese economy. He could hardly have been in a more appropriate place to do so. He pointed out at the time that BACIF had had to function in an environment that not only placed it in competition with rich industrialized countries but had also been required to function in a domestic environment where the continually changing landscape – like the ‘rise and fall’ of the bauxite industry – compelled the company to shift gears, to find innovative ways of creating new demand for the products in which it specializes. It would hardly be surprising if BACIF makes a claim for being amongst the most innovative of companies ever to ‘set up shop’ in Guyana. Set to become sixty next year, BACIF have become past masters of responding to the critical needs of some of Guyana’s most important industries and two weeks ago we found the company’s representatives still seeking to market what it has to offer in the company of a number of other local manufacturers of a decidedly more recent vintage at the Guyana Trade and Investment Exhibition (GUYTIE) at the Marriott Hotel. It is not difficult to tell that BACIF’s primary ‘selling point’ is its demonstrable endurance in the face of continually changing fortunes and its remarkable ability to adapt its production proficiency to suit the changes. When Stabroek Business stopped by the BACIF display on the penultimate day of the GUYTIE event, Yolanda Geddes-Kendall, a Director of the Company who ‘doubles’ in the position of Acting Commercial and Design Superintendent assured us that the company was ‘holding steady’ despite the small matter of having had to endure the damage done to its mainstream market by the travails of the Guyana Sugar Corporation. 
GUYSUCO, Geddes-Kendall told us used to account for around 75% of BACIF’s market. Seemingly assuming a too-resolute-to-fail posture BACIF, in recent times, has simply turned to finding more ways of serving more sectors of the country’s economy. What has been the survival game plan? Geddes-Kendall explains that it is simply a matter of soldiering on, the pressures of having to cope with the vagaries of the country’s economy serving to focus minds. There are seventy- five jobs at stake here and it seems that management never loses sight of that. What this has meant – apart from carefully planned diversification – is the placing of an enhanced value on the importance of the critical customers………like the Guyana Water Inc. and the mining sector. It has meant, too, that BACIF understands the importance of placing an even higher value on its external market so that the Jamaican entity Wray & Nephew has become an even more valued customer. At the GUYTIE event, BACIF had strategically put on display a range of fabricated products designed to serve as a reminder of the variety of its work and as if to make the point about its enduringly nationalistic fervour had strategically placed among its exhibits a striking gold-coloured replica of the Cacique Crown. It would have been a good thing if the fortunes of sugar had not dried up orders from GUYSUCO for bearings, scraper plates and other products for the mills but Geddes-Kendall subtly makes the point that nostalgia is not a luxury BACIF can afford. Indeed, the company has long begun to look ahead pointing to the overhead walkways across the highway on the lower East Bank as offering a glimpse of the company’s future. 
These days BACIF’s product count is up to 1000 different custom-made items per year, a feat which Adrian Barkoye, the Technical Assistant to the Company’s Chief Executive Officer says is testimony to its ability to identify needs and to ‘sell’ itself as the company to respond to those needs and to the innovativeness that attends that marketing strategy. BACIF has made an impressive journey from the late Claude Geddes’ four-man enterprise “in the backyard of his residence” almost fifty years ago.
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Library to gather runtime performance metrics.

This module exposes the ResourceMonitor class, which client code can use to
gather resource usage metrics about their program. An example usage would look
something like:

  with ResourceMonitor() as monitor:
    ... do work ...
    metrics = monitor.metrics()
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import platform
import resource
import time

import psutil

from deepvariant.protos import resources_pb2


class ResourceMonitor(object):
  """Class for collecting resource usage info from this or child process."""

  def __init__(self):
    """Constructs a ResourceMonitor object."""
    # wall_start stays None until start() is called; metrics() checks this.
    self.wall_start = None
    self.metrics_pb = self._initial_metrics_protobuf()

  def _initial_metrics_protobuf(self):
    """Returns an initialized ResourceMetrics proto.

    This function also fills in the "constant" fields of the ResourceMetrics
    proto that don't depend on the actual running commands, such as host_name.

    Returns:
      learning.genomics.deepvariant.ResourceMetrics proto.
    """
    return resources_pb2.ResourceMetrics(
        host_name=_get_host_name(),
        cpu_frequency_mhz=_get_cpu_frequency(),
        physical_core_count=_get_cpu_count(),
        total_memory_mb=_get_total_memory())

  def __enter__(self):
    return self.start()

  def __exit__(self, unused_type, unused_value, unused_traceback):
    # No cleanup needed on exit; timing is reckoned lazily in metrics().
    pass

  def start(self):
    """Starts timers associated with resource collection.

    This method must be called before metrics().

    Returns:
      self to enable the idiom `monitor = ResourceMonitor().start()`.
    """
    self.wall_start = time.time()
    return self

  def metrics(self):
    """Collects and return runtime metrics as a ResourceMetrics proto.

    This method can be called multiple times, but wall clock time is always
    reckoned from the time of the last start() call.

    Returns:
      A learning.genomics.deepvariant.ResourceMetrics proto message.

    Raises:
      RuntimeError: if start() was not called previously.
    """
    if self.wall_start is None:
      raise RuntimeError('start() must be called prior to metrics()')
    self.metrics_pb.wall_time_seconds = time.time() - self.wall_start

    # Consider using psutil.cpu_times() instead to get more detailed information
    # about the usage in self and all children.
    try:
      rusage = resource.getrusage(resource.RUSAGE_SELF)
      self.metrics_pb.cpu_user_time_seconds = rusage.ru_utime
      self.metrics_pb.cpu_system_time_seconds = rusage.ru_stime
      # NOTE(review): the /1024 assumes ru_maxrss is in kilobytes, which is
      # true on Linux; on macOS ru_maxrss is reported in bytes, so this field
      # would be inflated there -- confirm target platforms.
      self.metrics_pb.memory_peak_rss_mb = int(rusage.ru_maxrss / 1024)
    except resource.error:
      # The OS call to get rusage failed, so just don't set the field values,
      # leaving them as the default values of 0.
      pass

    # Create a psutil.Process pointed at the current process.
    # NOTE(review): io_counters() is not available on every platform
    # (e.g. macOS); an exception here would propagate to the caller.
    process = psutil.Process()
    io_counters = process.io_counters()
    self.metrics_pb.read_bytes = io_counters.read_bytes
    self.metrics_pb.write_bytes = io_counters.write_bytes

    return self.metrics_pb


# ------------------------------------------------------------------------------
# Simple functions for getting host_name, cpu count, etc. Isolated here to make
# them mockable.
# ------------------------------------------------------------------------------


def _get_host_name():
  """Gets the host name of this machine."""
  return platform.node()


def _get_cpu_count():
  """Gets the number of physical cores in this machine.

  Returns:
    int >= 1 if the call to get the cpu_count succeeded, or 0 if not.
  """
  return psutil.cpu_count(logical=False) or 0


def _get_cpu_frequency():
  """Gets the frequency in MHz of the cpus in this machine.

  Returns:
    float > 0 if the call to get the cpu_frequency succeeded. This information
    may not be available on all systems, in which case we return 0.0.
  """
  try:
    freq = psutil.cpu_freq()
    return freq.current if freq is not None else 0.0
  except NotImplementedError:
    return 0.0


def _get_total_memory():
  """Gets the total memory in megabytes in this machine."""
  return int(psutil.virtual_memory().total / (1024 * 1024))
The Seventh-day Adventist Church believes that the gift of prophecy is still in operation in the Christian church, and that one of the manifestations of this gift was through Ellen Gould White, who was one of the pioneers of the church. Ellen White has acted as a guiding light from the time she was alive to this day. One wonders what this church would be like without her guidance, which we believe she received from the Lord. There is no known recording of her sermons or speeches, except for the testimonies of those who knew her personally. The best testimony you can ever get is from people with first-hand information about you — people close to you, the people who know you when you are away from the spotlight. Below is a video by some people who knew her or saw her. I hope that this video will strengthen your belief that God is still in the business of using some among us to be His prophets. And in the words of the Bible, I hope you shall "believe his prophets, so shall ye prosper" (2 Chronicles 20:20).
#
# Copyright 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
#
import os
import re
from collections import defaultdict
from io import StringIO
from warnings import warn

import configparser
from xdg import BaseDirectory as base_dirs

from lago.constants import CONFS_PATH
from lago.utils import argparse_to_ini


def _get_configs_path():
    """Get a list of possible configuration files, from the following
    sources:
    1. All files that exist in constants.CONFS_PATH.
    2. All XDG standard config files for "lago.conf", in reversed
    order of importance.

    Returns:
        list(str): list of files
    """
    paths = []
    xdg_paths = [
        path for path in base_dirs.load_config_paths('lago', 'lago.conf')
    ]
    paths.extend([path for path in CONFS_PATH if os.path.exists(path)])
    # XDG returns paths most-important-first; we want least-important-first
    # so later reads override earlier ones.
    paths.extend(reversed(xdg_paths))
    return paths


def get_env_dict(root_section):
    """Read all Lago variables from the environment.

    The lookup format is:
    LAGO_VARNAME - will land into 'lago' section
    LAGO__SECTION1__VARNAME - will land into 'section1' section, notice the
    double '__'.
    LAGO__LONG_SECTION_NAME__VARNAME - will land into 'long_section_name'

    Returns:
        dict: dict of section configuration dicts

    Examples:
        >>> os.environ['LAGO_GLOBAL_VAR'] = 'global'
        >>> os.environ['LAGO__INIT__REPO_PATH'] = '/tmp/store'
        >>>
        >>> config.get_env_dict()
        {'init': {'repo_path': '/tmp/store'}, 'lago': {'global_var': 'global'}}
    """
    env_lago = defaultdict(dict)
    # Single '_' (not followed by another '_') means root section; double
    # '__' introduces an explicit section name.
    decider = re.compile(
        (
            r'^{0}(?:_(?!_)|(?P<has>__))'
            r'(?(has)(?P<section>.+?)__)'
            r'(?P<name>.+)$'
        ).format(root_section.upper())
    )
    # os.environ.iteritems() is Python 2 only and raises AttributeError on
    # Python 3 (this module already uses Python 3's configparser/io.StringIO);
    # items() works on both.
    for key, value in os.environ.items():
        match = decider.match(key)
        if not match:
            continue
        if not match.group('name') or not value:
            warn(
                'empty environment variable definition:'
                '{0}, ignoring.'.format(key)
            )
        else:
            section = match.group('section') or root_section
            env_lago[section.lower()][match.group('name').lower()] = value
    return dict(env_lago)


class ConfigLoad(object):
    """Merges configuration parameters from 3 different sources:
    1. Environment variables
    2. Config files in .INI format
    3. argparse.ArgumentParser

    The assumed (but not mandatory) order of calls is:
    load() - load from config files and environment variables
    update_parser(parser) - update from the declared argparse parser
    update_args(args) - update from passed arguments to the parser
    """

    def __init__(self, root_section='lago'):
        """__init__

        Args:
            root_section (str): name of the section used as the default
                (root) section for lookups.
        """
        self.root_section = root_section
        self._config = defaultdict(dict)
        self._config.update(self.load())
        self._parser = None

    def load(self):
        """Load all configuration from INI format files and ENV, always
        preferring the last read.
        Order of loading is:
        1) Custom paths as defined in constants.CONFS_PATH
        2) XDG standard paths
        3) Environment variables

        Returns:
            dict: dict of section configuration dicts
        """
        configp = configparser.ConfigParser()
        for path in _get_configs_path():
            try:
                with open(path, 'r') as config_file:
                    configp.read_file(config_file)
            except IOError:
                # Unreadable/missing files are silently skipped; the config
                # is best-effort merged from whatever sources are available.
                pass
        configp.read_dict(get_env_dict(self.root_section))
        return {s: dict(configp.items(s)) for s in configp.sections()}

    def update_args(self, args):
        """Update config dictionary with parsed args, as resolved by argparse.
        Only root positional arguments that already exist will be overridden.

        Args:
            args (namespace): args parsed by argparse
        """
        for arg in vars(args):
            if self.get(arg):
                self._config[self.root_section][arg] = getattr(args, arg)

    def update_parser(self, parser):
        """Update config dictionary with declared arguments in an
        argparse.parser. New variables will be created, and existing ones
        overridden.

        Args:
            parser (argparse.ArgumentParser): parser to read variables from
        """
        self._parser = parser
        ini_str = argparse_to_ini(parser)
        configp = configparser.ConfigParser(allow_no_value=True)
        configp.read_dict(self._config)
        configp.read_string(ini_str)
        self._config.update(
            {s: dict(configp.items(s)) for s in configp.sections()}
        )

    def get(self, *args):
        """Get a variable from the default section

        Args:
            *args (args): dict.get() args

        Returns:
            str: config variable
        """
        return self._config[self.root_section].get(*args)

    def __getitem__(self, key):
        """Get a variable from the default section, good for fail-fast
        if key does not exists.

        Args:
            key (str): key

        Returns:
            str: config variable
        """
        return self._config[self.root_section][key]

    def get_section(self, *args):
        """get a section dictionary

        Args:
            *args (args): dict.get() args

        Returns:
            dict: section config dictionary
        """
        return self._config.get(*args)

    def get_ini(self, defaults_only=False, incl_unset=False):
        """Return the config dictionary in INI format

        Args:
            defaults_only (bool): if set, will ignore arguments set by the
                CLI.

        Returns:
            str: string of the config file in INI format
        """
        if self._parser:
            if not defaults_only:
                self._parser.set_defaults(
                    **self.get_section(self.root_section)
                )
            return argparse_to_ini(parser=self._parser, incl_unset=incl_unset)
        else:
            configp = configparser.ConfigParser(allow_no_value=True)
            configp.read_dict(self._config)
            with StringIO() as out_ini:
                configp.write(out_ini)
                return out_ini.getvalue()

    def __repr__(self):
        return self._config.__repr__()

    def __str__(self):
        return self._config.__str__()


config = ConfigLoad()
Sherman Sherman Johnnie and Hoyt, LLP provides legal services to individuals and businesses in Oregon, Washington, and throughout the country from its base in Salem, Oregon. Committed to building trusting, long-term relationships that make a positive difference for our clients, we draw on the diverse talents of our members to tailor creative solutions to fit each client’s situation. Our service is defined by our creativity, empathy, and results.
import inspect
import functools
import types

import numpy as np


class ResultsWrapper(object):
    """
    Class which wraps a statsmodels estimation Results class and steps in to
    reattach metadata to results (if available)
    """
    # Subclasses map attribute/method names to a wrapping "how" understood by
    # the model's data object (data.wrap_output).
    _wrap_attrs = {}
    _wrap_methods = {}

    def __init__(self, results):
        self._results = results
        self.__doc__ = results.__doc__

    def __dir__(self):
        # Expose the wrapped results' attributes for introspection.
        return [x for x in dir(self._results)]

    def __getattribute__(self, attr):
        """Prefer attributes on the wrapper itself; fall back to the wrapped
        results object, re-wrapping the value when _wrap_attrs says so."""
        get = lambda name: object.__getattribute__(self, name)
        results = get('_results')
        try:
            return get(attr)
        except AttributeError:
            pass

        obj = getattr(results, attr)
        data = results.model._data
        how = self._wrap_attrs.get(attr)
        if how:
            obj = data.wrap_output(obj, how=how)

        return obj


def union_dicts(*dicts):
    """Merge dicts left to right; later dicts override earlier keys."""
    result = {}
    for d in dicts:
        result.update(d)
    return result


def make_wrapper(func, how):
    """Return a method that forwards to `func` on the wrapped results and
    re-wraps the return value via data.wrap_output(..., how)."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        results = object.__getattribute__(self, '_results')
        data = results.model._data
        return data.wrap_output(func(results, *args, **kwargs), how)

    # inspect.getargspec/formatargspec are Python 2 era APIs and were removed
    # in recent Python 3 releases; inspect.signature is the portable
    # replacement.  func.im_func.func_name was also Python 2 only --
    # func.__name__ works on both.
    try:
        formatted = str(inspect.signature(func))
    except (TypeError, ValueError):
        formatted = '(...)'
    wrapper.__doc__ = "%s%s\n%s" % (func.__name__, formatted,
                                    wrapper.__doc__ or '')

    return wrapper


def populate_wrapper(klass, wrapping):
    """Attach wrapped versions of `wrapping`'s methods (listed in
    klass._wrap_methods) onto the wrapper class `klass`."""
    # dict.iteritems() is Python 2 only; items() works on both.
    for meth, how in klass._wrap_methods.items():
        if not hasattr(wrapping, meth):
            continue

        func = getattr(wrapping, meth)
        wrapper = make_wrapper(func, how)
        setattr(klass, meth, wrapper)

if __name__ == '__main__':
    # NOTE(review): demo code below relies on the historical
    # scikits.statsmodels package layout; it only runs as a script.
    import scikits.statsmodels.api as sm
    from pandas import DataFrame
    data = sm.datasets.longley.load()
    df = DataFrame(data.exog, columns=data.exog_name)
    y = data.endog
    # data.exog = sm.add_constant(data.exog)
    df['intercept'] = 1.
    olsresult = sm.OLS(y, df).fit()
    rlmresult = sm.RLM(y, df).fit()

    # olswrap = RegressionResultsWrapper(olsresult)
    # rlmwrap = RLMResultsWrapper(rlmresult)

    data = sm.datasets.wfs.load()
    # get offset
    offset = np.log(data.exog[:,-1])
    exog = data.exog[:,:-1]

    # convert dur to dummy
    exog = sm.tools.categorical(exog, col=0, drop=True)
    # drop reference category
    # convert res to dummy
    exog = sm.tools.categorical(exog, col=0, drop=True)
    # convert edu to dummy
    exog = sm.tools.categorical(exog, col=0, drop=True)
    # drop reference categories and add intercept
    exog = sm.add_constant(exog[:,[1,2,3,4,5,7,8,10,11,12]])

    endog = np.round(data.endog)
    mod = sm.GLM(endog, exog, family=sm.families.Poisson()).fit()
    # glmwrap = GLMResultsWrapper(mod)
You know what I love about the ’80s? Public Enemy, NWA, Prince, Guns N’ Roses, and the fact that The Toxic Avenger (1984), an unrated film that shows a kid’s head getting smashed by a car in graphic, bloody close-up within the first thirty minutes, was inexplicably made into a children’s cartoon on broadcast television. You know what I hate about the ’80s? Reaganomics, Reagan, Bush, and now that I’ve pretty much got you all on my side, let me do a 180 and say that I hate The Goonies (1985). Okay, so you’re probably yelling at your computer screen now, but I defy any of you to give me any kind of logical argument for why anyone over the age of ten, with an IQ over 100, should like this film, let alone consider it “the greatest adolescent adventure film of all time,” as at least one critic has dubbed it. Now, I’ll admit that I am lacking the one and only prerequisite for liking The Goonies: I never saw it as a kid. I know dozens of people who profess to love the film because they grew up with it. Most of them haven’t seen it since they were kids, but I guess that’s beside the point. The point is, I also never saw Labyrinth (1986) or The Princess Bride (1987) or The Neverending Story (1984) as a kid either, but I still love those movies now, after having seen them as an adult. Why? Because they’re actually good films.
import hashlib
import binascii

import encrypt.aes as aes
import encrypt.scrypt as scrypt
import num.enc as enc

# NOTE(review): this module is written for Python 2 -- it concatenates raw
# byte strings with chr()/'\x01' literals and uses the long() builtin below.
# Running it under Python 3 would require a bytes-handling rewrite; verify
# the target interpreter before reuse.


def encrypt(privK, Baddress, Saddress, passphrase):
    """
    BIP0038 private key encryption, Non-EC.

    Encrypts the raw 32-byte private key ``privK`` with ``passphrase``,
    binding the result to the concatenation of the two address strings
    (``Baddress + Saddress``) via the address hash used as scrypt salt.
    Returns the Base58Check-encoded encrypted key string.
    """
    # 1. take the first four bytes of SHA256(SHA256(address)) of it. Let's call this "addresshash".
    addresshash = hashlib.sha256(hashlib.sha256(Baddress + Saddress).digest()).digest()[:4]

    # 2. Derive a key from the passphrase using scrypt
    #    a. Parameters: passphrase is the passphrase itself encoded in UTF-8.
    #       addresshash came from the earlier step, n=16384, r=8, p=8, length=64
    #       (n, r, p are provisional and subject to consensus)
    key = scrypt.hash(passphrase, addresshash, 16384, 8, 8)

    # Let's split the resulting 64 bytes in half, and call them derivedhalf1 and derivedhalf2.
    derivedhalf1 = key[0:32]
    derivedhalf2 = key[32:64]

    # 3. Do AES256Encrypt(bitcoinprivkey[0...15] xor derivedhalf1[0...15], derivedhalf2),
    #    call the 16-byte result encryptedhalf1
    Aes = aes.Aes(derivedhalf2)
    encryptedhalf1 = Aes.enc(enc.sxor(privK[:16], derivedhalf1[:16]))

    # 4. Do AES256Encrypt(bitcoinprivkey[16...31] xor derivedhalf1[16...31], derivedhalf2),
    #    call the 16-byte result encryptedhalf2
    encryptedhalf2 = Aes.enc(enc.sxor(privK[16:32], derivedhalf1[16:32]))

    # 5. The encrypted private key is the Base58Check-encoded concatenation of the
    #    following, which totals 39 bytes without Base58 checksum:
    #    0x01 0x42 + flagbyte + salt + encryptedhalf1 + encryptedhalf2
    flagbyte = chr(0b11100000)  # 11 no-ec 1 compressed-pub 00 future 0 ec only 00 future
    privkey = ('\x01\x42' + flagbyte + addresshash + encryptedhalf1 + encryptedhalf2)
    check = hashlib.sha256(hashlib.sha256(privkey).digest()).digest()[:4]
    return enc.b58encode(privkey + check)


def decrypt(encrypted_privkey, passphrase):
    """
    BIP0038 private key decryption, Non-EC.

    Returns ``(priv, addresshash)`` on success, where ``priv`` is the raw
    32-byte private key and ``addresshash`` is the 4-byte salt (the caller
    should verify it against the address derived from ``priv``).  Returns
    ``(False, 'checksum')`` when the Base58Check checksum does not match.
    """
    # 1. Collect encrypted private key and passphrase from user.
    #    passed as parameters
    data = enc.b58decode(encrypted_privkey)
    # Layout: 0x01 0x42 | flagbyte | addresshash(4) | half1(16) | half2(16) | check(4)
    flagbyte = data[2:3]
    check = data[-4:]
    if check != hashlib.sha256(hashlib.sha256(data[:-4]).digest()).digest()[:4]:
        return False, 'checksum'
    addresshash = data[3:7]
    encryptedhalf1 = data[7:23]
    encryptedhalf2 = data[23:39]

    # 3. Derive decryption key for seedb using scrypt with passpoint, addresshash, and ownersalt
    key = scrypt.hash(passphrase, addresshash, 16384, 8, 8)
    derivedhalf1 = key[0:32]
    derivedhalf2 = key[32:64]

    # 4. Decrypt encryptedpart2 using AES256Decrypt to yield the last 8 bytes of seedb
    #    and the last 8 bytes of encryptedpart1.
    Aes = aes.Aes(derivedhalf2)
    decryptedhalf2 = Aes.dec(encryptedhalf2)

    # 5. Decrypt encryptedpart1 to yield the remainder of seedb.
    decryptedhalf1 = Aes.dec(encryptedhalf1)

    priv = decryptedhalf1 + decryptedhalf2
    # Undo the xor with derivedhalf1 by xoring the two values as big integers
    # and re-serializing to exactly 32 bytes (64 hex chars).
    priv = binascii.unhexlify('%064x' % (long(binascii.hexlify(priv), 16) ^ long(binascii.hexlify(derivedhalf1), 16)))
    return priv, addresshash
@ MIT and around the world, scholars, researchers, professionals, policy-makers, government officials, entrepreneurs, philanthropists, students and volunteers are impacting the digital divide in one or more of the following areas related to higher education. The links on this page are representative, but certainly not inclusive, of our global community of learners. We hope that you find them useful. E-mail us if you would like to suggest a link for this page (subject to approval). International online community for architects, planners, urban designers, landscape architects, conservationists, and scholars, with a focus on Muslim cultures and civilisations. Managed and maintained by the University of Wisconsin-Extension. A portal to online degrees and to the Virtual University Gazette. A non-profit based in Cambridge, MA, US that works with managers and decision makers to improve health worldwide.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Nadam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import training_ops


class NadamOptimizer(adam.AdamOptimizer):
  """Optimizer that implements the Nadam algorithm.

  Nadam is Adam with Nesterov momentum applied to the first-moment update.
  Dense updates reuse the fused Adam kernels with `use_nesterov=True`; the
  sparse path implements the Nesterov correction explicitly in
  `_apply_sparse_shared`.

  See [Dozat, T., 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
  """

  def _apply_dense(self, grad, var):
    """Dense-variable update: delegates to the fused apply_adam op with
    use_nesterov=True (all hyperparameters cast to the variable's dtype)."""
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    return training_ops.apply_adam(
        var,
        m,
        v,
        math_ops.cast(self._beta1_power, var.dtype.base_dtype),
        math_ops.cast(self._beta2_power, var.dtype.base_dtype),
        math_ops.cast(self._lr_t, var.dtype.base_dtype),
        math_ops.cast(self._beta1_t, var.dtype.base_dtype),
        math_ops.cast(self._beta2_t, var.dtype.base_dtype),
        math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking,
        use_nesterov=True).op

  def _resource_apply_dense(self, grad, var):
    """Same as _apply_dense but for resource variables (passes handles and
    casts hyperparameters to the gradient's dtype)."""
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    return training_ops.resource_apply_adam(
        var.handle,
        m.handle,
        v.handle,
        math_ops.cast(self._beta1_power, grad.dtype.base_dtype),
        math_ops.cast(self._beta2_power, grad.dtype.base_dtype),
        math_ops.cast(self._lr_t, grad.dtype.base_dtype),
        math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
        math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
        math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
        grad,
        use_locking=self._use_locking,
        use_nesterov=True)

  def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    """Sparse Nadam update shared by the sparse/resource-sparse paths.

    Unlike plain Adam, the parameter step uses the Nesterov-corrected
    m_bar = (1 - beta1) * g_t + beta1 * m_t instead of m_t itself.
    `scatter_add` abstracts over the variable flavor's scatter-add op.
    """
    beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
    beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    # Bias-corrected learning rate, as in Adam.
    lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
    # The control dependency guarantees the decay assign happens before the
    # sparse gradient contribution is scattered in.
    with ops.control_dependencies([m_t]):
      m_t = scatter_add(m, indices, m_scaled_g_values)
    # m_bar = (1 - beta1) * g_t + beta1 * m_t
    m_bar = m_scaled_g_values + beta1_t * m_t
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad * grad) * (1 - beta2_t)
    v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
    with ops.control_dependencies([v_t]):
      v_t = scatter_add(v, indices, v_scaled_g_values)
    v_sqrt = math_ops.sqrt(v_t)
    var_update = state_ops.assign_sub(
        var, lr * m_bar / (v_sqrt + epsilon_t), use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_bar, v_t])
Intermediate Season 1, Lesson 2: How Often and How Much Time? How To Improve Spoken English, Part 3: Get Your Grammar Going! Intermediate Season 1, Lesson 3: Mind Maps!
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 14:58:12 2015

@author: zah
"""
import functools
import contextlib

import jinja2
import jinja2.runtime
from jinja2.exceptions import TemplateError

from reportengine.resourcebuilder import ResourceError


class TemplateRecordError(ResourceError, TemplateError):
    pass


@functools.total_ordering
class TargetRecord:
    """Placeholder that records accesses to a name inside a top-level
    template instead of resolving it. Most operations other than calling
    with keyword arguments and string conversion are deliberately
    forbidden, since the real value is not known at record time."""

    def __init__(self, recorder, name, args=None, kwargs=None):
        self.name = name
        self.recorder = recorder
        self.args = args
        self.kwargs = kwargs

    def __call__(self, *args, **kwargs):
        # Arguments whose values are themselves unresolved records cannot be
        # evaluated, so reject them eagerly with a clear error.
        for val in (*args, *kwargs.values()):
            if isinstance(val, TargetRecord):
                raise TemplateRecordError("Cannot determine the value of "
                                          "parameter inside a top "
                                          "level template: %s" % val.name)
        return type(self)(recorder=self.recorder, name=self.name,
                          args=args, kwargs=kwargs)

    def __iter__(self):
        raise TemplateRecordError("Cannot iterate a resource inside a top "
                                  "level template: %s" % self.name)

    def __eq__(self, other):
        raise TemplateRecordError("Cannot compare resources inside a top "
                                  "level template: %s" % self.name)

    def __lt__(self, other):
        raise TemplateRecordError("Cannot compare resources inside a top "
                                  "level template: %s" % self.name)

    def __bool__(self):
        raise TemplateRecordError("Cannot determine boolean value of a "
                                  "resource inside a top "
                                  "level template: %s" % self.name)

    def __str__(self):
        """Do not call this!"""
        # This is dangerous as it will produce wrong results if called
        # outside the template. Maybe it would be better to use some other
        # name, and overwrite builtins.str in the template code context.
        if self.args is not None and self.kwargs is not None:
            if self.args:
                # Fixed typo in user-facing message ("aloowed" -> "allowed").
                msg = ("Error at {0.name}. Positional arguments like {0.args} "
                       "are not allowed inside a top-level template. "
                       "Use keyword arguments, such as "
                       "{0.name}(argname={0.args[0]},...)").format(self)
                raise TemplateRecordError(msg)
            target = {self.name: self.kwargs}
        else:
            target = self.name
        env_targets = self.recorder.environment.targets
        env_targets.append(target)
        return "<Provider {} args={} kwargs={}>".format(self.name, self.args,
                                                        self.kwargs)


class TargetSubs(TargetRecord):
    """Record variant used in substitution mode: rendering consumes the next
    precomputed result instead of registering a target."""

    def __str__(self):
        return str(next(self.recorder.environment.results))


class TargetRecorder(jinja2.runtime.Context):
    """Template context that resolves every name to a TargetRecord, so that
    rendering a template records which resources it needs."""

    record_class = TargetRecord

    def resolve(self, item):
        return self[item]

    def __contains__(self, item):
        # Claim every name exists so jinja2 never raises UndefinedError.
        return True

    def __getitem__(self, item):
        # TODO: Make sure we are not overwritting this
        if item in self.environment.globals:
            return self.environment.globals[item]
        record = self.record_class(self, item)
        return record


class TargetSubstituter(TargetRecorder):
    record_class = TargetSubs


class Environment(jinja2.Environment):
    """This class is the same as `jinja2.Environment` except that it adds a
    `fetch_mode` context manager, where the rendered templates register the
    variables and functions (with parameters) that will be called to render
    the template. This is used to extract the target resources and perform
    the corresponding checks.

    Also it imposes some restrictions on what the template can do, which is
    OK because we don't want a lot of logic in the user templates (we can
    always use another environment to render complex objects like figures)."""

    @contextlib.contextmanager
    def _change_context(self, context_class):
        """Temporarily swap the context class, restoring it on exit even if
        rendering raises."""
        past_context = self.context_class
        self.context_class = context_class
        try:
            yield
        finally:
            self.context_class = past_context

    @contextlib.contextmanager
    def fetch_mode(self):
        """Record targets: names resolved during render are appended to
        self.targets instead of being evaluated."""
        self.targets = []
        with self._change_context(TargetRecorder):
            yield

    @contextlib.contextmanager
    def subs_mode(self, results):
        """Substitute mode: each recorded target consumes the next value
        from `results` during rendering."""
        self.results = iter(results)
        with self._change_context(TargetSubstituter):
            yield

    def render_with_targets(self, template):
        """Generator protocol: first render records targets and yields them;
        the caller sends back computed results; the second yield is the final
        rendered text."""
        with self.fetch_mode():
            template.render()
        results = yield self.targets
        with self.subs_mode(results):
            yield template.render()
I was having an issue with a program behaving as though there were two monitors. The ATI video card had dual ports but only one monitor. I tried many things, like reinstalling drivers and making sure the display setting was not set to extended or duplicated. Ultimately it was the VNC Mirror Driver. I was able to fix and then “break” it again by toggling between Enabled/Disabled under Device Manager > Display Adapters > VNC Mirror Driver > right-click Disable. So I uninstalled the mirror driver. Found among other junk is a promotional advertising toy. It is a mouse in a meat storage room that you have to get into the mousetrap. The mouse has a little ball under it that it rolls on. The clear cover is close enough to keep it from flipping over, yet still lets it slide around. There is a tiny little metal flap on the trap. It keeps the mouse in the trap unless you give it a little shake, but it also makes it a little harder to get the mouse into the trap without a shake as well. The Armistead’s Ague Tonic label on the back and the pork meat on the inside show the connection of two businesses at William M. Akin & Company, established in 1848 by William M. Akin Sr. Pressing Ctrl+B in Chrome opens and closes the Bookmarks Toolbar. By default, Ctrl+B in Firefox toggles the Bookmarks Sidebar. Here is how to apply the same shortcut key to Firefox as in Chrome. The Bookmarks Sidebar will still be accessible with Ctrl+I. Install keyconfig.xpi posted in the mozillaZine forums. It is not an officially hosted plugin. There is also a keyconfig wiki.