text
stringlengths
29
850k
# -*- coding=utf-8 -*-
"""Shared utility helpers for requirementslib.

Contains Pipfile-entry predicates (``is_vcs``, ``is_editable``, ...), path
helpers, and a boltons-derived ``remap``/``get_path``/``merge_items`` toolkit
for transforming nested structures (including tomlkit containers).
"""
from __future__ import absolute_import, print_function

import logging
import os
import sys

import pip_shims.shims
import six
import six.moves
import tomlkit
import vistir
from six.moves.urllib.parse import urlparse, urlsplit, urlunparse
from vistir.compat import Path, fs_decode
from vistir.path import ensure_mkdir_p, is_valid_url

from .environment import MYPY_RUNNING

# Register py2/py3-compatible homes for the abstract collection classes so a
# single ``from six.moves import Mapping`` works on both interpreter lines.
# fmt: off
six.add_move(  # type: ignore
    six.MovedAttribute("Mapping", "collections", "collections.abc")  # type: ignore
)  # noqa  # isort:skip
six.add_move(  # type: ignore
    six.MovedAttribute("Sequence", "collections", "collections.abc")  # type: ignore
)  # noqa  # isort:skip
six.add_move(  # type: ignore
    six.MovedAttribute("Set", "collections", "collections.abc")  # type: ignore
)  # noqa  # isort:skip
six.add_move(  # type: ignore
    six.MovedAttribute("ItemsView", "collections", "collections.abc")  # type: ignore
)  # noqa
from six.moves import ItemsView, Mapping, Sequence, Set  # type: ignore  # noqa  # isort:skip
# fmt: on

if MYPY_RUNNING:
    from typing import Dict, Any, Optional, Union, Tuple, List, Iterable, Text, TypeVar

    STRING_TYPE = Union[bytes, str, Text]
    S = TypeVar("S", bytes, str, Text)
    PipfileEntryType = Union[STRING_TYPE, bool, Tuple[STRING_TYPE], List[STRING_TYPE]]
    PipfileType = Union[STRING_TYPE, Dict[STRING_TYPE, PipfileEntryType]]

# VCS keys recognised inside a Pipfile mapping entry.
VCS_LIST = ("git", "svn", "hg", "bzr")


def setup_logger():
    """Create and return the package logger, emitting DEBUG to stderr."""
    logger = logging.getLogger("requirementslib")
    loglevel = logging.DEBUG
    handler = logging.StreamHandler(stream=sys.stderr)
    handler.setLevel(loglevel)
    logger.addHandler(handler)
    logger.setLevel(loglevel)
    return logger


log = setup_logger()


SCHEME_LIST = ("http://", "https://", "ftp://", "ftps://", "file://")

VCS_SCHEMES = [
    "git",
    "git+http",
    "git+https",
    "git+ssh",
    "git+git",
    "git+file",
    "hg",
    "hg+http",
    "hg+https",
    "hg+ssh",
    "hg+static-http",
    "svn",
    "svn+ssh",
    "svn+http",
    "svn+https",
    "svn+svn",
    "bzr",
    "bzr+http",
    "bzr+https",
    "bzr+ssh",
    "bzr+sftp",
    "bzr+ftp",
    "bzr+lp",
]


def is_installable_dir(path):
    # type: (STRING_TYPE) -> bool
    """Return True if *path* looks like an installable project directory.

    Defers to pip's own check first, then additionally accepts directories
    whose ``pyproject.toml`` declares a ``build-system.build-backend``.
    """
    if pip_shims.shims.is_installable_dir(path):
        return True
    pyproject_path = os.path.join(path, "pyproject.toml")
    if os.path.exists(pyproject_path):
        pyproject = Path(pyproject_path)
        pyproject_toml = tomlkit.loads(pyproject.read_text())
        build_system = pyproject_toml.get("build-system", {}).get("build-backend", "")
        if build_system:
            return True
    return False


def strip_ssh_from_git_uri(uri):
    # type: (S) -> S
    """Return git+ssh:// formatted URI to git+git@ format"""
    if isinstance(uri, six.string_types):
        if "git+ssh://" in uri:
            parsed = urlparse(uri)
            # split the path on the first separating / so we can put the first segment
            # into the 'netloc' section with a : separator
            path_part, _, path = parsed.path.lstrip("/").partition("/")
            path = "/{0}".format(path)
            parsed = parsed._replace(
                netloc="{0}:{1}".format(parsed.netloc, path_part), path=path
            )
            uri = urlunparse(parsed).replace("git+ssh://", "git+", 1)
    return uri


def add_ssh_scheme_to_git_uri(uri):
    # type: (S) -> S
    """Cleans VCS uris from pipenv.patched.notpip format"""
    if isinstance(uri, six.string_types):
        # Add scheme for parsing purposes, this is also what pip does
        if uri.startswith("git+") and "://" not in uri:
            uri = uri.replace("git+", "git+ssh://", 1)
            parsed = urlparse(uri)
            if ":" in parsed.netloc:
                netloc, _, path_start = parsed.netloc.rpartition(":")
                path = "/{0}{1}".format(path_start, parsed.path)
                uri = urlunparse(parsed._replace(netloc=netloc, path=path))
    return uri


def is_vcs(pipfile_entry):
    # type: (PipfileType) -> bool
    """Determine if dictionary entry from Pipfile is for a vcs dependency."""
    if isinstance(pipfile_entry, Mapping):
        return any(key for key in pipfile_entry.keys() if key in VCS_LIST)
    elif isinstance(pipfile_entry, six.string_types):
        if not is_valid_url(pipfile_entry) and pipfile_entry.startswith("git+"):
            pipfile_entry = add_ssh_scheme_to_git_uri(pipfile_entry)
        parsed_entry = urlsplit(pipfile_entry)
        return parsed_entry.scheme in VCS_SCHEMES
    return False


def is_editable(pipfile_entry):
    # type: (PipfileType) -> bool
    """Return True if the Pipfile entry requests an editable install."""
    if isinstance(pipfile_entry, Mapping):
        return pipfile_entry.get("editable", False) is True
    if isinstance(pipfile_entry, six.string_types):
        return pipfile_entry.startswith("-e ")
    return False


def is_star(val):
    # type: (PipfileType) -> bool
    """Return True for the wildcard version specifier ``"*"``."""
    return (isinstance(val, six.string_types) and val == "*") or (
        isinstance(val, Mapping) and val.get("version", "") == "*"
    )


def convert_entry_to_path(path):
    # type: (Dict[S, Union[S, bool, Tuple[S], List[S]]]) -> S
    """Convert a pipfile entry to a string"""
    if not isinstance(path, Mapping):
        raise TypeError("expecting a mapping, received {0!r}".format(path))
    if not any(key in path for key in ["file", "path"]):
        raise ValueError("missing path-like entry in supplied mapping {0!r}".format(path))
    if "file" in path:
        path = vistir.path.url_to_path(path["file"])
    elif "path" in path:
        path = path["path"]
    if not os.name == "nt":
        return fs_decode(path)
    return Path(fs_decode(path)).as_posix()


def is_installable_file(path):
    # type: (PipfileType) -> bool
    """Determine if a path can potentially be installed"""
    from packaging import specifiers

    if isinstance(path, Mapping):
        path = convert_entry_to_path(path)

    # If the string starts with a valid specifier operator, test if it is a valid
    # specifier set before making a path object (to avoid breaking windows)
    if any(path.startswith(spec) for spec in "!=<>~"):
        try:
            specifiers.SpecifierSet(path)
        # If this is not a valid specifier, just move on and try it as a path
        except specifiers.InvalidSpecifier:
            pass
        else:
            return False

    parsed = urlparse(path)
    # A one-character scheme on Windows is really a drive letter (e.g. "c:").
    is_local = (
        not parsed.scheme
        or parsed.scheme == "file"
        or (len(parsed.scheme) == 1 and os.name == "nt")
    )
    if parsed.scheme and parsed.scheme == "file":
        path = vistir.compat.fs_decode(vistir.path.url_to_path(path))
    normalized_path = vistir.path.normalize_path(path)
    if is_local and not os.path.exists(normalized_path):
        return False

    is_archive = pip_shims.shims.is_archive_file(normalized_path)
    is_local_project = os.path.isdir(normalized_path) and is_installable_dir(
        normalized_path
    )
    if is_local and is_local_project or is_archive:
        return True

    # Remote archives (sdists/wheels served over a URL) are also installable.
    if not is_local and pip_shims.shims.is_archive_file(parsed.path):
        return True

    return False


def get_dist_metadata(dist):
    """Return the parsed metadata message for an installed distribution.

    Prefers the wheel-style ``METADATA`` file, falling back to ``PKG-INFO``.
    """
    import pkg_resources
    from email.parser import FeedParser

    if isinstance(dist, pkg_resources.DistInfoDistribution) and dist.has_metadata(
        "METADATA"
    ):
        metadata = dist.get_metadata("METADATA")
    elif dist.has_metadata("PKG-INFO"):
        metadata = dist.get_metadata("PKG-INFO")
    else:
        metadata = ""

    feed_parser = FeedParser()
    feed_parser.feed(metadata)
    return feed_parser.close()


def get_setup_paths(base_path, subdirectory=None):
    # type: (S, Optional[S]) -> Dict[S, Optional[S]]
    """Locate setup.py / setup.cfg / pyproject.toml under *base_path*.

    When *subdirectory* is given, files found there take precedence. Values
    are ``None`` for files that do not exist.
    """
    if base_path is None:
        raise TypeError("must provide a path to derive setup paths from")
    setup_py = os.path.join(base_path, "setup.py")
    setup_cfg = os.path.join(base_path, "setup.cfg")
    pyproject_toml = os.path.join(base_path, "pyproject.toml")
    if subdirectory is not None:
        base_path = os.path.join(base_path, subdirectory)
        subdir_setup_py = os.path.join(subdirectory, "setup.py")
        subdir_setup_cfg = os.path.join(subdirectory, "setup.cfg")
        subdir_pyproject_toml = os.path.join(subdirectory, "pyproject.toml")
    if subdirectory and os.path.exists(subdir_setup_py):
        setup_py = subdir_setup_py
    if subdirectory and os.path.exists(subdir_setup_cfg):
        setup_cfg = subdir_setup_cfg
    if subdirectory and os.path.exists(subdir_pyproject_toml):
        pyproject_toml = subdir_pyproject_toml
    return {
        "setup_py": setup_py if os.path.exists(setup_py) else None,
        "setup_cfg": setup_cfg if os.path.exists(setup_cfg) else None,
        "pyproject_toml": pyproject_toml if os.path.exists(pyproject_toml) else None,
    }


def prepare_pip_source_args(sources, pip_args=None):
    # type: (List[Dict[S, Union[S, bool]]], Optional[List[S]]) -> List[S]
    """Translate a list of source mappings into pip command-line arguments.

    The first source becomes ``-i``; the rest become ``--extra-index-url``.
    Sources with ``verify_ssl`` false also get a ``--trusted-host`` entry.
    """
    if pip_args is None:
        pip_args = []
    if sources:
        # Add the source to pip9.
        pip_args.extend(["-i", sources[0]["url"]])  # type: ignore
        # Trust the host if it's not verified.
        if not sources[0].get("verify_ssl", True):
            pip_args.extend(
                ["--trusted-host", urlparse(sources[0]["url"]).hostname]
            )  # type: ignore
        # Add additional sources as extra indexes.
        if len(sources) > 1:
            for source in sources[1:]:
                pip_args.extend(["--extra-index-url", source["url"]])  # type: ignore
                # Trust the host if it's not verified.
                if not source.get("verify_ssl", True):
                    pip_args.extend(
                        ["--trusted-host", urlparse(source["url"]).hostname]
                    )  # type: ignore
    return pip_args


@ensure_mkdir_p(mode=0o777)
def _ensure_dir(path):
    # The decorator creates the directory as a side effect; the function
    # merely hands the path back.
    return path


# Sentinels: _UNSET marks "no default supplied"; _REMAP_EXIT marks a
# post-order "exit" frame on remap's traversal stack.
_UNSET = object()
_REMAP_EXIT = object()


# The following functionality is either borrowed or modified from the itertools module
# in the boltons library by Mahmoud Hashemi and distributed under the BSD license
# the text of which is included below:
# (original text from https://github.com/mahmoud/boltons/blob/master/LICENSE)
# Copyright (c) 2013, Mahmoud Hashemi
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#
#     * The names of the contributors may not be used to endorse or
#       promote products derived from this software without specific
#       prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


class PathAccessError(KeyError, IndexError, TypeError):
    """An amalgamation of KeyError, IndexError, and TypeError,
    representing what can occur when looking up a path in a nested
    object.
    """

    def __init__(self, exc, seg, path):
        self.exc = exc
        self.seg = seg
        self.path = path

    def __repr__(self):
        cn = self.__class__.__name__
        return "%s(%r, %r, %r)" % (cn, self.exc, self.seg, self.path)

    def __str__(self):
        return "could not access %r from path %r, got error: %r" % (
            self.seg,
            self.path,
            self.exc,
        )


def get_path(root, path, default=_UNSET):
    """Retrieve a value from a nested object via a tuple representing the
    lookup path.

    >>> root = {'a': {'b': {'c': [[1], [2], [3]]}}}
    >>> get_path(root, ('a', 'b', 'c', 2, 0))
    3

    The path format is intentionally consistent with that of
    :func:`remap`.

    One of get_path's chief aims is improved error messaging. EAFP is
    great, but the error messages are not.

    For instance, ``root['a']['b']['c'][2][1]`` gives back
    ``IndexError: list index out of range``

    What went out of range where? get_path currently raises
    ``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2,
    1), got error: IndexError('list index out of range',)``, a
    subclass of IndexError and KeyError.

    You can also pass a default that covers the entire operation,
    should the lookup fail at any level.

    Args:
        root: The target nesting of dictionaries, lists, or other
            objects supporting ``__getitem__``.
        path (tuple): A list of strings and integers to be successively
            looked up within *root*.
        default: The value to be returned should any
            ``PathAccessError`` exceptions be raised.
    """
    if isinstance(path, six.string_types):
        # Dotted-string shorthand, e.g. "a.b.c".
        path = path.split(".")
    cur = root
    try:
        for seg in path:
            try:
                cur = cur[seg]
            except (KeyError, IndexError) as exc:
                raise PathAccessError(exc, seg, path)
            except TypeError as exc:
                # either string index in a list, or a parent that
                # doesn't support indexing
                try:
                    seg = int(seg)
                    cur = cur[seg]
                except (ValueError, KeyError, IndexError, TypeError):
                    if not getattr(cur, "__iter__", None):
                        exc = TypeError(
                            "%r object is not indexable" % type(cur).__name__
                        )
                    raise PathAccessError(exc, seg, path)
    except PathAccessError:
        if default is _UNSET:
            raise
        return default
    return cur


def default_visit(path, key, value):
    # Identity visit: keep every item unchanged.
    return key, value


# Kept so remap can detect "visit was not customised" and skip the call.
_orig_default_visit = default_visit


# Modified from https://github.com/mahmoud/boltons/blob/master/boltons/iterutils.py
def dict_path_enter(path, key, value):
    """Default *enter* callback: return (blank container, items iterator).

    Strings are treated as leaves; tomlkit Arrays keep their trivia so
    round-tripped TOML formatting is preserved.
    """
    if isinstance(value, six.string_types):
        return value, False
    elif isinstance(value, (Mapping, dict)):
        return value.__class__(), ItemsView(value)
    elif isinstance(value, tomlkit.items.Array):
        return value.__class__([], value.trivia), enumerate(value)
    elif isinstance(value, (Sequence, list)):
        return value.__class__(), enumerate(value)
    elif isinstance(value, (Set, set)):
        return value.__class__(), enumerate(value)
    else:
        return value, False


def dict_path_exit(path, key, old_parent, new_parent, new_items):
    """Default *exit* callback: pour *new_items* into *new_parent*.

    Falls back to rebuilding immutable containers (tuples, frozensets)
    from scratch, and handles tomlkit containers specially.
    """
    ret = new_parent
    if isinstance(new_parent, (Mapping, dict)):
        vals = dict(new_items)
        try:
            new_parent.update(new_items)
        except AttributeError:
            # Handle toml containers specifically
            try:
                new_parent.update(vals)
            # Now use default fallback if needed
            except AttributeError:
                ret = new_parent.__class__(vals)
    elif isinstance(new_parent, tomlkit.items.Array):
        vals = tomlkit.items.item([v for i, v in new_items])
        try:
            new_parent._value.extend(vals._value)
        except AttributeError:
            ret = tomlkit.items.item(vals)
    elif isinstance(new_parent, (Sequence, list)):
        vals = [v for i, v in new_items]
        try:
            new_parent.extend(vals)
        except AttributeError:
            ret = new_parent.__class__(vals)  # tuples
    elif isinstance(new_parent, (Set, set)):
        vals = [v for i, v in new_items]
        try:
            new_parent.update(vals)
        except AttributeError:
            ret = new_parent.__class__(vals)  # frozensets
    else:
        raise RuntimeError("unexpected iterable type: %r" % type(new_parent))
    return ret


def remap(
    root, visit=default_visit, enter=dict_path_enter, exit=dict_path_exit, **kwargs
):
    """The remap ("recursive map") function is used to traverse and
    transform nested structures: lists, tuples, sets, dictionaries, and
    the tomlkit containers handled by the default callbacks.

    Here's an example of removing all Nones from some data:

    >>> from pprint import pprint
    >>> reviews = {'Star Trek': {'TNG': 10, 'DS9': 8.5, 'ENT': None},
    ...            'Babylon 5': 6, 'Dr. Who': None}
    >>> pprint(remap(reviews, lambda p, k, v: v is not None))
    {'Babylon 5': 6, 'Star Trek': {'DS9': 8.5, 'TNG': 10}}

    See `this remap cookbook`_ for more recipes.

    .. _this remap cookbook: http://sedimental.org/remap.html

    Args:
        root: The target object to traverse. By default, remap supports
            iterables like :class:`list`, :class:`tuple`, :class:`dict`,
            and :class:`set`, but any object traversable by *enter* will
            work.
        visit (callable): Called on every item in *root* with three
            positional arguments: *path* (a tuple of parents' keys),
            *key*, and *value*. It should return the new key-value pair;
            it may also return ``True`` to keep the old item unmodified,
            or ``False`` to drop it. For traversable values it is called
            on the new parent, after all its children have been visited.
        enter (callable): Controls which items in *root* are traversed.
            Accepts the same arguments as *visit* and returns a pair of
            the blank new parent and an iterator over the items which
            should be visited; returning ``False`` instead of an iterator
            suppresses traversal. Only called once per unique value.
        exit (callable): Determines how to handle items once visited.
            Gets the same three arguments plus the blank new parent
            returned from *enter* and the list of new items as remapped
            by *visit*. Only called once per unique value; its return
            value replaces the old parent in the result.
        reraise_visit (bool): When ``False``, remap ignores any errors
            raised by the *visit* callback and keeps the offending items.

    Duplicate and self-referential objects (aka reference loops) are
    automatically handled internally, `as shown here`_.

    .. _as shown here: http://sedimental.org/remap.html#corner_cases
    """
    # TODO: improve argument formatting in sphinx doc
    # TODO: enter() return (False, items) to continue traverse but cancel copy?
    if not callable(visit):
        raise TypeError("visit expected callable, not: %r" % visit)
    if not callable(enter):
        raise TypeError("enter expected callable, not: %r" % enter)
    if not callable(exit):
        raise TypeError("exit expected callable, not: %r" % exit)
    reraise_visit = kwargs.pop("reraise_visit", True)
    if kwargs:
        raise TypeError("unexpected keyword arguments: %r" % kwargs.keys())

    path, registry, stack = (), {}, [(None, root)]
    new_items_stack = []
    while stack:
        key, value = stack.pop()
        id_value = id(value)
        if key is _REMAP_EXIT:
            # Post-order frame: all children of old_parent are processed.
            key, new_parent, old_parent = value
            id_value = id(old_parent)
            path, new_items = new_items_stack.pop()
            value = exit(path, key, old_parent, new_parent, new_items)
            registry[id_value] = value
            if not new_items_stack:
                continue
        elif id_value in registry:
            # Already-seen object (duplicate or reference loop): reuse result.
            value = registry[id_value]
        else:
            res = enter(path, key, value)
            try:
                new_parent, new_items = res
            except TypeError:
                # TODO: handle False?
                raise TypeError(
                    "enter should return a tuple of (new_parent,"
                    " items_iterator), not: %r" % res
                )
            if new_items is not False:
                # traverse unless False is explicitly passed
                registry[id_value] = new_parent
                new_items_stack.append((path, []))
                if value is not root:
                    path += (key,)
                stack.append((_REMAP_EXIT, (key, new_parent, value)))
                if new_items:
                    stack.extend(reversed(list(new_items)))
                continue
        if visit is _orig_default_visit:
            # avoid function call overhead by inlining identity operation
            visited_item = (key, value)
        else:
            try:
                visited_item = visit(path, key, value)
            except Exception:
                if reraise_visit:
                    raise
                visited_item = True
            if visited_item is False:
                continue  # drop
            elif visited_item is True:
                visited_item = (key, value)
            # TODO: typecheck?
            # raise TypeError('expected (key, value) from visit(),'
            #                 ' not: %r' % visited_item)
        try:
            new_items_stack[-1][1].append(visited_item)
        except IndexError:
            raise TypeError("expected remappable root, not: %r" % root)
    return value


def merge_items(target_list, sourced=False):
    """Recursively merge a sequence of nested structures, later over earlier.

    When *sourced* is True, *target_list* is (name, target) pairs and a
    mapping of path -> source name is returned alongside the merged result.
    """
    if not sourced:
        target_list = [(id(t), t) for t in target_list]

    ret = None
    source_map = {}

    def remerge_enter(path, key, value):
        new_parent, new_items = dict_path_enter(path, key, value)
        if ret and not path and key is None:
            new_parent = ret
        try:
            # Merge into the container already built at this path, if any.
            cur_val = get_path(ret, path + (key,))
        except KeyError as ke:
            pass
        else:
            new_parent = cur_val
        return new_parent, new_items

    def remerge_exit(path, key, old_parent, new_parent, new_items):
        return dict_path_exit(path, key, old_parent, new_parent, new_items)

    for t_name, target in target_list:
        if sourced:

            def remerge_visit(path, key, value):
                source_map[path + (key,)] = t_name
                return True

        else:
            remerge_visit = default_visit

        ret = remap(target, enter=remerge_enter, visit=remerge_visit, exit=remerge_exit)

    if not sourced:
        return ret
    return ret, source_map
* Virtual Reality: How Much Immersion Is Enough? (Computer, vol. 42)
#!/usr/bin/env python # encoding: utf-8 from audiomanager import AudioManager from spotify import Spotify def status(audio, spotify): print '-' * 50 print "Sink name:", audio._sink_name print "Sink id:", audio._sink_id print "is muted:", audio.is_muted() print "volume:", audio.get_volume() print '-' * 5 print "Spotify title:", spotify.get_title().encode('utf-8') print "Spotify title type:", type(spotify.get_title()) # print "Spotify title unicode:", spotify.get_title().decode('') print "Spotify is blacklisted:", spotify.is_blacklisted() print '-' * 50 def loop(audio, spotify): import time a = audio while True: if spotify.is_blacklisted(): if not a.is_muted(): a.mute() status(audio, spotify) else: if a.is_muted(): a.unmute() status(audio, spotify) time.sleep(0.1) if __name__ == '__main__': audio = AudioManager() spotify = Spotify() status(audio, spotify) # loop(audio, spotify)
I decided to shake things up a bit in 2019. Not make year-long resolutions but break the year down into goals or challenges. Last month was self-care. Just slow down. I decided one way to do with was to use up bath products I have had in drawers for a while. This is me usually “Open drawer see products. Oh yeah should use that. Not today. No time.” Repeat next time I open the drawer. I gave myself a facial. I opened the “Kiss My Face” product thinking it was a facial scrub or mask but it is a cleanser. Now in shower. Also have had a few soaks. I tried to accomplished one thing a week. Doesn’t sound like much but big for me. This month I want to concentrate on not spending. I debated doing this one in January but thought no and did self-care. December was expensive with few unexpected purchases (over the range microwave being one). Oh yeah and Christmas. I’m not a clothes or shoe shopper. So I don’t think this will be a problem. My restraint will be needed on not purchasing stationery and craft supplies. I’m thinking to fill this void I will use up some craft supplies on hand. That will give me some focus. If your thinking won’t books be a problem – no I am an avid library going. Is there something besides clothes and shoes that you find hard not to purchase? I would love to hear what pulls when at the shops! I just need to avoid the shops. I am more of an impulse buyer, but fortunately, that doesn’t translate to online shopping. If I can’t hold it, I’m not likely to buy it (unless I really need it). Don’t do much on-line shopping so that didn’t even cross my mind as an issue. Not much of an impulse buyer, like I wrote it is craft supplies that will be my downfall!! With kids, most of my shopping happens at the grocery store–but I can definitely get lured into a Marshalls or TJMaxx and decide the deals are too good to pass up. I always try to remind myself that no deal is better than spending $0–especially when the item isn’t necessary! Want versus a need. 
I think wants are fine if you can afford them and not giving something necessary up or going into debt. Just after an expensive month I want to spend less/save more as vacation later in the year and want to enjoy that and not over think what I’m spending. Thanks for visiting! Presents would be hard – especially if it was “them!”. I’m not an impulse buyer but I do think that waiting an hour is a good idea. When we have a sale or craft show at work I don’t take my pursue when I go through the tables. That way no impulse buying and usually I don’t go back for items I was thinking about. I didn’t think of it as a self-care practice but it really is. thank you. February for you, if not purchasing vintage items, could be craft projects or re-purposing items you already have. That would be a fun month! Art supplies! Anytime I feel the pull to try something new, I have a hard time not buying the supplies. I know! Art supplies is the pull. Shoes/boots whatever!!!
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Base class for Walkers."""

import abc
import collections

from dm_control import composer
from dm_control.composer.observation import observable
from dm_env import specs
import numpy as np


def _make_readonly_float64_copy(value):
  """Returns a read-only float64 copy of `value` (scalar or array-like)."""
  if np.isscalar(value):
    return np.float64(value)
  else:
    out = np.array(value, dtype=np.float64)
    out.flags.writeable = False
    return out


class WalkerPose(collections.namedtuple(
    'WalkerPose', ('qpos', 'xpos', 'xquat'))):
  """A named tuple representing a walker's joint and Cartesian pose."""

  __slots__ = ()

  def __new__(cls, qpos=None, xpos=(0, 0, 0), xquat=(1, 0, 0, 0)):
    """Creates a new WalkerPose.

    Args:
      qpos: The joint position for the pose, or `None` if the `qpos0` values
        in the `mjModel` should be used.
      xpos: A Cartesian displacement, for example if the walker should be
        lifted or lowered by a specific amount for this pose.
      xquat: A quaternion displacement for the root body.

    Returns:
      A new instance of `WalkerPose`.
    """
    return super(WalkerPose, cls).__new__(
        cls,
        qpos=_make_readonly_float64_copy(qpos) if qpos is not None else None,
        xpos=_make_readonly_float64_copy(xpos),
        xquat=_make_readonly_float64_copy(xquat))

  def __eq__(self, other):
    return (np.all(self.qpos == other.qpos) and
            np.all(self.xpos == other.xpos) and
            np.all(self.xquat == other.xquat))


class Walker(composer.Robot, metaclass=abc.ABCMeta):
  """Abstract base class for Walker robots."""

  def create_root_joints(self, attachment_frame):
    # A free joint lets the walker translate and rotate freely relative to
    # the frame it is attached to.
    attachment_frame.add('freejoint')

  def _build_observables(self):
    return WalkerObservables(self)

  def transform_vec_to_egocentric_frame(self, physics, vec_in_world_frame):
    """Linearly transforms a world-frame vector into walker's egocentric frame.

    Note that this function does not perform an affine transformation of the
    vector. In other words, the input vector is assumed to be specified with
    respect to the same origin as this walker's egocentric frame. This
    function can also be applied to matrices whose innermost dimensions are
    either 2 or 3. In this case, a matrix with the same leading dimensions is
    returned where the innermost vectors are replaced by their values computed
    in the egocentric frame.

    Args:
      physics: An `mjcf.Physics` instance.
      vec_in_world_frame: A NumPy array with last dimension of shape (2,) or
        (3,) that represents a vector quantity in the world frame.

    Returns:
      The same quantity as `vec_in_world_frame` but reexpressed in this
      entity's egocentric frame. The returned np.array has the same shape as
      np.asarray(vec_in_world_frame).

    Raises:
      ValueError: if `vec_in_world_frame` does not have shape ending with
        (2,) or (3,).
    """
    return super().global_vector_to_local_frame(physics, vec_in_world_frame)

  def transform_xmat_to_egocentric_frame(self, physics, xmat):
    """Transforms another entity's `xmat` into this walker's egocentric frame.

    This function takes another entity's (E) xmat, which is an SO(3) matrix
    from E's frame to the world frame, and turns it to a matrix that
    transforms from E's frame into this walker's egocentric frame.

    Args:
      physics: An `mjcf.Physics` instance.
      xmat: A NumPy array of shape (3, 3) or (9,) that represents another
        entity's xmat.

    Returns:
      The `xmat` reexpressed in this entity's egocentric frame. The returned
      np.array has the same shape as np.asarray(xmat).

    Raises:
      ValueError: if `xmat` does not have shape (3, 3) or (9,).
    """
    return super().global_xmat_to_local_frame(physics, xmat)

  # `abc.abstractproperty` is deprecated (Python 3.3+); the stacked
  # `@property` + `@abc.abstractmethod` form is the documented equivalent.
  @property
  @abc.abstractmethod
  def root_body(self):
    """The walker's root `mjcf` body; must be provided by subclasses."""
    raise NotImplementedError

  @property
  @abc.abstractmethod
  def observable_joints(self):
    """The joints whose state is exposed via `joints_pos`."""
    raise NotImplementedError

  @property
  def action_spec(self):
    """A `BoundedArray` spec matching this walker's actuator ctrl ranges."""
    if not self.actuators:
      minimum, maximum = (), ()
    else:
      # Actuators without an explicit ctrlrange default to [-1, 1].
      minimum, maximum = zip(*[
          a.ctrlrange if a.ctrlrange is not None else (-1., 1.)
          for a in self.actuators
      ])
    return specs.BoundedArray(
        shape=(len(self.actuators),),
        # BUGFIX: `np.float` was a deprecated alias for the builtin `float`
        # (i.e. float64) and was removed in NumPy 1.24, which makes this line
        # raise AttributeError. `np.float64` preserves the original dtype.
        dtype=np.float64,
        minimum=minimum,
        maximum=maximum,
        name='\t'.join([actuator.name for actuator in self.actuators]))

  def apply_action(self, physics, action, random_state):
    """Apply action to walker's actuators."""
    del random_state  # Unused.
    physics.bind(self.actuators).ctrl = action


class WalkerObservables(composer.Observables):
  """Base class for Walker observables."""

  @composer.observable
  def joints_pos(self):
    return observable.MJCFFeature('qpos', self._entity.observable_joints)

  @composer.observable
  def sensors_gyro(self):
    return observable.MJCFFeature('sensordata',
                                  self._entity.mjcf_model.sensor.gyro)

  @composer.observable
  def sensors_accelerometer(self):
    return observable.MJCFFeature('sensordata',
                                  self._entity.mjcf_model.sensor.accelerometer)

  @composer.observable
  def sensors_framequat(self):
    return observable.MJCFFeature('sensordata',
                                  self._entity.mjcf_model.sensor.framequat)

  # Semantic groupings of Walker observables.
  def _collect_from_attachments(self, attribute_name):
    # Gather the same observable group from every attached sub-entity.
    out = []
    for entity in self._entity.iter_entities(exclude_self=True):
      out.extend(getattr(entity.observables, attribute_name, []))
    return out

  @property
  def proprioception(self):
    return ([self.joints_pos] +
            self._collect_from_attachments('proprioception'))

  @property
  def kinematic_sensors(self):
    return ([self.sensors_gyro,
             self.sensors_accelerometer,
             self.sensors_framequat] +
            self._collect_from_attachments('kinematic_sensors'))

  @property
  def dynamic_sensors(self):
    return self._collect_from_attachments('dynamic_sensors')
This page is slow because we have gone "elajt" (elite). We have made a Quake clan and are playing Quake all day now. Seriously: we are just busy working on our projects; we haven't given up or anything, unlike certain other UK-based dudes. The new projects include multiple new demos and several internet things.
# -*- coding: utf-8 -*-
# pylint: disable=too-few-public-methods,invalid-name
""" Bencode support.

    Copyright (c) 2009-2017 The PyroScope Project <pyroscope.project@gmail.com>

    See http://en.wikipedia.org/wiki/Bencode
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from six import string_types, text_type, binary_type, integer_types


class BencodeError(ValueError):
    """ Error during decoding or encoding.
    """


class Decoder(object):
    """ Decode a string or stream to an object.
    """

    def __init__(self, data, char_encoding='utf-8'):
        """ Initialize encoder.

            Decoding always operates on bytes; text input is encoded
            with `char_encoding` first.
        """
        if isinstance(data, text_type):
            self.data = data.encode(char_encoding)
        else:
            self.data = data
        # Cursor into `self.data`; advanced as each token is consumed.
        self.offset = 0
        self.char_encoding = char_encoding

    def decode(self, check_trailer=False):  # pylint: disable=I0011,R0912
        """ Decode data in C{self.data} and return deserialized object.

            @param check_trailer: Raise error if trailing junk is found in data?
            @raise BencodeError: Invalid data.
        """
        if self.offset >= len(self.data):
            raise BencodeError("Unexpected end of data at offset %d/%d" % (
                self.offset, len(self.data),
            ))
        kind = self.data[self.offset:self.offset+1]  # get bytes of length 1, not an int^

        if b'0' <= kind <= b'9':
            # String: "<len>:<bytes>"
            try:
                end = self.data.find(b':', self.offset)
                length = int(self.data[self.offset:end], 10)
            except (ValueError, TypeError):
                raise BencodeError("Bad string length at offset %d (%r...)" % (
                    self.offset, self.data[self.offset:self.offset+32]))

            self.offset = end+length+1
            obj = self.data[end+1:self.offset]

            if self.char_encoding:
                try:
                    obj = obj.decode(self.char_encoding)
                except (UnicodeError, AttributeError):
                    # deliver non-decodable string (byte arrays) as-is
                    pass
        elif kind == b'i':
            # Integer: "i<digits>e"
            try:
                end = self.data.find(b'e', self.offset+1)
                obj = int(self.data[self.offset+1:end], 10)
            except (ValueError, TypeError):
                raise BencodeError("Bad integer at offset %d (%r...)" % (
                    self.offset, self.data[self.offset:self.offset+32]))
            self.offset = end+1
        elif kind == b'l':
            # List: "l<item><item>...e" -- recurse until the closing 'e'.
            self.offset += 1
            obj = []
            while self.data[self.offset:self.offset+1] != b'e':
                obj.append(self.decode())
            self.offset += 1
        elif kind == b'd':
            # Dict: "d<key><value>...e" -- keys and values alternate.
            self.offset += 1
            obj = {}
            while self.data[self.offset:self.offset+1] != b'e':
                key = self.decode()
                obj[key] = self.decode()
            self.offset += 1
        else:
            raise BencodeError("Format error at offset %d (%r...)" % (
                self.offset, self.data[self.offset:self.offset+32]))

        if check_trailer and self.offset != len(self.data):
            raise BencodeError("Trailing data at offset %d (%r...)" % (
                self.offset, self.data[self.offset:self.offset+32]))

        return obj


class Encoder(object):
    """ Encode a given object to an array of bytestrings.
    """

    def __init__(self, char_encoding='utf-8'):
        """ Initialize encoder.
        """
        # List of byte chunks; b''.join()ed by bencode().
        self.result = []
        self.char_encoding = char_encoding

    def encode(self, obj):
        """ Add the given object to the result.

            Returns the (shared) result list for convenience.
            @raise BencodeError: Unsupported object type.
        """
        if isinstance(obj, bool):
            # Booleans are serialized as the integers 1 / 0.
            self.result.append(b"i1e" if obj else b"i0e")
        elif isinstance(obj, integer_types):
            self.result.extend([b"i", text_type(obj).encode(self.char_encoding), b"e"])
        elif isinstance(obj, string_types):
            if isinstance(obj, text_type):
                obj = obj.encode(self.char_encoding)
            self.result.extend([str(len(obj)).encode(self.char_encoding), b':', obj])
        elif isinstance(obj, binary_type):
            # Previous check catches py2's str
            self.result.extend([str(len(obj)).encode(self.char_encoding), b':', obj])
        elif hasattr(obj, "__bencode__"):
            # Custom serialization hook: encode whatever the object delegates to.
            self.encode(obj.__bencode__())
        elif hasattr(obj, "items"):
            # Dictionary -- bencode requires keys sorted as raw strings.
            self.result.append(b'd')
            for key, val in sorted(obj.items()):
                # Integer keys are stringified; anything else must be a (byte) string.
                if isinstance(key, integer_types):
                    key = text_type(key).encode(self.char_encoding)
                if not isinstance(key, string_types + (binary_type,)):
                    raise BencodeError("Dict key must be bytestring, found '%s'" % key)
                if isinstance(key, text_type):
                    key = key.encode(self.char_encoding)
                self.result.extend([str(len(key)).encode(self.char_encoding), b':', key])
                self.encode(val)
            self.result.append(b'e')
        else:
            # Treat as iterable
            try:
                items = iter(obj)
            except TypeError as exc:
                raise BencodeError("Unsupported non-iterable object %r of type %s (%s)" % (
                    obj, type(obj), exc))
            else:
                self.result.append(b'l')
                for item in items:
                    self.encode(item)
                self.result.append(b'e')

        return self.result


def bdecode(data, char_encoding='utf-8'):
    """ Decode a string or stream to an object.
    """
    return Decoder(data, char_encoding).decode(check_trailer=True)


def bencode(obj, char_encoding='utf-8'):
    """ Encode a given object to data.
    """
    return b''.join(Encoder(char_encoding).encode(obj))


def bread(stream):
    """ Decode a file or stream to an object.

        `stream` may be a readable file-like object or a path name.
    """
    if hasattr(stream, "read"):
        return bdecode(stream.read())
    else:
        handle = open(stream, "rb")
        try:
            return bdecode(handle.read())
        finally:
            handle.close()


def bwrite(stream, obj):
    """ Encode a given object to a file or stream.

        `stream` may be a writable file-like object or a path name;
        a path opened here is always closed again.
    """
    handle = None
    if not hasattr(stream, "write"):
        stream = handle = open(stream, "wb")
    try:
        stream.write(bencode(obj))
    finally:
        if handle:
            handle.close()
This cute, duck-shaped natural rubber teether is a must-have for all babies. The Kawan teether is covered in a fine layer of vegetable oil when you receive it. This is to protect the rubber from drying out. Before using the teether, it should be washed and then sterilised, using either a sterilising solution or by boiling it in water for 5 minutes.
import time from JumpScale import j if not q._init_called: from JumpScale.core.InitBaseCore import q def logtest(total, interval, message, format=False): j.core.messagehandler3.connect2localLogserver() start = time.time() result = [] for n in xrange(1, total + 1): if n % interval == 0: t = time.time() delta = t - start print "Did %d of %d logs in %ss" % (n, total, delta) result.append({ "done": n, "time": delta }) if format: data = { "n": n, "total": total } j.logger.log(message % data) else: j.logger.log(message) totalTime = time.time() - start average = total / float(totalTime) print "Logged %d messages at %f messages per second on average" % (total, average) return result if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Test the logging system") parser.add_argument("--message", help="The message to log, can include " "%(n)s and %(total)s if you enable formatting", default="Testing 1 2 3") parser.add_argument("--format", action="store_true", help="Message contains formatting") parser.add_argument("--total", type=int, default=10000, help="The total amount of log calls that should happen") parser.add_argument("--interval", type=int, default=1000, help="The interval to print the passed time") parser.add_argument("--zeromq", action="store_true", help="Enable the 0MQ log handler") parser.add_argument("--dump-json", dest="dumpjson", type=argparse.FileType('w')) args = parser.parse_args() result = logtest(args.total, args.interval, args.message, args.format)
Books: I’m reading two books at the same time right now. Why? Uh, I dunno. The other one is “When to Rob a Bank”… which has nothing to do with the first book….. Ooh yes! Another strawberry recipe 🙂 I love strawberries, and they are best fresh. Once you start buying strawberries in the winter — I’m sorry, they just don’t taste as good. So, we really like to load up on fresh ones this time of year. Here is a crisp recipe that used up some of my strawberries and mangoes. The second time, I used up my frozen peaches in lieu of mangoes, and it was fantastic that way as well. You can make it either way. You can also make this using all frozen peaches or mangoes and strawberries – just reduce the cornstarch to 1 Tb. Top it with vanilla ice cream, or divide it up into ramekins to make individual crumbles (reduce the baking time!), or top with whipped cream. This recipe has 71 five-star reviews on Food Network because it is so good! Grease a 9×9″ or 1.5 qt. casserole dish with butter or cooking spray. In a great big bowl: toss together the lemon juice, cornstarch, strawberries, peaches or mango, and brown sugar. Add to the casserole dish. In a medium bowl, toss together the flour, oats, almonds, brown sugar, cinnamon and salt. Stir to combine. Drizzle melted butter on top. Stir until the mixture is wet. Sprinkle onto the fruit mixture. Bake 40 minutes. The fruit filling will be bubbly and the top will be golden brown. Cool for 1-2 hours. Book Update: I took ALL my books back to the library! We move next week! I already found a school library book from last May under our dining table. Oy vey. Recipe: A lassi is a yogurt-based drink mainly found in India, Pakistan and Bangladesh. Feel free to substitute the mango for another fruit and the orange juice for milk or more ice. Fruits could include berries, papaya, pineapple, oranges, etc. Basically, try 1 cup of yogurt, 1 cup of fruit, some ice cubes and some honey if you like.
For denser fruits like mango, I had orange juice or water. Something fast and delicious! Great way to get my kids some vitamin A since they really dislike carrots and I don’t have any canned pumpkin right now. Frozen mango and organic mango is sold at Trader Joe’s for $2.30 per bag, so it is a staple around here especially with a sprinkle of cinnamon on top. Add it to your kid’s lunch box or use it for a breakfast on the go. You get vitamin A and C from the mango, protein from the yogurt, and more vitamins from the orange juice. Blend together in a blender. Adding liquids first seems to help my blender blend faster. Blend in a blender and drink! Usually adding the liquid ingredients first seems to help the blender process in mine. I’m Back & with Mango Bread! Hey Everybody!!!! So… I’ve been doing this blog 3 years. THREE years. Non-stop through anything and everything. That is until I sold my home. It’s like a 24 hour job! Cleaning 3 times per day, packing half of the house into a truck and then a storage unit, touching up inside and out. Wow! I would say it is more stressful than getting married, but just a little less stressful than having a newborn baby. I did get 5 hours of sleep at night in a row – which is way better than having a newborn. Why sell? Well, we always wanted to have more land and hubby wanted a workshop. This subdivision we found gave us more land (that we don’t own… but it’s a park and a giant creek so little chance of building there) and an extra garage gives hubby his workshop. After discussing it a couple days, we put money down on the lot, put our house up on the market and sold it in EIGHT days. Yes, eight. I think we could have sold it sooner, but we had a huge ice storm opening weekend and then school was closed the following Monday as well. This recipe is for Mango Bread – a recipe I found at King Arthur Flour. I had some thawed out mango and wanted to make something different from it. Now, the bread tastes nothing like mango. 
It would taste great savory or sweet like as French Toast or as a ham sandwich. So, if you received mangoes as a gift, in your produce basket, or just tired of mango – then you may want to try this recipe and use them up! Add everything to the bread machine per the instructions. For my Oster machine, I add the liquids first. Then, I add everything else but the flour and yeast. Add the flour in one big heap. Poke a hole in the top with my finger and add the yeast into the hole. Hit “Dough Cycle” to make just the dough, or hit “white bread” cycle to make and bake the bread. This time I decided to try baking the bread and it worked great! If using the dough cycle, bake the bread in a 9×5 bread pan at 350 degrees for 25 minutes covered with foil, and then 15 minutes uncovered.
#!/usr/bin/env python

import logging
import os
import sys

BASE_PATH = os.path.dirname(__file__)

logging.basicConfig()


def main():
    """
    Standalone django model test with a 'memory-only-django-installation'.

    You can play with a django model without a complete django app
    installation. http://www.djangosnippets.org/snippets/1044/

    Exits with the number of test failures as the process status.
    """
    # Fix: `sys.exc_clear()` was called here; it is a Python-2-only API and
    # raises AttributeError under Python 3, and clearing exception state is
    # unnecessary at this point anyway.
    import django.test.utils

    os.environ["DJANGO_SETTINGS_MODULE"] = "django.conf.global_settings"
    from django.conf import global_settings

    # ISS Settings -- credentials are taken from the environment, so a
    # missing variable fails fast with KeyError here.
    global_settings.MS_ACCESS_KEY = os.environ["MS_ACCESS_KEY"]
    global_settings.MS_SECRET_KEY = os.environ["MS_SECRET_KEY"]
    global_settings.MS_ASSOCIATION_ID = os.environ["MS_ASSOCIATION_ID"]

    global_settings.INSTALLED_APPS = ('iss',)
    # In-repo sqlite file keeps the test run self-contained.
    global_settings.DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_PATH, 'iss.sqlite'),
            'USER': '',
            'PASSWORD': '',
            'HOST': '',
            'PORT': '',
        }
    }

    global_settings.SECRET_KEY = "blahblah"

    # django.setup() exists from 1.7 on; version tuples like (1, 7, 0)
    # compare greater than (1, 7), so this covers 1.7.x as well.
    if django.VERSION > (1, 7):
        django.setup()

    test_runner = django.test.utils.get_runner(global_settings)

    if django.VERSION > (1, 2):
        failures = test_runner().run_tests(['iss'])
    else:
        failures = test_runner(['iss'], verbosity=2)

    sys.exit(failures)


if __name__ == '__main__':
    main()
Bri started her band, titled “The Banned”, in 2011 after a top 10 finish on CMT’s Next Superstar. Kody started playing music in the beginning of 2014, eager to take on as many acoustic gigs and songwriting competitions in the Dallas/Ft. Worth area that he could. As a frequent support act of the Texas Country/Red Dirt band Dalton Domino and the Front Porch Family, he took on the role of tour manager beginning of 2015. Grant Gilbert established his country music credentials in 2017 with his debut EP, “Lost in Transition,” and experienced success with his debut single, “On Guard” in subsequent years. On the outset of Gilbert’s career, he demonstrated traditional country leanings but refined his sound with a classic rock influence, well-founded melodies, and strapping guitar hooks. Gilbert’s version of the regional red dirt sound adjusts to his will and demonstrates an ability to cross over to a rock audience without a hitch, establishing him as an emerging paragon for the genre. This fun local band pleases the crowd with their old school R&B. Closing the festival Sunday morning, 12-3pm. Come out to enjoy the good weather, the Art Village and some Sunday Street Food and cold drinks.
from validator.constants import PACKAGE_THEME
from validator.contextgenerator import ContextGenerator
from validator.testcases.javascript import traverser
from validator.testcases.javascript.jsshell import get_tree


def test_js_file(err, filename, data, line=0, context=None, pollutable=False):
    'Test a JS file by parsing and analyzing its tokens.'
    if err.detected_type == PACKAGE_THEME:
        err.warning(
            err_id=('testcases_scripting', 'test_js_file', 'theme_js'),
            warning='JS run from full theme',
            description='Themes should not contain executable code.',
            filename=filename,
            line=line)

    before_tier = None
    # Set the tier to 3 (Security Tests).  (The old comment said "4", which
    # contradicted the code below; the code's value is kept.)
    if err is not None:
        before_tier = err.tier
        err.set_tier(3)

    tree = get_tree(data, filename=filename, err=err)
    if not tree:
        # Restore the saved tier even on this early exit.  `is not None` is
        # used instead of truthiness so a legitimate tier of 0 would also be
        # restored.
        if before_tier is not None:
            err.set_tier(before_tier)
        return

    # Generate a context if one is not available.
    if context is None:
        context = ContextGenerator(data)

    t = traverser.Traverser(
        err, filename, line, context=context,
        is_jsm=(filename.endswith('.jsm') or 'EXPORTED_SYMBOLS' in data))
    t.pollutable = pollutable
    t.run(tree)

    # Reset the tier so we don't break the world
    if err is not None:
        err.set_tier(before_tier)


def test_js_snippet(err, data, filename, line=0, context=None):
    'Process a JS snippet by passing it through to the file tester.'
    if not data:
        return

    # Wrap snippets in a function to prevent the parser from freaking out
    # when return statements exist without a corresponding function.
    data = '(function(){%s\n})()' % data

    test_js_file(err, filename, data, line, context, pollutable=False)
We will certainly tell you regarding the Bedroom Furniture Hickory Nc image gallery we carry this internet site. You can look for photos you like for details objectives. Bedroom Furniture Hickory Nc is one of the most looked search of the month. If you wish to download please click “Download” button to minimize your smart device, Tablet or Computer system. If you need a photo of Bedroom Furniture Hickory Nc much more you can look the search on this web site. We hope the information on this website can aid you discover something you are seeking. If you have criticism and also pointers about this post, please leave a message in the remark area about Bedroom Furniture Hickory Nc.You could look for photos you like for info functions. Bedroom Furniture Hickory Nc is the most browsed search of the month. If you require an image of Bedroom Furniture Hickory Nc much more you can search the search on this website. We have recommendations to the history of the car you could see on the Wikipedia. A car (or vehicle) is a wheeled automobile used for transport. Many interpretations of car say they run largely on roads, seat one to eight individuals, have four tires, and mostly transportation people rather than items. Autos entered worldwide use during the 20th century, and also established economic climates rely on them. The year 1886 is regarded as the birth year of the contemporary car, when German developer Karl Benz built his Benz Patent-Motorwagen. Cars and trucks did not end up being commonly available until the early 20th century. One of the initial automobiles that came to the masses was the 1908 Version T, an American car made by the Ford Electric motor Company. Cars and trucks were rapidly taken on in the US, where they replaced animal-drawn carriages as well as carts, however took much longer to be accepted in Western Europe and other parts of the world.|A car (or car) is a wheeled motor lorry made use of for transportation. 
The year 1886 is pertained to as the birth year of the contemporary car, when German developer Karl Benz developed his Benz Patent-Motorwagen. One of the very first automobiles that was easily accessible to the masses was the 1908 Version T, an American car produced by the Ford Motor Business. When we discuss Bedroom Furniture Hickory Nc after that we will consider bedroom furniture hickory nc and several points. Yet often we should know about to recognize much better. It is not far away with the very important . If you want to open up the image gallery please click picture picture listed below. You can likewise download for your photo collection.
"""Unittests for idlelib.configHelpSourceEdit""" import unittest from idlelib.idle_test.mock_tk import Var, Mbox, Entry from idlelib import configHelpSourceEdit as help_dialog_module help_dialog = help_dialog_module.GetHelpSourceDialog class Dummy_help_dialog: # Mock for testing the following methods of help_dialog menu_ok = help_dialog.menu_ok path_ok = help_dialog.path_ok ok = help_dialog.ok cancel = help_dialog.cancel # Attributes, constant or variable, needed for tests menu = Var() entryMenu = Entry() path = Var() entryPath = Entry() result = None destroyed = False def destroy(self): self.destroyed = True # menu_ok and path_ok call Mbox.showerror if menu and path are not ok. orig_mbox = help_dialog_module.tkMessageBox showerror = Mbox.showerror class ConfigHelpTest(unittest.TestCase): dialog = Dummy_help_dialog() @classmethod def setUpClass(cls): help_dialog_module.tkMessageBox = Mbox @classmethod def tearDownClass(cls): help_dialog_module.tkMessageBox = orig_mbox def test_blank_menu(self): self.dialog.menu.set('') self.assertFalse(self.dialog.menu_ok()) self.assertEqual(showerror.title, 'Menu Item Error') self.assertIn('No', showerror.message) def test_long_menu(self): self.dialog.menu.set('hello' * 10) self.assertFalse(self.dialog.menu_ok()) self.assertEqual(showerror.title, 'Menu Item Error') self.assertIn('long', showerror.message) def test_good_menu(self): self.dialog.menu.set('help') showerror.title = 'No Error' # should not be called self.assertTrue(self.dialog.menu_ok()) self.assertEqual(showerror.title, 'No Error') def test_blank_path(self): self.dialog.path.set('') self.assertFalse(self.dialog.path_ok()) self.assertEqual(showerror.title, 'File Path Error') self.assertIn('No', showerror.message) def test_invalid_file_path(self): self.dialog.path.set('foobar' * 100) self.assertFalse(self.dialog.path_ok()) self.assertEqual(showerror.title, 'File Path Error') self.assertIn('not exist', showerror.message) def test_invalid_url_path(self): 
self.dialog.path.set('ww.foobar.com') self.assertFalse(self.dialog.path_ok()) self.assertEqual(showerror.title, 'File Path Error') self.assertIn('not exist', showerror.message) self.dialog.path.set('htt.foobar.com') self.assertFalse(self.dialog.path_ok()) self.assertEqual(showerror.title, 'File Path Error') self.assertIn('not exist', showerror.message) def test_good_path(self): self.dialog.path.set('https://docs.python.org') showerror.title = 'No Error' # should not be called self.assertTrue(self.dialog.path_ok()) self.assertEqual(showerror.title, 'No Error') def test_ok(self): self.dialog.destroyed = False self.dialog.menu.set('help') self.dialog.path.set('https://docs.python.org') self.dialog.ok() self.assertEqual(self.dialog.result, ('help', 'https://docs.python.org')) self.assertTrue(self.dialog.destroyed) def test_cancel(self): self.dialog.destroyed = False self.dialog.cancel() self.assertEqual(self.dialog.result, None) self.assertTrue(self.dialog.destroyed) if __name__ == '__main__': unittest.main(verbosity=2, exit=False)
Palm Springs International Shortfest, June 23-29, 2009. Philadelphia Independent Film Festival, June 25-28. Dubrovnik Libertas Film Festival, June 26-30. Screening in three fests over the same weekend, Kelly and I decided to attend the impressive (and very hot) Palm Springs Shortfest and Market. One of the best organized festivals I've ever attended. We saw dozens of great films, made lots of friends and showed L&RK to a packed, enthusiastic audience at an important world venue. Fest focuses on short film from around the world, with lots of parties, seminars and a lively filmmakers lounge. A lot of international connections are made, and the adjoining market allows everyone to catch films even if you missed the screening. A great experience thanks to the terrific selection of films and huge staff of volunteers. We explored nearby Joshua Tree, and found relief from 113 degree heat in the delicious local date shake. Palm Springs was a highlight of the fest circuit, I hope to come again. Next up: Puerto Rico Cinefiesta in July, Montreal Worldfest in August and Williamstown in October.
#! /usr/bin/env python
#
# Quex is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this library; if not, write to the Free Software Foundation, Inc., 59
# Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# (C) 2005-2012 Frank-Rene Schaefer
#
################################################################################
import sys
import os

import quex.exception_checker as exception_checker

# Quex is Python 2 only; bail out early with a clear message under Python 3.
# (The parenthesized print calls below are valid in both interpreters.)
if sys.version_info[0] >= 3:
    print("error: This version of quex was not implemented for Python >= 3.0")
    print("error: Please, use Python versions 2.x.")
    sys.exit(-1)

# Fix: `os.environ.has_key(...) == False` is a Python-2-only API and an
# anti-idiom; the `in` operator is equivalent and portable.
if "QUEX_PATH" not in os.environ:
    print("Environment variable QUEX_PATH has not been defined.")
else:
    sys.path.insert(0, os.environ["QUEX_PATH"])

try:
    exception_checker.do_on_import(sys.argv)
    import quex.DEFINITIONS
    import quex.input.command_line.core as command_line
    import quex.input.command_line.query as query
    import quex.core as core
except BaseException as instance:
    exception_checker.handle(instance)

# Optional psyco JIT acceleration -- deliberately best-effort and currently
# disabled; the bare except keeps any import failure silent.
try:
    pass
    # import psyco
    # psyco.full()
except:
    pass

if __name__ == "__main__":
    try:
        quex.DEFINITIONS.check()

        # (*) Test Exceptions __________________________________________________
        if exception_checker.do(sys.argv):
            # Done: Tests about exceptions have been performed
            pass
        # (*) Query ____________________________________________________________
        elif query.do(sys.argv):
            # Done: Queries about unicode sets and regular expressions
            pass
        # (*) The Real Job _____________________________________________________
        elif command_line.do(sys.argv):
            # To do: Interpret input files and generate code or drawings.
            core.do()
    except BaseException as instance:
        exception_checker.handle(instance)
If you are environmentally conscious, Rootstown(OH) funeral homes can assist you in planning a green burial for a deceased loved one. A natural or green burial is an alternative choice in funeral planning as the body is not prepped with embalming chemicals nor is it cremated. Going "green" is an environmental sustainability movement that goes for a more natural approach to death care. Funeral homes in Rootstown(OH) can guide you through a selection of caskets or even a burial shroud that is biodegradable. Concrete grave liners and burial vaults are eschewed to allow proper decomposition of the deceased and the natural return to the outside earthly elements. The phrase "ashes to ashes, dust to dust", tied irrevocably to death and funerals, truly fits the bill with a green burial. Green burials are not a new concept as it was the norm pre-19th century.Traditional embalming uses chemicals like formaldehyde, a component discovered in the latter part of the 19th century. However, the practice has ancient ties to the Han Dynasty in China and even the ancient Egyptians who used herbs and spices. Funeral homes in Rootstown(OH) will tell you that embalming became a popular method during the American Civil War as a way to preserve the bodies of dead soldiers being sent back home. So why are green burials making a comeback in the 21st century? A greater consciousness about environmental harm is the predominant driving force. Some people simply see it as the latest fad though and are just trying to buck traditional. Rootstown(OH) funeral homes have discovered a number of other reasons why more people are going green. In addition to a greater environmental consciousness, overall cost is a great motivator. Green burials do not involve embalming, decorative caskets or concrete liners or vaults - examples of some of the priciest funeral products. 
In some instances, when you simply cannot imagine the deceased not housed within some type of container against the elements, you can provide an alternative container of your choice to Rootstown(OH) funeral homes. There are very inexpensive biodegradable coffins for sale and even kits to craft one for a loved one. If the thought of millions of square feet of hardwood or tons of metal has you cringing, a green burial can aid in conserving natural resources. You can also protect the environment by declining the embalming process for the deceased. Chemicals from the process have the potential to leach into the soil, groundwater or even the sewer system. Keep in mind though that some funeral homes in Rootstown(OH) might require it if the funeral cannot happen within a reasonable amount of time or you request a public viewing before final disposition. Imagining your deceased loved one forever a part of a natural landscape can be another reason for choosing a green burial. Talk with the funeral directors of Rootstown(OH) funeral homes to see who can best help you choose a nature area to forever inter your loved one. There are different green cemeteries as well as ecological and wildlife preserves that allow for a green burial. Because green burial is enjoying resurgence, there are more options now than there were five or ten years ago. You will likely see green options on published general price lists of Rootstown(OH) funeral homes. In addition, casket and outer burial container price lists might have biodegradable containers and shrouds for the deceased. Some traditional cemeteries might have designated sections for green burials. There are also cemeteries designated as "green" only. Funeral homes in Rootstown(OH) may also steer you towards other green burial location alternatives such as park lands, habitat areas and conservation tracts specifically designated for environmentally friendly burials. 
Each green burial location might have different rules such as using natural rock as grave markers or even a discrete ground marker. Rootstown(OH) funeral homes can assist in finding the right green burial situation for your loved one. Depending on where you live, there might not be a location that supports truly green burials. In this instance, funeral homes in Rootstown(OH) can present to you some compromises that help you stay as true as possible to a green burial. For instance, a simple burial can be modified to be environmentally conscious by forgoing embalming and being buried in a shroud or a biodegradable coffin. This option might preclude a public viewing however. Rootstown(OH) funeral homes can help you locate a cemetery that does not require a burial vault. However, if this is not possible, inquire about whether the lid can be cast off and the vault turned upside down so that the deceased is exposed to soil bottom for better composition and return to the earth. If your desire is to have a green burial for a deceased loved one, funeral homes in Rootstown(OH) will work hard to help you realize it.
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Download (and unzip) VEX motor test data into data/vex/<motor>/."""
import logging
import os
import re
import requests
import zipfile
from pathlib import Path
from urllib.parse import urlparse

# yapf: disable
files = {'cim': ['https://content.vexrobotics.com/motors/217-2000-cim/cim-motor-curve-data-20151104.csv',
                 'https://content.vexrobotics.com/motors/217-2000-cim/cim-peak-power-data-20151104.csv',
                 'https://content.vexrobotics.com/motors/217-2000-cim/cim-locked-rotor-data-20151104.zip'],
         'mini-cim': ['https://content.vexrobotics.com/motors/217-3371-mini-cim/mini-cim-motor-curve-data-20151207.csv',
                      'https://content.vexrobotics.com/motors/217-3371-mini-cim/mini-cim-peak-power-data-20151207.csv',
                      'https://content.vexrobotics.com/motors/217-3371-mini-cim/mini-cim-locked-rotor-data-20151209-2.zip'],
         '775pro': ['https://content.vexrobotics.com/motors/217-4347-775pro/775pro-motor-curve-data-20151208.csv',
                    'https://content.vexrobotics.com/motors/217-4347-775pro/775pro-peak-power-data-20151210.csv',
                    'https://content.vexrobotics.com/motors/217-4347-775pro/775pro-locked-rotor-data-20151209.zip'],
         'bag': ['https://content.vexrobotics.com/motors/217-3351-bag/bag-motor-curve-data-20151207.csv',
                 'https://content.vexrobotics.com/motors/217-3351-bag/bag-peak-power-data-20151207.csv',
                 'https://content.vexrobotics.com/motors/217-3351-bag/bag-locked-rotor-data-20151207.zip']}
# yapf: enable


def file_exists(file_path):
    """Return True if `file_path` exists and is non-empty.

    Fix: the previous version stat()ed the literal name "file" instead of
    its argument, so it effectively never detected an existing download.
    """
    try:
        return os.stat(file_path).st_size != 0
    except FileNotFoundError:
        return False


def unzip_file(path):
    """Extract `path` next to itself if it is a '*.zip'; no-op otherwise."""
    path = Path(path)
    if path.suffix == '.zip':
        logging.info('Unzipping %s to %s', path, path.parent)
        with zipfile.ZipFile(str(path), 'r') as zip_ref:
            zip_ref.extractall(str(path.parent))


def download_file(motor, url):
    """Download `url` into the `motor` directory and return the local path.

    An existing non-empty file is not re-downloaded.
    """
    # Gets just the '*.csv' part
    fname = Path(urlparse(url).path).name
    fpath = '{0}/{1}'.format(motor, fname)
    logging.info('Downloading %s to %s', url, fpath)
    # Fix: check the destination path, not the bare filename (which would be
    # resolved relative to the current working directory).
    if not file_exists(fpath):
        r = requests.get(url)
        with open(fpath, 'wb') as dfile:
            dfile.write(r.content)
    return fpath


def download_files():
    """Download every motor's data files, unzipping archives in place."""
    directory = 'data/vex/'
    for motor in files.keys():
        mpath = directory + motor
        try:
            logging.info('Creating directory %s', str(mpath))
            os.makedirs(mpath)
        except FileExistsError:
            logging.info('Directory %s already exists', str(mpath))
        for url in files[motor]:
            fpath = download_file(mpath, url)
            unzip_file(fpath)


def main():
    download_files()


if __name__ == '__main__':
    main()
As a vegetarian, I am ashamed to say that despite hearing about the Meat-Free Monday campaign, I have never really looked into it as I already felt that I was doing my bit by being meat-free seven days a week. However my good friend and fellow vegetarian Marianne recently donned a T-Shirt and handed out leaflets next to a masquerading polar bear to raise awareness of the campaign, and so I thought I should find out what Meat-free Mondays are all about. The Meat Free Monday campaign is designed to reduce global CO2 emissions in order to slow the rate of climate change and protect the environment. It was launched by Sir Paul McCartney and his daughters Stella and Mary. Climate change could drive millions of the world’s species, including the polar bear to extinction. The group “Compassion in World Farming” has estimated that if the average UK household halved its consumption of meat, this would cut more emissions than if car use was cut in half. Cutting the use of dairy products would also help to reduce emissions and reduce climate change. My local town of St Albans may soon become the first Meat Free Monday local authority in the whole of the UK. A consultation has been started by the local council, suggesting that all publicly funded organizations in the city become meat-free on Mondays. This would mean that if approved, all hospitals, council buildings and school canteens would be a meat-free zone one day of the week. So why not consider having a meat free day yourself? With the price of meat rising each week, Meat-free Monday (or any other day) is good for your shopping bill, good for your health and also good for the environment. You can rally your local MP to become involved in the Meat-free Monday campaign through the website. I can report that the general public of St Albans is in general in favour of Meat Free Mondays. 
From the 30 or so people I personally questioned at the market on 16th April, most people thought it was a good idea and would be happy to try it. So far there have been over 700 responses to the questionnaire by St Albans residents, most of them positive. The results will be discussed at the next Council meeting on 12th May. Fingers crossed!
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Python Logger
-------------

Bridges Python's standard logging into Twisted.  Provides a
:class:`Logger` class and a :func:`getLogger` function that mirror the
built-in :mod:`logging` equivalents but forward every record to
Twisted's logging system, plus a handler (:class:`LogRecordToTwisted`)
that captures records emitted by ordinary Python loggers.
"""

from time import time
from logging import (
    NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL, Handler)

from twisted.python.log import msg


class Logger(object):
    """
    Drop-in replacement for an instance of :class:`logging.Logger` which,
    instead of handling messages itself, relays them all to Twisted's
    logging system.
    """
    def __init__(self, name):
        self.name = name
        self.disabled = False

    def _relay(self, level, message, args):
        # Single forwarding point for every level.  Skipped entirely when
        # disabled, so no timestamp is taken for suppressed messages.
        if self.disabled:
            return
        msg(message, args=args, system=self.name,
            time=time(), logLevel=level)

    def debug(self, message, *args):
        self._relay(DEBUG, message, args)

    def info(self, message, *args):
        self._relay(INFO, message, args)

    def warning(self, message, *args):
        self._relay(WARNING, message, args)

    def error(self, message, *args):
        self._relay(ERROR, message, args)

    def critical(self, message, *args):
        self._relay(CRITICAL, message, args)

    def fatal(self, message, *args):
        self._relay(FATAL, message, args)


class LogRecordToTwisted(Handler):
    """
    Handler that captures logging events from a standard Python logger
    and sends them into Twisted.  Twisted ships its own adapter for the
    :mod:`logging` library, but it does not translate everything
    directly, hence this class.
    """
    def __init__(self):
        # The Twisted-side observer performs the actual level handling,
        # but the Python logging machinery still expects these two
        # attributes to be present on every handler.
        self.level = NOTSET
        self.filters = []

    def emit(self, record):
        """
        Emits an instance of :class:`logging.LogRecord` into Twisted's
        logging system.
        """
        msg(record.msg,
            args=record.args,
            python_record=True,
            time=record.created,
            system=record.name,
            logLevel=record.levelno)

    # emit() delegates straight to Twisted's msg(), so the base Handler's
    # lock management is unnecessary; these overrides disable it.
    def acquire(self):
        pass

    def release(self):
        pass

    def createLock(self):
        pass

    def close(self):
        pass


def getLogger(name):
    """
    Analog to Python's :func:`logging.getLogger` except it returns
    instances of :class:`Logger` instead.
    """
    return Logger("pf.{0}".format(name))
Real estate here took another hit this week as Forbes, the U.S. publisher of authoritative business magazines, posted an account of Costa Rican property ownership problems. The thrust of the article by author Jesse Bogan is captured by the headline "Costa Rica is a retirement heaven — unless some squatters steal your land." The article contains individual accounts of landowners plagued by squatters or who are victims of registry fraud. The article is available online, but it is unclear if it will be carried in one of the Forbes printed publications. The article is nothing new to readers of A.M. Costa Rica, which has covered the perils of property ownership for years, but the author did provide a broad view. The article is sure to have influence with persons who might be thinking of purchasing property in Costa Rica. spot? Think hard — particularly as an absentee landlord. The agrarian law says that squatters can't be booted off unoccupied land without a court order. Moreover, if they stick around for a year they get the right to stay indefinitely, if no one evicts them, and after ten years of such de facto possession they can file for title on the land." He also cites the case of prominent businessman Armando González Fonseca, a Citibank Costa Rica board member, who just purchased the possession rights of a long-time squatter in Playa Herradura. The expat owners of the property involved have been fighting for years to regain control. Bogan also recounts violence related to hostile possession, including the case of Max Dalton, who died in a shootout during a confrontation with squatters near Pavones. First it was the lack of water behind dams at generation sites, and now the country's major electrical generator says that the high price of diesel will require it to impose planned blackouts next year. But the chamber that represents employers is unimpressed. The chamber wants more private hydro generating projects and also noted that the price of petroleum tumbled. 
The Instituto Costarricense de Electricidad is warning that it will have to institute rolling blackouts during the dry season. The warnings come at the same time that the government entity is seeking major rate hikes. The institute blames lack of money to purchase sufficient diesel fuel for backup power plants. The Unión Costarricense de Cámaras y Asociaciones del Sector Empresarial Privado disputes the reasoning behind the warnings and said that it has complained for years about weaknesses in the distribution system. The chamber also said it has urged the executive branch to do what it can to get the legislature to pass a bill that provides for more concessions for the private production of electricity. Approval of the measure will reduce the impact of petroleum prices on the rates electrical users pay. The chamber said it was concerned about the inflationary results of higher electrical rates which would be translated into demands for higher salaries by employees. It was in the first half of 2007 when low water levels caused the Instituto Costarricense de Electricidad to impose blackouts by regions. Officials said at the time that there was not enough water behind some dams to generate sufficient power. This year has seen above average rainfall, but the institute already has purchased petroleum fired backup generators to handle any deficiencies in hydropower. The chamber also said that part of the problem was poor planning on the part of the institute management. One major project to construct a new dam was the victim of Tropical Storm Alma last May. The institute had to evacuate about 1,100 workers from the Pirrís hydroelectric project May 29 because of the storm which then inflicted costly damage to the construction. The work suffered a major setback. The shutdown of the Crucitas open pit gold mine is an unintended consequence of a Sala IV constitutional court decision in mid-September.
At that time the court issued a protective order for the great green macaw and the almendro or mountain almond tree that the birds inhabit. There are an estimated 200 great green macaws in Costa Rica. But there are many more specimens of the almendro amarillo, which has the Latin name of Dipteryx panamensis. And some of these are on the site that Industrias Infinito S.A. seeks to use as a gold mining pit. The bird is called lapa verde in Spanish and has the Latin name of Ara ambigua. The September order by the constitutional court was comprehensive and covered all of Costa Rica, even though the mountain almond mainly is found in the northern zone. President Óscar Arias Sánchez and Roberto Dobles Mora, the environmental minister, directly contradicted the court's order when they issued a decree, published Friday, which allowed the gold mining company to remove trees from some 260 hectares or about 650 acres. The company started work Friday and stopped when officials were served papers Monday. A local environmentalist who has been fighting the mine filed the court case. This is why the nation's chief prosecutor has opened a criminal case against Arias and Dobles. The decision will revolve around the powers vested in the executive and judicial branches of the government. The court's order protecting the trees did not have any loopholes. So if a mountain almond tree is on a site, it may not be cut for any reason, according to the order. The order may freeze construction in many areas of the northern zone if it is found that officials of the Ministerio de Ambiente, Energía y Telecomunicaciones cannot selectively allow cutting. Infinito has promised to plant 50 trees for each one cut and has filed an environmental impact study promising rehabilitation of the land. Meanwhile, the Poder Judicial said that the prosecutor based in San Carlos near the mine appeared before a local judge Wednesday to prevent the lumbering of trees on the site. 
This is in addition to the Sala IV temporary order announced Monday. Infinito says that it was simply doing what the presidential order allowed its employees to do. The almendro tree was not commercially viable until the introduction of special carbon steel blades about 25 years ago due to the density of the wood. Some trees may be 50 meters, nearly 164 feet, tall. Transport officials said Wednesday that the Interamericana Sur highway, Route 2, would be closed from 7 o'clock tonight until 5 a.m. Sunday. Workers are installing a drainage system under the highway at La Cangreja, said the Consejo Nacional de Vialidad. Officials said that motorists could travel south via Desamparados in the Central Valley or via the Costenera Sur highway along the Pacific to Palmar Norte. The Desamparados route would be through Tarbaca, La Fila, Río Conejo, Frailes and then either San Cristóbal or Los Santos to rejoin the Interamerican Sur south of the construction site. The project is valued at about 15 million colons or about $27,300, officials said. Someone murdered a Grecia man, and one of his employees has been detained after he was found behind the wheel of the dead man's crashed Toyota Prado. The 59-year-old dead man was identified by his last name of Murillo. The man was found outside his home in Grecia Wednesday morning. Investigators found the interior of the home disturbed as if the murderer was seeking something. The victim had just purchased the vehicle. The suspect, a man of 20 years, was noticed because the vehicle was involved in a traffic accident in Argentina de Grecia a short time after the murder. When officers of the Policía de Tránsito checked the license plate number they realized that the vehicle belonged to the dead man. The suspect lived on the same property as the murdered man. The minimum wage employers must pay their workers is going up 7 percent Jan. 1, but the increase is much less than employee negotiators sought. Workers sought more than 16 percent. 
The increase covers the period from Jan. 1 to June 30, 2009. The percentage is less than half of the estimated inflation for 2008. The wages are set by the Consejo de Salarios after negotiations between employee and employer groups. Officials announced the wage agreement Wednesday afternoon. For the last half of 2008 workers got a 6.58 percent increase in the minimum salaries. The Unión Costarricense de Cámaras y Asociaciones del Sector Empresarial Privado said that perhaps 1.5 million workers would be affected. Many in Costa Rica work at the government established minimum wages which are set for each job category. was hurting because of the tightening of credit due to the world economic situation. It said there was a direct relationship between borrowing and unemployment. The Unión de Cámaras also said that uncertainty exists about when and if the free trade treaty with the United States, Central America and the Dominican Republic would enter into force. The salary increases are not the only amounts employers must pay. An increase in salary means an increase in worker insurance and in payments to the Caja Costarricense de Seguro Social that companies must pay. Worker risk insurance is going up overall about 10 percent, but what each employer pays is indexed to the salaries of employees. Under the agreement a carpenter, for example, who now makes 6,686 colons a day minimum will make 7,154 colons. A computer programmer will go from 8,032 a day to 8,594 colons daily minimum. Salaries are adjusted every six months to account for inflation and the devaluation of the colon, which was at 550.1 to the U.S. dollar Wednesday. Feeling guilty about driving that gas-guzzling SUV, but not quite ready to give it up? San José residents may soon ease their environmental conscience with trips to Suds Carwash, a company that uses cutting-edge green technology to leave vehicles sparkling clean. 
General manager and owner Gary Mick said that after traveling around Costa Rica, he witnessed his fair share of painfully slow car washes that made a dirty job out of what should be a clean business. According to Mick, industrial car washes usually use 140 gallons of fresh water per vehicle. At Suds, cars are washed with the same amount of water, but only 10 of those gallons consist of fresh water. The rest is recycled water that has been carefully filtered and cleaned in eight different stages of treatment. filtration system that uses UV technology to place a magnetic charge on any material waste, which is then removed by multiple filters. “At any given time we have 12,000 gallons of stored water that's always in the process of being cleaned,” said Mick. In addition to using 75 percent less electricity than other car washes, the company also uses environmentally friendly cleaning fluids and biodegradable soap. Most of the green products are imported from the U.S., but Mick said he hopes that by striking a deal with a local manufacturer, they will soon be able to buy the products locally. One branch in Los Yoses and another in Guadalupe are set to be running in full capacity after Oct. 31. With an exterior car wash taking less than five minutes, compared to what would take 15 to 18 minutes to wash by hand, Mick said he believes the company will be a hit among an increasingly time-conscious Tico population. There's been a lot of press coverage through the years of the crime problems on the Caribbean, and particularly in the tourist town of Puerto Viejo. To be sure, Puerto Viejo has problems with crime, but what community in Costa Rica doesn't? In fact, what community anywhere doesn't have a crime problem of one sort or another? I've read and heard that more than a few people feel the Caribbean and Puerto Viejo are losing their "pura vida" as a result of crime. 
Well, I've been back a month now and I, too, am worried that Puerto Viejo is losing its "pura vida," but I now see that being as much a result of the two "D's" as I do it being because of crime. What, some may be asking, are the two "D's"? It's simple: development and dust! When I came to PV ten years ago, I did so with a friend who, at that time, had owned her property on what is known as Margarita Road for seven years. Margarita Road was, truly, a road into the jungle. Today, as I learned on a recent bike ride, Margarita Road looks more like the roads leading into Topanga and Blueberry canyons in southern California than it does the road into the jungle I experienced just 10 years ago. Many recently arrived bourgeois bohemian expats who claim to subscribe to the principles of "pura vida," are proving by their actions that their commitment to "pura vida" is only as deep as their need to recreate the lives they claim they wanted to escape in the United States or Europe. Margarita Road is sad testimony to that reality. But an even more immediate threat to "pura vida," literally and figuratively, is the dust. The roads here have always been rough. Long stretches of dirt and semi-paved roads, riddled with potholes the size of lunar craters, are the rule, not the exception. But until about five or six years ago, the number of cars here was pretty small. But in the last several years, the number of cars has exploded and, with that explosion, the dust problem has become, for lack of a better word, suffocating. allowing this dangerous health issue to go unaddressed. There is nothing "pura vida" anymore about taking a bike ride from Puerto Viejo to Manzanillo. Because of the dust, it is a serious health hazard. Cars, trucks, and Status Utility Vehicles, driven by Ticos and bourgeois bohemian expats alike, careen down the road at breakneck speeds, kicking up clouds of dust so thick they often look like smoke from a raging fire.
As a result, any person walking or riding a bicycle is inhaling huge amounts of dust particles and other dangerous materials that have no place in a person's lungs. The other day, on my way to the pulperia, I passed a young Gringa carrying an infant through a cloud of dust so thick I was coughing as I passed through it. There is no way exposure to that level of dust didn't pose a serious health threat to all of us, but especially to that infant. I can only wonder what the rates of asthma and other bronchial health issues among local kids and old folks must be. A good friend of mine who was born and bred here, sustained an eye infection as a result of dust getting in behind his contact lense while he was riding his bicycle. Even though he was wearing sunglasses, the dust was just too much. As a result, he rarely rides his bike anymore in the place where he was born. How sad is that? And in addition to the dust, another danger for the bicyclist or pedestrian are the "bullets" those careening vehicles often send shooting through the air. By bullets, I mean rocks that, when thrown up into the air by the rear tires of speeding vehicles, are, potentially, every bit as deadly as a real bullet shot out of a gun. Two years ago, one of those "bullets" hit and pierced the wire mesh basket on my bike. Had it been thrown just a few inches higher, it would have hit me squarely in the abdomen or chest — something I prefer not to think about. "Pura vida" is, indeed, under siege here and crime is an element of that reality. But anyone who minimizes the threat the two "D's" pose to "pura vida" is living in a state of mind defined by another word that begins with "D" — Denial. That's why I spend so much time on the beach and in the ocean where the issues of crime and the two "D's" can, at least for a time, be put on the back burner and "pura vida" still thrives. 
Two gambling house employees have been detained on the allegation that they were stealing from bettors by cloning their debit or credit cards. The Judicial Investigating Organization said that the victims were mostly all foreigners with a lot of disposable cash so they did not miss the money immediately. The money was taken from various automatic teller machines in amounts usually less than $800, investigators said. Agents said the men obtained the information used to clone the credit cards where they worked. The men, 24 and 34 years, came into police hands Tuesday night at an automatic teller machine in Sabana Oeste, agents said. The thieves who took money out of the machines with cloned credit cards wore masks to avoid being recognized by the surveillance cameras. One of the suspects carried a firearm, agents said, adding that they also found a mask. A local firm has put up a Web site to help motorists plan their trips here. The first route planner for Costa Rica, programmed by Tecno Alianza, a company based in San José, is free. Planning a trip in Costa Rica and estimating driving times is not easy especially when you are a foreigner, said Morten Sonntag, a spokesman for the firm. The online, free route planner from YourTravelmap.com is helpful when it comes to driving times and driving directions, he added in a release. Motorists just have to select the starting point and destinations, then the directions, driving times, distance and the route on a map will be displayed, said Sonntag. In addition the coordinates and the altitude of the chosen destination are provided. Users can choose between kilometers and miles, Sonntag added. The site also has a zoomable map. The application was programmed in Costa Rica and is online in three languages: English, German and Spanish. Colombian authorities have announced the dismantling of an international drug and money-laundering ring that had alleged ties to Hezbollah guerrillas in the Middle East. 
The attorney general's office says an international sting led to the arrest of nearly 100 suspects in Colombia and other countries. Authorities say those arrested include three suspects from the Middle East who allegedly used profits from the drug trade to fund Hezbollah. Hezbollah is a Lebanon-based Shi'ite militant group that the United States considers a terrorist organization. Colombian officials say the drug ring used routes in Venezuela, Panamá, Guatemala, Europe and the Middle East.
from __future__ import absolute_import
from webassets.filter import Filter


__all__ = ('Jinja2',)


class Jinja2(Filter):
    """Run each input file through the Jinja2 templating engine.

    Requires the ``jinja2`` package (https://github.com/mitsuhiko/jinja2).

    The rendering context can be supplied either via the
    ``JINJA2_CONTEXT`` configuration option or directly as
    ``context={...}``:

    .. code-block:: python

        Bundle('input.css', filters=Jinja2(context={'foo': 'bar'}))

    To reuse your project's template-loading machinery, pass an existing
    environment via ``JINJA2_ENV`` or the ``jinja2_env`` argument.
    """
    name = 'jinja2'
    max_debug_level = None
    options = {
        'context': 'JINJA2_CONTEXT',
        'jinja2_env': 'JINJA2_ENV'
    }

    def setup(self):
        # Import lazily so the filter can be registered without jinja2
        # installed; fail loudly only when it is actually used.
        try:
            import jinja2
        except ImportError:
            raise EnvironmentError('The "jinja2" package is not installed.')
        self.jinja2 = jinja2
        super(Jinja2, self).setup()

    def input(self, _in, out, **kw):
        # Prefer the user-supplied environment (with its loaders etc.);
        # fall back to a bare Template otherwise.
        if self.jinja2_env:
            make_template = self.jinja2_env.from_string
        else:
            make_template = self.jinja2.Template
        template = make_template(_in.read())
        out.write(template.render(self.context or {}))
The feed hasn't come up on regular antenna over the air, either. Me too, EZ, and I'm surprised by that, since she is supposed to be their star witness. Gur: "I've said too much" Just noticed that too @Tammy, no comment button. @Tammy I've had problems with comments today. i think i like it better like this anyway, anica. Probably easier for you to maintain as well. I'm glad I wasn't here for the bickering yesterday. good thinking, @anica. yesterday got way out of hand. Not sure why court has not resumed yet. We are still seeing the Colorado Courts slate on the feed. People will still be antagonistic without the comment field, but the comments do add some humor/drama to the mix. Is somebody at the courthouse to check the "switch"? I think if Dr Gur is made a hostile witness then what about the hostile prosecutor. I still don't understand why DT is focusing on the November event. It speaks to absolutely nothing related to the planning and actual shooting, IMO. I am getting so frustrated with them. Grrrrr. Thank you Anica. I am not an expert on blogging but I think you're doing a fantastic job of keeping us up to date and on point. Thanks again! @ DocJ - over the air antenna does work it's channel 9-3. Not the quality of picture that D7 has. Suzanne, is it on or are you getting the same signal as D7? I think of it this way as far as the prosecutor goes: He is speaking/fighting for the people that died and cannot speak/fight for themselves. He also speaks for all the injured and the families of the dead and injured. That's a heavy weight and I can see how that would fire you up. Big responsibility. @Suzanne I like my antenna - I can see things better than on the little iPad :) Just noting that the feed hasn't come up on even the antenna, and I usually get it before D7, due to routing issues for them. There were over 200 votes on the poll yesterday about whether Dr. Gur was holding her own against AC with "no" ahead by about 20% I think.
i think this trial starts late more often than it starts on time. whether it's coming back from lunch or breaks or starting in the morning. lol. @kristle i think the final result was 148/72 on no/yes respectively and the rest were unsure and there were a lot of unsures. Late today. Anyone fill me in on what's going on? Why no court? Boy this is the first time we haven't been able to see in the courtroom with such a loooong delay. Phil is doing an awesome job. I couldn't fill like Phil! @ Phil - way to go! I love how you guys in the media can work on the fly! @ Tammy G - I get both. Note: nothing is showing at this moment. I just jump on when there is spotty coverage with D7. I don't know where 9-3 gets its signal. Oh my goodness, I need my court fix! Come on! Maybe a juror needed a second interview? Agree @blue, thanks for breaking it down. I'd love to see Dan King questioned by Dr Gur! Maybe Dr. Gur is looking for her notes? If anyone is curious about typical juror behavior, follow this blog.
import subprocess
import os

from django.conf import settings
import numpy as np
import pysam

from genome_finish import __path__ as gf_path_list
from genome_finish.insertion_placement_read_trkg import \
        extract_left_and_right_clipped_read_dicts
from main.models import Dataset
from main.models import Variant
from main.models import VariantSet
from utils.bam_utils import clipping_stats
from variants.variant_sets import update_variant_in_set_memberships

GENOME_FINISH_PATH = gf_path_list[0]
VELVETH_BINARY = settings.TOOLS_DIR + '/velvet/velveth'
VELVETG_BINARY = settings.TOOLS_DIR + '/velvet/velvetg'


def get_altalign_reads(input_bam_path, output_bam_path, xs_threshold=None):
    """Write reads whose alternative alignment is at least as good as their
    primary alignment (XS tag >= AS tag) to a new BAM.

    Args:
        input_bam_path: Path of the BAM to read.
        output_bam_path: Path of the BAM to create.
        xs_threshold: NOTE(review): accepted but never used — confirm intent.
    """
    input_af = pysam.AlignmentFile(input_bam_path, 'rb')
    output_af = pysam.AlignmentFile(output_bam_path, 'wb', template=input_af)
    for read in input_af:
        if read.has_tag('XS') and read.has_tag('AS'):
            # XS is the suboptimal alignment score; when it matches or beats
            # AS, the read maps equally well somewhere else.
            if read.get_tag('AS') <= read.get_tag('XS'):
                output_af.write(read)
    output_af.close()
    input_af.close()


def get_piled_reads(input_bam_path, output_bam_path, clipping_threshold=None):
    """Creates bam of reads that have more than clipping_threshold bases of
    clipping and are stacked 3 standard deviations higher than the average
    pileup of clipped reads.

    If no clipping_threshold specified, clipping stats for the alignment are
    calculated and the clipping_threshold is set to the mean + one stddev of
    the per read clipping of a sample of 10000 reads.
    """
    if clipping_threshold is None:
        stats = clipping_stats(input_bam_path, sample_size=10000)
        clipping_threshold = int(stats['mean'] + stats['std'])

    input_af = pysam.AlignmentFile(input_bam_path, 'rb')
    output_af = pysam.AlignmentFile(output_bam_path, 'wb', template=input_af)

    lr_clipped = extract_left_and_right_clipped_read_dicts(
            input_af, clipping_threshold=clipping_threshold)
    input_af.close()

    for clipped_dict in [
            lr_clipped['left_clipped'], lr_clipped['right_clipped']]:
        # BUG FIX: materialize the map so np.mean/np.std work on Python 3,
        # where map() returns a one-shot iterator rather than a list.
        stack_counts = list(map(len, clipped_dict.values()))
        mean_stacking = np.mean(stack_counts)
        std_stacking = np.std(stack_counts)
        # Only keep stacks of clipped reads that are unusually tall.
        stacking_cutoff = mean_stacking + 3 * std_stacking

        for read_list in clipped_dict.values():
            if len(read_list) > stacking_cutoff:
                for read in read_list:
                    output_af.write(read)

    output_af.close()


def get_clipped_reads_smart(input_bam_path, output_bam_path,
                            clipping_threshold=8, phred_encoding=None):
    """Gets reads not overlapping their adaptor with a terminal segment of
    clipping with average phred scores above the cutoff.
    """
    # Quality values are shifted depending on the FASTQ phred encoding.
    phred_encoding_to_shift = {
        'Illumina 1.5': 31,
        'Sanger / Illumina 1.9': 0
    }
    CLIPPED_AVG_PHRED_CUTOFF = 20
    if (phred_encoding is not None and
            phred_encoding in phred_encoding_to_shift):
        CLIPPED_AVG_PHRED_CUTOFF += phred_encoding_to_shift[phred_encoding]

    # CIGAR operation codes for soft and hard clipping.
    SOFT_CLIP = 4
    HARD_CLIP = 5
    CLIP = [SOFT_CLIP, HARD_CLIP]

    input_af = pysam.AlignmentFile(input_bam_path, 'rb')
    output_af = pysam.AlignmentFile(output_bam_path, 'wb', template=input_af)
    for read in input_af:
        # If no cigartuples, i.e. unmapped, continue
        if read.cigartuples is None:
            continue
        if read.is_secondary or read.is_supplementary:
            continue

        # TODO: Account for template length
        # adapter_overlap = max(
        #         read.template_length - query_alignment_length, 0)

        # Determine left and right clipped counts
        left_clipping = (read.cigartuples[0][1]
                         if read.cigartuples[0][0] in CLIP else 0)
        right_clipping = (read.cigartuples[-1][1]
                          if read.cigartuples[-1][0] in CLIP else 0)

        # Write reads to file if clipped bases have average phred score
        # above cutoff
        if left_clipping > clipping_threshold:
            clipped_phred_scores = read.query_qualities[:left_clipping]
            if np.mean(clipped_phred_scores) > CLIPPED_AVG_PHRED_CUTOFF:
                output_af.write(read)
                continue
        if right_clipping > clipping_threshold:
            clipped_phred_scores = read.query_qualities[-right_clipping:]
            if np.mean(clipped_phred_scores) > CLIPPED_AVG_PHRED_CUTOFF:
                output_af.write(read)
                continue

    output_af.close()
    input_af.close()


def get_unmapped_reads(bam_filename, output_filename, avg_phred_cutoff=None):
    """Extract unmapped reads (SAM flag 0x4) into a new BAM, optionally
    dropping read pairs whose average phred score is below avg_phred_cutoff.
    """
    if avg_phred_cutoff is not None:
        # Write unfiltered output to a side file, then filter it below.
        intermediate_filename = '_unfiltered'.join(
                os.path.splitext(output_filename))
    else:
        intermediate_filename = output_filename

    cmd = '{samtools} view -h -b -f 0x4 {bam_filename}'.format(
            samtools=settings.SAMTOOLS_BINARY,
            bam_filename=bam_filename)
    with open(intermediate_filename, 'w') as output_fh:
        subprocess.call(
                cmd, stdout=output_fh, shell=True,
                executable=settings.BASH_PATH)

    if avg_phred_cutoff is not None:
        filter_low_qual_read_pairs(intermediate_filename, output_filename,
                avg_phred_cutoff)


def add_paired_mates(input_bam_path, source_bam_filename, output_bam_path):
    """Write every primary alignment from source_bam_filename whose qname
    appears in input_bam_path, so that both mates of each pair are present.

    The output is SAM with header ('wh' mode), matching the original code —
    NOTE(review): confirm downstream consumers expect SAM rather than BAM.
    """
    # Collect the qnames present in the input BAM.
    bam_file = pysam.AlignmentFile(input_bam_path)
    input_qnames_to_read = {}
    for read in bam_file:
        input_qnames_to_read[read.qname] = True
    bam_file.close()

    original_alignmentfile = pysam.AlignmentFile(source_bam_filename, "rb")
    output_alignmentfile = pysam.AlignmentFile(
            output_bam_path, "wh", template=original_alignmentfile)
    for read in original_alignmentfile:
        if input_qnames_to_read.get(read.qname, False):
            if not read.is_secondary and not read.is_supplementary:
                output_alignmentfile.write(read)
    output_alignmentfile.close()
    original_alignmentfile.close()


def filter_out_unpaired_reads(input_bam_path, output_bam_path):
    """Write only reads that have exactly two primary alignments (i.e. both
    mates of the pair present) from input_bam_path to output_bam_path.
    """
    input_af = pysam.AlignmentFile(input_bam_path, 'rb')

    # Build qname -> flag list dictionary
    read_flags = {}
    for read in input_af:
        if read.qname not in read_flags:
            read_flags[read.qname] = [read.flag]
        else:
            read_flags[read.qname].append(read.flag)

    # Build qname -> is_paired dictionary
    reads_with_pairs = {}
    not_primary_alignment_flag = 256
    supplementary_alignment_flag = 2048
    for qname, flags in read_flags.items():
        primary_count = 0
        for f in flags:
            if (not (f & not_primary_alignment_flag) and
                    not (f & supplementary_alignment_flag)):
                primary_count += 1
        if primary_count == 2:
            reads_with_pairs[qname] = True

    # Write reads in input to output if both mates are present
    output_af = pysam.AlignmentFile(output_bam_path, "wb", template=input_af)
    input_af.reset()
    for read in input_af:
        if read.qname in reads_with_pairs:
            output_af.write(read)

    output_af.close()
    input_af.close()


def filter_low_qual_read_pairs(input_bam_path, output_bam_path,
                               avg_phred_cutoff=20):
    """Filters out reads with average phred scores below cutoff.

    TODO: use `bwa sort -n file` to stdout to remove the need to use a
    dictionary with readnames.
    """
    # Put qnames with average phred scores below the cutoff into dictionary
    bad_quality_qnames = {}
    input_af = pysam.AlignmentFile(input_bam_path, "rb")
    for read in input_af:
        avg_phred = np.mean(read.query_qualities)
        if avg_phred < avg_phred_cutoff:
            # Flag the whole pair: any read sharing this qname is dropped.
            bad_quality_qnames[read.qname] = True
    input_af.close()

    # Write reads in input to output if not in bad_quality_qnames
    input_af = pysam.AlignmentFile(input_bam_path, "rb")
    output_af = pysam.AlignmentFile(output_bam_path, "wb", template=input_af)
    for read in input_af:
        if not bad_quality_qnames.get(read.qname, False):
            output_af.write(read)
    output_af.close()
    input_af.close()


def create_de_novo_variants_set(alignment_group, variant_set_label,
        callers_to_include=None):
    """Put all the variants generated by VCFs which have INFO__METHOD values
    in callers_to_include into a new VariantSet

    Args:
        alignment_group: An AlignmentGroup instance
        variant_set_label: A label for the new VariantSet
        callers_to_include: INFO__METHOD values to select variants with.
            Defaults to ['DE_NOVO_ASSEMBLY', 'GRAPH_WALK', 'ME_GRAPH_WALK'].

    Returns:
        variant_set: The VariantSet instance created
    """
    # BUG FIX: a mutable default argument would be shared across calls.
    if callers_to_include is None:
        callers_to_include = ['DE_NOVO_ASSEMBLY', 'GRAPH_WALK',
                'ME_GRAPH_WALK']

    ref_genome = alignment_group.reference_genome

    # Get de novo variants
    de_novo_variants = []
    for variant in Variant.objects.filter(
            reference_genome=ref_genome):
        for vccd in variant.variantcallercommondata_set.all():
            if vccd.data.get('INFO_METHOD', None) in callers_to_include:
                de_novo_variants.append(variant)
                # BUG FIX: was `continue`, which kept scanning and could
                # append the same variant once per matching caller record.
                break

    # BUG FIX: the label was hard-coded to 'de_novo_variants', silently
    # ignoring the variant_set_label argument.
    variant_set = VariantSet.objects.create(
            reference_genome=ref_genome,
            label=variant_set_label)

    update_variant_in_set_memberships(
            ref_genome,
            [variant.uid for variant in de_novo_variants],
            'add',
            variant_set.uid)

    return variant_set


def get_coverage_stats(sample_alignment):
    """Returns a dictionary with chromosome seqrecord_ids as keys and
    subdictionaries as values.

    Each subdictionary has three keys: length, mean, and std which hold the
    particular chromosome's length, mean read coverage, and standard
    deviation of read coverage
    """
    # Return the cached result if it was computed before.
    maybe_chrom_cov_dict = sample_alignment.data.get('chrom_cov_dict', None)
    if maybe_chrom_cov_dict is not None:
        return maybe_chrom_cov_dict

    bam_path = sample_alignment.dataset_set.get(
            type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()
    alignment_af = pysam.AlignmentFile(bam_path)
    chrom_list = alignment_af.references
    chrom_lens = alignment_af.lengths

    c_starts = [0] * len(chrom_list)
    c_ends = chrom_lens

    # Per-chromosome list of per-position read depths.
    chrom_cov_lists = []
    for chrom, c_start, c_end in zip(chrom_list, c_starts, c_ends):
        chrom_cov_lists.append([])
        cov_list = chrom_cov_lists[-1]
        for pileup_col in alignment_af.pileup(chrom, start=c_start,
                end=c_end, truncate=True):
            depth = pileup_col.nsegments
            cov_list.append(depth)
    alignment_af.close()

    # BUG FIX: comprehensions instead of map objects so this works on
    # Python 3 (np.mean on a map object raises).
    sub_dict_list = [
            {'length': length, 'mean': np.mean(cov), 'std': np.std(cov)}
            for length, cov in zip(chrom_lens, chrom_cov_lists)]
    chrom_cov_dict = dict(zip(chrom_list, sub_dict_list))

    # Cache the result on the sample alignment.
    sample_alignment.data['chrom_cov_dict'] = chrom_cov_dict
    sample_alignment.save()

    return chrom_cov_dict


def get_avg_genome_coverage(sample_alignment):
    """Returns a float which is the average genome coverage, calculated as
    the average length-weighted read coverage over all chromosomes
    """
    coverage_stats = get_coverage_stats(sample_alignment)

    len_weighted_coverage = 0
    total_len = 0
    for sub_dict in coverage_stats.values():
        length = sub_dict['length']
        avg_coverage = sub_dict['mean']
        len_weighted_coverage += length * avg_coverage
        total_len += length

    return float(len_weighted_coverage) / total_len
A police-mad little boy who wanted blues and twos for his birthday was set to be disappointed — until big-hearted officers stepped in to make his day. Mom Catherine Stewart said she had approached the police service to see if they could spare an officer to attend the party — but was told none were available. After she posted a plea on Facebook, police media relations manager Dwayne Caines stepped in to help Easton mark his 3rd birthday in style with flashing blue lights and sirens — “blues and twos” — at a party at the family’s home in Smith’s yesterday. Ms Stewart said: “We were just hoping for maybe an officer with a couple of flashing lights to stop by and say hi. Ms Stewart, 32, was speaking after a team of motorcycle officers surprised Easton at his party. Mr Caines said he had been made aware of Ms Stewart’s post by a member of the public and later tagged on the post by another. He added: “I took the responsibility of making contact with a police officer to see if anyone was interested. Mr Caines said several of the volunteers were on parade for the party. He added that six motorcycle officers from the roads policing unit on duty yesterday for the Bermuda Marathon Weekend also agreed to give up their break time to visit little Easton. Calvin Smith, staff officer to Commissioner of Police Stephen Corbishley, said community work was a regular part of the role of officers. Mr Smith said: “We’re a part of the community — we don’t just do the law enforcement part. Mr Smith said the officers who attended the party enjoyed the experience. He added: “This is something Easton will remember for the rest of his life. Mr Smith added that the party was also an opportunity to help forge good relationships with members of the community at a young age.
import logging
import random
import time

import vlc

import constants

log = logging.getLogger(__file__)


class HorrorsEar(object):
    """Plays looping background audio and one-off scare tracks via VLC."""

    def __init__(self):
        self.background_instance = vlc.Instance()
        self.background_tracks = self.background_instance.media_list_new()
        self.background_player = \
            self.background_instance.media_list_player_new()
        # Deliberately stores the MediaPlayer *class*: a fresh player is
        # instantiated (and released) for every scare in scare().
        self.scare_player = vlc.MediaPlayer

    def begin(self):
        """Start looping all background tracks."""
        # FIX: plain loop instead of a list comprehension that was used
        # purely for its side effects.
        for track in constants.BACKGROUND_TRACKS:
            self.background_tracks.add_media(
                self.background_instance.media_new(track))
        self.background_player.set_media_list(self.background_tracks)
        self.background_player.set_playback_mode(vlc.PlaybackMode.loop)
        self.background_player.play()

    def scare(self):
        """Play one randomly chosen scare track to completion (blocking)."""
        track = random.choice(constants.SCARE_TRACKS)
        log.info('playing scare: {}'.format(track))
        player = self.scare_player(track)
        player.play()
        # Give VLC a moment to actually begin playback before polling.
        time.sleep(1)
        # FIX: poll with a short sleep instead of busy-spinning a CPU core
        # with `continue` until the track finishes.
        while player.is_playing():
            time.sleep(0.1)
        player.release()

    def end(self):
        """Stop background playback and release all VLC resources."""
        self.background_player.stop()
        self.background_instance.release()
        self.background_player.release()
* Tax rates. The new law imposes a new tax rate structure with seven tax brackets: 10%, 12%, 22%, 24%, 32%, 35%, and 37%. The top rate was reduced from 39.6% to 37% and applies to taxable income above $500,000 for single taxpayers, and $600,000 for married couples filing jointly. The rates applicable to net capital gains and qualified dividends were not changed. The “kiddie tax” rules were simplified. The net unearned income of a child subject to the rules will be taxed at the capital gain and ordinary income rates that apply to trusts and estates. Thus, the child’s tax is unaffected by the parent’s tax situation or the unearned income of any siblings. *Standard deduction. The new law increases the standard deduction to $24,000 for joint filers, $18,000 for heads of household, and $12,000 for singles and married taxpayers filing separately. Given these increases, many taxpayers will no longer be itemizing deductions. These figures will be indexed for inflation after 2018. *Exemptions. The new law suspends the deduction for personal exemptions. Thus, starting in 2018, taxpayers can no longer claim personal or dependency exemptions. The rules for withholding income tax on wages will be adjusted to reflect this change, but IRS was given the discretion to leave the withholding unchanged for 2018. *New deduction for “qualified business income.” Starting in 2018, taxpayers are allowed a deduction equal to 20 percent of “qualified business income,” otherwise known as “pass-through” income, i.e., income from partnerships, S corporations, LLCs, and sole proprietorships. The income must be from a trade or business within the U.S. Investment income does not qualify, nor do amounts received from an S corporation as reasonable compensation or from a partnership as a guaranteed payment for services provided to the trade or business. The deduction is not used in computing adjusted gross income, just taxable income. 
For taxpayers with taxable income above $157,500 ($315,000 for joint filers), (1) a limitation based on W-2 wages paid by the business and depreciable tangible property used in the business is phased in, and (2) income from the following trades or businesses is phased out of qualified business income: health, law, consulting, athletics, financial or brokerage services, or where the principal asset is the reputation or skill of one or more employees or owners. *Child and family tax credit. The new law increases the credit for qualifying children (i.e., children under 17) to $2000 from $1000, and increases to $1400 the refundable portion of the credit. It also introduces a new (nonrefundable) $500 credit for a taxpayer’s dependents who are not qualifying children. The adjusted gross income level at which the credits begin to be phased out has been increase to $200,000 ($400,000 for joint filers). *State and local taxes. The itemized deduction for state and local income and property taxes is limited to a total of $10,000 starting in 2018. *Mortgage interest. Under the new law, mortgage interest on loans used to acquire a principal residence and a second home is only deductible on debt up to $750,000 (down from $1 million), starting with loans taken out in 2018. And there is no longer any deduction for interest on home equity loans, regardless of when the debt was incurred. *Miscellaneous itemized deductions. There is no longer a deduction for miscellaneous itemized deductions which were formerly deductible to the extent they exceeded 2 percent of adjusted gross income. This category included items such as tax preparation costs, investment expenses, union dues, and unreimbursed employee expenses. *Medical expenses. Under the new law, for 2017 and 2018, medical expenses are deductible to the extent they exceed 7.5 percent of adjusted gross income for all taxpayers. Previously, the AGI “floor” was 10% for most taxpayers. *Casualty and theft losses. 
The itemized deduction for casualty and theft losses has been suspended except for losses incurred in a federally declared disaster. *Overall limitation on itemized deductions. The new law suspends the overall limitation on itemized deductions that formerly applied to taxpayers whose adjusted gross income exceeded specified thresholds. The itemized deductions of such taxpayers were reduced by 3% of the amount by which AGI exceeded the applicable threshold, but the reduction could not exceed 80% of the total itemized deductions, and certain items were exempt from the limitations. *Moving expenses. The deduction for job-related moving expenses has been eliminated, except for certain military personnel. The exclusion for moving expense reimbursements has also been suspended. *Alimony. For post-2018 divorce decrees and separation agreements, alimony will not be deductible by the paying spouse and will not be taxable to the receiving spouse. *Health care “individual mandate.” Starting in 2019, there is no longer a penalty for individuals who fail to obtain minimum essential health coverage. *Estate and gift tax exemption. Effective for decedents dying, and gifts made, in 2018, the estate and gift tax exemption has been increased to roughly $11.2 million ($22.4 million for married couples). *Alternative minimum tax (AMT) exemption. The AMT has been retained for individuals by the new law but the exemption has been increased to $109,400 for joint filers ($54,700 for married taxpayers filing separately), and $70,300 for unmarried taxpayers. The exemption is phased out for taxpayers with alternative minimum taxable income over $1 million for joint filers, and over $500,000 for all others.
import asyncio
import base64
import json
import pyarchy
import socket
import struct
import time

from . import constants, security


class Datagram(object):
    """A JSON-serializable message routed between nodes."""

    @classmethod
    def from_string(cls, str_: str):
        """Deserialize a Datagram from its JSON string form.

        A datagram stamped in the future (timestamp >= now) is rejected and
        replaced by an empty Datagram.
        """
        dg = cls(**json.loads(str_))

        # Verify timestamp
        if dg.timestamp >= time.time():
            return cls()
        else:
            return dg

    def __init__(self,
                 command: int = None, sender: str = None,
                 recipient: str = None, data: str = None,
                 hmac: str = None, timestamp: float = None):
        object.__init__(self)
        # Coerce provided values; leave falsy/absent values untouched.
        self.__command = int(command) if command else command
        self.__sender = str(sender) if sender else sender
        self.__recipient = str(recipient) if recipient else recipient
        self.__data = data
        self.__hmac = str(hmac) if hmac else hmac
        # Default the timestamp to creation time.
        self.__ts = float(timestamp) if timestamp is not None else time.time()

    def __str__(self):
        return json.dumps({
            'command': self.command,
            'sender': self.sender,
            'recipient': self.recipient,
            'data': self.data,
            'hmac': self.hmac,
            'timestamp': self.timestamp,
        })

    @property
    def command(self) -> int:
        return self.__command

    @command.setter
    def command(self, command):
        self.__command = int(command)

    @property
    def sender(self) -> str:
        return self.__sender

    @property
    def recipient(self) -> str:
        return self.__recipient

    @recipient.setter
    def recipient(self, recipient: str):
        self.__recipient = str(recipient)

    @property
    def route(self) -> tuple:
        # (sender, recipient) pair.
        return (self.sender, self.recipient)

    @property
    def data(self):
        return self.__data

    @data.setter
    def data(self, data):
        if isinstance(data, bytes):
            self.__data = data.decode()
        else:
            self.__data = data

    @property
    def hmac(self) -> str:
        return self.__hmac

    @property
    def timestamp(self) -> float:
        return self.__ts


class Node(security.KeyHandler, pyarchy.common.ClassicObject):
    """One endpoint of an encrypted stream connection.

    Wire format: a 4-byte network-order length prefix followed by an
    encrypted, base85-encoded JSON Datagram.
    """

    def __init__(self, stream_reader, stream_writer):
        security.KeyHandler.__init__(self)
        pyarchy.common.ClassicObject.__init__(self, '', False)

        self._stream_reader = stream_reader
        self._stream_writer = stream_writer

        # command id -> coroutine overrides checked before handle_* lookup.
        self._commands = {}

    async def send(self, dg: Datagram):
        """Encode, encrypt, and write a Datagram with a length prefix."""
        data = str(dg).encode()
        data = base64.b85encode(data)
        data = self.encrypt(data)

        n_bytes = len(data)
        pointer = struct.pack('I', socket.htonl(n_bytes))

        try:
            self._stream_writer.write(pointer + data)
            await self._stream_writer.drain()
        except ConnectionResetError:
            # Client crashed
            pass

    async def recv(self, n_bytes: int = None):
        """Read one Datagram; returns None on any receive failure."""
        try:
            if n_bytes is None:
                # Read the 4-byte length prefix first.
                pointer = await self._stream_reader.readexactly(4)
                n_bytes = socket.ntohl(struct.unpack('I', pointer)[0])

            data = await self._stream_reader.read(n_bytes)
            data = self.decrypt(data)
            data = base64.b85decode(data).decode()
            return Datagram.from_string(data)
        except ConnectionResetError:
            # Client crashed
            pass
        except asyncio.streams.IncompleteReadError:
            # Failed to receive pointer
            pass
        except struct.error:
            # Received invalid pointer
            pass
        except json.decoder.JSONDecodeError:
            # Bad Datagram
            pass

        return None

    async def start(self):
        """Perform the handshake, then process datagrams until EOF/error."""
        await self.send_handshake()

        # Maintain the connection
        while True:
            dg = await self.recv()
            if not dg:
                break
            if await self.handle_datagram(dg):
                break

    async def stop(self):
        self._stream_writer.close()

    async def handle_datagram(self, dg: Datagram):
        """Dispatch a Datagram to a registered or conventionally-named
        handler; send an error response when none exists."""
        func = self._commands.get(dg.command)
        if not func:
            # BUG FIX: CMD_2_NAME.get may return None for an unknown
            # command; concatenating 'handle_' + None raised TypeError
            # instead of reaching send_error below.
            cmd_name = constants.CMD_2_NAME.get(dg.command)
            if cmd_name is not None:
                func = getattr(self, 'handle_' + cmd_name, None)

        if func:
            await func(dg)
        else:
            await self.send_error(constants.ERR_DISCONNECT)

    async def send_handshake(self):
        await self.send(
            Datagram(
                command = constants.CMD_SHAKE,
                sender = self.id,
                recipient = self.id,
                data = self.key))

    async def handle_handshake(self, dg: Datagram):
        # Peer's key arrives as the handshake payload.
        self.counter_key = int(dg.data)

    async def send_error(self, errno: int):
        await self.send(
            Datagram(
                command = constants.CMD_ERR,
                sender = self.id,
                recipient = self.id,
                data = errno))

    # Add functionality in subclass
    async def handle_error(self, dg: Datagram):
        return NotImplemented

    async def send_response(self, data):
        await self.send(
            Datagram(
                command = constants.CMD_RESP,
                sender = self.id,
                recipient = self.id,
                data = data))


class ClientBase(Node):
    """A Node with a once-settable name and HMAC/challenge key material."""

    def __init__(self, stream_reader, stream_writer,
                 hmac_key, challenge_key):
        Node.__init__(self, stream_reader, stream_writer)

        self._hmac_key = hmac_key or b''
        self._challenge_key = challenge_key or b''

        self._name = None

    def __lt__(self, obj):
        if isinstance(obj, pyarchy.core.NamedObject):
            return self.name < obj.name
        else:
            return NotImplemented

    # BUG FIX: __gt__ was declared as __gt__(self) with no second
    # parameter, so any comparison raised NameError on `obj`.
    def __gt__(self, obj):
        if isinstance(obj, pyarchy.core.NamedObject):
            return self.name > obj.name
        else:
            return NotImplemented

    @property
    def name(self) -> str:
        return str(self._name)

    @name.setter
    def name(self, name: str):
        if self._name is None:
            self._name = str(name)
        else:
            raise AttributeError('name can only be set once')


# BUG FIX: __all__ must contain names (strings), not the class objects
# themselves, for `from module import *` to work correctly.
__all__ = [
    'Datagram',
    'Node',
    'ClientBase',
]
8. Learn to love fibre. Too little fibre is a primary cause of constipation in adults and children. “Including fibre in your diet has many health benefits in addition to treating constipation, such as helping to lower blood cholesterol and triglyceride levels, improving blood sugars in people with diabetes and assisting with weight management by providing a feeling of fullness,” says Danielle Wohlgemuth, an Edmonton-based registered dietitian with AHS. Many foods, including fruits and vegetables, whole-grain breads and grains, legumes, nuts and seeds contain fibre. Men aged 19 to 50 need 38 grams per day and women the same age need 25 grams. Visit albertahealthservices.ca and search fibre facts for more ways to include fibre. 9. Reframe your goals. Whether your goal is to lose weight, eat more vegetables or less sodium, the end result is really to have a balanced, healthy lifestyle. 10. Serve up a balanced meal. Larger plates, bowls and glasses can lead to bigger portions. 11. Know your portions. It’s easy to check serving sizes without sophisticated tools. A serving of meat is the size of a deck of cards; a serving of cooked vegetables or a small potato is the size of a hockey puck. A teaspoon or 5 ml of fat (oil, magarine or butter) equals the tip of your thumb, and a serving of fruit is the size of a tennis ball. To learn more, visit healthycanadians.ca.
import socketio


class mainNamespace(socketio.AsyncNamespace):
    """Socket.IO namespace handling the main player / playlist UI events."""

    def __init__(self, _playlist, _player, _playlistlist, _config, _loop,
                 _users, _namespace):
        self.playlist = _playlist
        self.player = _player
        self.playlistlist = _playlistlist
        self.config = _config
        self.loop = _loop
        self.users = _users
        # Per-song vote tracking, reset by newSong().
        self.shuffles = {}  # username -> number of shuffle requests
        self.skips = []     # usernames that voted to skip the current song
        socketio.AsyncNamespace.__init__(self, namespace=_namespace)

    def newSong(self):
        """Reset per-song vote tracking when a new song starts."""
        self.shuffles = {}
        self.skips = []

    async def on_connected(self, sid, msg):
        self.users.userConnect(sid, msg['session'], msg['ip'])
        # Admins and standard users live in different rooms so features can
        # be enabled/disabled per group in resendAll().
        if self.users.isSidAdmin(sid):
            self.enter_room(sid, 'adminUsr', namespace='/main')
        else:
            self.enter_room(sid, 'stdUsr', namespace='/main')
        print('Client Connected - {} - {}'.format(
            sid, self.users.getSidName(sid)))
        await self.resendAll()

    async def on_sendAll(self, sid):
        await self.resendAll()

    async def resendAll(self):
        """Push full feature/playlist/volume state to all clients."""
        # Standard users see features gated by config; admins get everything.
        await self.emit('featureDisable', {
            'skip': self.config.skippingEnable,
            'delete': self.config.songDeletionEnable,
            'shuffle': self.config.shuffleEnable,
            'newplaylists': self.config.newPlaylists,
            'playlistdeletion': self.config.enablePlaylistDeletion,
            'playlistediting': self.config.enablePlaylistEditing
        }, room='stdUsr')
        await self.emit('featureDisable', {
            'skip': True,
            'delete': True,
            'shuffle': True,
            'newplaylists': True,
            'playlistdeletion': True,
            'playlistediting': True}, room='adminUsr')
        await self.playlist.sendPlaylist()
        await self.player.sendDuration()
        await self.emit('volume_set', {'vol': self.player.getVolume()})
        await self.emit('playlistList', self.playlistlist.getPlaylists())

    async def on_sent_song(self, sid, msg):
        # FIX: removed a stray `global playlist` that referenced nothing,
        # and renamed a local that shadowed the builtin `str`.
        title = msg['data']
        requester = self.users.getSidName(sid)  # todo convert ip to device name
        if title != '':
            response = 'Queued Song - ' + title
            if '&' in title:
                response = response + '\nIf you wanted to add a playlist use the full playlist page that has "playlist" in the url'
                # Strip URL query parameters from the submitted title.
                song_name = title[:title.find('&')]
            else:
                song_name = title
            print('{} - Submitted - {}'.format(requester, title))
            self.loop.create_task(self.playlist.process(
                _title=song_name, _requester=requester))
        else:
            response = 'Enter a Song Name'
        await self.emit('response', {'data': response}, room=sid)

    async def on_button(self, sid, msg):
        command = msg['data']
        if command == 'skip' and (self.config.skippingEnable
                                  or self.users.isSidAdmin(sid)):
            # BUG FIX: `is 0` compared identity, not value; it only worked
            # via CPython int interning and warns on Python 3.8+.
            if (self.config.voteSkipNum == 0) or self.users.isSidAdmin(sid):
                await self.emit('response', {'data': 'Song Skipped'}, room=sid)
                print('{} - Skipped song'.format(self.users.getSidName(sid)))
                await self.player.stop()
            else:
                # Each user gets one skip vote per song.
                if self.users.getSidName(sid) not in self.skips:
                    self.skips.append(self.users.getSidName(sid))
                    print("{} - Voted to skip the song".format(
                        self.users.getSidName(sid)))
                    if len(self.skips) >= self.config.voteSkipNum:
                        await self.emit('response', {'data': 'Song Skipped'},
                                        room=sid)
                        print('Song was vote Skipped by {} people'.format(
                            len(self.skips)))
                        await self.player.stop()
        elif command == 'shuffle' and (self.config.shuffleEnable
                                       or self.users.isSidAdmin(sid)):
            # BUG FIX: `is 0` -> `== 0` (see skip branch above).
            if (self.config.shuffleLimit == 0) or self.users.isSidAdmin(sid):
                await self.emit('response', {'data': 'Songs Shuffled'},
                                namespace='/main')
                print('{} - Shuffled playlist'.format(
                    self.users.getSidName(sid)))
                await self.playlist.shuff()
            else:
                # Users get a limited number of shuffles per song.
                if self.users.getSidName(sid) in self.shuffles:
                    self.shuffles[self.users.getSidName(sid)] = \
                        self.shuffles[self.users.getSidName(sid)] + 1
                else:
                    self.shuffles[self.users.getSidName(sid)] = 1
                if self.shuffles[self.users.getSidName(sid)] <= \
                        self.config.shuffleLimit:
                    await self.emit('response', {'data': 'Songs Shuffled'},
                                    namespace='/main')
                    print('{} - Shuffled playlist'.format(
                        self.users.getSidName(sid)))
                    await self.playlist.shuff()
        elif command == 'clear' and (self.config.songDeletionEnable
                                     or self.users.isSidAdmin(sid)):
            await self.playlist.clearall()
            print('{} - Cleared all of playlist'.format(
                self.users.getSidName(sid)))
            await self.emit('response', {'data': 'Playlist Cleared'},
                            namespace='/main')
        elif command == 'pause':
            if self.player.isPaused():
                print('{} - Resumed the song'.format(
                    self.users.getSidName(sid)))
                await self.emit('response', {'data': 'Song Resumed'},
                                namespace='/main')
                await self.emit('pause_button', {'data': 'Pause'})
                await self.player.pause()
            elif self.player.running():
                print('{} - Paused the song'.format(
                    self.users.getSidName(sid)))
                await self.emit('response', {'data': 'Song Paused'},
                                namespace='/main')
                await self.emit('pause_button', {'data': 'Resume'})
                await self.player.pause()

    async def on_volume(self, sid, msg):
        vol = int(msg['vol'])
        self.player.setVolume(vol)
        await self.emit('volume_set', {'vol': vol})

    async def on_delete(self, sid, msg):
        if self.config.songDeletionEnable or self.users.isSidAdmin(sid):
            title = msg['title']
            index = msg['data']
            print('{} - Removed index {} title = {}'.format(
                self.users.getSidName(sid), index, title))
            await self.playlist.remove(index, title)
            s = 'Removed song from playlist - ' + title
            await self.emit('response', {'data': s}, room=sid)

    async def on_addPlaylist(self, sid, msg):
        songs = self.playlistlist.getsongs(msg['title'])
        if songs == {}:
            return
        await self.emit('response',
                        {'data': 'added playlist - ' + msg['title']},
                        room=sid)
        await self.playlist.addPlaylist(songs, self.users.getSidName(sid))

    async def on_savequeue(self, sid, msg):
        if self.config.newPlaylists or self.users.isSidAdmin(sid):
            await self.emit('response', {
                'data': 'Saving Current queue as playlist named - ' +
                        str(msg['name'])}, room=sid)
            print('{} - Saved queue as - {}'.format(
                self.users.getSidName(sid), msg['name']))
            songs = await self.playlist.getQueue()
            songs['data']['name'] = str(msg['name'])
            await self.playlistlist.addqueue(songs)

    async def on_newempty(self, sid, msg):
        if self.config.newPlaylists or self.users.isSidAdmin(sid):
            await self.emit('response', {
                'data': 'Creating a new empty playlist named - ' +
                        str(msg['name'])}, room=sid)
            print('{} - Created a new playlist named - {}'.format(
                self.users.getSidName(sid), msg['name']))
            await self.playlistlist.newPlaylist(msg['name'])

    async def on_getplaylist(self, sid, msg):
        name = msg['data']
        print('user modifing - {}'.format(name))
        songs = self.playlistlist.getsongs(name)
        await self.emit('selectedplaylist', songs, room=sid)

    async def on_add_song(self, sid, msg):
        if self.config.enablePlaylistEditing or self.users.isSidAdmin(sid):
            await self.playlistlist.addSong(msg['playlistname'], msg['data'])
            print('{} - Added - {} - to - {}'.format(
                self.users.getSidName(sid), msg['data'],
                msg['playlistname']))
            songs = self.playlistlist.getsongs(msg['playlistname'])
            await self.emit('selectedplaylist', songs, room=sid)

    async def on_removePlaySong(self, sid, msg):
        if self.config.enablePlaylistEditing or self.users.isSidAdmin(sid):
            await self.playlistlist.removeSong(
                msg['playlistname'], msg['index'], msg['title'])
            print('{} - Removed {} from playlist - {}'.format(
                self.users.getSidName(sid), msg['title'],
                msg['playlistname']))
            songs = self.playlistlist.getsongs(msg['playlistname'])
            await self.emit('selectedplaylist', songs, room=sid)

    async def on_removePlaylist(self, sid, msg):
        if self.config.enablePlaylistDeletion or self.users.isSidAdmin(sid):
            # Require the user to type the playlist name back as a
            # confirmation before deleting.
            if msg['title'].lower() == msg['userinput'].lower():
                await self.playlistlist.removePlaylist(msg['title'])
                print('{} - Removed playlist from server - {}'.format(
                    self.users.getSidName(sid), msg['title']))
                await self.emit('selectedplaylist',
                                {'data': {'name': 'Playlist:', 'dur': 0}},
                                room=sid)
            else:
                await self.emit('response', {
                    'data': 'Incorrect name, Unable to remove playlist'},
                    room=sid)
If you've grown weary of your Lucas starter leaving you by the roadside, try one of our gear reduction starters. These starters fit precisely, and start in an instant. Never be stalled again!
"""
Functions used to evaluate the quality of control systems.
Includes measures such as stability margins and settling time.

Requires numpy
"""
import numpy as np
from numpy import linalg as LA
import math
import cmath


def _instability_test(discrete):
    """Return a predicate that reports whether a set of closed-loop poles is unstable.

    Shared by the gain/phase margin routines so the discrete/continuous
    stability criterion is defined in exactly one place.

    Args:
        discrete: True for a discrete-time system (stable poles lie inside the
            unit circle), False for a continuous-time system (stable poles have
            non-positive real part).

    Returns:
        callable: poles -> bool, True when the pole set is unstable.
    """
    if discrete:
        return lambda poles: max(abs(poles)) > 1
    return lambda poles: max(pole.real for pole in poles) > 0


def upper_gain_margin(A, B, C, discrete=True, tol=1e-3, max_gain_dB=60, output_dB=True):
    """
    Calculate the upper gain margin for each input of a loop transfer function
    described by the state space matrices A, B, and C.

    Note that stability margins for MIMO systems may not represent the true
    robustness of the system because the gain or phase can change in all
    channels at once by a different amount.

    Args:
        A: The A matrix of the loop transfer function (np.matrix expected)
        B: The B matrix of the loop transfer function
        C: The C matrix of the loop transfer function
        discrete (optional): True if the loop transfer function is discrete,
            False if it is continuous. Defaults to True.
        tol (optional): The tolerance (in dB) to calculate the result to.
            Defaults to 1e-3.
        max_gain_dB (optional): The maximum dB to search to. Defaults to 60 dB.
        output_dB (optional): True if the output should be in dB, False if the
            result should be returned as gain. Defaults to True.

    Returns:
        list: The upper gain margin at each input. Units depend on output_dB.
    """
    (n, p) = B.shape
    max_gain = max(1, math.pow(10, max_gain_dB / 20))
    gain_list = [None] * p
    is_unstable = _instability_test(discrete)

    for i in range(p):
        # Bisect on the gain applied to input i: the bracket [t1, t2] always
        # has a stable lower edge and (once found) an unstable upper edge.
        t1 = 1
        t2 = max_gain
        gain_mat = np.matrix(np.eye(p))
        gain = t1
        while 20 * math.log(t2 / t1, 10) > tol:
            gain = (t1 + t2) / 2
            # Scale only the current input channel by the trial gain.
            gain_mat[i, i] = gain
            eig_vals, _ = LA.eig(A - B * gain_mat * C)
            if is_unstable(eig_vals):
                t2 = gain  # went unstable: shrink the upper bracket
            else:
                t1 = gain  # still stable: raise the lower bracket
        gain_list[i] = 20 * math.log(gain, 10) if output_dB else gain

    return gain_list


def lower_gain_margin(A, B, C, discrete=True, tol=1e-3, min_gain_dB=-60, output_dB=True):
    """
    Calculate the lower gain margin for each input of a loop transfer function
    described by the state space matrices A, B, and C.

    Note that stability margins for MIMO systems may not represent the true
    robustness of the system because the gain or phase can change in all
    channels at once by a different amount.

    Not all systems have a lower gain margin. Those systems will report the
    minimum searched value (min_gain_dB).

    Args:
        A: The A matrix of the loop transfer function (np.matrix expected)
        B: The B matrix of the loop transfer function
        C: The C matrix of the loop transfer function
        discrete (optional): True if the loop transfer function is discrete,
            False if it is continuous. Defaults to True.
        tol (optional): The tolerance (in dB) to calculate the result to.
            Defaults to 1e-3.
        min_gain_dB (optional): The minimum dB to search to. Defaults to -60 dB.
        output_dB (optional): True if the output should be in dB, False if the
            result should be returned as gain. Defaults to True.

    Returns:
        list: The lower gain margin for each input. Units depend on output_dB.
    """
    (n, p) = B.shape
    min_gain = min(1, math.pow(10, min_gain_dB / 20))
    gain_list = [None] * p
    is_unstable = _instability_test(discrete)

    for i in range(p):
        # Bisect downward from unity gain toward min_gain.
        t1 = min_gain
        t2 = 1
        gain_mat = np.matrix(np.eye(p))
        gain = t1
        while 20 * math.log(t2 / t1, 10) > tol:
            gain = (t1 + t2) / 2
            gain_mat[i, i] = gain
            eig_vals, _ = LA.eig(A - B * gain_mat * C)
            if is_unstable(eig_vals):
                t1 = gain  # unstable below this gain: raise the lower bracket
            else:
                t2 = gain  # still stable: lower the upper bracket
        gain_list[i] = 20 * math.log(gain, 10) if output_dB else gain

    return gain_list


def phase_margin(A, B, C, discrete=True, tol=1e-3, max_angle_deg=120):
    """
    Calculate the phase margin for each input of a loop transfer function
    described by the state space matrices A, B, and C.

    Note that stability margins for MIMO systems may not represent the true
    robustness of the system because the gain or phase can change in all
    channels at once by a different amount.

    Args:
        A: The A matrix of the loop transfer function (np.matrix expected)
        B: The B matrix of the loop transfer function
        C: The C matrix of the loop transfer function
        discrete (optional): True if the loop transfer function is discrete,
            False if it is continuous. Defaults to True.
        tol (optional): The tolerance (in degrees) to calculate the result to.
            Defaults to 1e-3.
        max_angle_deg (optional): The maximum angle to search to.
            Defaults to 120 degrees.

    Returns:
        list: The phase margin for each input, in degrees.
    """
    (n, p) = B.shape
    max_angle = max(1, max_angle_deg)
    angle_list = [None] * p
    is_unstable = _instability_test(discrete)

    for i in range(p):
        # Bisect on the phase lag injected into input i.
        t1 = 1
        t2 = max_angle
        gain_mat = np.matrix(np.eye(p, dtype=complex))
        angle = t1
        while t2 - t1 > tol:
            angle = (t1 + t2) / 2
            # A pure phase offset has unit magnitude: e^{-j*angle}.
            gain_mat[i, i] = cmath.exp(-1j * angle * math.pi / 180)
            eig_vals, _ = LA.eig(A - B * gain_mat * C)
            if is_unstable(eig_vals):
                t2 = angle  # went unstable: shrink the upper bracket
            else:
                t1 = angle  # still stable: raise the lower bracket
        angle_list[i] = angle

    return angle_list


def print_stability_margins(A, B, C, discrete=True, tol=1e-3):
    """
    Print the stability margins (gain and phase) for each input of a loop
    transfer function described by the state space matrices A, B, and C.

    Note that stability margins for MIMO systems may not represent the true
    robustness of the system because the gain or phase can change in all
    channels at once by a different amount.

    Args:
        A: The A matrix of the loop transfer function
        B: The B matrix of the loop transfer function
        C: The C matrix of the loop transfer function
        discrete (optional): True if the loop transfer function is discrete,
            False if it is continuous. Defaults to True.
        tol (optional): The tolerance to calculate the result to.
            Defaults to 1e-3.

    Returns:
        Nothing
    """
    ugm = upper_gain_margin(A, B, C, discrete=discrete, tol=tol)
    lgm = lower_gain_margin(A, B, C, discrete=discrete, tol=tol)
    phm = phase_margin(A, B, C, discrete=discrete, tol=tol)
    for i, (u, l, ph) in enumerate(zip(ugm, lgm, phm), start=1):
        print("Input " + str(i) + " upper gain margin = " + str(round(u, 2)) + " dB")
        print("Input " + str(i) + " lower gain margin = " + str(round(l, 2)) + " dB")
        print("Input " + str(i) + " phase margin = " + str(round(ph, 2)) + " deg")


def settling_time(t, y, percent=0.02, start=None, end=None):
    """
    Calculate the time it takes for each output to reach its final value to
    within a given percentage.

    Args:
        t (array): The time points (1 x n)
        y (ndarray): The output vectors (n, m), where m is the number of
            states. NOTE(review): indexing assumes np.matrix semantics
            (y[0, :].tolist() returning a nested list) -- confirm callers.
        percent (optional): The percent to which the output needs to settle.
        start (optional): The starting value to use for calculations. If none
            is given, the max of the first values of y is used. Default None.
        end (optional): The end value to use for calculations. If none is
            given, the min of the last values of y is used. Default None.

    Returns:
        The settling time in seconds.
    """
    settling_times = []
    (num_samples, states) = y.shape
    if start is None:
        start = max(abs(n) for n in y[0, :].tolist()[0])
    if end is None:
        end = round(min(abs(n) for n in y[-1, :].tolist()[0]), 3)
    yout = np.transpose(y).tolist()
    # The settling band is +/- percent of the start-to-end excursion.
    limit = percent * abs(start - end)
    limit_high = end + limit
    limit_low = end - limit
    for state in range(states):
        # Scan backwards: the settling time is the last instant the output
        # was outside the band. (Fix: the loop variable no longer shadows
        # the `y` parameter, and a state that never leaves the band settles
        # at t[0] instead of contributing nothing.)
        i = num_samples
        for sample in reversed(yout[state]):
            i -= 1
            if sample > limit_high or sample < limit_low:
                settling_times.append(t[i])
                break
        else:
            settling_times.append(t[0])
    return max(settling_times)


def ltf_regsf(sys_ol, L):
    """
    Construct the loop transfer function of the full state feedback
    regulator system. Used for calculating stability.

    Args:
        sys_ol (StateSpace): The state-space model of the plant
        L (matrix): The gain matrix

    Returns:
        tuple: (A, B, C) matrices describing the loop transfer function
    """
    # Breaking the loop at the plant input gives L(s) = L (sI - A)^-1 B.
    return (sys_ol.A, sys_ol.B, L)


def ltf_regob(sys_ol, L, K):
    """
    Construct the loop transfer function of the full order observer
    system. Used for calculating stability.

    Args:
        sys_ol (StateSpace): The state-space model of the plant
        L (matrix): The gain matrix
        K (matrix): The observer gain matrix

    Returns:
        tuple: (A, B, C) matrices describing the loop transfer function
    """
    A = sys_ol.A
    B = sys_ol.B
    C = sys_ol.C
    (n, p) = B.shape
    # Augmented state: [plant state; observer state].
    A_ltf_top_row = np.concatenate((A, np.zeros((n, n))), axis=1)
    A_ltf_bot_row = np.concatenate((K * C, A - (K * C) - (B * L)), axis=1)
    A_ltf = np.concatenate((A_ltf_top_row, A_ltf_bot_row), axis=0)
    B_ltf = np.concatenate((B, np.zeros((n, p))), axis=0)
    # Feedback is taken from the observer state only.
    C_ltf = np.concatenate((np.zeros((p, n)), L), axis=1)
    return (A_ltf, B_ltf, C_ltf)


def ltf_tsob(sys_ol, Aa, Ba, L1, L2, K):
    """
    Construct the loop transfer function of the full order observer
    tracking system. Used for calculating stability.

    Args:
        sys_ol (StateSpace): The state-space model of the plant
        Aa (matrix): The additional dynamics state matrix
        Ba (matrix): The additional dynamics input matrix
        L1 (matrix): The plant gain matrix
        L2 (matrix): The additional dynamics gain matrix
        K (matrix): The observer gain matrix

    Returns:
        tuple: (A, B, C) matrices describing the loop transfer function
    """
    A = sys_ol.A
    B = sys_ol.B
    C = sys_ol.C
    (n, p) = B.shape
    (na, pa) = Ba.shape
    # Augmented state: [plant state; observer state; additional dynamics].
    A_ltf_top_row = np.concatenate((A, np.zeros((n, n + na))), axis=1)
    A_ltf_mid_row = np.concatenate((K * C, A - K * C - B * L1, -B * L2), axis=1)
    A_ltf_bot_row = np.concatenate((Ba * C, np.zeros((na, n)), Aa), axis=1)
    A_ltf = np.concatenate((A_ltf_top_row, A_ltf_mid_row, A_ltf_bot_row), axis=0)
    B_ltf = np.concatenate((B, np.zeros((n + na, pa))), axis=0)
    C_ltf = np.concatenate((np.zeros((p, n)), L1, L2), axis=1)
    return (A_ltf, B_ltf, C_ltf)


def ltf_tssf(sys_ol, Aa, Ba, L1, L2):
    """
    Construct the loop transfer function of the full state feedback
    tracking system. Used for calculating stability.

    Args:
        sys_ol (StateSpace): The state-space model of the plant
        Aa (matrix): The additional dynamics state matrix
        Ba (matrix): The additional dynamics input matrix
        L1 (matrix): The plant gain matrix
        L2 (matrix): The additional dynamics gain matrix

    Returns:
        tuple: (A, B, C) matrices describing the loop transfer function
    """
    A = sys_ol.A
    B = sys_ol.B
    C = sys_ol.C
    (n, p) = B.shape
    (na, pa) = Ba.shape
    # Augmented state: [plant state; additional dynamics].
    A_ltf_top_row = np.concatenate((A, np.zeros((n, na))), axis=1)
    A_ltf_bot_row = np.concatenate((Ba * C, Aa), axis=1)
    A_ltf = np.concatenate((A_ltf_top_row, A_ltf_bot_row), axis=0)
    B_ltf = np.concatenate((B, np.zeros((na, pa))), axis=0)
    C_ltf = np.concatenate((L1, L2), axis=1)
    return (A_ltf, B_ltf, C_ltf)
Sky Matters is a monthly Newsletter focused on Irish Astronomy. In this issue we look at the skies for January 2019, and the year ahead in space flight. Sky Matters - A monthly Newsletter from CIT Blackrock Castle Observatory about the night sky.
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0

import logging
import traceback
from restclients_core.exceptions import DataFailureException
from myuw.dao.registration import get_schedule_by_term
from myuw.dao.instructor_schedule import get_instructor_schedule_by_term
from myuw.dao.term import get_specific_term, get_current_quarter
from myuw.dao.textbook import (
    get_textbook_by_schedule, get_order_url_by_schedule)
from myuw.logger.timer import Timer
from myuw.logger.logresp import (
    log_api_call, log_msg, log_data_not_found_response)
from myuw.views import prefetch_resources
from myuw.views.api import ProtectedAPI
from myuw.views.error import handle_exception, data_not_found, data_error

logger = logging.getLogger(__name__)


class Textbook(ProtectedAPI):
    """
    Performs actions on resource at
    /api/v1/books/[year][quarter][summer_term].
    """

    def get(self, request, *args, **kwargs):
        """
        GET returns 200 with textbooks for the given quarter
        """
        timer = Timer()
        year = kwargs.get("year")
        quarter = kwargs.get("quarter")
        # Default to the full summer term when the URL omits one.
        summer_term = kwargs.get("summer_term", "full-term")
        return self.respond(
            timer, request, get_specific_term(year, quarter), summer_term)

    def respond(self, timer, request, term, summer_term):
        """Build the textbook payload for a term.

        Merges books from the user's enrolled schedule and (separately) any
        sections the user instructs, keyed by SLN. Returns 404 when neither
        source yields any books; any unexpected error is converted to an
        error response by handle_exception.
        """
        try:
            prefetch_resources(request)
            by_sln = {}
            # enrolled sections
            try:
                schedule = get_schedule_by_term(
                    request, term=term, summer_term=summer_term)
                by_sln.update(self._get_schedule_textbooks(schedule))
                # The bookstore order URL applies to the enrolled schedule
                # and is stored under a reserved non-SLN key.
                order_url = get_order_url_by_schedule(schedule)
                if order_url:
                    by_sln["order_url"] = order_url
            except DataFailureException as ex:
                # 400/404 mean "no enrolled schedule for this term" -- treated
                # as no data rather than an error; anything else propagates.
                if ex.status != 400 and ex.status != 404:
                    raise
            # instructed sections (not split summer terms)
            try:
                schedule = get_instructor_schedule_by_term(
                    request, term=term, summer_term="full-term")
                by_sln.update(self._get_schedule_textbooks(schedule))
            except DataFailureException as ex:
                # 404 means the user instructs nothing this term.
                if ex.status != 404:
                    raise
            if len(by_sln) == 0:
                log_data_not_found_response(logger, timer)
                return data_not_found()
            log_api_call(timer, request, "Get Textbook for {}.{}".format(
                term.year, term.quarter))
            return self.json_response(by_sln)
        except Exception:
            return handle_exception(logger, timer, traceback)

    def _get_schedule_textbooks(self, schedule):
        """Return {sln: [book json, ...]} for a schedule, or {} if empty."""
        by_sln = {}
        if schedule and len(schedule.sections):
            book_data = get_textbook_by_schedule(schedule)
            by_sln.update(index_by_sln(book_data))
        return by_sln


def index_by_sln(book_data):
    """Convert a {sln: [Book, ...]} mapping into JSON-serializable form."""
    json_data = {}
    for sln in book_data:
        json_data[sln] = []
        for book in book_data[sln]:
            json_data[sln].append(book.json_data())
    return json_data


class TextbookCur(Textbook):
    """
    Performs actions on resource at /api/v1/book/current/.
    """

    def get(self, request, *args, **kwargs):
        """
        GET returns 200 with the current quarter Textbook
        """
        timer = Timer()
        try:
            # summer_term=None: the parent respond() passes it through to the
            # registration DAO unmodified for the current quarter.
            return self.respond(
                timer, request, get_current_quarter(request), None)
        except Exception:
            return handle_exception(logger, timer, traceback)
Wizards are arcane casters that have a massive variety of spells at their disposal. When prepared with the right spells Wizards can rack up huge kill counts and deal massive amounts of damage at the same time. Languages: Wizards can speak Magic. To speak it, use the command /l magic [message] to speak a single line in Magic, or !speak magic to speak it until you !speak another language or opt to !speak common. Any specialization made during character creation will be removed. Specialist Wizards: In order to become a specialist wizard you must visit the spell pedestal in the docks at any level. You will have to sacrifice all use of two schools of casting to enhance another. Specialist Wizards receive an additional rank of Spell Focus when casting spells of their specialty school. This additional rank grants +2 DC and also improves the effects of spells which improve with Spell Focus rank. This additional rank does not help qualify for custom Higher Ground epic spells but does allow earlier access to the Paragon spell for the specialization class. Maximizing their DC and Spell Penetration, a prepared Wizard is a force to be reckoned with. Whether it be for saves or to capitalize on the egregious amount of skill points available to Wizards through their focus on Intelligence, some prefer to pick up a little something extra for a loss in their casting power. NOTE: the !sb commands are currently disabled in HG due to a server conflict that was identified as being caused by them! "!sb save # NAME" - This will save your current spell books into slot # (0-9) with name NAME. "!sb load #" - This will load saved spellbook # (0-9) into your active spell book. "!sb list" - This will list your saved spell books. At Paragon levels, make sure to grab Paragon of Polymath at 63. It allows you to use any Paragon Spell from a non-blocked school (usually Karsus). Unlike a Sorcerer you can easily make use of spell combos like Mind Fog + Mind Spells or Ghostly Visage + Time Stop. 
Take advantage of this. Any spell without a DC — such as Tenser's Transformation (for up to +20 Natural AC), Legend Lore (high Lore bonus), Spell Breach (works differently from Mord), and Powerword Stun/Kill — should be used. Make sure to employ the most advantageous spell for each boss, and prepare your spellbook in advance. This page was last modified on 20 February 2019, at 07:56. This page has been accessed 8,289 times. Content is available under the Creative Commons Attribution-Noncommercial-Share Alike 3.0 License.
#!/usr/bin/env python

'''
Feature homography
==================

Example of using features2d framework for interactive video homography matching.
ORB features and FLANN matcher are used. The actual tracking is implemented by
PlaneTracker class in plane_tracker.py
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2
import sys
PY3 = sys.version_info[0] == 3

if PY3:
    # Alias so the Python-2 style xrange below works on both versions.
    xrange = range

# local modules
from tst_scene_render import TestSceneRender


def intersectionRate(s1, s2):
    """Return the overlap ratio of rect s1=(x1,y1,x2,y2) and quad s2.

    Computed as 2 * intersection_area / (area(s1) + area(s2)), so identical
    shapes score 1.0 and disjoint shapes score 0.0.
    """
    x1, y1, x2, y2 = s1
    s1 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
    area, intersection = cv2.intersectConvexConvex(s1, np.array(s2))
    return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(np.array(s2)))


from tests_common import NewOpenCVTests


class feature_homography_test(NewOpenCVTests):

    render = None
    tracker = None
    framesCounter = 0
    frame = None

    def test_feature_homography(self):
        # Synthetic scene: a textured plane moving over a background image.
        self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
            self.get_sample('samples/data/box.png'), noise=0.5, speed=0.5)
        self.frame = self.render.getNextFrame()
        self.tracker = PlaneTracker()
        self.tracker.clear()
        self.tracker.add_target(self.frame, self.render.getCurrentRect())

        # Track the plane over 100 frames; each frame must overlap the ground
        # truth rectangle by more than 0.6 or the test fails.
        while self.framesCounter < 100:
            self.framesCounter += 1
            tracked = self.tracker.track(self.frame)
            if len(tracked) > 0:
                tracked = tracked[0]
                self.assertGreater(intersectionRate(self.render.getCurrentRect(), np.int32(tracked.quad)), 0.6)
            else:
                self.assertEqual(0, 1, 'Tracking error')
            self.frame = self.render.getNextFrame()


# built-in modules
from collections import namedtuple

FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6
flann_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,      # 12
                    key_size=12,         # 20
                    multi_probe_level=1)  # 2

# Minimum feature matches required before attempting/accepting a homography.
MIN_MATCH_COUNT = 10

'''
  image     - image to track
  rect      - tracked rectangle (x1, y1, x2, y2)
  keypoints - keypoints detected inside rect
  descrs    - their descriptors
  data      - some user-provided data
'''
PlanarTarget = namedtuple('PlaneTarget', 'image, rect, keypoints, descrs, data')

'''
  target - reference to PlanarTarget
  p0     - matched points coords in target image
  p1     - matched points coords in input frame
  H      - homography matrix from p0 to p1
  quad   - target bounary quad in input frame
'''
TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad')


class PlaneTracker:
    """Tracks planar targets across frames via AKAZE features + FLANN + RANSAC."""

    def __init__(self):
        self.detector = cv2.AKAZE_create(threshold=0.003)
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
        self.targets = []
        self.frame_points = []

    def add_target(self, image, rect, data=None):
        '''Add a new tracking target.'''
        x0, y0, x1, y1 = rect
        raw_points, raw_descrs = self.detect_features(image)
        points, descs = [], []
        # Keep only the keypoints that fall inside the target rectangle.
        for kp, desc in zip(raw_points, raw_descrs):
            x, y = kp.pt
            if x0 <= x <= x1 and y0 <= y <= y1:
                points.append(kp)
                descs.append(desc)
        descs = np.uint8(descs)
        self.matcher.add([descs])
        target = PlanarTarget(image=image, rect=rect, keypoints=points, descrs=descs, data=data)
        self.targets.append(target)

    def clear(self):
        '''Remove all targets'''
        self.targets = []
        self.matcher.clear()

    def track(self, frame):
        '''Returns a list of detected TrackedTarget objects'''
        self.frame_points, frame_descrs = self.detect_features(frame)
        if len(self.frame_points) < MIN_MATCH_COUNT:
            return []
        matches = self.matcher.knnMatch(frame_descrs, k=2)
        # Lowe ratio test: accept a match only when the best candidate is
        # clearly better (< 0.75x distance) than the second best.
        matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
        if len(matches) < MIN_MATCH_COUNT:
            return []
        # Bucket matches by which registered target they hit.
        matches_by_id = [[] for _ in xrange(len(self.targets))]
        for m in matches:
            matches_by_id[m.imgIdx].append(m)
        tracked = []
        for imgIdx, matches in enumerate(matches_by_id):
            if len(matches) < MIN_MATCH_COUNT:
                continue
            target = self.targets[imgIdx]
            p0 = [target.keypoints[m.trainIdx].pt for m in matches]
            p1 = [self.frame_points[m.queryIdx].pt for m in matches]
            p0, p1 = np.float32((p0, p1))
            # Robust homography estimate; status flags the RANSAC inliers.
            H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
            status = status.ravel() != 0
            if status.sum() < MIN_MATCH_COUNT:
                continue
            p0, p1 = p0[status], p1[status]

            # Project the target's corner quad into the current frame.
            x0, y0, x1, y1 = target.rect
            quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
            quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

            track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
            tracked.append(track)
        # Best-supported target (most inlier matches) first.
        tracked.sort(key=lambda t: len(t.p0), reverse=True)
        return tracked

    def detect_features(self, frame):
        '''detect_features(self, frame) -> keypoints, descrs'''
        keypoints, descrs = self.detector.detectAndCompute(frame, None)
        if descrs is None:  # detectAndCompute returns descs=None if no keypoints found
            descrs = []
        return keypoints, descrs
The Awning Pull is inspired by (not making this up) my memories of Parisian cafés. Size: 6" x 1-1/2". Center to center: 5"
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Selects model based on evaluation score.""" import collections import copy import time import typing from absl import logging from api import data_cache from api import model_selection_record from api.sampling import online_eval_sampling # pylint: disable=g-bad-import-order import common.generate_protos # pylint: disable=unused-import import data_store_pb2 import session_pb2 from data_store import resource_store EPISODE_SCORE_SUCCESS = 1 EPISODE_SCORE_FAILURE = -1 # Somewhat arbitrary constants used as a stopping heuristic. _NUM_ONLINE_EVALS_PER_MODEL = 6 _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT = 1 _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL = 8 class ModelSelector: """Selects model based on evaluation score.""" def __init__(self, data_store, session_resource_id): self._data_store = data_store self._session_resource_id = session_resource_id self._summary_map = None self._session = self._data_store.read(self._session_resource_id) self._progress = None def get_training_state(self) -> session_pb2.SessionInfo.TrainingState: """Get training state of the session for this model selector. Returns: session_pb2.SessionInfo.TrainingState enum. Raises: ValueError if the session type is not supported. 
""" session_type = self._get_session_type() if session_type == session_pb2.INTERACTIVE_TRAINING: if self._is_session_training(): return session_pb2.SessionInfo.TRAINING else: return session_pb2.SessionInfo.COMPLETED elif session_type == session_pb2.INFERENCE: return session_pb2.SessionInfo.COMPLETED elif session_type == session_pb2.EVALUATION: if self._is_eval_complete(): return session_pb2.SessionInfo.COMPLETED else: return session_pb2.SessionInfo.TRAINING else: raise ValueError(f'Unsupported session type: {session_type} in session ' f'{self._session_resource_id}.') def select_next_model(self): """Selects next model to try. Returns: resource_id.ResourceId of the model to select next. Raises: ValueError if model was not found or if requested for an unsupported session type. """ session_type = self._get_session_type() if session_type == session_pb2.INTERACTIVE_TRAINING: model_resource_id = self._best_offline_or_starting_snapshot_model() logging.info('Selected model %s for training session %s.', model_resource_id, self._session_resource_id) return model_resource_id elif session_type == session_pb2.INFERENCE: model_resource_id = self.select_final_model() logging.info('Selected model %s for inference session %s.', model_resource_id, self._session_resource_id) return model_resource_id elif session_type == session_pb2.EVALUATION: # Fetch the next online eval model. 
model_resource_id = self._next_online_eval_model() if not model_resource_id: raise ValueError( 'Empty model returned by online eval sampling for session ' f'{self._session_resource_id.session}') logging.info('Selected model %s for evaluation session %s.', model_resource_id, self._session_resource_id) return model_resource_id else: raise ValueError( f'Unsupported session type: {session_type} found for session ' f'{self._session_resource_id.session}.') def _best_offline_or_starting_snapshot_model(self): """Return best model resource ID by offline score or starting snapshot.""" try: offline_model_id = self._best_offline_model() res_id = self._data_store.resource_id_from_proto_ids( project_id=self._session.project_id, brain_id=self._session.brain_id, session_id=self._session.session_id, model_id=offline_model_id) logging.info('Selected best offline model: %s', res_id) return res_id except (FileNotFoundError, ValueError): # If offline model is not found, try getting the snapshot model. try: snapshot = data_cache.get_starting_snapshot(self._data_store, self._session.project_id, self._session.brain_id, self._session.session_id) res_id = self._data_store.resource_id_from_proto_ids( project_id=snapshot.project_id, brain_id=snapshot.brain_id, session_id=snapshot.session, model_id=snapshot.model) logging.info('Selected model from snapshot: %s.', res_id) return res_id except (FileNotFoundError, resource_store.InternalError, ValueError): logging.info( 'Failed to get offline model and model from starting snapshot for ' 'session %s. 
Returning empty model.', self._session.session_id) return None def _best_offline_model(self): """Goes through offline evaluations and returns model ID with best score.""" offline_eval_summary = self._get_offline_eval_summary( self._session_resource_id) if not offline_eval_summary: raise FileNotFoundError('No offline eval found for session ' f'{self._session_resource_id.session}.') return offline_eval_summary.scores_by_offline_evaluation_id()[0][1].model_id def _next_online_eval_model(self): """Selects the next model resource ID based on the online eval results.""" if not self._get_summary_map(): raise FileNotFoundError('No models found for evaluation session ' f'{self._session_resource_id.session}.') _, model_ids, model_records = self._create_model_records() sampling = online_eval_sampling.UCBSampling() selected_model_index = sampling.select_next(model_records) if selected_model_index >= len(model_ids): raise ValueError( f'Selected model index {selected_model_index} is larger than the ' f'number of models ({len(model_ids)}) we have available.') if selected_model_index < 0: raise ValueError( f'Selected model index is less than 0: {selected_model_index}') model_id = model_ids[selected_model_index] snapshot = data_cache.get_starting_snapshot(self._data_store, self._session.project_id, self._session.brain_id, self._session.session_id) return self._data_store.resource_id_from_proto_ids( project_id=snapshot.project_id, brain_id=snapshot.brain_id, session_id=snapshot.session, model_id=model_id) def _lookup_model_resource_id(self, project_id, brain_id, model_id): """Get model resource ID based on model ID from arbitrary session.""" res_ids, _ = self._data_store.list_by_proto_ids( project_id=project_id, brain_id=brain_id, session_id='*', model_id=model_id, page_size=2) if len(res_ids) != 1: raise RuntimeError( f'Expected one requested model with ID {model_id} in ' + f'projects/{project_id}/brains/{brain_id}, but found {len(res_ids)}') return res_ids[0] def 
select_final_model(self): """Select the final model ID for each session type.""" session_type = self._get_session_type() if session_type == session_pb2.INTERACTIVE_TRAINING: model_id = self._best_offline_model() return self._data_store.resource_id_from_proto_ids( project_id=self._session.project_id, brain_id=self._session.brain_id, session_id=self._session.session_id, model_id=model_id) snapshot = data_cache.get_starting_snapshot(self._data_store, self._session.project_id, self._session.brain_id, self._session.session_id) if session_type == session_pb2.INFERENCE: return self._lookup_model_resource_id( snapshot.project_id, snapshot.brain_id, snapshot.model) elif session_type == session_pb2.EVALUATION: model_id = self._best_online_model() return self._lookup_model_resource_id( snapshot.project_id, snapshot.brain_id, model_id) else: raise ValueError(f'Unsupported session type: {session_type} found for ' 'session {self._session.session_id}.') def _best_online_model(self): """Select the model ID with the best online evaluation score.""" if not self._get_summary_map(): raise ValueError( 'No models found for session ' f'{self._session_resource_id.session}. 
Cannot compute best.') _, model_ids, model_records = self._create_model_records() sampling = online_eval_sampling.HighestAverageSelection() selected_model_index = sampling.select_best(model_records) if selected_model_index >= len(model_ids): raise ValueError( f'Selected model index {selected_model_index} is larger than the ' f'number of models ({len(model_ids)}) we have available.') if selected_model_index < 0: raise ValueError( f'Selected model index is less than 0: {selected_model_index}') return model_ids[selected_model_index] def _get_session_type(self) -> session_pb2.SessionType: return data_cache.get_session_type( self._data_store, project_id=self._session_resource_id.project, brain_id=self._session_resource_id.brain, session_id=self._session_resource_id.session) @property def session_progress(self): """Get this session's progress float, lazily initialized.""" if not self._progress: progress_per_assignment = self._data_store.get_assignment_progress( self._session_resource_id) if not progress_per_assignment: self._progress = 0.0 else: self._progress = ( sum(progress_per_assignment.values())/len(progress_per_assignment)) return self._progress def _is_session_training(self): return self.session_progress < 1.0 def _is_eval_complete(self): """Check that the total evals count is larger than the required number. The required number is defined by _NUM_ONLINE_EVALS_PER_MODEL. Returns: bool: True if the required number of online evaluations have been completed, False otherwise. 
""" total_online_evals, _, _ = self._create_model_records() summary_map = self._get_summary_map() return total_online_evals >= _NUM_ONLINE_EVALS_PER_MODEL * len(summary_map) def _get_summary_map(self) -> typing.DefaultDict[ str, typing.List[model_selection_record.EvaluationSummary]]: """Lazily initializes map of assignment IDs to list of EvaluationSummary.""" if not self._summary_map: before = time.perf_counter() starting_snapshot = data_cache.get_starting_snapshot( self._data_store, self._session.project_id, self._session.brain_id, self._session.session_id) offline_eval_summary = self._get_offline_eval_summary( # Use starting snapshot to create a session resource ID. self._data_store.resource_id_from_proto_ids( project_id=starting_snapshot.project_id, brain_id=starting_snapshot.brain_id, session_id=starting_snapshot.session)) online_eval_summary = self._get_online_eval_summary() self._summary_map = self._generate_summary_map(offline_eval_summary, online_eval_summary) logging.info( 'Generated summary map in %d seconds.', time.perf_counter() - before) return self._summary_map def _get_offline_eval_summary( self, session_resource_id: str ) -> (model_selection_record.OfflineEvaluationByAssignmentAndEvalId): """Populates an OfflineEvaluationByAssignmentAndEvalId from offline evals. Args: session_resource_id: Resource ID for the session to read offline eval from. Returns: An instance of OfflineEvaluationByAssignmentAndEvalId, which maps (assignment_id, offline_evaluation_id) to ModelScores. Raises: ValueError for when the provided session does not have valid corresponding assignments. """ # Get offline evals from the session of the starting snapshot in order of # descending create time. 
offline_eval_resource_ids, _ = self._data_store.list_by_proto_ids( project_id=session_resource_id.project, brain_id=session_resource_id.brain, session_id=session_resource_id.session, model_id='*', offline_evaluation_id='*', time_descending=True) assignment_resource_ids, _ = self._data_store.list_by_proto_ids( project_id=session_resource_id.project, brain_id=session_resource_id.brain, session_id=session_resource_id.session, assignment_id='*') assignment_ids = set( [data_cache.get_assignment_id(self._data_store, res_id) for res_id in assignment_resource_ids]) offline_eval_summary = ( model_selection_record.OfflineEvaluationByAssignmentAndEvalId()) for offline_eval_resource_id in offline_eval_resource_ids: number_of_models_per_assignment = [ len(offline_eval_summary.model_ids_for_assignment_id(a)) for a in assignment_ids] if (min(number_of_models_per_assignment) >= _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT and sum(number_of_models_per_assignment) >= _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL): # Hit all the assignments with at least # _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT model and make # sure we have at least _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL # across all assignments. break offline_eval = self._data_store.read(offline_eval_resource_id) if offline_eval.assignment not in assignment_ids: raise ValueError( f'Assignment ID {offline_eval.assignment} not found in ' f'assignments for session {session_resource_id.session}.') models_for_assignment = offline_eval_summary.model_ids_for_assignment_id( offline_eval.assignment) if len(models_for_assignment) >= _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL: # No need to look at this offline evaluation score since we have enough # models for the assignment. 
continue scores_by_assignment_and_eval_id = offline_eval_summary[ model_selection_record.AssignmentEvalId( assignment_id=offline_eval.assignment, offline_evaluation_id=offline_eval.offline_evaluation_id)] scores_by_assignment_and_eval_id.add_score( offline_eval.model_id, offline_eval.score) return offline_eval_summary def _get_online_eval_summary(self) -> ( typing.DefaultDict[str, typing.List[float]]): """Gets list of online scores per model. Returns: typing.DefaultDict[str, List[float]] mapping model_id to scores. """ # This session is the evaluating session ID, so we look for online evals for # self._session_resource_id. online_eval_resource_ids, _ = self._data_store.list_by_proto_ids( attribute_type=data_store_pb2.OnlineEvaluation, project_id=self._session_resource_id.project, brain_id=self._session_resource_id.brain, session_id=self._session_resource_id.session, episode_id='*') online_summaries = collections.defaultdict(list) for online_eval_resource_id in online_eval_resource_ids: online_eval = self._data_store.read(online_eval_resource_id) online_summaries[online_eval.model].append(online_eval.score) return online_summaries def _generate_summary_map( self, offline_eval_summary, online_eval_summary ) -> typing.DefaultDict[str, typing.List[ model_selection_record.EvaluationSummary]]: """Joins the summaries by corresponding assignment IDs and model IDs. Args: offline_eval_summary: model_selection_record.OfflineEvaluationByAssignmentAndEvalId instance. online_eval_summary: typing.DefaultDict[string, List[float]] mapping model_id to scores. Returns: typing.DefaultDict[string, List[EvaluationSummary]] mapping assignment IDs to list of EvalutionSummary. """ summary_map = model_selection_record.SummaryMap() # We're allowed the max of _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL and # number of assignments * _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT. 
models_budget = max( _MAXIMUM_NUMBER_OF_MODELS_TO_ONLINE_EVAL, len(offline_eval_summary) * _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT) models_by_assignment_map = copy.deepcopy(offline_eval_summary) # First, populate with scores from top # _NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT models for each assignment. for assignment_id in list(models_by_assignment_map.assignment_ids): top_model_scores_for_assignment_id = ( models_by_assignment_map.scores_by_offline_evaluation_id( assignment_id, models_limit=_NUM_MODELS_TO_ONLINE_EVAL_PER_ASSIGNMENT)) for eval_id, model_score in top_model_scores_for_assignment_id: self._add_summary(assignment_id, eval_id, model_score, online_eval_summary.get(model_score.model_id, []), summary_map) models_by_assignment_map.remove_model(model_score.model_id) # If we can still add more models, populate by getting one from each # assignment. while (summary_map.models_count < models_budget and models_by_assignment_map): for assignment_id in list(models_by_assignment_map.assignment_ids): top_scores_for_assignment_id = ( models_by_assignment_map.scores_by_offline_evaluation_id( assignment_id, models_limit=1)) # Pick off one model at a time. for eval_id, model_score in top_scores_for_assignment_id: self._add_summary(assignment_id, eval_id, model_score, online_eval_summary.get(model_score.model_id, []), summary_map) models_by_assignment_map.remove_model(model_score.model_id) return summary_map def _add_summary(self, assignment_id, eval_id, model_score, online_scores, summary_map): """Add or update an existing EvaluationSummary in the SummaryMap. Args: assignment_id: Assignment ID of the score to update the SummaryMap with. eval_id: Offline evaluation ID of the score to update the SummaryMap with. model_score: ModelScore instance containing information about the score to update the SummaryMap with. online_scores: List of online scores to update the SummaryMap with. summary_map: SummaryMap instance to update. 
""" existing_eval_summary = ( summary_map.eval_summary_for_assignment_and_model( assignment_id, model_score.model_id)) if existing_eval_summary: existing_eval_summary.offline_scores[eval_id] = model_score.score else: summary_map[assignment_id].append( model_selection_record.EvaluationSummary( model_id=model_score.model_id, offline_scores={eval_id: model_score.score}, online_scores=online_scores)) def _create_model_records(self): """Creates ModelRecords for sampling and return number of total eval runs. Returns: total_runs: Number of online evaluation runs recorded. model_ids: List of model IDs that recorded online evaluations. model_records: List of online_eval_sampling.ModelRecords instances. Raises: ValueError when size of model IDs and model records don't match. """ total_runs = 0 model_ids = [] model_records = [] for _, eval_summaries in self._get_summary_map().items(): for eval_summary in eval_summaries: successes = 0 failures = 0 model_ids.append(eval_summary.model_id) for score in eval_summary.online_scores: if score == EPISODE_SCORE_SUCCESS: successes += 1 elif score == EPISODE_SCORE_FAILURE: failures += 1 else: logging.error('Unknown online score %d.', score) total_runs += successes + failures model_records.append(online_eval_sampling.ModelRecord( successes=successes, failures=failures)) if len(model_records) != len(model_ids): raise ValueError( 'Size of model records don\'t match the size of model IDs.') return total_runs, model_ids, model_records
(April 16, 2018) – Water Infrastructure Investment. Consent decrees. Integrated planning. Rate affordability. Your utility can be 100% correct on the issues and still suffer negative blowback due to poorly executed communications on advocacy matters. Takeaways: Follow-up resources and collaboration opportunities will help ensure that your StratComm: H2O benefits extend beyond the event into ongoing peer-to-peer communications support. StratComm: H2O is designed to empower forward-thinking clean water leaders to take hold of their communications platform, maximize its resources, and direct it toward a valuable, measurable return on investment. Join us as we share expert strategies, resources, and peer-to-peer solutions from some of the best water sector communicators in the country — and come away with concrete action plans to improve your platform and grow your nationwide network. Register today!
""" Manage data in folders """ def identify_groups(folder, splitStr, groupPos, outFolder): """ Identifica o grupo a que um ficheiro pertence e envia-o para uma nova pasta com os ficheiros que pertecem a esse grupo. Como e que o grupo e identificado? * O nome do ficheiro e partido em dois em funcao de splitStr; * O groupPos identifica qual e a parte (primeira ou segunda) que corresponde ao grupo. """ import os from gasp.oss.info import list_files from gasp.oss.ops import create_folder from gasp.oss.ops import copy_file files = list_files(folder) # List groups and relate files with groups: groups = {} for _file in files: # Split filename filename = os.path.splitext(os.path.basename(_file))[0] fileForm = os.path.splitext(os.path.basename(_file))[1] group = filename.split(splitStr)[groupPos] namePos = 1 if not groupPos else 0 if group not in groups: groups[group] = [[filename.split(splitStr)[namePos], fileForm]] else: groups[group].append([filename.split(splitStr)[namePos], fileForm]) # Create one folder for each group and put there the files related # with that group. for group in groups: group_folder = create_folder(os.path.join(outFolder, group)) for filename in groups[group]: copy_file( os.path.join(folder, '{a}{b}{c}{d}'.format( a=filename[0], b=splitStr, c=group, d=filename[1] )), os.path.join(group_folder, '{a}{b}'.format( a=filename[0], b=filename[1] )) )
Come and meet our unchained Siberian Husky sled dogs and experience the sport of dog sledding. Visit the pack at our kennel and get to know how joyful, intelligent, and gentle they are. Get to know their love of pulling and working — and, of course, of cuddling and playing. We have created a unique sled dog environment where we emphasize respect for the dogs and focus on their well-being. We start our dog sledding tours from our farm, Heiði, which is located close to Lake Mývatn in north Iceland. In late April and May, when the snow is melting from the area around our farm and kennel at Heiði, we move our tours to higher ground where there is good snow.
"""A set of helper functions to work with the astropy module.""" import functools import random import string import tempfile import subprocess import collections from itertools import cycle, islice, chain, combinations, zip_longest import scipy import numpy as np from astropy.table import Table, join from astropy.coordinates import SkyCoord from astropy import units as u #from astroquery.vizier import Vizier ############################################################################### # Astropy Utilities # ############################################################################### def change_column_dtype(table, colname, newdtype): '''Changes the dtype of a column in a table. Use this function to change the dtype of a particular column in a table. ''' tempcol = table[colname] colindex = table.colnames.index(colname) del(table[colname]) table.add_column(np.asanyarray(tempcol, dtype=newdtype), index=colindex) def astropy_table_index(table, column, value): '''Returns the row index of the table which has the value in column. There are often times when you want to know the index of the row where a certain column has a value. This function will return a list of row indices that match the value in the column.''' return astropy_table_indices(table, column, [value]) def astropy_table_indices(table, column, values): '''Returns the row indices of the table which have the values in column. If you need to get the indices of values located in the column of a table, this function will determine that for you. ''' indices = mark_selections_in_columns(table[column], values) return np.where(indices) def mark_selections_in_columns(col, values): '''Return index indicating values are in col. 
Returns an index array which is the size of col that indicates True when col holds an entry equal to value, and False otherwise.''' if len(col) > len(values)**2: return multi_logical_or(*[col == v for v in values]) else: try: valset = set(values) except TypeError: unmasked_values = values[values.mask == False] valset = set(unmasked_values) index = [] for v in col: try: incol = v in valset except TypeError: incol = False index.append(incol) return np.array(index, dtype=np.bool) def multi_logical_or(*arrs): '''Performs a logical or for an arbitrary number of boolean arrays.''' return functools.reduce(np.logical_or, arrs, False) def multi_logical_and(*arrs): '''Performs a logical or for an arbitrary number of boolean arrays.''' return functools.reduce(np.logical_and, arrs, True) def astropy_table_row(table, column, value): '''Returns the row of the table which has the value in column. If you want to know the row in an astropy table where a value in a column corresponds to a given value, this function will return that row. If there are multiple rows which match the value in the column, you will get all of them. If no rows match the value, this function will throw a ValueError.''' return table[astropy_table_index(table, column, value)] def extract_subtable_from_column(table, column, selections): '''Returns a table which only contains values in selections. This function will create a Table whose values in column are only those found in selections. ''' return table[astropy_table_indices(table, column, selections)] def filter_column_from_subtable(table, column, selections): '''Returns a table where none of the values in column are selections. This function will create a Table whose values are those in column which are not found in selections. 
''' subindices = astropy_table_indices(table, column, selections) compindices = get_complement_indices(subindices, len(table)) return table[compindices] def join_by_id(table1, table2, columnid1, columnid2, join_type="inner", conflict_suffixes=("_A", "_B"), idproc=None, additional_keys=[]): '''Joins two tables based on columns with different names. Table1 and table2 are the tables to be joined together. The column names that should be joined are the two columnids. Columnid1 will be the column name for the returned table. In case of conflicts, the conflict suffixes will be appended to the keys with conflicts. To merge conflicts instead of keeping them separate, add the column name to additional_keys. If the entries in the columns to be merged should be processed a certain way, the function that does the processing should be given in idfilter. For no processing, "None" should be passed instead. ''' # Process the columns if need be. if idproc is not None: # I want to duplicate the data so it won't be lost. And by keeping it # in the table, it will be preserved when it is joined. origcol1 = table1[columnid1] origcol2 = table2[columnid2] randomcol1 = generate_random_string(10) randomcol2 = generate_random_string(10) table1.rename_column(columnid1, randomcol1) table2.rename_column(columnid2, randomcol2) table1[columnid1] = idproc(origcol1) table2[columnid2] = idproc(origcol2) # If columnid1 = columnid2, then we can go straight to a join. If not, then # columnid2 needs to be renamed to columnid1. If table2[columnid1] exists, # then we have a problem and an exception should be thrown. if columnid1 != columnid2: if columnid1 not in table2.colnames: table2[columnid1] = table2[columnid2] else: raise ValueError( "Column {0} already exists in second table.".format(columnid1)) try: newtable = join( table1, table2, keys=[columnid1]+additional_keys, join_type=join_type, table_names=list(conflict_suffixes), uniq_col_name="{col_name}{table_name}") finally: # Clean up the new table. 
if columnid1 != columnid2: del(table2[columnid1]) if idproc is not None: del(table1[columnid1]) del(table2[columnid2]) del(newtable[randomcol1]) del(newtable[randomcol2]) table1.rename_column(randomcol1, columnid1) table2.rename_column(randomcol2, columnid2) return newtable def join_by_ra_dec( table1, table2, ra1="RA", dec1="DEC", ra2="RA", dec2="DEC", ra1_unit=u.degree, dec1_unit=u.degree, ra2_unit=u.degree, dec2_unit=u.degree, match_threshold=5*u.arcsec, join_type="inner", conflict_suffixes=("_A", "_B")): '''Join two tables by RA and DEC. This function will essentially perform a join between tables using coordinates. The column names for the coordinates should be given in ra1, ra2, dec1, dec2. In case of conflicts, the conflict_suffices will be used for columns in table1 and table2, respectively. ''' # Instead of directly using RA/Dec, we'll set up a column that maps rows in # table 2 to rows in table2. match_column = generate_random_string(10) ra1_coords = table1[ra1] try: ra1_coords = ra1_coords.to(ra1_unit) except u.UnitConversionError: ra1_coords = ra1_coords * ra1_unit dec1_coords = table1[dec1] try: dec1_coords = dec1_coords.to(dec1_unit) except u.UnitConversionError: dec1_coords = dec1_coords * dec1_unit ra2_coords = table2[ra2] try: ra2_coords = ra2_coords.to(ra2_unit) except u.UnitConversionError: ra2_coords = ra2_coords * ra2_unit dec2_coords = table2[dec2] try: dec2_coords = dec2_coords.to(dec2_unit) except u.UnitConversionError: dec2_coords = dec2_coords * dec2_unit # This will cross-match the two catalogs to find the nearest matches. coords1 = SkyCoord(ra=ra1_coords, dec=dec1_coords) coords2 = SkyCoord(ra=ra2_coords, dec=dec2_coords) idx, d2d, d3d = coords1.match_to_catalog_sky(coords2) # We only count matches which are within the match threshold. 
matches = d2d < match_threshold matched_tbl1 = table1[matches] try: table2[match_column] = np.arange(len(table2)) matched_tbl1[match_column] = table2[idx[matches]][match_column] newtable = join( matched_tbl1, table2, keys=match_column, join_type=join_type, table_names=list(conflict_suffixes), uniq_col_name="{col_name}{table_name}") finally: del(table2[match_column]) del(newtable[match_column]) # Want to inherit table1 column naming. # This will require deleting the table2 coordinates from the new table. try: del(newtable[ra2]) except KeyError: # This occurs when ra1=ra2. assert ra1==ra2 newtable.rename_column(ra1+conflict_suffixes[0], ra1) del(newtable[ra2+conflict_suffixes[1]]) try: del(newtable[dec2]) except KeyError: assert dec1==dec2 newtable.rename_column(dec1+conflict_suffixes[0], dec1) del(newtable[dec2+conflict_suffixes[1]]) return newtable def generate_random_string(length): '''Generate a random string with the given length.''' return "".join([random.choice(string.ascii_letters) for _ in range(length)]) def get_complement_indices(initindices, tablelength): '''Returns the indices corresponding to rows not in partialtable. This function essenially creates indices which correspond to the rows in totaltable rows not in partialtable. ''' compmask = np.ones(tablelength, np.bool) compmask[initindices] = 0 return np.where(compmask) def get_complement_table(partialtable, totaltable, compcolumn): '''Returns a subtable of total table without rows in partialtable. This is kinda like an operation to create a table which when stacked with partialtable and sorted by compcolumn, will create totaltable. ''' partialindices = astropy_table_indices(totaltable, compcolumn, partialtable[compcolumn]) compmask = get_complement_indices(partialindices, len(totaltable)) comp_sample = totaltable[compmask] return comp_sample def split_table_by_value(table, column, splitvalue): '''Bifurcates a table in two. 
This function splits a table based on the values in column and returns two tables in a 2-tuple. Values less than splitvalue are in the first tuple. Values greater than splitvalue are in the second. ''' lowentries = table[np.where(table[column] < splitvalue)] highentries = table[np.where(table[column] >= splitvalue)] return lowentries, highentries def first_row_in_group(tablegroup): '''Iterates through groups and selects the first row from each group. This is good for tables where there are multiple entries for each grouping, but the first row in the table is the preferable one. Such a thing occurs with the Catalog of Active Binary Systems (III). ''' rowholder = [] for group in tablegroup.groups: rowholder.append(group[0]) filteredtable = Table(rows=rowholder, names=tablegroup.colnames) return filteredtable def byte_to_unicode_cast(bytearr): '''Cast a numpy byte array to unicode. A change in Astropy 3.0 led to some columns from FITS files being stored as numpy byte arrays instead of string. This is an explicit cast of this column to a string array. https://github.com/astropy/astropy/pull/6821 The text in the bug report seems to indicate that conversion from bytes objects to unicode should be done transparently, but this doesn't seem to be the case.''' strcol = np.asarray(bytearr, np.unicode_) return strcol def set_numeric_fill_values(table, fill_value): '''Fill all of the columns in table specified in colnames. This is a convenience function to be able to conveniently get a filled table without having to manually fill a ton of columns.''' for col in table.colnames: if np.issubdtype(table[col].dtype, np.number): table[col].fill_value = fill_value def mask_numeric_fill_values(table, fill_value): '''Fill all of the columns in table specified in colnames. 
This convenience function to mask numeric columns in a table.''' for col in table.colnames: if np.issubdtype(table[col].dtype, np.number): table[col] = np.ma.masked_values(table[col], fill_value) ############################################################################### # Astroquery Catalog # ############################################################################### def Vizier_cached_table(tblpath, tablecode): '''Read a table from disk, querying Vizier if needed. For large tables which can be automatically queried from Vizier, but take a long time to download, this function will download the queried table into tblpath, and then read from it for all following times. The tablecode is the code (e.g. "J/A+A/512/A54/table8") uniquely identifying the desired table.''' try: tbl = Table.read(str(tblpath), format="ascii.ipac") except FileNotFoundError: Vizier.ROW_LIMIT = -1 tbl = Vizier.get_catalogs(tablecode)[0] tbl.write(str(tblpath), format="ascii.ipac") return tbl ############################################################################### # Spreadsheet help # ############################################################################### def inspect_table_as_spreadsheet(table): '''Opens the table in Libreoffice. For cases where it would be much easier to look at data by analyzing it in a spreadsheet, this function will essentially take the table and load it into Libreoffice so that operations can be done on it. ''' with tempfile.NamedTemporaryFile() as fp: table.write(fp.name, format="ascii.csv") libreargs = ["oocalc", fp.name] try: subprocess.run(libreargs) except FileNotFoundError: libreargs[0] = "localc" subprocess.run(libreargs) def inspect_table_in_topcat(table): '''Opens the table in TOPCAT TOPCAT is a useful tool for inspecting tables that are suited to be written as FITS files. TOPCAT is actually much more extensible than we are using it for, but it's helpful for this purpose. 
''' with tempfile.NamedTemporaryFile() as fp: table.write(fp.name, format="fits", overwrite=True) topcatargs = ["/home/regulus/simonian/topcat/topcat", fp.name] subprocess.run(topcatargs) ############################################################################### # Caching large data files # ############################################################################### class memoized(object): '''Decorator. Cache's a function's return value each time it is called. If called later with the same arguments, the cached value is returned (not reevaluated). ''' def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): if not isinstance(args, collections.Hashable): # uncacheable. a list, for instance. # better to not cache than blow up print("Uncacheable") return self.func(*args) if args in self.cache: print("Cached") return self.cache[args] else: print("Putting into cache") value = self.func(*args) self.cache[args] = value return value def __repr__(self): '''Return the function's docstring.''' return self.func.__doc__ def __get__(self, obj, objtype): '''Support instance methods.''' return functools.partial(self.__call__, obj) def shortcut_file(filename, format="fits", fill_value=-9999): ''' Return a decorator that both caches the result and saves it to a file. This decorator should be used for commonly used snippets and combinations of tables that are small enough to be read in quickly, and processed enough that generating them from scratch is time-intensive. ''' class Memorize(object): ''' A function decorated with @memorize caches its return value every time it is called. If the function is called later with the same arguments, the cached value is returned (the function is not reevaluated). The cache is stored in the filename provided in shortcut_file for reuse in future executions. If the function corresponding to this decorated has been updated, make sure to change the object at the given filename. 
''' def __init__(self, func): self.func = func self.filename = filename self.table = None def __call__(self, *args): if self.table is None: try: self.read_cache() except FileNotFoundError: value = self.func(*args) self.table = value self.save_cache() return self.table def read_cache(self): ''' Read the table in from the given location. This will take the format given in the shortcut_file command. ''' self.table = Table.read(self.filename, format=format, character_as_bytes=False) mask_numeric_fill_values(self.table, fill_value) # If the dtype is fits, then the Astropy FITS program doesn't # convert correctly between bytes and strings. # See https://github.com/astropy/astropy/issues/5280 def save_cache(self): ''' Save the table into the given filename using the given format. ''' set_numeric_fill_values(self.table, fill_value) try: self.table.write(self.filename, format=format) except FileNotFoundError: self.filename.parent.mkdir(parents=True) self.table.write(self.filename, format=format) def __repr__(self): ''' Return the function's docstring. ''' return self.func.__doc__ def __get__(self, obj, objtype): ''' Support instance methods. 
''' return functools.partial(self.__call__, obj) return Memorize ############################################################################### # Itertools help # ############################################################################### def roundrobin(*iterables): '''roundrobin('ABC', 'D', 'EF') --> ADEBFC''' # Recipe cedited to George Sakkis pending = len(iterables) nexts = cycle(iter(it).__next__ for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = cycle(islice(nexts, pending)) def take(n, iterable): '''Return first n items of the iterable as a list.''' return list(islice(iterable, n)) def flatten(listOfLists): "Flatten one level of nesting" return chain.from_iterable(listOfLists) def random_permutation(iterable, r=None): """Random selection from itertools.product(*args, **kwds)""" pool = tuple(iterable) r = len(pool) if r is None else r return tuple(random.sample(pool, r)) def powerset(iterable): "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" s = list(iterable) return chain.from_iterable(combinations(s, r) for r in range(len(s)+1)) def consume(iterator, n): "Advance the iterator n-steps ahead. If n is none, consume entirely." # Use functions that consume iterators at C speed. 
if n is None: # feed the entire iterator into a zero-length deque collections.deque(iterator, maxlen=0) else: # advance to the empty slice starting at position n next(islice(iterator, n, n), None) def nth(iterable, n, default=None): "Returns the nth item or a default value" return next(islice(iterable, n, None), default) def zip_equal(*iterables): '''Unzips, throwing an error if iterables have different lengths.''' sentinel = object() for combo in zip_longest(*iterables, fillvalue=sentinel): if sentinel in combo: raise ValueError("Iterables have different lengths") yield combo ############################################################################### # Binary confidence intervals # ############################################################################### def poisson_upper(n, sigma): '''Return the Poisson upper limit of the confidence interval. This is the upper limit for a given number of successes n, and the width of the confidence interval is given in sigmas.''' up = (n+1)*(1 - 1/9/(n+1) + sigma/3/np.sqrt(n+1))**3 return up def scaled_poisson_upper(n, sigma, scale): '''Return the upper limit of a scaled Poisson variable. This is the upper limit for a given number of successes if the random variable was scaled by a scale factor.''' confidence_level = scipy.stats.norm.cdf(sigma) upperlim = scipy.stats.chi2.ppf(1-(1-confidence_level)/scale, 2*n+2)/2 return upperlim def scaled_poisson_lower(n, sigma, scale): '''Return the lower limit of a scaled Poisson variable. This is the lower limit for a given number of successes if the random variable was scaled by a scale factor.''' confidence_level = scipy.stats.norm.cdf(sigma) lowerlim = scipy.stats.chi2.ppf(1-confidence_level/scale, 2*n)/2 return lowerlim def poisson_upper_exact(n, sigma): '''Return the Poisson upper limit of the confidence interval. This is the upper limit for a given number of successes n, and the width of the confidence interval is given in sigmas. 
This expression uses a root-finding algorithm as opposed to an approximation.''' confidence_level = scipy.stats.norm.cdf(sigma) upperlim = scipy.stats.chi2.ppf(confidence_level, 2*n+2)/2 return upperlim def poisson_lower_exact(n, sigma): '''Return the Poisson lower limit of the confidence interval. This is the lower limit for a given number of successes n, and the width of the confidence interval is given in sigmas. This expression uses a root-finding algorithm as opposed to an approximation.''' confidence_level = scipy.stats.norm.cdf(sigma) lowerlim = scipy.stats.chi2.ppf(1-confidence_level, 2*n)/2 return lowerlim def poisson_lower(n, sigma): '''Return the Poisson lower limit of the confidence interval. This is the lower limit for a given number of successes n, and the width of the confidence interval is given in sigmas. This formula is from Gehrels (1986) and contains tuned parameters.''' betas = {1.0: 0.0, 2.0: 0.062, 3.0:0.222} gammas = {1.0: 0.0, 2.0: -2.19, 3.0: -1.85} low = n * (1 - 1/9/n - sigma/3/np.sqrt(n) + betas[sigma]*n**gammas[sigma])**3 return low def binomial_upper(n1, n, sigma=1): '''The upper limit of the one-sigma binomial probability. This is the upper limit for a given number of successes n1 out of n trials. This is a numerically exact solution to the value.''' if sigma <= 0: raise ValueError("The probability needs to be positive.") cl = -scipy.special.erf(-sigma) ul = np.where(n1 != n, scipy.special.betaincinv(n1+1, n-n1, cl), 1) return ul def binomial_lower(n1, n, sigma=1): '''The lower limit of the one-sigma binomial probability. This is the lower limit for a given number of successes n1 out of n trials. 
This provides a numerically exact solution to the value.''' ll = 1 - binomial_upper(n-n1, n, sigma=sigma) return ll ############################################################################ # Numpy help # ############################################################################### def slicer_vectorized(arr, strindices): '''Extract the substring at strindices from an array. Given a string array arr, extract the substring elementwise corresponding to the indices in strindices.''' arr = np.array(arr, dtype=np.unicode_) indexarr = np.array(strindices, dtype=np.int_) temparr = arr.view('U1').reshape(len(arr), -1)[:,strindices] return np.fromstring(temparr.tostring(), dtype='U'+str(len(indexarr))) def check_null(arr, nullvalue): '''Returns a boolean array indicating which values of arr are nullvalue. The currently recognized types of nullvalues are floats, NaN, and np.ma.masked. This function encapsulates using the appropriate methods, because simply doing arr == nullvalue does not work all of the time, particularly for NaN values.''' if np.isnan(nullvalue): return np.isnan(arr) elif nullvalue is np.ma.masked: return np.ma.getmaskarray(arr) else: return arr == nullvalue ############################################################################### # Matplotlib Boundaries # ############################################################################### def round_bound(lowbounds, upbounds, round_interval): '''Return a lower and upper bound within the given rounding interval. Generally the bounds should be the value plus or minus the error. Round-interval should be the width of the tick marks.''' minbound, maxbound = np.min(lowbounds), np.max(upbounds) lowlim = (minbound // round_interval) * round_interval highlim = ((maxbound // round_interval) + 1) * round_interval return lowlim, highlim def adjust_axes(ax, lowx, highx, lowy, highy, xdiff, ydiff): '''Adjust the given axes to ensure all data fits within them. 
Ensure that the given matplotlib axes can accomodate both the new x and y limits provided in this function, as well as the internal x and y limits. The tick intervals for x and y should be given in xdiff and ydiff.''' min_x, max_x = round_bound(lowx, highx, xdiff) min_y, max_y = round_bound(lowy, highy, ydiff) prev_xmin, prev_xmax = ax.get_xlim() prev_ymin, prev_ymax = ax.get_ylim() min_x = min(min_x, prev_xmin) max_x = max(max_x, prev_xmax) min_y = min(min_y, prev_ymin) max_y = max(max_y, prev_ymax) ax.set_xlim(min_x, max_x) ax.set_ylim(min_y, max_y)
Myson is the largest producer of radiators and towel warmers in the world. Along with its quality line of fan convectors, Myson has committed to stocking thousands of units in the US and receives a new container each week. Its radiators are of the highest-quality construction, and we encourage you to compare the quality. Complete specification catalogs are available upon request.
import tqdm
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

import utils.filesys as fs
import utils.utils as ut
import utils.tfw as tfw
import utils.printer as pr
import utils.datasets as ds

""" A combination of modules that interacts with environment """


class Model:
    """Container tying a list of modules to a TF session, saver and summary.

    Subclasses override _make_model() to populate self.modules; everything
    else (description, var dict, session, saver) is derived from it.
    """

    def __init__(self, name, **kwargs):
        self.name = name
        self._make_model(**kwargs)
        self._set_description()
        self._make_var_dict()
        self._make_session()

    def __del__(self):
        # Guard: __init__ may have failed before _make_session() ran, in
        # which case self.sess does not exist and __del__ would raise.
        if hasattr(self, 'sess'):
            self.sess.close()

    def _set_description(self, **kwargs):
        """Build a human-readable description from every module."""
        desc = 'name: {0}'.format(self.name)
        for module in self.modules:
            desc += '\n/{0}: {1}'.format(module.name, module.get_description())
        self._make_description(desc)

    def _make_description(self, desc):
        # Keep both the raw text and a TF text-summary op for TensorBoard.
        self.description = desc
        self.desc = tf.summary.text(
            'description', tf.convert_to_tensor(desc))

    def _make_model(self, **kwargs):
        # Overridden by subclasses to create the actual modules.
        self.modules = []

    def _make_var_dict(self):
        """Merge every module's variables into one dict for the saver."""
        self.var_dict = {}
        for module in self.modules:
            self.var_dict.update(module.var_dict)

    def _make_session(self):
        self.model_saver = tf.train.Saver(self.var_dict)
        self.initializer = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.reset()
        self.print_num_params()

    def print_num_params(self):
        for module in self.modules:
            pr.log('# of params in {0}: {1}'.format(
                module.name, module.get_num_params()))

    def reset(self):
        """Re-run the variable initializer (reinitializes all weights)."""
        self.sess.run(self.initializer)

    def save(self, path):
        fs.check_dir(path, make=True)
        self.model_saver.save(self.sess, path)

    def load(self, path):
        fs.check_dir(path, fatal=True)
        self.model_saver.restore(self.sess, path)


#---------------------------------------------------------------------------#
#--------------------------------- System ----------------------------------#
#---------------------------------------------------------------------------#


class System:
    """Drives training/testing loops over a {'train': ..., 'test': ...} dataset."""

    def __init__(self, dataset):
        self.dataset = dataset

    def train_x(self, train_func, batch_count=100, batch_size=20,
                procs=None, **kwargs):
        """Train on batches, feeding only batch['x'] to train_func.

        procs defaults to None instead of a mutable [] so the default list
        is not shared between calls.
        """
        procs = procs if procs is not None else []
        gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
        for batch in gen:
            result = train_func(batch['x'], **kwargs)
            for proc in procs:
                proc.process(batch, result)

    def train_xy(self, train_func, batch_count=100, batch_size=20,
                 procs=None, **kwargs):
        """Train on batches, feeding batch['x'] and batch['y'] to train_func."""
        procs = procs if procs is not None else []
        gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
        for batch in gen:
            result = train_func(batch['x'], batch['y'], **kwargs)
            for proc in procs:
                proc.process(batch, result)

    def train_batch(self, train_func, batch_count=100, batch_size=20,
                    procs=None, **kwargs):
        """Train, handing the whole batch dict to train_func.

        NOTE(review): train_func receives batch_count as its second argument
        here, unlike the other train_* methods — looks intentional but
        confirm against the callers.
        """
        procs = procs if procs is not None else []
        gen = ds.batch_generator(self.dataset['train'], batch_count, batch_size)
        for batch in gen:
            result = train_func(batch, batch_count, **kwargs)
            for proc in procs:
                proc.process(batch, result)

    def test_x(self, test_func, batch_size=100):
        """Average test_func over one epoch of the test set (x only)."""
        gen = ds.epoch_generator(self.dataset['test'], batch_size)
        results = [test_func(batch['x']) for batch in gen]
        return np.mean(results)

    def test_xy(self, test_func, batch_size=100):
        """Average test_func over one epoch of the test set (x and y)."""
        gen = ds.epoch_generator(self.dataset['test'], batch_size)
        results = [test_func(batch['x'], batch['y']) for batch in gen]
        return np.mean(results)


#---------------------------------------------------------------------------#
#------------------------------- Processors --------------------------------#
#---------------------------------------------------------------------------#


class ResultProcessor:
    """Base class: receives (batch, result) after every training step."""

    def process(self, batch, result):
        pass


class Logger(ResultProcessor):
    """Writes TF summaries (scalars/images) to a fresh run directory."""

    def __init__(self, model, log_dir='./log/', scalar_step=1, image_step=1000):
        self.log_dir = log_dir + str(ut.generate_id()) + '/'
        fs.check_dir(self.log_dir, make=True)
        self.summary_saver = tf.summary.FileWriter(self.log_dir, model.sess.graph)
        # Record the model description once at the start of the run.
        self.log(model.sess.run(model.desc))
        self.scalar_step = scalar_step
        self.image_step = image_step

    def process(self, batch, result):
        gs = result['global_step']
        s = result.get('summary', None)
        si = result.get('summary-image', None)
        if gs % self.scalar_step == 0:
            self.log(s, gs)
        if gs % self.image_step == 0:
            self.log(si, gs)

    def log(self, summary=None, global_step=0):
        # Silently skip steps that produced no summary.
        if summary is not None:
            self.summary_saver.add_summary(summary, global_step)


class Reporter(ResultProcessor):
    """Prints (and optionally appends to a file) selected result values."""

    def __init__(self, steps=100, kwords=None, log_dir=None):
        self.steps = steps
        # Avoid a shared mutable default list.
        self.kwords = kwords if kwords is not None else []
        self.log_dir = log_dir

    def process(self, batch, result):
        step = batch['step']
        if step % self.steps == 0:
            report = '[step {0}]'.format(step)
            if 'global_step' in result:
                report += '[gstep {0}]'.format(result['global_step'])
            for word in self.kwords:
                report += '[{0} {1:.4f}]'.format(word, result[word])
            # tqdm.write keeps the progress bar intact while printing.
            tqdm.tqdm.write(report)
            self.log2file(report)

    def log2file(self, msg):
        if self.log_dir is not None:
            # The with-statement closes the file; the original's extra
            # file.close() inside the block was redundant.
            with open(self.log_dir + "report.txt", "a") as file:
                file.write(msg + "\n")


class Presenter(ResultProcessor):
    """Periodically renders func()'s image via pyplot and (optionally) TF summaries."""

    def __init__(self, name, sess, func, fig_num=1, steps=100, logger=None):
        self.name = name
        self.sess = sess
        self.func = func
        self.fig_num = fig_num
        self.steps = steps
        self.logger = logger
        plt.ion()
        if logger is not None:
            # Probe func() once to size the placeholder for image summaries.
            image = func()
            self.image_ph = tf.placeholder(shape=image.shape, dtype=tf.float32)
            self.image_r = tfw.compact(self.image_ph)
            self.summary = tf.summary.image(name, self.image_r)

    def __del__(self):
        plt.ioff()
        plt.show()

    def process(self, batch, result):
        gstep = result['global_step']
        if gstep % self.steps == 0:
            images = self.func()
            # Summarizing the image
            if self.logger is not None:
                self.logger.log(
                    self.sess.run(self.summary, {self.image_ph: images}), gstep)
            # Displaying the image
            plt.figure(self.fig_num)
            ut.show2(images)
            plt.pause(1e-5)
With recently renovated, expanded, and extremely well-equipped 2-D and 3-D studios, students engage in their creative pursuits long after classes have ended with card-swipe access to facilities. Upper-level students enjoy spacious, semi-private studio spaces that allow for expanded exploration in material and scale. And when not working, impromptu critique sessions and shared meals often take place in multiple studio art lounge areas. The recently renovated Carnegie Center for the Visual Arts is the hub of the program. The former historic Carnegie Library houses the Department of Art, Design, and Theatre offices as well as providing large, open, light-filled studios for drawing, painting, and design. The building also accommodates individual studio space for upper-level students. Completely renovated in 2013, the Alford 3-D Design Studio houses generous workspace for all levels of sculpture, ceramics, and printmaking. Significant investment from the University has created a top-of-the-line woodworking shop, materials and equipment for metal sculpture and welding, as well as tools for reductive stone carving. The ceramics studio houses 15 potter’s wheels as well as space and equipment for hand-built pottery, glazing, and firing. The printmaking studio houses equipment for relief, intaglio, and lithography processes. Jeremiah Hall serves as a temporary home to studios for the digital photography classes. While much of the student’s work is completed in the Mac labs in the Tyler Digital Communication Center, Jeremiah Hall provides portrait and still photography labs and is available for documenting art work and setting up photo references for subjects.
# -*- coding: utf-8 -*-

__all__ = ('Folder', 'FileFolder', 'Album', 'PhotoAlbum')

import datetime

from mongoengine import *
from bson import ObjectId
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper

from filesize import size

# Maps mongoengine ImageField .format values to HTTP content types.
PHOTO_MIMETYPES = {
    'PNG': 'image/png',
    'JPEG': 'image/jpeg',
    'JPG': 'image/jpeg',
    'GIF': 'image/gif',
    # TODO: add more formats here as they are encountered
}


class Album(Document):
    """ Class representing a photo album.

    Attributes:

    * ref: reference to the document that owns the album
    * created: album creation datetime
    """
    ref = GenericReferenceField()  # document that owns the album
    created = DateTimeField(default=datetime.datetime.now)

    @classmethod
    def new_from_owner(cls, owner):
        """ Create and persist a new album owned by an object """
        obj = cls(ref=owner)
        obj.save()
        return obj

    @property
    def photos(self):
        """ Return a queryset with this album's photos """
        return PhotoAlbum.objects(album=self)

    def put_file(self, infile, pk=None):
        """ Insert a photo into the album

        * infile: input IO-like file object.
        * pk: optionally forces a primary key for the photo (not required).
        """
        photo = PhotoAlbum()
        photo.image.put(infile)
        photo.album = self
        if pk:
            photo.pk = ObjectId(pk)
        photo.save()
        return photo

    def delete_all_photos(self):
        """ Remove every photo from the album """
        for photo in self.photos:
            photo.delete()

    def delete(self, *args, **kwargs):
        """ Delete the album together with its photos """
        self.delete_all_photos()
        return super(Album, self).delete(*args, **kwargs)

    def lock_photos(self):
        """ Lock all photos to forbid their removal. """
        for photo in self.photos:
            photo.locked = True
            photo.save()

    meta = {'allow_inheritance': False,
            'collection': 'album',
            'indexes': [
                {'fields': ['ref']},
            ]}


class PhotoAlbum(Document):
    """ Represents one photo of an album.

    Attributes:

    * image: ImageField storing the image, maximum size 800x600,
      thumbnail of 160x120.
    * locked: whether the photo is locked against removal/editing.
    * comment: photo comment.
    * created: photo creation datetime
    * album: album this photo belongs to.
    """
    image = ImageField(
        db_alias='fs',  # dedicated database for file storage
        size=(800, 600),
        thumbnail_size=(160, 120, False))
    locked = BooleanField(default=False)
    comment = StringField(max_length=200)
    created = DateTimeField(default=datetime.datetime.now)
    album = ReferenceField('Album')

    @property
    def mimetype(self):
        # Returns None for formats missing from PHOTO_MIMETYPES.
        return PHOTO_MIMETYPES.get(self.image.format)

    def as_response(self):
        """ Return an HTTP response containing the full-size image """
        wrapper = FileWrapper(self.image)
        response = HttpResponse(wrapper, content_type=self.mimetype)
        # Cache-busting headers: the image must always be re-fetched.
        response['Cache-Control'] = 'no-store, no-cache, must-revalidate'
        response['Expires'] = 'Sat, 26 Jul 1997 05:00:00 GMT'
        response['Pragma'] = 'no-cache'
        return response

    def json_format(self):
        """ Return a JSON-serializable representation for re-sending. """
        return {
            'pk': str(self.pk),
            'comment': self.comment
        }

    def as_thumb_response(self):
        """ Return an HTTP response containing the thumbnail image. """
        wrapper = FileWrapper(self.image.thumbnail)
        response = HttpResponse(wrapper, content_type=self.mimetype)
        response['Cache-Control'] = 'no-store, no-cache, must-revalidate'
        response['Expires'] = 'Sat, 26 Jul 1997 05:00:00 GMT'
        response['Pragma'] = 'no-cache'
        return response

    def delete(self, force=False, *args, **kwargs):
        """ Delete the photo from the database.

        Locked photos are only removed when force=True; otherwise the call
        is silently ignored.
        """
        if self.locked and not force:
            return
        self.image.delete()  # remove the image blob from the database
        return super(PhotoAlbum, self).delete(*args, **kwargs)

    meta = {'allow_inheritance': False,
            'collection': 'album_photo',
            'ordering': ['created'],
            'indexes': [
                {'fields': ['album', 'created']},
                {'fields': ['created']},
            ]}


class FileFolder(Document):
    """ Represents one file stored inside a folder.
    """
    file = FileField(db_alias='fs')  # dedicated database for file storage
    created = DateTimeField(default=datetime.datetime.now)
    folder = ReferenceField('Folder')

    @property
    def mimetype(self):
        return getattr(self.file, 'content_type')

    @property
    def filename(self):
        """ Return the file name """
        return getattr(self.file, 'name')

    @property
    def size(self):
        """ Return the file size in bytes. """
        return getattr(self.file, 'length')

    @property
    def human_size(self):
        """ Return the human-readable file size """
        return size(self.size)

    def json_format(self):
        """ Return a JSON-serializable representation of the file """
        return {
            'pk': str(self.pk),
            'filename': self.filename,
            'human_size': self.human_size
        }

    def as_response(self):
        """ Return an HTTP response with the file as an attachment. """
        wrapper = FileWrapper(self.file)
        response = HttpResponse(wrapper, content_type=self.mimetype)
        response['Content-Disposition'] = (
            u'attachment; filename=%s' % self.filename).encode('utf8')
        response['Cache-Control'] = 'no-cache'
        return response

    def delete(self, *args, **kwargs):
        """ Remove the file from the database. """
        self.file.delete()  # remove the file blob from the database
        return super(FileFolder, self).delete(*args, **kwargs)

    meta = {'allow_inheritance': False,
            'collection': 'file_folder',
            'ordering': ['created'],
            'indexes': [
                {'fields': ['folder', 'created']},
                {'fields': ['created']},
            ]}


class Folder(Document):
    """ Represents a folder of files.

    Attributes:

    * ref: reference to the document that owns the folder
    * created: folder creation datetime
    """
    ref = GenericReferenceField()  # document that owns the folder
    created = DateTimeField(default=datetime.datetime.now)

    @classmethod
    def new_from_owner(cls, owner):
        """ Create and persist a folder owned by an object """
        obj = cls(ref=owner)
        obj.save()
        return obj

    @property
    def files(self):
        """ Return the files contained in the folder """
        return FileFolder.objects(folder=self)

    def put_file(self, infile, **kwargs):
        """ Insert a file into the folder """
        f = FileFolder()
        f.file.put(infile, **kwargs)
        f.folder = self
        f.save()
        return f

    def delete(self, *args, **kwargs):
        """ Delete the folder together with its files """
        for f in self.files:
            f.delete()
        return super(Folder, self).delete(*args, **kwargs)

    meta = {'allow_inheritance': False,
            'collection': 'folder',
            'indexes': [
                {'fields': ['ref']},
            ]}
Books To Read In Your 20s Books Everyone Should Read The Alchemist Paulo Coelho Book Club Books My Books Great Books Uplifting Books Best Inspirational Books Motivational Books Forwards The Alchemist Paulo Coelho - This is a great book for a book club.... With his numerous inspiring and motivational books - Hippie is the autobiographical novel in which he takes his fascinated readers to re-live the dream of a generation that longed for peace and dared to challenge the established social order. Download chennai tamil nadu footprint focus guide or read online here in PDF or EPUB. Please click button to get chennai tamil nadu footprint focus guide book now. All books are in clear copy here, and all files are secure so don't worry about it. conditionalist faith of our fathers pdf With his numerous inspiring and motivational books - Hippie is the autobiographical novel in which he takes his fascinated readers to re-live the dream of a generation that longed for peace and dared to challenge the established social order. Download chennai tamil nadu footprint focus guide or read online here in PDF or EPUB. Please click button to get chennai tamil nadu footprint focus guide book now. All books are in clear copy here, and all files are secure so don't worry about it. My mother had the entire set from this series, and every book was beautifully illustrated. It took me ages to wade through classical Tamil, but the pictures kept me going.
# -*- coding: utf-8 -*-


class Syscmd(object):
    """Module-as-object proxy: attribute access resolves an executable on
    PATH and returns a callable that runs it via the subprocess helpers in
    the project's ``syscmd`` module."""

    def _cmd(self, cmd, *args, **kwargs):
        """Execute system commands

        Args:
            *args: The positional arguments are used as arguments to the
                command. For example, the following python code:
                    _cmd("git", "commit", "--help")
                would execute:
                    git commit --help
            f: One of CALL, CHECK_CALL, or CHECK_OUTPUT. Corresponds to the
                function from the subprocess module called to execute the
                command. Defaults to CHECK_CALL
            **kwargs: The keyword arguments are passed through to the
                subprocess function as-is.

        Returns:
            Whatever is returned by the respective subprocess function. For
            example, f=CALL would return the returncode attribute, and
            f=CHECK_OUTPUT would return the content of stdout.

        Examples:
            The following call:
                _cmd("git", "commit", "-m", "Commit Message", cwd="/path/to/repo")
            results in:
                subprocess.check_call(["git", "commit", "-m", "Commit Message"],
                                      cwd="/path/to/repo")
            And:
                _cmd("git", "checkout", "-b", "branch_name", f=CHECK_OUTPUT,
                     cwd="/path/to/repo")
            results in:
                subprocess.check_output(["git", "checkout", "-b", "branch_name"],
                                        cwd="/path/to/repo")
        """
        # Imported lazily: this module replaces itself in sys.modules below,
        # so top-level imports would not survive as globals.
        import syscmd
        f = kwargs.pop('f', syscmd.CHECK_CALL)
        f = syscmd._sub_calls[f]
        full_args = (cmd,) + tuple(args)
        full_kwargs = syscmd._default_subprocess_kwargs.copy()
        full_kwargs.update(kwargs)
        return f(full_args, **full_kwargs)

    def _which(self, cmd):
        """Return the absolute path of cmd on PATH, or None if not found."""
        import os
        for path in os.environ.get('PATH', '').split(os.pathsep):
            if path == "":
                continue
            full_path = os.path.join(path, cmd)
            # os.access(X_OK) alone also matches executable *directories*;
            # require a regular file so e.g. a dir named "git" cannot win.
            if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
                return full_path
        return None

    def __getattr__(self, name):
        from functools import partial
        cmd = self._which(name)
        # Identity check against the None sentinel (was: cmd != None).
        if cmd is not None:
            return partial(self._cmd, cmd)
        raise AttributeError("'module' object has no attribute %r" % (name,))


import sys
# Replace this module object with a Syscmd instance so that
# `import thismodule; thismodule.git(...)` dispatches through __getattr__.
sys.modules[__name__] = Syscmd()
Traveling to or studying in the Marshall Islands, it can be useful to learn how to say and pronounce Primary Nouns words such as supermarket. Located in East Asia & Pacific, the Marshall Islands has a population of 54,816 (2011) and sees the arrival of 5,000 (2010) tourists a year. In the Marshall Islands, they speak English and Marshallese. SpeakSheets offers our Lifetime Members printable and downloadable language one-sheets with no monthly costs ever, guaranteed. With more than 17 languages covering over 2.3 billion people worldwide, we can help make your travels to the Marshall Islands and beyond even more fun! Since a national language of the Marshall Islands is English, you should do fine just saying "supermarket". We provide translations for these phrases and over 200 more and growing. We guarantee our language sheets are the best on the web. How to say "supermarket" in Marshallese We don't have Marshallese SpeakSheets quite yet. However, we are always working on new languages and more helpful terms for our sheets. Our Lifetime Members can request new languages for no additional charge, and those requests get top priority. We generally provide SpeakSheets for a new language, professionally translated, in about a month. So if you are planning ahead for your trip, you can sign up today, request a new language, and we'll let you know when it is available. SpeakSheets is money-back satisfaction guaranteed!
""" Inspired by yunohost_completion.py (author: Christophe Vuillot) ======= This script generates man pages for yunohost. Pages are stored in OUTPUT_DIR """ import os import yaml import gzip import argparse from datetime import date from collections import OrderedDict from jinja2 import Template base_path = os.path.split(os.path.realpath(__file__))[0] template = Template(open(os.path.join(base_path, "manpage.template")).read()) THIS_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) ACTIONSMAP_FILE = os.path.join(THIS_SCRIPT_DIR, '../data/actionsmap/yunohost.yml') def ordered_yaml_load(stream): class OrderedLoader(yaml.Loader): pass OrderedLoader.add_constructor( yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, lambda loader, node: OrderedDict(loader.construct_pairs(node))) return yaml.load(stream, OrderedLoader) def main(): parser = argparse.ArgumentParser(description="generate yunohost manpage based on actionsmap.yml") parser.add_argument("-o", "--output", default="output/yunohost") parser.add_argument("-z", "--gzip", action="store_true", default=False) args = parser.parse_args() if os.path.isdir(args.output): if not os.path.exists(args.output): os.makedirs(args.output) output_path = os.path.join(args.output, "yunohost") else: output_dir = os.path.split(args.output)[0] if output_dir and not os.path.exists(output_dir): os.makedirs(output_dir) output_path = args.output # man pages of "yunohost *" with open(ACTIONSMAP_FILE, 'r') as actionsmap: # Getting the dictionary containning what actions are possible per domain actionsmap = ordered_yaml_load(actionsmap) for i in actionsmap.keys(): if i.startswith("_"): del actionsmap[i] today = date.today() result = template.render( month=today.strftime("%B"), year=today.year, categories=actionsmap, str=str, ) if not args.gzip: with open(output_path, "w") as output: output.write(result) else: with gzip.open(output_path, mode="w", compresslevel=9) as output: output.write(result) if __name__ == '__main__': main()
a80+ flight ground test without overhaul demonstrated by RD-0120. b Operates from numerous non-space launcher bases. All can carry personnel or payload, but are automatic, autonomous vehicles. oxidizer mass is always many times greater than the fuel mass: it is the oxidizer that affects the mass of propellants the most. Reuse and sustained operations implies that the returned vehicle is ready for another flight after an inspection. With today's rocket engines this is improbable, because they are designed for minimum weight and not for sustained use, as aircraft engines are. Designing rocket engines for sustained use would require readopting the philosophy in place for the XLR-129. Flights before overhaul is indicative of an operational system that has sustained operational capability and need not be refurbished after every launch. In 1964 the goals for the vehicle to support the Manned Orbiting Laboratory (MOL) and the XLR-129 was 100 flights before overhaul. One of the serious impediments to commercial operations is that there is only one launch site available per launcher. This may be acceptable for the commercial communications satellite organizations, just as operations from one coal mine was acceptable for the first commercial railroad train in York, England. A commercial space transportation system will have to have the characteristics of a UPS or Federal Express system to be truly commercial. Until the launchers are designed for a lower mass ratio, say, four or less, that will not be practicable. When a mass ratio of four or less is achieved the entire concept of operations will change, because with the correct hypersonic configuration and propulsion system the time-consuming vertical assembly, fueling and month-long count-down will be eliminated. Runway operations will become the norm, opening more launch and return sites for distributed operations. 
Orbital plane change and offset maneuvers will be far more economical when executed during ascent rather than from orbit.
In addition, the offset or orbital plane maneuver would be carried out by the first stage flying as an aircraft in the atmosphere, not by the stage reaching orbital speed and altitude [Czysz and Vandenkerckhove, 2000]. The propulsion conclusions apply to TSTO as well as SSTO.
#!/usr/bin/env python3
import math, os, ctypes
import sdl2, sdl2.sdlmixer, sdl2.sdlimage


class Win:
    """SDL2 window with a fixed-FPS event/render loop.

    Instance attributes (set in __init__/create):
    # s, w, h, rect_ref
    # run_fps, run_t_end, run_t_begin, run_frame_t, run_frame_t_ms
    # window, windowsurface
    # event, event_ref
    # on_key, on_click, on_declick, on_move
    # draw
    """

    def __init__(self, w, h, s='win', run_fps=60, on_key=None,
                 on_click=None, on_declick=None, on_move=None):
        self.w, self.h = (w, h)
        self.s = s
        self.run_fps = run_fps
        # Optional event callbacks, invoked from tick().
        self.on_key = on_key
        self.on_click = on_click
        self.on_declick = on_declick
        self.on_move = on_move
        # Keyboard state: last scancode plus a pressed-flag per scancode.
        self.keyc = sdl2.keycode.SDL_SCANCODE_UNKNOWN
        self.keys = [False] * sdl2.SDL_NUM_SCANCODES
        # Mouse state: pixel coords, normalized [-0.5, 0.5) coords, buttons.
        self.mx, self.my = (0, 0)
        self.mx_f, self.my_f = (0.0, 0.0)
        self.mb0_, self.mb1_, self.mb2_ = (False, False, False)
        # NOTE(review): a failed create() cannot actually be signalled from
        # __init__ (the return value is discarded); kept for compatibility.
        if not self.create():
            return None

    def create(self):
        """Load draw.so, init SDL video and create the window/surface.

        Returns False on any failure, True on success.
        """
        self.draw = ctypes.cdll.LoadLibrary('draw.so')
        if not self.draw:
            return False
        if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO):
            return False
        self.window = sdl2.SDL_CreateWindow(
            bytes(self.s, 'utf-8'),
            sdl2.SDL_WINDOWPOS_CENTERED, sdl2.SDL_WINDOWPOS_CENTERED,
            self.w, self.h, sdl2.SDL_WINDOW_SHOWN)
        if not self.window:
            return False
        self.windowsurface = sdl2.SDL_GetWindowSurface(self.window)
        # sdl2.mouse.SDL_ShowCursor (False)
        self.rect_ref = ctypes.byref(sdl2.SDL_Rect(0, 0, self.w, self.h))
        # Hand the raw pixel buffer to the native drawing library.
        self.draw.draw_init(ctypes.c_void_p(
            self.windowsurface.contents.pixels), self.w, self.h)
        sdl2.SDL_UpdateWindowSurface(self.window)
        self.event = sdl2.SDL_Event()
        self.event_ref = ctypes.byref(self.event)
        # Frame pacing: target duration per frame in seconds and ms.
        self.run_frame_t = 1.0 / self.run_fps
        self.run_frame_t_ms = int(self.run_frame_t * 1000.0)
        self.run_t_begin = self.run_t_end = sdl2.SDL_GetTicks()
        return True

    def quit(self):
        sdl2.SDL_DestroyWindow(self.window)
        sdl2.SDL_Quit()

    def tick(self):
        """Drain pending events, fire callbacks, then sleep to hold the FPS.

        Returns False when the app should exit (window close or ESC).
        """
        while sdl2.SDL_PollEvent(self.event_ref) != 0:
            if self.event.type == sdl2.SDL_QUIT:
                return False
            elif self.event.type == sdl2.SDL_KEYDOWN:
                if self.event.key.keysym.scancode ==\
                        sdl2.keycode.SDL_SCANCODE_ESCAPE:
                    return False
                self.keyc = self.event.key.keysym.scancode
                self.keys[self.keyc] = True
                if self.on_key:
                    self.on_key()
            elif self.event.type == sdl2.SDL_KEYUP:
                # Fix: clear the flag of the key that was actually released.
                # The original reset self.keyc to SDL_SCANCODE_UNKNOWN first
                # and then cleared keys[UNKNOWN], leaving the released key's
                # entry stuck at True forever.
                self.keys[self.event.key.keysym.scancode] = False
                self.keyc = sdl2.keycode.SDL_SCANCODE_UNKNOWN
            elif self.event.type == sdl2.SDL_MOUSEBUTTONDOWN:
                if self.event.button.button == sdl2.SDL_BUTTON_LEFT:
                    self.mb0_ = True
                elif self.event.button.button == sdl2.SDL_BUTTON_RIGHT:
                    self.mb1_ = True
                elif self.event.button.button == sdl2.SDL_BUTTON_MIDDLE:
                    self.mb2_ = True
                self.mx = self.event.button.x
                self.my = self.event.button.y
                self.mx_f = (self.mx / self.w) - 0.5
                self.my_f = (self.my / self.h) - 0.5
                if self.on_click:
                    self.on_click()
            elif self.event.type == sdl2.SDL_MOUSEBUTTONUP:
                self.mx = self.event.button.x
                self.my = self.event.button.y
                self.mx_f = (self.mx / self.w) - 0.5
                self.my_f = (self.my / self.h) - 0.5
                # Callback runs before the button flags are cleared so it can
                # still see which button was held.
                if self.on_declick:
                    self.on_declick()
                if self.event.button.button == sdl2.SDL_BUTTON_LEFT:
                    self.mb0_ = False
                elif self.event.button.button == sdl2.SDL_BUTTON_RIGHT:
                    self.mb1_ = False
                elif self.event.button.button == sdl2.SDL_BUTTON_MIDDLE:
                    self.mb2_ = False
            elif self.event.type == sdl2.SDL_MOUSEMOTION:
                self.mx = self.event.motion.x
                self.my = self.event.motion.y
                self.mx_f = (self.mx / self.w) - 0.5
                self.my_f = (self.my / self.h) - 0.5
                if self.on_move:
                    self.on_move()
        self.run_t_end = sdl2.SDL_GetTicks()
        # Remaining time in this frame slot = frame_ms - elapsed.
        _t = self.run_t_begin - self.run_t_end + self.run_frame_t_ms
        if _t > 0:
            sdl2.SDL_Delay(_t)
        self.run_t_begin = sdl2.SDL_GetTicks()
        return True

    def render(self):
        sdl2.SDL_UpdateWindowSurface(self.window)
        return True
Business Announcement: Siam Sindhorn Co, ltd. Managing Director Chalaluck Bunnag and Clarence Tan, IHG’s Managing Director of South East Asia & Korea signed a partnership agreement which will bring the Kimpton Hotel Brand to be part of Sindhorn Village, mixed use project in the Langsuan area. Chalaluck Bunnag Managing Director Siam Sindhorn Co., Ltd. Chonpreya Pacharaswate Chief Operation Officer Siam Sindhorn Co., Ltd.
from zExceptions import Redirect
from zope.component import ComponentLookupError
from plone.app.layout.viewlets.common import ViewletBase
from Products.Five import BrowserView
from Products.CMFPlone.utils import getToolByName
from bda.disclaimer.interfaces import IDisclaimerText


class DisclaimerViewlet(ViewletBase):
    # Viewlet rendered on every page: redirects visitors who have not yet
    # accepted the disclaimer (no '_dc_acc' cookie) to the disclaimer view.

    def update(self):
        # Cookie '_dc_acc' is set by DisclaimerPage once the user accepts.
        self.accepted = self.request.cookies.get('_dc_acc')

    def render(self):
        # Render nothing if already accepted, or if we are already on the
        # disclaimer page itself (avoids a redirect loop).
        if self.accepted \
                or self.request['ACTUAL_URL'].endswith('/@@disclaimer'):
            return ''
        purl = getToolByName(self.context, 'portal_url')
        pobj = purl.getPortalObject()
        url = '%s/%s' % (pobj.absolute_url(), '@@disclaimer')
        # Redirect is raised (Zope exception-based redirect), not returned.
        raise Redirect(url)


class DisclaimerPage(BrowserView):
    # Browser view showing the disclaimer text and accept form.

    def currentlang(self):
        # Current language binding from portal_languages, or None if the
        # tool is unavailable.
        plt = getToolByName(self.context, 'portal_languages')
        if not plt:
            return None
        return plt.getLanguageBindings()[0]

    def pagetitle(self):
        purl = getToolByName(self.context, 'portal_url')
        pobj = purl.getPortalObject()
        return pobj.title

    def checkdisclaimer(self):
        # Called from the template: if the form was submitted with the
        # accept flag, set the cookie; in either accepted case, bounce the
        # user back to the portal root instead of displaying the page.
        display = True
        if self.request.form.get('_dc_accept') == '1' \
                and self.request.form.get('_dc_submitted'):
            self.request.response.setCookie('_dc_acc', '1', path='/')
            display = False
        elif self.request.cookies.get('_dc_acc'):
            display = False
        if not display:
            purl = getToolByName(self.context, 'portal_url')
            pobj = purl.getPortalObject()
            url = pobj.absolute_url()
            raise Redirect(url)

    def disclaimertext(self):
        # Look up the disclaimer text adapter; degrade to an explanatory
        # message rather than erroring the whole page.
        # NOTE: Python 2 "except X, e" syntax — this module targets Plone
        # on Python 2.
        try:
            return IDisclaimerText(self.context)()
        except ComponentLookupError, e:
            return 'No Disclaimer text registered. %s' % str(e)
        except AttributeError, e:
            return 'Disclaimer Text not provided properly. %s' % str(e)
Mainstay Dinnerware Lenox Dinnerware Corelle Dinnerware Clearance Mainstays Amelia is just one of the many collections of pictures or photos that are on this website. you will get a lot of information about in here. We not only provide information about only, you will get a lot more references to design your dream home. So , don't forget to keep visit Editionslechienrouge.org to get the latest information about home design, kitchen, bedroom, bathroom, living room, and furniture and more. Mainstay Dinnerware Lenox Dinnerware Corelle Dinnerware Clearance Mainstays Amelia was posted in December 19, 2018 at 1:31 pm. Mainstay Dinnerware Lenox Dinnerware Corelle Dinnerware Clearance Mainstays Amelia has viewed by 25 users. Click it and download the Mainstay Dinnerware Lenox Dinnerware Corelle Dinnerware Clearance Mainstays Amelia.
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import re


def parse_record(parent_field, record):
    """Flatten one nested record dict into parallel name/value lists.

    Leaf entries are dicts holding "converted" (preferred) or "raw" values;
    any other dict is treated as a nested subgroup and recursed into, with
    parent field names joined by spaces.

    Raises:
        Exception: if a record value is not a dict (unhandled shape).
    """
    field_names = []
    field_values = []
    for name in record:
        if isinstance(record[name], dict):
            new_parent_field = parent_field.copy()
            new_parent_field.append(name)
            names = " ".join(new_parent_field)
            if "converted" in record[name]:
                field_names.append(names)
                field_values.append(record[name]["converted"])
            elif "raw" in record[name]:
                field_names.append(names)
                field_values.append(record[name]["raw"])
            else:
                # Must have subgroups:
                sub_names, sub_values = parse_record(new_parent_field, record[name])
                field_names.extend(sub_names)
                field_values.extend(sub_values)
        else:
            raise Exception("Unhandled parsing")
    return field_names, field_values


def parse_output(actual_out=None, horizontal=False, header_len=2, merge_header=True):
    """
    common parser for all show commands will return tuple of following
    @param heading : first line of output
    @param header: Second line of output
    @param params: list of parameters
    """
    # Fix: the default was a mutable {} shared between calls; use the
    # None-sentinel idiom instead.
    if actual_out is None:
        actual_out = {}
    title = actual_out["title"]
    description = actual_out.get("description", "")
    data_names = {}
    data_values = []
    num_records = 0

    for group in actual_out["groups"]:
        for record in group["records"]:
            temp_names, temp_values = parse_record([], record)

            # We assume every record has the same set of names
            if len(data_names) == 0:
                data_names = temp_names

            data_values.append(temp_values)
            num_records += 1

    return title, description, data_names, data_values, num_records
def get_separate_output(in_str=""):
    """Split *in_str* into the JSON documents embedded in it.

    Each document is delimited by a '{' at the start of a line and a '}' at
    the start of a later line; ANSI escape sequences are stripped before the
    text is parsed.

    :returns: list of parsed JSON objects
    """
    _regex = re.compile(r"((?<=^{).*?(?=^}))", re.MULTILINE | re.DOTALL)
    out = re.findall(_regex, in_str)
    ls = []

    for item in out:
        item = remove_escape_sequence(item)
        # Re-wrap the captured body with the braces the lookarounds excluded.
        item = "{" + item + "}"
        ls.append(json.loads(item))

    return ls


def get_merged_header(*lines):
    """Merge a multi-line column header into one name per column.

    A '.' cell means the column's name ended on a previous line. Returns []
    when the lines do not all have the same column count.
    """
    h = [[_f for _f in _h.split(" ") if _f] for _h in lines]
    header = []

    if len(h) == 0 or any(len(h[i]) != len(h[i + 1]) for i in range(len(h) - 1)):
        return header

    for idx in range(len(h[0])):
        header_i = h[0][idx]

        for jdx in range(len(h) - 1):
            if h[jdx + 1][idx] == ".":
                break
            header_i += " " + h[jdx + 1][idx]

        header.append(header_i)

    return header


def check_for_subset(actual_list, expected_sub_list):
    """Return True when every entry of *expected_sub_list* occurs in
    *actual_list*.

    An entry may be a tuple of alternatives; the entry matches when any
    alternative is present (a ``None`` alternative always matches). Prints
    the missing entry as a debugging aid before returning False.
    """
    if not expected_sub_list:
        return True
    if not actual_list:
        return False

    for expected in expected_sub_list:
        if isinstance(expected, tuple):
            # Tuple entry: any one alternative (or None) satisfies the check.
            if not any(alt is None or alt in actual_list for alt in expected):
                print(expected, actual_list)
                return False
        elif expected not in actual_list:
            print(expected)
            return False

    return True


# Checks that a single expected list has a subset equal to actual_list.
def check_for_subset_in_list_of_lists(actual_list, list_of_expected_sub_lists):
    return any(check_for_subset(actual_list, expected)
               for expected in list_of_expected_sub_lists)


def remove_escape_sequence(line):
    """Strip ANSI terminal escape sequences from *line*."""
    ansi_escape = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]")
    return ansi_escape.sub("", line)


def check_for_types(actual_lists, expected_types):
    """Return True when every list in *actual_lists* matches *expected_types*.

    Each entry of *expected_types* (int, float, bool or str) is checked
    positionally against the string values of each actual list. str values
    must not be parseable as any of the other types.
    """

    def is_float(x):
        # A "float" must contain a decimal point; otherwise it is an int.
        try:
            float(x)
        except ValueError:
            return False
        return "." in x

    def is_int(x):
        try:
            int(x)
        except ValueError:
            return False
        return "." not in x

    def is_bool(x):
        return x in ("True", "true", "False", "false")

    def check_list_against_types(a_list):
        if a_list is None or expected_types is None:
            return False
        if len(a_list) != len(expected_types):
            return False
        for typ, val in zip(expected_types, a_list):
            if typ == int:
                if not is_int(val):
                    return False
            elif typ == float:
                if not is_float(val):
                    return False
            elif typ == bool:
                if not is_bool(val):
                    return False
            elif typ == str:
                if is_bool(val) or is_int(val) or is_float(val):
                    return False
            else:
                # BUGFIX: typo "handles" -> "handled" in the message.
                raise Exception("Type is not yet handled in test_util.py", typ)
        return True

    # BUGFIX: replaced `== False` comparison with idiomatic boolean logic.
    return all(check_list_against_types(a_list) for a_list in actual_lists)
For the most part, people are either Apple people or not. If you see someone with an iPhone, it’s usually a safe assumption that they use a Mac. However, some people don't fit neatly into that box, because they haven’t conformed to one way of doing things. The most common variant of this is the iPhone user who prefers Windows, but we've also come across a surprising number of people with MacBooks and Android phones. For whatever reason, someone has a Mac and an Android, and they don’t always know how to make them play nice together. These renegade users can run into trouble, especially when trying to transfer media files, such as their music, from their computer to their phone, or vice-versa. Unlike the iPhone/Windows people who have iTunes to ease such syncing, Android/Mac folks generally rely on a USB cable and a prayer. Unfortunately, they're too often left with a Mac that won’t recognize the connected Android, and no idea what to do next. Perhaps an Android Transfer tool may help you transfer music from Mac to Android in a convenient way. Check and follow the tutorial below. You can use Android Transfer to copy music between Mac and Android phone without using other fancy methods. Click and download it below. Connect your Android phone or tablet to Mac via a USB cable. The Android Transfer will detect your Android phone or tablet in seconds. The information of your phone will be displayed as below. Click the Music tab at the left column, click Add on the menu bar. Choose Add music files from Mac. Then navigate and select the songs you want to transfer to your phone. That's it. In only two steps, you can copy every single song you want from your Mac to your Android phone. You can find more functions in it. Download it below!
import logging

from haystack.forms import SearchForm
from haystack.query import SearchQuerySet

from django import forms
from django.core.exceptions import ObjectDoesNotExist
from django.forms.fields import CharField
from django.utils.translation import ugettext_lazy as _

from models import UserProfile

log = logging.getLogger(__name__)


class UserProfileForm(forms.ModelForm):
    """Edit form for a user's profile, including the related User's name."""

    first_name = CharField(label=_('First name'), required=False)
    last_name = CharField(label=_('Last name'), required=False)

    class Meta:
        model = UserProfile
        # Don't allow users edit someone else's user page,
        exclude = ('user', 'whitelisted')

    def __init__(self, *args, **kwargs):
        super(UserProfileForm, self).__init__(*args, **kwargs)
        try:
            self.fields['first_name'].initial = self.instance.user.first_name
            self.fields['last_name'].initial = self.instance.user.last_name
        except (AttributeError, ObjectDoesNotExist):
            # The profile may not be linked to a user yet (e.g. an unsaved
            # instance); leave the name fields blank in that case.
            # BUGFIX: was a bare `except:`, which also hid real errors.
            pass

    def save(self, *args, **kwargs):
        """Save the profile and propagate first/last name to the User."""
        first_name = self.cleaned_data.pop('first_name', None)
        last_name = self.cleaned_data.pop('last_name', None)
        profile = super(UserProfileForm, self).save(*args, **kwargs)
        if kwargs.get('commit', True):
            user = profile.user
            user.first_name = first_name
            user.last_name = last_name
            user.save()
        return profile


class FacetField(forms.MultipleChoiceField):
    '''
    For filtering searches on a facet, with validation for the
    format of facet values.
    '''

    def valid_value(self, value):
        '''
        Although this is a choice field, no choices need to be supplied.
        Instead, we just validate that the value is in the correct format
        for facet filtering (facet_name:value)
        '''
        if ":" not in value:
            return False
        return True


class FacetedSearchForm(SearchForm):
    '''
    Supports fetching faceted results with a corresponding query.

    `facets`
        A list of facet names for which to get facet counts

    `models`
        Limit the search to one or more models
    '''
    selected_facets = FacetField(required=False)

    def __init__(self, *args, **kwargs):
        facets = kwargs.pop('facets', [])
        models = kwargs.pop('models', [])
        super(FacetedSearchForm, self).__init__(*args, **kwargs)
        for facet in facets:
            self.searchqueryset = self.searchqueryset.facet(facet)
        if models:
            self.searchqueryset = self.searchqueryset.models(*models)

    def clean_selected_facets(self):
        """Normalize facet filters to `field:"value"` and drop empties."""
        facets = self.cleaned_data['selected_facets']
        cleaned_facets = []
        clean = SearchQuerySet().query.clean
        for facet in facets:
            field, value = facet.split(":", 1)
            if not value:
                # Ignore empty values
                continue
            value = clean(value)
            cleaned_facets.append(u'%s:"%s"' % (field, value))
        return cleaned_facets

    def search(self):
        """Run the base search, narrowed by each selected facet."""
        sqs = super(FacetedSearchForm, self).search()
        for facet in self.cleaned_data['selected_facets']:
            sqs = sqs.narrow(facet)
        self.searchqueryset = sqs
        return sqs
Which TPA2 Hero class is your favourite? So, given that you have a choice of what class the Hero will be, who do you usually choose? Myself, I go for a Psy Fighter in the vast majority of cases (along with useful Orb intake, making it a versatile, powerful and highly mobile unit), but have experimented with other class types as well and found that the Assassin is pretty good, especially when backed up by orbs (effectively killing Shadow Guardians on the last map in one go with Mega Stab!). I prefer Psy Fighter. The rest of the classes aren't as expensive in the meat market. Although, my favorite not counting cost would be the swordsman, 'cause I can go to the frontlines and deal major damage without dying. My favourite class would have to be the Psy Fighter because the majority of the orbs are geared towards Psy abilities, and I like having my hero stay back and shoot people and heal people at the same time. In TPA2, psy fighters can't heal. This would make a great poll. I like the bowman, because they can attack while being out of harm's way. Good point, forgot to add it. I find that the extra perks of using a Swordsman and a Bowman as Heroes don't really pay off: the special stats and the use of orbs seem to enhance the Psy Fighter and the Assassin to a much greater effect. Unlike the other classes, the Psy Fighter has to train Psy Power. With the other classes you can train Strength and allow the orbs to boost the amount of PsP instead of training Psy Power. True, but then the Psy Fighter only needs one category to invest the gold in: Psy Power. And the orbs will boost PsP anyway. Whilst for other classes you also have to spend gold boosting the Strength stat, along with the Psy Power stat for any of the special attacks (this especially applies to the Stab attacks by the Assassin). You only have to boost Psy Power to a point; then it will be cheaper to upgrade strength.
That's true as well, but then quite a few of the attacks from other classes will not be boosted by Psy Power, so that gold investment will not be as good, as only some of the attacks will deal more damage. I guess to each their own. I like the bowman, because she is easy to keep out of harm's way. Out of interest, as I never used the Bowman as the Hero, do you actually use her in battles much, or do you effectively keep her as far away from the battle as possible? I like to use her with a spearman or swordsman. The melee unit takes on the soldier, while the bowman picks them off.
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Cisco Systems, Inc.
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import

import collections
import copy
import logging

import netaddr

from django.conf import settings
from django.utils.translation import ugettext_lazy as _

from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutron_client
import six

from horizon import exceptions
from horizon import messages
from horizon.utils.memoized import memoized

from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.developer.profiler import api as profiler
from openstack_dashboard import policy


LOG = logging.getLogger(__name__)

# Human-friendly labels for subnet IP versions.
IP_VERSION_DICT = {4: 'IPv4', 6: 'IPv6'}

OFF_STATE = 'OFF'
ON_STATE = 'ON'

# device_owner values that identify a port as a router interface.
ROUTER_INTERFACE_OWNERS = (
    'network:router_interface',
    'network:router_interface_distributed',
    'network:ha_router_replicated_interface'
)


class NeutronAPIDictWrapper(base.APIDictWrapper):
    """Base wrapper for Neutron API dict responses.

    Normalizes 'admin_state_up' into a display-friendly 'admin_state'
    attribute and rewrites keys containing ':' (which Django templates
    cannot address) into '__' variants.
    """

    def __init__(self, apidict):
        if 'admin_state_up' in apidict:
            if apidict['admin_state_up']:
                apidict['admin_state'] = 'UP'
            else:
                apidict['admin_state'] = 'DOWN'

        # Django cannot handle a key name with ':', so use '__'.
        apidict.update({
            key.replace(':', '__'): value
            for key, value in apidict.items()
            if ':' in key
        })
        super(NeutronAPIDictWrapper, self).__init__(apidict)

    def set_id_as_name_if_empty(self, length=8):
        """Use a (truncated) id as the display name when name is blank."""
        try:
            if not self._apidict['name'].strip():
                id = self._apidict['id']
                if length:
                    id = id[:length]
                self._apidict['name'] = '(%s)' % id
        except KeyError:
            pass

    def items(self):
        return self._apidict.items()

    @property
    def name_or_id(self):
        # Fall back to the first 13 chars of the id when name is blank.
        return (self._apidict.get('name').strip() or
                '(%s)' % self._apidict['id'][:13])


class Agent(NeutronAPIDictWrapper):
    """Wrapper for neutron agents."""


class Network(NeutronAPIDictWrapper):
    """Wrapper for neutron Networks."""


class Subnet(NeutronAPIDictWrapper):
    """Wrapper for neutron subnets."""

    def __init__(self, apidict):
        # Add a display string for the IP version (e.g. 'IPv4').
        apidict['ipver_str'] = get_ipver_str(apidict['ip_version'])
        super(Subnet, self).__init__(apidict)


class SubnetPool(NeutronAPIDictWrapper):
    """Wrapper for neutron subnetpools."""


class Port(NeutronAPIDictWrapper):
    """Wrapper for neutron ports."""

    def __init__(self, apidict):
        if 'mac_learning_enabled' in apidict:
            apidict['mac_state'] = \
                ON_STATE if apidict['mac_learning_enabled'] else OFF_STATE
        pairs = apidict.get('allowed_address_pairs')
        if pairs:
            # Deep-copy before wrapping so the caller's dict is untouched.
            apidict = copy.deepcopy(apidict)
            wrapped_pairs = [PortAllowedAddressPair(pair)
                             for pair in pairs]
            apidict['allowed_address_pairs'] = wrapped_pairs
        super(Port, self).__init__(apidict)


class PortAllowedAddressPair(NeutronAPIDictWrapper):
    """Wrapper for neutron port allowed address pairs."""

    def __init__(self, addr_pair):
        super(PortAllowedAddressPair, self).__init__(addr_pair)
        # Horizon references id property for table operations
        self.id = addr_pair['ip_address']


class Router(NeutronAPIDictWrapper):
    """Wrapper for neutron routers."""


class RouterStaticRoute(NeutronAPIDictWrapper):
    """Wrapper for neutron routes extra route."""

    def __init__(self, route):
        super(RouterStaticRoute, self).__init__(route)
        # Horizon references id property for table operations
        self.id = route['nexthop'] + ":" + route['destination']


class SecurityGroup(NeutronAPIDictWrapper):
    """Wrapper for a neutron security group with wrapped rules."""
    # Required attributes: id, name, description, tenant_id, rules

    def __init__(self, sg, sg_dict=None):
        if sg_dict is None:
            sg_dict = {sg['id']: sg['name']}
        sg['rules'] = [SecurityGroupRule(rule, sg_dict)
                       for rule in sg['security_group_rules']]
        super(SecurityGroup, self).__init__(sg)

    def to_dict(self):
        # Rules are wrapper objects, so they are excluded here.
        return {k: self._apidict[k] for k in self._apidict if k != 'rules'}


@six.python_2_unicode_compatible
class SecurityGroupRule(NeutronAPIDictWrapper):
    """Wrapper normalizing a neutron security group rule for display."""
    # Required attributes:
    #   id, parent_group_id
    #   ip_protocol, from_port, to_port, ip_range, group
    #   ethertype, direction (Neutron specific)

    def _get_secgroup_name(self, sg_id, sg_dict):
        if sg_id:
            if sg_dict is None:
                sg_dict = {}
            # If sg name not found in sg_dict,
            # first two parts of UUID is used as sg name.
            return sg_dict.get(sg_id, sg_id[:13])
        else:
            return u''

    def __init__(self, sgr, sg_dict=None):
        # In Neutron, if both remote_ip_prefix and remote_group_id are None,
        # it means all remote IP range is allowed, i.e., 0.0.0.0/0 or ::/0.
        if not sgr['remote_ip_prefix'] and not sgr['remote_group_id']:
            if sgr['ethertype'] == 'IPv6':
                sgr['remote_ip_prefix'] = '::/0'
            else:
                sgr['remote_ip_prefix'] = '0.0.0.0/0'

        rule = {
            'id': sgr['id'],
            'parent_group_id': sgr['security_group_id'],
            'direction': sgr['direction'],
            'ethertype': sgr['ethertype'],
            'ip_protocol': sgr['protocol'],
            'from_port': sgr['port_range_min'],
            'to_port': sgr['port_range_max'],
        }
        cidr = sgr['remote_ip_prefix']
        rule['ip_range'] = {'cidr': cidr} if cidr else {}
        group = self._get_secgroup_name(sgr['remote_group_id'], sg_dict)
        rule['group'] = {'name': group} if group else {}
        super(SecurityGroupRule, self).__init__(rule)

    def __str__(self):
        if 'name' in self.group:
            remote = self.group['name']
        elif 'cidr' in self.ip_range:
            remote = self.ip_range['cidr']
        else:
            remote = 'ANY'
        direction = 'to' if self.direction == 'egress' else 'from'
        if self.from_port:
            if self.from_port == self.to_port:
                proto_port = ("%s/%s" %
                              (self.from_port, self.ip_protocol.lower()))
            else:
                proto_port = ("%s-%s/%s" %
                              (self.from_port, self.to_port,
                               self.ip_protocol.lower()))
        elif self.ip_protocol:
            try:
                ip_proto = int(self.ip_protocol)
                proto_port = "ip_proto=%d" % ip_proto
            except Exception:
                # well-defined IP protocol name like TCP, UDP, ICMP.
                proto_port = self.ip_protocol
        else:
            proto_port = ''
        return (_('ALLOW %(ethertype)s %(proto_port)s '
                  '%(direction)s %(remote)s') %
                {'ethertype': self.ethertype,
                 'proto_port': proto_port,
                 'remote': remote,
                 'direction': direction})


class SecurityGroupManager(network_base.SecurityGroupManager):
    """CRUD operations for neutron security groups and their rules."""
    backend = 'neutron'

    def __init__(self, request):
        self.request = request
        self.client = neutronclient(request)

    def _list(self, **filters):
        secgroups = self.client.list_security_groups(**filters)
        return [SecurityGroup(sg) for sg in secgroups.get('security_groups')]

    @profiler.trace
    def list(self):
        tenant_id = self.request.user.tenant_id
        return self._list(tenant_id=tenant_id)

    def _sg_name_dict(self, sg_id, rules):
        """Create a mapping dict from secgroup id to its name."""
        related_ids = set([sg_id])
        related_ids |= set(filter(None, [r['remote_group_id']
                                         for r in rules]))
        related_sgs = self.client.list_security_groups(id=related_ids,
                                                       fields=['id', 'name'])
        related_sgs = related_sgs.get('security_groups')
        return dict((sg['id'], sg['name']) for sg in related_sgs)

    @profiler.trace
    def get(self, sg_id):
        secgroup = self.client.show_security_group(sg_id).get('security_group')
        sg_dict = self._sg_name_dict(sg_id, secgroup['security_group_rules'])
        return SecurityGroup(secgroup, sg_dict)

    @profiler.trace
    def create(self, name, desc):
        body = {'security_group': {'name': name,
                                   'description': desc,
                                   'tenant_id': self.request.user.project_id}}
        secgroup = self.client.create_security_group(body)
        return SecurityGroup(secgroup.get('security_group'))

    @profiler.trace
    def update(self, sg_id, name, desc):
        body = {'security_group': {'name': name,
                                   'description': desc}}
        secgroup = self.client.update_security_group(sg_id, body)
        return SecurityGroup(secgroup.get('security_group'))

    @profiler.trace
    def delete(self, sg_id):
        self.client.delete_security_group(sg_id)

    @profiler.trace
    def rule_create(self, parent_group_id,
                    direction=None, ethertype=None,
                    ip_protocol=None, from_port=None, to_port=None,
                    cidr=None, group_id=None):
        """Create a security group rule.

        Negative port/protocol values and empty cidr are normalized to
        None, which neutron interprets as "unspecified".

        :raises horizon.exceptions.Conflict: if an identical rule exists
        """
        if not cidr:
            cidr = None
        if from_port < 0:
            from_port = None
        if to_port < 0:
            to_port = None
        if isinstance(ip_protocol, int) and ip_protocol < 0:
            ip_protocol = None

        body = {'security_group_rule':
                {'security_group_id': parent_group_id,
                 'direction': direction,
                 'ethertype': ethertype,
                 'protocol': ip_protocol,
                 'port_range_min': from_port,
                 'port_range_max': to_port,
                 'remote_ip_prefix': cidr,
                 'remote_group_id': group_id}}
        try:
            rule = self.client.create_security_group_rule(body)
        except neutron_exc.Conflict:
            raise exceptions.Conflict(_('Security group rule already exists.'))
        rule = rule.get('security_group_rule')
        sg_dict = self._sg_name_dict(parent_group_id, [rule])
        return SecurityGroupRule(rule, sg_dict)

    @profiler.trace
    def rule_delete(self, sgr_id):
        self.client.delete_security_group_rule(sgr_id)

    @profiler.trace
    def list_by_instance(self, instance_id):
        """Gets security groups of an instance."""
        ports = port_list(self.request, device_id=instance_id)
        sg_ids = []
        for p in ports:
            sg_ids += p.security_groups
        return self._list(id=set(sg_ids)) if sg_ids else []

    @profiler.trace
    def update_instance_security_group(self, instance_id,
                                       new_security_group_ids):
        # Applies the new security group set to every port of the instance.
        ports = port_list(self.request, device_id=instance_id)
        for p in ports:
            params = {'security_groups': new_security_group_ids}
            port_update(self.request, p.id, **params)


class FloatingIp(base.APIDictWrapper):
    # Aliases neutron field names to the generic attribute names Horizon
    # uses across network backends (ip, fixed_ip, pool).
    _attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
              'instance_type', 'pool']

    def __init__(self, fip):
        fip['ip'] = fip['floating_ip_address']
        fip['fixed_ip'] = fip['fixed_ip_address']
        fip['pool'] = fip['floating_network_id']
        super(FloatingIp, self).__init__(fip)


class FloatingIpPool(base.APIDictWrapper):
    pass


class FloatingIpTarget(base.APIDictWrapper):
    pass


class FloatingIpManager(network_base.FloatingIpManager):
    """Floating IP operations (allocate/associate/list) for neutron."""

    # Maps a port's device_owner prefix to a display instance type.
    device_owner_map = {
        'compute:': 'compute',
        'neutron:LOADBALANCER': 'loadbalancer',
    }

    def __init__(self, request):
        self.request = request
        self.client = neutronclient(request)

    @profiler.trace
    def list_pools(self):
        # Floating IP pools are external networks in neutron.
        search_opts = {'router:external': True}
        return [FloatingIpPool(pool) for pool
                in self.client.list_networks(**search_opts).get('networks')]

    def _get_instance_type_from_device_owner(self, device_owner):
        for key, value in self.device_owner_map.items():
            if device_owner.startswith(key):
                return value
        return device_owner

    def _set_instance_info(self, fip, port=None):
        # Annotates the fip dict with the attached instance's id/type
        # (looked up via the associated port when not supplied).
        if fip['port_id']:
            if not port:
                port = port_get(self.request, fip['port_id'])
            fip['instance_id'] = port.device_id
            fip['instance_type'] = self._get_instance_type_from_device_owner(
                port.device_owner)
        else:
            fip['instance_id'] = None
            fip['instance_type'] = None

    @profiler.trace
    def list(self, all_tenants=False, **search_opts):
        if not all_tenants:
            tenant_id = self.request.user.tenant_id
            # In Neutron, list_floatingips returns Floating IPs from
            # all tenants when the API is called with admin role, so
            # we need to filter them with tenant_id.
            search_opts['tenant_id'] = tenant_id
            port_search_opts = {'tenant_id': tenant_id}
        else:
            port_search_opts = {}
        fips = self.client.list_floatingips(**search_opts)
        fips = fips.get('floatingips')
        # Get port list to add instance_id to floating IP list
        # instance_id is stored in device_id attribute
        ports = port_list(self.request, **port_search_opts)
        port_dict = collections.OrderedDict([(p['id'], p) for p in ports])
        for fip in fips:
            self._set_instance_info(fip, port_dict.get(fip['port_id']))
        return [FloatingIp(fip) for fip in fips]

    @profiler.trace
    def get(self, floating_ip_id):
        fip = self.client.show_floatingip(floating_ip_id).get('floatingip')
        self._set_instance_info(fip)
        return FloatingIp(fip)

    @profiler.trace
    def allocate(self, pool, tenant_id=None, **params):
        if not tenant_id:
            tenant_id = self.request.user.project_id
        create_dict = {'floating_network_id': pool,
                       'tenant_id': tenant_id}
        if 'floating_ip_address' in params:
            create_dict['floating_ip_address'] = params['floating_ip_address']
        fip = self.client.create_floatingip(
            {'floatingip': create_dict}).get('floatingip')
        self._set_instance_info(fip)
        return FloatingIp(fip)

    @profiler.trace
    def release(self, floating_ip_id):
        self.client.delete_floatingip(floating_ip_id)

    @profiler.trace
    def associate(self, floating_ip_id, port_id):
        # NOTE: In Neutron Horizon floating IP support, port_id is
        # "<port_id>_<ip_address>" format to identify multiple ports.
        pid, ip_address = port_id.split('_', 1)
        update_dict = {'port_id': pid,
                       'fixed_ip_address': ip_address}
        self.client.update_floatingip(floating_ip_id,
                                      {'floatingip': update_dict})

    @profiler.trace
    def disassociate(self, floating_ip_id):
        update_dict = {'port_id': None}
        self.client.update_floatingip(floating_ip_id,
                                      {'floatingip': update_dict})

    def _get_reachable_subnets(self, ports):
        # Returns the set of subnet ids that a floating IP can reach.
        if not is_enabled_by_config('enable_fip_topology_check', True):
            # All subnets are reachable from external network
            return set(
                p.fixed_ips[0]['subnet_id'] for p in ports if p.fixed_ips
            )
        # Retrieve subnet list reachable from external network
        ext_net_ids = [ext_net.id for ext_net in self.list_pools()]
        gw_routers = [r.id for r in router_list(self.request)
                      if (r.external_gateway_info and
                          r.external_gateway_info.get('network_id')
                          in ext_net_ids)]
        reachable_subnets = set([p.fixed_ips[0]['subnet_id'] for p in ports
                                 if ((p.device_owner in
                                      ROUTER_INTERFACE_OWNERS) and
                                     (p.device_id in gw_routers))])
        # we have to include any shared subnets as well because we may not
        # have permission to see the router interface to infer connectivity
        shared = set([s.id for n in network_list(self.request, shared=True)
                      for s in n.subnets])
        return reachable_subnets | shared

    @profiler.trace
    def list_targets(self):
        """List ports/IPs a floating IP can be associated with."""
        tenant_id = self.request.user.tenant_id
        ports = port_list(self.request, tenant_id=tenant_id)
        servers, has_more = nova.server_list(self.request, detailed=False)
        server_dict = collections.OrderedDict(
            [(s.id, s.name) for s in servers])
        reachable_subnets = self._get_reachable_subnets(ports)
        targets = []
        for p in ports:
            # Remove network ports from Floating IP targets
            if p.device_owner.startswith('network:'):
                continue
            port_id = p.id
            server_name = server_dict.get(p.device_id)

            for ip in p.fixed_ips:
                if ip['subnet_id'] not in reachable_subnets:
                    continue
                target = {'name': '%s: %s' % (server_name, ip['ip_address']),
                          'id': '%s_%s' % (port_id, ip['ip_address']),
                          'port_id': port_id,
                          'instance_id': p.device_id}
                targets.append(FloatingIpTarget(target))
        return targets

    def _target_ports_by_instance(self, instance_id):
        if not instance_id:
            return None
        search_opts = {'device_id': instance_id}
        return port_list(self.request, **search_opts)

    @profiler.trace
    def get_target_id_by_instance(self, instance_id, target_list=None):
        if target_list is not None:
            targets = [target for target in target_list
                       if target['instance_id'] == instance_id]
            if not targets:
                return None
            return targets[0]['id']
        else:
            # In Neutron one port can have multiple ip addresses, so this
            # method picks up the first one and generate target id.
            ports = self._target_ports_by_instance(instance_id)
            if not ports:
                return None
            return '{0}_{1}'.format(ports[0].id,
                                    ports[0].fixed_ips[0]['ip_address'])

    @profiler.trace
    def list_target_id_by_instance(self, instance_id, target_list=None):
        if target_list is not None:
            return [target['id'] for target in target_list
                    if target['instance_id'] == instance_id]
        else:
            ports = self._target_ports_by_instance(instance_id)
            return ['{0}_{1}'.format(p.id, p.fixed_ips[0]['ip_address'])
                    for p in ports]

    def is_simple_associate_supported(self):
        # NOTE: There are two reasons that simple association support
        # needs more considerations. (1) Neutron does not support the
        # default floating IP pool at the moment. It can be avoided
        # in case where only one floating IP pool exists.
        # (2) Neutron floating IP is associated with each VIF and
        # we need to check whether such VIF is only one for an instance
        # to enable simple association support.
        return False

    def is_supported(self):
        network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
        return network_config.get('enable_router', True)


def get_ipver_str(ip_version):
    """Convert an ip version number to a human-friendly string."""
    return IP_VERSION_DICT.get(ip_version, '')


@memoized
def neutronclient(request):
    """Build (and memoize) a neutron client for the request's token."""
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    c = neutron_client.Client(token=request.user.token.id,
                              auth_url=base.url_for(request, 'identity'),
                              endpoint_url=base.url_for(request, 'network'),
                              insecure=insecure, ca_cert=cacert)
    return c


@profiler.trace
def list_resources_with_long_filters(list_method,
                                     filter_attr, filter_values, **params):
    """List neutron resources with handling RequestURITooLong exception.

    If filter parameters are long, list resources API request leads to
    414 error (URL is too long).
    For such case, this method split list parameters specified
    by a list_field argument into chunks and call the specified list_method
    repeatedly.

    :param list_method: Method used to retrieve resource list.
    :param filter_attr: attribute name to be filtered. The value corresponding
        to this attribute is specified by "filter_values".
        If you want to specify more attributes for a filter condition,
        pass them as keyword arguments like "attr2=values2".
    :param filter_values: values of "filter_attr" to be filtered.
        If filter_values are too long and the total URI length exceed the
        maximum length supported by the neutron server, filter_values will
        be split into sub lists if filter_values is a list.
    :param params: parameters to pass a specified listing API call
        without any changes. You can specify more filter conditions
        in addition to a pair of filter_attr and filter_values.
    """
    try:
        params[filter_attr] = filter_values
        return list_method(**params)
    except neutron_exc.RequestURITooLong as uri_len_exc:
        # The URI is too long because of too many filter values.
        # Use the excess attribute of the exception to know how many
        # filter values can be inserted into a single request.

        # We consider only the filter condition from (filter_attr,
        # filter_values) and do not consider other filter conditions
        # which may be specified in **params.
        if type(filter_values) != list:
            filter_values = [filter_values]

        # Length of each query filter is:
        # <key>=<value>& (e.g., id=<uuid>)
        # The length will be key_len + value_maxlen + 2
        all_filter_len = sum(len(filter_attr) + len(val) + 2
                             for val in filter_values)
        allowed_filter_len = all_filter_len - uri_len_exc.excess

        val_maxlen = max(len(val) for val in filter_values)
        filter_maxlen = len(filter_attr) + val_maxlen + 2
        chunk_size = allowed_filter_len // filter_maxlen

        resources = []
        for i in range(0, len(filter_values), chunk_size):
            params[filter_attr] = filter_values[i:i + chunk_size]
            resources.extend(list_method(**params))
        return resources


@profiler.trace
def network_list(request, **params):
    """List networks with their subnet info expanded in place."""
    LOG.debug("network_list(): params=%s", params)
    networks = neutronclient(request).list_networks(**params).get('networks')
    # Get subnet list to expand subnet info in network list.
    subnets = subnet_list(request)
    subnet_dict = dict([(s['id'], s) for s in subnets])
    # Expand subnet list from subnet_id to values.
    for n in networks:
        # Due to potential timing issues, we can't assume the subnet_dict data
        # is in sync with the network data.
        n['subnets'] = [subnet_dict[s] for s in n.get('subnets', [])
                        if s in subnet_dict]
    return [Network(n) for n in networks]


@profiler.trace
def network_list_for_tenant(request, tenant_id, include_external=False,
                            **params):
    """Return a network list available for the tenant.

    The list contains networks owned by the tenant and public networks.
    If requested_networks specified, it searches requested_networks only.
    """
    LOG.debug("network_list_for_tenant(): tenant_id=%(tenant_id)s, "
              "params=%(params)s", {'tenant_id': tenant_id, 'params': params})

    networks = []
    shared = params.get('shared')
    if shared is not None:
        del params['shared']

    if shared in (None, False):
        # If a user has admin role, network list returned by Neutron API
        # contains networks that do not belong to that tenant.
        # So we need to specify tenant_id when calling network_list().
        networks += network_list(request, tenant_id=tenant_id,
                                 shared=False, **params)

    if shared in (None, True):
        # In the current Neutron API, there is no way to retrieve
        # both owner networks and public networks in a single API call.
        networks += network_list(request, shared=True, **params)

    params['router:external'] = params.get('router:external', True)
    if params['router:external'] and include_external:
        if shared is not None:
            params['shared'] = shared
        fetched_net_ids = [n.id for n in networks]
        # Retrieves external networks when router:external is not specified
        # in (filtering) params or router:external=True filter is specified.
        # When router:external=False is specified there is no need to query
        # networking API because apparently nothing will match the filter.
        ext_nets = network_list(request, **params)
        networks += [n for n in ext_nets
                     if n.id not in fetched_net_ids]

    return networks


@profiler.trace
def network_get(request, network_id, expand_subnet=True, **params):
    """Get a network; optionally expand its subnet ids into Subnet data."""
    LOG.debug("network_get(): netid=%(network_id)s, params=%(params)s",
              {'network_id': network_id, 'params': params})
    network = neutronclient(request).show_network(network_id,
                                                  **params).get('network')
    if expand_subnet:
        # Subnets can only be fetched when they are visible to the user.
        if request.user.tenant_id == network['tenant_id'] or network['shared']:
            # Since the number of subnets per network must be small,
            # call subnet_get() for each subnet instead of calling
            # subnet_list() once.
            network['subnets'] = [subnet_get(request, sid)
                                  for sid in network['subnets']]
    return Network(network)


@profiler.trace
def network_create(request, **kwargs):
    """Create a  network object.

    :param request: request context
    :param tenant_id: (optional) tenant id of the network created
    :param name: (optional) name of the network created
    :returns: Network object
    """
    LOG.debug("network_create(): kwargs = %s", kwargs)
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body = {'network': kwargs}
    network = neutronclient(request).create_network(body=body).get('network')
    return Network(network)


@profiler.trace
def network_update(request, network_id, **kwargs):
    """Update the given network with the supplied attributes."""
    LOG.debug("network_update(): netid=%(network_id)s, params=%(params)s",
              {'network_id': network_id, 'params': kwargs})
    body = {'network': kwargs}
    network = neutronclient(request).update_network(network_id,
                                                    body=body).get('network')
    return Network(network)


@profiler.trace
def network_delete(request, network_id):
    """Delete the given network."""
    LOG.debug("network_delete(): netid=%s", network_id)
    neutronclient(request).delete_network(network_id)


@profiler.trace
def subnet_list(request, **params):
    """List subnets matching the given filter params."""
    LOG.debug("subnet_list(): params=%s", params)
    subnets = neutronclient(request).list_subnets(**params).get('subnets')
    return [Subnet(s) for s in subnets]


@profiler.trace
def subnet_get(request, subnet_id, **params):
    """Get a single subnet by id."""
    LOG.debug("subnet_get(): subnetid=%(subnet_id)s, params=%(params)s",
              {'subnet_id': subnet_id, 'params': params})
    subnet = neutronclient(request).show_subnet(subnet_id,
                                                **params).get('subnet')
    return Subnet(subnet)


@profiler.trace
def subnet_create(request, network_id, **kwargs):
    """Create a subnet on a specified network.

    :param request: request context
    :param network_id: network id a subnet is created on
    :param cidr: (optional) subnet IP address range
    :param ip_version: (optional) IP version (4 or 6)
    :param gateway_ip: (optional) IP address of gateway
    :param tenant_id: (optional) tenant id of the subnet created
    :param name: (optional) name of the subnet created
    :param subnetpool_id: (optional) subnetpool to allocate prefix from
    :param prefixlen: (optional) length of prefix to allocate
    :returns: Subnet object

    Although both cidr+ip_version and subnetpool_id+prefixlen is listed as
    optional you MUST pass along one of the combinations to get a successful
    result.
    """
    LOG.debug("subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s",
              {'network_id': network_id, 'kwargs': kwargs})
    body = {'subnet': {'network_id': network_id}}
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body['subnet'].update(kwargs)
    subnet = neutronclient(request).create_subnet(body=body).get('subnet')
    return Subnet(subnet)


@profiler.trace
def subnet_update(request, subnet_id, **kwargs):
    """Update the given subnet with the supplied attributes."""
    LOG.debug("subnet_update(): subnetid=%(subnet_id)s, kwargs=%(kwargs)s",
              {'subnet_id': subnet_id, 'kwargs': kwargs})
    body = {'subnet': kwargs}
    subnet = neutronclient(request).update_subnet(subnet_id,
                                                  body=body).get('subnet')
    return Subnet(subnet)


@profiler.trace
def subnet_delete(request, subnet_id):
    """Delete the given subnet."""
    LOG.debug("subnet_delete(): subnetid=%s", subnet_id)
    neutronclient(request).delete_subnet(subnet_id)


@profiler.trace
def subnetpool_list(request, **params):
    """List subnet pools matching the given filter params."""
    LOG.debug("subnetpool_list(): params=%s", params)
    subnetpools = \
        neutronclient(request).list_subnetpools(**params).get('subnetpools')
    return [SubnetPool(s) for s in subnetpools]


@profiler.trace
def subnetpool_get(request, subnetpool_id, **params):
    """Get a single subnet pool by id."""
    LOG.debug("subnetpool_get(): subnetpoolid=%(subnetpool_id)s, "
              "params=%(params)s", {'subnetpool_id': subnetpool_id,
                                    'params': params})
    subnetpool = \
        neutronclient(request).show_subnetpool(subnetpool_id,
                                               **params).get('subnetpool')
    return SubnetPool(subnetpool)


@profiler.trace
def subnetpool_create(request, name, prefixes, **kwargs):
    """Create a subnetpool.

    ip_version is auto-detected in back-end.

    Parameters:
    request -- Request context
    name -- Name for subnetpool
    prefixes -- List of prefixes for pool

    Keyword Arguments (optional):
    min_prefixlen -- Minimum prefix length for allocations from pool
    max_prefixlen -- Maximum prefix length for allocations from pool
    default_prefixlen -- Default prefix length for allocations from pool
    default_quota -- Default quota for allocations from pool
    shared -- Subnetpool should be shared (Admin-only)
    tenant_id -- Owner of subnetpool

    Returns:
    SubnetPool object
    """
    LOG.debug("subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, "
              "kwargs=%(kwargs)s", {'name': name, 'prefixes': prefixes,
                                    'kwargs': kwargs})
    body = {'subnetpool':
            {'name': name,
             'prefixes': prefixes,
             }
            }
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    body['subnetpool'].update(kwargs)
    subnetpool = \
        neutronclient(request).create_subnetpool(body=body).get('subnetpool')
    return SubnetPool(subnetpool)


@profiler.trace
def subnetpool_update(request, subnetpool_id, **kwargs):
    """Update the given subnet pool with the supplied attributes."""
    LOG.debug("subnetpool_update(): subnetpoolid=%(subnetpool_id)s, "
              "kwargs=%(kwargs)s", {'subnetpool_id': subnetpool_id,
                                    'kwargs': kwargs})
    body = {'subnetpool': kwargs}
    subnetpool = \
        neutronclient(request).update_subnetpool(subnetpool_id,
                                                 body=body).get('subnetpool')
    return SubnetPool(subnetpool)


@profiler.trace
def subnetpool_delete(request, subnetpool_id):
    """Delete the given subnet pool."""
    LOG.debug("subnetpool_delete(): subnetpoolid=%s", subnetpool_id)
    return neutronclient(request).delete_subnetpool(subnetpool_id)


@profiler.trace
def port_list(request, **params):
    """List ports matching the given filter params."""
    LOG.debug("port_list(): params=%s", params)
    ports = neutronclient(request).list_ports(**params).get('ports')
    return [Port(p) for p in ports]


@profiler.trace
def port_get(request, port_id, **params):
    LOG.debug("port_get(): portid=%(port_id)s, 
params=%(params)s", {'port_id': port_id, 'params': params}) port = neutronclient(request).show_port(port_id, **params).get('port') return Port(port) def unescape_port_kwargs(**kwargs): for key in kwargs: if '__' in key: kwargs[':'.join(key.split('__'))] = kwargs.pop(key) return kwargs @profiler.trace def port_create(request, network_id, **kwargs): """Create a port on a specified network. :param request: request context :param network_id: network id a subnet is created on :param device_id: (optional) device id attached to the port :param tenant_id: (optional) tenant id of the port created :param name: (optional) name of the port created :returns: Port object """ LOG.debug("port_create(): netid=%(network_id)s, kwargs=%(kwargs)s", {'network_id': network_id, 'kwargs': kwargs}) kwargs = unescape_port_kwargs(**kwargs) body = {'port': {'network_id': network_id}} if 'tenant_id' not in kwargs: kwargs['tenant_id'] = request.user.project_id body['port'].update(kwargs) port = neutronclient(request).create_port(body=body).get('port') return Port(port) @profiler.trace def port_delete(request, port_id): LOG.debug("port_delete(): portid=%s", port_id) neutronclient(request).delete_port(port_id) @profiler.trace def port_update(request, port_id, **kwargs): LOG.debug("port_update(): portid=%(port_id)s, kwargs=%(kwargs)s", {'port_id': port_id, 'kwargs': kwargs}) kwargs = unescape_port_kwargs(**kwargs) body = {'port': kwargs} port = neutronclient(request).update_port(port_id, body=body).get('port') return Port(port) @profiler.trace def router_create(request, **kwargs): LOG.debug("router_create():, kwargs=%s", kwargs) body = {'router': {}} if 'tenant_id' not in kwargs: kwargs['tenant_id'] = request.user.project_id body['router'].update(kwargs) router = neutronclient(request).create_router(body=body).get('router') return Router(router) @profiler.trace def router_update(request, r_id, **kwargs): LOG.debug("router_update(): router_id=%(r_id)s, kwargs=%(kwargs)s", {'r_id': r_id, 'kwargs': 
kwargs}) body = {'router': {}} body['router'].update(kwargs) router = neutronclient(request).update_router(r_id, body=body) return Router(router['router']) @profiler.trace def router_get(request, router_id, **params): router = neutronclient(request).show_router(router_id, **params).get('router') return Router(router) @profiler.trace def router_list(request, **params): routers = neutronclient(request).list_routers(**params).get('routers') return [Router(r) for r in routers] @profiler.trace def router_list_on_l3_agent(request, l3_agent_id, **params): routers = neutronclient(request).\ list_routers_on_l3_agent(l3_agent_id, **params).get('routers') return [Router(r) for r in routers] @profiler.trace def router_delete(request, router_id): neutronclient(request).delete_router(router_id) @profiler.trace def router_add_interface(request, router_id, subnet_id=None, port_id=None): body = {} if subnet_id: body['subnet_id'] = subnet_id if port_id: body['port_id'] = port_id client = neutronclient(request) return client.add_interface_router(router_id, body) @profiler.trace def router_remove_interface(request, router_id, subnet_id=None, port_id=None): body = {} if subnet_id: body['subnet_id'] = subnet_id if port_id: body['port_id'] = port_id neutronclient(request).remove_interface_router(router_id, body) @profiler.trace def router_add_gateway(request, router_id, network_id): body = {'network_id': network_id} neutronclient(request).add_gateway_router(router_id, body) @profiler.trace def router_remove_gateway(request, router_id): neutronclient(request).remove_gateway_router(router_id) @profiler.trace def router_static_route_list(request, router_id=None): router = router_get(request, router_id) try: routes = [RouterStaticRoute(r) for r in router.routes] except AttributeError: LOG.debug("router_static_route_list(): router_id=%(router_id)s, " "router=%(router)s", {'router_id': router_id, 'router': router}) return [] return routes @profiler.trace def router_static_route_remove(request, 
router_id, route_ids): currentroutes = router_static_route_list(request, router_id=router_id) newroutes = [] for oldroute in currentroutes: if oldroute.id not in route_ids: newroutes.append({'nexthop': oldroute.nexthop, 'destination': oldroute.destination}) body = {'routes': newroutes} new = router_update(request, router_id, **body) return new @profiler.trace def router_static_route_add(request, router_id, newroute): body = {} currentroutes = router_static_route_list(request, router_id=router_id) body['routes'] = [newroute] + [{'nexthop': r.nexthop, 'destination': r.destination} for r in currentroutes] new = router_update(request, router_id, **body) return new @profiler.trace def tenant_quota_get(request, tenant_id): return base.QuotaSet(neutronclient(request).show_quota(tenant_id)['quota']) @profiler.trace def tenant_quota_update(request, tenant_id, **kwargs): quotas = {'quota': kwargs} return neutronclient(request).update_quota(tenant_id, quotas) @profiler.trace def agent_list(request, **params): agents = neutronclient(request).list_agents(**params) return [Agent(a) for a in agents['agents']] @profiler.trace def list_dhcp_agent_hosting_networks(request, network, **params): agents = neutronclient(request).list_dhcp_agent_hosting_networks(network, **params) return [Agent(a) for a in agents['agents']] @profiler.trace def list_l3_agent_hosting_router(request, router, **params): agents = neutronclient(request).list_l3_agent_hosting_routers(router, **params) return [Agent(a) for a in agents['agents']] @profiler.trace def show_network_ip_availability(request, network_id): ip_availability = neutronclient(request).show_network_ip_availability( network_id) return ip_availability @profiler.trace def add_network_to_dhcp_agent(request, dhcp_agent, network_id): body = {'network_id': network_id} return neutronclient(request).add_network_to_dhcp_agent(dhcp_agent, body) @profiler.trace def remove_network_from_dhcp_agent(request, dhcp_agent, network_id): return 
neutronclient(request).remove_network_from_dhcp_agent(dhcp_agent, network_id) @profiler.trace def provider_list(request): providers = neutronclient(request).list_service_providers() return providers['service_providers'] # TODO(pkarikh) need to uncomment when osprofiler will have no # issues with unicode in: # openstack_dashboard/test/test_data/nova_data.py#L470 data # @profiler.trace def servers_update_addresses(request, servers, all_tenants=False): """Retrieve servers networking information from Neutron if enabled. Should be used when up to date networking information is required, and Nova's networking info caching mechanism is not fast enough. """ # Get all (filtered for relevant servers) information from Neutron try: ports = list_resources_with_long_filters( port_list, 'device_id', [instance.id for instance in servers], request=request) fips = FloatingIpManager(request) if fips.is_supported(): floating_ips = list_resources_with_long_filters( fips.list, 'port_id', [port.id for port in ports], all_tenants=all_tenants) else: floating_ips = [] networks = list_resources_with_long_filters( network_list, 'id', set([port.network_id for port in ports]), request=request) except Exception: error_message = _('Unable to connect to Neutron.') LOG.error(error_message) messages.error(request, error_message) return # Map instance to its ports instances_ports = collections.defaultdict(list) for port in ports: instances_ports[port.device_id].append(port) # Map port to its floating ips ports_floating_ips = collections.defaultdict(list) for fip in floating_ips: ports_floating_ips[fip.port_id].append(fip) # Map network id to its name network_names = dict(((network.id, network.name) for network in networks)) for server in servers: try: addresses = _server_get_addresses( request, server, instances_ports, ports_floating_ips, network_names) except Exception as e: LOG.error(six.text_type(e)) else: server.addresses = addresses def _server_get_addresses(request, server, ports, floating_ips, 
network_names): def _format_address(mac, ip, type): try: version = netaddr.IPAddress(ip).version except Exception: error_message = _('Unable to parse IP address %s.') % ip LOG.error(error_message) messages.error(request, error_message) raise return {u'OS-EXT-IPS-MAC:mac_addr': mac, u'version': version, u'addr': ip, u'OS-EXT-IPS:type': type} addresses = collections.defaultdict(list) instance_ports = ports.get(server.id, []) for port in instance_ports: network_name = network_names.get(port.network_id) if network_name is not None: for fixed_ip in port.fixed_ips: addresses[network_name].append( _format_address(port.mac_address, fixed_ip['ip_address'], u'fixed')) port_fips = floating_ips.get(port.id, []) for fip in port_fips: addresses[network_name].append( _format_address(port.mac_address, fip.floating_ip_address, u'floating')) return dict(addresses) @profiler.trace @memoized def list_extensions(request): try: extensions_list = neutronclient(request).list_extensions() except exceptions.ServiceCatalogException: return {} if 'extensions' in extensions_list: return tuple(extensions_list['extensions']) else: return () @profiler.trace @memoized def is_extension_supported(request, extension_alias): extensions = list_extensions(request) for extension in extensions: if extension['alias'] == extension_alias: return True else: return False def is_enabled_by_config(name, default=True): network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {}) return network_config.get(name, default) @memoized def is_service_enabled(request, config_name, ext_name): return (is_enabled_by_config(config_name) and is_extension_supported(request, ext_name)) @memoized def is_quotas_extension_supported(request): return (is_enabled_by_config('enable_quotas', False) and is_extension_supported(request, 'quotas')) @memoized def is_router_enabled(request): return (is_enabled_by_config('enable_router') and is_extension_supported(request, 'router')) # FEATURE_MAP is used to define: # - related neutron 
extension name (key: "extension") # - corresponding dashboard config (key: "config") # - RBAC policies (key: "poclies") # If a key is not contained, the corresponding permission check is skipped. FEATURE_MAP = { 'dvr': { 'extension': 'dvr', 'config': { 'name': 'enable_distributed_router', 'default': False, }, 'policies': { 'get': 'get_router:distributed', 'create': 'create_router:distributed', 'update': 'update_router:distributed', } }, 'l3-ha': { 'extension': 'l3-ha', 'config': {'name': 'enable_ha_router', 'default': False}, 'policies': { 'get': 'get_router:ha', 'create': 'create_router:ha', 'update': 'update_router:ha', } }, } def get_feature_permission(request, feature, operation=None): """Check if a feature-specific field can be displayed. This method check a permission for a feature-specific field. Such field is usually provided through Neutron extension. :param request: Request Object :param feature: feature name defined in FEATURE_MAP :param operation (optional): Operation type. The valid value should be defined in FEATURE_MAP[feature]['policies'] It must be specified if FEATURE_MAP[feature] has 'policies'. """ network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {}) feature_info = FEATURE_MAP.get(feature) if not feature_info: # Translators: Only used inside Horizon code and invisible to users raise ValueError(_("The requested feature '%(feature)s' is unknown. " "Please make sure to specify a feature defined " "in FEATURE_MAP.")) # Check dashboard settings feature_config = feature_info.get('config') if feature_config: if not network_config.get(feature_config['name'], feature_config['default']): return False # Check policy feature_policies = feature_info.get('policies') if feature_policies: policy_name = feature_policies.get(operation) if not policy_name: # Translators: Only used inside Horizon code and invisible to users raise ValueError(_("The 'operation' parameter for " "get_feature_permission '%(feature)s' " "is invalid. 
It should be one of %(allowed)s") % {'feature': feature, 'allowed': ' '.join(feature_policies.keys())}) role = (('network', policy_name),) if not policy.check(role, request): return False # Check if a required extension is enabled feature_extension = feature_info.get('extension') if feature_extension: try: return is_extension_supported(request, feature_extension) except Exception: msg = (_("Failed to check Neutron '%s' extension is not supported") % feature_extension) LOG.info(msg) return False # If all checks are passed, now a given feature is allowed. return True
I was watching this last night for the first time and I found the songs quite catchy, like "I Want It All" and "The Boys Are Back", which were great. But what's different about this one is that they use other artists as well: Jordan Pruitt, Stan, and Jesse McCartney. The characters were great, yet I found Ryan very flamboyant. He wears pink, which looks so girly on him. Chad I thought was cute, and Troy was really okay. A lot of people hate this, but if you are into Grease or Hairspray or any musical, then you'll love it. I think it's a lot like a teenage version of Grease: boy meets girl, they fall in love, blah blah. I mean, it's a Disney movie, for crying out loud, and Disney is a very cheesy production. Overall it was a lot better than the first two. I saw all 3 of them, and I own the 2nd and 3rd one on DVD. It makes me look weird. I don't know, but no one better wear the same dress as me.
import traceback
import logging

logger = logging.getLogger('exp')


class ExpError(Exception):
    """Base class for all errors raised by this package.

    Stores a human-readable ``message`` and renders it via ``str()``.
    """

    def __init__(self, message):
        # Also forward to Exception.__init__ so that ``e.args`` and the
        # default repr/pickling behave normally (the original left args empty).
        super(ExpError, self).__init__(message)
        self.message = message

    def __str__(self):
        return self.message


class AuthenticationError(ExpError):
    """Raised when credentials are missing or rejected."""
    pass


class UnexpectedError(ExpError):
    """An error outside the anticipated failure modes.

    Logs the current traceback (if any) at debug level on construction.
    """

    def __init__(self, *args, **kwargs):
        logger.debug('An unexpected error occured:')
        logger.debug(traceback.format_exc())
        super(UnexpectedError, self).__init__(*args, **kwargs)


# Cannot execute desired action.
# NOTE: this intentionally keeps the historical name even though it shadows
# the builtin ``RuntimeError`` within this module — callers elsewhere import
# it by this name, so renaming would break the public interface.
class RuntimeError(ExpError):
    def __init__(self, message):
        logger.debug('A runtime error has occured: %s' % message)
        # Bug fix: the original never stored ``message``, so ``str(err)``
        # raised AttributeError ("no attribute 'message'"). Delegate to the
        # base class, which both stores it and populates ``args``.
        super(RuntimeError, self).__init__(message)


class ApiError(ExpError):
    """An error reported by the remote API.

    :param code: machine-readable error code (defaults to 'unknown.error')
    :param message: human-readable description
    :param status_code: HTTP status code, if applicable
    :param payload: raw response payload for debugging
    """

    def __init__(self, code=None, message=None, status_code=None, payload=None):
        self.message = message or 'An unknown error has occurred.'
        self.code = code or 'unknown.error'
        self.status_code = status_code
        self.payload = payload

    def __str__(self):
        return '%s: %s \n %s' % (self.code, self.message, self.payload)
229. Myers W. A. Micropsia and testicular restriction. — The psychoanalytic quarterly, 1977, vol. 46, No 4, p. 580-605.
230. Peters R. S. Education of emotions. — In: Feelings and emotions: The Loyola symposium. N. Y., 1970, p. 187-203.
231. Psychological stress. / Ed. by M. H. Appley, R. Trumbull. — N. Y.: Appleton-Century-Crofts, 1967. — 471 p.
232. Rangell L. The scope of intrapsychic conflict: microscopic and macroscopic consideration. — The psychoanalytic study of the child. N. Y., 1963, vol. 18, p. 75-102.
233. Rapaport D. Structure of psychoanalytic theory: a systematisation attempt. — Psychological issues, 1960, vol. 11, p. 1-158.
234. Rapaport D., Gill M. M. The points of view and assumption of metapsychology. — Intern. journ. of psychoanalysis, 1959, vol. 49, p. 153-162.
235. Rycroft Ch. A critical dictionary of psychoanalysis. — L.: Nelson, 1968. — 189 p.
236. Sarnoff A. Personality. Dynamic and development. — N. Y.: Wiley & Sons, 1962. — 572 p.
237. Sartre J.-P. Esquisse d'une théorie des émotions. — Paris: Hermann, 1965. — 64 p.
238. Schafer R. Psychoanalytic interpretation in Rorschach testing. — N. Y.: Grune & Stratton, 1954. — 446 p.
239. Sells S. B. On the nature of stress. — In: Social and psychological factors in stress. N. Y., 1970, p. 134-139.
240. Selye H. Stress, cancer and the mind. — In: Cancer, stress, and death. N. Y.; L., 1979, p. 11-19.
241. Sjöback H. The psychoanalytic theory of defensive processes. — Lund: Gleerup, 1973. — 297 p.
242. Skinner B. F. Cumulative record. — N. Y.: Appleton-Century-Crofts, 1959. — 430 p.
243. Skinner B. F. About behaviorism. — N. Y.: Knopf, 1974. — 256 p.
244. Sperling O. E. Exaggeration as a defense. — Psychoanalytical quarterly, 1963, vol. 32, p. 533-548.
245. Sperling S. J. On denial and the essential nature of defense. — International journal of psychoanalytical science, 1958, vol. 39, p. 25-38.
246. Spitz R. A. Some early prototypes of ego defenses.
— Journal of the American Psychoanalytic Association, 1961, vol. 9, p. 626-651.
247. Stolorow R. D., Lachman F. M. Early object loss and denial. Development considerations. — The psychoanalytic quarterly, 1975, vol. 44, No 4, p. 596-611.
248. Suicide in different cultures. / Ed. by Farberow N. L. — Baltimore: Univ. Park press, 1975. — 286 p.
from __future__ import division, print_function
import numpy as np
from scipy.linalg import inv, svd
import matplotlib.pyplot as plt
from abel.rbasex import _bs_rbasex

# Script: visualize the singular-value decomposition of the 0th-order
# rbasex inverse Abel transform matrix, producing a 3x2 grid of plots
# (singular values plus selected singular vectors).

# Radius of the outermost basis function (pixels).
Rmax = 40

# SVD for 0th-order inverse Abel transform
# _bs_rbasex returns the projection matrix (only one matrix for order 0);
# A = inv(P.T) is the corresponding inverse-transform matrix, and
# V, s, UT are its left singular vectors, singular values and right
# singular vectors (rows).
P, = _bs_rbasex(Rmax, 0, False)
A = inv(P.T)
V, s, UT = svd(A)

# setup x axis
def setx():
    # Common x-axis decoration: range [0, Rmax] with ticks at quarters,
    # labeled only at the ends (0 and r_max).
    plt.xlim((0, Rmax))
    plt.xticks([0, 1/4 * Rmax, 1/2 * Rmax, 3/4 * Rmax, Rmax],
               ['$0$', '', '', '', '$r_{\\rm max}$'])

# plot i-th +- 0, 1 singular vectors
def plotu(i, title):
    # Plot three consecutive left singular vectors (columns of V) centered
    # on index i, in red/green/blue, with a LaTeX title describing them.
    # i may be fractional (e.g. Rmax/4); it is truncated to an int index.
    plt.title('$\\mathbf{v}_i,\\quad i = ' + title + '$')
    i = int(i)
    plt.plot(V[:, i - 1], '#DD0000')
    plt.plot(V[:, i], '#00AA00')
    plt.plot(V[:, i + 1], '#0000FF')
    setx()

fig = plt.figure(figsize=(6, 6), frameon=False)

# singular values
plt.subplot(321)
plt.title('$\\sigma_i$')
plt.plot(s, 'k')
setx()
plt.ylim(bottom=0)

# vectors near 0
plt.subplot(322)
plotu(1, '0, 1, 2')

# vectors near 1/4
plt.subplot(323)
plotu(1/4 * Rmax, '\\frac{1}{4} r_{\\rm max} \\pm 0, 1')

# vectors near middle
plt.subplot(324)
plotu(1/2 * Rmax, '\\frac{1}{2} r_{\\rm max} \\pm 0, 1')

# vectors near 3/4
plt.subplot(325)
plotu(3/4 * Rmax, '\\frac{3}{4} r_{\\rm max} \\pm 0, 1')

# vectors near end
plt.subplot(326)
plotu(Rmax - 1, 'r_{\\rm max} - 2, 1, 0')

plt.tight_layout()

# Uncomment to save the figure / show it interactively.
#plt.savefig('rbasex-SVD.svg')
#plt.show()
A professional/career Health, Safety and Environment (HSE) practitioner, Eze Prof I. F. Okunamiri graduated from the Polytechnic of South Bank, London S.E.1, with a Bachelor of Science degree in Occupational Hygiene. He also holds a PhD in International Safety Management and Engineering Administration (HC) of the Pacific Western University, USA; a Professorship in Safety Management & Engineering (HC) (Green Hills University, Denmark); and an RSH Diploma in Public Health, School of Hygiene, Aba. He retired as a Corporate Safety Manager from Guinness Nigeria Plc in November 2002, after putting in another twenty years working for Guinness. An astute and pioneering member of the Nigerian Institute of Safety Professionals (NISP), now the Institute of Safety Professionals of Nigeria (ISPON), he served as the Secretary General of this Institute for nine (9) years, and as National Publicity Secretary for three (3) years. He is the former President of the National Industrial Safety Council of Nigeria (NISCN), a tripartite body set up by the Federal Government, comprising the Governments (represented by the Director of Factories, Federal Ministry of Labour & Productivity, and state government representatives), Employers (represented by the Nigerian Employers Consultative Association, NECA) and Workers (represented by the Nigeria Labour Congress (NLC)/Trade Union Congress (TUC)), whose objective is to prevent industrial accidents and hazards and promote occupational health and welfare in industrial establishments. He has participated in many international and national safety seminars, workshops and conferences. He has written and presented papers on Safety Management, Accident Costs, Principles of Accident Prevention, Permit-to-Work Systems, Logistics HSE, Emergency Management, Confined Space Entry, etc., for organizations, institutions, and others. He is a member of the British Occupational Hygiene Society (BOHS), London; Borno State Headquarters Scout Commissioner; and President of the Obowu Development Association, Lagos.
Crowned Eze Ndigbo, IFO LGA, Ogun State, Nigeria.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import sorl.thumbnail.fields


# Initial schema migration for the Crawler app: categories, images, RSS feeds,
# posts, tags and the many-to-many join tables between them.
# NOTE: auto-generated migration — code is intentionally left untouched;
# comments only describe intent. Model/field names are Portuguese
# (Categorias = categories, Imagens = images, Postagens = posts, etc.).
class Migration(migrations.Migration):

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        # Category lookup table (unique, indexed name).
        migrations.CreateModel(
            name='Categorias',
            fields=[
                ('id_categoria', models.AutoField(primary_key=True, serialize=False)),
                ('categoria', models.CharField(unique=True, db_index=True, max_length=100)),
            ],
        ),
        # Downloaded images; img_link_orig is the unique source URL,
        # img_cover is the locally stored thumbnail (sorl).
        migrations.CreateModel(
            name='Imagens',
            fields=[
                ('id_imagem', models.AutoField(primary_key=True, serialize=False)),
                ('img_cover', sorl.thumbnail.fields.ImageField(upload_to='', null=True)),
                ('data_inserido', models.DateTimeField(auto_now_add=True)),
                ('data_modificado', models.DateTimeField(auto_now=True)),
                ('img_link_orig', models.URLField(unique=True, db_index=True, max_length=700)),
            ],
        ),
        # RSS feed URLs to crawl (fk_sites is added below, after Sites exists).
        migrations.CreateModel(
            name='LinksRSS',
            fields=[
                ('id_links_rss', models.AutoField(primary_key=True, serialize=False)),
                ('link_rss', models.URLField(db_index=True, max_length=600)),
                ('data_adicionado', models.DateTimeField(auto_now_add=True)),
                ('data_modificado', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Crawled posts: title, unique links, body text, timestamps, plus
        # links to the cover image and the originating RSS feed.
        migrations.CreateModel(
            name='Postagens',
            fields=[
                ('id_postagem', models.AutoField(primary_key=True, serialize=False)),
                ('titulo', models.CharField(max_length=500)),
                ('link', models.URLField(unique=True, db_index=True, max_length=700)),
                ('link_origi', models.URLField(null=True, unique=True, db_index=True, max_length=700)),
                ('texto', models.TextField(null=True)),
                ('data_adicionado', models.DateTimeField(auto_now_add=True)),
                ('data_modificado', models.DateTimeField(auto_now=True)),
                ('horario_postagem_site', models.DateTimeField(null=True)),
                ('fk_imagem', models.ForeignKey(to='Crawler.Imagens', related_name='fk_imagem_postagem')),
                ('fk_rss', models.ForeignKey(to='Crawler.LinksRSS', related_name='fk_rss_postagem')),
            ],
        ),
        # Join table: RSS feed <-> category (unique_together set below).
        migrations.CreateModel(
            name='RSSCategorias',
            fields=[
                ('id_rss_categorias', models.AutoField(primary_key=True, serialize=False)),
                ('fk_categoria', models.ForeignKey(to='Crawler.Categorias', related_name='fk_categoria_rss')),
                ('fk_rss', models.ForeignKey(to='Crawler.LinksRSS', related_name='fk_rss')),
            ],
        ),
        # Source sites (title, description, link, language, optional logo).
        migrations.CreateModel(
            name='Sites',
            fields=[
                ('id_sites', models.AutoField(primary_key=True, serialize=False)),
                ('titulo', models.CharField(db_index=True, max_length=150)),
                ('descricao', models.TextField()),
                ('link', models.URLField(db_index=True, max_length=600)),
                ('idioma', models.CharField(max_length=30)),
                ('data_adicionado', models.DateTimeField(auto_now_add=True)),
                ('data_modificado', models.DateTimeField(auto_now=True)),
                ('fk_logo', models.ForeignKey(null=True, to='Crawler.Imagens')),
            ],
        ),
        # Join table: site <-> category (unique_together set below).
        migrations.CreateModel(
            name='SitesCategorias',
            fields=[
                ('id_sites_categorias', models.AutoField(primary_key=True, serialize=False)),
                ('fk_categoria', models.ForeignKey(to='Crawler.Categorias', related_name='fk_categoria')),
                ('fk_site', models.ForeignKey(to='Crawler.Sites', related_name='fk_site')),
            ],
        ),
        # Tag lookup table with a usage counter.
        migrations.CreateModel(
            name='Tags',
            fields=[
                ('id_tag', models.AutoField(primary_key=True, serialize=False)),
                ('tag', models.CharField(unique=True, db_index=True, max_length=100)),
                ('contador', models.PositiveIntegerField(default=1)),
            ],
        ),
        # Join table: post <-> tag (unique_together set below).
        migrations.CreateModel(
            name='TagsPostagens',
            fields=[
                ('id_tags_postagens', models.AutoField(primary_key=True, serialize=False)),
                ('fk_postagem', models.ForeignKey(to='Crawler.Postagens', related_name='tp_postagem')),
                ('fk_tag', models.ForeignKey(to='Crawler.Tags', related_name='tp_tags')),
            ],
        ),
        # Added after Sites is created because LinksRSS is defined first above.
        migrations.AddField(
            model_name='linksrss',
            name='fk_sites',
            field=models.ForeignKey(to='Crawler.Sites'),
        ),
        # Enforce uniqueness of each pairing in the three join tables.
        migrations.AlterUniqueTogether(
            name='tagspostagens',
            unique_together=set([('fk_postagem', 'fk_tag')]),
        ),
        migrations.AlterUniqueTogether(
            name='sitescategorias',
            unique_together=set([('fk_site', 'fk_categoria')]),
        ),
        migrations.AlterUniqueTogether(
            name='rsscategorias',
            unique_together=set([('fk_rss', 'fk_categoria')]),
        ),
    ]
As an official SEAT dealer, Specialist Cars SEAT Dundee is perfectly suited to take care of all your SEAT car needs. If you're looking to buy a new car, we have the full range of SEAT models on offer ready for you to view and test drive, including the SEAT Ibiza, SEAT Leon and the SEAT Ateca. We have an extensive range of SEAT used cars for sale too, available in different shapes, models, colours and ages, meaning there is something for everyone and our highly experienced team will be on hand to find out what you need and to get you into your next SEAT car. Located in the West Gourdie Industrial Estate, Specialist Cars SEAT Dundee is easy to get to from the A90, just a short distance from the University of Dundee College of Medicine and the Ninewells Hospital and Medical Centre. Contact us today to find out more about the products and services we offer or to book a test drive in your next new car.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#  IkaLog
#  ======
#  Copyright (C) 2015 Takeshi HASEGAWA
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import threading
import time

import cv2


class PreviewRequestHandler(object):
    """Streams engine preview frames to one HTTP client as an MJPEG stream.

    The constructor blocks, serving ``multipart/x-mixed-replace`` JPEG parts
    on the handler's socket until an ``on_stop`` event arrives. The instance
    registers itself as a listener on the owning plugin so that ``on_event``
    receives fresh frames.
    """

    def __init__(self, http_handler):
        self._http_handler = http_handler
        self._plugin = http_handler.server.parent
        self._frame = None        # latest frame delivered by on_show_preview
        self._new_image = False   # True when _frame holds an unsent image
        self._stopped = False     # set by the on_stop event; ends the loop

        self._http_handler.send_response(200)
        self._http_handler.send_header(
            'Content-type',
            'multipart/x-mixed-replace; boundary=--frame_boundary')
        self._http_handler.end_headers()

        self._plugin._listeners.append(self)

        while not self._stopped:
            time.sleep(0.05)

            if self._frame is None:
                continue

            # FIXME: JPEG data should be shared among connections, for
            # performance
            result, jpeg = cv2.imencode('.jpg', self._frame)
            if not result:
                continue
            jpeg_length = len(jpeg)

            # Bug fix: the original wrote ``self.new_image = False`` (missing
            # underscore), creating a stray attribute instead of clearing the
            # ``_new_image`` flag set by on_event.
            self._new_image = False

            self._http_handler.wfile.write(
                '--frame_boundary\r\n'.encode('utf-8')
            )
            self._http_handler.send_header('Content-type', 'image/jpeg')
            self._http_handler.send_header('Content-length', str(jpeg_length))
            self._http_handler.end_headers()
            self._http_handler.wfile.write(jpeg)

        # Deregister so the plugin stops delivering events to this handler.
        self._plugin._listeners.remove(self)

    def on_event(self, event_name, context, params=None):
        """Engine callback: capture preview frames and honor stop requests.

        :param event_name: event identifier ('on_show_preview' / 'on_stop')
        :param context: engine context; frame read from context['engine']['frame']
        :param params: unused, kept for listener-interface compatibility
        """
        if event_name == 'on_show_preview':
            self._frame = context['engine']['frame']
            self._new_image = (self._frame is not None)
        elif event_name == 'on_stop':
            self._stopped = True
Every component on this black RAW 4 piece grinder is easily cleanable and replaceable, even the pollen screen. You can add or remove components onto your grinder and customize it exactly the way you like. For example you can remove the screen and use it as a whole leaf grinder (grinding directly into the super large catch basin). While it's technically a 4 piece grinder (grind, sift and collect) these RAW ones actually break down into 12 distinct pieces!
#------------------------------------------------------------------------------
# Condition objects - used for chaining together tests that yield True/False results
#------------------------------------------------------------------------------

class Condition(object):
    """
    Used to chain together objects for conditional testing.

    Conditions compose with ``&``, ``|`` and ``~`` into And/Or/Inverse trees
    and are evaluated lazily via :meth:`eval`, optionally against a ``data``
    argument that composite conditions pass down to their children.
    """

    class NO_DATA(Exception):
        # Sentinel default for ``data`` so that None remains a legal value.
        pass

    def __init__(self, value=None):
        self.value = value

    def eval(self, data=NO_DATA):
        # Leaf evaluation: the truthiness of the stored value; ``data`` is
        # ignored here but consumed by composite subclasses.
        return bool(self.value)

    def __or__(self, other):
        return Or(self, other)

    def __ror__(self, other):
        return Or(other, self)

    def __and__(self, other):
        return And(self, other)

    def __rand__(self, other):
        return And(other, self)

    def __invert__(self):
        return Inverse(self)

    def __nonzero__(self):
        return self.eval()

    # Bug fix: ``__nonzero__`` is Python 2 only. Under Python 3, bool(cond)
    # silently ignored it and every Condition was truthy; alias it to
    # ``__bool__`` so truth testing works on both versions.
    __bool__ = __nonzero__

    def __str__(self):
        return str(self.value)


# Constant conditions that always/never hold.
Always = Condition(True)
Never = Condition(False)


class Inverse(Condition):
    """Logical NOT of a wrapped condition."""

    def __init__(self, toInvert):
        self.toInvert = toInvert

    def eval(self, data=Condition.NO_DATA):
        return not self.toInvert.eval(data)

    def __str__(self):
        return "not %s" % self.toInvert


class AndOrAbstract(Condition):
    """Shared machinery for And/Or: flattening, short-circuit evaluation.

    Subclasses set ``_breakEarly`` (the value that short-circuits the scan)
    and ``_strJoiner`` (the operator text used by __str__).
    """

    def __init__(self, *args):
        self.args = []
        for arg in args:
            # Flatten nested nodes of the same type so (a & b) & c yields a
            # single And(a, b, c) rather than a nested tree.
            if isinstance(arg, self.__class__):
                self.args.extend(arg.args)
            else:
                self.args.append(arg)

    def eval(self, data=Condition.NO_DATA):
        for arg in self.args:
            # Children may be Conditions or plain values; plain values are
            # truth-tested directly.
            if isinstance(arg, Condition):
                val = arg.eval(data)
            else:
                val = bool(arg)
            if val == self._breakEarly:
                return self._breakEarly
        return not self._breakEarly

    def __str__(self):
        return "(%s)" % self._strJoiner.join([str(x) for x in self.args])


class And(AndOrAbstract):
    # And short-circuits on the first False child.
    _breakEarly = False
    _strJoiner = ' and '


class Or(AndOrAbstract):
    # Or short-circuits on the first True child.
    _breakEarly = True
    _strJoiner = ' or '
Two of our great supporters were featured on BBC Countryfile at the weekend: Paddock Farm Butchery and Will Greenstock from the Horse and Groom, Bourton-on-the-Hill. It was great to see Nick and Jon Francis and Will on TV at the weekend. Don’t worry if you missed it — here’s the link to the programme.
# -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2011-2013
# Stephen Tiedemann <stephen.tiedemann@googlemail.com>,
# Alexander Knaub <sanyok.og@googlemail.com>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
#
# NFC Forum Type 1 Tag support: NDEF TLV parsing plus a byte-addressable
# tag memory wrapper.  NOTE: this is Python 2-only code -- it mixes str and
# bytearray byte strings, relies on integer '/' division, and uses xrange
# and str.encode("hex").

import logging
log = logging.getLogger(__name__)

import nfc.tag
import nfc.clf
import nfc.ndef

class NDEF(object):
    """Read/write access to the NDEF message stored on a Type 1 Tag.

    Tag memory is parsed as a sequence of TLV (type-length-value) blocks;
    lock-control and memory-control TLVs contribute reserved byte ranges
    that must be skipped when reading or writing the NDEF message TLV.
    """

    def __init__(self, tag):
        # 'tag' is a Type1Tag; indexing it reads tag memory bytes.
        self._tag = tag
        self._msg = ''
        # Capability container occupies data bytes 8-11.
        self._cc = tag[8:12]
        log.debug("capability container " + str(self._cc).encode("hex"))
        # Byte addresses excluded from message data by default; 104-119
        # presumably cover the static lock/reserved bytes -- TODO confirm
        # against the Type 1 Tag operation specification.
        self._skip = set(range(104, 120))
        self.changed # force initial read

    def _read_tlv(self, offset):
        # Dispatch on the TLV tag byte at 'offset'; every handler returns
        # the offset of the next TLV, or None when parsing should stop.
        read_tlv = {
            0x00: lambda x: x + 1,  # NULL TLV: skip a single byte
            0x01: self._read_lock_tlv,
            0x02: self._read_memory_tlv,
            0x03: self._read_ndef_tlv,
            0xFE: lambda x: None  # terminator TLV: stop parsing
            }.get(self._tag[offset], self._read_unknown_tlv)
        return read_tlv(offset + 1)

    def _read_unknown_tlv(self, offset):
        # Unknown TLV types are skipped using their length field.
        log.debug("found unknown tlv")
        length, offset = self._read_tlv_length(offset)
        return offset + length

    def _read_ndef_tlv(self, offset):
        log.debug("ndef message tlv at 0x{0:0X}".format(offset-1))
        # Remember where the NDEF TLV starts so the message setter can
        # rewrite it in place later.
        self._ndef_tlv_offset = offset - 1
        length, offset = self._read_tlv_length(offset)
        log.debug("ndef message length is {0}".format(length))
        # Usable capacity: data area size derived from CC byte 2, minus
        # the bytes consumed before the value field and all reserved
        # (skipped) addresses.
        self._capacity = (self._cc[2]+1)*8 - offset - len(self._skip)
        if length < 255 and self._capacity >= 255:
            self._capacity -= 2 # account for three byte length format
        # Collect the message bytes, stepping over reserved addresses.
        self._msg = bytearray()
        while length > 0:
            if not offset in self._skip:
                self._msg.append(self._tag[offset])
                length -= 1
            offset += 1
        # Returning None terminates TLV parsing after the NDEF TLV.
        return None

    def _read_lock_tlv(self, offset):
        # Lock-control TLV: locates dynamic lock bytes so they can be
        # excluded from the message data area.
        log.debug("dynamic lock byte tlv at 0x{0:0X}".format(offset-1))
        length, offset = self._read_tlv_length(offset)
        value = self._tag[offset:offset+length]
        page_offs = value[0] >> 4
        byte_offs = value[0] & 0x0F
        # Lock bit count rounded up to whole bytes (Python 2 integer '/').
        resv_size = ((value[1] - 1) / 8) + 1
        page_size = 2 ** (value[2] & 0x0F)
        resv_start = page_offs * page_size + byte_offs
        self._skip.update(range(resv_start, resv_start + resv_size))
        return offset + length

    def _read_memory_tlv(self, offset):
        # Memory-control TLV: a reserved memory area, handled like lock
        # bytes above (added to the skip set).
        log.debug("memory control tlv at 0x{0:0X}".format(offset-1))
        length, offset = self._read_tlv_length(offset)
        value = self._tag[offset:offset+length]
        page_offs = value[0] >> 4
        byte_offs = value[0] & 0x0F
        resv_size = value[1]
        page_size = 2 ** (value[2] & 0x0F)
        resv_start = page_offs * page_size + byte_offs
        self._skip.update(range(resv_start, resv_start + resv_size))
        return offset + length

    def _read_tlv_length(self, offset):
        # TLV length field: one byte, or the three-byte form (0xFF marker
        # followed by a 16-bit big-endian length).  Returns the length and
        # the offset of the first value byte.
        length = self._tag[offset]
        if length == 255:
            length = self._tag[offset+1] * 256 + self._tag[offset+2];
            offset = offset + 2
            if length < 256 or length == 0xFFFF:
                # NOTE(review): "lenght" is a typo, but it is a runtime
                # string and therefore left unchanged here.
                raise ValueError("invalid tlv lenght value")
        return length, offset + 1

    @property
    def version(self):
        """The version of the NDEF mapping (major.minor from CC byte 1)."""
        return "%d.%d" % (self._cc[1]>>4, self._cc[1]&0x0F)

    @property
    def capacity(self):
        """The maximum number of user bytes on the NDEF tag."""
        return self._capacity

    @property
    def readable(self):
        """Is True if data can be read from the NDEF tag."""
        return self._cc[3] & 0xF0 == 0x00

    @property
    def writeable(self):
        """Is True if data can be written to the NDEF tag."""
        return self._cc[3] & 0x0F == 0x00

    @property
    def length(self):
        """NDEF message data length."""
        return len(self._msg)

    @property
    def changed(self):
        """True if the message has changed since the read.

        Reading this property re-parses the TLV area (starting at byte
        12, right after the capability container) and refreshes the
        cached message as a side effect.
        """
        if self.readable:
            old_msg = self._msg[:] # make a copy
            offset = 12
            while offset is not None:
                offset = self._read_tlv(offset)
            return self._msg != old_msg
        return False

    @property
    def message(self):
        """An NDEF message object (an empty record message if tag is empty)."""
        if self.readable:
            try:
                return nfc.ndef.Message(str(self._msg))
            except nfc.ndef.parser_error:
                pass
        return nfc.ndef.Message(nfc.ndef.Record())

    @message.setter
    def message(self, msg):
        # Serialize and write 'msg' into the NDEF TLV located during the
        # last read, skipping reserved byte addresses.
        if not self.writeable:
            raise nfc.tag.AccessError
        data = bytearray(str(msg))
        nlen = len(data)
        if nlen > self.capacity:
            raise nfc.tag.CapacityError
        if nlen < self.capacity:
            # Append a terminator TLV after the message data.
            data = data + "\xFE"
        with self._tag as tag:
            # NOTE(review): byte 8 (NDEF magic) appears to be cleared for
            # the duration of the write and restored to 0xE1 below; bytes
            # 9/11 look like mapping version and write-access settings --
            # confirm against the Type 1 Tag operation specification.
            tag[0x08] = 0x00
            tag[0x09] = 0x10
            tag[0x0B] = 0x00
            offset = self._ndef_tlv_offset + 1
            # Write the length field in one- or three-byte format.
            if len(data) < 255:
                tag[offset] = nlen
                offset += 1
            else:
                tag[offset] = 255
                tag[offset+1] = nlen / 256
                tag[offset+2] = nlen % 256
                offset += 3
            # Copy message bytes, stepping over reserved addresses.
            for octet in data:
                while offset in self._skip:
                    offset += 1
                tag[offset] = octet
                offset += 1
        with self._tag as tag:
            tag[8] = 0xE1

class Type1Tag(object):
    """Byte-addressable view of an NFC Forum Type 1 Tag.

    Indexing (tag[i] / tag[a:b]) reads from a locally cached memory
    image, fetching further blocks on demand; assignments are buffered
    in a dirty set and flushed to the tag when the object is used as a
    context manager.
    """
    type = "Type1Tag"

    def __init__(self, clf, target):
        self.clf = clf
        self.uid = target.uid
        # read_all() returns two header rom bytes (HR0, HR1) before the
        # memory content; strip them so _mmap[i] is tag memory byte i.
        self._mmap = self.read_all()[2:]
        # Byte addresses modified locally but not yet written to the tag.
        self._sync = set()
        self.ndef = None
        # 0xE1 at byte 8 is the NDEF magic number of the capability
        # container; only then attempt NDEF parsing.
        if self[8] == 0xE1:
            try:
                self.ndef = NDEF(self)
            except Exception as error:
                log.error("while reading ndef: {0!r}".format(error))

    def __str__(self):
        return "Type1Tag UID=" + str(self.uid).encode("hex")

    def __getitem__(self, key):
        # Accept an int or a slice; reads beyond the cached image pull in
        # additional 8-byte blocks from the tag.
        if type(key) is int:
            key = slice(key, key+1)
        if not type(key) is slice:
            raise TypeError("key must be of type int or slice")
        if key.start > key.stop:
            raise ValueError("start index is greater than stop index")
        if key.stop > len(self._mmap):
            # Python 2 integer division picks whole blocks to fetch.
            for block in range(len(self._mmap)/8, key.stop/8):
                self._mmap += self.read_block(block)
        # NOTE(review): local name shadows the 'bytes' builtin.
        bytes = self._mmap[key.start:key.stop]
        # Single-byte reads return the byte value, not a sequence.
        return bytes if len(bytes) > 1 else bytes[0]

    def __setitem__(self, key, value):
        # Writes only touch the local image; addresses are recorded in
        # _sync and flushed by __exit__.
        if type(key) is int:
            key = slice(key, key+1)
        if type(key) is not slice:
            raise TypeError("key must be of type int or slice")
        if type(value) == int:
            value = bytearray([value])
        else:
            value = bytearray(value)
        if len(value) != key.stop - key.start:
            raise ValueError("value and slice length do not match")
        if key.stop > len(self._mmap):
            # Fault in the missing blocks before writing over them.
            self.__getitem__(key)
        for i in xrange(key.start, key.stop):
            self._mmap[i] = value[i-key.start]
            self._sync.add(i)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush buffered writes to the tag, but only on a clean exit.
        if exc_type is None:
            # _mmap[10] is presumably CC byte 2 (memory size); small
            # static tags are written byte-wise, larger tags block-wise
            # -- TODO confirm the 15 threshold against the spec.
            if self._mmap[10] < 15:
                for i in sorted(self._sync):
                    self.write_byte(i, self._mmap[i])
                self._sync.clear()
            else:
                while len(self._sync) > 0:
                    # Write the whole 8-byte block containing the lowest
                    # dirty address, then drop all of its addresses.
                    block = sorted(self._sync).pop(0) / 8
                    self.write_block(block, self._mmap[block<<3:(block+1)<<3])
                    self._sync -= set(range(block<<3, (block+1)<<3))

    @property
    def is_present(self):
        """Returns True if the tag is still within communication range."""
        try:
            data = self.transceive("\x78\x00\x00"+self.uid)
            return data and len(data) == 6
        except nfc.clf.DigitalProtocolError:
            return False

    def transceive(self, data, timeout=0.1):
        # Raw command exchange with the tag via the contactless frontend.
        return self.clf.exchange(data, timeout)

    def read_id(self):
        """Read the tag identification (RID command, opcode 0x78).
        """
        # NOTE(review): the debug text "read all" looks copy-pasted from
        # read_all(); left unchanged as it is a runtime string.
        log.debug("read all")
        cmd = "\x78\x00\x00\x00\x00\x00\x00"
        return self.transceive(cmd)

    def read_all(self):
        """Read header rom and all static memory bytes (blocks 0-14).
        """
        log.debug("read all")
        cmd = "\x00\x00\x00" + self.uid
        return self.transceive(cmd)

    def read_byte(self, addr):
        """Read a single byte from static memory area (blocks 0-14).
        """
        log.debug("read byte at address 0x{0:03X}".format(addr))
        cmd = "\x01" + chr(addr) + "\x00" + self.uid
        return self.transceive(cmd)[1]

    def write_byte(self, addr, byte, erase=True):
        """Write a single byte to static memory area (blocks 0-14).
        The target byte is zero'd first if 'erase' is True (default).
        """
        log.debug("write byte at address 0x{0:03X}".format(addr))
        # 0x53 = write-erase, 0x1A = write-no-erase command code.
        cmd = "\x53" if erase is True else "\x1A"
        cmd = cmd + chr(addr) + chr(byte) + self.uid
        return self.transceive(cmd)

    def read_block(self, block):
        """Read an 8-byte data block at address (block * 8).
        """
        log.debug("read block at address 0x{0:03X}".format(block*8))
        cmd = "\x02" + chr(block) + 8 * chr(0) + self.uid
        # Strip the echoed block number; return the 8 data bytes.
        return self.transceive(cmd)[1:9]

    def write_block(self, block, data, erase=True):
        """Write an 8-byte data block at address (block * 8).
        The target bytes are zero'd first if 'erase' is True (default).
        """
        log.debug("write block at address 0x{0:03X}".format(block*8))
        # 0x54 = write-erase, 0x1B = write-no-erase command code.
        cmd = "\x54" if erase is True else "\x1B"
        cmd = cmd + chr(block) + data + self.uid
        return self.transceive(cmd)
Support » Plugin: Proper Network Activation » This is so cool…. Thank you so much for this piece of work. Some authors want to add multisite support to their plugins but don’t know how. Thanks to this plugin, I can now use those plugins across my sub-sites. Thanks again! The topic ‘This is so cool….’ is closed to new replies.
# -*- coding: utf-8 -*-
"""Core classes for overalls."""


class FileCoverage(object):
    """Coverage result for a single file.

    :param str filename: Name of the filename the results are for.
    :param str source: Source code (newline separated).
    :param list coverage: List of coverage results. One entry per line.
        Entries may be an integer (number of lines covered) or None
        (not relevant, e.g. for lines that are comments).
    """

    def __init__(self, filename, source, coverage):
        self.filename = filename
        self.source = source
        self.coverage = coverage

    def __repr__(self):
        # Truncate source/coverage so reprs of large files stay readable.
        return "<%s filename=%r source=%r coverage=%r>" % (
            self.__class__.__name__,
            self.filename,
            self.source[:30],
            self.coverage[:30],
        )

    def __eq__(self, other):
        if not isinstance(other, FileCoverage):
            return NotImplemented
        return all((self.filename == other.filename,
                    self.source == other.source,
                    self.coverage == other.coverage))

    def __ne__(self, other):
        # Fix: __eq__ without __ne__ leaves Python 2's default identity
        # inequality in place, so equal instances would compare unequal
        # with "!=".  Defined explicitly for Python 2/3 consistency.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result


class CoverageResults(object):
    """Coverage results: an ordered collection of `FileCoverage` items."""

    def __init__(self):
        self.files = []

    def append(self, file_coverage):
        """Add a single `FileCoverage` to the results."""
        self.files.append(file_coverage)

    def extend(self, results):
        """Merge another `CoverageResults` into this one."""
        self.files.extend(results.files)


class Collector(object):
    """Object that knows how collect coverage results from a single source."""

    def results(self):
        """Should read the coverage source and return a `Results` instance."""
        raise NotImplementedError("Collectors should implement .results.")


class StaticCollector(Collector):
    """Collector that returns a static set of results."""

    def __init__(self, results):
        self._results = results

    def results(self):
        return self._results


class CollectorSet(Collector):
    """Collector that combines results from a set of other collectors."""

    def __init__(self, collectors):
        self._collectors = collectors

    def results(self):
        # Concatenate every sub-collector's results in order.
        combined = CoverageResults()
        for collector in self._collectors:
            combined.extend(collector.results())
        return combined


class Uploader(object):
    """Object that knows how to upload coverage results somewhere."""

    def upload(self, results):
        """Upload a set of `Results`."""
        raise NotImplementedError("Uploaders should implement .upload.")
1 00:00:00,459 --> 00:00:10,990 And now for a read aloud of Paul Bunyan 2 00:00:10,990 --> 00:00:14,270 Even as a baby, Paul Bunyan was mighty big. 3 00:00:14,270 --> 00:00:16,810 How big? 4 00:00:16,810 --> 00:00:23,29 Well, he was so big that his parents had to use a covered wagon for his cradle. 5 00:00:23,29 --> 00:00:28,29 As you might imagine, young Paul Bunyan had a big appetite. 6 00:00:28,29 --> 00:00:35,289 He gobbled up five barrels of porridge a day, and his parents had to milk four dozen cows 7 00:00:35,289 --> 00:00:40,59 every morning and evening just to keep his baby bottle filled. 8 00:00:40,59 --> 00:00:46,10 Paul was so big it caused some problems in the little town in Maine where he grew up. 9 00:00:46,10 --> 00:00:52,550 When he sneezed, he blew the birds from Maine to California. 10 00:00:52,550 --> 00:00:57,679 When he snored, the neighbors ran out of their houses hollering, “Earthquake! 11 00:00:57,679 --> 00:00:58,730 Earthquake!” 12 00:00:58,730 --> 00:01:05,479 After that, Paul’s father thought it might be better if Paul didn’t sleep in town. 13 00:01:05,479 --> 00:01:11,280 He built a cot on a large raft for Paul and floated it off the coast. 14 00:01:11,280 --> 00:01:17,229 Paul slept on the raft for a few nights, but the floating cot didn’t work out. 15 00:01:17,229 --> 00:01:23,320 When Paul turned over in his sleep, he created gigantic waves that knocked down houses along 16 00:01:23,320 --> 00:01:24,340 the coast. 17 00:01:24,340 --> 00:01:31,159 Eventually, Paul’s father decided that the East Coast was just too small for Paul Bunyan. 18 00:01:31,159 --> 00:01:35,409 The only sensible thing to do was to move out West. 19 00:01:35,409 --> 00:01:39,729 So the Bunyan family moved to Minnesota. 20 00:01:39,729 --> 00:01:45,289 In those days Minnesota was full of logging camps, sawmills, and lumberjacks. 
21 00:01:45,289 --> 00:01:49,299 Americans were moving west and “building the country.” 22 00:01:49,299 --> 00:01:55,909 They had to cut down a lot of trees to make their homes, not to mention their schools, 23 00:01:55,909 --> 00:01:58,840 churches, boats, and furniture. 24 00:01:58,840 --> 00:02:04,590 When he grew up, Paul Bunyan went to work as a lumberjack, and what a lumberjack he 25 00:02:04,590 --> 00:02:06,740 proved to be! 26 00:02:06,740 --> 00:02:12,390 He made himself a giant ax, with a handle carved out of a full-grown hickory tree. 27 00:02:12,390 --> 00:02:15,970 He could bring down a giant tree with a single swing 28 00:02:15,970 --> 00:02:17,540 of his ax. 29 00:02:17,540 --> 00:02:23,130 As the tree tipped over, he would yell, “Timber!” so the other lumberjacks had time to get out 30 00:02:23,130 --> 00:02:25,530 of the way. 31 00:02:25,530 --> 00:02:28,950 Everyone looked up to Paul Bunyan—way up! 32 00:02:28,950 --> 00:02:31,420 The other lumberjacks were full of admiration for him. 33 00:02:31,420 --> 00:02:36,280 The bosses were grateful for the amazing amount of work he could do in a day. 34 00:02:36,280 --> 00:02:43,720 Paul had a big heart, too, but one thing he always wished for was a true friend. 35 00:02:43,720 --> 00:02:47,840 There simply wasn’t anybody else his size who could be his friend. 36 00:02:47,840 --> 00:02:52,510 That all changed during the winter of the Big Blue Snow. 37 00:02:52,510 --> 00:02:58,590 It was called the winter of the Big Blue Snow because it was so cold that everyone shivered 38 00:02:58,590 --> 00:03:00,800 and turned blue. 39 00:03:00,800 --> 00:03:02,220 Even the snow shivered and turned blue. 40 00:03:02,220 --> 00:03:09,60 One day, as Paul made his way through the blue snowdrifts, he heard a muffled whimper. 41 00:03:09,60 --> 00:03:15,830 He followed the noise until he saw two big, blue, furry things sticking up out of the 42 00:03:15,830 --> 00:03:16,330 snow. 
43 00:03:16,330 --> 00:03:19,480 He reached down and gave a pull. 44 00:03:19,480 --> 00:03:26,800 It turned out that the two big, blue, furry things were two big, blue ears. 45 00:03:26,800 --> 00:03:31,230 And connected to the big, blue ears was a giant, blue, baby ox! 46 00:03:31,230 --> 00:03:36,590 Paul exclaimed, “The poor little fellow is half frozen.” 47 00:03:36,590 --> 00:03:40,350 Paul carried the blue ox home, wrapped him in blankets, and fed him. 48 00:03:40,350 --> 00:03:47,650 The baby ox was so content that he took a long nap in Paul’s big, strong arms. 49 00:03:47,650 --> 00:03:52,700 When he woke up, he looked up at Paul and do you know what he said? 50 00:03:52,700 --> 00:03:53,150 “Mama! 51 00:03:53,150 --> 00:03:55,590 Mama!” 52 00:03:55,590 --> 00:03:59,680 Then he gave Paul a big, slobbery lick on the face. 53 00:03:59,680 --> 00:04:05,90 Paul laughed and said, “Babe, we’re gonna be great friends!” 54 00:04:05,90 --> 00:04:06,220 And they were. 55 00:04:06,220 --> 00:04:14,730 In fact, Paul Bunyan and Babe the Blue Ox were soon inseparable. 56 00:04:14,730 --> 00:04:16,470 Everywhere Paul went, Babe went, too. 57 00:04:16,470 --> 00:04:20,729 The two of them worked together in the lumber camps. 58 00:04:20,729 --> 00:04:23,510 Paul chopped down the trees. 59 00:04:23,510 --> 00:04:28,990 Then Babe hauled them to the river and dropped them in so they could float downstream to 60 00:04:28,990 --> 00:04:29,479 a sawmill. 61 00:04:29,479 --> 00:04:34,40 Together, Paul and Babe did the work of a hundred men. 62 00:04:34,40 --> 00:04:41,830 The lumber company figured the best way to keep Paul Bunyan happy was through his stomach, 63 00:04:41,830 --> 00:04:45,50 so they hired a special cook to feed Paul and Babe. 64 00:04:45,50 --> 00:04:49,659 The cook’s name was Sourdough Sam. 65 00:04:49,659 --> 00:04:52,340 Sourdough Sam was known for the giant flapjacks he cooked 66 00:04:52,340 --> 00:04:56,719 in the world’s biggest frying pan. 
67 00:04:56,719 --> 00:05:01,770 The colossal pan sat on an enormous cast iron frame. 68 00:05:01,770 --> 00:05:07,539 Every morning Sourdough Sam would build a raging forest fire underneath the pan. 69 00:05:07,539 --> 00:05:13,180 Then he would call for his two helpers, Lars Larson and Pete Peterson. 70 00:05:13,180 --> 00:05:18,710 Lars and Pete would grease up the pan by tying slabs of bacon to their 71 00:05:18,710 --> 00:05:23,590 feet and skating back and forth across the sizzling pan. 72 00:05:23,590 --> 00:05:28,770 Then Sourdough Sam would make a giant stack of pancakes for Paul and an even larger stack 73 00:05:28,770 --> 00:05:29,110 for Babe. 74 00:05:29,110 --> 00:05:34,310 Thanks to Sourdough Sam and his overgrown flapjacks, Babe eventually grew to be even 75 00:05:34,310 --> 00:05:37,210 bigger than Paul. 76 00:05:37,210 --> 00:05:43,889 He was so big that, if you were standing at his front legs, you had to use a telescope 77 00:05:43,889 --> 00:05:46,330 to see all the way to his back legs. 78 00:05:46,330 --> 00:05:53,349 In fact, he was so heavy that his footprints filled up with water and turned into lakes. 79 00:05:53,349 --> 00:05:59,90 In fact, there are more than ten thousand lakes in Minnesota today, and most of them 80 00:05:59,90 --> 00:06:04,389 were created by Babe the Blue Ox back in the frontier days. 81 00:06:04,389 --> 00:06:10,120 Babe and Paul helped the lumberjacks solve all sorts of problems. 82 00:06:10,120 --> 00:06:14,300 Once there was a river that was full of twists and turns. 83 00:06:14,300 --> 00:06:19,740 Sometimes the trees would get stuck in the turns and never make it downstream to the 84 00:06:19,740 --> 00:06:20,740 sawmill. 85 00:06:20,740 --> 00:06:24,729 But Paul Bunyan thought of a way to fix that! 86 00:06:24,729 --> 00:06:28,470 He went to one end of the river and sent Babe to the other end. 87 00:06:28,470 --> 00:06:32,520 Paul grabbed the river and pulled in one direction. 
88 00:06:32,520 --> 00:06:36,20 Babe pulled the other end in the opposite direction. 89 00:06:36,20 --> 00:06:37,300 Then—snap! 90 00:06:37,300 --> 00:06:43,669 Just like that, all of the kinks were pulled out, and the river was as straight as an ax 91 00:06:43,669 --> 00:06:44,870 handle. 92 00:06:44,870 --> 00:06:49,289 Of course, this tightening operation left the river a good deal longer than it had been 93 00:06:49,289 --> 00:06:53,499 before, and there was a lot of extra water lying around. 94 00:06:53,499 --> 00:06:57,870 Paul and Babe worked together to dig five big holes 95 00:06:57,870 --> 00:07:00,0 to hold all the extra water. 96 00:07:00,0 --> 00:07:04,100 Nowadays these are called the Great Lakes. 97 00:07:04,100 --> 00:07:08,449 One day, the logging bosses got to talking. 98 00:07:08,449 --> 00:07:13,300 One of them said that the United States was a fine country, to be sure, but it could still 99 00:07:13,300 --> 00:07:16,610 stand a little improvement. 100 00:07:16,610 --> 00:07:20,540 For one thing, it could use a few more rivers. 101 00:07:20,540 --> 00:07:25,360 And what it really needed was a big river running right down the middle of the country, 102 00:07:25,360 --> 00:07:28,469 all the way from Minnesota down to New Orleans. 103 00:07:28,469 --> 00:07:33,449 “If we had a river like that,” the man said, “we could ship timber down to New 104 00:07:33,449 --> 00:07:39,599 Orleans and all around the world!” 105 00:07:39,599 --> 00:07:42,889 Paul Bunyan happened to overhear this conversation. 106 00:07:42,889 --> 00:07:46,690 He told the bosses he would see what he could do. 107 00:07:46,690 --> 00:07:50,960 He hitched up Babe and they started plowing south. 108 00:07:50,960 --> 00:07:55,870 As they plowed, they threw a great mound of dirt and rocks to the right and a smaller 109 00:07:55,870 --> 00:07:58,530 mound to the left. 
110 00:07:58,530 --> 00:08:03,340 On the right side they made the Rocky Mountains, and on the left side they made the Appalachian 111 00:08:03,340 --> 00:08:05,749 Mountains. 112 00:08:05,749 --> 00:08:11,580 Paul Bunyan and Babe didn’t stop until they had plowed a channel all the way south to 113 00:08:11,580 --> 00:08:13,560 the Gulf of Mexico. 114 00:08:13,560 --> 00:08:18,300 And the river that flows in that channel nowadays, that’s what we call the Mississippi River. 115 00:08:18,300 --> 00:08:25,669 From that day on, Paul and Babe went around the country, using their size and strength 116 00:08:25,669 --> 00:08:27,889 to help anyone who needed it. 117 00:08:27,889 --> 00:08:34,830 Later, they dug the Grand Canyon as they made their way to the West Coast of California. 118 00:08:34,830 --> 00:08:40,930 And when the wind blows just right from the west, you can still smell those infamous, 119 00:08:40,930 --> 00:08:43,320 colossal pancakes cooking on the frontier.
#!/usr/bin/python

"""Window handling module.

Wraps pysdl2 (sdl2.ext) windows and renderers behind a pygame-style
module API (create/set_mode/set_caption/update/flip/get_surface) plus a
``Window`` class.  A single module-global ``window`` instance is shared
by the module-level functions.
"""

import sdl2.ext
import pygame2
from sdl2 import rect, render
from sdl2.ext.compat import isiterable

# sdlgfx is optional; it is only needed for software-surface rotation.
# NOTE(review): the bare except silently swallows *any* import-time error.
try:
    import sdl2.sdlgfx
except:
    pass

# Module-global singleton managed by create()/set_mode()/set_caption().
window = None


class Window(object):

    def __init__(self, title="Pygame2", size=(800, 600), type="hardware",
                 fullscreen=False):
        """An object used to create SDL2 windows.

        The *Window* object contains backwards compatible methods for the
        pygame display and creates a simple way to render surfaces to
        the screen.

        Args:
            title (str): The title of the window.
            size (tuple of int, optional): The size of the window in
                pixels, defaults to (800, 600).
            type (str, optional): The type of SDL2 window to create. Can
                be either "hardware" or "software", defaults to
                "hardware".
            fullscreen (boolean, optional): Whether or not the window is
                fullscreen, defaults to False.
                NOTE(review): this argument is currently never used.
        """
        self.title = title
        self.size = size
        self.width = size[0]
        self.height = size[1]
        self.type = type

        # Create our SDL2 window.
        self.sdl2_window = sdl2.ext.Window(title, size)
        self.sdl2_window.show()
        self.world = sdl2.ext.World()
        self.systems = []
        self.sprites = []
        self.to_blit = []

        # Set up our renderer.
        # NOTE(review): any other 'type' value leaves the renderer
        # attributes unset, so update()/fill() would fail later.
        if type == "software":
            self.texture_renderer = None
            self.sprite_renderer = SoftwareRenderer(self.sdl2_window)
            self.factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
        elif type == "hardware":
            self.texture_renderer = sdl2.ext.Renderer(self.sdl2_window)
            self.sprite_renderer = TextureRenderer(self.texture_renderer)
            self.factory = sdl2.ext.SpriteFactory(
                sdl2.ext.TEXTURE, renderer=self.texture_renderer)
            self.sw_factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)

        # Add our renderer as a system that will be called when
        # world.process() is called.
        self.world.add_system(self.sprite_renderer)

    def update(self):
        """Updates the contents of the window.

        When this method is called, we render all sprites that have been
        added to our "to_blit" list, after running every registered
        system with a zero delta-time.

        Args:
            None

        Returns:
            None
        """
        dt = 0
        for system in self.systems:
            system.process(self, dt)

        #if self.type == "hardware":
        #    self.texture_renderer.clear()
        #if self.sprites:
        #    self.sprite_renderer.render(self.sprites)
        # Drain the blit queue accumulated since the last update.
        if self.to_blit:
            self.sprite_renderer.render(self.to_blit)
            self.to_blit = []
        #self.world.process()
        self.sdl2_window.refresh()

    def blit(self, surface, position):
        """Adds a sprite to our list of sprites to be drawn on update.

        This method allows backwards compatibility of pygame projects by
        setting the sprite's position and adding it to our "to_blit"
        list.

        Args:
            surface (pygame2.Surface): The surface object containing the
                sprite to draw on the screen.
            position (tuple of int): The (x, y) position on the screen to
                draw the sprite at.

        Returns:
            None
        """
        sprite = surface.sprite
        # Falsy position keeps the sprite's current coordinates.
        if not position:
            position = [sprite.x, sprite.y]
        else:
            sprite.x = position[0]
            sprite.y = position[1]
        #self.sprite_renderer.render(sprite)
        self.to_blit.append(sprite)

    def toggle_fullscreen(self):
        """Toggles fullscreen.

        This method toggles fullscreen using the SDL2_SetWindowFullscreen
        function.

        NOTE(review): this always passes SDL_WINDOW_FULLSCREEN_DESKTOP,
        so it appears to only switch *into* fullscreen, never back --
        confirm intended behavior.

        Args:
            None

        Returns:
            None
        """
        sdl2.SDL_SetWindowFullscreen(self.sdl2_window,
                                     sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP)

    def set_caption(self, title):
        """Sets the title of the SDL2 window.

        This method allows backwards compatibility with pygame.

        Args:
            title (str): The title of the window.

        Returns:
            None
        """
        self.title = title
        self.sdl2_window.title = title

    def set_icon(self, icon_path):
        """Sets the icon of the window.

        This method allows backwards compatibility with pygame.

        NOTE(review): not implemented -- this is currently a no-op stub.

        Args:
            icon_path (str): Path to the icon file to use.

        Returns:
            None
        """
        pass

    def add_system(self, system):
        """Adds an object with a "process" method that is executed on update.

        This method employs a new way to define "systems" that will be
        called whenever the window's "update" method is called.

        Args:
            system (object): An object with a "process" method.
        """
        self.systems.append(system)

    def fill(self, color):
        """Fills the window with an RGB(A) color.

        This method provides a backwards compatible method for filling
        the screen with a particular color.

        Args:
            color (tuple of int): The (r, g, b, a) color values to fill
                the screen.

        Returns:
            None
        """
        if self.type == "software":
            # Fill the screen with black every frame.
            sdl2.ext.fill(self.sprite_renderer.surface,
                          sdl2.ext.Color(color[0], color[1], color[2]))
        elif self.type == "hardware":
            self.texture_renderer.clear(color)

    def get_rect(self):
        """Gets the rectangle of the current window.

        This method provides a pygame-compatible way to get the rectangle
        of the current window.

        Args:
            None

        Returns:
            A pygame2.Rect object.
        """
        rect = pygame2.Rect(0, 0, self.width, self.height)
        return rect


class SoftwareRenderer(sdl2.ext.SoftwareSpriteRenderSystem):
    def __init__(self, window):
        """Creates an SDL2 software renderer used for software rendering.

        SDL2 is capable of using either software or texture-based
        rendering. Texture rendering uses hardware acceleration to draw
        2d sprites, while software rendering uses the CPU to draw 2d
        sprites.

        Args:
            window (pygame2.display.Window): The pygame2 window object.
        """
        super(SoftwareRenderer, self).__init__(window)

    def render(self, components):
        """Renders a sprite or list of sprites to the screen.

        This is a modified version of the original pysdl2 software render
        method, but includes the ability to rotate sprites using sdlgfx.
        Note that sdlgfx must be installed for this method to work.

        Args:
            components (SDL2 Sprite or List): A sprite or list of sprites
                to render to the screen.

        Returns:
            None
        """
        # Fill the screen with black every frame.
        #sdl2.ext.fill(self.surface, sdl2.ext.Color(0, 0, 0))

        # If we're using software rendering, do rotation using sdlgfx.
        # Each sprite's surface is re-derived from its pristine
        # 'original' surface so repeated rotations do not accumulate
        # resampling artifacts; the previous surface is freed to avoid
        # leaking SDL surfaces.
        if isiterable(components):
            sprites = []
            for sprite in components:
                rotozoom = sdl2.sdlgfx.rotozoomSurface
                surface = rotozoom(sprite.original.surface,
                                   sprite.angle,
                                   1.0,
                                   1).contents
                sdl2.SDL_FreeSurface(sprite.surface)
                sprite.surface = surface
                sprites.append(sprite)
            components = sprites
        else:
            surface = sdl2.sdlgfx.rotozoomSurface(components.original.surface,
                                                  components.angle,
                                                  1.0,
                                                  1).contents
            sdl2.SDL_FreeSurface(components.surface)
            components.surface = surface

        super(SoftwareRenderer, self).render(components)


class TextureRenderer(sdl2.ext.TextureSpriteRenderSystem):
    def __init__(self, target):
        """Creates an SDL2 texture renderer used for hardware rendering.

        SDL2 is capable of using either software or texture-based
        rendering. Texture rendering uses hardware acceleration to draw
        2d sprites, while software rendering uses the CPU to draw 2d
        sprites.

        Args:
            target (sdl2.ext.Renderer): An SDL2 texture renderer object.
        """
        super(TextureRenderer, self).__init__(target)

    def render(self, sprites, x=None, y=None):
        """Renders a sprite or list of sprites to the screen.

        This method overrides the render method of the
        sdl2.ext.TextureSpriteRenderSystem to use "SDL_RenderCopyEx"
        instead of "SDL_RenderCopy" to allow sprite rotation:
        http://wiki.libsdl.org/SDL_RenderCopyEx

        Args:
            sprites (SDL2 Sprite or List): A sprite or list of sprites to
                render to the screen.
            x (int, optional): X position to render the sprite, defaults
                to None
            y (int, optional): Y position to render the sprite, defaults
                to None

        Returns:
            None
        """
        r = rect.SDL_Rect(0, 0, 0, 0)
        if isiterable(sprites):
            # Hoist the C function and renderer lookups out of the loop.
            rcopy = render.SDL_RenderCopyEx
            renderer = self.sdlrenderer
            # x/y act as an optional offset applied to every sprite.
            x = x or 0
            y = y or 0
            for sp in sprites:
                r.x = x + sp.x
                r.y = y + sp.y
                r.w, r.h = sp.size
                if rcopy(renderer, sp.texture, None, r, sp.angle, None,
                         render.SDL_FLIP_NONE) == -1:
                    # NOTE(review): SDLError is not imported anywhere in
                    # this module, so this raise would itself fail with
                    # NameError if SDL_RenderCopyEx ever returns -1.
                    raise SDLError()
        else:
            r.x = sprites.x
            r.y = sprites.y
            r.w, r.h = sprites.size
            # Explicit x/y override the sprite's own position here.
            if x is not None and y is not None:
                r.x = x
                r.y = y
            render.SDL_RenderCopyEx(self.sdlrenderer, sprites.texture, None,
                                    r, sprites.angle, None,
                                    render.SDL_FLIP_NONE)
        render.SDL_RenderPresent(self.sdlrenderer)


def create(size=(800, 600), title="Pygame2", type="hardware"):
    """Creates an SDL2 window.

    This method provides a pygame-like way to create a window.

    NOTE(review): this forwards (size, title, type) positionally to
    set_mode, but the *second* set_mode definition at the bottom of this
    module shadows the three-argument one it was written against, so
    title/type end up bound to that function's fullscreen/depth
    parameters.  Confirm which set_mode signature is intended.

    Args:
        size (tuple of int, optional): An (x, y) size of the window to
            create, defaults to (800, 600)
        title (str, optional): The title of the window, defaults to
            "Pygame2"
        type (str, optional): The type of sprite renderer to use. Can be
            either "software" or "hardware". Defaults to "hardware".

    Returns:
        A pygame2.display.Window object.
    """
    return set_mode(size, title, type)


def set_mode(size=(800, 600), title="Pygame2", type="hardware"):
    """Creates an SDL2 window with the provided size.

    This method provides a pygame-compatible way to create a window.

    NOTE(review): this definition is shadowed by a second ``set_mode``
    defined later in this module and is therefore dead code at runtime.

    Args:
        size (tuple of int, optional): An (x, y) size of the window to
            create, defaults to (800, 600)
        title (str, optional): The title of the window, defaults to
            "Pygame2"
        type (str, optional): The type of sprite renderer to use. Can be
            either "software" or "hardware". Defaults to "hardware".

    Returns:
        A pygame2.display.Window object.
    """
    global window
    if not window:
        window = Window(title, size, type)
    else:
        raise Exception("Error: Cannot create a window after one has already been created.")
    return window


def set_caption(title):
    """Sets the title of the current window.

    This method provides a pygame-compatible way to set the window
    caption.  If no window exists yet, one is created with the given
    title.

    Args:
        title (str): The title of the window.

    Returns:
        A pygame2.display.Window object.
    """
    global window
    if window:
        window.set_caption(title)
    else:
        window = Window(title)
    return window


def update():
    """Updates the contents of the current window.

    This method provides a pygame-compatible way to refresh the window.

    Args:
        None

    Returns:
        None
    """
    global window
    window.update()


def flip():
    """Updates the contents of the current window.

    This method provides a pygame-compatible way to refresh the window.
    It is an alias for update().

    Args:
        None

    Returns:
        None
    """
    update()


def get_surface():
    """Returns a copy of the current window object.

    This method provides a pygame-compatible method to get the current
    window.  (Despite the summary above, it returns the window object
    itself, not a copy.)

    Args:
        None

    Returns:
        A pygame2.display.Window object.
    """
    global window
    return window


def set_mode(size, fullscreen=0, depth=32):
    """Sets the resolution of the window.

    This method provides a pygame-compatible way to create or set the
    window size.

    NOTE(review): this redefinition shadows the earlier set_mode above;
    create() still calls set_mode(size, title, type), which binds
    title/type to fullscreen/depth here.  The fullscreen and depth
    arguments are otherwise unused.

    Args:
        size (tuple of int): The (x, y) size of the window.
        fullscreen (boolean, optional): Whether or not to set the window
            to fullscreen mode, defaults to 0.
        depth (int, optional): Legacy argument for pygame compatibility,
            defaults to 32.

    Returns:
        A pygame2.display.Window object.
    """
    global window
    if window:
        sdl2.SDL_SetWindowSize(window.sdl2_window.window, size[0], size[1])
        window.size = size
        window.width = size[0]
        window.height = size[1]
    else:
        window = Window(size=size)
    return window
iSALUS' award-winning Patient Timeline screen gives you a historical snapshot of all your patient interactions in one single convenient location! Save time and improve health outcomes by reviewing a patient's medications and associated contraindications, problems, allergies, vitals, notes, immunizations, and lab and order results — all with secure eCommunication with the patient or staff, eDocuments, or patient data from Health Information Exchanges.
#5/17/2014
# Telnet bot for a 3x3x3 (3D) tic-tac-toe challenge service.  Each round it
# parses the three board layers from the server's ASCII output, computes a
# move, and writes it back as "x,y,z".
import telnetlib
import random
from utility import *  # NOTE(review): assumed to provide get_score_list, convert_open_list, get_pos_list and minimax -- confirm

# Persistent connection to the game server; shared by main().
tn = telnetlib.Telnet("3dttt_87277cd86e7cc53d2671888c417f62aa.2014.shallweplayaga.me",1234)

# Board mark characters as they appear in the server's rendered rows.
X = 'X'
O = 'O'

def get_sym(coor): #sym => symmetric
    # Reflect a single coordinate in {0,1,2} about the board center.
    if coor == 0:
        return 2
    if coor == 1:
        return 1
    if coor == 2:
        return 0

def get_move(new_O_pos):
    # Point-symmetric reply to the position in new_O_pos.
    # (Currently unused in main(); the minimax result is used instead.)
    x,y,z = new_O_pos #x,y are in wrong order
    return (get_sym(x),get_sym(y),get_sym(z))

def get_new_pos(pre,cur):
    # Return the first position present in `cur` but not in `pre`,
    # i.e. the move made between the two board snapshots.
    for i in cur:
        if not (i in pre):
            return i

def is_all_empty(open_all):
    # True when none of the 9 parsed rows reports any open cell,
    # which is how a finished/reset board shows up.
    ret = True
    for i in range(9):
        ret = ret and (len(open_all[i]) == 0)
    return ret

def get_next_open(open_all): #open_all: tuple of list of tuples
    # Collect every open (row, col) across the 9 rows, tagging each with its
    # layer index z (rows 0-2 -> z=0, rows 3-5 -> z=1, rows 6-8 -> z=2),
    # then pick one uniformly at random.
    valid = []
    for i in range(9):
        if len(open_all[i])>0:
            if i in [0,1,2]:
                z = 0
            elif i in [3,4,5]:
                z = 1
            elif i in [6,7,8]:
                z = 2
            for j in open_all[i]:
                valid.append((j[0],j[1],z))
    index = random.randint(0,len(valid)-1)
    return valid[index]
    #return (open_all[i][0][0],open_all[i][0][1],z)

def get_empty(row1,row_num):
    # Parse one rendered board row and return its open cells as
    # (row_num, col) tuples.  The branching keys off how many
    # whitespace-separated tokens the row splits into:
    #   2 tokens -> all three cells open, 3 -> one mark placed,
    #   4 -> two marks placed (position deduced from the '|' borders).
    # NOTE(review): token counts depend on the server's exact board
    # rendering -- confirm against live output.
    open_list =[] #list of tuples
    lis = row1.split()
    if len(lis) == 2:
        open_list.append((row_num,0));
        open_list.append((row_num,1));
        open_list.append((row_num,2));
    elif len(lis) == 3:
        if X in lis:
            index = lis.index(X)
            if index == 0:
                open_list.append((row_num,1))
                open_list.append((row_num,2))
            elif index == 1:
                open_list.append((row_num,0))
                open_list.append((row_num,2))
            elif index == 2:
                open_list.append((row_num,0))
                open_list.append((row_num,1))
        elif O in lis:
            index = lis.index(O)
            if index == 0:
                open_list.append((row_num,1))
                open_list.append((row_num,2))
            elif index == 1:
                open_list.append((row_num,0))
                open_list.append((row_num,2))
            elif index == 2:
                open_list.append((row_num,0))
                open_list.append((row_num,1))
    elif len(lis) == 4:
        if lis[0] == '|':
            open_list.append((row_num,0))
        elif lis[3] == '|':
            open_list.append((row_num,2))
        else:
            open_list.append((row_num,1))
    return open_list

def main():
    # Game loop: read and parse the three layers, detect a fresh board,
    # then send either the opening move (the cube center) or the move
    # chosen by minimax.
    score_list = get_score_list()
    turns = 0
    pre_Olist = [] #list of tuples
    cur_Olist = [] #same above
    while True:
        # First layer: header, then three rows each prefixed 0/1/2.
        ret = tn.read_until("y\n")
        print ret
        tn.read_until("0")
        row00 = tn.read_until("\n").strip()
        tn.read_until("1") #skip
        row01 = tn.read_until("\n").strip()
        tn.read_until("2") #skip
        row02 = tn.read_until("\n").strip()
        # Second layer.
        ret = tn.read_until("y\n")
        tn.read_until("0")
        row10 = tn.read_until("\n").strip()
        tn.read_until("1") #skip
        row11 = tn.read_until("\n").strip()
        tn.read_until("2") #skip
        row12 = tn.read_until("\n").strip()
        # Third layer.
        ret = tn.read_until("y\n")
        tn.read_until("0")
        row20 = tn.read_until("\n").strip()
        tn.read_until("1") #skip
        row21 = tn.read_until("\n").strip()
        tn.read_until("2") #skip
        row22 = tn.read_until("\n").strip()
        #print row00
        #print row01
        #print row02
        #print ""
        open0 = (get_empty(row00,0), get_empty(row01,1), get_empty(row02,2))
        #print row10
        #print row11
        #print row12
        #print ""
        open1 = (get_empty(row10,0), get_empty(row11,1), get_empty(row12,2))
        #print row20
        #print row21
        #print row22
        open2 = (get_empty(row20,0), get_empty(row21,1), get_empty(row22,2))
        rows = (row00,row01,row02,row10,row11,row12,row20,row21,row22)
        ret = tn.read_some()
        print ret
        # Flatten to 9 per-row lists so helpers can index 0..8 directly.
        open_all = (open0[0],open0[1],open0[2],open1[0],open1[1],open1[2],open2[0],open2[1],open2[2])
        open_list = convert_open_list(open_all)
        if is_all_empty(open_all):
            # No open cells parsed: the game ended/reset, so clear the
            # per-game state and start over.
            ret = tn.read_some()
            print ret
            pre_Olist = []
            cur_Olist = []
            turns = 0
            #return
            continue
        y,x,z = get_next_open(open_all)
        Xlist = get_pos_list(rows,'X')
        Olist = get_pos_list(rows,'O')
        next_move = minimax(Xlist,Olist,open_list)
        print "next move", next_move
        #get_score(score_list,Xlist,Olist)
        if turns==0:
            # Opening move: take the center of the cube.
            send = "1,1,1"
            cur_Olist = get_pos_list(rows,'O')
            turns += 1
        else:
            pre_Olist = cur_Olist;
            cur_Olist = get_pos_list(rows,'O')
            new_pos = get_new_pos(pre_Olist,cur_Olist)
            #y,x,z = get_move(new_pos)
            y,x,z = next_move
            send = str(x)+","+str(y)+","+str(z)
        print "sending ",send
        tn.write(send+"\n")

if __name__=="__main__":
    main()
Phone numbers in Paris. Find a phone number, address, or other contact details, such as email addresses and profiles, of people or friends who live in Paris, Tennessee, USA. Results include phone numbers and addresses for any street.
import datetime
import os
from io import BytesIO

from PIL import Image
from PIL.Image import isImageType
from django.conf import settings
from django.utils.translation import ugettext as _
from pptx import Presentation
from pptx.enum.shapes import MSO_SHAPE_TYPE
from pptx.enum.text import PP_PARAGRAPH_ALIGNMENT
from pptx.util import Inches, Pt

# Report title, overridable via Django settings.
SEEDSOURCE_TITLE = getattr(settings, 'SEEDSOURCE_TITLE', _('Seedlot Selection Tool'))


class PPTCreator(object):
    """Builds the Seedlot Selection Tool PowerPoint report.

    The report starts from a bundled template (templates/pptx/report.pptx);
    named shapes in the template are filled from a context dict, and
    overview/variables/constraints slides plus presenter notes are appended.
    """

    def __init__(self):
        # Populated by get_presentation(); slide dimensions in EMU.
        self.presentation = None
        self.width = None
        self.height = None

    def degree_sign(self, s):
        """Replace the HTML entity '&deg;' with a literal degree sign."""
        return s.replace('&deg;', '°')

    def add_text(self, text_frame, lines):
        """Append formatted text to a text frame.

        ``lines`` is an iterable of lines; each line is an iterable of
        (text, point_size, bold) segments rendered as runs of one paragraph.
        """
        for line in lines:
            paragraph = text_frame.add_paragraph()
            for segment in line:
                text, size, bold = segment
                run = paragraph.add_run()
                run.text = text
                run.font.size = Pt(size)
                run.font.bold = bold

    def get_transfer_method_text(self, method, center):
        """Return the human-readable description for a transfer-limit method."""
        if method != 'seedzone':
            method_text = _('Custom transfer limits, climatic center based on the selected location')
        elif center == 'zone':
            method_text = _('Transfer limits and climatic center based on seed zone')
        else:
            method_text = _('Transfer limits based on seed zone, climatic center based on the selected location')
        return method_text

    def replace_shape_image(self, shape, image):
        """Swap the image blob of a template picture shape for a PIL image.

        Re-encodes ``image`` as PNG and overwrites the related image part.
        NOTE: relies on python-pptx private attrs (_element.blip_rId).
        """
        im_bytes = BytesIO()
        image.save(im_bytes, 'PNG')
        shape.part.related_parts[shape._element.blip_rId].blob = im_bytes.getvalue()

    def replace_shape_text(self, shape, text):
        """Replace a text-box shape's text, keeping the first run's formatting.

        Extra runs are removed so only the first (styled) run remains.
        NOTE: relies on python-pptx private attrs (paragraph._p, run._r).
        """
        paragraph = shape.text_frame.paragraphs[0]
        for run in paragraph.runs[1:]:
            paragraph._p.remove(run._r)
        paragraph.runs[0].text = text

    def add_title_text(self, slide, title):
        """Add a centered 24pt title text box near the top of ``slide``."""
        shape = slide.shapes.add_textbox(Inches(.41), Inches(.23), Inches(9.18), Inches(.5))
        tf = shape.text_frame
        tf.text = title
        paragraph = tf.paragraphs[0]
        paragraph.font.size = Pt(24)
        paragraph.alignment = PP_PARAGRAPH_ALIGNMENT.CENTER

    def render_template(self, context):
        """Fill template placeholders on every slide from ``context``."""
        for slide in self.presentation.slides:
            self.render_template_slide(slide, context)

    def render_template_slide(self, slide, context):
        """Fill one slide's shapes whose names appear as keys in ``context``.

        A callable value is invoked with the shape; otherwise pictures take a
        PIL image and text boxes take a string.  Raises TypeError on mismatch.
        """
        for shape in slide.shapes:
            if shape.name not in context:
                continue
            value = context[shape.name]
            if callable(value):
                value(shape)
            elif shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
                if not isImageType(value):
                    raise TypeError('Template value {} must be an Image type'.format(shape.name))
                self.replace_shape_image(shape, value)
            elif shape.shape_type == MSO_SHAPE_TYPE.TEXT_BOX:
                if not isinstance(value, str):
                    raise TypeError('Template value {} must be a string'.format(shape.name))
                self.replace_shape_text(shape, value)

    def add_slide(self):
        """Append a blank slide (first template layout, placeholders removed)."""
        slide = self.presentation.slides.add_slide(self.presentation.slide_layouts[0])
        # Delete placeholders
        for placeholder in (slide.placeholders):
            placeholder.element.getparent().remove(placeholder.element)
        return slide

    def create_overview_slide(self, context):
        """Add the report overview slide (objective, location, climate info)."""
        objective = context['objective']
        location_label = context['location_label']
        point = context['point']
        elevation = context['elevation']
        seedlot_year = context['seedlot_year']
        site_year = context['site_year']
        site_model = context['site_model']
        method = context['method']
        center = context['center']

        # point is a {'x': lon, 'y': lat} mapping; displayed as (lat, lon).
        location = (point['y'], point['x'])
        data_url = 'http://cfcg.forestry.ubc.ca/projects/climate-data/climatebcwna/#ClimateWNA'
        method_text = self.get_transfer_method_text(method, center)

        slide = self.add_slide()
        self.add_title_text(slide, '{} - {}'.format(SEEDSOURCE_TITLE, datetime.datetime.today().strftime('%m/%d/%Y')))

        # Body
        shape = slide.shapes.add_textbox(Inches(.65), Inches(.73), Inches(8.69), Inches(6.19))
        shape.text_frame.word_wrap = True
        self.add_text(shape.text_frame, (
            ((_('Objective:') + ' ', 18, True), (objective, 18, False)),
            (('', 18, False),),
            (('{}: '.format(location_label), 18, True), ('{}, {}'.format(*location), 18, False)),
            ((_('Elevation:') + ' ', 18, True), (_('{elevation} ft').format(elevation=elevation), 18, False)),
            (('', 18, False),),
            ((_('Climate scenarios'), 24, True),),
            ((_('Seedlot climate:') + ' ', 18, True), (seedlot_year, 18, False)),
            ((_('Planting site climate:') + ' ', 18, True), (' '.join((site_year, site_model or '')), 18, False)),
            (('', 18, False),),
            ((_('Transfer limit method:') + ' ', 18, True), (method_text, 18, False)),
            (('\n', 18, False),),
            ((_('Data URL:') + ' ', 12, True), (data_url, 12, False))
        ))

        # Hyperlink URL
        shape.text_frame.paragraphs[-1].runs[-1].hyperlink.address = data_url

    def create_variables_slide(self, variables):
        """Add a slide with a table of climate variables (center + limit)."""
        slide = self.add_slide()
        self.add_title_text(slide, _('Climate Variables'))

        num_rows = len(variables) + 1
        table = slide.shapes.add_table(
            num_rows, 3, Inches(.47), Inches(.73), Inches(9.05), Inches(.4) * num_rows
        ).table
        cols = table.columns
        cols[0].width = Inches(4.59)
        cols[1].width = Inches(2.06)
        cols[2].width = Inches(2.4)

        # Headers
        table.cell(0, 0).text = _('Variable')
        table.cell(0, 1).text = _('Center')
        table.cell(0, 2).text = _('Transfer limit') + ' (+/-)'

        for i, variable in enumerate(variables, start=1):
            units = self.degree_sign(variable['units'])
            center_label = ' '.join((variable['value'], units))
            # Flag user-modified transfer limits in the table.
            limit_label = '{} {}{}'.format(
                variable['limit'], units,
                ' ({})'.format(_('modified')) if variable['modified'] else ''
            )
            table.cell(i, 0).text = variable['label']
            table.cell(i, 1).text = center_label
            table.cell(i, 2).text = limit_label

    def create_constraints_slide(self, constraints):
        """Add a slide with a table of constraints (value + range)."""
        slide = self.add_slide()
        self.add_title_text(slide, _('Constraints'))

        num_rows = len(constraints) + 1
        table = slide.shapes.add_table(
            num_rows, 3, Inches(.47), Inches(.73), Inches(9.05), Inches(.4) * num_rows
        ).table
        cols = table.columns
        cols[0].width = Inches(4.59)
        cols[1].width = Inches(2.06)
        cols[2].width = Inches(2.4)

        # Headers
        table.cell(0, 0).text = _('Constraint')
        table.cell(0, 1).text = _('Value')
        table.cell(0, 2).text = '{} (+/-)'.format(_('Range'))

        for i, constraint in enumerate(constraints, start=1):
            if constraint['type'] == 'shapefile':
                # Shapefile constraints have no value/range: span the last
                # two columns with the filename (raw OOXML gridSpan attr).
                table.cell(i, 0).text = constraint['label']
                table.cell(i, 1)._tc.set('gridSpan', str(2))
                table.cell(i, 1).text = constraint['filename']
            else:
                table.cell(i, 0).text = constraint['label']
                table.cell(i, 1).text = constraint['value']
                table.cell(i, 2).text = constraint['range']

    def add_presenter_notes(self, slide, context):
        """Write a plain-text summary of the whole report into the slide's
        presenter notes, with fixed-width tables (hence the monospace font
        applied at the end)."""
        text_frame = slide.notes_slide.notes_text_frame

        objective = context['objective']
        location_label = context['location_label']
        point = context['point']
        elevation = context['elevation']
        seedlot_year = context['seedlot_year']
        site_year = context['site_year']
        site_model = context['site_model']
        method = context['method']
        center = context['center']

        location = (point['y'], point['x'])
        method_text = self.get_transfer_method_text(method, center)

        lines = [
            ((_('Objective:') + ' ', 12, True), (objective, 12, False)),
            (('{}: '.format(location_label), 12, True), ('{}, {}'.format(*location), 12, False)),
            ((_('Elevation:') + ' ', 12, True), ('{} ft'.format(elevation), 12, False)),
            ((_('Climate Scenarios'), 12, True),),
            ((' {} '.format(_('Seedlot climate:')), 12, True), (seedlot_year, 12, False)),
            ((' {} '.format(_('Planting site climate:')), 12, True),
             ('{} {}'.format(site_year, site_model or ''), 12, False)),
            ((_('Transfer limit method:') + ' ', 12, True), (method_text, 12, False))
        ]

        if method == 'seedzone':
            band = context['band']
            band_str = ", {}' - {}'".format(band[0], band[1]) if band else ''
            lines += [
                ((_('Species:') + ' ', 12, True), (context['species'], 12, False)),
                ((_('Seed zone:') + ' ', 12, True), (context['zone'] + band_str, 12, False))
            ]

        # Variables table
        # Column widths are computed from the widest cell in each column
        # (+3 for padding) so the monospace table lines up.
        variables = context['variables']
        name_width = max([len(_('Variable'))] + [len(x['label']) for x in variables]) + 3
        center_width = max(
            [len(_('Center'))] +
            [len(' '.join([str(x['value']), self.degree_sign(x['units'])])) for x in variables]
        ) + 3
        transfer_width = max(
            [len(_('Transfer limit') + ' (+/-)')] +
            [
                len('{} {}{}'.format(
                    x['limit'], self.degree_sign(x['units']),
                    ' ({})'.format(_('modified')) if x['modified'] else '')
                ) for x in variables
            ]
        )

        lines += [
            (('', 12, False),),
            ((_('Variables'), 12, True),),
            ((''.join([
                _('Variable').ljust(name_width),
                _('Center').ljust(center_width),
                _('Transfer limit') + ' (+/-)'.ljust(transfer_width)
            ]), 12, False),),
            (('-' * (name_width + center_width + transfer_width), 12, False),)
        ]

        for variable in context['variables']:
            units = self.degree_sign(variable['units'])
            lines += [
                ((''.join([
                    variable['label'].ljust(name_width),
                    '{} {}'.format(variable['value'], units).ljust(center_width),
                    '{} {}{}'.format(
                        variable['limit'], units,
                        ' ({})'.format(_('modified')) if variable['modified'] else ''
                    )
                ]), 12, False),)
            ]

        if context['constraints']:
            # Constraints table
            constraints = context['constraints']
            name_width = max([len('Constraint')] + [len(x['label']) for x in constraints]) + 3
            value_width = max(
                [len(_('Value'))] +
                [len(x['value']) for x in [c for c in constraints if c['type'] != 'shapefile']]
            ) + 3
            range_width = max(
                [len(_('Range') + ' (+/-)')] +
                [len(x['range']) for x in [c for c in constraints if c['type'] != 'shapefile']]
            ) + 3

            # Ensure we have room for shapefile name, if there is one
            shape_constraint = [c for c in constraints if c['type'] == 'shapefile']
            if shape_constraint:
                filename_width = len(shape_constraint[0]['filename'])
                if filename_width > value_width + range_width:
                    range_width = filename_width - value_width

            lines += [
                (('', 12, False),),
                ((_('Constraints'), 12, True),),
                ((''.join([
                    _('Constraint').ljust(name_width),
                    _('Value').ljust(value_width),
                    _('Range') + ' (+/-)'.ljust(range_width)
                ]), 12, False),),
                (('-' * (name_width + value_width + range_width), 12, False),)
            ]

            for constraint in constraints:
                if constraint['type'] == 'shapefile':
                    lines += [
                        ((''.join([
                            constraint['label'].ljust(name_width),
                            constraint['filename'].ljust(value_width + range_width)
                        ]), 12, False),)
                    ]
                else:
                    lines += [
                        ((''.join([
                            constraint['label'].ljust(name_width),
                            constraint['value'].ljust(value_width),
                            constraint['range'].ljust(range_width)
                        ]), 12, False),)
                    ]

        self.add_text(text_frame, lines)

        # Monospace font so the hand-padded tables align.
        for paragraph in text_frame.paragraphs:
            paragraph.font.name = 'Andale Mono'

    def get_presentation(self, context):
        """Build and return the complete report presentation for ``context``."""
        self.presentation = Presentation(
            os.path.join(os.path.dirname(__file__), 'templates', 'pptx', 'report.pptx')
        )
        self.width = Inches(self.presentation.slide_width / Inches(1))
        self.height = Inches(self.presentation.slide_height / Inches(1))

        # Fill the map slide placeholders (coordinates, scale, map image).
        self.render_template(dict(
            coord_bottom=self.degree_sign(context['south']),
            coord_right=self.degree_sign(context['east']),
            coord_left=self.degree_sign(context['west']),
            coord_top=self.degree_sign(context['north']),
            scale_label=context['scale'],
            map_image=Image.open(context['image_data']),
            attribution=_('Generated {date} by the Seedlot Selection Tool').format(
                date=datetime.datetime.today().strftime('%m/%d/%Y')
            )
        ))

        self.create_overview_slide(context)
        self.create_variables_slide(context['variables'])
        if context['constraints']:
            self.create_constraints_slide(context['constraints'])
        self.add_presenter_notes(self.presentation.slides[0], context)

        return self.presentation
Blakeslee, Albert Francis, 1874–1954, American botanist, b. Geneseo, New York. He received his Ph.D. at Harvard (1904) and was a member of the faculty until 1907. After several years as professor at Connecticut Agricultural College (now the Univ. of Connecticut), he joined the staff of the Carnegie Institution of Washington at Cold Spring Harbor, N.Y., and later served as its director (1936–41). In 1943 he became director of the Smith College Genetics Experiment Station. From his earliest research, the discovery of sexual reproduction in bread molds, his contributions to botany and genetics were of far-reaching significance. His study of the inheritance and geographical distribution of the jimson weed, Datura, provided important information concerning chromosome behavior, genic balance, and species evolution. He introduced the use of the alkaloid colchicine to increase the number of chromosomes in the plant cell.
# Copyright (C) Ivan Kravets <me@ikravets.com> # See LICENSE for details. """ CMSIS The ARM Cortex Microcontroller Software Interface Standard (CMSIS) is a vendor-independent hardware abstraction layer for the Cortex-M processor series and specifies debugger interfaces. The CMSIS enables consistent and simple software interfaces to the processor for interface peripherals, real-time operating systems, and middleware. It simplifies software re-use, reducing the learning curve for new microcontroller developers and cutting the time-to-market for devices. http://www.arm.com/products/processors/cortex-m/cortex-microcontroller-software-interface-standard.php """ from os.path import join from SCons.Script import DefaultEnvironment env = DefaultEnvironment() env.Replace( PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-cmsis") ) env.VariantDirWrap( join("$BUILD_DIR", "FrameworkCMSIS"), join("$PLATFORMFW_DIR", "cores", "${BOARD_OPTIONS['build']['core']}") ) env.Append( CPPPATH=[ join("$BUILD_DIR", "FrameworkCMSIS"), join("$BUILD_DIR", "FrameworkCMSISVariant") ] ) envsafe = env.Clone() # # Target: Build Core Library # libs = [] libs.append(envsafe.BuildLibrary( join("$BUILD_DIR", "FrameworkCMSISVariant"), join("$PLATFORMFW_DIR", "variants", "${BOARD_OPTIONS['build']['variant']}") )) env.Append(LIBS=libs)
The threat to add 23% VAT to supplements has now been put off until November to allow the Government to legislate an acceptable alternative. The move, which was to have come into force on 1st March, was unexpectedly deferred as a petition signed by 75,000 people was presented to the Irish Parliament and Revenue offices. But the fight isn't over with campaigners determined to make sure the threat never materialises. In just three months following Limerick City and County Council’s launch of the project with a number of partners including the HSE and Healthy Ireland, 'We’re Breastfeeding Friendly' has encouraged a total of 73 Limerick establishments, including hotels, restaurants, shops, museums, sports clubs, community organisations and churches, to sign up for the campaign. The drive is aimed at improving the health and wellbeing of breastfeeding mothers, babies and their families by encouraging businesses, organisations and communities to become ‘Breastfeeding Friendly’. Mothers breastfeeding in the premises will never be asked to move to another area or stop breastfeeding. With the display of a ‘We’re Breastfeeding Friendly’ window sticker and/or poster other local authorities around the country will be rolling out ‘We’re Breastfeeding Friendly’ in their counties/cities. Follow the campaign online with the hashtags #breastfeedingfriendlylimerick and #LKWorkingTogether. One lucky reader has won a fabulous hamper of lavera natural and organic beauty goodies worth over €160. The winner of this fabulous prize is Deirdre Ferry. Get down to Evolv Health and Wellbeing in Enniscorthy, Co Wexford for their annual Spring into Health Week from 25 February to 2 March. Evolv have up to 50% off offers on supplements, €10 facials with Dr Hauschka, Dead Sea Magik & Holos, €25 treatments which include massage, reiki, sinus treatment, kinesiology and others such as allergy testing, ear candling, nutritional therapy and hypnotherapy. 
Make a day of it – have a free health consultation, learn about new products that could benefit your lifestyle and sample healthy delicious foods as well. Every day there will be something different. Don't miss out! Is there a downside to 'detox'? Nutritional therapist Emily Blake answers for CNM (College of Naturopathic Medicine). Detox is about using nutrition and other natural therapies to stimulate our liver and other detoxification organs so they function optimally. It helps us to thrive – to have radiant skin and better energy, mood, hormone balance and memory for example. Supporting detoxification before trying to conceive may also protect future generations from toxin-related illnesses. Keep a bedtime routine – go to bed and get up at the same time every day so that your routine will be less disrupted. And allow your body’s sleep rhythms to become established. Make your bedroom restful – if your room is quiet, dark and not overheated it will be easier for your body and brain to realise it is time to wind down. Avoid screens – remove TVs, laptops and mobile phones from your bedroom. The light these emit can disrupt your sleep patterns. Wind down – by taking a cup of herbal tea, a warm bath, listening to a relaxation CD or relaxing music, reading a book. Avoid alcohol – it may make you feel sleepy, but will ultimately mean you wake up during the night. Avoid napping – daytime nappers often find it leads to night time awakenings. A good night’s sleep should mean you don’t need a daytime nap. Avoid caffeine – have a cut-off time after which you don’t drink caffeinated tea or coffee because it can keep you awake. Take magnesium – it can help the body to relax into sleep. Write tomorrow’s list – it may help to order your mind and organise your thoughts so that you can relax. Optician Vision Express recently commissioned a survey of 1,000 people across Ireland to find out if we take care of our eyesight as we age. 
The survey revealed that 81% noticed a deterioration in their eyesight when they reached an average age of 42. However, 60% of those over 40, and 52% of those over 60, haven’t had an eye test in over a year, and men are less likely than women to have a regular eye test. Almost 60% of the survey respondents said they struggle when reading, and almost half (49%) said they have difficultly seeing clearly when on the computer or watching TV. Glaucoma is an eye-related disease that mostly affects those over 60. It is symptomless at first and a regular eye test is the only way to detect it. Over 90% of individuals who are diagnosed early will retain some useful sight, but late diagnosis can result in total blindness. A study by researchers from the University of Calgary and University of Waterloo in Canada and published in medical journal JAMA Pediatrics has looked at the correlation between the amount of screen time small children have with their rate of development. The researchers used data from an ongoing cohort study of 2000 mothers and children in Canada. They found that one in four children showed signs of developmental delay and that increased screen time was linked to poorer test scores. They concluded that young children benefit better from a balance in their lives, including reading, interaction with others, playing and physical activity, rather than more screen time. 1 Plan your meals before you go – take five minutes to check what you have in the cupboards, fridge and freezer before you go shopping. Planning what you need for your meals for the next week can save both time and money. 2 Make a shopping list – to help you get what you really need and avoid those impulse buys, especially for unhealthy snacks and treats that can be high in sugar, fat and salt. 3 Be aware of food promotions – while offers for healthy foods like fruit and veg are fantastic, the majority of foods on promotion in supermarkets are unhealthy. 
You can avoid those by skipping certain aisles, being aware of the end-of-aisle shelfs as ‘watch out’ areas and passing up on promotional offers unless they fit in with your plans. 4 Pester power is an issue – shop alone or with a friend if you can. We’ve all been there, or seen someone else under pressure from children looking for treats while shopping. If you can get some time on your own – take it. 5 Don’t shop when you are hungry – your hunger will be fighting all your good intentions to stick with your healthy choices. If you do have to shop on an empty stomach stick to the list and do your best not to get distracted. 6 Try to avoid shopping when tired or stressed – we all tend to reach for comfort foods to help get us through. Pick a time and a day to shop when you know you’ll be under less pressure. 7 Look at food labels – to help you understand what’s healthy and what’s not. Gradually build up your knowledge of food labelling so you can make sense of it, at a glance. 8 Look for a good balance – make sure you include plenty of fruit and veg, wholegrain and brown carbs over white, and especially watch out for those treats. A quick scan over your trolley before you hit the checkout will help you get a sense of this. The decision to implement 23% VAT on food supplements was taken by the Minister for Finance Paschal Donohoe in the run up to Budget 2018. Health Stores Ireland has launched a campaign to prevent the introduction of 23% VAT on supplements including vitamins, minerals, probiotics, omega-3 and -6 fatty acids and folic acid. This tax is hugely damaging to the health of many vulnerable people who depend on these supplements to maintain a good level of health and standard of living. Even if this is implemented the campaigning will continue past 1 March, so please add your name to the petition to make your voices heard.
"""engine.SCons.Platform.darwin Platform-specific initialization for Mac OS X systems. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Platform.Platform() selection method. """ # # Copyright (c) 2001 - 2015 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Platform/darwin.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog" import os import posix def generate(env): posix.generate(env) env['SHLIBSUFFIX'] = '.dylib' # put macports paths at front to override Apple's versions, fink path is after # For now let people who want Macports or Fink tools specify it! 
# env['ENV']['PATH'] = '/opt/local/bin:/opt/local/sbin:' + env['ENV']['PATH'] + ':/sw/bin' # Store extra system paths in env['ENV']['PATHOSX'] filelist = ['/etc/paths',] # make sure this works on Macs with Tiger or earlier try: dirlist = os.listdir('/etc/paths.d') except: dirlist = [] for file in dirlist: filelist.append('/etc/paths.d/'+file) for file in filelist: if os.path.isfile(file): f = open(file, 'r') lines = f.readlines() for line in lines: if line: env.AppendENVPath('PATHOSX', line.strip('\n')) f.close() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
What a wonderful weekend we had here at The Acres for Tie-Dye Weekend!! The Acres Eatery served up a delicious meal buffet style Friday night, giving campers a chance to come get their bellies full. Later in the evening, we played the "Bee Movie" for anyone who wanted to bring their chairs, blankets or golf carts and have a nice relaxing evening. Tie-dyeing started Saturday morning as campers lined up and learned how to spin, band and dye a t-shirt! Everyone had a blast making a new shirt with their own special touch on it. Later in the afternoon, all ages were welcome to play a family fun game of Wiffle Ball. Teams battled it out to see who would go home with a victory! Campers were able to meet their fellow neighbors and tear the dance floor up when DJs Mark & Patty came and played all our favorite dance music Saturday night! We took a small break from dancing to do our 50/50 raffle drawing later in the evening. Our 1st place winner took home a whopping $645!! Congratulations!! Sunday morning, Theresa Moore delivered a powerful message titled "Offer Your Hand: Not Your Judgement". Tie-Dye Weekend was so much fun!! Thank you to everyone who came and camped with us this past weekend; we really enjoyed having you all here.
# Copyright 2011 OpenStack LLC.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The security groups extension."""

from xml.dom import minidom

import webob
from webob import exc

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
# Hard and soft policy checks for the 'security_groups' extension.
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')


def make_rule(elem):
    """Attach the XML template sub-elements for one security group rule."""
    elem.set('id')
    elem.set('parent_group_id')

    proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
    proto.text = 'ip_protocol'

    from_port = xmlutil.SubTemplateElement(elem, 'from_port')
    from_port.text = 'from_port'

    to_port = xmlutil.SubTemplateElement(elem, 'to_port')
    to_port.text = 'to_port'

    # Source group (for group-based rules)...
    group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
    name = xmlutil.SubTemplateElement(group, 'name')
    name.text = 'name'
    tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
    tenant_id.text = 'tenant_id'

    # ...or CIDR (for IP-range rules).
    ip_range = xmlutil.SubTemplateElement(elem, 'ip_range', selector='ip_range')
    cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
    cidr.text = 'cidr'


def make_sg(elem):
    """Attach the XML template sub-elements for one security group."""
    elem.set('id')
    elem.set('tenant_id')
    elem.set('name')

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    rules = xmlutil.SubTemplateElement(elem, 'rules')
    rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
    make_rule(rule)


# XML namespace map for the serializer templates below.
sg_nsmap = {None: wsgi.XMLNS_V11}


class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a single security group rule."""

    def construct(self):
        root = xmlutil.TemplateElement('security_group_rule',
                                       selector='security_group_rule')
        make_rule(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a single security group."""

    def construct(self):
        root = xmlutil.TemplateElement('security_group',
                                       selector='security_group')
        make_sg(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
    """XML serializer template for a list of security groups."""

    def construct(self):
        root = xmlutil.TemplateElement('security_groups')
        elem = xmlutil.SubTemplateElement(root, 'security_group',
                                          selector='security_groups')
        make_sg(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """
    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group = {}
        sg_node = self.find_first_child_named(dom, 'security_group')
        if sg_node is not None:
            if sg_node.hasAttribute('name'):
                security_group['name'] = sg_node.getAttribute('name')
            desc_node = self.find_first_child_named(sg_node, "description")
            if desc_node:
                security_group['description'] = self.extract_text(desc_node)
        return {'body': {'security_group': security_group}}


class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """

    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group_rule = self._extract_security_group_rule(dom)
        return {'body': {'security_group_rule': security_group_rule}}

    def _extract_security_group_rule(self, node):
        """Marshal the security group rule attribute of a parsed request"""
        sg_rule = {}
        sg_rule_node = self.find_first_child_named(node,
                                                   'security_group_rule')
        if sg_rule_node is not None:
            # Each field is optional in the XML body; copy only what's there.
            ip_protocol_node = self.find_first_child_named(sg_rule_node,
                                                           "ip_protocol")
            if ip_protocol_node is not None:
                sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)

            from_port_node = self.find_first_child_named(sg_rule_node,
                                                         "from_port")
            if from_port_node is not None:
                sg_rule['from_port'] = self.extract_text(from_port_node)

            to_port_node = self.find_first_child_named(sg_rule_node,
                                                       "to_port")
            if to_port_node is not None:
                sg_rule['to_port'] = self.extract_text(to_port_node)

            parent_group_id_node = self.find_first_child_named(
                sg_rule_node, "parent_group_id")
            if parent_group_id_node is not None:
                sg_rule['parent_group_id'] = self.extract_text(
                    parent_group_id_node)

            group_id_node = self.find_first_child_named(sg_rule_node,
                                                        "group_id")
            if group_id_node is not None:
                sg_rule['group_id'] = self.extract_text(group_id_node)

            cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
            if cidr_node is not None:
                sg_rule['cidr'] = self.extract_text(cidr_node)

        return sg_rule


class SecurityGroupControllerBase(object):
    """Base class for Security Group controllers."""

    def __init__(self):
        # NOTE(review): NativeSecurityGroupAPI is expected to be defined
        # later in this module -- confirm.
        self.security_group_api = NativeSecurityGroupAPI()
        self.compute_api = compute.API(
            security_group_api=self.security_group_api)

    def _format_security_group_rule(self, context, rule):
        # Convert a rule object to the API dict shape.  Exactly one of
        # 'group' (source group) or 'ip_range' (CIDR) ends up non-empty.
        sg_rule = {}
        sg_rule['id'] = rule.id
        sg_rule['parent_group_id'] = rule.parent_group_id
        sg_rule['ip_protocol'] = rule.protocol
        sg_rule['from_port'] = rule.from_port
        sg_rule['to_port'] = rule.to_port
        sg_rule['group'] = {}
        sg_rule['ip_range'] = {}
        if rule.group_id:
            source_group = self.security_group_api.get(context,
                                                       id=rule.group_id)
            sg_rule['group'] = {'name': source_group.name,
                                'tenant_id': source_group.project_id}
        else:
            sg_rule['ip_range'] = {'cidr': rule.cidr}
        return sg_rule

    def _format_security_group(self, context, group):
        # Convert a group object (and all of its rules) to the API dict shape.
        security_group = {}
        security_group['id'] = group.id
        security_group['description'] = group.description
        security_group['name'] = group.name
        security_group['tenant_id'] = group.project_id
        security_group['rules'] = []
        for rule in group.rules:
            security_group['rules'] += [self._format_security_group_rule(
                context, rule)]
        return security_group

    def _authorize_context(self, req):
        # Pull the nova context off the request and enforce policy.
        context = req.environ['nova.context']
        authorize(context)
        return context

    def _validate_id(self, id):
        # Security group ids are integers; reject anything else as a 400.
        try:
            return int(id)
        except ValueError:
            msg = _("Security group id should be integer")
            raise exc.HTTPBadRequest(explanation=msg)

    def _from_body(self, body, key):
        # Extract a required top-level key from the request body; a missing
        # body or key is a 422.
        if not body:
            raise exc.HTTPUnprocessableEntity()
        value = body.get(key, None)
        if value is None:
            raise exc.HTTPUnprocessableEntity()
        return value


class SecurityGroupController(SecurityGroupControllerBase):
    """The Security group API controller for the OpenStack API."""

    @wsgi.serializers(xml=SecurityGroupTemplate)
    def show(self, req, id):
        """Return data about the given security group."""
        context = self._authorize_context(req)

        id = self._validate_id(id)

        security_group = self.security_group_api.get(context, None, id,
                                                     map_exception=True)

        return {'security_group': self._format_security_group(context,
                                                              security_group)}

    def delete(self, req, id):
        """Delete a security group."""
        context = self._authorize_context(req)

        id = self._validate_id(id)

        security_group = self.security_group_api.get(context, None, id,
                                                     map_exception=True)

        self.security_group_api.destroy(context, security_group)

        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req):
        """Returns a list of security groups"""
        context = self._authorize_context(req)

        raw_groups =
self.security_group_api.list(context, project=context.project_id) limited_list = common.limited(raw_groups, req) result = [self._format_security_group(context, group) for group in limited_list] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} @wsgi.serializers(xml=SecurityGroupTemplate) @wsgi.deserializers(xml=SecurityGroupXMLDeserializer) def create(self, req, body): """Creates a new security group.""" context = self._authorize_context(req) security_group = self._from_body(body, 'security_group') group_name = security_group.get('name', None) group_description = security_group.get('description', None) self.security_group_api.validate_property(group_name, 'name', None) self.security_group_api.validate_property(group_description, 'description', None) group_ref = self.security_group_api.create(context, group_name, group_description) return {'security_group': self._format_security_group(context, group_ref)} class SecurityGroupRulesController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupRuleTemplate) @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer) def create(self, req, body): context = self._authorize_context(req) sg_rule = self._from_body(body, 'security_group_rule') parent_group_id = self._validate_id(sg_rule.get('parent_group_id', None)) security_group = self.security_group_api.get(context, None, parent_group_id, map_exception=True) try: values = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), from_port=sg_rule.get('from_port'), ip_protocol=sg_rule.get('ip_protocol'), cidr=sg_rule.get('cidr'), group_id=sg_rule.get('group_id')) except Exception as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) if values is None: msg = _("Not enough parameters to build a valid rule.") raise exc.HTTPBadRequest(explanation=msg) values['parent_group_id'] = security_group.id if self.security_group_api.rule_exists(security_group, values): msg = _('This rule already exists in group %s') % 
parent_group_id raise exc.HTTPBadRequest(explanation=msg) security_group_rule = self.security_group_api.add_rules( context, parent_group_id, security_group['name'], [values])[0] return {"security_group_rule": self._format_security_group_rule( context, security_group_rule)} def _rule_args_to_dict(self, context, to_port=None, from_port=None, ip_protocol=None, cidr=None, group_id=None): if group_id is not None: group_id = self._validate_id(group_id) #check if groupId exists self.security_group_api.get(context, id=group_id) return self.security_group_api.new_group_ingress_rule( group_id, ip_protocol, from_port, to_port) else: cidr = self.security_group_api.parse_cidr(cidr) return self.security_group_api.new_cidr_ingress_rule( cidr, ip_protocol, from_port, to_port) def delete(self, req, id): context = self._authorize_context(req) id = self._validate_id(id) rule = self.security_group_api.get_rule(context, id) group_id = rule.parent_group_id security_group = self.security_group_api.get(context, None, group_id, map_exception=True) self.security_group_api.remove_rules(context, security_group, [rule['id']]) return webob.Response(status_int=202) class ServerSecurityGroupController(SecurityGroupControllerBase): @wsgi.serializers(xml=SecurityGroupsTemplate) def index(self, req, server_id): """Returns a list of security groups for the given instance.""" context = self._authorize_context(req) self.security_group_api.ensure_default(context) try: instance = self.compute_api.get(context, server_id) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) groups = db.security_group_get_by_instance(context, instance['id']) result = [self._format_security_group(context, group) for group in groups] return {'security_groups': list(sorted(result, key=lambda k: (k['tenant_id'], k['name'])))} class SecurityGroupActionController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SecurityGroupActionController, self).__init__(*args, **kwargs) 
self.security_group_api = NativeSecurityGroupAPI() self.compute_api = compute.API( security_group_api=self.security_group_api) def _parse(self, body, action): try: body = body[action] group_name = body['name'] except TypeError: msg = _("Missing parameter dict") raise webob.exc.HTTPBadRequest(explanation=msg) except KeyError: msg = _("Security group not specified") raise webob.exc.HTTPBadRequest(explanation=msg) if not group_name or group_name.strip() == '': msg = _("Security group name cannot be empty") raise webob.exc.HTTPBadRequest(explanation=msg) return group_name def _invoke(self, method, context, id, group_name): try: instance = self.compute_api.get(context, id) method(context, instance, group_name) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.InstanceNotFound as exp: raise exc.HTTPNotFound(explanation=unicode(exp)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=unicode(exp)) return webob.Response(status_int=202) @wsgi.action('addSecurityGroup') def _addSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) group_name = self._parse(body, 'addSecurityGroup') return self._invoke(self.security_group_api.add_to_instance, context, id, group_name) @wsgi.action('removeSecurityGroup') def _removeSecurityGroup(self, req, id, body): context = req.environ['nova.context'] authorize(context) group_name = self._parse(body, 'removeSecurityGroup') return self._invoke(self.security_group_api.remove_from_instance, context, id, group_name) class SecurityGroupsOutputController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SecurityGroupsOutputController, self).__init__(*args, **kwargs) self.compute_api = compute.API() def _extend_servers(self, req, servers): key = "security_groups" for server in servers: instance = req.get_db_instance(server['id']) groups = instance.get(key) if groups: server[key] = [{"name": group["name"]} for group in 
groups] def _show(self, req, resp_obj): if not softauth(req.environ['nova.context']): return if 'server' in resp_obj.obj: resp_obj.attach(xml=SecurityGroupServerTemplate()) self._extend_servers(req, [resp_obj.obj['server']]) @wsgi.extends def show(self, req, resp_obj, id): return self._show(req, resp_obj) @wsgi.extends def create(self, req, resp_obj, body): return self._show(req, resp_obj) @wsgi.extends def detail(self, req, resp_obj): if not softauth(req.environ['nova.context']): return resp_obj.attach(xml=SecurityGroupServersTemplate()) self._extend_servers(req, list(resp_obj.obj['servers'])) class SecurityGroupsTemplateElement(xmlutil.TemplateElement): def will_render(self, datum): return "security_groups" in datum def make_server(elem): secgrps = SecurityGroupsTemplateElement('security_groups') elem.append(secgrps) secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group', selector="security_groups") secgrp.set('name') class SecurityGroupServerTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('server') make_server(root) return xmlutil.SlaveTemplate(root, 1) class SecurityGroupServersTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('servers') elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') make_server(elem) return xmlutil.SlaveTemplate(root, 1) class Security_groups(extensions.ExtensionDescriptor): """Security group support""" name = "SecurityGroups" alias = "os-security-groups" namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" updated = "2011-07-21T00:00:00+00:00" def get_controller_extensions(self): controller = SecurityGroupActionController() actions = extensions.ControllerExtension(self, 'servers', controller) controller = SecurityGroupsOutputController() output = extensions.ControllerExtension(self, 'servers', controller) return [actions, output] def get_resources(self): resources = [] res = 
extensions.ResourceExtension('os-security-groups', controller=SecurityGroupController()) resources.append(res) res = extensions.ResourceExtension('os-security-group-rules', controller=SecurityGroupRulesController()) resources.append(res) res = extensions.ResourceExtension( 'os-security-groups', controller=ServerSecurityGroupController(), parent=dict(member_name='server', collection_name='servers')) resources.append(res) return resources class NativeSecurityGroupAPI(compute.api.SecurityGroupAPI): @staticmethod def raise_invalid_property(msg): raise exc.HTTPBadRequest(explanation=msg) @staticmethod def raise_group_already_exists(msg): raise exc.HTTPBadRequest(explanation=msg) @staticmethod def raise_invalid_group(msg): raise exc.HTTPBadRequest(explanation=msg) @staticmethod def raise_invalid_cidr(cidr, decoding_exception=None): raise exception.InvalidCidr(cidr=cidr) @staticmethod def raise_over_quota(msg): raise exception.SecurityGroupLimitExceeded(msg) @staticmethod def raise_not_found(msg): raise exc.HTTPNotFound(explanation=msg)
Our customers are our most important asset, and we would like to hear what you have to say and learn how we can help you! We will get back to you as soon as possible. Thank you!
import base64
import logging
from itertools import izip
from cStringIO import StringIO

import cv2
import numpy as np
from skimage.io import imread

import indicoio

from .keys import INDICO_API_KEY

indicoio.api_key = INDICO_API_KEY

# Fix: `logger` was referenced in get_faces_dimens() but never defined,
# so the except-branch raised a NameError instead of logging the error.
logger = logging.getLogger(__name__)

SERVER_URL = "http://localhost:3000/random"


def get_faces_dimens(image_string, bounds):
    """Return a list of (x1, y1, x2, y2) face bounding boxes.

    image_string -- base64-encoded PNG bytes sent to the indico API.
    bounds -- shape of the source image (currently unused; kept for
              interface compatibility).

    Returns an empty list when detection fails, so callers can iterate
    the result unconditionally (previously this implicitly returned
    None on failure, which crashed process()).
    """
    try:
        result = indicoio.facial_localization(image_string)
        faces = []
        for face in result:
            x1, y1 = face["top_left_corner"]
            x2, y2 = face["bottom_right_corner"]
            faces.append((x1, y1, x2, y2))
        return faces
    except Exception as e:
        logger.error(e)
        return []


def get_suitable_cat(width, height):
    """Fetch a random cat image from SERVER_URL resized to width x height."""
    image = imread(SERVER_URL)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    resized_image = cv2.resize(image, (width, height))
    return resized_image


def show(img):
    """Debug helper: display `img` in an OpenCV window until a key press."""
    cv2.imshow("result", img)
    cv2.waitKey()


def process(input_url):
    """Replace every detected face in the image at `input_url` with a cat.

    Returns a StringIO positioned at 0 containing the resulting PNG.
    """
    input_image = imread(input_url)
    input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
    image_string = base64.b64encode(
        cv2.imencode(".png", input_image)[1].tostring())

    faces = get_faces_dimens(image_string, input_image.shape)

    cats = []
    for x1, y1, x2, y2 in faces:
        width, height = x2 - x1, y2 - y1
        cat = get_suitable_cat(width, height)
        cats.append(cat)

    for (x1, y1, x2, y2), cat in izip(faces, cats):
        if cat.shape[2] > 3:
            # Cat has an alpha channel: paste only opaque pixels.
            mask = np.where(cat[:, :, 3])
            input_image[y1:y2, x1:x2, :][mask] = cat[:, :, :3][mask]
        else:
            input_image[y1:y2, x1:x2, :] = cat

    output = StringIO()
    output.write(cv2.imencode(".png", input_image)[1].tostring())
    output.seek(0)
    return output
A big thank you goes out to our amazing listeners. This adorable episode would not be possible without you! We hope you and your family have the MERRIEST Christmas! In this episode, our listeners read us the classic poem ‘Twas The Night Before Christmas.
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GoGrid driver
"""
import time
import hashlib

try:
    import json
except ImportError:
    import simplejson as json

from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.types import MalformedResponseError
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation

HOST = 'api.gogrid.com'
PORTS_BY_SECURITY = {True: 443, False: 80}
API_VERSION = '1.7'

# Map GoGrid server states to libcloud NodeState values.
STATE = {
    "Starting": NodeState.PENDING,
    "On": NodeState.RUNNING,
    "Off": NodeState.PENDING,
    "Restarting": NodeState.REBOOTING,
    "Saving": NodeState.PENDING,
    "Restoring": NodeState.PENDING,
}

GOGRID_INSTANCE_TYPES = {
    '512MB': {'id': '512MB', 'name': '512MB', 'ram': 512, 'disk': 30,
              'bandwidth': None, 'price': 0.095},
    '1GB': {'id': '1GB', 'name': '1GB', 'ram': 1024, 'disk': 60,
            'bandwidth': None, 'price': 0.19},
    '2GB': {'id': '2GB', 'name': '2GB', 'ram': 2048, 'disk': 120,
            'bandwidth': None, 'price': 0.38},
    '4GB': {'id': '4GB', 'name': '4GB', 'ram': 4096, 'disk': 240,
            'bandwidth': None, 'price': 0.76},
    '8GB': {'id': '8GB', 'name': '8GB', 'ram': 8192, 'disk': 480,
            'bandwidth': None, 'price': 1.52},
}


class GoGridResponse(Response):
    """Parses GoGrid's JSON envelope and maps HTTP errors."""

    def success(self):
        if self.status == 403:
            raise InvalidCredsError('Invalid credentials', GoGridNodeDriver)
        if self.status == 401:
            raise InvalidCredsError('API Key has insufficient rights',
                                    GoGridNodeDriver)
        if not self.body:
            return None
        try:
            return json.loads(self.body)['status'] == 'success'
        except ValueError:
            raise MalformedResponseError('Malformed reply',
                                         body=self.body,
                                         driver=GoGridNodeDriver)

    def parse_body(self):
        if not self.body:
            return None
        return json.loads(self.body)

    def parse_error(self):
        # Fixed: besides ValueError (not JSON), a well-formed body may
        # still lack "list"/[0]/"message" -- treat all of those as
        # "no error message available" instead of raising.
        try:
            return json.loads(self.body)["list"][0]['message']
        except (ValueError, KeyError, IndexError):
            return None


class GoGridConnection(ConnectionUserAndKey):
    """
    Connection class for the GoGrid driver
    """

    host = HOST
    responseCls = GoGridResponse

    def add_default_params(self, params):
        params["api_key"] = self.user_id
        params["v"] = API_VERSION
        params["format"] = 'json'
        params["sig"] = self.get_signature(self.user_id, self.key)
        return params

    def get_signature(self, key, secret):
        """ create sig from md5 of key + secret + time """
        m = hashlib.md5(key + secret + str(int(time.time())))
        return m.hexdigest()


class GoGridIpAddress(object):
    """
    IP Address
    """

    def __init__(self, id, ip, public, state, subnet):
        self.id = id
        self.ip = ip
        self.public = public
        self.state = state
        self.subnet = subnet


class GoGridNode(Node):
    # Generating uuid based on public ip to get around missing id on
    # create_node in gogrid api
    #
    # Used public ip since it is not mutable and specified at create time,
    # so uuid of node should not change after add is completed
    def get_uuid(self):
        return hashlib.sha1(
            "%s:%d" % (self.public_ip, self.driver.type)).hexdigest()


class GoGridNodeDriver(NodeDriver):
    """
    GoGrid node driver
    """

    connectionCls = GoGridConnection
    type = Provider.GOGRID
    name = 'GoGrid'
    features = {"create_node": ["generates_password"]}

    _instance_types = GOGRID_INSTANCE_TYPES

    def _get_state(self, element):
        # Fixed: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the lookup failures that can
        # actually occur when the state is missing or unknown.
        try:
            return STATE[element['state']['name']]
        except (KeyError, TypeError):
            pass
        return NodeState.UNKNOWN

    def _get_ip(self, element):
        return element.get('ip').get('ip')

    def _get_id(self, element):
        return element.get('id')

    def _to_node(self, element, password=None):
        state = self._get_state(element)
        ip = self._get_ip(element)
        id = self._get_id(element)
        n = GoGridNode(id=id,
                       name=element['name'],
                       state=state,
                       public_ip=[ip],
                       private_ip=[],
                       extra={'ram': element.get('ram').get('name'),
                              'isSandbox': element['isSandbox'] == 'true'},
                       driver=self.connection.driver)
        if password:
            n.extra['password'] = password
        return n

    def _to_image(self, element):
        n = NodeImage(id=element['id'],
                      name=element['friendlyName'],
                      driver=self.connection.driver)
        return n

    def _to_images(self, object):
        return [self._to_image(el) for el in object['list']]

    def _to_location(self, element):
        location = NodeLocation(id=element['id'],
                                name=element['name'],
                                country="US",
                                driver=self.connection.driver)
        return location

    def _to_ip(self, element):
        ip = GoGridIpAddress(id=element['id'],
                             ip=element['ip'],
                             public=element['public'],
                             subnet=element['subnet'],
                             state=element["state"]["name"])
        ip.location = self._to_location(element['datacenter'])
        return ip

    def _to_ips(self, object):
        return [self._to_ip(el) for el in object['list']]

    def _to_locations(self, object):
        return [self._to_location(el) for el in object['list']]

    def list_images(self, location=None):
        params = {}
        if location is not None:
            params["datacenter"] = location.id
        images = self._to_images(
            self.connection.request('/api/grid/image/list', params).object)
        return images

    def list_nodes(self):
        # Passwords are fetched separately and joined to servers by id.
        passwords_map = {}

        res = self._server_list()
        try:
            for password in self._password_list()['list']:
                try:
                    passwords_map[password['server']['id']] = \
                        password['password']
                except KeyError:
                    pass
        except InvalidCredsError:
            # some gogrid API keys don't have permission to access the
            # password list.
            pass

        return [self._to_node(el, passwords_map.get(el.get('id')))
                for el in res['list']]

    def reboot_node(self, node):
        id = node.id
        power = 'restart'
        res = self._server_power(id, power)
        if not res.success():
            raise Exception(res.parse_error())
        return True

    def destroy_node(self, node):
        id = node.id
        res = self._server_delete(id)
        if not res.success():
            raise Exception(res.parse_error())
        return True

    def _server_list(self):
        return self.connection.request('/api/grid/server/list').object

    def _password_list(self):
        return self.connection.request('/api/support/password/list').object

    def _server_power(self, id, power):
        # power in ['start', 'stop', 'restart']
        params = {'id': id, 'power': power}
        return self.connection.request("/api/grid/server/power", params,
                                       method='POST')

    def _server_delete(self, id):
        params = {'id': id}
        return self.connection.request("/api/grid/server/delete", params,
                                       method='POST')

    def _get_first_ip(self, location=None):
        # Pick the first unassigned public IP, or fail loudly.
        ips = self.ex_list_ips(public=True, assigned=False, location=location)
        try:
            return ips[0].ip
        except IndexError:
            raise LibcloudError('No public unassigned IPs left',
                                GoGridNodeDriver)

    def list_sizes(self, location=None):
        return [NodeSize(driver=self.connection.driver, **i)
                for i in self._instance_types.values()]

    def list_locations(self):
        locations = self._to_locations(
            self.connection.request('/api/common/lookup/list',
                                    params={'lookup': 'ip.datacenter'}).object)
        return locations

    def ex_create_node_nowait(self, **kwargs):
        """Don't block until GoGrid allocates id for a node
        but return right away with id == None.

        The existance of this method is explained by
        the fact that GoGrid assigns id to a node only
        few minutes after creation."""
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']
        try:
            ip = kwargs['ex_ip']
        except KeyError:
            ip = self._get_first_ip(kwargs.get('location'))

        params = {'name': name,
                  'image': image.id,
                  'description': kwargs.get('ex_description', ''),
                  'isSandbox': str(kwargs.get('ex_issandbox', False)).lower(),
                  'server.ram': size.id,
                  'ip': ip}

        object = self.connection.request('/api/grid/server/add',
                                         params=params, method='POST').object
        node = self._to_node(object['list'][0])

        return node

    def create_node(self, **kwargs):
        """Create a new GoGird node

        See L{NodeDriver.create_node} for more keyword args.

        @keyword    ex_description: Description of a Node
        @type       ex_description: C{string}
        @keyword    ex_issandbox: Should server be sendbox?
        @type       ex_issandbox: C{bool}
        @keyword    ex_ip: Public IP address to use for a Node. If not
                    specified, first available IP address will be picked
        @type       ex_ip: C{string}
        """
        node = self.ex_create_node_nowait(**kwargs)

        timeout = 60 * 20
        waittime = 0
        interval = 2 * 60
        while node.id is None and waittime < timeout:
            nodes = self.list_nodes()

            for i in nodes:
                if i.public_ip[0] == node.public_ip[0] and i.id is not None:
                    return i

            waittime += interval
            time.sleep(interval)

        # Fixed: the original tested `if id is None`, i.e. the *builtin*
        # `id`, which is never None -- so the timeout error could never
        # fire.  Test the node's id instead.
        if node.id is None:
            raise Exception(
                "Wasn't able to wait for id allocation for the node %s"
                % str(node))

        return node

    def ex_save_image(self, node, name):
        """Create an image for node.

        Please refer to GoGrid documentation to get info
        how prepare a node for image creation:

        http://wiki.gogrid.com/wiki/index.php/MyGSI

        @keyword    node: node to use as a base for image
        @type       node: L{Node}
        @keyword    name: name for new image
        @type       name: C{string}
        """
        params = {'server': node.id,
                  'friendlyName': name}
        object = self.connection.request('/api/grid/image/save',
                                         params=params, method='POST').object

        return self._to_images(object)[0]

    def ex_edit_node(self, **kwargs):
        """Change attributes of a node.

        @keyword    node: node to be edited
        @type       node: L{Node}
        @keyword    size: new size of a node
        @type       size: L{NodeSize}
        @keyword    ex_description: new description of a node
        @type       ex_description: C{string}
        """
        node = kwargs['node']
        size = kwargs['size']

        params = {'id': node.id,
                  'server.ram': size.id}

        if 'ex_description' in kwargs:
            params['description'] = kwargs['ex_description']

        object = self.connection.request('/api/grid/server/edit',
                                         params=params).object

        return self._to_node(object['list'][0])

    def ex_edit_image(self, **kwargs):
        """Edit metadata of a server image.

        @keyword    image: image to be edited
        @type       image: L{NodeImage}
        @keyword    public: should be the image public?
        @type       public: C{bool}
        @keyword    ex_description: description of the image (optional)
        @type       ex_description: C{string}
        @keyword    name: name of the image
        @type       name: C{string}
        """
        image = kwargs['image']
        public = kwargs['public']

        params = {'id': image.id,
                  'isPublic': str(public).lower()}

        if 'ex_description' in kwargs:
            params['description'] = kwargs['ex_description']

        if 'name' in kwargs:
            params['friendlyName'] = kwargs['name']

        object = self.connection.request('/api/grid/image/edit',
                                         params=params).object

        return self._to_image(object['list'][0])

    def ex_list_ips(self, **kwargs):
        """Return list of IP addresses assigned to the account.

        @keyword    public: set to True to list only public IPs or False
                    to list only private IPs. Set to None or not specify
                    at all not to filter by type
        @type       public: C{bool}
        @keyword    assigned: set to True to list only addresses assigned
                    to servers, False to list unassigned addresses and set
                    to None or don't set at all not no filter by state
        @type       assigned: C{bool}
        @keyword    location: filter IP addresses by location
        @type       location: L{NodeLocation}
        @return:    C{list} of L{GoGridIpAddress}es
        """
        params = {}

        if "public" in kwargs and kwargs["public"] is not None:
            params["ip.type"] = {True: "Public",
                                 False: "Private"}[kwargs["public"]]
        if "assigned" in kwargs and kwargs["assigned"] is not None:
            params["ip.state"] = {True: "Assigned",
                                  False: "Unassigned"}[kwargs["assigned"]]
        if "location" in kwargs and kwargs['location'] is not None:
            params['datacenter'] = kwargs['location'].id

        ips = self._to_ips(
            self.connection.request('/api/grid/ip/list',
                                    params=params).object)
        return ips
The cap and rotor of any ignition system must be considered maintenance items. Just like when you change the oil and filter in your car, you should always replace your cap and rotor as a set. MSD makes it easy with these cap and rotor replacement kits. The correct rotor is already matched to the correct cap for you.
# -=- encoding: utf-8 -=-
#
# SFLvault - Secure networked password store and credentials manager.
#
# Copyright (C) 2008  Savoir-faire Linux inc.
#
# Author: Alexandre Bourget <alexandre.bourget@savoirfairelinux.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from base64 import b64decode, b64encode
from datetime import datetime
import re

from Crypto.PublicKey import ElGamal
from sqlalchemy import Column, MetaData, Table, types, ForeignKey
from sqlalchemy.orm import mapper, relation, backref
from sqlalchemy.orm import scoped_session, sessionmaker, eagerload, lazyload
from sqlalchemy.orm import eagerload_all
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy import sql

from sflvault.model import meta
from sflvault.model.meta import Session, metadata
from sflvault.model.custom_types import JSONEncodedDict

from sflvault.common.crypto import *

from zope.sqlalchemy import ZopeTransactionExtension

# TODO: add an __all__ statement here, to speed up loading...


def init_model(engine):
    """Call me before using any of the tables or classes in the model."""
    sm = sessionmaker(autoflush=True, bind=engine,
                      expire_on_commit=False,
                      extension=ZopeTransactionExtension())
    meta.engine = engine
    meta.Session = scoped_session(sm)


users_table = Table("users", metadata,
                    Column('id', types.Integer, primary_key=True),
                    Column('username', types.Unicode(50)),
                    # ElGamal user's public key.
                    Column('pubkey', types.Text),
                    # Used in the login/authenticate challenge
                    Column('logging_token', types.Binary(35)),
                    # Time until the token is valid.
                    Column('logging_timeout', types.DateTime),
                    # This stamp is used to wipe users which haven't 'setup'
                    # their account before this date/time
                    Column('waiting_setup', types.DateTime, nullable=True),
                    Column('created_time', types.DateTime,
                           default=datetime.now),
                    # Admin flag, allows to add users, and grant access.
                    Column('is_admin', types.Boolean, default=False)
                    )

# Many-to-many association between users and groups, carrying the
# group key encrypted for that particular user.
usergroups_table = Table('users_groups', metadata,
                         Column('id', types.Integer, primary_key=True),
                         Column('user_id', types.Integer,
                                ForeignKey('users.id')),
                         Column('group_id', types.Integer,
                                ForeignKey('groups.id')),
                         Column('is_admin', types.Boolean, default=False),
                         Column('cryptgroupkey', types.Text),
                         )

groups_table = Table('groups', metadata,
                     Column('id', types.Integer, primary_key=True),
                     Column('name', types.Unicode(50)),
                     Column('hidden', types.Boolean, default=False),
                     # ElGamal group's public key
                     Column('pubkey', types.Text),
                     )

# Many-to-many association between services and groups, carrying the
# service's symmetric key encrypted for that group.
servicegroups_table = Table('services_groups', metadata,
                            Column('id', types.Integer, primary_key=True),
                            Column('service_id', types.Integer,
                                   ForeignKey('services.id')),
                            Column('group_id', types.Integer,
                                   ForeignKey('groups.id')),
                            Column('cryptsymkey', types.Text),
                            )

customers_table = Table('customers', metadata,
                        Column('id', types.Integer, primary_key=True),
                        Column('name', types.Unicode(100)),
                        Column('created_time', types.DateTime),
                        # username, kept even if the user was deleted.
                        Column('created_user', types.Unicode(50))
                        )

machines_table = Table('machines', metadata,
                       Column('id', types.Integer, primary_key=True),
                       Column('customer_id', types.Integer,
                              ForeignKey('customers.id')),  # relation customers
                       Column('created_time', types.DateTime,
                              default=datetime.now),
                       # Human-readable name, short description.
                       Column('name', types.Unicode(150)),
                       # Fully-qualified domain name.
                       Column('fqdn', types.Unicode(150)),
                       # IP address if static, otherwise 'dyn'
                       Column('ip', types.String(100)),
                       # Where this server is: geographic location, city,
                       # and position in its rack/enclosure.
                       Column('location', types.Text),
                       # Notes about the server, references, URLs, etc.
                       Column('notes', types.Text)
                       )

# Each ssh or web app. service that have a password.
services_table = Table('services', metadata,
                       Column('id', types.Integer, primary_key=True),
                       # Service lies on which Machine ?
                       Column('machine_id', types.Integer,
                              ForeignKey('machines.id')),
                       # Hierarchical service required to access this one ?
                       Column('parent_service_id', types.Integer,
                              ForeignKey('services.id')),
                       # REMOVED: replaced by servicegroups_table many-to-many.
                       #Column('group_id', types.Integer,
                       #       ForeignKey('groups.id')),
                       Column('url', types.String(250)),  # Full service desc.
                       # simplejson'd python structures, depends on url scheme
                       Column('metadata', JSONEncodedDict),  # reserved.
                       Column('notes', types.Text),
                       Column('secret', types.Text),
                       Column('secret_last_modified', types.DateTime,
                              default=datetime.now)
                       )


class Service(object):
    def __repr__(self):
        return "<Service s#%d: %s>" % (self.id, self.url)


class Machine(object):
    def __repr__(self):
        return "<Machine m#%d: %s (%s %s)>" % (self.id if self.id else 0,
                                               self.name, self.fqdn, self.ip)


class User(object):
    def setup_expired(self):
        """Return True/False if waiting_setup has expired"""
        if self.waiting_setup and self.waiting_setup < datetime.now():
            return True
        else:
            return False

    def elgamal(self):
        """Return the ElGamal object, ready to encrypt stuff."""
        e = ElGamal.ElGamalobj()
        (e.p, e.g, e.y) = unserial_elgamal_pubkey(self.pubkey)
        return e

    def __repr__(self):
        return "<User u#%d: %s>" % (self.id, self.username)


class UserGroup(object):
    """Membership of a user to a group"""
    def __init__(self, user=None):
        if user:
            self.user = user

    def __repr__(self):
        return "<UserGroup element>"


class ServiceGroup(object):
    """membership of a service to a group"""
    def __init__(self, service=None):
        if service:
            self.service = service

    def __repr__(self):
        return "<ServiceGroup element>"


class Group(object):
    def __repr__(self):
        return "<Group: %s>" % (self.name)

    def elgamal(self):
        """Return the ElGamal object, ready to encrypt stuff."""
        e = ElGamal.ElGamalobj()
        (e.p, e.g, e.y) = unserial_elgamal_pubkey(self.pubkey)
        return e


class Customer(object):
    def __repr__(self):
        return "<Customer c#%d: %s>" % (self.id, self.name)


# Relationship overview:
#
# User
#   .groups_assoc
#     UserGroup
#       .group
#         Group
#           .services_assoc
#             ServiceGroup
#               .service
#                 Service
#
# Service
#   .groups_assoc
#     ServiceGroup
#       .group
#         Group
#           .users_assoc
#             UserGroup
#               .user
#                 User

# Map each class to its corresponding table.
mapper(User, users_table, {
    # Quick access to services...
    'services': relation(Service,
                         secondary=usergroups_table.join(
                             servicegroups_table,
                             usergroups_table.c.group_id ==
                             servicegroups_table.c.group_id),
                         backref='users',
                         viewonly=True,
                         ),
    'groups_assoc': relation(UserGroup, backref='user')
    })
User.groups = association_proxy('groups_assoc', 'group')

mapper(UserGroup, usergroups_table, {
    'group': relation(Group, backref='users_assoc')
    })

mapper(Group, groups_table, {
    'services_assoc': relation(ServiceGroup, backref='group')
    })
Group.users = association_proxy('users_assoc', 'user')
Group.services = association_proxy('services_assoc', 'service')

mapper(ServiceGroup, servicegroups_table, {
    'service': relation(Service, backref='groups_assoc')
    })

mapper(Service, services_table, {
    # Self-referential parent/child hierarchy of services.
    'children': relation(Service,
                         lazy=False,
                         backref=backref('parent', uselist=False,
                                         remote_side=[services_table.c.id]),
                         primaryjoin=services_table.c.parent_service_id ==
                         services_table.c.id)
    })
Service.groups = association_proxy('groups_assoc', 'group')

mapper(Machine, machines_table, {
    'services': relation(Service, backref='machine', lazy=False)
    })

mapper(Customer, customers_table, {
    'machines': relation(Machine, backref='customer', lazy=False)
    })


################ Helper functions ################

def query(cls):
    """Shortcut to meta.Session.query(cls)"""
    return meta.Session.query(cls)


def get_user(user, eagerload_all_=None):
    """Get a user provided a username or an int(user_id), possibly
    eager loading some relations.
    """
    if isinstance(user, int):
        uq = query(User).filter_by(id=user)
    else:
        uq = query(User).filter_by(username=user)

    if eagerload_all_:
        uq = uq.options(eagerload_all(eagerload_all_))

    usr = uq.first()

    if not usr:
        raise LookupError("Invalid user: %s" % user)

    return usr


def get_objects_ids(objects_ids, object_type):
    """Return a list of valid IDs for certain object types.

    objects_ids - Must be a list of str or ints
    object_type - One of 'groups', 'machines', 'customers'
    """
    return get_objects_list(objects_ids, object_type,
                            return_objects=False)[1]


def get_objects_list(objects_ids, object_type, eagerload_all_=None,
                     return_objects=True):
    """Get a list of objects by their IDs, either as int or str.

    Make sure we return a list of integers as IDs.

    object_type - the type of object to be returned. It must be one of
        ['groups', 'machines', 'customers']
    return_objects - whether to return the actual objects or not.
""" objects_types_assoc = {'groups': Group, 'machines': Machine, 'customers': Customer} # Check if object_type is valid if object_type not in objects_types_assoc: raise ValueError("Invalid object type: %s" % (object_type)) # Get variables if isinstance(objects_ids, str): objects_ids = [int(objects_ids)] elif isinstance(objects_ids, int): objects_ids = [objects_ids] elif isinstance(objects_ids, list): objects_ids = [int(x) for x in objects_ids] else: raise ValueError("Invalid %s specification" % (object_type)) # Pull the objects/IDs from the DB obj = objects_types_assoc[object_type] if return_objects: objects_q = query(obj).filter(obj.id.in_(objects_ids)) if eagerload_all_: objects_q = objects_q.options(eagerload_all(eagerload_all_)) objects = objects_q.all() else: objects_q = sql.select([obj.id]).where(obj.id.in_(objects_ids)) objects = meta.Session.execute(objects_q).fetchall() if len(objects) != len(objects_ids): # Woah, you specified objects that didn't exist ? valid_objects = [x.id for x in objects] invalid_objects = [x for x in objects_ids if x not in valid_objects] raise ValueError("Invalid %s: %s" % (object_type, invalid_objects)) return (objects if return_objects else None, objects_ids) def search_query(swords, filters=None, verbose=False): # Create the join.. 
sel = sql.outerjoin(customers_table, machines_table).outerjoin(services_table) if filters: # Remove filters that are just None filters = dict([(x, filters[x]) for x in filters if filters[x]]) if not isinstance(filters, dict): raise RuntimeError("filters param must be a dict, or None") if [True for x in filters if not isinstance(filters[x], list)]: raise RuntimeError("filters themselves must be a list of ints") if 'groups' in filters: sel = sel.join(servicegroups_table) sel = sel.select(use_labels=True) if filters: if 'groups' in filters: sel = sel.where(ServiceGroup.group_id.in_(filters['groups'])) if 'machines' in filters: sel = sel.where(Machine.id.in_(filters['machines'])) if 'customers' in filters: sel = sel.where(Customer.id.in_(filters['customers'])) # Fields to search in.. textfields = [Customer.name, Machine.name, Machine.fqdn, Machine.ip, Machine.location, Machine.notes, Service.url, Service.notes] numfields = [Customer.id, Machine.id, Service.id] # TODO: distinguish between INTEGER fields and STRINGS and search # differently (check only ==, and only if word can be converted to int()) andlist = [] for word in swords: orlist = [field.ilike('%%%s%%' % word) for field in textfields] if word.isdigit(): # Search numeric fields too orlist += [field == int(word) for field in numfields] orword = sql.or_(*orlist) andlist.append(orword) sel = sel.where(sql.and_(*andlist)) sel = sel.order_by(Machine.name, Service.url) return meta.Session.execute(sel)
This is a one-of-a-kind handmade necklace with a nautical theme. The main focal charm features a cute blue octopus made from white shrink plastic. Dangling off its tentacles are a starfish, anchor and fish Tibetan silver charms. The cord is a lovely deep black satin with an extender chain featuring a tiny key on the end. The necklace measures approximately 41cm long and the extender is 5.5cm. The pendant hangs from the cord by a silver-plated bail, giving it lovely movement. The pendant (including shrink charm and silver charms) measures approximately 4cm long by 2.5cm at its widest point.
# -*- coding: utf-8 -*-
"""
A simple Python module for parsing human names into their individual
components.

Components::

    * Title
    * First name
    * Middle names
    * Last names
    * Suffixes

Works for a variety of common name formats for latin-based languages. Over
100 unit tests with example names. Should be unicode safe but it's fairly
untested.

HumanName instances will pass an equals (==) test if their lower case
unicode representations are the same.

--------

Copyright Derek Gulbranson, May 2009 <derek73 at gmail>.
http://code.google.com/p/python-nameparser

Parser logic based on PHP nameParser.php by G. Miernicki
http://code.google.com/p/nameparser/

LGPL
http://www.opensource.org/licenses/lgpl-license.html

This library is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
for more details.
"""
__author__ = "Derek Gulbranson"
__revision__ = "$Id: nameparser.py 25 2010-08-18 19:57:57Z derek73 $"
__version__ = "0.1.2"
__license__ = "LGPL"
__url__ = "http://code.google.com/p/python-nameparser"

# Known titles, compared case-insensitively after lc() normalization.
TITLES = [
    'dr','doctor','miss','misses','mr','mister','mrs','ms','sir',
    'rev','madam','madame','AB','2ndLt','Amn','1stLt','A1C','Capt','SrA','Maj',
    'SSgt','LtCol','TSgt','Col','BrigGen','1stSgt','MajGen','SMSgt','LtGen',
    '1stSgt','Gen','CMSgt','1stSgt','CCMSgt','CMSAF','PVT','2LT','PV2','1LT',
    'PFC','CPT','SPC','MAJ','CPL','LTC','SGT','COL','SSG','BG','SFC','MG',
    'MSG','LTG','1SGT','GEN','SGM','CSM','SMA','WO1','WO2','WO3','WO4','WO5',
    'ENS','SA','LTJG','SN','LT','PO3','LCDR','PO2','CDR','PO1','CAPT','CPO',
    'RADM(LH)','SCPO','RADM(UH)','MCPO','VADM','MCPOC','ADM','MPCO-CG','CWO-2',
    'CWO-3','CWO-4','Pvt','2ndLt','PFC','1stLt','LCpl','Capt','Cpl','Maj','Sgt',
    'LtCol','SSgt','Col','GySgt','BGen','MSgt','MajGen','1stSgt','LtGen','MGySgt',
    'Gen','SgtMaj','SgtMajMC','WO-1','CWO-2','CWO-3','CWO-4','CWO-5','ENS','SA',
    'LTJG','SN','LT','PO3','LCDR','PO2','CDR','PO1','CAPT','CPO','RDML','SCPO',
    'RADM','MCPO','VADM','MCPON','ADM','FADM','WO1','CWO2','CWO3','CWO4','CWO5'
]

# QUESTIONABLE_TITLES could be last names or they could be titles
# TODO: need to find best way to deal with these.. http://code.google.com/p/python-nameparser/issues/detail?id=3
QUESTIONABLE_TITLES = ['judge',]

# PUNC_TITLES could be names or titles, but if they have period at the end they're a title
PUNC_TITLES = ['hon.']

# Name prefixes that belong with the last name ("de la Vega").
PREFICES = [
    'abu','bon','ben','bin','da','dal','de','del','der','de','di','e','ibn',
    'la','le','san','st','ste','van','vel','von'
]

# Recognized suffixes ("Jr", "III", "Ph.D", ...), compared via lc().
SUFFICES = [
    'esq','esquire','jr','sr','2','i','ii','iii','iv','v','clu','chfc',
    'cfp','md','phd'
]

# Canonical capitalization for words capitalize() would otherwise mangle.
CAPITALIZATION_EXCEPTIONS = {
    'ii': 'II',
    'iii': 'III',
    'iv': 'IV',
    'md': 'M.D.',
    'phd': 'Ph.D.'
}

CONJUNCTIONS = ['&', 'and', 'et', 'e', 'und', 'y']

ENCODING = 'utf-8'

import re
re_spaces = re.compile(r"\s+")
re_word = re.compile(r"\w+")
re_mac = re.compile(r'^(ma?c)(\w)', re.I)
re_initial = re.compile(r'^(\w\.|[A-Z])?$')

import logging
# logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('HumanName')


def lc(value):
    '''Lower case and remove any periods to normalize for comparison.'''
    if not value:
        return u''
    return value.lower().replace('.','')


def is_not_initial(value):
    '''Return True when *value* is not a bare initial like "J." or "Q".'''
    return not re_initial.match(value)


class HumanName(object):

    """
    Parse a person's name into individual components

    Usage::

        >>> name = HumanName("Dr. Juan Q. Xavier de la Vega III")
        >>> name.title
        'Dr.'
        >>> name.first
        'Juan'
        >>> name.middle
        'Q. Xavier'
        >>> name.last
        'de la Vega'
        >>> name.suffix
        'III'
        >>> name2 = HumanName("de la Vega, Dr. Juan Q. Xavier III")
        >>> name == name2
        True
        >>> len(name)
        5
        >>> list(name)
        ['Dr.', 'Juan', 'Q. Xavier', 'de la Vega', 'III']
        >>> name[1:-1]
        [u'Juan', u'Q. Xavier', u'de la Vega']

    """

    def __init__(self, full_name=u"", titles=TITLES, prefices=PREFICES,
        suffices=SUFFICES, punc_titles=PUNC_TITLES,
        conjunctions=CONJUNCTIONS,
        capitalization_exceptions=CAPITALIZATION_EXCEPTIONS):
        super(HumanName, self).__init__()
        self.titles = titles
        self.punc_titles = punc_titles
        self.conjunctions = conjunctions
        self.prefices = prefices
        self.suffices = suffices
        self.capitalization_exceptions = capitalization_exceptions
        self.full_name = full_name
        self.title = u""
        self.first = u""
        self.suffixes = []
        self.middle_names = []
        self.last_names = []
        self.unparsable = False
        # Iteration state for next(); see __iter__.
        self.count = 0
        self.members = ['title','first','middle','last','suffix']
        if self.full_name:
            self.parse_full_name()

    def __iter__(self):
        # NOTE(review): the instance is its own iterator with a shared
        # counter, so nested/concurrent iteration is not supported.
        return self

    def __len__(self):
        l = 0
        for x in self:
            l += 1
        return l

    def __eq__(self, other):
        """
        HumanName instances are equal to other objects whose
        lower case unicode representations are the same
        """
        return unicode(self).lower() == unicode(other).lower()

    def __ne__(self, other):
        return not unicode(self).lower() == unicode(other).lower()

    def __getitem__(self, key):
        # BUGFIX: for an integer key, self.members[key] is a single attribute
        # *name*; the original code iterated over its characters and raised
        # AttributeError. Slices keep the original list-of-values behavior.
        if isinstance(key, slice):
            return [getattr(self, x) for x in self.members[key]]
        return getattr(self, self.members[key])

    def next(self):
        # Yield the next non-empty member; skip falsy ones via recursion.
        if self.count >= len(self.members):
            self.count = 0
            raise StopIteration
        else:
            c = self.count
            self.count = c + 1
            return getattr(self, self.members[c]) or self.next()

    def __unicode__(self):
        return u" ".join(self)

    def __str__(self):
        return self.__unicode__().encode('utf-8')

    def __repr__(self):
        if self.unparsable:
            return u"<%(class)s : [ Unparsable ] >" % {'class': self.__class__.__name__,}
        return u"<%(class)s : [\n\tTitle: '%(title)s' \n\tFirst: '%(first)s' \n\tMiddle: '%(middle)s' \n\tLast: '%(last)s' \n\tSuffix: '%(suffix)s'\n]>" % {
            'class': self.__class__.__name__,
            'title': self.title,
            'first': self.first,
            'middle': self.middle,
            'last': self.last,
            'suffix': self.suffix,
        }

    @property
    def middle(self):
        """All middle names joined by single spaces."""
        return u" ".join(self.middle_names)

    @property
    def last(self):
        """All last-name pieces (incl. prefixes) joined by single spaces."""
        return u" ".join(self.last_names)

    @property
    def suffix(self):
        """All suffixes joined by comma-space."""
        return u", ".join(self.suffixes)

    def is_conjunction(self, piece):
        """True when *piece* is a conjunction ("and", "y", ...) and not an initial."""
        return lc(piece) in self.conjunctions and is_not_initial(piece)

    def is_prefix(self, piece):
        """True when *piece* is a last-name prefix ("de", "van", ...) and not an initial."""
        return lc(piece) in self.prefices and is_not_initial(piece)

    def parse_full_name(self):
        """Split self.full_name into title/first/middle/last/suffix parts.

        Handles three comma layouts:
          * "Title First Middle Last Suffix"
          * "Title First Middle Last, Suffix [, Suffix]"
          * "Last, Title First Middle[,] Suffix [, Suffix]"

        Raises AttributeError when full_name is empty; sets self.unparsable
        when nothing could be extracted.
        """
        if not self.full_name:
            raise AttributeError("Missing full_name")

        if not isinstance(self.full_name, unicode):
            self.full_name = unicode(self.full_name, ENCODING)

        # collapse multiple spaces
        self.full_name = re.sub(re_spaces, u" ", self.full_name.strip())

        # reset values
        self.title = u""
        self.first = u""
        self.suffixes = []
        self.middle_names = []
        self.last_names = []
        self.unparsable = False

        # break up full_name by commas
        parts = [x.strip() for x in self.full_name.split(",")]

        log.debug(u"full_name: " + self.full_name)
        log.debug(u"parts: " + unicode(parts))

        pieces = []
        if len(parts) == 1:
            # no commas, title first middle middle middle last suffix
            for part in parts:
                names = part.split(' ')
                for name in names:
                    # BUGFIX: str.replace()/str.strip() return new strings;
                    # the original discarded the cleaned value and appended
                    # the raw token instead.
                    pieces.append(name.replace(',', '').strip())
            log.debug(u"pieces: " + unicode(pieces))
            for i, piece in enumerate(pieces):
                try:
                    nxt = pieces[i + 1]
                except IndexError:
                    nxt = None
                try:
                    prev = pieces[i - 1]
                except IndexError:
                    prev = None
                if lc(piece) in self.titles:
                    self.title = piece
                    continue
                if piece.lower() in self.punc_titles:
                    self.title = piece
                    continue
                if not self.first:
                    self.first = piece.replace(".","")
                    continue
                if (i == len(pieces) - 2) and (lc(nxt) in self.suffices):
                    # second-to-last piece followed by a suffix -> done
                    self.last_names.append(piece)
                    self.suffixes.append(nxt)
                    break
                if self.is_prefix(piece):
                    self.last_names.append(piece)
                    continue
                if self.is_conjunction(piece) and i < len(pieces) / 2:
                    # conjunction in the first half joins the first name
                    self.first += ' ' + piece
                    continue
                if self.is_conjunction(prev) and (i-1) < len(pieces) / 2:
                    self.first += ' ' + piece
                    continue
                if self.is_conjunction(piece) or self.is_conjunction(nxt):
                    self.last_names.append(piece)
                    continue
                if i == len(pieces) - 1:
                    self.last_names.append(piece)
                    continue
                self.middle_names.append(piece)
        else:
            if lc(parts[1]) in self.suffices:
                # title first middle last, suffix [, suffix]
                names = parts[0].split(' ')
                for name in names:
                    # BUGFIX: append the cleaned token (see above).
                    pieces.append(name.replace(',', '').strip())
                log.debug(u"pieces: " + unicode(pieces))
                self.suffixes += parts[1:]
                for i, piece in enumerate(pieces):
                    try:
                        nxt = pieces[i + 1]
                    except IndexError:
                        nxt = None
                    if lc(piece) in self.titles:
                        self.title = piece
                        continue
                    if piece.lower() in self.punc_titles:
                        self.title = piece
                        continue
                    if not self.first:
                        self.first = piece.replace(".","")
                        continue
                    if i == (len(pieces) - 1) and self.is_prefix(piece):
                        # BUGFIX: nxt is always None for the final piece, so
                        # the original `piece + " " + next` raised TypeError.
                        # Treat a trailing prefix as part of the last name.
                        self.last_names.append(piece)
                        break
                    if self.is_prefix(piece):
                        self.last_names.append(piece)
                        continue
                    if self.is_conjunction(piece) or self.is_conjunction(nxt):
                        self.last_names.append(piece)
                        continue
                    if i == len(pieces) - 1:
                        self.last_names.append(piece)
                        continue
                    self.middle_names.append(piece)
            else:
                # last, title first middles[,] suffix [,suffix]
                names = parts[1].split(' ')
                for name in names:
                    # BUGFIX: append the cleaned token (see above).
                    pieces.append(name.replace(',', '').strip())
                log.debug(u"pieces: " + unicode(pieces))
                self.last_names.append(parts[0])
                for i, piece in enumerate(pieces):
                    try:
                        nxt = pieces[i + 1]
                    except IndexError:
                        nxt = None
                    if lc(piece) in self.titles:
                        self.title = piece
                        continue
                    if piece.lower() in self.punc_titles:
                        self.title = piece
                        continue
                    if not self.first:
                        self.first = piece.replace(".","")
                        continue
                    if lc(piece) in self.suffices:
                        self.suffixes.append(piece)
                        continue
                    self.middle_names.append(piece)
                try:
                    if parts[2]:
                        self.suffixes += parts[2:]
                except IndexError:
                    pass

        if not self.first and len(self.middle_names) < 1 and len(self.last_names) < 1:
            self.unparsable = True
            log.error(u"Unparsable full_name: " + self.full_name)

    def cap_word(self, word):
        """Capitalize one word honoring prefixes, conjunctions, exceptions
        and Mac/Mc surnames."""
        if self.is_prefix(word) or self.is_conjunction(word):
            return lc(word)
        if word in self.capitalization_exceptions:
            return self.capitalization_exceptions[word]
        mac_match = re_mac.match(word)
        if mac_match:
            def cap_after_mac(m):
                return m.group(1).capitalize() + m.group(2).capitalize()
            return re_mac.sub(cap_after_mac, word)
        else:
            return word.capitalize()

    def cap_piece(self, piece):
        """Capitalize every word of *piece* via cap_word()."""
        if not piece:
            return ""
        replacement = lambda m: self.cap_word(m.group(0))
        return re.sub(re_word, replacement, piece)

    def capitalize(self):
        """Fix the case of an all-upper or all-lower name, in place.

        Mixed-case names are assumed to be correct already and left alone.
        NOTE(review): suffixes are re-split on ' ' although they join with
        ', ', so multi-suffix names may be re-tokenized oddly — confirm.
        """
        name = unicode(self)
        if not (name == name.upper() or name == name.lower()):
            return
        self.title = self.cap_piece(self.title)
        self.first = self.cap_piece(self.first)
        self.middle_names = self.cap_piece(self.middle).split(' ')
        self.last_names = self.cap_piece(self.last).split(' ')
        self.suffixes = self.cap_piece(self.suffix).split(' ')
ELPC works in Illinois to advance wind and solar energy, improve energy efficiency policies, clean up old and dirty coal plants, reduce diesel pollution, expand adoption of electric vehicles, and preserve and protect Lake Michigan and the Chicago River. Additionally, many of ELPC’s region-wide efforts are centered in Chicago – the spoke-and-wheel Midwest High-Speed Rail Network radiates out from Chicago to small and large cities within a 400-mile radius; the MISO energy transmission grid touches all of ELPC’s states, with many transmission lines running to Chicago’s dense urban population; and the Chicagoland area also hosts important waterway connections between the Great Lakes and the Mississippi River. ELPC’s staff works out of our LEED Platinum headquarters in Chicago and makes a big difference throughout Illinois and the Midwest. ELPC works with a diverse group of allies in Illinois, including environmental, public health and community organizations, as well as universities, unions, trade associations, chambers of commerce and private industry. Clean Energy – ELPC has led the charge to transition Illinois from having little renewable energy installed and virtually no utility-sponsored energy efficiency programs to a position of national policy leadership. Years of work led to significant policy breakthroughs when Illinois enacted some of the nation’s leading Renewable Energy Standards (RES), Energy Efficiency Performance Standards, and distributed energy standards. Clean Air – ELPC’s “clean up or shut down” strategic legal advocacy over a decade with our grassroots community and public health partners resulted in the Fisk and Crawford coal plants shutting down in August 2012. These were the largest, most polluting coal plants operating in city neighborhoods. ELPC’s litigation pressure before the Illinois Pollution Control Board and our legislative lobbying and leadership on the groundbreaking Chicago Clean Power Coalition helped result in a coal-free Chicago. 
Clean Water – For years, Chicago was one of the few major cities in which wastewater was not disinfected prior to discharge into the river. ELPC’s and our colleagues’ persistent and effective advocacy over six years succeeded in 2011. The U.S. EPA and the Illinois Pollution Control Board directed the Metropolitan Water Reclamation District to install modern pollution control equipment to disinfect wastewater, and the District is complying. Clean Transportation – ELPC provided legal representation and guidance to grassroots groups opposed to the “Prairie Parkway” highway proposal, which was successfully squashed in 2012. We advocated smarter “fix it first” alternatives that achieved transportation and job creation goals without disrupting communities and harming natural resources. Wild & Natural Places – Bell Smith Springs is a beautiful part of the Shawnee National Forest, where streams have carved out interesting rock formations and natural springs form large pools of pristine water. ELPC protected this special place in Illinois from unnecessary logging that would have caused significant soil erosion and damaged the springs. Our emergency injunction halted the logging until a thorough environmental analysis could be completed.
#!/data/project/nullzerobot/python/bin/python
import wtforms.validators
from wtforms.validators import *
from wtforms.validators import ValidationError
from messages import msg
import re

##############################


class _Required(Required):
    """``Required`` that falls back to the project's localized message."""

    def __init__(self, *args, **kwargs):
        # Any absent or falsy message is replaced by the localized default.
        if not kwargs.get('message'):
            kwargs['message'] = msg['validator-require']
        super(_Required, self).__init__(*args, **kwargs)


wtforms.validators.Required = _Required

##############################


class _NumberRange(NumberRange):
    """``NumberRange`` that falls back to the project's localized message."""

    def __init__(self, *args, **kwargs):
        # Only an absent message gets the default; explicit values win.
        if 'message' not in kwargs:
            kwargs['message'] = msg['validator-mustbe-in-min-max']
        super(_NumberRange, self).__init__(*args, **kwargs)


wtforms.validators.NumberRange = _NumberRange

##############################


# Have to do like this because the original Email.__init__ contains Email itself
def Email__init__(self, message=msg['validator-invalid-email']):
    super(Email, self).__init__(r'^.+@[^.].*\.[a-z]{2,10}$', re.IGNORECASE,
                                message)


wtforms.validators.Email.__init__ = Email__init__

##############################


def _Number(negative=False, decimal=False):
    """Build a validator that accepts digit strings, optionally allowing
    a minus sign and/or a decimal point."""
    allowed = [r'\d']
    if negative:
        allowed.append('-')
    if decimal:
        allowed.append(r'\.')
    # Pattern is fixed once the factory runs, so hoist it out of the closure.
    pattern = '^[' + ''.join(allowed) + ']+$'

    def validate(form, field):
        if not field.data or not re.match(pattern, field.data):
            raise ValidationError(msg['validator-not-number'])

    return validate


wtforms.validators.Number = _Number

##############################


def _Wiki():
    """Build a validator rejecting empty values and '#' characters."""

    def validate(form, field):
        if not field.data or any(ch in field.data for ch in '#'):
            raise ValidationError(msg['validator-not-wiki'])

    return validate


wtforms.validators.Wiki = _Wiki

##############################


class _IgnoreMe(object):
    """Placeholder validator: swallows all arguments, validates nothing."""

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        pass


wtforms.validators.IgnoreMe = _IgnoreMe

##############################
Ahoy matey! Cruise the lake and learn about Big Bear Lake, and see sights like the solar observatory, China Island, celebrity homes, the story of the Four Dams and other fun stuff. This entertaining tour will complete your Big Bear ROMP experience! Round-trip transportation is included, and cocktails, beer and wine are available for purchase on board.
#!/usr/bin/env python
"""
usage: plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl

Loads any number of .pkl files produced by train.py. Extracts all of their
monitoring channels and prompts the user to select a subset of them to be
plotted.
"""
from __future__ import print_function

__authors__ = "Ian Goodfellow, Harm Aarts"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"

import argparse
import gc
import sys

import numpy as np

from theano.compat.six.moves import input, xrange
from theano.printing import _TagGenerator

from pylearn2.utils import serial
from pylearn2.utils import contains_nan, contains_inf
from pylearn2.utils.string_utils import number_aware_alphabetical_key

# Channel name (possibly postfixed with ":<model name>") -> monitor channel,
# accumulated over all loaded models.
channels = {}


def unique_substring(s, other, min_size=1):
    """Return the shortest substring of *s*, of length at least *min_size*,
    that occurs in none of the strings in *other*.

    Falls back to *s* itself when no such substring exists.
    """
    size = min(len(s), min_size)
    while size <= len(s):
        for pos in xrange(0, len(s) - size + 1):
            rval = s[pos:pos + size]
            fail = False
            for o in other:
                if o.find(rval) != -1:
                    fail = True
                    break
            if not fail:
                return rval
        size += 1
    # no unique substring
    return s


def unique_substrings(l, min_size=1):
    """For each string in *l*, return a substring (length >= *min_size*)
    unique to it among the other strings — used to build short model names.
    """
    return [unique_substring(s, [x for x in l if x is not s], min_size)
            for s in l]


def main():
    """Load the models, then loop: prompt for channels and plot them."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--out")
    parser.add_argument("model_paths", nargs='+')
    parser.add_argument("--yrange",
                        help='The y-range to be used for plotting, e.g. 0:1')

    options = parser.parse_args()
    model_paths = options.model_paths

    # When writing to a file, switch to a non-interactive backend before
    # pyplot is imported.
    if options.out is not None:
        import matplotlib
        matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    print('generating names...')
    model_names = [model_path.replace('.pkl', '!')
                   for model_path in model_paths]
    model_names = unique_substrings(model_names, min_size=10)
    model_names = [model_name.replace('!', '') for model_name in model_names]
    print('...done')

    for i, arg in enumerate(model_paths):
        try:
            model = serial.load(arg)
        except Exception:
            if arg.endswith('.yaml'):
                # BUGFIX: the original passed sys.stderr as a *positional*
                # argument too (printing the file object) and was missing a
                # space between the concatenated message halves.
                print(arg + " is a yaml config file, "
                      "you need to load a trained model.",
                      file=sys.stderr)
                quit(-1)
            raise
        this_model_channels = model.monitor.channels

        # BUGFIX: the original tested len(sys.argv) > 2, which misfires when
        # options such as --out are passed with a single model. Postfix the
        # channel names only when several models are being compared.
        if len(model_paths) > 1:
            postfix = ":" + model_names[i]
        else:
            postfix = ""

        for channel in this_model_channels:
            channels[channel + postfix] = this_model_channels[channel]
        # Free the model before loading the next one.
        del model
        gc.collect()

    while True:
        # Make a list of short codes for each channel so user can specify
        # them easily
        tag_generator = _TagGenerator()
        codebook = {}
        sorted_codes = []
        for channel_name in sorted(channels,
                                   key=number_aware_alphabetical_key):
            code = tag_generator.get_tag()
            codebook[code] = channel_name
            # Also allow selecting a channel by its full "<name>" form.
            codebook['<' + channel_name + '>'] = channel_name
            sorted_codes.append(code)

        x_axis = 'example'
        print('set x_axis to example')

        if not channels:
            print("there are no channels to plot")
            break

        # If there is more than one channel in the monitor ask which ones to
        # plot
        prompt = len(channels) > 1

        if prompt:

            # Display the codebook
            for code in sorted_codes:
                print(code + '. ' + codebook[code])

            print()

            print("Put e, b, s or h in the list somewhere to plot "
                  "epochs, batches, seconds, or hours, respectively.")
            response = input('Enter a list of channels to plot '
                             '(example: A, C,F-G, h, <test_err>) or q to quit'
                             ' or o for options: ')

            if response == 'o':
                print('1: smooth all channels')
                print('any other response: do nothing, go back to plotting')
                response = input('Enter your choice: ')
                if response == '1':
                    # Replace each value with the running mean of the last
                    # k+1 values.
                    for channel in channels.values():
                        k = 5
                        new_val_record = []
                        for i in xrange(len(channel.val_record)):
                            new_val = 0.
                            count = 0.
                            for j in xrange(max(0, i - k), i + 1):
                                new_val += channel.val_record[j]
                                count += 1.
                            new_val_record.append(new_val / count)
                        channel.val_record = new_val_record
                continue

            if response == 'q':
                break

            # Remove spaces
            response = response.replace(' ', '')

            # Split into list
            codes = response.split(',')

            final_codes = set([])

            for code in codes:
                if code == 'e':
                    x_axis = 'epoch'
                    continue
                elif code == 'b':
                    x_axis = 'batche'
                elif code == 's':
                    x_axis = 'second'
                elif code == 'h':
                    x_axis = 'hour'
                elif code.startswith('<'):
                    assert code.endswith('>')
                    final_codes.add(code)
                elif code.find('-') != -1:
                    # The current list element is a range of codes
                    rng = code.split('-')
                    if len(rng) != 2:
                        print("Input not understood: " + code)
                        quit(-1)

                    found = False
                    for i in xrange(len(sorted_codes)):
                        if sorted_codes[i] == rng[0]:
                            found = True
                            break
                    if not found:
                        print("Invalid code: " + rng[0])
                        quit(-1)

                    found = False
                    for j in xrange(i, len(sorted_codes)):
                        if sorted_codes[j] == rng[1]:
                            found = True
                            break
                    if not found:
                        print("Invalid code: " + rng[1])
                        quit(-1)

                    final_codes = final_codes.union(set(sorted_codes[i:j + 1]))
                else:
                    # The current list element is just a single code
                    final_codes.add(code)
            # end for code in codes
        else:
            # BUGFIX: the original read `final_codes ,= set(codebook.keys())`,
            # i.e. single-element tuple unpacking, which raises ValueError
            # because codebook always holds two keys per channel (the short
            # code and "<name>"). Plot each channel exactly once.
            final_codes = set(sorted_codes)

        colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
        styles = list(colors)
        styles += [color + '--' for color in colors]
        styles += [color + ':' for color in colors]

        fig = plt.figure()
        ax = plt.subplot(1, 1, 1)

        # plot the requested channels
        for idx, code in enumerate(sorted(final_codes)):

            channel_name = codebook[code]
            channel = channels[channel_name]

            y = np.asarray(channel.val_record)

            if contains_nan(y):
                print(channel_name + ' contains NaNs')

            if contains_inf(y):
                # BUGFIX: missing space in the original message.
                print(channel_name + ' contains infinite values')

            if x_axis == 'example':
                x = np.asarray(channel.example_record)
            elif x_axis == 'batche':
                x = np.asarray(channel.batch_record)
            elif x_axis == 'epoch':
                try:
                    x = np.asarray(channel.epoch_record)
                except AttributeError:
                    # older saved monitors won't have epoch_record
                    x = np.arange(len(channel.batch_record))
            elif x_axis == 'second':
                x = np.asarray(channel.time_record)
            elif x_axis == 'hour':
                x = np.asarray(channel.time_record) / 3600.
            else:
                assert False

            ax.plot(x,
                    y,
                    styles[idx % len(styles)],
                    marker='.',  # add point markers to lines
                    label=channel_name)

        plt.xlabel('# ' + x_axis + 's')
        ax.ticklabel_format(scilimits=(-3, 3), axis='both')

        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels, loc='upper left',
                  bbox_to_anchor=(1.05, 1.02))

        # Get the axis positions and the height and width of the legend
        plt.draw()
        ax_pos = ax.get_position()
        pad_width = ax_pos.x0 * fig.get_size_inches()[0]
        pad_height = ax_pos.y0 * fig.get_size_inches()[1]
        dpi = fig.get_dpi()
        lgd_width = ax.get_legend().get_frame().get_width() / dpi
        lgd_height = ax.get_legend().get_frame().get_height() / dpi

        # Adjust the bounding box to encompass both legend and axis. Axis
        # should be 3x3 inches. I had trouble getting everything to align
        # vertically.
        ax_width = 3
        ax_height = 3
        total_width = 2 * pad_width + ax_width + lgd_width
        total_height = 2 * pad_height + np.maximum(ax_height, lgd_height)

        fig.set_size_inches(total_width, total_height)
        ax.set_position([pad_width / total_width,
                         1 - 6 * pad_height / total_height,
                         ax_width / total_width,
                         ax_height / total_height])

        if options.yrange is not None:
            ymin, ymax = map(float, options.yrange.split(':'))
            plt.ylim(ymin, ymax)

        if options.out is None:
            plt.show()
        else:
            plt.savefig(options.out)

        if not prompt:
            break


if __name__ == "__main__":
    main()
Gina Long’s professional life is one of contrasts. With an address book that is the envy of Suffolk and beyond, and a dream to help the most needy families in her home county, Gina has successfully interwoven the two in a 40-year career as a charity campaigner, which last year saw her achievements recognised with an MBE. She has always felt blessed to have the continued support of family, friends, business leaders, dignitaries and more, but there’s a reason everything Gina touches turns golden for the national and local charities she supports. Ask anyone who knows her and the words ‘passionate’, ‘enthusiastic’, ‘dedicated’ and ‘driven’ come tumbling out. Now she has channelled that passion to help others through her own charitable trust, GeeWizz, in a bid to make more of a direct impact on the lives of disabled people and families affected by cancer. Explaining the origins of the charity, Gina says the guiding principle is transparency and trust. The charity might be less than a year old, but like her past successes, it is heading towards a hugely successful first birthday. Never happy having just one big project to organise, with weeks to go before the charity ball in The Hangar at Milsoms Kesgrave on October 14, Gina was in the middle of planning her next global Ultimate Charity Auction – www.ultimatecharityauction.com – a two-week online fundraiser supporting four national and local charities, which gives people the chance to bid on a vast array of money-can’t-buy prizes. This year’s auction – Saturday October 22 to Sunday November 6 – is expected to raise £1 million for the first time since this successful fundraising format was first started by Gina in 2012. 
The ball, organised in partnership with Suffolk Magazine and Paul Milsom, owner of Milsoms, sold out four months before the big event, and GeeWizz has already gained valuable support from William van Cutsem, who along with legendary former Ipswich Town and England defender Terry Butcher are the charity’s ambassadors. The event has attracted other high-profile names, with support from former England cricketer and broadcaster Graeme Swann, Suffolk operatic star Laura Wright, who will be performing, and popular television presenter Chris Hollins, acting as MC for the evening. He will be joined by Graeme Swann later in the evening. Gina’s career reads like a who’s who of charities. She was one of the founding members of the Suffolk Breakthrough Breast Cancer group, which announced last year that it had raised £1 million to help fund research into the disease, while the Ultimate Charity Auction, now in its fifth year, has supported the likes of the Sir Bobby Robson Foundation, Breast Care Now, East Anglia’s Children’s Hospice and Ipswich Hospital. This year, the auction will support community football projects run by In The Game, the League Managers Association charity, Sarcoma UK, GeeWizz and the My WiSH Charity, supporting Gina’s local hospital, the West Suffolk. Gina, and her husband, Andrew, created the Classic Sports Cars by the Lake, held annually at their farm to raise money for St Nicholas Hospice Care. She was also part of the Suffolk fundraising committee of the Prince’s Trust, helping to raise more than £400,000, and was chairman of EACH’s special events committee, covering Suffolk, Norfolk, Cambridgeshire and North Essex. She is patron of the Bury St Edmunds Womens Aid Centre, and the Ipswich and Suffolk Business Club, as well as the first ambassador of West Suffolk Hospital and its My WiSH Charity. Gina started fundraising at the age of 13, organising a 24-hour swimathon in Ipswich and raising £1,300 for children with special needs. 
“I have always believed very strongly in the positive difference charity can make on individuals, families and communities. That is what drives me on.” She has two children, Samuel and Alexandra, and five stepchildren, James, Charles, Georgina, Thomas and Olivia. “Over the many years doing what I love, I have finally worked out the right work/life balance. My family life will always come first. I feel very blessed to have been given such wonderful opportunities, to work with some amazing people and to have such a supportive family around me. “I live by the Gandhi quote, ‘Service to others is the rent you pay for your room here on earth.’” She established GeeWizz because she wanted to know exactly where every penny of the money she raised was going. “It is not just about the money. I work pro-actively with families who may have a sick child and don’t know where to turn to. Their lives could be dramatically improved by having an upgraded or electric wheelchair, rather than a manual one – something that may not get funded elsewhere.