text
stringlengths
29
850k
__version__ = '0.1.1'
__author__ = 'Juan Batiz-Benet'
__email__ = 'juan@benet.ai'
__doc__ = '''
pylru datastore implementation.

Tested with:
* datastore 0.3.0
* pylru 1.0.5

'''

import pylru
import datastore.core


class LRUCacheDatastore(datastore.Datastore):
    '''Represents an LRU cache datastore, backed by pylru.

    Hello World:

        >>> import datastore.pylru
        >>>
        >>> ds = datastore.pylru.LRUCacheDatastore(100)
        >>>
        >>> hello = datastore.Key('hello')
        >>> ds.put(hello, 'world')
        >>> ds.contains(hello)
        True
        >>> ds.get(hello)
        'world'
        >>> ds.delete(hello)
        >>> ds.get(hello)
        None

    '''

    def __init__(self, size):
        '''Initializes the datastore with an LRU cache of at most `size` entries.'''
        self._cache = pylru.lrucache(size)

    def __len__(self):
        '''Returns the number of objects currently cached.'''
        return len(self._cache)

    def clear(self):
        '''Removes all objects from this datastore.'''
        self._cache.clear()

    def get(self, key):
        '''Return the object named by key, or None if it is not present.'''
        # EAFP: the lookup itself refreshes the entry's LRU recency.
        try:
            return self._cache[key]
        except KeyError:
            # FIX: was `except KeyError, e:` — Python-2-only syntax with an
            # unused binding. This form is valid on both Python 2 and 3.
            return None

    def put(self, key, value):
        '''Stores the object.'''
        self._cache[key] = value

    def delete(self, key):
        '''Removes the object.'''
        if key in self._cache:
            del self._cache[key]

    def contains(self, key):
        '''Returns whether the object is in this datastore.'''
        return key in self._cache

    def query(self, query):
        '''Returns a sequence of objects matching criteria expressed in `query`'''
        # entire dataset already in memory, so ok to apply query naively
        return query(self._cache.values())
Swimming Pool & Spa Services. you can relax with a pro. Whether you have an indoor or outdoor spa PPS Victoria trained experts specialize in spa maintenance and repairs. Transform your spa back to its original haven of relaxation and tranquility. We offer maintenance and repairs for most makes and models and we ensure our service is reliable and cost effective. PPS Victoria offers its clients a broad range of spa services, ranging from full onsite system health check, regular spa maintenance services, spa water testing, cleaning services, system upgrade to spa accessories. We supply and install all major brands and models which include Davey, Gecko, Spa-Quip and others. Spa maintenance includes water testing and balance, cleaning filters, equipment check and advice to ensure that your spa is safe and running at its most optimum performance. PPS Victoria offers a variety of repair services. This includes equipment health check, repair services, replacement parts and equipment up-grades. We service most makes and models which include: Spa Controllers, Spa Circulation Pumps, Spa Heater Systems (Electrical or Gas), Ozone Generation Systems, Valves, Spa Booster Pumps, Spa Blowers, Spa Lights, Jets and Fittings. PPS Victoria spa technicians have the skills required to analyse and repair your existing spa and hot tub. Our services range from repair, upgrades, installation and maintenance. PPS Victoria supplies and supports all major spa equipment brands and models.
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Provide color related properties. ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import logging log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports # External imports # Bokeh imports from ... import colors from .. import enums from .bases import Property from .container import Tuple from .enum import Enum from .either import Either from .numeric import Byte, Percent from .regex import Regex #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'Color', 'RGB', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- class RGB(Property): ''' Accept colors.RGB values. ''' def validate(self, value, detail=True): super(RGB, self).validate(value, detail) if not (value is None or isinstance(value, colors.RGB)): msg = "" if not detail else "expected RGB value, got %r" % (value,) raise ValueError(msg) class Color(Either): ''' Accept color values in a variety of ways. 
For colors, because we support named colors and hex values prefaced with a "#", when we are handed a string value, there is a little interpretation: if the value is one of the 147 SVG named colors or it starts with a "#", then it is interpreted as a value. If a 3-tuple is provided, then it is treated as an RGB (0..255). If a 4-tuple is provided, then it is treated as an RGBa (0..255), with alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.) Example: .. code-block:: python >>> class ColorModel(HasProps): ... prop = Color() ... >>> m = ColorModel() >>> m.prop = "firebrick" >>> m.prop = "#a240a2" >>> m.prop = (100, 100, 255) >>> m.prop = (100, 100, 255, 0.5) >>> m.prop = "junk" # ValueError !! >>> m.prop = (100.2, 57.3, 10.2) # ValueError !! ''' def __init__(self, default=None, help=None): types = (Enum(enums.NamedColor), Regex("^#[0-9a-fA-F]{6}$"), Regex("^rgba\(((25[0-5]|2[0-4]\d|1\d{1,2}|\d\d?)\s*," "\s*?){2}(25[0-5]|2[0-4]\d|1\d{1,2}|\d\d?)\s*," "\s*([01]\.?\d*?)\)"), Regex("^rgb\(((25[0-5]|2[0-4]\d|1\d{1,2}|\d\d?)\s*," "\s*?){2}(25[0-5]|2[0-4]\d|1\d{1,2}|\d\d?)\s*?\)"), Tuple(Byte, Byte, Byte), Tuple(Byte, Byte, Byte, Percent), RGB) super(Color, self).__init__(*types, default=default, help=help) def __str__(self): return self.__class__.__name__ def transform(self, value): if isinstance(value, tuple): value = colors.RGB(*value).to_css() return value def _sphinx_type(self): return self._sphinx_prop_link() #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
815 WEST 180TH GROUP LLC (DOS ID 5437170) is a corporation registered with the New York State Department of State (NYSDOS). The initial filing date is November 2, 2018. DOS Process Address 512 7th Avenue 6th Fl. Street Address 512 7TH AVENUE 6TH FL. Please comment or provide details below to improve the information on 815 WEST 180TH GROUP LLC.
from django import forms
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _

from allauth.account.forms import SignupForm, LoginForm
from allauth.account import app_settings

from userprofile.models import UserProfile
from core.utils import USER_TYPE_CHOICES
from core.utils import USER_TYPE


PAYMENT_TYPE_CHOICES = (
    ('paypal', _(u'PayPal')),
    ('amazon', _(u'Amazon')),
)


class CommonForm(forms.Form):
    """Profile fields shared by the signup/profile flows (name, location,
    contact and payment details)."""
    first_name = forms.CharField(max_length=30, required=True,
                                 label=_(u'First name'))
    middle_name = forms.CharField(max_length=30, required=False,
                                  label=_(u'Middle name'))
    last_name = forms.CharField(max_length=30, required=True,
                                label=_(u'Last name'))
    address = forms.CharField(max_length=255, required=True,
                              label=_(u'Approximate address'))
    lng = forms.CharField(required=True, label=_(u'Longitude'), max_length=255)
    lat = forms.CharField(required=True, label=_(u'Latitude'), max_length=255)
    mobile = forms.CharField(max_length=30, required=False,
                             label=_(u'Mobile number'))
    payment_type = forms.ChoiceField(required=False, label=_(u'Payment type'),
                                     choices=PAYMENT_TYPE_CHOICES)
    payment_account = forms.CharField(max_length=30, required=False,
                                      label=_(u'Payment id'))

    class Meta:
        # NOTE(review): ``Meta.model``/``exclude`` have no effect on a plain
        # ``forms.Form`` (only on ``ModelForm``) — confirm whether this was
        # intended to be a ModelForm.
        model = UserProfile
        exclude = (
            'available_balance',
        )


class ParticipantSignupForm(SignupForm):
    """Signup form that flags the new user's profile as a participant."""

    def create_user(self, commit=True):
        user = super(ParticipantSignupForm, self).create_user()
        user_profile = UserProfile.objects.get_or_create(user=user)[0]
        user_profile.is_participant = True
        user_profile.save()
        return user


class ScientistSignupForm(SignupForm):
    """Signup form for scientists; scientists are also participants."""

    def create_user(self, commit=True):
        user = super(ScientistSignupForm, self).create_user()
        user_profile = UserProfile.objects.get_or_create(user=user)[0]
        user_profile.is_scientist = True
        user_profile.is_participant = True
        user_profile.save()
        return user


class DepartmentSignupForm(SignupForm):
    """Signup form for departments; departments get all three roles."""

    def create_user(self, commit=True):
        user = super(DepartmentSignupForm, self).create_user()
        user_profile = UserProfile.objects.get_or_create(user=user)[0]
        user_profile.is_department = True
        user_profile.is_scientist = True
        user_profile.is_participant = True
        user_profile.save()
        return user


class NewLoginForm(LoginForm):
    """Login form that additionally checks the selected user type against
    the roles recorded on the user's profile."""
    user_type = forms.ChoiceField(choices=USER_TYPE_CHOICES)

    def clean(self):
        # Field-level errors already present: skip cross-field validation.
        if self._errors:
            return
        user = authenticate(**self.user_credentials())
        if user:
            if user.is_active:
                self.user = user
                user_type = self.data['user_type']
                if not user_type or user_type not in USER_TYPE:
                    raise forms.ValidationError(_("Incorrect user type."))
                profile = UserProfile.objects.get_or_create(user=user)[0]
                if not profile.has_role(user_type):
                    # FIX: interpolate *after* translation so the template
                    # string (not the pre-formatted result) is what gets
                    # translated; also corrected "do not" -> "does not".
                    raise forms.ValidationError(
                        _("The user does not have the user type: %s.") % user_type)
            else:
                raise forms.ValidationError(_("This account is currently"
                                              " inactive."))
        else:
            if app_settings.AUTHENTICATION_METHOD == 'email':
                error = _("The e-mail address and/or password you specified"
                          " are not correct.")
            elif app_settings.AUTHENTICATION_METHOD == 'username':
                error = _("The username and/or password you specified are"
                          " not correct.")
            else:
                error = _("The login and/or password you specified are not"
                          " correct.")
            raise forms.ValidationError(error)
        return self.cleaned_data
Louisa May Alcott was an American novelist. The novel Little Women, for which she is best known, was published in 1868. It is loosely based on her childhood experiences with her three sisters. Louisa May Alcott's overwhelming success dated from the appearance of the first part of Little Women: or Meg, Jo, Beth and Amy, (1868) a semiautobiographical account of her childhood years with her sisters in Concord, Massachusetts. A sequel, Good Wives, (1869) followed the March sisters into adulthood and their respective marriages. Little Men (1871) detailed the characters and ways of her nephews who lived with her at Orchard House in Concord.
# -*- coding: utf-8 -*-
"""
    celery.task.sets
    ~~~~~~~~~~~~~~~~

    Old ``group`` implementation, this module should not be used
    anymore use :func:`celery.group` instead.

"""
from __future__ import absolute_import
from __future__ import with_statement

from celery._state import get_current_worker_task
from celery.app import app_or_default
from celery.canvas import subtask, maybe_subtask  # noqa
from celery.utils import uuid


class TaskSet(list):
    """A task containing several subtasks, making it possible
    to track how many, or when all of the tasks have been completed.

    :param tasks: A list of :class:`subtask` instances.

    Example::

        >>> urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss')
        >>> s = TaskSet(refresh_feed.s(url) for url in urls)
        >>> taskset_result = s.apply_async()
        >>> list_of_return_values = taskset_result.join()  # *expensive*

    """
    #: App instance; resolved through app_or_default() in __init__.
    app = None

    def __init__(self, tasks=None, app=None, Publisher=None):
        # Normalize every entry to a signature (subtask) up front.
        super(TaskSet, self).__init__(maybe_subtask(t) for t in tasks or [])
        self.app = app_or_default(app or self.app)
        # Producer class used to publish the subtasks to the broker.
        self.Publisher = Publisher or self.app.amqp.TaskProducer
        self.total = len(self)  # XXX compat

    def apply_async(self, connection=None, connect_timeout=None,
                    publisher=None, taskset_id=None):
        """Apply TaskSet."""
        app = self.app
        # Eager mode: execute everything locally and synchronously instead
        # of publishing to the broker.
        if app.conf.CELERY_ALWAYS_EAGER:
            return self.apply(taskset_id=taskset_id)
        with app.connection_or_acquire(connection, connect_timeout) as conn:
            # All subtasks share one taskset id so they can be joined later.
            setid = taskset_id or uuid()
            pub = publisher or self.Publisher(conn)
            results = self._async_results(setid, pub)
            result = app.TaskSetResult(setid, results)
            # When applied from inside a running worker task, register the
            # result as a child of that task so it can be tracked/revoked.
            parent = get_current_worker_task()
            if parent:
                parent.request.children.append(result)
            return result

    def _async_results(self, taskset_id, publisher):
        # Publish every subtask on the shared publisher/taskset id.
        return [task.apply_async(taskset_id=taskset_id, publisher=publisher)
                for task in self]

    def apply(self, taskset_id=None):
        """Applies the TaskSet locally by blocking until all tasks return."""
        setid = taskset_id or uuid()
        return self.app.TaskSetResult(setid, self._sync_results(setid))

    def _sync_results(self, taskset_id):
        # Execute each subtask synchronously, in list order.
        return [task.apply(taskset_id=taskset_id) for task in self]

    def _get_tasks(self):
        return self

    def _set_tasks(self, tasks):
        self[:] = tasks
    #: Compat alias: ``ts.tasks`` reads or replaces the list contents.
    tasks = property(_get_tasks, _set_tasks)
Something Wicked music festival occurs in Houston, Texas every year. This year's lineup included artists such as Borgore, Jack Ü and The Chainsmokers. Fans received some disappointing news when Something Wicked released a statement in regards to Day 2 of the festival. Day 2 was cancelled due to severe weather in the area. In the festival's statement, they addressed that details about refunds will be announced on Monday. Something Wicked is disappointed to announce DAY 2 of Something Wicked 2015 is now canceled due to severe weather. The decision to cancel Day 2 adheres to numerous weather reports and warnings. Fan and employee safety are paramount. The severe weather hitting the Houston area is a result of Hurricane Patricia. Hurricane Patricia hit land in Mexico and is reported to be the strongest hurricane ever recorded. Its path included Houston, TX, which would result in life-threatening conditions, causing flooding and wind damage. Some of the artists expected to perform on day 2 were Jack Ü, The Chainsmokers, Steve Aoki, Cash Cash, and Jay Hardway. Day 2 was filled with big names and amazing sets. It's a shame mother nature created this historic storm which has damaged the lives of many. The cancellation of Something Wicked has potentially saved many from putting themselves in danger due to the magnitude of the storm.
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocesses data from listening tests."""
import json
import os
import random
from typing import Dict, List, Any, Tuple
import data_plotting
import data_helpers
import pprint
import csv
from absl import app
from absl import flags
import numpy as np

FLAGS = flags.FLAGS
flags.DEFINE_string(
    "input_file_path",
    "extra_data/probes_two_tone_set_extra_data.csv",
    "JSON file with answers per annotator.")
flags.DEFINE_string(
    "output_directory",
    "extra_data",
    "Directory to save preprocessed data in.")
flags.DEFINE_integer(
    "seed", 2, "Random seed."
)
flags.DEFINE_integer("min_frequency", 20, "Minimum frequency for a tone.")
flags.DEFINE_integer("max_frequency", 20000, "Maximum frequency for a tone.")
flags.DEFINE_integer(
    "unity_decibel_level", 90,
    "Which decibel level equals a sine at unity in the wavefiles."
)


def get_data(
    input_file: str, save_directory: str,
    critical_bands: List[int],
    unity_db_level: int) -> List[Dict[str, Any]]:
  """Parses the answers CSV into a list of example dicts.

  Each CSV row (after the header) encodes a masker/probe pair in column 2 as
  "[[masker_freq,masker_level],[probe_freq,probe_level]]" and a wavfile path
  in column 4. The returned examples carry empty "perceived_probe_levels" /
  "worker_ids" lists to be filled in later.

  NOTE(review): `save_directory`, `critical_bands` and `unity_db_level` are
  accepted but unused here — presumably kept for signature parity with other
  preprocessing scripts; confirm before removing.
  """
  with open(input_file, "r") as infile:
    csvreader = csv.reader(infile, delimiter=',')
    data = []
    for i, raw_example_line in enumerate(csvreader):
      # Skip the header row.
      if i == 0:
        continue
      # Column 2 looks like "[[masker_freq,masker_level],[probe_freq,probe_level]]".
      example_specs = raw_example_line[2].split("],[")
      masker_frequency, masker_level = example_specs[0].strip("[[").split(",")
      probe_frequency, probe_level = example_specs[1].strip("]]").split(",")
      # Keep only the basename of the wavfile path.
      wavfile_identifier = raw_example_line[4].split("/")[-1]
      example = {
          "probe_frequency": float(probe_frequency),
          "probe_level": int(probe_level),
          "perceived_probe_levels": [],
          "worker_ids": [],
          "masker_frequency": float(masker_frequency),
          "masker_level": int(masker_level),
          "wavfile_identifier": wavfile_identifier
      }
      data.append(example)
  return data


def prepare_data_modeling(train_set: List[Dict[str, Any]],
                          curves_file: str,
                          save_directory: str):
  """Matches train-set answers to masking curves and writes the result.

  Builds a lookup table over `train_set`, walks the curves in `curves_file`,
  converts matched answers to masking values (probe_level minus perceived
  level, clipped at 0), marks unmatched or known-bad probes as "failed",
  plots the curves and dumps the matched examples to
  `preprocessed_train_set.json`. Returns the number of matched answers.
  """
  lookup_table = data_helpers.AnswerLookupTable()
  for example in train_set:
    lookup_table.add(example["masker_frequency"], example["probe_level"],
                     example["masker_level"], example["probe_frequency"],
                     example)
  preprocessed_train_set = []
  with open(curves_file, "r") as infile:
    answers_matched = 0
    curve_data = json.load(infile)
    for i, masker_probe_curves in enumerate(curve_data):
      masker_frequency = float(masker_probe_curves["masker_frequency"])
      probe_level = int(masker_probe_curves["probe_level"])
      curves = masker_probe_curves["curves"]
      for j, curve in enumerate(curves):
        # Track indices of probes with no usable answer for this curve.
        curve_data[i]["curves"][j]["failed"] = []
        masker_level = int(curve["masker_level"])
        probe_frequencies = curve["probe_frequencies"]
        for k, probe_frequency in enumerate(probe_frequencies):
          probe_frequency = float(probe_frequency)
          example_answers = lookup_table.extract(masker_frequency,
                                                 probe_level, masker_level,
                                                 probe_frequency)
          if example_answers:
            answers = example_answers["perceived_probe_levels"]
            perceived_levels = np.array(answers)
            # Hardcoded removal of failed probes (too high frequency).
            if probe_frequency == 17625.0:
              curve_data[i]["curves"][j]["failed"].append(k)
            else:
              # Masking is how much quieter the probe was perceived than
              # presented; negative masking is clipped to zero.
              masking = probe_level - perceived_levels
              masking[masking < 0] = 0
              curve_data[i]["curves"][j]["probe_masking"][k] = list(masking)
              answers_matched += 1
              preprocessed_train_set.append(example_answers)
          else:
            curve_data[i]["curves"][j]["failed"].append(k)
  data_plotting.plot_masking_patterns_grid(curve_data,
                                           save_directory=save_directory)
  data_plotting.plot_masking_patterns(curve_data,
                                      save_directory=save_directory)
  with open(os.path.join(save_directory, "preprocessed_train_set.json"),
            "w") as outfile:
    json.dump(preprocessed_train_set, outfile, indent=4)
  return answers_matched


def main(argv):
  """Reads the answers CSV and writes the parsed train set as JSON."""
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")
  if not os.path.exists(FLAGS.input_file_path):
    raise ValueError("No data found at %s" % FLAGS.input_file_path)
  if not os.path.exists(FLAGS.output_directory):
    os.mkdir(FLAGS.output_directory)
  # Critical-band edges in Hz, bounded by the min/max frequency flags.
  critical_bands = [
      FLAGS.min_frequency, 100, 200, 300, 400, 505, 630, 770, 915, 1080,
      1265, 1475, 1720, 1990, 2310, 2690, 3125, 3675, 4350, 5250, 6350,
      7650, 9400, 11750, 15250, FLAGS.max_frequency
  ]
  random.seed(FLAGS.seed)
  data = get_data(FLAGS.input_file_path, FLAGS.output_directory,
                  critical_bands, FLAGS.unity_decibel_level)
  with open(os.path.join(FLAGS.output_directory, "extra_train_set.json"),
            "w") as outfile:
    json.dump(data, outfile, indent=4)


if __name__ == "__main__":
  app.run(main)
In 2017 Leaders in Energy continued to mature in its mission to build a community of leaders and a global action network to advance clean energy and sustainable solutions for a more sustainable energy system, economy, and world. Our membership continued to grow in the Washington DC area, with 1,500 members on our mailing list, in addition to our LinkedIn group with over 2,900 members. We have a presence in most major U.S. metropolitan areas and over 100 countries. Under the leadership of Executive Director Janine Finnell and our Board, Team Members and Advisors, the organization has provided important forums for clean energy and sustainability in the DC area, as well as nationally and globally. Our events last year came at a time of immense change and new threats, but also new opportunities to cement the transition to a green economy. Leadership has become one of the key pillars of our outreach. This past December leaders from four generations were recognized for their contributions to the clean energy field. For the first time, we gave a Lifetime Achievement Award, which was presented to S. David Freeman. Throughout the year attendees learned the leadership skills that are needed for the green economy. Elsewhere, members wrestled with the political leadership changes in this country and their implications for the lofty agenda set in the 2015 Paris Agreement and President Obama’s Clean Power Plan. The second pillar is sustainable communities, which includes encouraging clean energy technologies and promoting the principles of the circular economy. Leaders in Energy hosted a workshop in October on the circular economy and launched a working group to take a deep dive into the opportunities that are available in this area, so that we can move away from a throwaway society. Leaders in Energy partnered with the DC Chapter of the International Society of Sustainability Professionals and U.S. 
Green Building Council National Capital Region to visit the newly certified Living Building Challenge facility at the Alice Ferguson Foundation. (AFF visit – May) Later that year, AFF President and CEO Lori Arguelles won the Generation X award for leadership in sustainability. A significant part of the year was dedicated to resilience and energy security. For the first time, in June, Leaders in Energy hosted an author, Vienna-based Marc Elsberg, who wrote Blackout about a fictional cyberattack on the European grid. In June we were given a rather distressing wakeup call, as experts debated the level of cybersecurity in the power grid and discussed preparedness and response measures. (Energy Infrastructure and Cybersecurity forum – June) . Later in September, there was a small book discussion group to delve deeper into Elsberg’s book. At the Northern Virginia Community College (NVCC) Green Festival, we learned from Sandra Postel of National Geographic about the need to reduce our water footprint. (NVCC Festival – April) According to the World Economic Forum, water scarcity is a top global risk. It will be interesting to see if anything comes out of the 2018 WEF Annual Meeting in the next week on this. Our third pillar is where we take what we’ve learned and what we are passionate about and try to make some money. For the fourth year running, we held our Green Jobs Forum and job fair, highlighting Leaders in Energy members who have landed a green job or have become an entrepreneur. This year we were pleased to welcome Beth Offenbacker, PhD, as the facilitator of a Green Career workshop (to be reprised this January). 
This year we were supported by many important sponsors and partners, including ACE, ArlingtonGreen, Cadmus, Career Confidence, Carrier, Cisco, Coffeffe, The College of Agriculture, Urban Sustainability and Environmental Sciences (CAUSES) at the University of the District of Columbia, CSRA, Darktrace, DC Net Impact, DC Sustainable Energy Utility, Edison Electric Institute, Edge Hosting, eSai LLC, FilmBison Media, Green Leaders DC, GRID Alternatives Mid-Atlantic, Groupsense, Home Energy Medics, Industrial Ecosystems Partners, LLC, International Society of Sustainability Professionals, Johnson Controls, Longenecker & Associates, MeteoViva, Microgrid Knowledge, MOM’s Organic Market, Nissan Leaf, Potential Energy DC, Resilient Virginia, Tesla, The Solar Foundation, Sourcebooks, Thermaxx Jackets, United Nations Association of the National Capital Area (UNA-NCA), U.S. Green Building Council, VMDO, Water Management, Inc., Waterford, Inc., 40 Plus, among others. We invite you to join our community of leaders. Connect on LinkedIn, Facebook, and Twitter. There are many stories of our members connecting to find green jobs, get inspired, and collaborate on projects to make a difference by advancing clean energy and sustainable solutions for a more sustainable energy system, economy, and world.
# -*- coding: utf-8 -*-
"""obole URL Configuration

Routes requests to the DRF router-generated API endpoints, the Django admin
site, the project's user views, and the browsable-API login URLs.

See https://docs.djangoproject.com/en/1.10/topics/http/urls/ for details on
how ``urlpatterns`` is interpreted.
"""
from __future__ import absolute_import, unicode_literals

from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User

from rest_framework import routers, serializers, viewsets


class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked API representation of the built-in ``User`` model."""

    class Meta:
        model = User
        fields = ('url', 'username', 'email', 'is_staff')


class UserViewSet(viewsets.ModelViewSet):
    """Read/write endpoint exposing all users through ``UserSerializer``."""
    queryset = User.objects.all()
    serializer_class = UserSerializer


# The router derives the API URL conf automatically from registered viewsets.
router = routers.DefaultRouter()
#router.register(r'users', UserViewSet)

# Order matters: the catch-all prefixes are tried before later entries, and
# login URLs for the browsable API are included last.
urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^admin/', admin.site.urls),
    url(r'^', include('users.urls')),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]

# In development only, let Django serve static and media files itself.
if settings.DEBUG:
    from django.conf.urls.static import static

    urlpatterns += static(settings.STATIC_URL,
                          document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
Lettuce leaves with wasabi, a loin chop, three chicken nuggets, and some gin. Do you know Donald Baker (a former dental therapist, now a helicopter pilot) from London? Not personally, but my cousin has been briefly married to him. Then there was a public embarrassment about Donald fooling around with every stripper from London suburbia, even barely breathing ones, so their marriage came to a sudden conclusion. Which brand of toilet paper do you use? Since I have embraced ecology, I really can't use commercial products. My hand-made toilet paper is obtained from the fibers of Heterocisapa moronica, an almost extinct shrub native of Congo. Actually, I do. I have a tiny bullet shaped birthmark on my left heel. Probably my father did inadvertently eat a bullet while my mother was pregnant. How popular are you, on a scale of one to ten? I don't know. I think I'm a four in Mongolia, but a six in Boston. I heard that you will soon participate to a charity marathon. Can you tell us why'd you decide to undertake such an effort? I had to. Because of the astral conjunction, you know. And since when did you feel a need to make people aware of legionellosis? Between you and me, I couldn't care less about it. I guess so! I have to hug 2 random people who wear a black shirt before a relevant encounter. To be sincere, my boss had patiently set up my little exchange with Emily Blunt days beforehand. Unfortunately, I decided at the last moment that I had more interesting things to do, like growing mosses or learning Chinese. So, the interview above is essentially the impression of a dream that ensued after a generous dinner of wild boar stew and salami.
#! /usr/bin/python
"""Collect model scores from result files under the directory given as
argv[1] and print them to stdout as CSV, one row per scored file.

File names are expected to look like:
    <corpus>. ... .<function_states>.<content_states>.<model>.<curve>.<ext>
"""
import os
import re
import sys

BROWN_LCURVE = ("learningcurve0008", "learningcurve0016", "learningcurve0032",
                "learningcurve0064", "learningcurve0128", "learningcurve0256",
                "full")
WSJ_LCURVE = ("learningcurve0008", "learningcurve0016", "learningcurve0032",
              "learningcurve0064", "learningcurve0128", "learningcurve0256",
              "learningcurve0512", "learningcurve1024", "full")

# A score line is a run of decimal fractions, e.g. "0.51 0.42 0.63 0.55".
score_finder = re.compile(r"^(0\.\d+\s*)+$")

# Map each learning-curve label to a compact id: "l1", "l2", ...
brown_map = {}
for counter, c in enumerate(BROWN_LCURVE):
    brown_map[c] = "l%d" % (counter + 1)

wsj_map = {}
for counter, c in enumerate(WSJ_LCURVE):
    wsj_map[c] = "l%d" % (counter + 1)

# Known model ids (kept for reference; not used below).
models = ("m1", "m2", "m3", "m4", "m6")

inpath = os.path.abspath(os.path.expanduser(sys.argv[1]))

# CSV header row.
dataline = "model.id,corpus,data.id,function.states,content.states,states,f1to1,fmto1,r1to1,rmto1"
print(dataline)

for fi in os.listdir(inpath):
    fullpath = os.path.join(inpath, fi)
    if not os.path.isfile(fullpath):
        continue
    labs = fi.split(".")
    corpus = labs[0]
    if corpus == "brown":
        data_id = brown_map[labs[-2]]
    else:
        data_id = wsj_map[labs[-2]]
    model_id = labs[-3]
    function_states = labs[-5]
    content_states = labs[-4]
    states = "%d" % (int(function_states) + int(content_states))
    scores = ""
    # FIX: the original opened the file without ever closing it (handle
    # leaked on every iteration, including on the early break). The
    # with-block guarantees the handle is closed.
    with open(fullpath) as handle:
        for line in handle:
            if score_finder.search(line):
                scores = line
                break
    # Only emit a row for files that actually contained a score line.
    if len(scores) > 0:
        scores = scores.split()
        datam = {"model_id": model_id, "data_id": data_id, "corpus": corpus,
                 "function_states": function_states,
                 "content_states": content_states,
                 "f1to1": scores[0], "fmto1": scores[1],
                 "r1to1": scores[2], "rmto1": scores[3],
                 "states": states}
        dataline = ("%(model_id)s,%(corpus)s,%(data_id)s,%(function_states)s,"
                    "%(content_states)s,%(states)s,%(f1to1)s,%(fmto1)s,"
                    "%(r1to1)s,%(rmto1)s") % datam
        print(dataline)
Where can I buy Yanmar products or Spare Parts? You can buy Yanmar products or spare parts from a widespread network all over the world. Please contact your local Yanmar distributor. Where can I find help if I have a question about or a problem with a product? Technical support is best provided by your local Yanmar dealer. There are Yanmar dealers all over the world. Please contact the nearest distributor. They will be able to provide you with the best support. Or fill in this form, we will get back to you with an answer. Where can I find product brochures? All available product related brochures can be downloaded on the related product page. Where can I apply for a job at Yanmar? Please visit our jobs page, or send an open application via this form. Where can I find the engine model of my Yanmar engine? On the engine rocker arm cover is a metal data plate giving engine model + serial number. Can I use bio-diesel, kerosine, or heating oil in the engine? Bio-diesel is allowed until B7 (=7% bio-fuel content). Kerosine, heating oil or fuels different than EN590 fuel specification may damage the fuel system. Is there a 'hot-line' where I can get answers to my questions related to my Yanmar diesel engine? No, Yanmar Europe does not have a 'hot-line'. Please contact the Yanmar distributor in your country. Where can I find prices on this website? We do not show prices on the website. Please contact the Yanmar distributor in your country for prices of our products.
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe import _


def execute(filters=None):
	"""Report entry point: returns (columns, rows) for the BOM stock report.

	Each row holds: item code, description, manufacturer, manufacturer part
	number, BOM qty, buildable qty, required qty, qty difference and the
	item's last purchase price.
	"""
	# if not filters: filters = {}
	columns = get_columns()
	summ_data = []
	data = get_bom_stock(filters)
	qty_to_make = filters.get("qty_to_make")

	for row in data:
		item_map = get_item_details(row.item_code)
		# NOTE(review): required qty is derived from the *stock* qty
		# (row.actual_qty) rather than the per-unit BOM qty — confirm this
		# is the intended calculation.
		reqd_qty = qty_to_make * row.actual_qty
		last_pur_price = frappe.db.get_value("Item", row.item_code, "last_purchase_rate")
		if row.to_build > 0:
			diff_qty = row.to_build - reqd_qty
			summ_data.append([row.item_code, row.description,
				item_map[row.item_code]["manufacturer"],
				item_map[row.item_code]["manufacturer_part_no"],
				row.actual_qty, row.to_build, reqd_qty, diff_qty, last_pur_price])
		else:
			diff_qty = 0 - reqd_qty
			summ_data.append([row.item_code, row.description,
				item_map[row.item_code]["manufacturer"],
				item_map[row.item_code]["manufacturer_part_no"],
				row.actual_qty, "0.000", reqd_qty, diff_qty, last_pur_price])

	return columns, summ_data


def get_columns():
	"""return columns"""
	columns = [
		_("Item") + ":Link/Item:100",
		_("Description") + "::150",
		_("Manufacturer") + "::100",
		_("Manufacturer Part Number") + "::100",
		_("Qty") + ":Float:50",
		_("Stock Qty") + ":Float:100",
		_("Reqd Qty")+ ":Float:100",
		_("Diff Qty")+ ":Float:100",
		_("Last Purchase Price")+ ":Float:100",
	]
	return columns


def get_bom_stock(filters):
	"""Returns per-item stock/buildable quantities for the selected BOM.

	Honours the "show exploded view" and "warehouse" filters; a group
	warehouse is expanded to all child warehouses via its lft/rgt bounds.
	"""
	conditions = ""
	# FIX: user-supplied filter values (bom, warehouse) are now passed as
	# query parameters instead of being interpolated into the SQL string,
	# closing an injection hole. lft/rgt come from the Warehouse record.
	values = {"bom": filters.get("bom")}
	table = "`tabBOM Item`"
	qty_field = "qty"

	if filters.get("show_exploded_view"):
		table = "`tabBOM Explosion Item`"
		qty_field = "stock_qty"

	if filters.get("warehouse"):
		warehouse_details = frappe.db.get_value("Warehouse",
			filters.get("warehouse"), ["lft", "rgt"], as_dict=1)
		if warehouse_details:
			# Group warehouse: match every warehouse nested under it.
			conditions += """ and exists (select name from `tabWarehouse` wh
				where wh.lft >= %(lft)s and wh.rgt <= %(rgt)s and ledger.warehouse = wh.name)"""
			values["lft"] = warehouse_details.lft
			values["rgt"] = warehouse_details.rgt
		else:
			conditions += " and ledger.warehouse = %(warehouse)s"
			values["warehouse"] = filters.get("warehouse")

	return frappe.db.sql("""
			SELECT
				bom_item.item_code,
				bom_item.description,
				bom_item.{qty_field},
				ifnull(sum(ledger.actual_qty), 0) as actual_qty,
				ifnull(sum(FLOOR(ledger.actual_qty / bom_item.{qty_field})), 0) as to_build
			FROM
				{table} AS bom_item
				LEFT JOIN `tabBin` AS ledger
					ON bom_item.item_code = ledger.item_code
					{conditions}
			WHERE
				bom_item.parent = %(bom)s and bom_item.parenttype='BOM'
			GROUP BY bom_item.item_code""".format(
				qty_field=qty_field, table=table, conditions=conditions),
			values, as_dict=1)


def get_item_details(item_code):
	"""Returns {item name: details dict} for the given item code."""
	items = frappe.db.sql("""select it.item_group, it.item_name, it.stock_uom,
		it.name, it.brand, it.description, it.manufacturer_part_no, it.manufacturer
		from tabItem it where it.item_code = %s""", item_code, as_dict=1)
	return dict((d.name, d) for d in items)
This entry was posted in Anime, Episodes, Manga and tagged Amin, Ashley. Bookmark the permalink. Ashley and Amin! What a great episode! Ashley especially, thanks for being so open about your experience! You were definitely not rambling. Have not watched the show yet but it’s coming up in my queue. Can’t wait to listen to this. This was a really great episode, enjoyed the discussion. There is so much more to this show than I realised. I read the wikipedia article on the show, before listening to this, and I thought ‘it’s the plot of Never Been Kissed’ but I guess I was wrong about that. Fantastic episode! Thanks for turning me onto the show. I find a lot of anime very fantastical and over-the-top (which is great!), but this one hit me on that gut level. As Ashley says, it’s those little details of social anxiety that the show picks up on, that most people wouldn’t even think of, but anxious people get paralyzed by. And I love how the main character projects this meek, timid persona, but ultimately harbors this wealth of emotions and fears and ideas. It really captures how anxious people spend so much time running around their own heads. You were talking about another show, what was its name? It sounded like H.K. at first (as in N.H.K), but how’s it spelled?
# This is a python script for uploading batch data to Genotet server.
# The user may write a *.tsv file, with each line as:
#   file_path data_name file_type description
# The command line would be:
#   python uploadBatch.py username example.tsv
# And then enter your password for Genotet.

from requests_toolbelt import MultipartEncoder
import requests
import os
import sys
import getpass
import json

url = 'http://localhost:3000'  # Please change it accordingly.


def upload_file(file_path, data_name, file_type, description, cookies):
    """Upload one file to the Genotet server.

    Args:
        file_path: Path of the file to upload.
        data_name: Display name for the uploaded data.
        file_type: Genotet data type string.
        description: Free-text description.
        cookies: Cookie jar from a successful auth() call; the
            'genotet-session' cookie is forwarded.

    Returns:
        True if the server answered 200, False otherwise.
    """
    upload_url = url + '/genotet/upload'
    # Fix: use os.path.basename instead of splitting on the regex-escaped
    # '\/' (which is just '/' and breaks on Windows paths anyway).
    file_name = os.path.basename(file_path)
    # Fix: open the file in a context manager so the handle is always closed.
    with open(file_path, 'rb') as data_file:
        params = MultipartEncoder(
            fields={'type': file_type,
                    'name': data_name,
                    'description': description,
                    'username': 'anonymous',
                    'file': (file_name, data_file, 'text/plain')})
        headers = {'Content-Type': params.content_type}
        cookie = {'genotet-session': cookies['genotet-session']}
        response = requests.post(upload_url, data=params, headers=headers,
                                 cookies=cookie)
    print(response.status_code)
    # Fix: the original unconditionally returned True, so failed uploads were
    # reported as successes and main() never stopped on errors.
    return response.status_code == 200


def auth(username, password):
    """Sign in to Genotet.

    Returns:
        A (cookies, success) tuple; cookies is None when sign-in failed.
    """
    auth_url = url + '/genotet/user'
    params = {
        'type': 'sign-in',
        'username': username,
        'password': password
    }
    params = {'data': json.dumps(params)}
    response = requests.get(auth_url, params=params)
    if response.status_code != 200:
        # Fix: the original returned a bare False here, which crashed the
        # caller's `cookies, auth_result = auth(...)` tuple unpacking.
        return None, False
    return response.cookies, True


def main(argv):
    """Read the TSV manifest and upload each listed file in order."""
    if len(argv) < 3:
        print('input not enough')
        return
    username = argv[1]
    password = getpass.getpass('Password:')
    cookies, auth_result = auth(username, password)
    if not auth_result:
        print('username/password not correct')
        return
    else:
        print('sign in success')
    file_path = argv[2]
    # Fix: close the manifest file deterministically.
    with open(file_path, 'r') as tsv_file:
        for line in tsv_file:
            # Fix: strip the trailing newline so the description field does
            # not carry a '\n' into the upload form.
            parts = line.rstrip('\n').split('\t')
            if len(parts) < 4:
                print('skipping malformed line: ' + line.rstrip('\n'))
                continue
            result = upload_file(parts[0], parts[1], parts[2], parts[3],
                                 cookies)
            if not result:
                print('failed to upload ' + parts[0])
                return


if __name__ == '__main__':
    main(sys.argv)
WITH the politics and politicians of Northern Ireland occupying centre stage during the Brexit crisis, the latest book from a University of Huddersfield professor and his co-researchers is the most detailed investigation yet published into the membership and the attitudes of one of the Province’s longest-established and formerly most powerful political parties. For decades, the Ulster Unionist Party (UUP) was the dominant force in Northern Ireland, leading every Stormont government from 1922 to 1972. Its former leader David Trimble won a Nobel Peace Prize. But it would be eclipsed by the rise of the Democratic Unionist Party, founded in 1971 by Ian Paisley. The DUP – crucial to the survival of Theresa May’s Government – was the subject of an earlier book, entitled The Democratic Unionist Party – From Protest to Power – by James McAuley, who is Professor of Political Sociology and Irish Studies at the University of Huddersfield. Now, he and his colleagues Thomas Hennessey, Máire Braniff, Jonathan Tonge and Sophie A Whiting, who hold senior posts at a range of universities, have researched and written their follow-up, The Ulster Unionist Party – Country Before Party?. Based on a survey of almost half the party’s 2,000 membership, plus a large number of detailed interviews, it provides the most comprehensive current and historical account of a party that was long the most important in Northern Ireland. The book has been welcomed by the current leader of the UUP, Robin Swann. In a newspaper article, he stated that “people have made assumptions about our members’ views, but this the first time that anyone has taken the time to research exactly what they think”. And he took the opportunity to affirm that while he is leader there will be no merger with the DUP. Professor McAuley said that there had been surprisingly little research on the UUP over the past 30 years. “They won every election contested in Northern Ireland from 1922 to 1972, when Stormont was prorogued. 
Then they were slowly but surely overtaken by the DUP, which better articulated the fears of many people within the Unionist community. “For a long time, the UUP were thought of as ‘Big House Unionism’, where the leaders were felt to belong to a different class to most of the voters,” said Professor McAuley. The UUP now presents itself as a more moderate Unionist party. It has broken historic links with the Orange Order and has tried – with limited success – to attract a larger Catholic membership. Professor McAuley and his colleagues were able to contact a large proportion of the current UUP membership, gauging their attitudes on a spectrum of political, religious and social issues. The result is a collaboratively written book of nine chapters that covers topics including the UUP during the Troubles; Britishness and the Union; Religion and the UUP; and the role of women in the party. The book’s conclusion speculates on the future of the party. Now that Professor McAuley and his colleagues have completed books about the two major Unionist parties in Northern Ireland, they aim to shift their attention to the liberal and centrist Alliance Party and the nationalist SDLP – and maybe Sinn Fein in due course. This would require access to the membership. “We are hoping that good things said about the two books we have published so far will convince the other parties that it is a worthwhile exercise,” said Professor McAuley. * The Ulster Unionist Party. Country Before Party, by Thomas Hennessey, Máire Braniff, James W. McAuley, Jonathan Tonge and Sophie A. Whiting is published by Oxford University Press.
import sys

from character import Character
from monster import Dragon, Goblin, Troll


class Game:
    """Turn-based console fight: one player against a queue of monsters."""

    def setup(self):
        """Create the player and the queue of monsters to fight."""
        self.player = Character()
        # Fix: the original stored this list in `self.monster` and then
        # immediately overwrote it with the first monster, while the game
        # loop and the end-of-game check referenced a never-assigned
        # `self.monsters` attribute (AttributeError once the current
        # monster died with none left).
        self.monsters = [
            Goblin(),
            Troll(),
            Dragon()
        ]
        self.monster = self.get_next_monster()

    def get_next_monster(self):
        """Pop and return the next monster, or None when all are defeated."""
        try:
            return self.monsters.pop(0)
        except IndexError:
            return None

    def monster_turn(self):
        """Let the current monster attack; the player may try to dodge."""
        if self.monster.attack():
            print("{} is attacking!".format(self.monster))
            # Fix: the two "got hit" branches were duplicated (one with the
            # typo "you hot hit"); collapse them into a single outcome.
            dodged = False
            if input("Dodge ? Y/N").lower() == 'y':
                dodged = self.player.dodge()
            if dodged:
                print("you dodged the attack!")
            else:
                print("you got hit anyway!")
                self.player.hit_points -= 1
        else:
            print("{} isn't attacking this turn.".format(self.monster))

    def player_turn(self):
        """Prompt the player to attack, rest, or quit; recurse on bad input."""
        player_choice = input("[A]ttack, [R]est, [Q]uit? ").lower()
        if player_choice == 'a':
            print("you're attacking {}!".format(self.monster))
            if self.player.attack():
                if self.monster.dodge():
                    print("{} dodged your attack!".format(self.monster))
                else:
                    # A leveled-up player hits for 2 instead of 1.
                    if self.player.leveled_up():
                        self.monster.hit_points -= 2
                    else:
                        self.monster.hit_points -= 1
                    print("you hit {} with your {}!".format(
                        self.monster, self.player.weapon))
            else:
                print("you missed")
        elif player_choice == 'r':
            self.player.rest()
        elif player_choice == 'q':
            sys.exit()
        else:
            self.player_turn()

    def cleanup(self):
        """Award experience for a dead monster and bring out the next one."""
        if self.monster.hit_points <= 0:
            self.player.experience += self.monster.experience
            print("you killed {} ".format(self.monster))
            self.monster = self.get_next_monster()

    def __init__(self):
        self.setup()

        # Run until the player dies or every monster is defeated
        # (get_next_monster() returns None when the queue is exhausted).
        while self.player.hit_points and self.monster:
            print('\n' + '=' * 20)
            print(self.player)
            self.monster_turn()
            print('-' * 20)
            self.player_turn()
            self.cleanup()

        print('\n' + '=' * 20)
        if self.player.hit_points:
            print("you win")
        else:
            print("you lose")
        sys.exit()


# Fix: guard the entry point so importing this module does not start an
# interactive game; running the script behaves exactly as before.
if __name__ == '__main__':
    Game()
Stripes Welcome to the Pacific is the go-to guide for anyone making a permanent change-of-station in the Pacific region. Useful tips include getting around, shopping, driving, local culture and language, dining out — everything that makes a PCS easier. Your choice of Japan, Guam or Korea edition. If you plan to travel around, get all three! 2018-19 Editions are now available. Published once a year. For past issues, please see Annual Manual on stripes.com! Note: This publication is free of charge only at U.S. military installations in Pacific.
""" Parser and evaluator for FormulaResponse and NumericalResponse Uses pyparsing to parse. Main function as of now is evaluator(). """ import math import operator import numbers import numpy import scipy.constants import functions from pyparsing import ( Word, Literal, CaselessLiteral, ZeroOrMore, MatchFirst, Optional, Forward, Group, ParseResults, stringEnd, Suppress, Combine, alphas, nums, alphanums ) DEFAULT_FUNCTIONS = { 'sin': numpy.sin, 'cos': numpy.cos, 'tan': numpy.tan, 'sec': functions.sec, 'csc': functions.csc, 'cot': functions.cot, 'sqrt': numpy.sqrt, 'log10': numpy.log10, 'log2': numpy.log2, 'ln': numpy.log, 'exp': numpy.exp, 'arccos': numpy.arccos, 'arcsin': numpy.arcsin, 'arctan': numpy.arctan, 'arcsec': functions.arcsec, 'arccsc': functions.arccsc, 'arccot': functions.arccot, 'abs': numpy.abs, 'fact': math.factorial, 'factorial': math.factorial, 'sinh': numpy.sinh, 'cosh': numpy.cosh, 'tanh': numpy.tanh, 'sech': functions.sech, 'csch': functions.csch, 'coth': functions.coth, 'arcsinh': numpy.arcsinh, 'arccosh': numpy.arccosh, 'arctanh': numpy.arctanh, 'arcsech': functions.arcsech, 'arccsch': functions.arccsch, 'arccoth': functions.arccoth } DEFAULT_VARIABLES = { 'i': numpy.complex(0, 1), 'j': numpy.complex(0, 1), 'e': numpy.e, 'pi': numpy.pi, 'k': scipy.constants.k, # Boltzmann: 1.3806488e-23 (Joules/Kelvin) 'c': scipy.constants.c, # Light Speed: 2.998e8 (m/s) 'T': 298.15, # Typical room temperature: 298.15 (Kelvin), same as 25C/77F 'q': scipy.constants.e # Fund. Charge: 1.602176565e-19 (Coulombs) } # We eliminated the following extreme suffixes: # P (1e15), E (1e18), Z (1e21), Y (1e24), # f (1e-15), a (1e-18), z (1e-21), y (1e-24) # since they're rarely used, and potentially confusing. # They may also conflict with variables if we ever allow e.g. 
# 5R instead of 5*R SUFFIXES = { '%': 0.01, 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12, 'c': 1e-2, 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12 } class UndefinedVariable(Exception): """ Indicate when a student inputs a variable which was not expected. """ pass def lower_dict(input_dict): """ Convert all keys in a dictionary to lowercase; keep their original values. Keep in mind that it is possible (but not useful?) to define different variables that have the same lowercase representation. It would be hard to tell which is used in the final dict and which isn't. """ return {k.lower(): v for k, v in input_dict.iteritems()} # The following few functions define evaluation actions, which are run on lists # of results from each parse component. They convert the strings and (previously # calculated) numbers into the number that component represents. def super_float(text): """ Like float, but with SI extensions. 1k goes to 1000. """ if text[-1] in SUFFIXES: return float(text[:-1]) * SUFFIXES[text[-1]] else: return float(text) def eval_number(parse_result): """ Create a float out of its string parts. e.g. [ '7.13', 'e', '3' ] -> 7130 Calls super_float above. """ return super_float("".join(parse_result)) def eval_atom(parse_result): """ Return the value wrapped by the atom. In the case of parenthesis, ignore them. """ # Find first number in the list result = next(k for k in parse_result if isinstance(k, numbers.Number)) return result def eval_power(parse_result): """ Take a list of numbers and exponentiate them, right to left. e.g. [ 2, 3, 2 ] -> 2^3^2 = 2^(3^2) -> 512 (not to be interpreted (2^3)^2 = 64) """ # `reduce` will go from left to right; reverse the list. parse_result = reversed( [k for k in parse_result if isinstance(k, numbers.Number)] # Ignore the '^' marks. ) # Having reversed it, raise `b` to the power of `a`. 
power = reduce(lambda a, b: b ** a, parse_result) return power def eval_parallel(parse_result): """ Compute numbers according to the parallel resistors operator. BTW it is commutative. Its formula is given by out = 1 / (1/in1 + 1/in2 + ...) e.g. [ 1, 2 ] -> 2/3 Return NaN if there is a zero among the inputs. """ if len(parse_result) == 1: return parse_result[0] if 0 in parse_result: return float('nan') reciprocals = [1. / e for e in parse_result if isinstance(e, numbers.Number)] return 1. / sum(reciprocals) def eval_sum(parse_result): """ Add the inputs, keeping in mind their sign. [ 1, '+', 2, '-', 3 ] -> 0 Allow a leading + or -. """ total = 0.0 current_op = operator.add for token in parse_result: if token == '+': current_op = operator.add elif token == '-': current_op = operator.sub else: total = current_op(total, token) return total def eval_product(parse_result): """ Multiply the inputs. [ 1, '*', 2, '/', 3 ] -> 0.66 """ prod = 1.0 current_op = operator.mul for token in parse_result: if token == '*': current_op = operator.mul elif token == '/': current_op = operator.truediv else: prod = current_op(prod, token) return prod def add_defaults(variables, functions, case_sensitive): """ Create dictionaries with both the default and user-defined variables. """ all_variables = dict(DEFAULT_VARIABLES) all_functions = dict(DEFAULT_FUNCTIONS) all_variables.update(variables) all_functions.update(functions) if not case_sensitive: all_variables = lower_dict(all_variables) all_functions = lower_dict(all_functions) return (all_variables, all_functions) def evaluator(variables, functions, math_expr, case_sensitive=False): """ Evaluate an expression; that is, take a string of math and return a float. -Variables are passed as a dictionary from string to value. They must be python numbers. -Unary functions are passed as a dictionary from string to function. """ # No need to go further. if math_expr.strip() == "": return float('nan') # Parse the tree. 
math_interpreter = ParseAugmenter(math_expr, case_sensitive) math_interpreter.parse_algebra() # Get our variables together. all_variables, all_functions = add_defaults(variables, functions, case_sensitive) # ...and check them math_interpreter.check_variables(all_variables, all_functions) # Create a recursion to evaluate the tree. if case_sensitive: casify = lambda x: x else: casify = lambda x: x.lower() # Lowercase for case insens. evaluate_actions = { 'number': eval_number, 'variable': lambda x: all_variables[casify(x[0])], 'function': lambda x: all_functions[casify(x[0])](x[1]), 'atom': eval_atom, 'power': eval_power, 'parallel': eval_parallel, 'product': eval_product, 'sum': eval_sum } return math_interpreter.reduce_tree(evaluate_actions) class ParseAugmenter(object): """ Holds the data for a particular parse. Retains the `math_expr` and `case_sensitive` so they needn't be passed around method to method. Eventually holds the parse tree and sets of variables as well. """ def __init__(self, math_expr, case_sensitive=False): """ Create the ParseAugmenter for a given math expression string. Do the parsing later, when called like `OBJ.parse_algebra()`. """ self.case_sensitive = case_sensitive self.math_expr = math_expr self.tree = None self.variables_used = set() self.functions_used = set() def vpa(tokens): """ When a variable is recognized, store it in `variables_used`. """ varname = tokens[0][0] self.variables_used.add(varname) def fpa(tokens): """ When a function is recognized, store it in `functions_used`. """ varname = tokens[0][0] self.functions_used.add(varname) self.variable_parse_action = vpa self.function_parse_action = fpa def parse_algebra(self): """ Parse an algebraic expression into a tree. Store a `pyparsing.ParseResult` in `self.tree` with proper groupings to reflect parenthesis and order of operations. Leave all operators in the tree and do not parse any strings of numbers into their float versions. 
Adding the groups and result names makes the `repr()` of the result really gross. For debugging, use something like print OBJ.tree.asXML() """ # 0.33 or 7 or .34 or 16. number_part = Word(nums) inner_number = (number_part + Optional("." + Optional(number_part))) | ("." + number_part) # pyparsing allows spaces between tokens--`Combine` prevents that. inner_number = Combine(inner_number) # SI suffixes and percent. number_suffix = MatchFirst(Literal(k) for k in SUFFIXES.keys()) # 0.33k or 17 plus_minus = Literal('+') | Literal('-') number = Group( Optional(plus_minus) + inner_number + Optional(CaselessLiteral("E") + Optional(plus_minus) + number_part) + Optional(number_suffix) ) number = number("number") # Predefine recursive variables. expr = Forward() # Handle variables passed in. They must start with letters/underscores # and may contain numbers afterward. inner_varname = Word(alphas + "_", alphanums + "_") varname = Group(inner_varname)("variable") varname.setParseAction(self.variable_parse_action) # Same thing for functions. function = Group(inner_varname + Suppress("(") + expr + Suppress(")"))("function") function.setParseAction(self.function_parse_action) atom = number | function | varname | "(" + expr + ")" atom = Group(atom)("atom") # Do the following in the correct order to preserve order of operation. pow_term = atom + ZeroOrMore("^" + atom) pow_term = Group(pow_term)("power") par_term = pow_term + ZeroOrMore('||' + pow_term) # 5k || 4k par_term = Group(par_term)("parallel") prod_term = par_term + ZeroOrMore((Literal('*') | Literal('/')) + par_term) # 7 * 5 / 4 prod_term = Group(prod_term)("product") sum_term = Optional(plus_minus) + prod_term + ZeroOrMore(plus_minus + prod_term) # -5 + 4 - 3 sum_term = Group(sum_term)("sum") # Finish the recursion. 
expr << sum_term # pylint: disable=W0104 self.tree = (expr + stringEnd).parseString(self.math_expr)[0] def reduce_tree(self, handle_actions, terminal_converter=None): """ Call `handle_actions` recursively on `self.tree` and return result. `handle_actions` is a dictionary of node names (e.g. 'product', 'sum', etc&) to functions. These functions are of the following form: -input: a list of processed child nodes. If it includes any terminal nodes in the list, they will be given as their processed forms also. -output: whatever to be passed to the level higher, and what to return for the final node. `terminal_converter` is a function that takes in a token and returns a processed form. The default of `None` just leaves them as strings. """ def handle_node(node): """ Return the result representing the node, using recursion. Call the appropriate `handle_action` for this node. As its inputs, feed it the output of `handle_node` for each child node. """ if not isinstance(node, ParseResults): # Then treat it as a terminal node. if terminal_converter is None: return node else: return terminal_converter(node) node_name = node.getName() if node_name not in handle_actions: # pragma: no cover raise Exception(u"Unknown branch name '{}'".format(node_name)) action = handle_actions[node_name] handled_kids = [handle_node(k) for k in node] return action(handled_kids) # Find the value of the entire tree. return handle_node(self.tree) def check_variables(self, valid_variables, valid_functions): """ Confirm that all the variables used in the tree are valid/defined. Otherwise, raise an UndefinedVariable containing all bad variables. """ if self.case_sensitive: casify = lambda x: x else: casify = lambda x: x.lower() # Lowercase for case insens. # Test if casify(X) is valid, but return the actual bad input (i.e. 
X) bad_vars = set(var for var in self.variables_used if casify(var) not in valid_variables) bad_vars.update(func for func in self.functions_used if casify(func) not in valid_functions) if bad_vars: raise UndefinedVariable(' '.join(sorted(bad_vars)))
I came across falafel chips in Trader Joe’s when I visited San Francisco last year. Not knowing where to buy these here in Canada, I decided I had to come up with a recipe for them, and even considered starting a falafel chip enterprise. Recently, while still trying to work on my own recipe, I came across another brand of falafel chips in a general store in Sydenham, Ont., so my idea of commercialising my own was dashed. Nonetheless, I am pleased that someone else is doing it, even if they are imported. They are made by a company called ‘flamous’ (sic) and are sold as Falafel Chips. Even if you can buy these, or the ones from Trader Joe’s, you might be interested in making them yourself. They are cheaper, fresher, and you can alter the recipe to your own taste and according to what is available. They are excellent on their own, or served with hummus, guacamole, or whatever you fancy. The recipes I have come up with are not so much recipes as ‘a method’. As long as you have chick pea flour (besan) cumin and water, you can make these. You only need to make sure the dough is not too sticky but sticky enough to stick together. The method of rolling them out is suprisingly simple, as long as you remember to oil the parchment paper. Sometimes I add oil, sometimes not, it seems to make little difference. The more herbs and spices you add, the more complex the flavour, so feel free to come up with your own variations. The first recipe contains dried dandelion leaves, which give them those green speckles, and the second one contains tomato paste and some cornmeal. Mix together all the dry ingredients. Add water until the mixture holds together, but is not sticky. Lightly grease two sheets of parchment paper and put small dobs (about the size of a grape) of batter spaced about 2 inches apart. 
Place the other piece of parchment over this, and roll out with a rolling pin, until the chips are thin enough that when touched with your finger, you can feel the hard surface below. There should be no squishy feeling of the dough. Peel off the top layer of parchment, place the chips, still on their paper, on a cookie tin, and bake in a preheated oven for 20 minutes at 275 Fahrenheit.
This recipe I am sharing is one that requires a little more effort than sauteing or steaming, but I think highlights the rich green colour and delicate flavour of the plant. It could also be made with rice, as you would a risotto. If my pictures are not enough to help you identify it, there are plenty of pictures and descriptions available on the internet, and if you are still not sure if you have it in your garden, check with someone familiar with local weeds – there must be one somewhere near you. 4 cups lambsquarters, leaves only. Heat the oil in a saucepan and fry the onion until soft, but not browned. Add the barley and fry for a couple of minutes, being sure to coat all the barley with oil. Add the garlic and fry for another minute. Stir in the herbs, salt and grated lemon peel. Pour 1/2 cup of the water or stock and stir the mixture occasionally until most of the water has been absorbed. Continue to add water, 1/2 cup at a time. When the last addition of water is made, add the lambsquarters and mix well until there is no more liquid visible. Categories: edible weeds, garden vegetable, recipes | Tags: barley, chenopodium album, foraging, lambsquarters, recipe, risotto, side dish, vegetarian, weeds | Permalink. Here is a soup which can be served hot or cold. It makes an elegant and inexpensive dish at this time of year. I added the pomegranate molasses to add a little red colour, and found the flavour goes well with the spices, but if you don’t have any, it can be omitted. Mix the first five ingredients together in a saucepan, bring to a boil and simmer for half an hour. Strain the mixture through a fine sieve and discard the pulp. Add the sugar and stir over medium heat until it dissolves. Mix the cornstarch in some water and add to the mixture. Add the yogourt, heat but do not boil. Categories: recipes | Tags: rhubarb, soup | Permalink. Rhubarb is not a wild plant, but once you have it, you have it forever. 
Mine was a good healthy plant when I moved to my current property almost three years ago, and it just keeps getting bigger and better. This may be the first year I can’t keep up with it – I use it for desserts, chutney and soups, and I freeze a good deal of it for the winter just by chopping it and storing it in the freezer in plastic bags. And if you like mixing your rhubarb with strawberries for pies, you might want to try other sweet fruits, such as apricots, blueberries, seedless grapes or dates. This is a basic stewed recipe which, with minor alterations, can also be jam or filling. I used fresh ginger for flavouring, but vanilla, orange blossom or rosewater are also really good. This can be used for filling tarts, thumbprint cookies etc. Mix the first three ingredients in a bowl and let sit overnight or several hours, until the sugar has become all liquid. At this point, add the cornstarch. Bring to a boil and then simmer for a few minutes, until the rhubarb is just tender, but not mushy, and the liquid has become translucent. Once cooled, fill baked tart shells, cookies etc. Do as above, omittting the cornstarch. Same as for the stewed rhubarb, but continue to cook until it is a thick consistency. Categories: dessert, recipes | Tags: desserts, fruit preserves, jam, rhubarb, storing rhubarb, tarts | Permalink.
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING

import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat

from .. import models as _models

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]


class ExpressRoutePortsLocationsOperations(object):
    """ExpressRoutePortsLocationsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client, client configuration, and the (de)serializers are
        # supplied by the generated service client that owns this group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ExpressRoutePortsLocationListResult"]
        """Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for
        each location. Available bandwidths can only be obtained when retrieving a specific peering
        location.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_04_01.models.ExpressRoutePortsLocationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend/override the mapping via an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated URL from
            # list.metadata; subsequent pages reuse the server-provided next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand ItemPaged the pair
            # (continuation link or None, iterator over this page's elements).
            deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is raised as an
            # ARM-formatted HttpResponseError (or a mapped exception type).
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'}  # type: ignore

    def get(
        self,
        location_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ExpressRoutePortsLocation"
        """Retrieves a single ExpressRoutePort peering location, including the list of available
        bandwidths available at said peering location.

        :param location_name: Name of the requested ExpressRoutePort peering location.
        :type location_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRoutePortsLocation, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_04_01.models.ExpressRoutePortsLocation
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePortsLocation"]
        # Same error mapping convention as list() above.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'locationName': self._serialize.url("location_name", location_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)

        if cls:
            # cls receives (raw pipeline response, deserialized model, headers dict).
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'}  # type: ignore
Published 04/21/2019 03:11:27 pm at 04/21/2019 03:11:27 pm in Hydrangea Plants Sale. hydrangea plants sale climbing hydrangea plant prune climbing hydrangea climbing hydrangea plants for sale sweet winter deals on silk plants direct hydrangea pack of cream silk plants direct hydrangea pack of cream. sweet winter deals on silk plants direct hydrangea pack of cream silk plants direct hydrangea pack of cream, hydrangea plants for sale near me hydrangea care white hydrangea hydrangea plants for sale near me hydrangea care white hydrangea plants for sale near me, hydrangeas plus munchkin, big sale pcs bonsai hydrangea mixed climbing hydrangea plants big sale pcs bonsai hydrangea mixed climbing hydrangea plants flowers garden plant bonsai beautify the environmen garden in bonsai from home garden , hydrangea plants grow nikko blue shade garden perennials burpee hydrangea macrophylla la dreamin ppaf large, master gardener plant sale thurston county washington state master gardener plant sale, hydrangea direct gardening blue hydrangea sale, growing hydrangeas inside gardening pinterest hydrangea this is a guide about growing hydrangeas inside when we see beautiful blooming hydrangeas for sale in foil wrapped pots we wonder how well they will grow , hydrangia plant hydrangea sun goddess hydrangea plant food hydrangia plant zoom hydrangea plants for sale nz , pee gee hydrangea plants for sale low prices online pee gee hydrangea, hot sale white hydrangea plants balcony bonsai viburnum hydrangea hot sale white hydrangea plants balcony bonsai viburnum hydrangea macrophylla bonsai plant flower plantas for home.
#! /usr/bin/env python
"""Parse experiment output files and print a summary table (html/csv/tex)."""
import argparse
from collections import defaultdict

from tools import templates
from tools.experiment_parser import parse_all
from tools.table_generator import format_table

# NOTE(review): appears unused in this module -- TODO confirm before removing.
SEPARATE_EF = True


def kmer_to_read_coverage(c, k, read_length=100):
    """Convert k-mer coverage ``c`` to read coverage for k-mers of size ``k``.

    Returns None implicitly when ``c`` is None.
    NOTE(review): not called anywhere in this module; under Python 2 the ``/``
    would be integer division -- confirm the intended interpreter.
    """
    if c is not None:
        return c * read_length / (read_length - k + 1)


def compute_average(table_lines, std_key_suffix='_std'):
    """Aggregate per-sequence rows into per-configuration means and std devs.

    ``table_lines`` maps tuple keys to dicts of metric -> value; ``key[1:]``
    is used as the grouping key (presumably dropping the sequence name --
    TODO confirm against parse_all).  Non-numeric metric values raise
    TypeError on ``+=`` and are deliberately skipped.

    Returns a dict mapping grouped key -> dict containing, for each numeric
    metric, its mean and (under ``metric + std_key_suffix``) its sample
    standard deviation, 0 when fewer than two samples exist.
    """
    table_cnt = defaultdict(lambda: defaultdict(int))
    table_sum = defaultdict(lambda: defaultdict(float))
    table_avg = defaultdict(lambda: defaultdict(float))
    table_std_sum = defaultdict(lambda: defaultdict(float))

    # First pass: accumulate sums and counts of numeric metrics per group.
    for key, val in table_lines.items():
        for k, v in val.items():
            try:
                table_sum[key[1:]][k] += v
                table_cnt[key[1:]][k] += 1.0
            except TypeError:
                pass

    # Means (the count-0 guard is defensive; sums imply a count of >= 1).
    for key, val in table_sum.items():
        for k, v in val.items():
            if table_cnt[key][k] == 0:
                table_avg[key][k] = None
            else:
                table_avg[key][k] = v / table_cnt[key][k]

    # Second pass: accumulate squared deviations from the mean.
    for key, val in table_lines.items():
        for k, v in val.items():
            try:
                table_std_sum[key[1:]][k] += (v - table_avg[key[1:]][k]) ** 2
            except TypeError:
                pass

    # Sample standard deviation (Bessel's correction, n - 1 denominator).
    for key, val in table_std_sum.items():
        for k, v in val.items():
            if table_cnt[key][k] <= 1:
                table_avg[key][k + std_key_suffix] = 0
            else:
                table_avg[key][k + std_key_suffix] = (v / (table_cnt[key][k] - 1)) ** 0.5

    return table_avg


def main(args):
    """Parse all experiment outputs under ``args.path`` and print the table."""
    table_lines = parse_all(args.path, args.filter, not args.no_error, legacy=args.legacy)

    header = [
        'seq_name', 'provided_coverage', 'provided_error_rate', 'provided_k',
        'coverage', 'error_rate', 'genome_size',
        'q1', 'q2', 'q',
        'guessed_coverage', 'guessed_error_rate',
        'provided_loglikelihood', 'loglikelihood', 'guessed_loglikelihood',
    ]

    # Column set used when --average collapses rows into per-config means.
    header_avg = [
        'provided_coverage', 'provided_error_rate', 'provided_k',
        'coverage', 'coverage_std', 'error_rate', 'error_rate_std',
        'genome_size', 'genome_size_std',
        'q1', 'q1_std', 'q2', 'q2_std', 'q', 'q_std',
        'guessed_coverage', 'guessed_coverage_std',
        'guessed_error_rate', 'guessed_error_rate_std',
        'provided_loglikelihood', 'provided_loglikelihood_std',
        'loglikelihood', 'loglikelihood_std',
        'guessed_loglikelihood', 'guessed_loglikelihood_std',
    ]

    # header = [
    #     'provided_coverage', 'provided_error_rate',
    #     'coverage', 'error_rate',
    # ]

    format_templates = {
        'html': templates.html,
        'csv': templates.csv,
        'tex': templates.tex,
    }

    format_escape = {
        'tex': lambda x: x.replace('_', '\\_'),
    }

    titles = {
        'provided_coverage': 'Coverage',
        'provided_error_rate': 'Error Rate',
        'coverage': 'Est. Coverage',
        'coverage_std': 'Est. Coverage Std',
        'error_rate': 'Est. Error Rate',
        'error_rate_std': 'Est. Error Rate Std',
        'genome_size': 'Est. Genome Size',
        'genome_size_std': 'Est. Genome Size Std',
    }

    if args.average:
        table_lines = compute_average(table_lines)
        header = header_avg

    # In --average mode the rows are inner defaultdict(float)s, so sort keys
    # missing from them (e.g. 'seq_name') fall back to 0.0 instead of raising.
    print(format_table(
        header,
        titles,
        sorted(
            list(table_lines.values()),
            key=lambda x: (
                x['provided_coverage'],
                x['provided_error_rate'],
                x['provided_k'],
                x.get('repeats', False),
                x['seq_name'],
            )
        ),
        template_file=format_templates[args.format],
        escape=format_escape.get(args.format, None),
    ))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Parse experiment output and generate table')
    parser.add_argument('path', help='Experiment')
    parser.add_argument('-f', '--format', default='html', help='Table format')
    parser.add_argument('-i', '--filter', default='*.out', help='Filter files')
    parser.add_argument('-a', '--average', action='store_true',
                        help='Compute average from all sequences')
    parser.add_argument('-ne', '--no-error', action='store_true', help='Error is unknown')
    parser.add_argument('--legacy', action='store_true', help='Run in legacy mode')
    args = parser.parse_args()
    main(args)
Please join us for these special worship opportunities as we celebrate the resurrection of Jesus Christ! 7am | Sunrise Service will be an intimate service with worship led by our Student worship team. Experience this service outside during sunrise, God’s perfect backdrop. No childcare is available at this service. 8:30am | This Easter Service is a full service with specially selected songs and sermon focused on our Risen Savior. Children’s Church is available for newborn – 5th grade. New families please pre-register below. 10:15am | This Easter Service will be equal in its worship and message to the 8:30am service and just as powerful. Children’s Church is available for newborn – 5th grade. New families please pre-register below. If you have children, please help us to prepare for you and your family by taking a moment to pre-register. An evening of worship, reflection, and communion focusing on the sacrifice our Savior gave on the Cross. Discover Easter and the events leading up to it in a way you and your children will never forget! Walk with Jesus is a powerfully moving family event that will guide your family through Jesus’ last days on Earth. Space is limited - get details and reserve your time slot HERE.
#!/usr/bin/env python
# encoding:utf8

"""
    DragonPy - Dragon 32 emulator in Python
    =======================================

    :created: 2014 by Jens Diemer - www.jensdiemer.de
    :copyleft: 2014 by the DragonLib team, see AUTHORS for more details.
    :license: GNU GPL v3 or above, see LICENSE for more details.
"""

from __future__ import absolute_import, division, print_function

import six
import logging

from dragonlib.CoCo.basic_tokens import COCO_BASIC_TOKENS
from dragonlib.core.basic import BasicListing, RenumTool, BasicTokenUtil,\
    BasicLine
from dragonlib.core.basic_parser import BASICParser
from dragonlib.core.binary_files import BinaryFile
from dragonlib.dragon32.basic_tokens import DRAGON32_BASIC_TOKENS
from dragonlib.utils.logging_utils import log_bytes


log=logging.getLogger(__name__)

# Machine identifiers used as CONFIG_NAME by the API subclasses below.
DRAGON32 = "Dragon32"
COCO2B = "CoCo"


class BaseAPI(object):
    """Common BASIC-listing API; subclasses supply BASIC_TOKENS and
    DEFAULT_PROGRAM_START for a concrete machine (Dragon 32, CoCo...)."""

    # Regex used by RenumTool to find line-number references in statements.
    RENUM_REGEX = r"""
        (?P<statement> GOTO|GOSUB|THEN|ELSE ) (?P<space>\s*) (?P<no>[\d*,\s*]+)
    """

    def __init__(self):
        self.listing = BasicListing(self.BASIC_TOKENS)
        self.renum_tool = RenumTool(self.RENUM_REGEX)
        self.token_util = BasicTokenUtil(self.BASIC_TOKENS)

    def program_dump2ascii_lines(self, dump, program_start=None):
        """
        convert a memory dump of a tokensized BASIC listing into
        ASCII listing list.
        """
        dump = bytearray(dump) # assert isinstance(dump, bytearray)
        if program_start is None:
            program_start = self.DEFAULT_PROGRAM_START
        return self.listing.program_dump2ascii_lines(dump, program_start)

    def parse_ascii_listing(self, basic_program_ascii):
        """Parse an ASCII BASIC listing; logs critically on an empty result
        but still returns it (callers get whatever the parser produced)."""
        parser = BASICParser()
        parsed_lines = parser.parse(basic_program_ascii)
        if not parsed_lines:
            log.critical("No parsed lines %s from %s ?!?" % (
                repr(parsed_lines), repr(basic_program_ascii)
            ))
        log.debug("Parsed BASIC: %s", repr(parsed_lines))
        return parsed_lines

    def ascii_listing2basic_lines(self, basic_program_ascii, program_start):
        """Turn an ASCII listing into BasicLine objects, sorted by line number.

        NOTE(review): ``program_start`` is accepted but not used here --
        confirm whether it can be dropped from callers.
        """
        parsed_lines = self.parse_ascii_listing(basic_program_ascii)
        basic_lines = []
        for line_no, code_objects in sorted(parsed_lines.items()):
            basic_line = BasicLine(self.token_util)
            basic_line.code_objects_load(line_no,code_objects)
            basic_lines.append(basic_line)
        return basic_lines

    def ascii_listing2program_dump(self, basic_program_ascii, program_start=None):
        """
        convert a ASCII BASIC program listing into tokens.
        This tokens list can be used to insert it into the
        Emulator RAM.
        """
        if program_start is None:
            program_start = self.DEFAULT_PROGRAM_START

        basic_lines = self.ascii_listing2basic_lines(basic_program_ascii, program_start)
        program_dump=self.listing.basic_lines2program_dump(basic_lines, program_start)
        assert isinstance(program_dump, bytearray), (
            "is type: %s and not bytearray: %s" % (type(program_dump), repr(program_dump))
        )
        return program_dump

    def pformat_tokens(self, tokens):
        """
        format a tokenized BASIC program line. Useful for debugging.
        returns a list of formated string lines.
        """
        return self.listing.token_util.pformat_tokens(tokens)

    def pformat_program_dump(self, program_dump, program_start=None):
        """
        format a BASIC program dump. Useful for debugging.
        returns a list of formated string lines.
        """
        assert isinstance(program_dump, bytearray)
        if program_start is None:
            program_start = self.DEFAULT_PROGRAM_START
        return self.listing.pformat_program_dump(program_dump, program_start)

    def renum_ascii_listing(self, content):
        """Renumber an ASCII listing via RenumTool (see RENUM_REGEX)."""
        return self.renum_tool.renum(content)

    def reformat_ascii_listing(self, basic_program_ascii):
        """Re-tokenize and reformat an ASCII listing, returning it as one
        newline-joined string.  The print() calls are debug output."""
        parsed_lines = self.parse_ascii_listing(basic_program_ascii)

        ascii_lines = []
        for line_no, code_objects in sorted(parsed_lines.items()):
            print()
            print(line_no, code_objects)
            basic_line = BasicLine(self.token_util)
            basic_line.code_objects_load(line_no,code_objects)
            print(basic_line)
            basic_line.reformat()
            new_line = basic_line.get_content()
            print(new_line)
            ascii_lines.append(new_line)
        return "\n".join(ascii_lines)

    def bas2bin(self, basic_program_ascii, load_address=None, exec_address=None):
        """Tokenise an ASCII listing and wrap it as a Dragon DOS binary.

        Both addresses default to DEFAULT_PROGRAM_START.
        """
        # FIXME: load_address/exec_address == program_start ?!?!
        if load_address is None:
            load_address = self.DEFAULT_PROGRAM_START
        if exec_address is None:
            exec_address = self.DEFAULT_PROGRAM_START

        tokenised_dump = self.ascii_listing2program_dump(basic_program_ascii, load_address)
        log.debug(type(tokenised_dump))
        log.debug(repr(tokenised_dump))
        log_bytes(tokenised_dump, msg="tokenised: %s")

        binary_file = BinaryFile()
        binary_file.load_tokenised_dump(tokenised_dump,
            load_address=load_address,
            exec_address=exec_address,
        )
        binary_file.debug2log(level=logging.CRITICAL)
        data = binary_file.dump_DragonDosBinary()
        return data

    def bin2bas(self, data):
        """
        convert binary files to a ASCII basic string.
        Supported are:
            * Dragon DOS Binary Format
            * TODO: CoCo DECB (Disk Extended Color BASIC) Format

        see:
        http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=348&p=10139#p10139
        """
        data = bytearray(data)
        binary_file = BinaryFile()
        binary_file.load_from_bin(data)
        # File type $01 marks tokenised BASIC; other types are only logged,
        # the conversion is still attempted.
        if binary_file.file_type != 0x01:
            log.error("ERROR: file type $%02X is not $01 (tokenised BASIC)!",
                binary_file.file_type
            )
        ascii_lines = self.program_dump2ascii_lines(dump=binary_file.data,
            # FIXME: #program_start=bin.exec_address
            program_start=binary_file.load_address
        )
        return "\n".join(ascii_lines)


class Dragon32API(BaseAPI):
    """BASIC API configured for the Dragon 32."""
    CONFIG_NAME = DRAGON32
    MACHINE_NAME = "Dragon 32"
    BASIC_TOKENS = DRAGON32_BASIC_TOKENS

    # System pointers (zero-page addresses).
    PROGRAM_START_ADDR = 0x0019
    VARIABLES_START_ADDR = 0x001B
    ARRAY_START_ADDR = 0x001D
    FREE_SPACE_START_ADDR = 0x001F

    # Default memory location of BASIC listing start
    DEFAULT_PROGRAM_START = 0x1E01


class CoCoAPI(Dragon32API):
    """
    http://sourceforge.net/p/toolshed/code/ci/default/tree/cocoroms/dragon_equivs.asm
    """
    CONFIG_NAME = COCO2B
    MACHINE_NAME = "CoCo"
    BASIC_TOKENS = COCO_BASIC_TOKENS


def example_renum_ascii_listing():
    """Small demo: renumber a three-line listing and print the result."""
    api = Dragon32API()
    ascii_listing="\n".join([
        '1 PRINT "LINE 10"',
        '2 PRINT "LINE 20"',
        '3 GOTO 1',
    ])
    print(
        api.renum_ascii_listing(ascii_listing)
    )


def test_bin2bas():
    """Manual round-trip check: bin -> ASCII -> bin, dumping both binaries.

    NOTE(review): relies on ``os`` imported in the ``__main__`` block below
    and on a hard-coded local file path -- a manual smoke test, not a unit test.
    """
    api = Dragon32API()
    with open(os.path.expanduser("~/DragonEnvPy3/DwRoot/AUTOLOAD.DWL"), "rb") as f:
        data1=f.read()

    ascii_listing=api.bin2bas(data1)
    print(ascii_listing)

    data2 = api.bas2bin(ascii_listing, load_address=0x1e01, exec_address=0x1e01)

    log_bytes(data1, "data1: %s", level=logging.CRITICAL)
    log_bytes(data2, "data2: %s", level=logging.CRITICAL)


if __name__ == '__main__':
    import os

    from dragonlib.utils.logging_utils import setup_logging

    setup_logging(
#         level=1 # hardcore debug ;)
#         level=10 # DEBUG
#         level=20 # INFO
        level=30 # WARNING
#         level=40 # ERROR
#         level=50 # CRITICAL/FATAL
#         level=99
    )

#     example_renum_ascii_listing()
    test_bin2bas()
This page covers the key design decisions that you'll encounter with MKS. Provide an option to build 100% self-sufficient bases, but provide this at a significant cost, in both time and effort, commensurate with the reward. Add the capability of having parts that can be launched as lightweight packages, then 'built out' in-situ. Build a mechanism that forces part dependencies. For example, to get food the player must build an Agriculture Module, which requires Dirt from the Drill, and many converters require machinery generated by the Assembly Plant to operate. Include resource exploitation through a mechanism that forces the player to choose their colonization sites carefully, and situate them with easy access to resources. Keep things at a fairly high abstraction level. The sweet spot of MKS is in construction and logistics management over a fairly abstracted system of resources. Leverage existing add-ons where possible, and do this with minimal dependencies. The stock resource system is used to handle all generators and harvesters, the Community Resource Pack (CRP) handles shared resources, and the USI Catalog is used for Generators, Life-Support, and Kontainers. Using the USI life support mod (USI-LS) with MKS can help provide an appropriate challenge where players will weigh the costs, risks, and rewards of supplying missions exclusively with carried life support Supplies, or in making the investment in a permanent colony. That being said, this is not just a single greenhouse part (although food production is part of the mod). Rather, it brings an entire colonization end-game to KSP in a fun and challenging way. Once fully developed, a planetary MKS colony can provide for a self sustaining support loop. Due to the way MKS is designed, orbital colonies will always have some degree of lossiness and need to be supported with periodic supplies from Kerbin or a nearby ground base. 
The USI-LS mod also includes the Habitation construct, where Kerbals need room to move around or they get grumpy. They're also fond of Kerbin, and unless serious effort is made to create the kreature comforts of home, known as ColonySupplies, they'll eventually get homesick and need to go back to Kerbin. There are three classes of kolonization parts. Each has its purpose in the building of your kolony. The smaller Ranger series is intended as the simplest way to deploy the basic modules. They sacrifice small size with the need to deploy large Material Kits. The parts also have a lower efficiency rate than the other two classes. The middle Duna series offers better performance based on standardized lander configurations. They can use the Ranger modules either as direct support or as enhancers. The Tundra series, the largest of the three, has both 2.5 and 3.75 form factor that serves well as either a landed or orbital base. For more information, see the Parts page. There are many trade-offs when designing a base. Here are several of the main ones. USI-LS introduced the concept of Supplies being converted by Kerbals into Mulch just by the fact the Kerbals exist. MKS introduces two other primary resource conversions. First, everything that does "work" requires Machinery, which slowly wears down into Recyclables. Similarly, there are replenishable nuclear power sources that work by converting EnrichedUranium into DepletedFuel and Xenon gas. Both of these conversions take place whenever the modules are used and are at a fixed rate. Beginning bases can normally operate for quite some time on shipped-in resources. Each Kerbal requires 10.8 Supplies per 6-hour Kerbin-day. A single Mini-Pak will hold over 9 days worth of Supplies (100 / 10.8), and a single 1.25 flat tank hold enough for over 46 (500 / 10.8) days. However, as you move further out and shipping becomes less convenient, you'll need to start producing your consumables. 
For more information, see Functions (Manufacturing). This choice is often made based on what local resources you can find. If you find a good location with a good supply of Water and either Substrate or Dirt, the Cultivate converters make more sense as they're more efficient. If not, you can use the Agroponics converter. MKS provides a means of transporting resources around without having to have parts physically connected. Parts that are within 2000m of each other can be pulled from one part into another. In addition, ISM or Logistics module, resources can be moved into "Planetary Logistics" where they can be retrieved from any other ISM or Logistics module. Resources are only pulled when necessary by a part that is performing an operation. The resources are pulled in bulk when required, instead of of trickling in. See Functions (Logistics) for more information. Orbital stations can be created from any of the MKS parts, but the Tundra parts are especially designed for that task, as well as the Expandable Habitats. By design, orbital stations can never be 100% self sufficient as they have no source of resources. They also cannot participate in Planetary Logistics, at least not yet. See Base Construction for more information. Particular Kerbals provide increases in the efficiencies of particular operations. Also, their experience level impacts the part efficiencies. Initially, you may be able to get by with just Jeb and Bill, but eventually, you'll need to bring additional, less experienced scientists, engineers, and pilots. You can use the Tundra Training Akademy to raise their experience, or train them in the traditional way. Supplies are not just food. Food is in fact a very small amount of that mass (less than 10%). Supplies are 'everything a Kerbal needs to keep alive and reasonably healthy'. 90% of that mass is water. For drinking, as well as sanitation and food prep. These are based on NASA numbers. 
'Mulch' is just a nice way of saying waste water / grey water / candy bar wrappers / kerbal poo. The two are mass neutral - i.e. there is no mass loss in any of the processes. Your simplest conversion is a basic reclamation system / biological filter / algae tank. Take your waste output, recycle it with a bit of fertilizer to grow new stuff. Easy peasy. (that is, 10kg of supplies become 10kg of mulch. 10kg of mulch plus 1kg of Fertilizer becomes 11t of supplies. So as long as you have fertilizer to feed back into the equation, you can keep the algae tanks filled indefinitely. Cultivation (i.e. making a much larger growth of simple foods - algae, etc.). Takes some medium (Substrate or Dirt), Water, and Fertilizer to make more supplies than you could make purely by recycling mulch. The next step up beyond that would be complex plants and simple edible animals (fish, etc.). NASA has toyed with this as well. This is covered under Organics in MKS. And for this you need everything for cultivation (just a lot more of it) plus a starting stock of Organics. There are more steps to prepare them (i.e. having a pig is nice, but it takes more work to turn it into wrapped packages of bacon and pork chops), but that's the basics. Recyclables and Machinery are like 'life support for stuff'. Kerbals consume supplies and produce mulch. Mechanical processes and conversions consume machinery and produce recyclables. So same concept in parallel. RoverDude provided the following clarification before releasing 0.50.9. It gives a great insight into how he thinks when playing with KSP, so it gives some insight into how he designed this mod. Note that the referenced ratios and amounts are for 0.50.9. They may not be exact for other releases. Assembly is just taking some stuff and turning it into other stuff. Machinery and ColonySupplies do this in a lossless way (that is, toss in five tons of raw materials, you get five tons of stuff). 
Think of this as (literally) people doing simple construction with generic pre-fab parts, or packaging things up in the case of ColonySupplies. This is space efficient, and no real space to improve the process as it is already lossless. Fabrication is where we take raw materials (sheets of metal and polymers, bins of chemicals, etc.) and make a finished good. These are also lossless (we assume any scrap is recycled), with the exception of SpecializedParts, where we lose some of our silicon. And even in that wors-case, the 50% loss is taken by the silicon (the rares/exotics are reclaimed). Since this process as well is also pretty lossless except for SpecParts, but that's a small percentage of our used mass downstream in assembly anyway. So going one step down... we get to refining. Since this is the step where we're taking rock and getting valuable stuff out of it, the lossiness is significant (to the point where you'd never want to ship around raw ore). The standard here is 25:1 for everything other than dirt, which is 50:1. Resource Lodes (coming in the next update) are the exception, in that they represent almost pure deposits (i.e. you are actively seeking and retrieving these vastly superior chunks vs. passively pulling stuff out of rocks). Optimistically, this could drop to 10:1 or even 5:1, though below that would get a little sketchy. But you are never going to realistically live in a world where your refinery is the same size as your fabricator with 100% efficiency and throughput on both ends as that would just not make sense. In real world terms, this number is reasonable. While you can get really good yields out of high quality iron ore (say, a 3:1 ratio), copper ore would be more like a 100:1 ratio. So the refinery to make that iron ingot will be significantly larger than the machine shop that shapes it into rivets and panels if you want said machine shop to operate non-stop at 100% capacity. In gameplay terms, there are a couple of goals. 
First, to discourage the whole 'pokemon' base approach where you just include one of each module (and expect it to work with 100% efficiency). Second, to give a valid reason why you would want to shuffle around shipments of refined materials vs ores. That being said, we've always had the issue of folks picking lousy spots to mine on, then getting upset when they have to drill spam. Active harvesting sorts some of this, but prospecting remains the best approach. But even with that, I can see a yield change from 25:1 to 5:1 being a reasonable tweak. Let's run through some examples (and science!). A refinery, fully tuned for mineral processing and with a level 5 engineer, would take in 0.114375 minerals per second. So let's extrapolate this out. Assume I have a smallish industrial base with reasonable output levels. I have a level 5 engineer, two refineries and two drills. Based on the numbers above, I can conservatively expect to kick out 0.2 tons of harvested materials per second from the drills, which translates to 0.008 of refined materials per second. At that low of a volume, I can get away with a single workshop (it can handle up to 0.028 per second). That lets me crank out a ton of material kits every 34 hours. Now, let us scale things up a bit (and optimize). Unless we're on lousy deposits, I can feed a single refinery with a single drill. Worst case I need two. So let's go with five refineries total to match the 2/1/2 ratio of inputs for MaterialKits. That gives me an effective rate of 0.571875 per second in raw materials, or 0.022875 in refined materials (and material kits!) or, 1 ton of material kits in 12 hours. And still in the range of using the workshop. So if we extrapolate this out and assume that drills are not the bottleneck (i.e. their function is more about base selection and actually prospecting...) and we assume dedicated refinery/drill pairs per resource, then I should be able to kick out about 15 tons per kerbal month. 
For perspective, that's the earth equivalent of extracting and refining all of the materials to build a Toyota Camry in a week. TL;DR: Balance reasons and tweaks above. Refineries get a boost. Now onto other stuff!
# -*- coding: utf-8 -*-
"""Setup script for the Allura base distribution."""
try:
    from setuptools import setup, find_packages
except ImportError:
    # Bootstrap setuptools when it is not already installed.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

# Read __version__ from the package without importing it (importing allura
# would pull in its full dependency chain at build time).
# NOTE: the original used the Python-2-only statement form
# ``exec open('allura/version.py').read()`` which is a SyntaxError under
# Python 3 and leaked the file handle; the parenthesized call form below is
# valid on both interpreters and the ``with`` block closes the file.
with open('allura/version.py') as _version_file:
    exec(_version_file.read())

PROJECT_DESCRIPTION='''
Allura is an open source implementation of a software "forge", a web site
that manages source code repositories, bug reports, discussions, mailing lists,
wiki pages, blogs and more for any number of individual projects.
'''
setup(
    name='Allura',
    version=__version__,
    description='Base distribution of the Allura development platform',
    long_description=PROJECT_DESCRIPTION,
    author='SourceForge Team',
    author_email='develop@discussion.allura.p.re.sf.net',
    url='http://sourceforge.net/p/allura',
    keywords='sourceforge allura turbogears pylons jinja2 mongodb rabbitmq',
    license='Apache License, http://www.apache.org/licenses/LICENSE-2.0',
    platforms=[
        'Linux',
        'MacOS X',
        ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Pylons',
        'Framework :: TurboGears',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2.6',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
        'License :: OSI Approved :: Apache Software License',
        ],
    install_requires=[
        "TurboGears2",
        "pypeline",
        "datadiff",
        "BeautifulSoup",
        "PasteScript",
        "Babel >= 0.9.4",
        "jinja2",
        "pysolr",
        "Markdown >= 2.0.3",
        "Pygments >= 1.1.1",
        "python-openid >= 2.2.4",
        "EasyWidgets >= 0.1.1",
        "PIL >= 1.1.7",
        "iso8601",
        "chardet >= 1.0.1",
        "feedparser >= 5.0.1",
        "oauth2 >= 1.2.0",
        "Ming >= 0.2.2dev-20110930",
        ],
    setup_requires=["PasteScript >= 1.7"],
    paster_plugins=['PasteScript', 'Pylons', 'TurboGears2', 'Ming'],
    packages=find_packages(exclude=['ez_setup']),
    include_package_data=True,
    test_suite='nose.collector',
    tests_require=['WebTest >= 1.2', 'BeautifulSoup', 'poster', 'nose'],
    package_data={'allura': ['i18n/*/LC_MESSAGES/*.mo',
                             'templates/**.html',
                             'templates/**.py',
                             'templates/**.xml',
                             'templates/**.txt',
                             'public/*/*/*/*/*',
                             ]},
    message_extractors={'allura': [
            ('**.py', 'python', None),
            ('templates/**.mako', 'mako', None),
            ('templates/**.html', 'genshi', None),
            ('public/**', 'ignore', None)]},
    entry_points="""
    [paste.app_factory]
    main = allura.config.middleware:make_app
    task = allura.config.middleware:make_task_app
    tool_test = allura.config.middleware:make_tool_test_app

    [paste.app_install]
    main = pylons.util:PylonsInstaller
    tool_test = pylons.util:PylonsInstaller

    [allura]
    profile = allura.ext.user_profile:UserProfileApp
    admin = allura.ext.admin:AdminApp
    search = allura.ext.search:SearchApp
    home = allura.ext.project_home:ProjectHomeApp

    [allura.auth]
    local = allura.lib.plugin:LocalAuthenticationProvider
    ldap = allura.lib.plugin:LdapAuthenticationProvider

    [allura.user_prefs]
    local = allura.lib.plugin:LocalUserPreferencesProvider

    [allura.project_registration]
    local = allura.lib.plugin:LocalProjectRegistrationProvider

    [allura.theme]
    allura = allura.lib.plugin:ThemeProvider

    [paste.paster_command]
    taskd = allura.command.taskd:TaskdCommand
    task = allura.command.taskd:TaskCommand
    models = allura.command:ShowModelsCommand
    reindex = allura.command:ReindexCommand
    ensure_index = allura.command:EnsureIndexCommand
    script = allura.command:ScriptCommand
    set-tool-access = allura.command:SetToolAccessCommand
    smtp_server=allura.command:SMTPServerCommand
    create-neighborhood = allura.command:CreateNeighborhoodCommand
    update-neighborhood-home-tool = allura.command:UpdateNeighborhoodCommand
    create-trove-categories = allura.command:CreateTroveCategoriesCommand
    set-neighborhood-features = allura.command:SetNeighborhoodFeaturesCommand

    [easy_widgets.resources]
    ew_resources=allura.config.resources:register_ew_resources

    [easy_widgets.engines]
    jinja = allura.config.app_cfg:JinjaEngine
    """,
)
That night, the audience, including me, also got to hear from Greta and from a young woman named Rain Pierce, who graduated from Merci’s Refuge. Both women shared gripping stories of God’s redemption through abuse and violence as well as inspiring accounts of babies being saved. Their stories are a powerful glimpse of how God is working through these pregnancy centers. I was so moved and inspired by their stories I wanted you to hear them. Greta and Rain are guests on our Focus on the Family Broadcast “Reaching Women in Crisis.” Tune in on your local radio station, online, on iTunes, via Podcast, or on our free phone app, or watch the full program on our YouTube channel. Pregnancy resource centers are doing fantastic work in the name of the Lord to save babies and to minister to their mothers. Focus supports them through our Option Ultrasound program, which provides grants for sonogram training and technology. To date, we have distributed more than 800 grants to centers around the country that have been used to purchase ultrasound machines and provide medical training to workers, and more than 425,000 babies have been saved as a result of the effort. I’m looking forward to hitting the million mark sometime in the near future. Saving babies is core to what we do here at Focus on the Family, and it’s a core component of the Christian faith, to be a voice for the voiceless. It’s not always popular, but it’s the right thing to do. With that in mind, I’d like to extend an invitation for you to become a special partner with us through our monthly “Friends of Focus on the Family” program. What’s amazing is that it only takes $60 to save a baby’s life because we’re able to partner with organizations like Greta’s by providing ultrasound machines and additional resources. If you haven’t supported Focus before, why not support us now with a pledge that will save a baby’s life? It’s one of the best investments you can make in the kingdom of God. 
To make your pledge, or for more information, visit our website or call 1-800-A-FAMILY (232-6459). Thank you for touching others with the love of Christ. Interesting how your "support" for women is the anti-choice kind which doesn't trust women enough to make their own medical decisions, but wants the government or, even worse, religions to make those choices for them. When's the last time you addressed the REAL threat to women in the form of domestic violence; the greatest health threat to women 18-44 is the husband or boyfriend in their own household.
from flask import Flask, render_template, request, session, Response
import argparse
from vivo2notld.definitions import definitions, list_definitions
from vivo2notld.utility import execute, execute_list

# Supported serialization formats mapped to their HTTP content types.
formats = {
    "xml": "text/xml",
    "json": "text/json",
    "yaml": "text/yaml"
}

app = Flask(__name__)

# Module-level defaults; overridden from command-line arguments in __main__.
default_definition = None
default_list_definition = None
default_subject_namespace = None
default_subject_identifier = None
default_list_subject_namespace = None
default_list_subject_identifier = None
default_endpoint = None
default_username = None
default_password = None
default_format = None
default_definition_type = "list"
default_list_limit = 10
default_is_limited = False
default_list_offset = None
default_is_offset = False


def get_definitions(defs):
    """Map each definition key to a human-readable label (underscores -> spaces)."""
    return {definition: definition.replace("_", " ") for definition in defs}


@app.route('/', methods=["GET"])
def crosswalk_form(output=None, obj=None, graph=None, query=None, select_query=None, count_query=None):
    """Render the crosswalk form, pre-filled from the session or the defaults.

    Also invoked directly from crosswalk() to render results, in which case
    output/obj/graph/query/select_query/count_query carry the crosswalk
    results to display.
    """
    return render_template("crosswalk_form.html",
                           definition_type=session.get("definition_type") or default_definition_type,
                           definitions=get_definitions(definitions),
                           list_definitions=get_definitions(list_definitions),
                           definition=session.get("definition") or default_definition,
                           list_definition=session.get("list_definition") or default_list_definition,
                           subject_namespace=session.get("subject_namespace") or default_subject_namespace,
                           subject_identifier=session.get("subject_identifier") or default_subject_identifier,
                           list_subject_namespace=session.get("list_subject_namespace") or default_list_subject_namespace,
                           list_subject_identifier=session.get("list_subject_identifier") or default_list_subject_identifier,
                           list_limit=session.get("list_limit") or default_list_limit,
                           is_limited=session.get("is_limited") or default_is_limited,
                           list_offset=session.get("list_offset") or default_list_offset,
                           is_offset=session.get("is_offset") or default_is_offset,
                           endpoint=session.get("endpoint") or default_endpoint,
                           username=session.get("username") or default_username,
                           password=session.get("password") or default_password,
                           format=session.get("format") or default_format,
                           output_html=session.get("output_html", True),
                           output=output,
                           obj=obj,
                           graph=graph.serialize(format="turtle").decode("utf-8") if graph else None,
                           query=query,
                           select_query=select_query,
                           count_query=count_query)


@app.route('/', methods=["POST"])
def crosswalk():
    """Execute a crosswalk from the submitted form and render the result.

    All form values are stored in the session first so the form can be
    re-populated on the next GET.
    """
    session["definition"] = request.form.get("definition")
    session["list_definition"] = request.form.get("list_definition")
    session["subject_namespace"] = request.form.get("subject_namespace")
    session["subject_identifier"] = request.form.get("subject_identifier")
    session["list_subject_namespace"] = request.form.get("list_subject_namespace")
    session["list_subject_identifier"] = request.form.get("list_subject_identifier")
    session["list_limit"] = request.form.get("list_limit")
    # Checkboxes only appear in the form data when checked.
    session["is_limited"] = "is_limited" in request.form
    session["list_offset"] = request.form.get("list_offset")
    session["is_offset"] = "is_offset" in request.form
    session["endpoint"] = request.form.get("endpoint")
    session["username"] = request.form.get("username")
    session["password"] = request.form.get("password")
    session["format"] = request.form.get("format")
    session["output_html"] = "output_html" in request.form
    session["definition_type"] = request.form.get("definition_type")

    select_q = None
    count_q = None
    # Infer the definition type when the form did not state it explicitly:
    # a lone "definition" field means an individual crosswalk, a lone
    # "list_definition" field means a list crosswalk.
    definition_type = request.form.get("definition_type")
    if not definition_type:
        if "definition" in request.form and "list_definition" not in request.form:
            definition_type = "individual"
        elif "definition" not in request.form and "list_definition" in request.form:
            definition_type = "list"
        else:
            definition_type = default_definition_type

    if definition_type == "individual":
        o, s, g, q = execute(definitions[request.form.get("definition", default_definition)],
                             request.form.get("subject_namespace", default_subject_namespace),
                             request.form.get("subject_identifier", default_subject_identifier),
                             request.form.get("endpoint", default_endpoint),
                             request.form.get("username", default_username),
                             request.form.get("password", default_password),
                             serialization_format=request.form.get("format", default_format))
    else:
        o, s, g, q, select_q, count_q = execute_list(
            list_definitions[request.form.get("list_definition", default_list_definition)],
            # Fixed: fall back to the list-specific defaults here (previously
            # the individual-subject defaults were used by mistake, so the
            # --list-namespace/--list-identifier settings were ignored).
            request.form.get("list_subject_namespace", default_list_subject_namespace),
            request.form.get("list_subject_identifier", default_list_subject_identifier),
            request.form.get("endpoint", default_endpoint),
            request.form.get("username", default_username),
            request.form.get("password", default_password),
            serialization_format=request.form.get("format", default_format),
            offset=request.form.get("list_offset", default_list_offset) if "is_offset" in request.form else None,
            limit=request.form.get("list_limit", default_list_limit) if "is_limited" in request.form else None,
        )
    if "output_html" in request.form:
        return crosswalk_form(output=o, obj=s, graph=g, query=q, select_query=select_q, count_query=count_q)
    else:
        return Response(o, content_type=formats[request.form.get("format", default_format)])


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--port", type=int, default=5000,
                        help="The port the service should run on. Default is 5000.")
    parser.add_argument("--format", default="json", choices=formats.keys(),
                        help="The format for serializing. Default is json.")
    parser.add_argument("--endpoint", dest="endpoint",
                        help="Endpoint for SPARQL Query of VIVO instance,e.g., http://localhost/vivo/api/sparqlQuery.")
    parser.add_argument("--username", dest="username", help="Username for VIVO root.")
    parser.add_argument("--password", dest="password", help="Password for VIVO root.")
    parser.add_argument("--namespace", default="http://vivo.mydomain.edu/individual/",
                        help="Namespace for the subject. Default is http://vivo.mydomain.edu/individual/.")
    parser.add_argument("--identifier", help="Identifier for the subject, e.g., n123.")
    parser.add_argument("--list-namespace", default="http://vivo.mydomain.edu/individual/",
                        help="Namespace for the list subject. Default is http://vivo.mydomain.edu/individual/.")
    parser.add_argument("--list-identifier", help="Identifier for the list subject, e.g., n123.")
    parser.add_argument("--definition", default="person", choices=definitions.keys(), help="Default is person.")
    parser.add_argument("--list-definition", default="person_summary_with_positions_in",
                        choices=list_definitions.keys(),
                        help="Default is person_summary_with_positions_in.")
    parser.add_argument("--limit", type=int, help="List limit.")
    parser.add_argument("--offset", type=int, help="List offset.")

    # Parse the command line and install the values as the module defaults.
    args = parser.parse_args()
    app.debug = args.debug
    app.secret_key = "vivo2notld"
    default_definition = args.definition
    default_list_definition = args.list_definition
    default_subject_namespace = args.namespace
    default_subject_identifier = args.identifier
    default_list_subject_namespace = args.list_namespace
    default_list_subject_identifier = args.list_identifier
    default_endpoint = args.endpoint
    default_username = args.username
    default_password = args.password
    default_format = args.format
    if args.limit:
        default_list_limit = args.limit
        default_is_limited = True
    if args.offset:
        default_list_offset = args.offset
        default_is_offset = True

    app.run(host="0.0.0.0", port=args.port)
The collection consists of a five-page play written by Mary Foster while she was teaching at Ansley, Custer County, Nebraska, in the 1950s. The play is about a missing five-year-old boy from the Ihlow (Eloe) family and members of the Kerr family helping to look for him. Themes explored in the play include friendship; the struggles of pioneer life; the importance of education; and the immigrant experience. The play is based on a true story.
from __future__ import absolute_import import logging import os import json from colorlog import ColoredFormatter import driveami.keys as keys import driveami.scripts as scripts from driveami.reduce import (Reduce, AmiVersion) from driveami.serialization import (Datatype, make_serializable, save_calfile_listing, save_rawfile_listing, load_listing) from ._version import get_versions __version__ = get_versions()['version'] del get_versions logger = logging.getLogger('ami') def ensure_dir(dirname): if not os.path.isdir(dirname): os.makedirs(dirname) def process_rawfile(rawfile, output_dir, reduce, script, file_logging=True ): """ A convenience function applying sensible defaults to reduce a rawfile. Args: rawfile: Name of a file in the ami data dir, e.g. "SWIFT121101-121101.raw" output_dir: Folder where UVFITS for the target and calibrator will be output. reduce: instance of ami.Reduce array: 'LA' or 'SA' (Default: LA) script: Reduction commands. Returns: - A dictionary containing information about the rawfile, e.g. pointing, calibrator name, rain modulation. 
See also: ``ami.keys`` """ r = reduce if file_logging: file_logdir = output_dir else: file_logdir = None r.set_active_file(rawfile, file_logdir) r.run_script(script) r.update_flagging_info() write_command_overrides = {} if r.ami_version=='legacy': write_command_overrides['channels'] = '3-8' if r.files[rawfile]['raster']: write_command_overrides['fits_or_multi'] = 'multi' write_command_overrides['offsets'] = 'all' r.write_files(rawfile, output_dir, write_command_overrides=write_command_overrides) r.files[rawfile][keys.obs_name] = os.path.splitext(rawfile)[0] info_filename = os.path.splitext(rawfile)[0] + '.json' with open(os.path.join(output_dir, info_filename), 'w') as f: json.dump(make_serializable(r.files[rawfile]), f, sort_keys=True, indent=4) return r.files[rawfile] def get_color_log_formatter(): date_fmt = "%y-%m-%d (%a) %H:%M:%S" color_formatter = ColoredFormatter( "%(log_color)s%(asctime)s:%(levelname)-8s%(reset)s %(blue)s%(message)s", datefmt=date_fmt, reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red', } ) return color_formatter def get_color_stdout_loghandler(level): stdout_loghandler = logging.StreamHandler() stdout_loghandler.setFormatter(get_color_log_formatter()) stdout_loghandler.setLevel(level) return stdout_loghandler
FC Barcelona vs Manchester United - Final Champions 2011 Tornem a Wembley! 110 Back Hawk Air Cadet Squadron C.O.'s Parade. Held April 05, 2011. One of many held throughout the year leading up to the Annual Parade. This vid shows just the March-Past.
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Softwell on 2008-07-10.
Copyright (c) 2008 Softwell. All rights reserved.
"""

class GnrCustomWebPage(object):
    # Standard table-handler page for the album table.
    maintable = 'libcd.album'
    py_requires = 'public:Public,standard_tables:TableHandler,public:IncludedView'

    ######################## STANDARD TABLE OVERRIDDEN METHODS ###############
    def windowTitle(self):
        # Browser window title.
        return '!!Album'

    def pageAuthTags(self, method=None, **kwargs):
        # No authorization tags required for this page.
        return ''

    def tableWriteTags(self):
        # Writing is not restricted by tags.
        return ''

    def tableDeleteTags(self):
        # Deleting is not restricted by tags.
        return ''

    def barTitle(self):
        # Title shown in the page bar.
        return '!!Album'

    def columnsBase(self):
        # Columns shown in the base listing.
        return 'year,title,rating'

    def orderBase(self):
        # Default ordering column.
        return 'title'

    def conditionBase(self):
        # No base query condition.
        pass

    def queryBase(self):
        # Default query: any record whose title contains anything.
        return dict(column='title', op='contains', val='%')

    ############################## FORM METHODS ##############################
    def formBase(self, parentBC, disabled=False, **kwargs):
        # Build the record-edit form inside the parent border container.
        form_pane = parentBC.contentPane(**kwargs)
        builder = form_pane.formbuilder(cols=2, border_spacing='4px',
                                        disabled=disabled)
        for field_name in ('title', 'year', 'rating', 'artist_id'):
            builder.field(field_name)
Our Cloud Backup service offers dependable, low-touch data protection in the cloud, eliminating the need for manual tape and disk based backups. The agentless system satisfies regulatory needs with long-term, offsite cloud storage, and supports certificates of destruction for data auditing and compliance. Data is vital to business, but many organizations continue to use data protection strategies that are decades old. Backup policies often rely on older technologies like tape, are fragmented and demand manual intervention. Our Cloud Backup service offers fast and secure data transfer to offsite locations. The service is server hardware agnostic and backup data is compressed, de-duplicated and encrypted (FIPS 140-2) before being sent over the network to one of our seismically-hardened, secure cloud data centers for storage. Data is fully recoverable, whether it be a single file or a major multi-location disaster recovery. Our low-touch Cloud Backup solution excels at safeguarding data distributed across LANs, desktops, laptops and smartphones. The agentless disk based Cloud Backup is an automated, unattended process that requires minimal manual intervention. Cloud Backup allows you to focus resources on tasks that are strategic in nature, not tactical. Backups are easy to schedule, freeing up resources and reducing backup related costs. Recovering data protected by our Cloud Backup service is fast and simple. Cloud Backup support professionals are available 24x7 to perform restores, or maintain greater control over the recovery process with our client-initiated restores. Fast data recovery from Cloud Backup helps you meet service level requirements and improve recovery point objectives (RPO) and recovery time objectives (RTO) for critical business applications. RPO measures how much data an application can tolerate losing following an outage. Frequent incremental Cloud Backups will help minimize RPO. RTO measures allowable downtime for business applications. 
Following an outage, data must be recovered and application access restored before the RTO time has elapsed. Round-the-clock access to Cloud Backup support professionals will help you achieve stringent RTO service levels. Cloud Backup helps you achieve compliance by ensuring all data is backed up, stored offsite for as long as needed, and readily available should a restore be necessary. Data is stored fully encrypted on enterprise-class storage in our premier global data centers. Cloud Backup gives you the ability to generate a certificate of destruction for use in your data auditing and compliance processes. Message-level restore is supported for Microsoft Exchange, Novell GroupWise, Lotus Notes and Novell Netware. Database hot backup is supported for Microsoft SQL Server, Oracle, MySQL and PostgreSQL. Cloud Backup integrates with VMWare vSphere, Microsoft Hyper-V and Citrix XenSource.
# Copyright 2010-2011 OpenStack Foundation # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_serialization import jsonutils from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.views import addresses as views_addresses from nova.api.openstack.compute.views import flavors as views_flavors from nova.api.openstack.compute.views import images as views_images from nova import availability_zones as avail_zone from nova.compute import api as compute from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova.network import security_group_api from nova import objects from nova.objects import fields from nova.objects import virtual_interface from nova.policies import extended_server_attributes as esa_policies from nova.policies import flavor_extra_specs as fes_policies from nova.policies import servers as servers_policies from nova import utils LOG = logging.getLogger(__name__) class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = "servers" _progress_statuses = ( "ACTIVE", "BUILD", "REBUILD", "RESIZE", "VERIFY_RESIZE", "MIGRATING", ) _fault_statuses = ( "ERROR", "DELETED" ) # These are the lazy-loadable instance attributes required for showing # details about an instance. 
Add to this list as new things need to be # shown. _show_expected_attrs = ['flavor', 'info_cache', 'metadata'] def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() self._address_builder = views_addresses.ViewBuilder() self._image_builder = views_images.ViewBuilder() self._flavor_builder = views_flavors.ViewBuilder() self.compute_api = compute.API() def create(self, request, instance): """View that should be returned when an instance is created.""" server = { "server": { "id": instance["uuid"], "links": self._get_links(request, instance["uuid"], self._collection_name), # NOTE(sdague): historically this was the # os-disk-config extension, but now that extensions # are gone, we merge these attributes here. "OS-DCF:diskConfig": ( 'AUTO' if instance.get('auto_disk_config') else 'MANUAL'), }, } self._add_security_grps(request, [server["server"]], [instance], create_request=True) return server def basic(self, request, instance, show_extra_specs=False, show_extended_attr=None, show_host_status=None, show_sec_grp=None, bdms=None, cell_down_support=False, show_user_data=False): """Generic, non-detailed view of an instance.""" if cell_down_support and 'display_name' not in instance: # NOTE(tssurya): If the microversion is >= 2.69, this boolean will # be true in which case we check if there are instances from down # cells (by checking if their objects have missing keys like # `display_name`) and return partial constructs based on the # information available from the nova_api database. 
return { "server": { "id": instance.uuid, "status": "UNKNOWN", "links": self._get_links(request, instance.uuid, self._collection_name), }, } return { "server": { "id": instance["uuid"], "name": instance["display_name"], "links": self._get_links(request, instance["uuid"], self._collection_name), }, } def get_show_expected_attrs(self, expected_attrs=None): """Returns a list of lazy-loadable expected attributes used by show This should be used when getting the instances from the database so that the necessary attributes are pre-loaded before needing to build the show response where lazy-loading can fail if an instance was deleted. :param list expected_attrs: The list of expected attributes that will be requested in addition to what this view builder requires. This method will merge the two lists and return what should be ultimately used when getting an instance from the database. :returns: merged and sorted list of expected attributes """ if expected_attrs is None: expected_attrs = [] # NOTE(mriedem): We sort the list so we can have predictable test # results. return sorted(list(set(self._show_expected_attrs + expected_attrs))) def _show_from_down_cell(self, request, instance, show_extra_specs, show_server_groups): """Function that constructs the partial response for the instance.""" ret = { "server": { "id": instance.uuid, "status": "UNKNOWN", "tenant_id": instance.project_id, "created": utils.isotime(instance.created_at), "links": self._get_links( request, instance.uuid, self._collection_name), }, } if 'flavor' in instance: # If the key 'flavor' is present for an instance from a down cell # it means that the request is ``GET /servers/{server_id}`` and # thus we include the information from the request_spec of the # instance like its flavor, image, avz, and user_id in addition to # the basic information from its instance_mapping. 
# If 'flavor' key is not present for an instance from a down cell # down cell it means the request is ``GET /servers/detail`` and we # do not expose the flavor in the response when listing servers # with details for performance reasons of fetching it from the # request specs table for the whole list of instances. ret["server"]["image"] = self._get_image(request, instance) ret["server"]["flavor"] = self._get_flavor(request, instance, show_extra_specs) # in case availability zone was not requested by the user during # boot time, return UNKNOWN. avz = instance.availability_zone or "UNKNOWN" ret["server"]["OS-EXT-AZ:availability_zone"] = avz ret["server"]["OS-EXT-STS:power_state"] = instance.power_state # in case its an old request spec which doesn't have the user_id # data migrated, return UNKNOWN. ret["server"]["user_id"] = instance.user_id or "UNKNOWN" if show_server_groups: context = request.environ['nova.context'] ret['server']['server_groups'] = self._get_server_groups( context, instance) return ret @staticmethod def _get_host_status_unknown_only(context): # We will use the unknown_only variable to tell us what host status we # can show, if any: # * unknown_only = False means we can show any host status. # * unknown_only = True means that we can only show host # status: UNKNOWN. If the host status is anything other than # UNKNOWN, we will not include the host_status field in the # response. # * unknown_only = None means we cannot show host status at all and # we will not include the host_status field in the response. unknown_only = None # Check show:host_status policy first because if it passes, we know we # can show any host status and need not check the more restrictive # show:host_status:unknown-only policy. if context.can( servers_policies.SERVERS % 'show:host_status', fatal=False): unknown_only = False # If we are not allowed to show any/all host status, check if we can at # least show only the host status: UNKNOWN. 
elif context.can( servers_policies.SERVERS % 'show:host_status:unknown-only', fatal=False): unknown_only = True return unknown_only def show(self, request, instance, extend_address=True, show_extra_specs=None, show_AZ=True, show_config_drive=True, show_extended_attr=None, show_host_status=None, show_keypair=True, show_srv_usg=True, show_sec_grp=True, show_extended_status=True, show_extended_volumes=True, bdms=None, cell_down_support=False, show_server_groups=False, show_user_data=True): """Detailed view of a single instance.""" if show_extra_specs is None: # detail will pre-calculate this for us. If we're doing show, # then figure it out here. show_extra_specs = False if api_version_request.is_supported(request, min_version='2.47'): context = request.environ['nova.context'] show_extra_specs = context.can( fes_policies.POLICY_ROOT % 'index', fatal=False) if cell_down_support and 'display_name' not in instance: # NOTE(tssurya): If the microversion is >= 2.69, this boolean will # be true in which case we check if there are instances from down # cells (by checking if their objects have missing keys like # `display_name`) and return partial constructs based on the # information available from the nova_api database. 
return self._show_from_down_cell( request, instance, show_extra_specs, show_server_groups) ip_v4 = instance.get('access_ip_v4') ip_v6 = instance.get('access_ip_v6') server = { "server": { "id": instance["uuid"], "name": instance["display_name"], "status": self._get_vm_status(instance), "tenant_id": instance.get("project_id") or "", "user_id": instance.get("user_id") or "", "metadata": self._get_metadata(instance), "hostId": self._get_host_id(instance), "image": self._get_image(request, instance), "flavor": self._get_flavor(request, instance, show_extra_specs), "created": utils.isotime(instance["created_at"]), "updated": utils.isotime(instance["updated_at"]), "addresses": self._get_addresses(request, instance, extend_address), "accessIPv4": str(ip_v4) if ip_v4 is not None else '', "accessIPv6": str(ip_v6) if ip_v6 is not None else '', "links": self._get_links(request, instance["uuid"], self._collection_name), # NOTE(sdague): historically this was the # os-disk-config extension, but now that extensions # are gone, we merge these attributes here. "OS-DCF:diskConfig": ( 'AUTO' if instance.get('auto_disk_config') else 'MANUAL'), }, } if server["server"]["status"] in self._fault_statuses: _inst_fault = self._get_fault(request, instance) if _inst_fault: server['server']['fault'] = _inst_fault if server["server"]["status"] in self._progress_statuses: server["server"]["progress"] = instance.get("progress", 0) context = request.environ['nova.context'] if show_AZ: az = avail_zone.get_instance_availability_zone(context, instance) # NOTE(mriedem): The OS-EXT-AZ prefix should not be used for new # attributes after v2.1. They are only in v2.1 for backward compat # with v2.0. 
server["server"]["OS-EXT-AZ:availability_zone"] = az or '' if show_config_drive: server["server"]["config_drive"] = instance["config_drive"] if show_keypair: server["server"]["key_name"] = instance["key_name"] if show_srv_usg: for k in ['launched_at', 'terminated_at']: key = "OS-SRV-USG:" + k # NOTE(danms): Historically, this timestamp has been generated # merely by grabbing str(datetime) of a TZ-naive object. The # only way we can keep that with instance objects is to strip # the tzinfo from the stamp and str() it. server["server"][key] = (instance[k].replace(tzinfo=None) if instance[k] else None) if show_sec_grp: self._add_security_grps(request, [server["server"]], [instance]) if show_extended_attr is None: show_extended_attr = context.can( esa_policies.BASE_POLICY_NAME, fatal=False) if show_extended_attr: properties = ['host', 'name', 'node'] if api_version_request.is_supported(request, min_version='2.3'): # NOTE(mriedem): These will use the OS-EXT-SRV-ATTR prefix # below and that's OK for microversion 2.3 which is being # compatible with v2.0 for the ec2 API split out from Nova. # After this, however, new microversions should not be using # the OS-EXT-SRV-ATTR prefix. properties += ['reservation_id', 'launch_index', 'hostname', 'kernel_id', 'ramdisk_id', 'root_device_name'] # NOTE(gmann): Since microversion 2.75, PUT and Rebuild # response include all the server attributes including these # extended attributes also. But microversion 2.57 already # adding the 'user_data' in Rebuild response in API method. # so we will skip adding the user data attribute for rebuild # case. 'show_user_data' is false only in case of rebuild. if show_user_data: properties += ['user_data'] for attr in properties: if attr == 'name': key = "OS-EXT-SRV-ATTR:instance_%s" % attr elif attr == 'node': key = "OS-EXT-SRV-ATTR:hypervisor_hostname" else: # NOTE(mriedem): Nothing after microversion 2.3 should use # the OS-EXT-SRV-ATTR prefix for the attribute key name. 
key = "OS-EXT-SRV-ATTR:%s" % attr server["server"][key] = getattr(instance, attr) if show_extended_status: # NOTE(gmann): Removed 'locked_by' from extended status # to make it same as V2. If needed it can be added with # microversion. for state in ['task_state', 'vm_state', 'power_state']: # NOTE(mriedem): The OS-EXT-STS prefix should not be used for # new attributes after v2.1. They are only in v2.1 for backward # compat with v2.0. key = "%s:%s" % ('OS-EXT-STS', state) server["server"][key] = instance[state] if show_extended_volumes: # NOTE(mriedem): The os-extended-volumes prefix should not be used # for new attributes after v2.1. They are only in v2.1 for backward # compat with v2.0. add_delete_on_termination = api_version_request.is_supported( request, min_version='2.3') if bdms is None: bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid( context, [instance["uuid"]]) self._add_volumes_attachments(server["server"], bdms, add_delete_on_termination) if (api_version_request.is_supported(request, min_version='2.16')): if show_host_status is None: unknown_only = self._get_host_status_unknown_only(context) # If we're not allowed by policy to show host status at all, # don't bother requesting instance host status from the compute # API. 
if unknown_only is not None: host_status = self.compute_api.get_instance_host_status( instance) # If we are allowed to show host status of some kind, set # the host status field only if: # * unknown_only = False, meaning we can show any status # OR # * if unknown_only = True and host_status == UNKNOWN if (not unknown_only or host_status == fields.HostStatus.UNKNOWN): server["server"]['host_status'] = host_status if api_version_request.is_supported(request, min_version="2.9"): server["server"]["locked"] = (True if instance["locked_by"] else False) if api_version_request.is_supported(request, min_version="2.73"): server["server"]["locked_reason"] = (instance.system_metadata.get( "locked_reason")) if api_version_request.is_supported(request, min_version="2.19"): server["server"]["description"] = instance.get( "display_description") if api_version_request.is_supported(request, min_version="2.26"): server["server"]["tags"] = [t.tag for t in instance.tags] if api_version_request.is_supported(request, min_version="2.63"): trusted_certs = None if instance.trusted_certs: trusted_certs = instance.trusted_certs.ids server["server"]["trusted_image_certificates"] = trusted_certs if show_server_groups: server['server']['server_groups'] = self._get_server_groups( context, instance) return server def index(self, request, instances, cell_down_support=False): """Show a list of servers without many details.""" coll_name = self._collection_name return self._list_view(self.basic, request, instances, coll_name, False, cell_down_support=cell_down_support) def detail(self, request, instances, cell_down_support=False): """Detailed view of a list of instance.""" coll_name = self._collection_name + '/detail' context = request.environ['nova.context'] if api_version_request.is_supported(request, min_version='2.47'): # Determine if we should show extra_specs in the inlined flavor # once before we iterate the list of instances show_extra_specs = context.can(fes_policies.POLICY_ROOT % 'index', 
                fatal=False)
        else:
            show_extra_specs = False
            show_extended_attr = context.can(
                esa_policies.BASE_POLICY_NAME, fatal=False)

        instance_uuids = [inst['uuid'] for inst in instances]
        bdms = self._get_instance_bdms_in_multiple_cells(context,
                                                         instance_uuids)
        # NOTE(gmann): pass show_sec_grp=False in _list_view() because
        # security groups for detail method will be added by separate
        # call to self._add_security_grps by passing the all servers
        # together. That help to avoid multiple neutron call for each server.
        servers_dict = self._list_view(self.show, request, instances,
                                       coll_name, show_extra_specs,
                                       show_extended_attr=show_extended_attr,
                                       # We process host_status in aggregate.
                                       show_host_status=False,
                                       show_sec_grp=False,
                                       bdms=bdms,
                                       cell_down_support=cell_down_support)

        if api_version_request.is_supported(request, min_version='2.16'):
            unknown_only = self._get_host_status_unknown_only(context)
            # If we're not allowed by policy to show host status at all, don't
            # bother requesting instance host status from the compute API.
            if unknown_only is not None:
                self._add_host_status(list(servers_dict["servers"]), instances,
                                      unknown_only=unknown_only)

        self._add_security_grps(request, list(servers_dict["servers"]),
                                instances)
        return servers_dict

    def _list_view(self, func, request, servers, coll_name, show_extra_specs,
                   show_extended_attr=None, show_host_status=None,
                   show_sec_grp=False, bdms=None, cell_down_support=False):
        """Provide a view for a list of servers.

        :param func: Function used to format the server data
        :param request: API request
        :param servers: List of servers in dictionary format
        :param coll_name: Name of collection, used to generate the next link
                          for a pagination query
        :param show_extra_specs: If the flavor extra specs should be included
                          in the embedded flavor dict of each server.
        :param show_extended_attr: If the server extended attributes should be
                        included in the response dict.
        :param show_host_status: If the host status should be included in
                        the response dict.
        :param show_sec_grp: If the security group should be included in
                        the response dict.
        :param bdms: Instances bdms info from multiple cells.
        :param cell_down_support: True if the API (and caller) support
                                  returning a minimal instance
                                  construct if the relevant cell is
                                  down.
        :returns: Server data in dictionary format
        """
        server_list = [func(request, server, show_extra_specs=show_extra_specs,
                            show_extended_attr=show_extended_attr,
                            show_host_status=show_host_status,
                            show_sec_grp=show_sec_grp, bdms=bdms,
                            cell_down_support=cell_down_support)["server"]
                       for server in servers
                       # Filter out the fake marker instance created by the
                       # fill_virtual_interface_list online data migration.
                       if server.uuid != virtual_interface.FAKE_UUID]
        servers_links = self._get_collection_links(request,
                                                   servers,
                                                   coll_name)
        servers_dict = dict(servers=server_list)

        if servers_links:
            servers_dict["servers_links"] = servers_links

        return servers_dict

    @staticmethod
    def _get_metadata(instance):
        """Return the instance metadata dict, or {} when unset."""
        return instance.metadata or {}

    @staticmethod
    def _get_vm_status(instance):
        """Map the instance's vm/task state to the API status string."""
        # If the instance is deleted the vm and task states don't really
        # matter
        if instance.get("deleted"):
            return "DELETED"
        return common.status_from_state(instance.get("vm_state"),
                                        instance.get("task_state"))

    @staticmethod
    def _get_host_id(instance):
        """Return the obfuscated host id derived from host and project id."""
        host = instance.get("host")
        project = str(instance.get("project_id"))
        return utils.generate_hostid(host, project)

    def _get_addresses(self, request, instance, extend_address=False):
        """Return the addresses view for an instance.

        Returns an empty dict while the server is still building, so
        addresses are hidden until the instance is fully created.
        """
        # Hide server addresses while the server is building.
        if instance.vm_state == vm_states.BUILDING:
            return {}

        context = request.environ["nova.context"]
        networks = common.get_networks_for_instance(context, instance)

        return self._address_builder.index(networks,
                                           extend_address)["addresses"]

    def _get_image(self, request, instance):
        """Return the image view (id + bookmark link), or the empty string
        when the instance has no image_ref (presumably boot-from-volume --
        the code only checks for an empty ref).
        """
        image_ref = instance["image_ref"]
        if image_ref:
            image_id = str(common.get_id_from_href(image_ref))
            bookmark = self._image_builder._get_bookmark_link(request,
                                                              image_id,
                                                              "images")
            return {
                "id": image_id,
                "links": [{
                    "rel": "bookmark",
                    "href": bookmark,
                }],
            }
        else:
            return ""

    def _get_flavor_dict(self, request, instance_type, show_extra_specs):
        """Build the embedded flavor dict used by _get_flavor for
        microversion >= 2.47 responses.
        """
        flavordict = {
            "vcpus": instance_type.vcpus,
            "ram": instance_type.memory_mb,
            "disk": instance_type.root_gb,
            "ephemeral": instance_type.ephemeral_gb,
            "swap": instance_type.swap,
            "original_name": instance_type.name
        }
        if show_extra_specs:
            flavordict['extra_specs'] = instance_type.extra_specs
        return flavordict

    def _get_flavor(self, request, instance, show_extra_specs):
        """Return the flavor view: the embedded flavor dict (>= 2.47) or an
        id + bookmark link; {} if the instance's flavor was removed from
        the DB.
        """
        instance_type = instance.get_flavor()
        if not instance_type:
            LOG.warning("Instance has had its instance_type removed "
                        "from the DB", instance=instance)
            return {}

        if api_version_request.is_supported(request, min_version="2.47"):
            return self._get_flavor_dict(request, instance_type,
                                         show_extra_specs)

        flavor_id = instance_type["flavorid"]
        flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
                                                                  flavor_id,
                                                                  "flavors")
        return {
            "id": str(flavor_id),
            "links": [{
                "rel": "bookmark",
                "href": flavor_bookmark,
            }],
        }

    def _load_fault(self, request, instance):
        """Lazy-load instance.fault by targeting the instance's cell DB.

        Falls back to the already-attached fault when there is no instance
        mapping (or a mapping with no cell), i.e. a legacy environment.
        """
        try:
            mapping = objects.InstanceMapping.get_by_instance_uuid(
                request.environ['nova.context'], instance.uuid)
            if mapping.cell_mapping is not None:
                with nova_context.target_cell(instance._context,
                                              mapping.cell_mapping):
                    return instance.fault
        except exception.InstanceMappingNotFound:
            pass

        # NOTE(danms): No instance mapping at all, or a mapping with no cell,
        # which means a legacy environment or instance.
        return instance.fault

    def _get_fault(self, request, instance):
        """Return the fault dict for the response, or None if no fault.

        The 'details' field is only exposed to admins, or for faults whose
        code is not 500.
        """
        if 'fault' in instance:
            fault = instance.fault
        else:
            fault = self._load_fault(request, instance)

        if not fault:
            return None

        fault_dict = {
            "code": fault["code"],
            "created": utils.isotime(fault["created_at"]),
            "message": fault["message"],
        }

        if fault.get('details', None):
            is_admin = False
            context = request.environ["nova.context"]
            if context:
                is_admin = getattr(context, 'is_admin', False)

            if is_admin or fault['code'] != 500:
                fault_dict['details'] = fault["details"]

        return fault_dict

    def _add_host_status(self, servers, instances, unknown_only=False):
        """Adds the ``host_status`` field to the list of servers

        This method takes care to filter instances from down cells since they
        do not have a host set and as such we cannot determine the host status.

        :param servers: list of detailed server dicts for the API response
            body; this list is modified by reference by updating the server
            dicts within the list
        :param instances: list of Instance objects
        :param unknown_only: whether to show only UNKNOWN host status
        """
        # Filter out instances from down cells which do not have a host field.
        instances = [instance for instance in instances if 'host' in instance]
        # Get the dict, keyed by instance.uuid, of host status values.
        host_statuses = self.compute_api.get_instances_host_statuses(instances)
        for server in servers:
            # Filter out anything that is not in the resulting dict because
            # we had to filter the list of instances above for down cells.
            if server['id'] in host_statuses:
                host_status = host_statuses[server['id']]
                if unknown_only and host_status != fields.HostStatus.UNKNOWN:
                    # Filter servers that are not allowed by policy to see
                    # host_status values other than UNKNOWN.
                    continue
                server['host_status'] = host_status

    def _add_security_grps(self, req, servers, instances,
                           create_request=False):
        """Add the 'security_groups' entry to each server dict.

        For list/show responses the groups are fetched from neutron in a
        single bulk call; for a POST create request they are taken from the
        request body since they have not yet been sent to neutron.
        """
        if not len(servers):
            return

        # If request is a POST create server we get the security groups
        # intended for an instance from the request. This is necessary because
        # the requested security groups for the instance have not yet been
        # sent to neutron.
        # Starting from microversion 2.75, security groups is returned in
        # PUT and POST Rebuild response also.
        if not create_request:
            context = req.environ['nova.context']
            sg_instance_bindings = (
                security_group_api.get_instances_security_groups_bindings(
                    context, servers))
            for server in servers:
                groups = sg_instance_bindings.get(server['id'])
                if groups:
                    server['security_groups'] = groups

        # This section is for POST create server request. There can be
        # only one security group for POST create server request.
        else:
            # try converting to json
            req_obj = jsonutils.loads(req.body)
            # Add security group to server, if no security group was in
            # request add default since that is the group it is part of
            servers[0]['security_groups'] = req_obj['server'].get(
                'security_groups', [{'name': 'default'}])

    @staticmethod
    def _get_instance_bdms_in_multiple_cells(ctxt, instance_uuids):
        """Return a dict, keyed by instance uuid, of block device mapping
        lists gathered from all cells hosting the given instances.

        Cells that fail or time out are skipped with a warning, so the
        result may be missing entries for some instances.
        """
        inst_maps = objects.InstanceMappingList.get_by_instance_uuids(
            ctxt, instance_uuids)

        # Collect the distinct cells the instances are mapped to.
        cell_mappings = {}
        for inst_map in inst_maps:
            if (inst_map.cell_mapping is not None and
                    inst_map.cell_mapping.uuid not in cell_mappings):
                cell_mappings.update(
                    {inst_map.cell_mapping.uuid: inst_map.cell_mapping})

        bdms = {}
        results = nova_context.scatter_gather_cells(
            ctxt, cell_mappings.values(),
            nova_context.CELL_TIMEOUT,
            objects.BlockDeviceMappingList.bdms_by_instance_uuid,
            instance_uuids)
        for cell_uuid, result in results.items():
            if isinstance(result, Exception):
                LOG.warning('Failed to get block device mappings for cell %s',
                            cell_uuid)
            elif result is nova_context.did_not_respond_sentinel:
                LOG.warning('Timeout getting block device mappings for cell '
                            '%s', cell_uuid)
            else:
                bdms.update(result)
        return bdms

    def _add_volumes_attachments(self, server, bdms,
                                 add_delete_on_termination):
        """Add the os-extended-volumes:volumes_attached field to a server
        dict from the instance's block device mappings.
        """
        # server['id'] is guaranteed to be in the cache due to
        # the core API adding it in the 'detail' or 'show' method.
        # If that instance has since been deleted, it won't be in the
        # 'bdms' dictionary though, so use 'get' to avoid KeyErrors.
        instance_bdms = bdms.get(server['id'], [])
        volumes_attached = []
        for bdm in instance_bdms:
            if bdm.get('volume_id'):
                volume_attached = {'id': bdm['volume_id']}
                if add_delete_on_termination:
                    volume_attached['delete_on_termination'] = (
                        bdm['delete_on_termination'])
                volumes_attached.append(volume_attached)
        # NOTE(mriedem): The os-extended-volumes prefix should not be used for
        # new attributes after v2.1. They are only in v2.1 for backward compat
        # with v2.0.
        key = "os-extended-volumes:volumes_attached"
        server[key] = volumes_attached

    @staticmethod
    def _get_server_groups(context, instance):
        """Return a single-element list with the uuid of the instance's
        server group, or [] if the instance is not in a group.
        """
        try:
            sg = objects.InstanceGroup.get_by_instance_uuid(context,
                                                            instance.uuid)
            return [sg.uuid]
        except exception.InstanceGroupNotFound:
            return []
Learn more in the hardware help guide ( https://www.raspberrypi.org/learning/hardware-guide/ ) . Read basic and advanced guides to configuring your Raspberry Pi ( https://www.raspberrypi.org/documentation/configuration/ ) . Read about accessing your Raspberry Pi remotely ( https://www.raspberrypi.org/documentation/remote-access/ ) . NOOBS (New Out Of Box Software) ( https://www.raspberrypi.org/documentation/installation/noobs.md ) operating system installation manager, which gives the user a choice of operating system from the standard distributions. SD cards with NOOBS pre-installed should be available from any of our global distributors and resellers. Alternatively, you can download NOOBS ( https://www.raspberrypi.org/downloads/ ) . You can browse basic examples to help you get started with some of the software available in Raspbian ( https://www.raspberrypi.org/documentation/usage/ ) , find more detail about the Raspbian operating system ( https://www.raspberrypi.org/documentation/raspbian/ ), or read information on fundamental Linux usage and commands for navigating the Raspberry Pi ( https://www.raspberrypi.org/documentation/linux/ ) and managing its file system and users. View and download global compliance certificates for Raspberry Pi products ( https://www.raspberrypi.org/documentation/hardware/raspberrypi/conformity.md ).
# # Copyright (C) 2012-2018 Uninett AS # # This file is part of Network Administration Visualized (NAV). # # NAV is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License version 3 as published by the Free # Software Foundation. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. You should have received a copy of the GNU General Public # License along with NAV. If not, see <http://www.gnu.org/licenses/>. # """Report backend URL config.""" from django.conf.urls import url from nav.web.report import views # Subsystem: Report # Naming convention: report-<result>-<query> urlpatterns = [ url(r'^$', views.index, name='report-index'), url(r'^matrix$', views.matrix_report, name='report-matrix'), url(r'^matrix/(?P<scope>[^&]+)$', views.matrix_report, name='report-matrix-scope'), url(r'^reportlist$', views.report_list, name='report-reportlist'), url(r'^(?P<report_name>[^/]+)$', views.get_report, name='report-by-name'), url(r'^widget/add/', views.add_report_widget, name='report-add-widget'), url(r'^widget/(?P<report_name>[^/]+)$', views.get_report_for_widget, name='widget-report-by-name'), ]
The quote above is from the closing words of Jennifer Baskerville-Burrow’s reflection and they serve as a perfect invitation to our Lenten journey. I recently asked a group of people what they thought the difference was between going on a trip and going on a journey. The group was unanimous in their response. They said that going on a journey implies that you are changed internally by the experience, whereas going on a trip does not necessarily create such a change within us. Given this distinction, it makes perfect sense for Jennifer to frame Lent as a journey because in this season we seek to be changed by deepening our connection with God, our neighbor, and ourselves. The word salvation means healing and comes from the root word salve, as in something that you put on a wound. Jennifer wrote in her reflection that she hoped our Lenten journey would lead to healing, both of ourselves and our world. All journeys are from one point to another, and as we begin our Lenten journey, we invite you to reflect on the point from which you are starting by reflecting on the question in the “Making it Personal” section below. Making It Personal: As you begin your Lenten journey, what are your hopes for this experience? Is there any dis-ease or lack of ease in your life right now for which you seek healing? If so, might that dis-ease be related in any way to the theme of forgiveness that we will focus on in this devotional?
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import unittest

import luigi
import luigi.util
import luigi.notifications

luigi.notifications.DEBUG = True


class A(luigi.Task):
    """Base task with a single overridable int parameter."""
    x = luigi.IntParameter(default=3)


class B(luigi.util.Derived(A)):
    """Task derived from A; inherits A's parameters and adds its own."""
    y = luigi.IntParameter(default=4)


class A2(luigi.Task):
    """Base task with a regular parameter and a global parameter."""
    x = luigi.IntParameter(default=3)
    g = luigi.IntParameter(is_global=True, default=42)


class B2(luigi.util.Derived(A2)):
    """Derived task used to check that global parameters survive Derived()."""
    pass


class UtilTest(unittest.TestCase):
    """Tests for parameter inheritance via luigi.util.Derived."""

    def test_derived_extended(self):
        """Positional args map to the inherited parameter (x) first, then y."""
        b = B(1, 2)
        self.assertEqual(b.x, 1)
        self.assertEqual(b.y, 2)

        a = A(1)
        self.assertEqual(b.parent_obj, a)

    def test_derived_extended_default(self):
        """Defaults from both the base and the derived task are honored."""
        b = B()
        self.assertEqual(b.x, 3)
        self.assertEqual(b.y, 4)

    def test_derived_global_param(self):
        # Had a bug with this: global params must be inherited by Derived().
        b = B2()
        self.assertEqual(b.g, 42)
While the game looks fun, I don't think it should be a fifty dollar game. A nice twenty dollar eShop game would seem to fit the bill better in my opinion. Then again, I haven't played it or seen any long in-depth previews. I'll reserve judgement until I see more about it.
#!/usr/bin/env python """ Copyright (C) 2013 Legoktm Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import re import pywikibot import mwparserfromhell SITE = pywikibot.Site() months=['January','February','March','April','May','June','July','August','September','October','November','December'] j='|'.join(months) regex=re.compile('(?P<month>'+j+'|)\s*(?P<year>\d\d\d\d)') def gen(): page = pywikibot.Page(SITE, 'Template:Infobox NRHP') for c in page.getReferences(onlyTemplateInclusion=True,namespaces=[0], content=True): yield c def process_page(page): text = original = page.get() code = mwparserfromhell.parse(text) for template in code.filter_templates(): if template.name.lower().strip() == 'infobox nrhp': if template.has_param('built'): val = template.get('built').value.strip() s=regex.search(val) if not s: return d=s.groupdict() if int(d['year']) < 1583: return if d['month']: d['month'] = months.index(d['month'])+1 template.get('built').value = '{{Start date|{year}|{month}}}'.format(**d) else: template.get('built').value = '{{Start date|{year}}}'.format(**d) text = unicode(code) if original == text: return page.put(text, 'Bot: Wrapping date in {{start date}} to add [[WP:UF|microformats]]') def main(): for page in gen(): process_page(page) if __name__ == "__main__": try: main() finally: pass
We would like to say a very special thanks to each of the Artists appearing in this Gallery for trusting us with their Tone and supporting our Art. We are honored and humbled to have one of the most impressive Artist Rosters in our industry and we work hard every day to keep it inspired and growing. MESA® Artists don’t get specially chosen or modified amplifiers or cabinets, they play the exact same amps and cabs available to you. So when you see someone playing MESA®, you know it’s real and that you can play that very same amp or rig! We’ve been helping Artists, famous or not, sound their very best for 45 years now … and you can trust we will be there for you throughout your playing career as well. Learn more about the MESA/Boogie Artist Relations Program.
# Copyright (c) 2013 Red Hat, Inc.
# Author: William Benton (willb@redhat.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from proxy import Proxy, proxied_attr
from proxy import proxied_attr_get as pag, proxied_attr_set as pas, proxied_attr_getset as pags
from arc_utils import arcmethod

import errors
from errors import not_implemented, fail

import urllib

class group(Proxy):
    """Client-side proxy for a server-side group object.

    Attribute access is mediated by the proxied_attr helpers; mutations
    are pushed back via self.update().
    """
    name = property(pag("name"))
    features = property(*pags("features"))
    parameters = property(*pags("parameters"))

    # alias for backwards-compatibility
    params = property(pag("parameters"))

    modifyFeatures = arcmethod(*pags("features"), heterogeneous=True, preserve_order=True)

    def getConfig(self, **options):
        """Fetch this group's config; options are not supported yet."""
        if len(options) > 0:
            not_implemented()
        return self.cm.fetch_json_resource("/config/group/%s" % urllib.quote_plus(self.name))

    def explain(self):
        not_implemented()

    def modifyParams(self, command, params, **options):
        """Apply an ADD/REMOVE/REPLACE operation to this group's parameters
        and push the change to the server.
        """
        command = command.upper()
        if command == "ADD":
            # NOTE: iteritems() makes this Python-2-only code.
            for k, v in params.iteritems():
                self.parameters[k] = v
        elif command == "REMOVE":
            # Only delete keys that are actually present.
            for k in [k for k in params if k in self.parameters]:
                del self.parameters[k]
        elif command == "REPLACE":
            self.parameters = params
        else:
            fail(errors.make(errors.BAD_COMMAND, errors.GROUP), "Invalid command %s" % command)
        self.update()

    def members(self):
        """Return the names of all nodes whose memberships include this
        group (computed by scanning every node).
        """
        all_nodes = [self.cm.make_proxy_object("node", node, True) for node in self.cm.list_objects("node")]
        return [node.name for node in all_nodes if self.name in node.memberships]

    membership = property(members)

proxied_attr(group, "name")
proxied_attr(group, "features")
proxied_attr(group, "parameters")
At Dumpster Labs, we strive to bring you the best service at the best price available. We have a wide selection of dumpster sizes in stock in Saint Stephens Church, VA, with flexible & timely pickup and delivery. Give us a call today for all your dumpster rental and roll off needs. By renting a dumpster you'll be able to store exactly what is not required and have it hauled away once the work is complete. This not only keeps your house clean but also helps keep the neighbors pleased. During a remodeling process, if waste is not managed adequately, the nearby houses may also have to put up with dirt and debris. Therefore, the most effective way out can be to rent a dumpster as soon as the remodeling commences. Every type of business requires some type of dumpster rental for its premises. The type of dumpster rental service, along with the size of dumpster needed by a specific business, depends on the nature of the business. Dumpster rental services supply different pickup options, for instance daily, weekly, or even monthly pickups. These companies also rent out specialized dumpsters (made specifically for recyclable products) to businesses. In case you require a compacting dumpster for boxes and cardboard items, you can rent these from the same waste management company that rents out any other type of dumpster. Dumpsters are big trash containers that are used to dispose of enormous amounts of junk on finishing major jobs. Containers generally come in two different types, roll off dumpsters and trash dumpsters. Typically, roll off dumpsters are rented out based on the amount of junk they can hold in terms of cubic yards, although heavier materials will make the dumpster reach its weight limit long before it becomes completely packed.
The actual level of weight that can be put into any given container would depend on the size of the bin showcased, so under is an format of the regular roll away container rental sizes, along with their greatest weight annuities. These dumpsters will probably be of great help with situations connected with natural disasters like surges, earth quakes. Specialist 40 Yard Dumpster in Saint Stephens Church, VA services will probably pile up every one of the waste in dumpster and reuse them. Environmental friendly waste management methods followed by the professional 40 Yard Dumpster in Saint Stephens Church, VA support reduces the impact of the pure calamity on the environment presently there by reducing large spreading connected with infectious ailments across the affected area. For more information on 40 Yard Dumpster in Saint Stephens Church, VA providers check out the Finances Dumpster company. They're a nationwide company with a decent reputation for reasonably priced, upfront, all inclusive pricing along with great customer care. 1-866-284-6164 Ideally this company has been around for a minimum of several years to really establish on their own. This will make sure they are not likely to make mistakes in addition to their reputation will be more important to these. They have spent too long building a name for on their own to simply rebrand when they get a lot of complaints. Cleaning out an entire residence, building, as well as storage unit indicates sifting by way of items to decide what's salvageable along with what's junk. Some things can be held or sold; others proceed straight to the recycling center or the local dump. You will want somewhere to placed junk when it's out of the building-and chances are, there is no need the time as well as energy to generate back and forth to the dump every hour. That's where a professional junk treatment team is available in, to get that junk off the house and from a life. 
If you're dealing with the decline of a loved one, you would like the estate cleanout to be seeing that smooth along with painless as is possible. A crap hauling company will strive to make that happen. Fourth, when you are your dumpster, make certain that the materials you happen to be throwing away are approved by the city you live in. You can't dispose of flammable or maybe hazardous products or car tires and devices in your dumpster. You can obtain hit that has a fine when you put illegitimate materials inside your dumpster. So, learn what the rules are about what you may and cannot throw away before starting any project. In addition to making an attempt to gauge you can actually level of support, another good signal is what kind of experience the company has. You'll simply end up purchasing the free room if you hire a roll down dumpster when your junk disposal is rather less. Consult the 40 Yard Dumpster in Saint Stephens Church corporations if they rent junk dumpster that are modest in size in contrast to roll down dumpsters. These junk dumpsters usually consist of 2 yard to 8 backyard. A company using fantastic support services will help you to decide what size dumpster you will need to rent. You should have the collection of small, choice or huge. The small dumpster is good for small household remodeling tasks such as your bathroom. The choice dumpster is good for 1 layer associated with replacing roofing or cooking area remodeling. The massive dumpster is for very big home redecorating projects and may fit right up to ten vehicle loads or maybe four a lot of debris. You can actually rent a overnight dumpster in Annapolis. It should be as simple as making a speedy phone call to agenda your rental. While you're reducing, bear in mind to reuse and also recycle. Mining harvests like copper mineral can be marketed at save shops if you've got scrap available. Old unfilled cans can be used to store fingernails or screws. 
You could even try marketing old gym equipment you no longer use on the internet (or maybe give it to someone). Or, if you've got way more as opposed to occasional product that you will no longer want or maybe need, leverage the newfound room to have a garage sale. If you're in a rush to get rid of every thing, though, be happy to put it inside your rented dumpster. Many 40 Yard Dumpster in Saint Stephens Church companies should go the extra mile to reduce their environment impact through going through the dumpster's material to find reusable and eco friendly materials that they can either recycling or contribute to charity. Enjoying a 40 Yard Dumpster in Saint Stephens Church does not require virtually any big tactical plans as in case of one's big small business issues. You simply need to book a new dumpster specifying the time of 40 Yard Dumpster in Saint Stephens Church. To e-book a dumpster, you will need to place an order using 40 Yard Dumpster in Saint Stephens Church service provider. An earlier intimation of Twenty four hours will help the 40 Yard Dumpster in Saint Stephens Church service provider to area a right measurement dumpster at your door step. In a business that relies so greatly on do it again customers and also overall reputation, you want a 40 Yard Dumpster in Saint Stephens Church company that has weathered the long haul and also survived: in other words, an established and also experienced company. Many of the 40 Yard Dumpster in Saint Stephens Church corporations you may find could end up being untrustworthy pop up rental companies which exist for a couple of years, fall apart under a bad reputation, then pop up some other place under a distinct name, making an attempt to pull the same tricks. Some sort of well-established company may have none of these issues and will be able to prove worthy of their own reputation. 
As a possible added provision, feel free to check out reviews for every single company you concentrate on and ask for referrals if you like. Beneficial reviews and also good referrals will be a fairly easy determiner of a genuinely excellent company. As a creator you know that lots of supplies are needed when making a house. A lot of the supplies consist of plastic wrapping, Styrofoam, pressboard boxes, etc. You need to continue building your home and getting lessen the waste accruing because the building task moves combined. Every dumpster features a limit around the tonnage of the trash disposed, making the adviser know about the tonnage on the trash thrown away will place appropriate sized dumpster at your door stage. Any excessive trash thrown away above the mentioned limit will probably invite more charges on your part, so it is extremely important for you to allow the representative know about the tonnage of the trash that has to be thrown away. Be it a home, corporate or possibly a private corporation, each of these groups can make use of these kinds of services. These types of can help you eliminate off the trash in a green and easy way. You have to be offered following day 40 Yard Dumpster in Saint Stephens Church, VA so you can get your job started and also done easily. The dumpster corporation should inform you what size dumpster you simply must complete your job. Let the member of staff know exactly what you should be setting up the dumpster. In this way, he or she can inform you what size you should have. Make sure you determine what the dumpster corporation does not permit to be discarded inside of the dumpster. For instance, harmful chemicals such as Freon isn't permitted. If you have left over color in the course's basement, wet paint isn't aloud nevertheless dry color cans are usually aloud. M . d . have different rules and regulations concerning trash collection. 
You would need to call up your township to find out how many luggage of garbage can be put out there each week and also which goods must be reused. You can totally skip to perform . by renting a construction dumpster. Most homeowners find that 40 Yard Dumpster in Saint Stephens Church, VA is the best way to maintain your trash while going through a home renovation. This kind of service must provide you with all the container and also, after you are through with the box, they will discard the trash you've got left inside it. The services should have a group fee for every single sized dumpster they rent out, however, if you go above the weight limit, the fee could be adjusted. For just a large-scale cleanout, you're going to need a lot more than a sole trash can. A lot of junk treatment companies supply 40 Yard Dumpster in Saint Stephens Church, VA and will leave the clear dumpster and figure it out when it's whole, based on your schedule. Subsequently, they'll discard the waste following almost all local unique codes and legislation. A dumpster could act as a central database for things that have been officially crossed away your "to-examine" checklist: once one thing is in the dumpster, it is possible to move on. Without needing to make frequent trips for the landfill or maybe recycling centre will also help you save time, as you can stay on the home and property and continue to monitor the cleaning and organizing process. It is the most affordable strategy for throwing out virtually any tons of trash. The type throw away dumped from the dumpster varies depending on the project, and so dumpster arrives to your job location with customized shapes to suit the project requirements. Dumpster rental is the best investment given it wipes out there any a ton of trash in one go at affordable expense without any waiting around periods. 
Recycling where possible all the entitled litter from the dump would be the additional reward attached to the 40 Yard Dumpster in Saint Stephens Church, VA. Discarding the throw away through 40 Yard Dumpster in Saint Stephens Church, VA will probably recycle the trash in the environment friendly fashion and encourage environmental recognition in the community.
""" Django settings for ob_census project. Generated by 'django-admin startproject' using Django 1.8.4. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'w58)8zhyk$h8$f!tau@b3k0utr4az1-1n(q_tnl%4f%cow$ofy' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'census', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'ob_census_project.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'ob_census_project.wsgi.application' # Database # 
https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/'
Young Academic can today bring news of yet another festival from over the water, this time in the shape of Stop Making Sense Festival. Set on the coast of Croatia, this festival ticks all the boxes with sand, sea and sun along with top class DJs and boat parties. With three days of musical delights in the middle of August, this festival really is the ultimate break away from your studies! In the afterglow of our Best New European Festival nomination, Stop Making Sense festival reveals this year's line-up, which includes Body & Soul (NYC), Francois K, Joaquin ‘Joe’ Claussell and Danny Krivit. Taking place on the full moon weekend of 12th – 14th August, SMS brings an almighty lunar party to the shores of Croatia’s beautiful Adriatic coast. Stop Making Sense is an international music festival with an irreverently independent vibe that showcases top quality dance music, live acts and internationally acclaimed DJs in the sunshine and under the stars. The first ever Stop Making Sense festival in 2010 featured headliner sets from Carl Craig, The Very Best, Optimo, secretsundaze and Allez Allez amongst others. 2011 will raise the standard even higher with the likes of: Body & Soul from New York, Subclub from Glasgow, BBC Radio 1's Benji B's Deviation, Favela Chic from Paris and London's Dalston Superstore. Body&Soul will be bringing their lighting extraordinaire Ariel with them to work his magic on the ambience as they play from 5pm – 1am on Sunday, closing the festival with the Body&Soul experience on the shores of Croatia. SMS is curated by music lovers and promoters NEED2SOUL, DEVIATION, SUBCLUB, SECRETSUNDAZE, BRIDGING THE GAP, WARM, FAVELA CHIC, BODY AND SOUL, HYBRID LIFE, and TROUBLE VISION. Situated on a woody peninsula in the small village of Petrcane on the stunning Adriatic coast of Croatia, the Stop Making Sense weekend promises to entertain and thrill in equal measure. More than a music/dance festival, this is a holiday in a beautiful spot.
Brought forward by a like-minded collective of club & concert promoters, the SMS event presents a broad selection of musical delights over 3 days & nights. With an open-air main stage for cutting-edge live music under the stars, a tiki-bar alfresco dance floor lapped by the waves, and an incredible 70s-style nightclub to lose yourself in till dawn, all capped off with raucous twice-daily boat parties! PARTY PLANNERS INCLUDE: BODY & SOUL, DEVIATION, SECRETSUNDAZE, NEED2SOUL, BRIDGING THE GAP, HYBRID LIFE, DALSTON SUPERSTORE, FAVELA CHIC, TROUBLE VISION, SUBCLUB & WARM.
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Models for Oppia feedback threads and messages."""

from __future__ import absolute_import  # pylint: disable=import-only-modules
from __future__ import unicode_literals  # pylint: disable=import-only-modules

from core.platform import models
import feconf
import python_utils
import utils

from google.appengine.ext import ndb

(base_models, user_models) = models.Registry.import_models([
    models.NAMES.base_model, models.NAMES.user])

# Allowed feedback thread statuses.
STATUS_CHOICES_OPEN = 'open'
STATUS_CHOICES_FIXED = 'fixed'
STATUS_CHOICES_IGNORED = 'ignored'
STATUS_CHOICES_COMPLIMENT = 'compliment'
STATUS_CHOICES_NOT_ACTIONABLE = 'not_actionable'
STATUS_CHOICES = [
    STATUS_CHOICES_OPEN,
    STATUS_CHOICES_FIXED,
    STATUS_CHOICES_IGNORED,
    STATUS_CHOICES_COMPLIMENT,
    STATUS_CHOICES_NOT_ACTIONABLE,
]

# Constants used for generating new ids.
# _MAX_RETRIES bounds the number of attempts made before giving up on
# generating a collision-free thread id; _RAND_RANGE is the exclusive upper
# bound of the random component mixed into each candidate id.
_MAX_RETRIES = 10
_RAND_RANGE = 127 * 127


class GeneralFeedbackThreadModel(base_models.BaseModel):
    """Threads for each entity.

    The id of instances of this class has the form
        [entity_type].[entity_id].[generated_string]
    """

    # The type of entity the thread is linked to.
    entity_type = ndb.StringProperty(required=True, indexed=True)
    # The ID of the entity the thread is linked to.
    entity_id = ndb.StringProperty(required=True, indexed=True)
    # ID of the user who started the thread. This may be None if the feedback
    # was given anonymously by a learner.
    original_author_id = ndb.StringProperty(indexed=True)
    # Latest status of the thread.
    status = ndb.StringProperty(
        default=STATUS_CHOICES_OPEN,
        choices=STATUS_CHOICES,
        required=True,
        indexed=True,
    )
    # Latest subject of the thread.
    subject = ndb.StringProperty(indexed=True, required=True)
    # Summary text of the thread.
    summary = ndb.TextProperty(indexed=False)
    # Specifies whether this thread has a related suggestion.
    has_suggestion = (
        ndb.BooleanProperty(indexed=True, default=False, required=True))
    # Cached value of the number of messages in the thread.
    message_count = ndb.IntegerProperty(indexed=True, default=0)
    # Cached text of the last message in the thread with non-empty content, or
    # None if there is no such message.
    last_nonempty_message_text = ndb.TextProperty(indexed=False)
    # Cached ID for the user of the last message in the thread with non-empty
    # content, or None if the message was made anonymously or if there is no
    # such message.
    last_nonempty_message_author_id = ndb.StringProperty(indexed=True)

    @staticmethod
    def get_deletion_policy():
        """General feedback thread needs to be pseudonymized for the user."""
        return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE

    @classmethod
    def get_export_policy(cls):
        """Model contains user data."""
        return dict(super(cls, cls).get_export_policy(), **{
            'entity_type': base_models.EXPORT_POLICY.EXPORTED,
            'entity_id': base_models.EXPORT_POLICY.EXPORTED,
            'original_author_id': base_models.EXPORT_POLICY.EXPORTED,
            'status': base_models.EXPORT_POLICY.EXPORTED,
            'subject': base_models.EXPORT_POLICY.EXPORTED,
            'summary': base_models.EXPORT_POLICY.EXPORTED,
            'has_suggestion': base_models.EXPORT_POLICY.EXPORTED,
            'message_count': base_models.EXPORT_POLICY.EXPORTED,
            'last_nonempty_message_text': base_models.EXPORT_POLICY.EXPORTED,
            'last_nonempty_message_author_id':
                base_models.EXPORT_POLICY.EXPORTED
        })

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether GeneralFeedbackThreadModel exists for user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        # A user is referenced either as the thread author or as the author of
        # the cached last non-empty message.
        return cls.query(ndb.OR(
            cls.original_author_id == user_id,
            cls.last_nonempty_message_author_id == user_id
        )).get(keys_only=True) is not None

    @classmethod
    def export_data(cls, user_id):
        """Exports the data from GeneralFeedbackThreadModel into dict format
        for Takeout.

        Args:
            user_id: str. The ID of the user whose data should be exported.

        Returns:
            dict. Dictionary of the data from GeneralFeedbackThreadModel.
        """
        # NOTE(review): only threads authored by the user are exported here,
        # although last_nonempty_message_author_id is also marked EXPORTED in
        # get_export_policy() — confirm this asymmetry is intended.
        user_data = dict()
        feedback_models = cls.get_all().filter(
            cls.original_author_id == user_id).fetch()

        for feedback_model in feedback_models:
            user_data[feedback_model.id] = {
                'entity_type': feedback_model.entity_type,
                'entity_id': feedback_model.entity_id,
                'status': feedback_model.status,
                'subject': feedback_model.subject,
                'has_suggestion': feedback_model.has_suggestion,
                'summary': feedback_model.summary,
                'message_count': feedback_model.message_count,
                'last_updated_msec': utils.get_time_in_millisecs(
                    feedback_model.last_updated)
            }

        return user_data

    @classmethod
    def generate_new_thread_id(cls, entity_type, entity_id):
        """Generates a new thread ID which is unique.

        Args:
            entity_type: str. The type of the entity.
            entity_id: str. The ID of the entity.

        Returns:
            str. A thread ID that is different from the IDs of all
            the existing threads within the given entity.

        Raises:
            Exception. There were too many collisions with existing thread IDs
                when attempting to generate a new thread ID.
        """
        # Candidate ids mix the current time with a random component; each
        # candidate is checked against the datastore before being returned.
        for _ in python_utils.RANGE(_MAX_RETRIES):
            thread_id = (
                entity_type + '.' + entity_id + '.' +
                utils.base64_from_int(utils.get_current_time_in_millisecs()) +
                utils.base64_from_int(utils.get_random_int(_RAND_RANGE)))
            if not cls.get_by_id(thread_id):
                return thread_id
        raise Exception(
            'New thread id generator is producing too many collisions.')

    @classmethod
    def create(cls, thread_id):
        """Creates a new FeedbackThreadModel entry.

        Args:
            thread_id: str. Thread ID of the newly-created thread.

        Returns:
            GeneralFeedbackThreadModel. The newly created FeedbackThreadModel
            instance.

        Raises:
            Exception. A thread with the given thread ID exists already.
        """
        if cls.get_by_id(thread_id):
            raise Exception('Feedback thread ID conflict on create.')
        return cls(id=thread_id)

    @classmethod
    def get_threads(
            cls, entity_type, entity_id, limit=feconf.DEFAULT_QUERY_LIMIT):
        """Returns a list of threads associated with the entity, ordered
        by their "last updated" field. The number of entities fetched is
        limited by the `limit` argument to this method, whose default
        value is equal to the default query limit.

        Args:
            entity_type: str. The type of the entity.
            entity_id: str. The ID of the entity.
            limit: int. The maximum possible number of items in the returned
                list.

        Returns:
            list(GeneralFeedbackThreadModel). List of threads associated with
            the entity. Doesn't include deleted entries.
        """
        return cls.get_all().filter(cls.entity_type == entity_type).filter(
            cls.entity_id == entity_id).order(-cls.last_updated).fetch(limit)


class GeneralFeedbackMessageModel(base_models.BaseModel):
    """Feedback messages. One or more of these messages make a thread.

    The id of instances of this class has the form [thread_id].[message_id]
    """

    # ID corresponding to an entry of FeedbackThreadModel.
    thread_id = ndb.StringProperty(required=True, indexed=True)
    # 0-based sequential numerical ID. Sorting by this field will create the
    # thread in chronological order.
    message_id = ndb.IntegerProperty(required=True, indexed=True)
    # ID of the user who posted this message. This may be None if the feedback
    # was given anonymously by a learner.
    author_id = ndb.StringProperty(indexed=True)
    # New thread status. Must exist in the first message of a thread. For the
    # rest of the thread, should exist only when the status changes.
    updated_status = ndb.StringProperty(choices=STATUS_CHOICES, indexed=True)
    # New thread subject. Must exist in the first message of a thread. For the
    # rest of the thread, should exist only when the subject changes.
    updated_subject = ndb.StringProperty(indexed=False)
    # Message text. Allowed not to exist (e.g. post only to update the status).
    text = ndb.TextProperty(indexed=False)
    # Whether the incoming message is received by email (as opposed to via
    # the web).
    received_via_email = (
        ndb.BooleanProperty(default=False, indexed=True, required=True))

    @staticmethod
    def get_deletion_policy():
        """General feedback message needs to be pseudonymized for the user."""
        return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE

    @classmethod
    def get_export_policy(cls):
        """Model contains user data."""
        return dict(super(cls, cls).get_export_policy(), **{
            'thread_id': base_models.EXPORT_POLICY.EXPORTED,
            'message_id': base_models.EXPORT_POLICY.EXPORTED,
            'author_id': base_models.EXPORT_POLICY.EXPORTED,
            'updated_status': base_models.EXPORT_POLICY.EXPORTED,
            'updated_subject': base_models.EXPORT_POLICY.EXPORTED,
            'text': base_models.EXPORT_POLICY.EXPORTED,
            'received_via_email': base_models.EXPORT_POLICY.EXPORTED
        })

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether GeneralFeedbackMessageModel exists for user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.query(cls.author_id == user_id).get(
            keys_only=True) is not None

    @classmethod
    def export_data(cls, user_id):
        """Exports the data from GeneralFeedbackMessageModel into dict
        format for Takeout.

        Args:
            user_id: str. The ID of the user whose data should be exported.

        Returns:
            dict. Dictionary of the data from GeneralFeedbackMessageModel.
        """
        user_data = dict()
        feedback_models = cls.get_all().filter(cls.author_id == user_id).fetch()

        for feedback_model in feedback_models:
            user_data[feedback_model.id] = {
                'thread_id': feedback_model.thread_id,
                'message_id': feedback_model.message_id,
                'updated_status': feedback_model.updated_status,
                'updated_subject': feedback_model.updated_subject,
                'text': feedback_model.text,
                'received_via_email': feedback_model.received_via_email
            }

        return user_data

    @classmethod
    def _generate_id(cls, thread_id, message_id):
        """Generates full message ID given the thread ID and message ID.

        Args:
            thread_id: str. Thread ID of the thread to which the message
                belongs.
            message_id: int. Message ID of the message.

        Returns:
            str. Full message ID.
        """
        return '.'.join([thread_id, python_utils.UNICODE(message_id)])

    @property
    def entity_id(self):
        """Returns the entity_id corresponding to this thread instance.

        Returns:
            str. The entity_id.
        """
        # The model id has the form [entity_type].[entity_id].[...], so the
        # second dot-separated component is the entity id.
        return self.id.split('.')[1]

    @property
    def entity_type(self):
        """Returns the entity_type corresponding to this thread instance.

        Returns:
            str. The entity_type.
        """
        return self.id.split('.')[0]

    @classmethod
    def create(cls, message_identifier):
        """Creates a new GeneralFeedbackMessageModel entry.

        Args:
            message_identifier: FullyQualifiedMessageIdentifier. The message
                identifier consists of the thread_id and its corresponding
                message_id.

        Returns:
            GeneralFeedbackMessageModel. Instance of the new
            GeneralFeedbackMessageModel entry.

        Raises:
            Exception. A message with the same ID already exists
                in the given thread.
        """
        return cls.create_multi([message_identifier])[0]

    @classmethod
    def create_multi(cls, message_identifiers):
        """Creates a new GeneralFeedbackMessageModel entry for each
        (thread_id, message_id) pair.

        Args:
            message_identifiers: list(FullyQualifiedMessageIdentifier). Each
                message identifier consists of the thread_id and its
                corresponding message_id.

        Returns:
            list(GeneralFeedbackMessageModel). Instances of the new
            GeneralFeedbackMessageModel entries.

        Raises:
            Exception. The number of thread_ids must be equal to the number
                of message_ids.
            Exception. A message with the same ID already exists
                in the given thread.
        """
        thread_ids = [
            message_identifier.thread_id for message_identifier
            in message_identifiers]
        message_ids = [
            message_identifier.message_id for message_identifier
            in message_identifiers]

        # Generate the new ids.
        instance_ids = [
            cls._generate_id(thread_id, message_id) for thread_id, message_id
            in python_utils.ZIP(thread_ids, message_ids)
        ]

        # Check if the new ids are valid.
        current_instances = cls.get_multi(instance_ids)
        conflict_ids = [
            current_instance.id for current_instance in current_instances
            if current_instance is not None
        ]
        if len(conflict_ids) > 0:
            raise Exception(
                'The following feedback message ID(s) conflicted on '
                'create: %s' % (' '.join(conflict_ids))
            )

        return [cls(id=instance_id) for instance_id in instance_ids]

    @classmethod
    def get(cls, thread_id, message_id, strict=True):
        """Gets the GeneralFeedbackMessageModel entry for the given ID. Raises
        an error if no undeleted message with the given ID is found and
        strict == True.

        Args:
            thread_id: str. ID of the thread.
            message_id: int. ID of the message.
            strict: bool. Whether to raise an error if no FeedbackMessageModel
                entry is found for the given IDs.

        Returns:
            GeneralFeedbackMessageModel or None. If strict == False and no
            undeleted message with the given message_id exists in the
            datastore, then returns None. Otherwise, returns the
            GeneralFeedbackMessageModel instance that corresponds to the
            given ID.

        Raises:
            EntityNotFoundError. The value of strict is True and either
                (i) message ID is not valid
                (ii) message is marked as deleted.
                No error will be raised if strict == False.
        """
        instance_id = cls._generate_id(thread_id, message_id)
        return super(GeneralFeedbackMessageModel, cls).get(
            instance_id, strict=strict)

    @classmethod
    def get_messages(cls, thread_id):
        """Returns a list of messages in the given thread. The number of
        messages returned is capped by feconf.DEFAULT_QUERY_LIMIT.

        Args:
            thread_id: str. ID of the thread.

        Returns:
            list(GeneralFeedbackMessageModel). A list of messages in the
            given thread, up to a maximum of feconf.DEFAULT_QUERY_LIMIT
            messages.
        """
        return cls.get_all().filter(
            cls.thread_id == thread_id).fetch(feconf.DEFAULT_QUERY_LIMIT)

    @classmethod
    def get_most_recent_message(cls, thread_id):
        """Returns the last message in the thread.

        Args:
            thread_id: str. ID of the thread.

        Returns:
            GeneralFeedbackMessageModel. Last message in the thread.
        """
        # Relies on the thread's cached message_count: message ids are 0-based
        # and sequential, so the last message has id message_count - 1.
        thread = GeneralFeedbackThreadModel.get_by_id(thread_id)
        return cls.get(thread_id, thread.message_count - 1)

    @classmethod
    def get_message_count(cls, thread_id):
        """Returns the number of messages in the thread. Includes the
        deleted entries.

        Args:
            thread_id: str. ID of the thread.

        Returns:
            int. Number of messages in the thread.
        """
        return cls.get_message_counts([thread_id])[0]

    @classmethod
    def get_message_counts(cls, thread_ids):
        """Returns a list containing the number of messages in the threads.
        Includes the deleted entries.

        Args:
            thread_ids: list(str). ID of the threads.

        Returns:
            list(int). List of the message counts for the threads.
        """
        thread_models = GeneralFeedbackThreadModel.get_multi(thread_ids)

        return [thread_model.message_count for thread_model in thread_models]

    @classmethod
    def get_all_messages(cls, page_size, urlsafe_start_cursor):
        """Fetches a list of all the messages sorted by their last updated
        attribute.

        Args:
            page_size: int. The maximum number of messages to be returned.
            urlsafe_start_cursor: str or None. If provided, the list of
                returned messages starts from this datastore cursor.
                Otherwise, the returned messages start from the beginning
                of the full list of messages.

        Returns:
            3-tuple of (results, cursor, more). Where:
                results: List of query results.
                cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this might
                    be None.
                more: bool. If True, there are (probably) more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        return cls._fetch_page_sorted_by_last_updated(
            cls.query(), page_size, urlsafe_start_cursor)


class GeneralFeedbackThreadUserModel(base_models.BaseModel):
    """Model for storing the ids of the messages in the thread that are
    read by the user.

    Instances of this class have keys of the form [user_id].[thread_id]
    """

    # The id of the user who read the messages.
    user_id = ndb.StringProperty(required=True, indexed=True)
    # The id of the thread whose messages were read.
    thread_id = ndb.StringProperty(required=True, indexed=True)
    # Ids of the messages in the thread that the user has read.
    message_ids_read_by_user = ndb.IntegerProperty(repeated=True, indexed=True)

    @staticmethod
    def get_deletion_policy():
        """General feedback thread user can be deleted since it only contains
        information relevant to the one user.
        """
        return base_models.DELETION_POLICY.DELETE

    @classmethod
    def get_export_policy(cls):
        """Model contains user data."""
        return dict(super(cls, cls).get_export_policy(), **{
            'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'thread_id': base_models.EXPORT_POLICY.EXPORTED,
            'message_ids_read_by_user': base_models.EXPORT_POLICY.EXPORTED
        })

    @classmethod
    def apply_deletion_policy(cls, user_id):
        """Delete instance of GeneralFeedbackThreadUserModel for the user.

        Args:
            user_id: str. The ID of the user whose data should be deleted.
        """
        ndb.delete_multi(
            cls.query(cls.user_id == user_id).fetch(keys_only=True))

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether GeneralFeedbackThreadUserModel exists for user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return cls.query(cls.user_id == user_id).get(keys_only=True) is not None

    @classmethod
    def generate_full_id(cls, user_id, thread_id):
        """Generates the full message id of the format:
            <user_id.thread_id>.

        Args:
            user_id: str. The user id.
            thread_id: str. The thread id.

        Returns:
            str. The full message id.
        """
        return '%s.%s' % (user_id, thread_id)

    @classmethod
    def get(cls, user_id, thread_id):
        """Gets the FeedbackThreadUserModel corresponding to the given user and
        the thread.

        Args:
            user_id: str. The id of the user.
            thread_id: str. The id of the thread.

        Returns:
            FeedbackThreadUserModel. The FeedbackThreadUserModel instance which
            matches with the given user_id, and thread id.
        """
        # strict=False: returns None rather than raising when no model exists.
        instance_id = cls.generate_full_id(user_id, thread_id)
        return super(GeneralFeedbackThreadUserModel, cls).get(
            instance_id, strict=False)

    @classmethod
    def create(cls, user_id, thread_id):
        """Creates a new FeedbackThreadUserModel instance and returns it.

        Args:
            user_id: str. The id of the user.
            thread_id: str. The id of the thread.

        Returns:
            FeedbackThreadUserModel. The newly created FeedbackThreadUserModel
            instance.
        """
        return cls.create_multi(user_id, [thread_id])[0]

    @classmethod
    def create_multi(cls, user_id, thread_ids):
        """Creates new FeedbackThreadUserModel instances for user_id for each
        of the thread_ids.

        Args:
            user_id: str. The id of the user.
            thread_ids: list(str). The ids of the threads.

        Returns:
            list(FeedbackThreadUserModel). The newly created
            FeedbackThreadUserModel instances.
        """
        new_instances = []
        for thread_id in thread_ids:
            instance_id = cls.generate_full_id(user_id, thread_id)
            new_instance = cls(
                id=instance_id, user_id=user_id, thread_id=thread_id)
            new_instances.append(new_instance)

        GeneralFeedbackThreadUserModel.put_multi(new_instances)
        return new_instances

    @classmethod
    def get_multi(cls, user_id, thread_ids):
        """Gets the ExplorationUserDataModel corresponding to the given user
        and the thread ids.

        Args:
            user_id: str. The id of the user.
            thread_ids: list(str). The ids of the threads.

        Returns:
            list(FeedbackThreadUserModel). The FeedbackThreadUserModels
            corresponding to the given user and thread ids.
        """
        instance_ids = [
            cls.generate_full_id(user_id, thread_id)
            for thread_id in thread_ids]

        return super(GeneralFeedbackThreadUserModel, cls).get_multi(
            instance_ids)

    @classmethod
    def export_data(cls, user_id):
        """Takeout: Export GeneralFeedbackThreadUserModel user-based properties.

        Args:
            user_id: str. The user_id denotes which user's data to extract.

        Returns:
            dict. A dict containing the user-relevant properties of
            GeneralFeedbackThreadUserModel, i.e., which messages have been
            read by the user (as a list of ids) in each thread.
        """
        found_models = cls.get_all().filter(cls.user_id == user_id)
        user_data = {}
        for user_model in found_models:
            user_data[user_model.thread_id] = (
                user_model.message_ids_read_by_user)
        return user_data


class FeedbackAnalyticsModel(base_models.BaseMapReduceBatchResultsModel):
    """Model for storing feedback thread analytics for an exploration.

    The key of each instance is the exploration ID.
    """

    # The number of open feedback threads for this exploration.
    num_open_threads = ndb.IntegerProperty(default=None, indexed=True)
    # Total number of feedback threads for this exploration.
    num_total_threads = ndb.IntegerProperty(default=None, indexed=True)

    @staticmethod
    def get_deletion_policy():
        """Feedback analytic model should be kept if the associated exploration
        is public.
        """
        return base_models.DELETION_POLICY.KEEP_IF_PUBLIC

    @classmethod
    def get_export_policy(cls):
        """Model does not contain user data."""
        return dict(super(cls, cls).get_export_policy(), **{
            'num_open_threads': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'num_total_threads': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    @classmethod
    def has_reference_to_user_id(cls, unused_user_id):
        """FeedbackAnalyticsModel doesn't reference any user_id directly.

        Args:
            unused_user_id: str. The (unused) ID of the user whose data should
                be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        return False

    @classmethod
    def create(cls, model_id, num_open_threads, num_total_threads):
        """Creates a new FeedbackAnalyticsModel entry.

        Args:
            model_id: str. ID of the model instance to be created. This
                is the same as the exploration ID.
            num_open_threads: int. Number of open feedback threads for
                this exploration.
            num_total_threads: int. Total number of feedback threads for
                this exploration.
        """
        cls(
            id=model_id,
            num_open_threads=num_open_threads,
            num_total_threads=num_total_threads
        ).put()


class UnsentFeedbackEmailModel(base_models.BaseModel):
    """Model for storing feedback messages that need to be sent to creators.

    Instances of this model contain information about feedback messages that
    have been received by the site, but have not yet been sent to creators.
    The model instances will be deleted once the corresponding email has been
    sent.

    The id of each model instance is the user_id of the user who should receive
    the messages.
    """

    # The list of feedback messages that need to be sent to this user.
    # Each element in this list is a dict with keys 'entity_type', 'entity_id',
    # 'thread_id' and 'message_id'; this information is used to retrieve
    # corresponding FeedbackMessageModel instance.
    feedback_message_references = ndb.JsonProperty(repeated=True)
    # The number of failed attempts that have been made (so far) to
    # send an email to this user.
    retries = ndb.IntegerProperty(default=0, required=True, indexed=True)

    @staticmethod
    def get_deletion_policy():
        """Unsent feedback email is kept until sent."""
        return base_models.DELETION_POLICY.KEEP

    @classmethod
    def get_export_policy(cls):
        """Model does not contain user data."""
        return dict(super(cls, cls).get_export_policy(), **{
            'feedback_message_references':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'retries': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })

    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether UnsentFeedbackEmailModel exists for user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether the model for user_id exists.
        """
        # The model id IS the user id, so a direct key lookup suffices.
        return cls.get_by_id(user_id) is not None
Amazing Mango ICE E-Liquid by Naked 100 is a new take on their original Amazing Mango flavor to give it a chilly finish that entices those who prefer Menthol products. This tasty vape juice starts off with a blast of sweet Mango picked ripe from the tree to get your mouth watering. Then a rich Cream blended with juicy white Peaches rushes through, creating a tornado of high quality flavor. To finish it all off, Naked 100 has given this incredible e-juice a cooling Menthol finish to soothe the soul. Amazing Mango ICE was crafted with all natural ingredients derived from the United States so you can get a premium vaping experience. Made with a 30/70 PG/VG ratio, Amazing Mango ICE E-Liquid by Naked 100 will have you chasing clouds while saving flavor all throughout the day.
"""Support for the Airly air_quality service.""" from homeassistant.components.air_quality import ( ATTR_AQI, ATTR_PM_2_5, ATTR_PM_10, AirQualityEntity, ) from homeassistant.const import CONF_NAME from .const import ( ATTR_API_ADVICE, ATTR_API_CAQI, ATTR_API_CAQI_DESCRIPTION, ATTR_API_CAQI_LEVEL, ATTR_API_PM10, ATTR_API_PM10_LIMIT, ATTR_API_PM10_PERCENT, ATTR_API_PM25, ATTR_API_PM25_LIMIT, ATTR_API_PM25_PERCENT, DOMAIN, ) ATTRIBUTION = "Data provided by Airly" LABEL_ADVICE = "advice" LABEL_AQI_DESCRIPTION = f"{ATTR_AQI}_description" LABEL_AQI_LEVEL = f"{ATTR_AQI}_level" LABEL_PM_2_5_LIMIT = f"{ATTR_PM_2_5}_limit" LABEL_PM_2_5_PERCENT = f"{ATTR_PM_2_5}_percent_of_limit" LABEL_PM_10_LIMIT = f"{ATTR_PM_10}_limit" LABEL_PM_10_PERCENT = f"{ATTR_PM_10}_percent_of_limit" async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Airly air_quality entity based on a config entry.""" name = config_entry.data[CONF_NAME] coordinator = hass.data[DOMAIN][config_entry.entry_id] async_add_entities( [AirlyAirQuality(coordinator, name, config_entry.unique_id)], False ) def round_state(func): """Round state.""" def _decorator(self): res = func(self) if isinstance(res, float): return round(res) return res return _decorator class AirlyAirQuality(AirQualityEntity): """Define an Airly air quality.""" def __init__(self, coordinator, name, unique_id): """Initialize.""" self.coordinator = coordinator self._name = name self._unique_id = unique_id self._icon = "mdi:blur" @property def name(self): """Return the name.""" return self._name @property def should_poll(self): """Return the polling requirement of the entity.""" return False @property def icon(self): """Return the icon.""" return self._icon @property @round_state def air_quality_index(self): """Return the air quality index.""" return self.coordinator.data[ATTR_API_CAQI] @property @round_state def particulate_matter_2_5(self): """Return the particulate matter 2.5 level.""" return self.coordinator.data[ATTR_API_PM25] 
@property @round_state def particulate_matter_10(self): """Return the particulate matter 10 level.""" return self.coordinator.data[ATTR_API_PM10] @property def attribution(self): """Return the attribution.""" return ATTRIBUTION @property def unique_id(self): """Return a unique_id for this entity.""" return self._unique_id @property def available(self): """Return True if entity is available.""" return self.coordinator.last_update_success @property def device_state_attributes(self): """Return the state attributes.""" return { LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION], LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE], LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL], LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIMIT], LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]), LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT], LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]), } async def async_added_to_hass(self): """Connect to dispatcher listening for entity data notifications.""" self.async_on_remove( self.coordinator.async_add_listener(self.async_write_ha_state) ) async def async_update(self): """Update Airly entity.""" await self.coordinator.async_request_refresh()
Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India on the Elevation Map. Topographic Map of Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India. This tool can be used to get Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India elevation, Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India altitude, Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India latitude and Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India longitude. You can also find the Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India Elevation Map and the exact Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India elevation in the most used measurement units used: meters, kilometers and miles. Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India interactive elevation with tools that provide altitudes of many locations like Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India, Topographic Map of Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India, along with detailed location for Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India and places around the globe. See the Topographic Map of Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India. Altitude tool provides exact height for Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India. You can also get the Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India elevation above the sea level (Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India altitude). Get the altitude of Narmada Niwas Jagdish Pura Hoshangabad Madhya Pradesh 461001 India above sea level by searching with our elevation tool.
"""Block ud.pt.AddMwt for heuristic detection of Portuguese contractions. According to the UD guidelines, contractions such as "dele" = "de ele" should be annotated using multi-word tokens. Note that this block should be used only for converting legacy conllu files. Ideally a tokenizer should have already split the MWTs. """ import udapi.block.ud.addmwt MWTS = { 'à': {'form': 'a a', 'lemma': 'a o'}, 'às': {'form': 'a as', 'lemma': 'a o'}, 'ao': {'form': 'a o', 'lemma': 'a o'}, 'aos': {'form': 'a os', 'lemma': 'a o'}, 'da': {'form': 'de a', 'lemma': 'de o'}, 'das': {'form': 'de as', 'lemma': 'de o'}, 'dessa': {'form': 'de essa', 'lemma': 'de esse'}, 'dessas': {'form': 'de essas', 'lemma': 'de esse'}, 'desse': {'form': 'de esse', 'lemma': 'de esse'}, 'desses': {'form': 'de esses', 'lemma': 'de esse'}, 'desta': {'form': 'de esta', 'lemma': 'de este'}, 'destas': {'form': 'de estas', 'lemma': 'de este'}, 'deste': {'form': 'de este', 'lemma': 'de este'}, 'destes': {'form': 'de estes', 'lemma': 'de este'}, 'disso': {'form': 'de isso', 'lemma': 'de este'}, 'disto': {'form': 'de isto', 'lemma': 'de este'}, 'do': {'form': 'de o', 'lemma': 'de o'}, # 'upos': 'ADP PRON', 'deprel': 'case *'' 'dos': {'form': 'de os', 'lemma': 'de o'}, 'dum': {'form': 'de um', 'lemma': 'de um'}, 'duma': {'form': 'de uma', 'lemma': 'de um'}, 'dumas': {'form': 'de umas', 'lemma': 'de um'}, 'duns': {'form': 'de uns', 'lemma': 'de um'}, 'na': {'form': 'em a', 'lemma': 'em o'}, 'nas': {'form': 'em as', 'lemma': 'em o'}, # ADP PRON 'nesses': {'form': 'em esses', 'lemma': 'em esse'}, 'nesta': {'form': 'em esta', 'lemma': 'em este'}, 'neste': {'form': 'em este', 'lemma': 'em este'}, 'nisso': {'form': 'em isso', 'lemma': 'em este'}, 'nisto': {'form': 'em isto', 'lemma': 'em este', 'upos': 'ADP PRON', 'main': 1, 'shape': 'subtree'}, 'no': {'form': 'em o', 'lemma': 'em o'}, # PRON cases are excluded below 'nos': {'form': 'em os', 'lemma': 'em o'}, # PRON cases are excluded below 'num': {'form': 'em um', 
'lemma': 'em um'}, 'numa': {'form': 'em uma', 'lemma': 'em um'}, 'numas': {'form': 'em umas', 'lemma': 'em um'}, 'nuns': {'form': 'em uns', 'lemma': 'em um'}, 'pela': {'form': 'por a', 'lemma': 'por o'}, 'pelas': {'form': 'por as', 'lemma': 'por o'}, 'pelos': {'form': 'por os', 'lemma': 'por o'}, 'pelo': {'form': 'por o', 'lemma': 'por o'}, # TODO daí = de aí = ADP ADV = case advmod } # shared values for all entries in MWTS for v in MWTS.values(): if not v.get('upos'): v['upos'] = 'ADP DET' if not v.get('deprel'): v['deprel'] = 'case det' v['feats'] = '_ *' # The following are the default values # v['main'] = 0 # which of the two words will inherit the original children (if any) # v['shape'] = 'siblings', # the newly created nodes will be siblings for pronoun in 'ela ele eles elas'.split(): MWTS['d' + pronoun] = { 'form': 'de ' + pronoun, 'lemma': 'de ' + pronoun, 'upos': 'ADP PRON', 'deprel': 'case *', 'main': 1, 'shape': 'subtree', } class AddMwt(udapi.block.ud.addmwt.AddMwt): """Detect and mark MWTs (split them into words and add the words to the tree).""" def multiword_analysis(self, node): """Return a dict with MWT info or None if `node` does not represent a multiword token.""" # "no" can be either a contraction of "em o", or a pronoun if node.form.lower() in ('no', 'nos') and node.upos == 'PRON': return analysis = MWTS.get(node.form.lower(), None) # If the input is e.g.: # 1 na _ ADP _ _ deprel_x ? # 2 verdade _ NOUN _ _ fixed 1 # The expected output is: # 1-2 na _ _ _ _ _ _ # 1 em _ ADP _ _ deprel_x ? 
# 2 a _ DET _ _ fixed 1 # 3 verdade _ NOUN _ _ fixed 1 if analysis and analysis['deprel'] == 'case det' and node.udeprel != 'case': copy = dict(analysis) copy['deprel'] = '* det' copy['shape'] = 'subtree' first_child = next((c for c in node.children if node.precedes(c)), None) if first_child is not None and first_child.udeprel == 'fixed': copy['deprel'] = '* fixed' return copy if analysis is not None: return analysis if node.form.lower().endswith('-se') and node.upos == 'VERB': return { 'form': node.form.lower()[:-3] + ' se', 'lemma': '* se', 'upos': '* PRON', 'feats': '* _', 'deprel': '* nsubj', # or '* expl' 'main': 0, 'shape': 'subtree', } elif node.form.lower().endswith('-lo') and node.upos == 'VERB': return { 'form': node.form.lower()[:-3] + ' lo', 'lemma': '* ele', 'upos': '* PRON', 'feats': '* _', 'deprel': '* obj', 'main': 0, 'shape': 'subtree', } elif node.form.lower().endswith('-los') and node.upos == 'VERB': return { 'form': node.form.lower()[:-4] + ' los', 'lemma': '* eles', 'upos': '* PRON', 'feats': '* _', 'deprel': '* obj', 'main': 0, 'shape': 'subtree', } elif node.form.lower().endswith('-o') and node.upos == 'VERB': return { 'form': node.form.lower()[:-2] + ' o', 'lemma': '* ele', 'upos': '* PRON', 'feats': '* _', 'deprel': '* obj', 'main': 0, 'shape': 'subtree', } return None
I have the impression that the names Columbus and Copernicus are not the forms actually used on a daily basis by those people, but rather Latin forms used to identify them in scholarly or academic contexts. The vast majority of Copernicus's surviving works are in Latin, which in his lifetime was the language of academia in Europe. Latin was also the official language of the Roman Catholic Church and of Poland's royal court, and thus all of Copernicus's correspondence with the Church and with Polish leaders was in Latin… The surname Copernik, Koppernigk is recorded in Kraków from c. 1350, in various spellings… During his childhood, about 1480, the name of his father (and thus of the future astronomer) was recorded in Thorn as Niclas Koppernigk. At Kraków he signed himself, in Latin, Nicolaus Nicolai de Torunia (Nicolaus, son of Nicolaus, of Toruń). At Bologna, in 1496, he registered… as Dominus Nicolaus Kopperlingk de Thorn – IX grosseti. At Padua he signed himself "Nicolaus Copernik", later "Coppernicus". The astronomer thus Latinized his name to Coppernicus, generally with two "p"s (in 23 of 31 documents studied), but later in life he used a single "p". On the title page of De revolutionibus, Rheticus published the name (in the genitive, or possessive, case) as "Nicolai Copernici". I’ve noticed too that the mathematician Gauss is sometimes called Karl (with a K, that seems more German), and sometimes called Carl (with a C, that seems to associate it with the Latin name Carolus. How, when and why did Latin cease to be the language of international scholarly communication? To expand the answer of b.Lorenz with a few examples: it was a slow process, and the speed in various areas was very different. Some examples. Proceedings of St. Petersburg Academy (mid 18th century) had the following rule: papers in all sciences were published in French, except mathematics, which was published in Latin.
The last important mathematical book that I know of which was written in Latin is Fundamenta Nova by Jacobi (1829). But no one wrote on physics or chemistry in Latin in the 19th century, and most journal papers were not published in Latin after the middle of the 19th century, in all sciences. Latin disappeared from the mandatory curricula in high schools and universities in the beginning of the 20th century (in many European countries like Russia and England). It came about at different times in different subjects; for example, Latin names of conditions and anatomical objects are still used in medical science. It generally happened in the 18th and 19th centuries. The answer of Alex gives some nice examples of when it happened. In the Middle Ages the only subjects considered worthy of scholarly interest were theology, philosophy and ancient history (this is a stereotypical oversimplification; there were books on medicine, herbs, art, etc... too), so the main occupation of scholars was to read works of ancient authors writing in Latin (or rarely Greek, Hebrew or Arabic). After the 17th century new disciplines and fields emerged, many of them based on experiments or newly invented everyday activities (like economics or engineering). It would have been cumbersome to invent, introduce and learn Latin words for all these new concepts. The Reformation made it possible to work in the native tongue in theology and religious teaching, and after that the use of modern languages slowly permeated all the other areas of the written word, like law, governance or high literature. In the most critical decades of the 18th and 19th centuries it was expected of an educated European man to understand French, so there was a replacement international language ready. When the international cooperation needed for 'big science' became important in the 20th century, Anglo-Saxon countries had already grabbed the lead in many modern sciences and technologies, so their language became the standard.
Latin was considered 'cool' in Renaissance and Classicism, while it was definitely 'uncool' in Romanticism. Not the answer you're looking for? Browse other questions tagged latin academics or ask your own question.
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Parameter', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255)), ('value', models.CharField(max_length=255)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='UserParameter', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=255)), ('value', models.CharField(max_length=255)), ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)), ], options={ }, bases=(models.Model,), ), ]
Recent research, carried out by Cardiff University in collaboration with the Universities of Dundee and Birmingham, has led to the discovery of a molecule which could help to slow down and potentially treat Parkinson’s disease. Parkinson's disease is a long-term degenerative disorder of the central nervous system that, according to the charity Parkinson’s UK, affects one person in every 500. That means an estimated 127,000 people are currently living with Parkinson’s disease in the UK alone. One of the various causes of this illness is the malfunction of a protein called PINK1. This latest research focused on how a new class of molecules, already proven to be effective in treating cancer and viral infections, can activate the protein to slow down neurodegeneration and treat Parkinson's disease. In the process, a new molecule was found, called a nucleoside, which targets the PINK1 protein and therefore has the potential to treat Parkinson’s disease. Further work is being carried out in the laboratory to increase the potency of this molecule and explore how it can best be used to develop effective treatments for Parkinson’s disease. The paper was published in the Journal of Medicinal Chemistry and can be accessed here.
# coding: utf-8 from cms.models import Placeholder # noqa - needed, circular import otherwise from cms.plugin_rendering import ContentRenderer # , RenderedPlaceholder try: # cms 3.5 or 3.7+ from cms.plugin_rendering import StructureRenderer except ImportError: StructureRenderer = None # load conf at startup from .conf import UntranslatedPlaceholderConf # noqa # import signals at startup from .signals import * # noqa (will forget to update otherwise!) from .utils import get_untranslated_default_language_if_enabled def new_renderer__init__(self, request): self.__original_init__(request) lang = get_untranslated_default_language_if_enabled() if lang: self.request_language = lang # monkey patch! # for normal plugin rendering. ContentRenderer.__original_init__ = ContentRenderer.__init__ ContentRenderer.__init__ = new_renderer__init__ def new_structure_render_placeholder(self, placeholder, language, page=None): language = language or self.request_language return self.__original_render_placeholder(placeholder, language, page) if StructureRenderer: # for structure mode StructureRenderer.__original_init__ = StructureRenderer.__init__ StructureRenderer.__init__ = new_renderer__init__ StructureRenderer.__original_render_placeholder = StructureRenderer.render_placeholder StructureRenderer.render_placeholder = new_structure_render_placeholder
Today we have a great giveaway from our friends at Eco Store USA. Ecostore is a company that produces cleaning and personal products made from plant- and mineral-based ingredients, free of toxic chemicals that bring people closer to nature with non-toxic, environmentally safe solutions that also help to reduce our carbon footprint. The winner of this giveaway will receive $25 worth of full size products of their choice. * Pop on over to Eco Store USA and check out all of their great products. Then come back and leave us a comment telling us what products you would most like to try. This contest ends 3/24/09 at 11:59pm (pdst) so don't wait - make sure you've gotten all six entries before it's too late! Please note: entries received after the cutoff time will be deleted prior to drawing a winner. i love ecostore stuff!! i did a product review and giveaway a couple of weeks ago. they are the nicest people!! and...the products are wonderful!! I'd like to try the Rosemary Orange Conditioner. Thanks. I visited the ecostore. I'm a fan on the lemongrass bar soap and sandalwood hair care products! It looks like a great company! This is a great giveaway! I have been looking at their site and selection for a while now and their stuff looks great, I'd love to try their lemongrass and vanilla soaps! I'm a soap/bath and body product junkie! I would really love to try the Vanilla Shampoo & Conditioner. I am following you on Twitter...my user name is Nelsby. I'd love to try the lemongrass soap! I'd like to try the Citrus Spray Cleaner. I am following you on Twitter! My username is OklasLoca..Thanks! I have been wanting to try the Vanilla Shampoo and Conditioner! I would like to try the Aloe Vera Shampoo! I would really like to try the Manuka Honey & Kelp Soap and that is what I would buy if I won. I would like to try the dishwash liquid. I have heard their products are really good. Thanks. I would love to try some products from Ecostore USA, they sound fantastic! 
I would like to try the Manuka Honey & Kelp Soap, this sounds wonderful. Also the Baby Sleepytime Bath, this is perfect for my 6 month old daughter. The Rosemary Orange Shampoo and matching conditioner sounds nourishing for the hair and the scent sounds so refreshing! Love it! Thank you so much for the opportunity to enter. I am signed up to receive Pink Lemonade through email. Rosemary Orange Conditioner...sounds great to me! I would love to try the Vanilla Shampoo and Conditioner. Thanks! I would like to try a couple of the shampoos, Rosemary Orange and Manuka Sandalwood to start. I would like to try the Front Loader Laundry Powder. I really love to try the Vanilla Shampoo & Conditioner. THE VANILLA SHAMPOO AND CONDITIONER SOUNDS SO GOOD TO ME. IF IT IS GOOD FOR THE ENVIRONMENT IT IS GOOD FOR YOUR HAIR AND SCALP, NO DOUBT ABOUR THIS! i LIKE TO TRY IT! I'd love to try the Hypoallergenic Pet Shampoo! I like the Vanilla Shampoo & Conditioner. I would love to try the Laundry Powder. Thanks for a really ecologically-based giveaway!! I would like to try the Pure Oxygen Whitener and the Citrus Spray Cleaner. Thank you! I would most like to try the Manuka Honey & Kelp Soap. Thanks for the giveaway! Baby Sleepytime bath would be awesome. I'd like to try the aloe vera shampoo and the laundry powder. Thanks. I'm a fan on your Facebook page, Terra Heck. I'd love get the laundry powder and the pet leave-in conditioner. I'd like to try the Laundry Liquid. Thanks! I would like to try the laundry soap and the vanilla bar soap. I'd like to try the Rosemary Orange Shampoo. I'd love to try the Manuka Sandalwood Conditioner & Shampoo!!! Thanks. I'd like to try the Coconut Soap, the Rosemary Orange Shampoo and the Citrus Spray Cleaner. Thanks for the giveaway! I'd like to try the Manuka Sandalwood Conditioner & Shampoo. I love the smell of sandalwood. I would like the chance to win, thanks. i would like the vanilla shampoo and conditioner. 
I've been looking at their products at the store but haven't bought yet. I'd love to try the Baby Care products, starting with the Moisturizer. I really want to try the Coconut Soap. The vanilla shampoo sounds nice! I'd like to try the Lemongrass and Coconut soaps. I would love to try the baby shampoo and the baby moisturizer! I would love to try the Auto Dish Powder. Thanks!! I'd love to try their Vanilla Shampoo and their Rosemary Orange Shampoo. Christina - xristya@rock.com - I'd love to try the Rosemary Orange Shampoo, which sounds like a wonderful combination! I would like to try the coconut soap! Their Oxygen Whitener and Dish Washing Liquid look great! I have been dying to try their products--especially the dish washing liquid. I have yet to find a good eco-friendly alternative that gets my dishes clean. The Lemongrass Soap and Coconut Soap look great!! I would love to try the Manuka Sandalwood Shampoo and Conditioner. Thanks! I'm following you on Twitter. User lisalmg. I'm a Facebook fan! User Lisa Garner! I'm also an email subscriber. I like the Sleepytime Baby Wash. I would be interested in try Ecostore's hair care products because they're sulfate and parabens free. That is so much more gentle on your hair. I'd be delighted to try the Vanilla soap and Vanilla shampoo/conditioner. Their Aloe Vera shampoo is the most enticing of the offerings. I'd like to try the Citrus Spray Cleaner. Thanks for the contest. The baby sleepytime bath. that would be great to try... so soothing! I'd like to try the cream cleanser. It's hard to find something safe and effective for my stainless appliances. I'd love to try all of their yummy soaps and the Vanilla Shampoo & Conditioner. With a $25 gift certificate, I could try the Citrus Spray Cleaner, the Toilet Cleaner, and the Wool Wash. i'd love to try the vanilla shampoo and conditioner. I'd try the baby sleepytime bath. Aloe Vera Shampoo would be nice to try. I would use all of the cleaning products! 
I hate nasty cleaning products with all those chemicals! We all have to clean..what great product ideas. I love lemongrass so I'd like to try their lemongrass soap. Thank you! Definitley the Manuka Honey and Kelp Soap. As a mommy-to-be for the first time, I'd love to try Eco Store's line of baby care products! I would love to try the laundry powder! The Rosemary Orange Shampoo and Conditioner sound amazing! Citrus Spray Cleaner looks great! I'd love to try the Vanilla Shampoo & Conditioner! And their pet care products look great too! My dog would smell so much better! I'd love to try the Pet Ear Wash. I would love to try the Herbal Fresh Dog Shampoo and Lemongrass soap! I would try their laundry soap. Thanks! I would like to try the Aloe Vera shampoo & conditioner. Thanks for the giveaway! The Rosemary Orange Conditioner sounds amazing! I would love to try the Cream Cleanser! I subscribe to feed & emails........follow on blogger & you're on my blog roll. Thanks for the giveaway...The Ecostore USA's "Cream Cleanser" would feel quite welcome in the cleaning nightmare that is our bathroom / shower (s) !!! I would like the Laundry Liquid. Thanks. I'd love to try the dish wash liquid. I just saw these products in a local store and was excited. I would like to try the Coconut Soap. The Rosemary Orange Shampoo sounds perfect for me! I would also like to try the Manuka Honey and Kelp soap. Thanks for the chance to win! I'm also a subscriber via e~mail.
""" BitBake "Fetch" repo (git) implementation """ # Copyright (C) 2009 Tom Rini <trini@embeddedalley.com> # # Based on git.py which is: # Copyright (C) 2005 Richard Purdie # # SPDX-License-Identifier: GPL-2.0-only # import os import bb from bb.fetch2 import FetchMethod from bb.fetch2 import runfetchcmd from bb.fetch2 import logger class Repo(FetchMethod): """Class to fetch a module or modules from repo (git) repositories""" def supports(self, ud, d): """ Check to see if a given url can be fetched with repo. """ return ud.type in ["repo"] def urldata_init(self, ud, d): """ We don"t care about the git rev of the manifests repository, but we do care about the manifest to use. The default is "default". We also care about the branch or tag to be used. The default is "master". """ ud.basecmd = d.getVar("FETCHCMD_repo") or "/usr/bin/env repo" ud.proto = ud.parm.get('protocol', 'git') ud.branch = ud.parm.get('branch', 'master') ud.manifest = ud.parm.get('manifest', 'default.xml') if not ud.manifest.endswith('.xml'): ud.manifest += '.xml' ud.localfile = d.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch)) def download(self, ud, d): """Fetch url""" if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK): logger.debug(1, "%s already exists (or was stashed). 
Skipping repo init / sync.", ud.localpath) return repodir = d.getVar("REPODIR") or (d.getVar("DL_DIR") + "/repo") gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", ".")) codir = os.path.join(repodir, gitsrcname, ud.manifest) if ud.user: username = ud.user + "@" else: username = "" repodir = os.path.join(codir, "repo") bb.utils.mkdirhier(repodir) if not os.path.exists(os.path.join(repodir, ".repo")): bb.fetch2.check_network_access(d, "%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url) runfetchcmd("%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir) bb.fetch2.check_network_access(d, "%s sync %s" % (ud.basecmd, ud.url), ud.url) runfetchcmd("%s sync" % ud.basecmd, d, workdir=repodir) scmdata = ud.parm.get("scmdata", "") if scmdata == "keep": tar_flags = "" else: tar_flags = "--exclude='.repo' --exclude='.git'" # Create a cache runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*") ), d, workdir=codir) def supports_srcrev(self): return False def _build_revision(self, ud, d): return ud.manifest def _want_sortable_revision(self, ud, d): return False
On Friday, “Morning Joe” co-host Mika Brzezinski called Bill Clinton a “predator” during a conversation about Alabama Senate candidate Roy Moore. We have to look at what the goal is here. What’s the goal? What are we trying to do? Because I think the goal is to shine light on really bad behavior … we need to create an environment where everyone feels safe, where this behavior is shunned, where it’s unwelcome, and where it’s prevented. So, is the goal to bring people down based on politics or hate? There are cases that are proven, where there are settlements, where there is evidence and the sexual predator has been brought down. Harvey Weinstein. There are cases where the predator flourished and continued to be the President of the United States. I’m talking about Bill Clinton. Where the women were attacked, where they were settled with, and their lives were ruined – Monica Lewinsky.
__all__ = ['Serializer', 'SerializerError']

from error import YAMLError
from events import *
from nodes import *

class SerializerError(YAMLError):
    # Raised on protocol misuse (serializing before open(), after close(), etc.).
    pass

class Serializer(object):
    """Turn a node graph into a stream of events, emitting anchors/aliases
    for nodes that occur more than once.

    Lifecycle: open() -> serialize(node) (any number of times) -> close().
    self.closed is tri-state: None = never opened, False = open, True = closed.
    """

    # Template for auto-generated anchor names: id001, id002, ...
    ANCHOR_TEMPLATE = u'id%03d'

    def __init__(self, encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        # Per-document state, reset at the end of each serialize() call.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
        self.closed = None

    def open(self):
        """Emit StreamStartEvent; may only be called once, before close()."""
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")

    def close(self):
        """Emit StreamEndEvent; idempotent once closed."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True

    #def __del__(self):
    #    self.close()

    def serialize(self, node):
        """Serialize one document rooted at `node`.

        Two passes: anchor_node() first marks which nodes need anchors
        (those reachable more than once), then serialize_node() emits events.
        """
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # Reset per-document state for the next serialize() call.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0

    def anchor_node(self, node):
        """First pass: record each reachable node; assign an anchor name the
        second time a node is seen (first visit stores None, meaning
        "seen once, no anchor needed yet")."""
        if node in self.anchors:
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)

    def generate_anchor(self, node):
        """Return the next sequential anchor name (id001, id002, ...)."""
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id

    def serialize_node(self, node, parent, index):
        """Second pass: emit events for `node`; emit an AliasEvent instead if
        this node was already serialized in the current document."""
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # implicit = (tag matches plain-style resolution,
                #             tag matches non-plain default resolution)
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
Everything you and your family have been searching for is here at Desert Mountain, situated on 8,000 acres in the high Sonoran Desert. Tucked into the rolling hills and dramatic scenery of North Scottsdale, Arizona, Desert Mountain is among the finest private golf and recreational communities in the world, and is consistently ranked among the top private clubs in the nation. It is the only private community worldwide with six Jack Nicklaus Signature Golf Courses, each one designed to offer an individual playing experience at all skill levels. But great golf is just the beginning. A wide variety of amenities, activities, and events for every member of the family makes the Desert Mountain lifestyle like no other. Always evolving with new experiences, the legendary community defines the ultimate in high desert living.
# NOTE: Python 2 script (uses print statements). Runs PAML's codeml and
# baseml over a list of yeast paralog pairs, generating per-pair control
# files from shared templates.
import os
import subprocess
from Bio import Seq, SeqIO, AlignIO
from Bio.Phylo.PAML import codeml, baseml
import numpy as np

if __name__ == '__main__':
    # Root directory containing one sub-directory per paralog pair.
    path = '/Users/xji3/Genconv/IdenticalParalogAlignment/'
    # Read pairs as lists of two gene names from the underscore-joined file.
    pairs = []
    with open('./All_Pairs.txt', 'r') as f:
        for line in f.readlines():
            pairs.append(line.replace('\n','').split('_'))

    # Manual adjustments to the pair list for this run.
    pairs.remove(['YLR028C', 'YMR120C'])
    pairs.append(['YLR284C','YOR180C'])  # this pair didn't appear this time
    #pairs.remove(['YML026C', 'YDR450W'])# remove it for now
    #pairs = [pairs[-1]]

    # Template tree uses this pair's names; they get substituted per pair.
    tree_pair = ['YML026C', 'YDR450W']
    with open('./YeastTree_paml.newick', 'r') as f:
        all_tree_lines = f.readlines()

    # Shared tails of the codeml/baseml control files (everything except
    # the per-pair seqfile/treefile/outfile lines written below).
    with open('./codeml_tail.ctl', 'r') as f:
        all_codeml_ctl_lines = f.readlines()

    with open('./baseml_tail.ctl', 'r') as f:
        all_baseml_ctl_lines = f.readlines()

    # NOTE(review): these assignments rebind the names `codeml` and `baseml`,
    # shadowing the Bio.Phylo.PAML modules imported above. The commented-out
    # summary code at the bottom calls codeml.read()/baseml.read() and would
    # fail after this point -- rename these to e.g. codeml_bin/baseml_bin
    # before re-enabling it.
    codeml = '/Users/xji3/Downloads/paml4.8/bin/codeml'
    baseml = '/Users/xji3/Downloads/paml4.8/bin/baseml'

    for pair in pairs:
        print 'Now run paml on pair ' + ' '.join(pair)
        seqfile = path + '_'.join(pair) + '/' + '_'.join(pair) + '_IdenticalParalog_paml_input.fasta'
        treefile = path + '_'.join(pair) + '/' + '_'.join(pair) + '_tree.newick'
        # Write a pair-specific tree by substituting the template gene names.
        with open(treefile, 'w+') as f:
            for line in all_tree_lines:
                new_line = line.replace(tree_pair[0], pair[0])
                new_line = new_line.replace(tree_pair[1], pair[1])
                f.write(new_line)

        # --- codeml run -----------------------------------------------------
        outfile_codeml = path + '_'.join(pair) + '/' + '_'.join(pair) + '_IdenticalParalog_codeml'
        codeml_ctlfile = path + '_'.join(pair) + '/' + '_'.join(pair) + '_IdenticalParalog_codeml_control.ctl'
        with open(codeml_ctlfile, 'w+') as f:
            # Per-pair header lines, then the shared template tail.
            f.writelines(['seqfile = ' + seqfile + '\n',
                          'treefile = ' + treefile + '\n',
                          'outfile = ' + outfile_codeml + '\n'])
            f.writelines(all_codeml_ctl_lines)

        # codeml is run from inside the pair directory, so the control file
        # is referenced by its bare name.
        codeml_cmd = [codeml, '_'.join(pair) + '_IdenticalParalog_codeml_control.ctl']
        os.chdir(path + '_'.join(pair) + '/')
        #os.system(' '.join(codeml_cmd))
        subprocess.check_output(codeml_cmd)

        # --- baseml run -----------------------------------------------------
        outfile_baseml = path + '_'.join(pair) + '/' + '_'.join(pair) + '_IdenticalParalog_baseml'
        baseml_ctlfile = path + '_'.join(pair) + '/' + '_'.join(pair) + '_IdenticalParalog_baseml_control.ctl'
        with open(baseml_ctlfile, 'w+') as f:
            f.writelines(['seqfile = ' + seqfile + '\n',
                          'treefile = ' + treefile + '\n',
                          'outfile = ' + outfile_baseml + '\n'])
            f.writelines(all_baseml_ctl_lines)

        baseml_cmd = [baseml, '_'.join(pair) + '_IdenticalParalog_baseml_control.ctl']
        subprocess.check_output(baseml_cmd)

##    summary_mat = []
##    finished_list = []
##    label = ['MG94_codeml_tree_length', 'MG94_codeml_lnL', 'MG94_codeml_omega', 'MG94_codeml_kappa',
##             'HKY_baseml_tree_length', 'HKY_baseml_lnL', 'HKY_baseml_kappa']
##    footer = ' '.join(label)
##
##    for pair in pairs:
##        codeml_result = codeml.read('/Users/xji3/Genconv/NewClusterPackRun/NewPairsAlignment/' + '_'.join(pair) + '/' + '_'.join(pair) + '_codeml')
##        baseml_result = baseml.read('/Users/xji3/Genconv/NewClusterPackRun/NewPairsAlignment/' + '_'.join(pair) + '/' + '_'.join(pair) + '_baseml')
##        summary_mat.append([codeml_result['NSsites'][0]['tree length'],
##                            codeml_result['NSsites'][0]['lnL'],
##                            codeml_result['NSsites'][0]['parameters']['omega'],
##                            codeml_result['NSsites'][0]['parameters']['kappa'],
##                            baseml_result['tree length'],
##                            baseml_result['lnL'],
##                            baseml_result['parameters']['kappa']])
##        finished_list.append(pair)
##
##    header = ' '.join(['_'.join(pair) for pair in finished_list])  # column labels
##    np.savetxt(open('/Users/xji3/Genconv/NewClusterPackRun/NewPairsAlignment/paml_summary.txt', 'w+'), np.matrix(summary_mat).T, delimiter = ' ', footer = footer, header = header)
Wastholm.com » Blog Posts Tagged "propaganda" While the US drone programme is shrouded in secrecy, security sources regularly brief the media on the names of those suspected militants targeted or killed in the strikes. Frequently, those individuals are reported to have been targeted or killed on multiple occasions. We covet diamonds in America for a simple reason: the company that stands to profit from diamond sales decided that we should. De Beers’ marketing campaign single handedly made diamond rings the measure of one’s success in America. Despite its complete lack of inherent value, the company manufactured an image of diamonds as a status symbol. And to keep the price of diamonds high, despite the abundance of new diamond finds, De Beers executed the most effective monopoly of the 20th century. It's great to see Africa starting to explore the benefits of open source not only as a way of rolling out software more cheaply than would be the case for proprietary programs, whose Western pricing makes them particularly costly for emerging nations, but also as an effective means of building up a vibrant indigenous software industry that is not based simply on shovelling lots of money to the US. However, it's sad to see that Microsoft seems to have learned nothing from its earlier, unsuccessful attempts to spread FUD about open source, and seems intent on recapitulating that shabby and rather pathetic history in Africa too. According to reports in the Associated Press, the setting up of China's Confucius Peace Prize was intended to protest the 2010 Nobel Peace Prize award to Chinese dissident Liu Xiaobo. This year will witness the third Confucius Peace Prize since its setup. However, the previous two award ceremonies of this prize didn't go very well. The laureates selected never showed up nor even cared about receiving such a prize. Some observers saw the affair as a complete farce. 
The award was given to a terrified small child, supposed to represent Kuomintang Honorary Chairman Lien Chan at the first ceremony and two Russian hotties, supposed to represent Russian President Vladimir Putin, at the second, which just added to the entertainment value. The Tohoku 9.0 earthquake, fifth largest ever recorded, created a tsunami with large waves up to 40 meters, with walls of water swallowing coastal towns, has been one of the worst natural disasters in recent history with the death toll reaching just below 20,000 people, estimated damage $310 billion. The scale of the calamity is truly epic. Hence, the Fukushima nuclear accident should have been only a side show. Not so, it immediately became the principal show. Coverage in the U.S. media replicated hysteria, sensationalism, scaremongering and disinformation that characterized coverage of the Three Mile Island (TMI) accident in 1979. It appears that coverage in Europe wasn’t much better. Initially the mainstream media paraded a stream of anti-nuclear activists who excelled in predicting an equivalent of Armageddon with cataclysmic consequences. Här har vi alltså ett kontroversiellt avtal, med lagstiftningsliknande effekter. Utlåtandet om huruvida det är lagligt eller ej hemligstämplas. Diskussionen förs bakom stängda dörrar. Alla som kan ha invändningar luras att tro att frågan inte kommer att behandlas i dag. Och när det ändå sker, då tillåts utskottet inte ens rösta om huruvida mötet skall vara offentligt eller bakom stängda dörrar. The movie industry claims that piracy is costing them billions of dollars a year. Luckily for Hollywood, many Americans choose to consume their online media through legal services such as Netflix. In fact, there are now so many that the total Internet traffic generated by Netflix has outgrown that of BitTorrent. This made us wonder – what would happen if all movie-downloading BitTorrent users made the switch to Netflix? What if movie piracy via BitTorrent disappeared?
#!/usr/bin/env python3

import sys
import os
import json


def main(argv):
    """Recursively read all files in the directories given on the command
    line, then copy them (copy instead of move, for safety) into ./outputs
    under a <data>/<date>/<user>.json layout derived from each file name.
    """
    os.makedirs('./outputs', exist_ok=True)
    dir_queue = argv[1:]
    file_queue = get_all_files(dir_queue)
    process_files(file_queue)


def process_files(file_queue):
    """Copy every file in *file_queue* to ./outputs/<data>/<date>/<user>.json.

    Source names are expected to look like ``b<N>_<date>_<data>.json``;
    the user id is normalised to 'B' plus a zero-padded two-digit number.
    The JSON is round-tripped through json.load/json.dump, which both
    validates it and pretty-prints the copy.
    """
    for target in file_queue:
        filename = target.split('/')[-1]
        # Normalise e.g. "b3" -> "B03".
        user = filename.split('_')[0]
        user_num = int(user[1:])
        user = 'B' + '{:02}'.format(user_num)
        date = filename.split('_')[1]
        data = (filename.split('_')[2]).split('.')[0]
        target_path = './outputs/' + data + '/' + date + '/' + user + '.json'
        os.makedirs('./outputs/' + data + '/' + date, exist_ok=True)
        with open(target, 'r') as read_filep:
            with open(target_path, 'w') as write_filep:
                read_json = json.load(read_filep)
                json.dump(read_json, write_filep, indent=4)
        print(target_path)


def get_all_files(dir_queue):
    """Return the paths of all non-hidden files under the directories in
    *dir_queue*, recursing into sub-directories.

    Note: *dir_queue* is consumed (mutated) as a work list.

    Bug fix: the original branched on ``not hidden and is_file()`` and sent
    *everything else* -- including hidden files -- back into the directory
    queue, so a dot-file crashed os.scandir() with NotADirectoryError.
    Directories (hidden or not) are now recursed into and hidden files are
    simply skipped.
    """
    file_queue = list()

    while len(dir_queue) > 0:
        path = dir_queue.pop()
        with os.scandir(path) as it:
            for entry in it:
                if entry.is_dir():
                    dir_queue.append(entry.path)
                elif entry.is_file() and not entry.name.startswith('.'):
                    file_queue.append(entry.path)

    return file_queue


if __name__ == '__main__':
    sys.exit(main(sys.argv))
Putting in pre assembled laundry room cabinets laundry cabinets is going to undoubtedly be much easy if it’s carried out by just two different people. It really don’t require many applications and significant construction involvement. A few measures below can allow you to set up the walls by yourself. Ensure to know that the specific dimension of the distance prior to buying a storage, especially within the toilet cupboard. Take your measuring tape to understand the width of the wall as well as the span from top of container . The minimum distance between the cabinet’s underside and also the surface of the tank needs to be about 2 ft. Take a great look at your own stuffs. It’s suggested for you to expand the storage if you may love to keep significant things in it. It will continue to keep the storage stable as well as robust. To get just about any storage solution you opt for you have to be certain you select the decorative pre assembled laundry room cabinets laundry cabinets. Pay awareness of the important points, designs, and colours. They’ll affect your cabinet overall look. Despite the fact that quartz is a engineered stone, its sturdiness is comparable even for the strongest granites. Quartz is quite popular due to its flexibility. This caliber makes quartz installation and cutting process super uncomplicated. Despite the fact that quartz is extremely expensive, the cost really matches its own quality. Granite has become the most chosen stuff of pure rock for countertops for cabinet. It’s completely immune to scratchmoist, moist, and also humidity. When it is suitably sealed, then granite is more absolutely durable for several years. Additionally, granite comes with a lot of colors and layouts. It may add value for a own home by having granite counter tops. Contrary to granite, quartz does not need sealing since it’s the most popular natural rock and is rich in minerals. 
pre assembled laundry room cabinets laundry cabinets, primarily quartz is included with resins, pigments, and recycled materials. Quartz is resistant to bacteria, stains, and humidity. It’s likewise encouraged to choose fittings with smallscale items. The compact and sculptural fixtures will optimize your flooring area and match with streamline apartment vibe. Next, add illusion on your cabinet with glass. The glass shower enclosure comprising distance illusion in tiny cabinet by lets people inside to find that the ending of the space to some other. Utilize cabinet planning. It will be simpler if we draw on the position of this window of the cabinet, doors, towel railsand switches of sink, lights, etc.. Choose freestanding cabinet that’s flexible to put anyplace we enjoy, or the one that is fitted to the wall, or the one that is fitted anywhere we enjoy not only about the wall since it’s more flexible. It is for you to select. Supplies: wood, glass, mixed-material, metal, etc.. All you need to do is learn about the pluses and minuses of the substances deeply. Groups: classic, contemporary, classic, etc.. It’s all about preference. Opt for the one which meets the way you live. Thus, are you ready to buy the ideal pre assembled laundry room cabinets laundry cabinets for you personally along with your family members? cabinet will probably stay a terrific solution for limited spaced cabinet. However, it can offer a serious cleanup issue. There are plenty of secrets to clean the corner vanity in the cabinet efficiently. Clean the debris first. Before applying some other cleaner, then it’s necessary for you to clean the dust or some other sterile dirt in the firsttime. You’re able to make use of a duster as well as also a dust pan to wash the dry dust. It can make you more easy to scrutinize just how dirty the cupboard isstill. This Pre Assembled Laundry Room Cabinets Laundry Cabinets the gallery form Laundry Room Cabinets. 
Hopefully you can find the best inspiration from our gallery here.
"""Support for Synology DSM cameras.""" import logging from typing import Dict from synology_dsm.api.surveillance_station import SynoSurveillanceStation from synology_dsm.exceptions import SynologyDSMAPIErrorException from homeassistant.components.camera import SUPPORT_STREAM, Camera from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from . import SynoApi, SynologyDSMCoordinatorEntity from .const import ( COORDINATOR_SURVEILLANCE, DOMAIN, ENTITY_CLASS, ENTITY_ENABLE, ENTITY_ICON, ENTITY_NAME, ENTITY_UNIT, SYNO_API, ) _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: """Set up the Synology NAS cameras.""" data = hass.data[DOMAIN][entry.unique_id] api = data[SYNO_API] if SynoSurveillanceStation.CAMERA_API_KEY not in api.dsm.apis: return # initial data fetch coordinator = data[COORDINATOR_SURVEILLANCE] await coordinator.async_refresh() async_add_entities( SynoDSMCamera(api, coordinator, camera_id) for camera_id in coordinator.data["cameras"] ) class SynoDSMCamera(SynologyDSMCoordinatorEntity, Camera): """Representation a Synology camera.""" def __init__( self, api: SynoApi, coordinator: DataUpdateCoordinator, camera_id: int ): """Initialize a Synology camera.""" super().__init__( api, f"{SynoSurveillanceStation.CAMERA_API_KEY}:{camera_id}", { ENTITY_NAME: coordinator.data["cameras"][camera_id].name, ENTITY_ENABLE: coordinator.data["cameras"][camera_id].is_enabled, ENTITY_CLASS: None, ENTITY_ICON: None, ENTITY_UNIT: None, }, coordinator, ) Camera.__init__(self) self._camera_id = camera_id self._api = api @property def camera_data(self): """Camera data.""" return self.coordinator.data["cameras"][self._camera_id] @property def device_info(self) -> Dict[str, any]: """Return the device information.""" return { "identifiers": { ( DOMAIN, 
self._api.information.serial, self.camera_data.id, ) }, "name": self.camera_data.name, "model": self.camera_data.model, "via_device": ( DOMAIN, self._api.information.serial, SynoSurveillanceStation.INFO_API_KEY, ), } @property def available(self) -> bool: """Return the availability of the camera.""" return self.camera_data.is_enabled and self.coordinator.last_update_success @property def supported_features(self) -> int: """Return supported features of this camera.""" return SUPPORT_STREAM @property def is_recording(self): """Return true if the device is recording.""" return self.camera_data.is_recording @property def motion_detection_enabled(self): """Return the camera motion detection status.""" return self.camera_data.is_motion_detection_enabled def camera_image(self) -> bytes: """Return bytes of camera image.""" _LOGGER.debug( "SynoDSMCamera.camera_image(%s)", self.camera_data.name, ) if not self.available: return None try: return self._api.surveillance_station.get_camera_image(self._camera_id) except (SynologyDSMAPIErrorException) as err: _LOGGER.debug( "SynoDSMCamera.camera_image(%s) - Exception:%s", self.camera_data.name, err, ) return None async def stream_source(self) -> str: """Return the source of the stream.""" _LOGGER.debug( "SynoDSMCamera.stream_source(%s)", self.camera_data.name, ) if not self.available: return None return self.camera_data.live_view.rtsp def enable_motion_detection(self): """Enable motion detection in the camera.""" _LOGGER.debug( "SynoDSMCamera.enable_motion_detection(%s)", self.camera_data.name, ) self._api.surveillance_station.enable_motion_detection(self._camera_id) def disable_motion_detection(self): """Disable motion detection in camera.""" _LOGGER.debug( "SynoDSMCamera.disable_motion_detection(%s)", self.camera_data.name, ) self._api.surveillance_station.disable_motion_detection(self._camera_id)
This is a night of appreciation for all of our wonderful Vacation Bible School Volunteers. We will have pizza, a salad bar, & an ice cream sundae bar. There will also be prizes, and free chair massages from Active Life Chiropractic. Thank you to our amazing volunteers!!!
""" Django settings for myproject project. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '%ovvje%lh&k-%0v!@_c1gygt#aq-!o3*t$(hpee7@aj&35cr3a' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'myapp', 'tastypie', ) MIDDLEWARE = MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'myproject.urls' WSGI_APPLICATION = 'myproject.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:', } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/'
 Dawn USA Colorado River 2" Grid Shower Drain Go to Shop. This shower drain is made with superior workmanship to ensure that it is of the highest quality. Product Warranty: Limited Life Time Product Type: Grid Installation Required: Yes Overall: 24 W x 3 D Outlet Diameter: 2. This Dawn USA Colorado River 2" Grid Shower Drain is popular product from online market. This is a right time to order Dawn USA Colorado River 2" Grid Shower Drain I hope you can buy this Dawn USA Colorado River 2" Grid Shower Drain at very low price. My vriendin het al 'n lang tyd 'n ogie oor hierdie produk gehou en haar verjaarsdag het net aangekom, so ek het besluit om dit as 'n geskenk te koop. Sy is lief vir dit! Dit is die perfekte een! Dit pas alles en pas by enige plek. Groot produk vir kwaliteit. This a Beautiful shower drain. I'm glad a I bought it. I want to order now! After 3-4 days it comes in a lovely Box. I'm very pleased. The minute you enter an online shower drain store, you can find what you want a lot faster than you can going from department to department in a big mall store. Ek het onlangs 2 produkte gekoop. Een vir my vriend en een vir my. My vriend sê dat hierdie produk ongelooflik is. Hierdie produk is baie veelsydig. Maklik om skoon te maak en omkeerbaar is 'n groot voordeel. Gekoop in swart. Ons is albei van ons produk lief. Ek kan sien hoekom hy sulke goeie resensies het. Ek wil! Welcome to my website & Thanks for visit my website. I collect the Popular Shower Drain from the trusted online stores. I hope this website is useful for you. If you are looking for Dawn USA Colorado River 2" Grid Shower Drain or Shower Drain. and you may read more information or click "Go to store" button for check last updated price and view more images.
from __future__ import unicode_literals from collections import OrderedDict import json from .common import Allele, VCFLine CLNSIG_INDEX = { '0': "unknown", '1': "untested", '2': "non-pathogenic", '3': "probably non-pathogenic", '4': "probably pathogenic", '5': "pathogenic", '6': "affecting drug response", '7': "affecting histocompatibility", '255': "other"} class ClinVarAllele(Allele): """Store ClinVar data relating to one allele.""" def __init__(self, *args, **kwargs): """ Initialize ClinVarAllele object A ClinVarAllele is an allele for a genomic position that has data from ClinVar associated with it. Required arguments: sequence: String of DNA letters (A, C, G, or T) for the allele; may be empty (to represent a deletion) frequency: Preferred allele frequency alleleid: ClinVar Allele ID clnhgvs: HGVS nomenclature for this allele clnsig: ClinVar clinical significance clndn: ClinVar disease name clndisdb: Database IDs of disease database entries (tag-value pairs) clnvi: Database IDs of clinical sources (tag-value pairs) """ (self.clnalleleid, self.hgvs, self.clnsig, self.clndn, self.clndisdb, self.clnvi) = [ kwargs[x] for x in ['alleleid', 'clnhgvs', 'clnsig', 'clndn', 'clndisdb', 'clnvi']] super(ClinVarAllele, self).__init__(*args, **kwargs) def as_dict(self, *args, **kwargs): """Return ClinVarAllele data as dict object.""" self_as_dict = super(ClinVarAllele, self).as_dict(*args, **kwargs) self_as_dict['hgvs'] = self.hgvs self_as_dict['clnalleleid'] = self.clnalleleid self_as_dict['clnsig'] = self.clnsig self_as_dict['clndn'] = self.clndn self_as_dict['clndisdb'] = self.clndisdb self_as_dict['clnvi'] = self.clnvi return self_as_dict class ClinVarVCFLine(VCFLine): """Store ClinVar data from a VCF line.""" def __init__(self, *args, **kwargs): """Initialize ClinVarVCFLine with VCF line""" kwargs['skip_info'] = False super(ClinVarVCFLine, self).__init__(self, *args, **kwargs) def as_dict(self): """Dict representation of parsed ClinVar VCF line""" return {'chrom': 
self.chrom, 'start': self.start, 'ref_allele': self.ref_allele, 'alt_alleles': self.alt_alleles, 'info': self.info, 'alleles': [x.as_dict() for x in self.alleles]} def _parse_frequencies(self): """Parse frequency data in ClinVar VCF""" frequencies = OrderedDict([ ('EXAC', 'Unknown'), ('ESP', 'Unknown'), ('TGP', 'Unknown')]) pref_freq = 'Unknown' for source in frequencies.keys(): freq_key = 'AF_' + source if freq_key in self.info: frequencies[source] = self.info[freq_key] if pref_freq == 'Unknown': pref_freq = frequencies[source] return pref_freq, frequencies def _parse_allele_data(self): """Parse alleles for ClinVar VCF, overrides parent method.""" # Get allele frequencies if they exist. pref_freq, frequencies = self._parse_frequencies() info_clnvar_single_tags = ['ALLELEID', 'CLNSIG', 'CLNHGVS'] cln_data = {x.lower(): self.info[x] if x in self.info else None for x in info_clnvar_single_tags} cln_data.update( {'clndisdb': [x.split(',') for x in self.info['CLNDISDB'].split('|')] if 'CLNDISDB' in self.info else []}) cln_data.update({'clndn': self.info['CLNDN'].split('|') if 'CLNDN' in self.info else []}) cln_data.update({'clnvi': self.info['CLNVI'].split(',') if 'CLNVI' in self.info else []}) try: sequence = self.alt_alleles[0] except IndexError: sequence = self.ref_allele allele = ClinVarAllele(frequency=pref_freq, sequence=sequence, **cln_data) # A few ClinVar variants are only reported as a combination with # other variants, and no single-variant effect is proposed. Skip these. if not cln_data['clnsig']: return [] return [allele]
Fascinating. This man must have yiddishe blood in him….. Wow! That breaks it down in simple terms!
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2017 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: Emanuel Cino <ecino@compassion.ch> # # The licence is in the file __manifest__.py # ############################################################################## from odoo.addons.thankyou_letters.models.res_partner import setlocale from datetime import date, datetime from odoo import api, models, fields, _ class ResPartner(models.Model): """ Add fields for retrieving values for communications. """ _inherit = 'res.partner' @api.multi def get_receipt_text(self, year): """ Formats the donation amount for the tax receipt. """ return '{:,.2f}'.format(self.get_receipt(year)).replace( '.00', '.-').replace(',', "'") @api.multi def get_receipt(self, year): """ Return the amount paid from the partner in the given year :param year: int: year of selection :return: float: total amount """ self.ensure_one() start_date = date(year, 1, 1) end_date = date(year, 12, 31) invoice_lines = self.env['account.invoice.line'].search([ ('last_payment', '>=', fields.Date.to_string(start_date)), ('last_payment', '<=', fields.Date.to_string(end_date)), ('state', '=', 'paid'), ('product_id.requires_thankyou', '=', True), '|', ('partner_id', '=', self.id), ('partner_id.parent_id', '=', self.id), ]) return sum(invoice_lines.mapped('price_subtotal')) @api.multi def _compute_date_communication(self): lang_map = { 'fr_CH': u'le %d %B %Y', 'fr': u'le %d %B %Y', 'de_DE': u'%d. %B %Y', 'de_CH': u'%d. %B %Y', 'en_US': u'%d %B %Y', 'it_IT': u'%d %B %Y', } today = datetime.today() city = _("Yverdon-les-Bains") for partner in self: lang = partner.lang with setlocale(lang): date = today.strftime( lang_map.get(lang, lang_map['en_US'])).decode('utf-8') partner.date_communication = city + u", " + date
When John C. Reynders arrived on campus in 1999 to become the 12th president of Morningside College, the institution was at a crossroads. The school faced declining enrollment, and budget deficits threatened the college’s ability to pursue its mission. The board, faculty, staff and students quickly rose to the president’s challenge. They joined in strategic planning, and the college developed a course of action that focused primarily on improving aging facilities, which would enhance the overall Morningside experience and make the college more attractive to prospective students visiting campus. This effort has been resoundingly successful. Over the past 10 years, Morningside has raised more than $72 million for facilities improvements, annual scholarships and endowment funding. Full-time undergraduate student enrollment has grown from 744 students nearly 15 years ago to over 1,200 students each of the past four years. President Reynders came to Morningside from Allegheny College in Meadville, Pa., where he was vice president for administrative services and treasurer. He has experience at all levels of higher education, having also served at Allegheny as director and dean of enrollment planning and financial aid, special assistant to the provost, associate director of athletics, associate professor of physical education, instructor in mathematics and head men's basketball coach. In addition to his teaching and administrative responsibilities, President Reynders is a member of the Iowa Association of Independent Colleges and Universities (IAICU); Iowa College Foundation; and Council of Independent Colleges (CIC). He also has provided higher education consulting services through George Dehne and Associates. President Reynders holds a Master of Arts in education and a Bachelor of Science in mathematics from Allegheny. He and his wife, Robin, share their sons: Mathew, Chuck, and Jeremy.
# PirateBox Message lib  (C)2012-2014
# Matthias Strubel

import string
import socket
import base64
import sys


class message:
    """A single PirateBox protocol message.

    Wire format is five ';'-separated fields:
        piratebox;<type>;01;<sender>;<base64 payload>
    """

    def __init__(self, name="generate"):
        # "generate" means: use this machine's hostname as the sender name.
        if name == "generate":
            self.name = socket.gethostname()
        else:
            self.name = name
        self.type = "gc"   # message type code; subclasses override
        self.decoded = ""  # the serialized wire string

    def set(self, content=" "):
        """Build the wire string carrying *content*, base64-encoded.

        Fix: the payload is now encoded to UTF-8 bytes before base64 and
        the base64 output decoded back to text, so this works on Python 3
        as well (the original passed text straight to b64encode, which
        raises TypeError on Python 3).
        """
        base64content = base64.b64encode(content.encode("utf-8")).decode("ascii")
        self.decoded = ("piratebox;" + self.type + ";01;" +
                        self.name + ";" + base64content)

    def get(self):
        """Return the decoded payload, or None if this is not a piratebox
        message."""
        # str.split replaces the py2-only string.split(...) helper.
        message_parts = self.decoded.split(";")
        if message_parts[0] != "piratebox":
            return None

        b64_content_part = message_parts[4]
        return base64.b64decode(b64_content_part).decode("utf-8")

    def get_sendername(self):
        """Name of the sender (hostname unless given explicitly)."""
        return self.name

    def get_message(self):
        """Return the raw wire string."""
        return self.decoded

    def set_message(self, decoded):
        """Install a raw wire string (e.g. one received from the network)."""
        self.decoded = decoded


class shoutbox_message(message):
    """Message variant for the shoutbox channel (type code "sb")."""

    def __init__(self, name="generate"):
        message.__init__(self, name)
        self.type = "sb"
This traditional, handcrafted Hinge is cast in Bronze, consisting of 90% Copper and 10% Tin. Bronze is an ideal material for use in gate hardware because it will never rust in any environment. This beautiful Gate Hinge will complement any wood gate, is quality made and will last a lifetime. Dimensions: 12" | 17" | 24" | 30"
# cf. Michael Clerx answer @ http://stackoverflow.com/questions/1690953/transitive-reduction-algorithm-pseudocode


def prima(m, title=None):
    """ Prints a matrix to the terminal """
    if title:
        print(title)
    for row in m:
        print(', '.join(str(cell) for cell in row))
    print('')


def path(m):
    """ Returns a path matrix """
    # Work on a copy so the input adjacency matrix is left untouched.
    closure = [row[:] for row in m]
    size = len(closure)
    for via in range(size):
        for node in range(size):
            # Skip the diagonal and nodes with no edge into `via`.
            if node == via or not closure[node][via]:
                continue
            # node reaches `via`, so node also reaches whatever `via` reaches.
            for target in range(size):
                if closure[node][target] == 0:
                    closure[node][target] = closure[via][target]
    return closure


def hsu(m):
    """ Transforms a given directed acyclic graph into its minimal equivalent """
    # Drop every edge src->dst that is implied by a two-step path
    # src->mid->dst (mutates the matrix in place).
    size = len(m)
    for mid in range(size):
        for src in range(size):
            if not m[src][mid]:
                continue
            for dst in range(size):
                if m[mid][dst]:
                    m[src][dst] = 0


m = [
    [0, 1, 1, 0, 0],
    [0, 0, 0, 0, 0],
    [0, 0, 0, 1, 1],
    [0, 0, 0, 0, 1],
    [0, 1, 0, 0, 0],
]

prima(m, 'Original matrix')
hsu(m)
prima(m, 'After Hsu')

p = path(m)
prima(p, 'Path matrix')
hsu(p)
prima(p, 'After Hsu')
Menzies School of Health Research is one of Australia’s leading health research institutes and the only one with a primary focus on Indigenous health. Our vision: Health equity for everyone living in Australia and the Asia-Pacific region. Our mission: To break the cycle of disease and improve health outcomes for people in Australia and the Asia-Pacific region, particularly Aboriginal and Torres Strait Islander communities, through excellence and leadership in research, education and capacity development.
#!/usr/bin/python
#
# Originally from node.py - Bitcoin P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Forked by atcsecure
#
# Blocknet's Routing Protocol node
#
# NOTE: Python 2 only (print statements, long literals like 0x100000000L,
# xrange, cStringIO). The deser_*/ser_* helpers implement Bitcoin wire
# serialization: CompactSize-prefixed strings/vectors and 256-bit ints.

import struct
import socket
import asyncore
import binascii
import time
import sys
import re
import random
import cStringIO
import hashlib

# Protocol version and user agent advertised during the version handshake.
MY_VERSION = 71037
MY_SUBVERSION = "/pynode:0.0.2/"

# Default Settings if no configuration file is given
settings = {
    "host": "104.131.186.93",
    "port": 21357,
    "debug": True,
    "network": "mainnet"
}


def new_block_event(block):
    # Callback invoked for each block received from the network.
    if block.is_valid():
        print " - Valid Block: %s" % block.hash
    else:
        print " - Invalid Block: %s" % block.hash


def new_transaction_event(tx):
    # Callback invoked for each transaction received from the network.
    if tx.is_valid():
        print " - Valid TX: %s" % tx.hash
    else:
        print " - Invalid TX: %s" % tx.hash


def sha256(s):
    # Single SHA-256 digest, returned as raw bytes.
    return hashlib.new('sha256', s).digest()


def hash256(s):
    # Bitcoin-style double SHA-256.
    return sha256(sha256(s))


def deser_string(f):
    # Read a var_str: CompactSize length prefix, then that many bytes.
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    return f.read(nit)


def ser_string(s):
    # Write a var_str: CompactSize length prefix, then the bytes.
    if len(s) < 253:
        return chr(len(s)) + s
    elif len(s) < 0x10000:
        return chr(253) + struct.pack("<H", len(s)) + s
    elif len(s) < 0x100000000L:
        return chr(254) + struct.pack("<I", len(s)) + s
    return chr(255) + struct.pack("<Q", len(s)) + s


def deser_uint256(f):
    # Read a 256-bit integer as eight little-endian 32-bit words.
    r = 0L
    for i in xrange(8):
        t = struct.unpack("<I", f.read(4))[0]
        r += t << (i * 32)
    return r


def ser_uint256(u):
    # Write a 256-bit integer as eight little-endian 32-bit words.
    rs = ""
    for i in xrange(8):
        rs += struct.pack("<I", u & 0xFFFFFFFFL)
        u >>= 32
    return rs


def uint256_from_str(s):
    # Convert the first 32 bytes of s into a 256-bit integer.
    r = 0L
    t = struct.unpack("<IIIIIIII", s[:32])
    for i in xrange(8):
        r += t[i] << (i * 32)
    return r


def uint256_from_compact(c):
    # Expand Bitcoin "compact bits" (nBits) into the full 256-bit target:
    # high byte is the size, low 3 bytes are the mantissa.
    nbytes = (c >> 24) & 0xFF
    v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
    return v


def deser_vector(f, c):
    # Read a CompactSize-prefixed vector of objects of class c; each
    # element must implement deserialize(f).
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    r = []
    for i in xrange(nit):
        t = c()
        t.deserialize(f)
        r.append(t)
    return r


def ser_vector(l):
    # Write a CompactSize-prefixed vector of objects; each element must
    # implement serialize().
    r = ""
    if len(l) < 253:
        r = chr(len(l))
    elif len(l) < 0x10000:
        r = chr(253) + struct.pack("<H", len(l))
    elif len(l) < 0x100000000L:
        r = chr(254) + struct.pack("<I", len(l))
    else:
        r = chr(255) + struct.pack("<Q", len(l))
    for i in l:
        r += i.serialize()
    return r


def deser_uint256_vector(f):
    # Read a CompactSize-prefixed vector of 256-bit integers.
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    r = []
    for i in xrange(nit):
        t = deser_uint256(f)
        r.append(t)
    return r


def ser_uint256_vector(l):
    # Write a CompactSize-prefixed vector of 256-bit integers.
    r = ""
    if len(l) < 253:
        r = chr(len(l))
    # NOTE(review): the two branches below test len(s), but no `s` exists in
    # this function -- vectors of 253+ elements raise NameError here; this
    # should be len(l) (copy/paste from ser_string).
    elif len(s) < 0x10000:
        r = chr(253) + struct.pack("<H", len(l))
    elif len(s) < 0x100000000L:
        r = chr(254) + struct.pack("<I", len(l))
    else:
        r = chr(255) + struct.pack("<Q", len(l))
    for i in l:
        r += ser_uint256(i)
    return r


def deser_string_vector(f):
    # Read a CompactSize-prefixed vector of var_str strings.
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    r = []
    for i in xrange(nit):
        t = deser_string(f)
        r.append(t)
    return r


def ser_string_vector(l):
    # Write a CompactSize-prefixed vector of var_str strings.
    r = ""
    if len(l) < 253:
        r = chr(len(l))
    # NOTE(review): same bug as ser_uint256_vector -- `s` is undefined here;
    # these conditions should use len(l).
    elif len(s) < 0x10000:
        r = chr(253) + struct.pack("<H", len(l))
    elif len(s) < 0x100000000L:
        r = chr(254) + struct.pack("<I", len(l))
    else:
        r = chr(255) + struct.pack("<Q", len(l))
    for sv in l:
        r += ser_string(sv)
    return r


def deser_int_vector(f):
    # Read a CompactSize-prefixed vector of 32-bit signed integers.
    # (This definition continues beyond the end of this chunk.)
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    r = []
    for i in xrange(nit):
        t = struct.unpack("<i",
f.read(4))[0] r.append(t) return r def ser_int_vector(l): r = "" if len(l) < 253: r = chr(len(l)) elif len(s) < 0x10000: r = chr(253) + struct.pack("<H", len(l)) elif len(s) < 0x100000000L: r = chr(254) + struct.pack("<I", len(l)) else: r = chr(255) + struct.pack("<Q", len(l)) for i in l: r += struct.pack("<i", i) return r def show_debug_msg(msg): if settings['debug']: print "DEBUG: " + msg class CAddress(object): def __init__(self): self.nServices = 1 self.pchReserved = "\x00" * 10 + "\xff" * 2 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f): self.nServices = struct.unpack("<Q", f.read(8))[0] self.pchReserved = f.read(12) self.ip = socket.inet_ntoa(f.read(4)) self.port = struct.unpack(">H", f.read(2))[0] def serialize(self): r = "" r += struct.pack("<Q", self.nServices) r += self.pchReserved r += socket.inet_aton(self.ip) r += struct.pack(">H", self.port) return r def __repr__(self): return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices, self.ip, self.port) class CInv(object): typemap = { 0: "Error", 1: "TX", 2: "Block"} def __init__(self): self.type = 0 self.hash = 0L def deserialize(self, f): self.type = struct.unpack("<i", f.read(4))[0] self.hash = deser_uint256(f) def serialize(self): r = "" r += struct.pack("<i", self.type) r += ser_uint256(self.hash) return r def __repr__(self): return "CInv(type=%s hash=%064x)" % (self.typemap[self.type], self.hash) class CBlockLocator(object): def __init__(self): self.nVersion = MY_VERSION self.vHave = [] def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.vHave = deser_uint256_vector(f) def serialize(self): r = "" r += struct.pack("<i", self.nVersion) r += ser_uint256_vector(self.vHave) return r def __repr__(self): return "CBlockLocator(nVersion=%i vHave=%s)" % (self.nVersion, repr(self.vHave)) class COutPoint(object): def __init__(self): self.hash = 0 self.n = 0 def deserialize(self, f): self.hash = deser_uint256(f) self.n = struct.unpack("<I", f.read(4))[0] def 
serialize(self): r = "" r += ser_uint256(self.hash) r += struct.pack("<I", self.n) return r def __repr__(self): return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n) class CTxIn(object): def __init__(self): self.prevout = COutPoint() self.scriptSig = "" self.nSequence = 0 def deserialize(self, f): self.prevout = COutPoint() self.prevout.deserialize(f) self.scriptSig = deser_string(f) self.nSequence = struct.unpack("<I", f.read(4))[0] def serialize(self): r = "" r += self.prevout.serialize() r += ser_string(self.scriptSig) r += struct.pack("<I", self.nSequence) return r def __repr__(self): return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" % ( repr(self.prevout), binascii.hexlify(self.scriptSig), self.nSequence) class CTxOut(object): def __init__(self): self.nValue = 0 self.scriptPubKey = "" def deserialize(self, f): self.nValue = struct.unpack("<q", f.read(8))[0] self.scriptPubKey = deser_string(f) def serialize(self): r = "" r += struct.pack("<q", self.nValue) r += ser_string(self.scriptPubKey) return r def __repr__(self): return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" % ( self.nValue // 100000000, self.nValue % 100000000, binascii.hexlify(self.scriptPubKey)) class CTransaction(object): def __init__(self): self.nVersion = 1 self.vin = [] self.vout = [] self.nLockTime = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.vin = deser_vector(f, CTxIn) self.vout = deser_vector(f, CTxOut) self.nLockTime = struct.unpack("<I", f.read(4))[0] def serialize(self): r = "" r += struct.pack("<i", self.nVersion) r += ser_vector(self.vin) r += ser_vector(self.vout) r += struct.pack("<I", self.nLockTime) return r def calc_sha256(self): if self.sha256 is None: self.sha256 = uint256_from_str(hash256(self.serialize())) self.hash = hash256(self.serialize())[::-1].encode('hex_codec') def is_valid(self): self.calc_sha256() for tout in self.vout: if tout.nValue < 0 or tout.nValue > 21000000L * 100000000L: return 
False return True def __repr__(self): return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" % ( self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) class CBlock(object): def __init__(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.vtx = [] self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.hashPrevBlock = deser_uint256(f) self.hashMerkleRoot = deser_uint256(f) self.nTime = struct.unpack("<I", f.read(4))[0] self.nBits = struct.unpack("<I", f.read(4))[0] self.nNonce = struct.unpack("<I", f.read(4))[0] self.vtx = deser_vector(f, CTransaction) def serialize(self): r = "" r += struct.pack("<i", self.nVersion) r += ser_uint256(self.hashPrevBlock) r += ser_uint256(self.hashMerkleRoot) r += struct.pack("<I", self.nTime) r += struct.pack("<I", self.nBits) r += struct.pack("<I", self.nNonce) r += ser_vector(self.vtx) return r def calc_sha256(self): if self.sha256 is None: r = "" r += struct.pack("<i", self.nVersion) r += ser_uint256(self.hashPrevBlock) r += ser_uint256(self.hashMerkleRoot) r += struct.pack("<I", self.nTime) r += struct.pack("<I", self.nBits) r += struct.pack("<I", self.nNonce) self.sha256 = uint256_from_str(hash256(r)) self.hash = hash256(r)[::-1].encode('hex_codec') def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False hashes = [] for tx in self.vtx: if not tx.is_valid(): return False tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) while len(hashes) > 1: newhashes = [] for i in xrange(0, len(hashes), 2): i2 = min(i + 1, len(hashes) - 1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes if uint256_from_str(hashes[0]) != self.hashMerkleRoot: return False return True def __repr__(self): return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" % ( self.nVersion, 
self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) class CUnsignedAlert(object): def __init__(self): self.nVersion = 1 self.nRelayUntil = 0 self.nExpiration = 0 self.nID = 0 self.nCancel = 0 self.setCancel = [] self.nMinVer = 0 self.nMaxVer = 0 self.setSubVer = [] self.nPriority = 0 self.strComment = "" self.strStatusBar = "" self.strReserved = "" def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.nRelayUntil = struct.unpack("<q", f.read(8))[0] self.nExpiration = struct.unpack("<q", f.read(8))[0] self.nID = struct.unpack("<i", f.read(4))[0] self.nCancel = struct.unpack("<i", f.read(4))[0] self.setCancel = deser_int_vector(f) self.nMinVer = struct.unpack("<i", f.read(4))[0] self.nMaxVer = struct.unpack("<i", f.read(4))[0] self.setSubVer = deser_string_vector(f) self.nPriority = struct.unpack("<i", f.read(4))[0] self.strComment = deser_string(f) self.strStatusBar = deser_string(f) self.strReserved = deser_string(f) def serialize(self): r = "" r += struct.pack("<i", self.nVersion) r += struct.pack("<q", self.nRelayUntil) r += struct.pack("<q", self.nExpiration) r += struct.pack("<i", self.nID) r += struct.pack("<i", self.nCancel) r += ser_int_vector(self.setCancel) r += struct.pack("<i", self.nMinVer) r += struct.pack("<i", self.nMaxVer) r += ser_string_vector(self.setSubVer) r += struct.pack("<i", self.nPriority) r += ser_string(self.strComment) r += ser_string(self.strStatusBar) r += ser_string(self.strReserved) return r def __repr__(self): return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" % ( self.nVersion, self.nRelayUntil, self.nExpiration, self.nID, self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority, self.strComment, self.strStatusBar, self.strReserved) class CAlert(object): def __init__(self): self.vchMsg = "" self.vchSig = "" def deserialize(self, 
f): self.vchMsg = deser_string(f) self.vchSig = deser_string(f) def serialize(self): r = "" r += ser_string(self.vchMsg) r += ser_string(self.vchSig) return r def __repr__(self): return "CAlert(vchMsg.sz %d, vchSig.sz %d)" % (len(self.vchMsg), len(self.vchSig)) class msg_version(object): command = "version" def __init__(self): self.nVersion = MY_VERSION self.nServices = 1 self.nTime = time.time() self.addrTo = CAddress() self.addrFrom = CAddress() self.nNonce = random.getrandbits(64) self.strSubVer = MY_SUBVERSION self.nStartingHeight = -1 def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] if self.nVersion == 10300: self.nVersion = 300 self.nServices = struct.unpack("<Q", f.read(8))[0] self.nTime = struct.unpack("<q", f.read(8))[0] self.addrTo = CAddress() self.addrTo.deserialize(f) if self.nVersion >= 106: self.addrFrom = CAddress() self.addrFrom.deserialize(f) self.nNonce = struct.unpack("<Q", f.read(8))[0] self.strSubVer = deser_string(f) if self.nVersion >= 209: self.nStartingHeight = struct.unpack("<i", f.read(4))[0] else: self.nStartingHeight = None else: self.addrFrom = None self.nNonce = None self.strSubVer = None self.nStartingHeight = None def serialize(self): r = "" r += struct.pack("<i", self.nVersion) r += struct.pack("<Q", self.nServices) r += struct.pack("<q", self.nTime) r += self.addrTo.serialize() r += self.addrFrom.serialize() r += struct.pack("<Q", self.nNonce) r += ser_string(self.strSubVer) r += struct.pack("<i", self.nStartingHeight) return r def __repr__(self): return "msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)" % ( self.nVersion, self.nServices, time.ctime(self.nTime), repr(self.addrTo), repr(self.addrFrom), self.nNonce, self.strSubVer, self.nStartingHeight) class msg_verack(object): command = "verack" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return "" def __repr__(self): return "msg_verack()" class 
msg_addr(object): command = "addr" def __init__(self): self.addrs = [] def deserialize(self, f): self.addrs = deser_vector(f, CAddress) def serialize(self): return ser_vector(self.addrs) def __repr__(self): return "msg_addr(addrs=%s)" % (repr(self.addrs)) class msg_alert(object): command = "alert" def __init__(self): self.alert = CAlert() def deserialize(self, f): self.alert = CAlert() self.alert.deserialize(f) def serialize(self): r = "" r += self.alert.serialize() return r def __repr__(self): return "msg_alert(alert=%s)" % (repr(self.alert),) class msg_inv(object): command = "inv" def __init__(self): self.inv = [] def deserialize(self, f): self.inv = deser_vector(f, CInv) def serialize(self): return ser_vector(self.inv) def __repr__(self): return "msg_inv(inv=%s)" % (repr(self.inv)) class msg_getdata(object): command = "getdata" def __init__(self): self.inv = [] def deserialize(self, f): self.inv = deser_vector(f, CInv) def serialize(self): return ser_vector(self.inv) def __repr__(self): return "msg_getdata(inv=%s)" % (repr(self.inv)) class msg_getblocks(object): command = "getblocks" def __init__(self): self.locator = CBlockLocator() self.hashstop = 0L def deserialize(self, f): self.locator = CBlockLocator() self.locator.deserialize(f) self.hashstop = deser_uint256(f) def serialize(self): r = "" r += self.locator.serialize() r += ser_uint256(self.hashstop) return r def __repr__(self): return "msg_getblocks(locator=%s hashstop=%064x)" % (repr(self.locator), self.hashstop) class msg_tx(object): command = "tx" def __init__(self): self.tx = CTransaction() def deserialize(self, f): self.tx.deserialize(f) def serialize(self): return self.tx.serialize() def __repr__(self): return "msg_tx(tx=%s)" % (repr(self.tx)) class msg_block(object): command = "block" def __init__(self): self.block = CBlock() def deserialize(self, f): self.block.deserialize(f) def serialize(self): return self.block.serialize() def __repr__(self): return "msg_block(block=%s)" % (repr(self.block)) 
class msg_getaddr(object): command = "getaddr" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return "" def __repr__(self): return "msg_getaddr()" class msg_checkpoint(object): command = "msg_checkpoint" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return "" def __repr__(self): return "msg_checkpoint()" # msg_checkorder # msg_submitorder # msg_reply class msg_ping(object): command = "ping" def __init__(self): pass def deserialize(self, f): pass def serialize(self): return "" def __repr__(self): return "msg_ping()" class NodeConn(asyncore.dispatcher): messagemap = { "version": msg_version, "verack": msg_verack, "addr": msg_addr, "alert": msg_alert, "inv": msg_inv, "getdata": msg_getdata, "getblocks": msg_getblocks, "tx": msg_tx, "block": msg_block, "getaddr": msg_getaddr, "ping": msg_ping, "checkpoint": msg_checkpoint } MAGIC_BYTES = { "mainnet": "\xa1\xa0\xa2\xa3", # mainnet "testnet3": "\x0b\x11\x09\x07" # testnet3 } def __init__(self, dstaddr, dstport): asyncore.dispatcher.__init__(self) self.dstaddr = dstaddr self.dstport = dstport self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.sendbuf = "" self.recvbuf = "" self.ver_send = 209 self.ver_recv = 209 self.last_sent = 0 self.state = "connecting" # stuff version msg into sendbuf vt = msg_version() vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 self.send_message(vt, True) print "\n PyNode - MiniNode" print " -------------------------------------------------------------------------" print " Connecting to Bitcoin Node IP # " + settings['host'] + ":" + str(settings['port']) try: self.connect((dstaddr, dstport)) except: self.handle_close() self.send_message(vt, True) def handle_connect(self): print " Connected & Listening :)\n" self.state = "connected" #send version msg t = msg_version() t.addrTo.ip = self.dstaddr t.addrTo.port = self.dstport t.addrFrom.ip = "0.0.0.0" t.addrFrom.port = 0 
self.send_message(t) def handle_close(self): print " Closing Conection ... bye :)" self.state = "closed" self.recvbuf = "" self.sendbuf = "" try: self.close() except: pass def handle_read(self): try: t = self.recv(8192) except: self.handle_close() return if len(t) == 0: print 'len is zero...' self.handle_close() return self.recvbuf += t self.got_data() def readable(self): return True def writable(self): return (len(self.sendbuf) > 0) def handle_write(self): try: sent = self.send(self.sendbuf) except: self.handle_close() return self.sendbuf = self.sendbuf[sent:] def got_data(self): while True: if len(self.recvbuf) < 4: return if self.recvbuf[:4] != self.MAGIC_BYTES[settings['network']]: raise ValueError("got garbage %s" % repr(self.recvbuf)) if self.ver_recv < 209: if len(self.recvbuf) < 4 + 12 + 4: return command = self.recvbuf[4:4 + 12].split("\x00", 1)[0] msglen = struct.unpack("<i", self.recvbuf[4 + 12:4 + 12 + 4])[0] checksum = None if len(self.recvbuf) < 4 + 12 + 4 + msglen: return msg = self.recvbuf[4 + 12 + 4:4 + 12 + 4 + msglen] self.recvbuf = self.recvbuf[4 + 12 + 4 + msglen:] else: if len(self.recvbuf) < 4 + 12 + 4 + 4: return command = self.recvbuf[4:4 + 12].split("\x00", 1)[0] msglen = struct.unpack("<i", self.recvbuf[4 + 12:4 + 12 + 4])[0] checksum = self.recvbuf[4 + 12 + 4:4 + 12 + 4 + 4] if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: return msg = self.recvbuf[4 + 12 + 4 + 4:4 + 12 + 4 + 4 + msglen] th = sha256(msg) h = sha256(th) if checksum != h[:4]: raise ValueError("got bad checksum %s" % repr(self.recvbuf)) self.recvbuf = self.recvbuf[4 + 12 + 4 + 4 + msglen:] if command in self.messagemap: f = cStringIO.StringIO(msg) t = self.messagemap[command]() t.deserialize(f) self.got_message(t) else: show_debug_msg("Unknown command: '" + command + "' " + repr(msg)) def send_message(self, message, pushbuf=False): if self.state != "connected" and not pushbuf: return show_debug_msg("Send %s" % repr(message)) print 'Sending Message...%s' % repr(message) 
command = message.command data = message.serialize() tmsg = self.MAGIC_BYTES[settings['network']] tmsg += command tmsg += "\x00" * (12 - len(command)) tmsg += struct.pack("<I", len(data)) if self.ver_send >= 209: th = sha256(data) h = sha256(th) tmsg += h[:4] tmsg += data self.sendbuf += tmsg self.last_sent = time.time() def got_message(self, message): print 'got message' if self.last_sent + 30 * 60 < time.time(): self.send_message(msg_ping()) show_debug_msg("Recv %s" % repr(message)) if message.command == "version": if message.nVersion >= 209: self.send_message(msg_verack()) print 'version is greater than 209' self.ver_send = min(MY_VERSION, message.nVersion) if message.nVersion < 209: self.ver_recv = self.ver_send print 'version is less than 209' elif message.command == "verack": self.ver_recv = self.ver_send elif message.command == "inv": want = msg_getdata() for i in message.inv: if i.type == 1: want.inv.append(i) elif i.type == 2: want.inv.append(i) if len(want.inv): self.send_message(want) elif message.command == "tx": new_transaction_event(message.tx) elif message.command == "block": new_block_event(message.block) if __name__ == '__main__': if len(sys.argv) == 2: f = open(sys.argv[1]) for line in f: m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() settings['port'] = int(settings['port']) c = NodeConn(settings['host'], settings['port']) asyncore.loop()
Discover Law and Justice forums; share your thoughts, information, images and videos with thousands of users around the world on thai-forum. 6/4 We are SphinX forever. 6/4 We are SphinX forever. vrsphinx4ever. 5forum. biz 6/4, We, are, SphinX, forever. บอร์ดของชาว ม. 3/3 รุ่น 112 KKW.
# -*- coding: utf-8 -*-
import argparse
import logging
import time
import sys
import os

sys.path.append(os.path.abspath(os.path.join(__file__, "../../..")))

from mycodo.config import DAEMON_PID_FILE
from mycodo.config import KEEPUP_LOG_FILE


def check_daemon(print_msg=True, start_daemon=True):
    """Check whether the Mycodo daemon is running and restart it if not.

    Reads the daemon's PID file and tests for a matching /proc entry. If the
    process is gone, the stale PID file is removed and (when start_daemon is
    True) the daemon is restarted via the service command.

    :param print_msg: also print status messages to stdout
    :param start_daemon: restart the daemon when it is found dead; when
        False, only the stale PID file is removed
    """
    if os.path.exists(DAEMON_PID_FILE):
        with open(DAEMON_PID_FILE, 'r') as pid_file:
            # Fix: strip the PID. A trailing newline in the file made the
            # "/proc/<pid>" existence check always fail, which restarted a
            # perfectly healthy daemon.
            pid = pid_file.read().strip()
        if not os.path.exists("/proc/{pid}".format(pid=pid)):
            message = "Daemon is not running, restarting"
            logging.info(message)
            if print_msg:
                print(message)
            try:
                os.remove(DAEMON_PID_FILE)
                if start_daemon:
                    rcode = os.system('/usr/sbin/service mycodo restart')
                    if rcode != 0:
                        logging.error("Unable to execute restart command "
                                      "{}".format(rcode))
            except OSError as e:
                message = "Unable to remove pid file: {}".format(e)
                logging.warning(message)
                if print_msg:
                    print(message)
        else:
            if print_msg:
                message = "Daemon is currently running"
                logging.info(message)
                print(message)
    elif print_msg:
        # No PID file at all: the daemon shut down cleanly.
        message = "Mycodo previously shut down properly"
        logging.info(message)
        print(message)


def parseargs(par):
    """Attach this script's command-line options to parser *par*."""
    par.add_argument('-c', '--continuouscheck', action='store_true',
                     help="Continually check if the daemon has crashed and start it")
    par.add_argument('-d', '--deletepid', action='store_true',
                     help="Only delete the PID file if the daemon isn't running. Don't start it.")
    return par.parse_args()


if __name__ == '__main__':
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(filename=KEEPUP_LOG_FILE,
                        format=log_format,
                        level=logging.DEBUG)
    parser = argparse.ArgumentParser(
        description="Script to check if the Mycodo daemon has crashed and "
                    "restart it if so.")
    args = parseargs(parser)

    if args.continuouscheck:
        # Poll forever; quiet mode so the log is not flooded when healthy.
        print("Beginning monitor of the Mycodo daemon and start it if it is found to not be running")
        while True:
            check_daemon(print_msg=False)
            time.sleep(30)
    elif args.deletepid:
        check_daemon(start_daemon=False)
    else:
        check_daemon()
Search and apply for all top Jobs in Marketing Advertising Mr Pr coimbatore only on Joblistindia.com. We have a huge and comprehensive database of Jobs in Marketing Advertising Mr Pr coimbatore in India. Here is the list of the latest and updated Jobs in Marketing Advertising Mr Pr coimbatore. This list of Jobs in Marketing Advertising Mr Pr coimbatore is updated every day with new vacancies, so don't forget to check back regularly or bookmark this page.
# -*- coding: utf-8 -*-
from base import Entidade
from pynfe.utils.flags import TIPOS_DOCUMENTO, CODIGO_BRASIL


class Cliente(Entidade):
    """NF-e customer record: identification document plus address data.

    All fields default to empty values and are expected to be filled in by
    the caller before the entity is used.
    """

    # --- Customer identification ---
    razao_social = ''             # company / legal name (required)
    tipo_documento = 'CNPJ'       # document type (required); see TIPOS_DOCUMENTO
    numero_documento = ''         # document number (required)
    inscricao_estadual = ''       # state tax registration
    inscricao_suframa = ''        # SUFRAMA registration
    isento_icms = False           # exempt from ICMS tax (True/False)

    # --- Address ---
    endereco_logradouro = ''      # street (required)
    endereco_numero = ''          # street number (required)
    endereco_complemento = ''     # additional address details
    endereco_bairro = ''          # district / neighbourhood (required)
    endereco_cep = ''             # postal code
    endereco_pais = CODIGO_BRASIL  # country code (defaults to Brazil)
    endereco_uf = ''              # state (required)
    endereco_municipio = ''       # municipality (required)
    endereco_telefone = ''        # telephone

    def __str__(self):
        # e.g. "CNPJ 12345678000199"
        return '{} {}'.format(self.tipo_documento, self.numero_documento)
The FMEDA method was invented to predict failure rates for each failure mode of a device, subsystem, or component. The “Practical Mechanical FMEDA with FMEDAx” course explains the FMEDA method, its objectives, and its output. In this course, an example device FMEDA is worked through, illustrating the fundamental concepts, including environmental profile selection, diagnostic coverage analysis, proof test coverage analysis, part selection, and functional failure modes. The course includes a copy of the book “Final Elements in Safety Instrumented Systems: IEC 61511 Compliant Systems and IEC 61508 Compliant Products” by Chris O’Brien, Loren Stewart, and Lindsey Bredemeyer. The course also includes a trial license of the FMEDAx tool with the MCRHTM mechanical component database. This database contains failure rates, failure modes, and failure mode distributions for a variety of operational profiles and applications.
# ManageBacToTheFuture: ManageBac for Humans
# Copyright (C) 2015 Sam Parkinson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import re
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta

from files import Files
from message import Messages
from calender import Calender
import errors


def login(username, password):
    '''
    Logs into ManageBac

    Returns a token (session-cookie dict usable with later requests)

    Raises: ManageBacCommunicationException, BadLogin
    '''
    r = requests.post('https://telopeapark.managebac.com/sessions',
                      data={'login': username, 'password': password})
    if r.ok and r.status_code == 200:
        # ManageBac answers 200 even for a failed login, so detect the
        # failure from the page text.
        if 'Invalid login or password, please try again.' in r.text:
            # I wish managebac was more RESTful
            raise errors.BadLogin
        else:
            return {'_managebac_session': r.cookies['_managebac_session']}
    else:
        raise errors.ManageBacCommunicationException


class Class():
    '''
    Represents a class on managebac
    '''

    def __init__(self, id_, name=None):
        self.id_ = id_
        self.name = name

    def get_files(self, token):
        '''
        Get the class's files section

        Returns :class:`managebac.files.Files`
        '''
        return Files('https://telopeapark.managebac.com/classes/'
                     '{}/assets'.format(self.id_), token)

    def get_messages(self, token):
        '''
        Get the class's messages section

        Returns :class:`managebac.message.Messages`
        '''
        return Messages('https://telopeapark.managebac.com/classes/'
                        '{}/messages'.format(self.id_), token)

    def get_calender(self, token, start=0, end=3000000000):
        '''
        Get the class's calender section

        Returns :class:`managebac.calender.Calender`
        '''
        return Calender(self.id_, token, start=start, end=end)

    def get_merged(self, token):
        '''
        Merge files, messages and calender entries into one list,
        sorted newest-first by timestamp.
        '''
        fil = self.get_files(token)
        msg = self.get_messages(token)
        cal = self.get_calender(token)

        # Messages are lazy; make sure each one is fully loaded.
        for m in msg:
            if not m.loaded:
                m.load(token)

        merged = fil + msg + cal

        # <HACK>
        # Naive convertion between tz and non-tz objects
        for x in merged:
            x.time = x.time.replace(tzinfo=None)
        # </HACK>

        merged.sort(key=lambda x: x.time)
        merged.reverse()
        return merged


class Classes(list):
    '''
    Gets and holds a list of :class:`Class` es for a given user

    Downloads the classes of the user behind the token.

    Raises: BadToken, ManageBacCommunicationException
    '''

    def __init__(self, token):
        r = requests.get('https://telopeapark.managebac.com/home',
                         cookies=token)
        if r.ok and r.status_code == 200:
            # Fix: pin the parser. BeautifulSoup(r.text) with no parser
            # argument picks whatever is installed (lxml, html5lib, ...),
            # which is non-deterministic across machines and emits a warning.
            soup = BeautifulSoup(r.text, 'html.parser')
            # Dashboard | Profile | MYP | [Classes] | Groups
            menu = soup.find(id='menu').findAll('li')[3]

            # The 1st a is just a link to a classes list
            for a in menu.findAll('a')[1:]:
                self.append(Class(
                    id_=int(re.search(
                        '/classes/([0-9]+)', a['href']).group(1)),
                    name=a.text[len('\nIB MYP\n\n'):].strip('\n')
                ))
        elif r.status_code == 302:
            # NOTE(review): requests follows redirects by default, so this
            # branch is unlikely to be reached — confirm intent.
            raise errors.BadToken
        else:
            raise errors.ManageBacCommunicationException
Foreign applicants who have gained a first university degree in the areas of Architecture, Interior Design, Monument Conservation, Urban Planning/Urban Development, Regional Planning, Landscape Architecture, Landscape Planning at the latest by the time they commence their scholarship-supported study programme. This programme only funds projects in the area of Design/Planning. Other DAAD scholarship programmes are available for applicants from the field of History of Architecture or applicants with a scientific project. Reimbursement of the fees for the TestDaF test which has either been taken in the home country after receipt of the Scholarship Award Letter or in Germany before the end of the funding period. A special DAAD committee made up of professors from German universities makes the final decision about scholarships in the field of architecture. The decision is based upon written applications and work samples which have to be submitted (see: www.daad.de/extrainfo). Confirmation of a scholarship does not automatically guarantee admission to one of the host universities. The individual university decides whether or not to offer scholarship holders a place. In most cases, applicants are required to take an entrance examination. Please find out in time about admission requirements and application deadlines and interview dates at your chosen university, and bear in mind that these may be several months before the planned start of your studies or even before the DAAD awards the scholarship. We cannot cover the costs of the entrance examination. If an applicant is offered a scholarship by the DAAD but fails to be admitted to a university, he/she may not make use of the scholarship that has already been awarded. Applicants in the field of architecture should have a knowledge of the language of instruction that corresponds to the requirements of the chosen university at the latest by the time they start their scholarship. 
If you do not yet have the language skills required by the university at the time of your application, your application should indicate the extent to which you are in a position to reach the required level. After you have been awarded a scholarship, take advantage of the funding opportunities described under "Value". If available, letter of acceptance from the host university. All university certificates on all annual examinations with individual grade(s), incl. explanation of the grading system. German or English translations of all documents submitted in the national language. A declaration (form) that the designs/plans have been produced or created by the applicant or the names of those persons involved in producing collaborative designs or plans. In the case of collaborative work or projects, the applicant's own contribution must be marked as such or appropriately described. Other documents you think might be of relevance to your application (e.g. certificates of employment, proof of placements). A portfolio of work samples (for formal requirements, read: "Additional Information for DAAD Study Scholarships in the Field of Architecture" at: www.daad.de/extrainfo). Please do not upload any work samples and/or portfolios to the DAAD portal! All application documents listed under 1. and 2. must be submitted in German or English. Documents in other languages must be translated. Upload the completed application form and the required appendices (except reference and the work samples which you have to submit) to the portal in PDF format. Translations, if applicable, should be uploaded together with the document issued in the original language.
#!/.../python3
#
# This file is part of EXECTASK.
#
# EXECTASK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EXECTASK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EXECTASK. If not, see <http://www.gnu.org/licenses/>.
#

from exectask.actions import *
from exectask.context import *
from exectask.merge import *
from exectask.printer import *

import argparse
import importlib
import json
import os
import os.path
import sys
import traceback
import types


#
# Main function
#
def main():
    """Command-line entry point.

    Parses arguments, loads action modules from the given directories,
    merges settings files into a top-level variable dictionary, and then
    executes every task file given on the command line.

    Returns 0 after all task files have been processed (per-file read or
    execution problems are reported on stderr but do not abort the run).
    """

    def argparse_directory(path):
        """argparse type-checker: require *path* to be an existing
        directory and return it as an absolute path."""
        if not os.path.exists(path):
            msg = '\'{}\' directory does not exist'.format(path)
            raise argparse.ArgumentTypeError(msg)
        if not os.path.isdir(path):
            msg = '\'{}\' is not a directory'.format(path)
            raise argparse.ArgumentTypeError(msg)
        if os.path.isabs(path):
            return path
        return os.path.abspath(path)

    parser = argparse.ArgumentParser(
        description='Executes tasks in a coordinated manner.'
    )
    parser.add_argument(
        'taskfile', type=argparse.FileType('r'), nargs='+',
        help='file containing a task definition'
    )
    parser.add_argument(
        '-a', metavar='actionsdir', type=argparse_directory, nargs='+',
        required=False, default=[], dest='actionsdir',
        help='action modules directory'
    )
    parser.add_argument(
        '-s', metavar='settingsfile', type=argparse.FileType('r'), nargs='+',
        required=False, default=[], dest='settingsfile',
        help='file with settings'
    )
    parser.add_argument(
        '-v', required=False, action='store_true', default=False,
        dest='verbose', help='verbose output'
    )
    parser.add_argument(
        '-e', required=False, action='store_true', default=False,
        dest='exceptions', help='show exceptions stack trace'
    )
    # BUG FIX: the original called parser.parse_args(sys.argv), which made
    # argparse treat sys.argv[0] (the program name) as the first 'taskfile'
    # and open the script itself; the code then had to skip taskfile[0]
    # with a slice. parse_args() parses sys.argv[1:] as intended.
    args = parser.parse_args()

    # Create printer factory (level 1 = verbose, 0 = normal)
    printer_fact_level = 1 if args.verbose else 0
    printer_fact = PrinterFactory(printer_fact_level)

    # Create action catalog and load every '*.py' action module found in
    # the directories given with -a.
    actions = {}
    catalog = ActionCatalog(actions, printer_fact)
    for actionsdir in args.actionsdir:
        for fname in os.listdir(actionsdir):
            path = '{}/{}'.format(actionsdir, fname)
            if os.path.isfile(path) and fname.endswith('.py'):
                name = 'exectask.modules.{}'.format(fname[:-3])
                action_catalog_load(catalog, printer_fact, name, path)

    # Gather top level variables. Best effort: a malformed settings file
    # only produces a warning.
    variables = {}
    for settingsfile in args.settingsfile:
        try:
            merge_dict(variables, json.loads(settingsfile.read()))
        except Exception:  # was a bare 'except:'; keep best-effort behaviour
            printer = printer_fact.printer(sys.stderr)
            msg = 'Warning: Could not read settings from'
            msg = '{} file \'{}\''.format(msg, settingsfile.name)
            printer.print(msg, 0, 'yellow')

    # Execute tasks. nargs='+' guarantees at least one task file, so the
    # original 'len(args.taskfile) > 1' guard (which only compensated for
    # the argv[0] artifact) is no longer needed.
    context = ExecuteTaskContext(actions, printer_fact)
    for taskfile in args.taskfile:
        try:
            task = json.loads(taskfile.read())
        except Exception:  # was a bare 'except:'
            task = None
            printer = printer_fact.printer(sys.stderr)
            msg = 'Warning: Could not read task from'
            msg = '{} file \'{}\''.format(msg, taskfile.name)
            printer.print(msg, 0, 'yellow')
        if task is not None:
            try:
                # Define built-in variable 'basedir'. The value is wrapped
                # in single quotes -- presumably the task expression
                # language expects a string literal; TODO confirm.
                dirname = os.path.dirname(taskfile.name)
                basedir = '\'{}\''.format(os.path.abspath(dirname))
                variables['basedir'] = basedir
                # Execute task
                context.execute_task(task, variables)
            except BaseException as err:
                printer = printer_fact.printer(sys.stderr)
                msg = 'Error: There was a problem executing task'
                msg = '{} from file \'{}\''.format(msg, taskfile.name)
                msg = '{}\nCause: {}'.format(msg, err)
                if args.exceptions:
                    msg = '{}:\n{}'.format(msg, traceback.format_exc())
                printer.print(msg, 0, 'red', 'bright')

    # Tasks were already executed
    return 0
The who’s who of VIP guests are flocking to the more relaxed type of A-List destinations. There’s a huge trend of people at the top, wanting to wind down, on their holiday. The sorts of spots where you can be a fly on a wall – no attention attracted – and choose to be in the limelight when it suits. These spots have to have the perfect balance of not-a-care-in-the-world vibes, natural beauty by the sea, though with the buzz of chic shops & gourmet experiences. Byron Bay to Saint Tropez Planners curates travel, accommodation, reservations and daytrip itineraries alongside exclusive partners to provide unforgettably relaxed stays, with privacy in mind. Our experiences are positioned away from prying eyes and pressure, yet close enough to the action. Never out of the top 10 celebrity destinations 2018 / 2019 will be no different, with the usual chic jet-set crew having reserved stays, to decorate both the French Riviera and Byron Bay region. There’s comfort in knowing that St Tropez never goes out of style. Since the 1950s, the rich and famous have flocked to this Cote d’Azur favourite each summer. The South of France’s most exclusive destination has played host to countless icons over the years from Brigitte Bardot to the Beckhams, Kate Moss, Beyonce and Jay Z, Elton John and Roberto Cavalli, whose technicolour boat is moored there during summer to host the A-list parties. Byron Bay is the newcomer in the celebrity go-to destinations. This summer alone the boutique township had the pleasure of hosting many Hollywood celebs on their private holidays. It makes perfect sense: stunningly beautiful nature, small village feel, the ocean. With elegant chic-ness to boot. And the most amazing food trips in the rolling hills just beyond. It’s like the Australian Provence by Sea.
# Error codes : Taken from pilot1. To be removed once pilot2 API is ready
class PilotErrors(object):
    """Pilot error handling.

    Central registry of pilot error codes (integer constants), their
    human-readable descriptions, and helpers to classify codes as
    get/put/recoverable/resubmission/FAX errors.
    """

    # --- error codes -----------------------------------------------------
    # Gaps in the numbering correspond to retired codes (see the commented
    # entries in pilotError below).
    ERR_UNKNOWNERROR = 0
    ERR_GENERALERROR = 1008
    ERR_DIRECTIOFILE = 1009  # harmless, just means that copy-to-scratch was skipped in favor or direct i/o access
    ERR_GETDATAEXC = 1097
    ERR_NOLOCALSPACE = 1098
    ERR_STAGEINFAILED = 1099
    ERR_REPNOTFOUND = 1100
    ERR_LRCREGCONNREF = 1101
    ERR_NOSUCHFILE = 1103
    ERR_USERDIRTOOLARGE = 1104
    ERR_LFCADDCSUMFAILED = 1105
    ERR_STDOUTTOOBIG = 1106
    ERR_MISSDBREL = 1107
    ERR_FAILEDLCGREG = 1108
    ERR_CMTCONFIG = 1109
    ERR_SETUPFAILURE = 1110
    ERR_RUNJOBEXC = 1111
    ERR_PILOTEXC = 1112
    ERR_GETLFCIMPORT = 1113
    ERR_PUTLFCIMPORT = 1114
    ERR_NFSSQLITE = 1115
    ERR_QUEUEDATA = 1116
    ERR_QUEUEDATANOTOK = 1117
    ERR_CURLSPACE = 1118
    ERR_DDMSPACE = 1119
    ERR_NOSTMATCHDEST = 1120  # not used
    ERR_NOLFCSFN = 1122
    ERR_MISSINGGUID = 1123
    ERR_OUTPUTFILETOOLARGE = 1124
    ERR_NOPFC = 1130
    ERR_PUTFUNCNOCALL = 1131
    ERR_LRCREG = 1132
    ERR_NOSTORAGE = 1133
    ERR_MKDIR = 1134
    ERR_FAILEDSIZELOCAL = 1135
    ERR_FAILEDMD5LOCAL = 1136
    ERR_STAGEOUTFAILED = 1137
    ERR_FAILEDSIZE = 1138
    ERR_PUTWRONGSIZE = 1139
    ERR_FAILEDMD5 = 1140
    ERR_PUTMD5MISMATCH = 1141
    ERR_CHMODTRF = 1143
    ERR_PANDAKILL = 1144
    ERR_GETMD5MISMATCH = 1145
    ERR_DYNTRFINST = 1146
    ERR_FAILEDRM = 1148
    ERR_TRFDOWNLOAD = 1149
    ERR_LOOPINGJOB = 1150
    ERR_GETTIMEOUT = 1151
    ERR_PUTTIMEOUT = 1152
    ERR_LOSTJOBNOTFINISHED = 1153
    ERR_LOSTJOBLOGREG = 1154
    ERR_LOSTJOBFILETRANSFER = 1155
    ERR_LOSTJOBRECOVERY = 1156
    ERR_LOSTJOBMAXEDOUT = 1158
    ERR_LOSTJOBPFC = 1159
    ERR_LRCREGSTRSIZE = 1160
    ERR_LOSTJOBXML = 1161
    ERR_LRCREGDUP = 1162
    ERR_NOPROXY = 1163
    ERR_MISSINGLOCALFILE = 1164
    ERR_MISSINGOUTPUTFILE = 1165
    ERR_SIGPIPE = 1166
    ERR_MISSFILEXML = 1167
    ERR_SIZETOOLARGE = 1168
    ERR_FAILEDLFCREG = 1169
    ERR_FAILEDADLOCAL = 1170
    ERR_GETADMISMATCH = 1171
    ERR_PUTADMISMATCH = 1172
    ERR_PANDAMOVERFILENOTCACHED = 1173
    ERR_PANDAMOVERTRANSFER = 1174
    ERR_GETWRONGSIZE = 1175
    ERR_NOCHILDPROCESSES = 1176
    ERR_NOVOMSPROXY = 1177
    ERR_NOSTAGEDFILES = 1178
    ERR_FAILEDLFCGETREPS = 1179
    ERR_GETGLOBUSSYSERR = 1180
    ERR_PUTGLOBUSSYSERR = 1181
    ERR_FAILEDLFCGETREP = 1182
    ERR_GUIDSEXISTSINLRC = 1183
    ERR_MISSINGPFC = 1184
    ERR_NOSOFTWAREDIR = 1186
    ERR_NOPAYLOADMETADATA = 1187
    ERR_LCGGETTURLS = 1188
    ERR_LCGGETTURLSTIMEOUT = 1189
    ERR_LFNTOOLONG = 1190
    ERR_ZEROFILESIZE = 1191
    ERR_DBRELNOTYETTRANSFERRED = 1192
    ERR_SEPROBLEM = 1193
    ERR_NOFILEVERIFICATION = 1194
    ERR_COMMANDTIMEOUT = 1195
    ERR_GETFAILEDTOMOUNTNFS4 = 1196
    ERR_GETPNFSSYSTEMERROR = 1197
    ERR_MKDIRWORKDIR = 1199
    ERR_KILLSIGNAL = 1200
    ERR_SIGTERM = 1201
    ERR_SIGQUIT = 1202
    ERR_SIGSEGV = 1203
    ERR_SIGXCPU = 1204
    # ERR_USERKILL = 1205 # not used by pilot
    ERR_SIGBUS = 1206
    ERR_SIGUSR1 = 1207
    ERR_NOPAYLOADOUTPUT = 1210
    ERR_MISSINGINSTALLATION = 1211
    ERR_PAYLOADOUTOFMEMORY = 1212
    ERR_REACHEDMAXTIME = 1213
    ERR_DAFSNOTALLOWED = 1214
    ERR_NOTCPCONNECTION = 1215
    ERR_NOPILOTTCPSERVER = 1216
    ERR_CORECOUNTMISMATCH = 1217
    ERR_RUNEVENTEXC = 1218
    ERR_UUIDGEN = 1219
    ERR_UNKNOWN = 1220
    ERR_FILEEXIST = 1221
    ERR_GETKEYPAIR = 1222
    ERR_BADALLOC = 1223
    ERR_ESRECOVERABLE = 1224
    ERR_ESMERGERECOVERABLE = 1225
    ERR_GLEXEC = 1226
    ERR_ESATHENAMPDIED = 1227
    ERR_ESFATAL = 1228
    ERR_TEFATAL = 1229
    ERR_TEBADURL = 1230
    ERR_TEINVALIDGUID = 1231
    ERR_TEWRONGGUID = 1232
    ERR_TEHOSTNAME = 1233
    ERR_EXECUTEDCLONEJOB = 1234
    ERR_PAYLOADEXCEEDMAXMEM = 1235
    ERR_FAILEDBYSERVER = 1236
    ERR_ESKILLEDBYSERVER = 1237
    ERR_NOEVENTS = 1238
    ERR_OVERSUBSCRIBEDEVENTS = 1239
    ERR_ESMESSAGESERVER = 1240
    ERR_ESOBJECTSTORESETUP = 1241
    ERR_CHKSUMNOTSUP = 1242
    ERR_ESPREFETCHERDIED = 1243
    ERR_NORELEASEFOUND = 1244
    ERR_TOOFEWEVENTS = 1245

    # internal error codes (note: these overlap the value space of nothing
    # above; they are used internally only)
    ERR_DDMREG = 1
    ERR_FILEONTAPE = 2

    # Mapping from error code constant to its human-readable description.
    # Commented-out numeric entries document retired codes.
    pilotError = {
        ERR_UNKNOWNERROR : "",
        ERR_GENERALERROR : "General pilot error, consult batch log",
        ERR_GETDATAEXC : "Get function can not be called for staging input file",
        ERR_NOLOCALSPACE : "No space left on local disk",
        ERR_STAGEINFAILED : "Get error: Staging input file failed",
        ERR_REPNOTFOUND : "Get error: Replica not found",
        ERR_LRCREGCONNREF : "LRC registration error: Connection refused",
        # 1102 : "Expected output file does not exist", # not used, see ERR_MISSINGOUTPUTFILE below
        ERR_NOSUCHFILE : "No such file or directory",
        ERR_USERDIRTOOLARGE : "User work directory too large",
        ERR_LFCADDCSUMFAILED : "Put error: Failed to add file size and checksum to LFC",
        ERR_STDOUTTOOBIG : "Payload stdout file too big",
        ERR_MISSDBREL : "Get error: Missing DBRelease file",
        ERR_FAILEDLCGREG : "Put error: LCG registration failed",
        ERR_CMTCONFIG : "Required CMTCONFIG incompatible with WN",
        ERR_SETUPFAILURE : "Failed during setup",
        ERR_RUNJOBEXC : "Exception caught by RunJob*",
        ERR_PILOTEXC : "Exception caught by pilot",
        ERR_GETLFCIMPORT : "Get error: Failed to import LFC python module",
        ERR_PUTLFCIMPORT : "Put error: Failed to import LFC python module",
        ERR_NFSSQLITE : "NFS SQLite locking problems",
        ERR_QUEUEDATA : "Pilot could not download queuedata",
        ERR_QUEUEDATANOTOK : "Pilot found non-valid queuedata",
        ERR_CURLSPACE : "Pilot could not curl space report",
        ERR_DDMSPACE : "Pilot aborted due to DDM space shortage",
        ERR_NOSTMATCHDEST : "Space token descriptor does not match destination path",
        # 1121 : "Can not read the xml file for registering output files to dispatcher", # not used
        ERR_NOLFCSFN : "Bad replica entry returned by lfc_getreplicas(): SFN not set in LFC for this guid",
        ERR_MISSINGGUID : "Missing guid in output file list",
        ERR_OUTPUTFILETOOLARGE : "Output file too large",
        ERR_NOPFC : "Get error: Failed to get PoolFileCatalog",
        ERR_PUTFUNCNOCALL : "Put function can not be called for staging out",
        ERR_LRCREG : "LRC registration error (consult log file)",
        ERR_NOSTORAGE : "Put error: Fetching default storage URL failed",
        ERR_MKDIR : "Put error: Error in mkdir on localSE, not allowed or no available space",
        ERR_FAILEDSIZELOCAL : "Could not get file size in job workdir",
        ERR_FAILEDMD5LOCAL : "Error running md5sum on the file in job workdir",
        ERR_STAGEOUTFAILED : "Put error: Error in copying the file from job workdir to localSE",
        ERR_FAILEDSIZE : "Put error: could not get the file size on localSE",
        ERR_PUTWRONGSIZE : "Put error: Problem with copying from job workdir to local SE: size mismatch",
        ERR_FAILEDMD5 : "Put error: Error running md5sum on the file on local SE",
        ERR_PUTMD5MISMATCH : "Put error: Problem with copying from job workdir to local SE: md5sum mismatch",
        # 1142 : "Put error: failed to register the file on local SE", # not used
        ERR_CHMODTRF : "Failed to chmod trf",
        ERR_PANDAKILL : "This job was killed by panda server",
        ERR_GETMD5MISMATCH : "Get error: md5sum mismatch on input file",
        ERR_DYNTRFINST : "Trf installation dir does not exist and could not be installed",
        # 1147 : "Put error: dccp returned readOnly", # not used
        ERR_FAILEDRM : "Put error: Failed to remove readOnly file in dCache",
        ERR_TRFDOWNLOAD : "wget command failed to download trf",
        ERR_LOOPINGJOB : "Looping job killed by pilot",
        ERR_GETTIMEOUT : "Get error: Input file staging timed out",
        ERR_PUTTIMEOUT : "Put error: File copy timed out",
        ERR_LOSTJOBNOTFINISHED : "Lost job was not finished",
        ERR_LOSTJOBLOGREG : "Failed to register log file",
        ERR_LOSTJOBFILETRANSFER : "Failed to move output files for lost job",
        ERR_LOSTJOBRECOVERY : "Pilot could not recover job",
        # 1157 : "Could not create log file", # not used
        ERR_LOSTJOBMAXEDOUT : "Reached maximum number of recovery attempts",
        ERR_LOSTJOBPFC : "Job recovery could not read PoolFileCatalog.xml file (guids lost)",
        ERR_LRCREGSTRSIZE : "LRC registration error: file name string size exceeded limit of 250",
        ERR_LOSTJOBXML : "Job recovery could not generate xml for remaining output files",
        ERR_LRCREGDUP : "LRC registration error: Non-unique LFN",
        ERR_NOPROXY : "Grid proxy not valid",
        ERR_MISSINGLOCALFILE : "Get error: Local input file missing",
        ERR_MISSINGOUTPUTFILE : "Put error: Local output file missing",
        ERR_SIGPIPE : "Put error: File copy broken by SIGPIPE",
        ERR_MISSFILEXML : "Get error: Input file missing in PoolFileCatalog.xml",
        ERR_SIZETOOLARGE : "Get error: Total file size too large",
        ERR_FAILEDLFCREG : "Put error: File registration failed",
        ERR_FAILEDADLOCAL : "Error running adler32 on the file in job workdir",
        ERR_GETADMISMATCH : "Get error: adler32 mismatch on input file",
        ERR_PUTADMISMATCH : "Put error: adler32 mismatch on output file",
        ERR_PANDAMOVERFILENOTCACHED : "PandaMover staging error: File is not cached",
        ERR_PANDAMOVERTRANSFER : "PandaMover transfer failure",
        ERR_GETWRONGSIZE : "Get error: Problem with copying from local SE to job workdir: size mismatch",
        ERR_NOCHILDPROCESSES : "Pilot has no child processes (job wrapper has either crashed or did not send final status)",
        ERR_NOVOMSPROXY : "Voms proxy not valid",
        ERR_NOSTAGEDFILES : "Get error: No input files are staged",
        ERR_FAILEDLFCGETREPS : "Get error: Failed to get replicas",
        ERR_GETGLOBUSSYSERR : "Get error: Globus system error",
        ERR_PUTGLOBUSSYSERR : "Put error: Globus system error",
        ERR_FAILEDLFCGETREP : "Get error: Failed to get replica",
        ERR_GUIDSEXISTSINLRC : "LRC registration error: Guid-metadata entry already exists",
        ERR_MISSINGPFC : "Put error: PoolFileCatalog could not be found in workdir",
        # 1185 : "Put error: Error running adler32 on the file in job workdir", # not used
        ERR_NOSOFTWAREDIR : "Software directory does not exist",
        ERR_NOPAYLOADMETADATA : "Payload metadata is not available",
        ERR_LCGGETTURLS : "lcg-getturls failed",
        ERR_LCGGETTURLSTIMEOUT : "lcg-getturls was timed-out",
        ERR_LFNTOOLONG : "LFN too long (exceeding limit of 150 characters)",
        ERR_ZEROFILESIZE : "Illegal zero file size",
        ERR_DBRELNOTYETTRANSFERRED : "DBRelease file has not been transferred yet",
        ERR_NOFILEVERIFICATION : "File verification failed",
        ERR_COMMANDTIMEOUT : "Command timed out",
        ERR_GETFAILEDTOMOUNTNFS4 : "Get error: Failed to mount NSF4",
        ERR_GETPNFSSYSTEMERROR : "Get error: PNFS system error",
        # 1198 : "Can not check the child process status from the heartbeat process", # not used
        ERR_MKDIRWORKDIR : "Could not create directory",
        ERR_KILLSIGNAL : "Job terminated by unknown kill signal",
        ERR_SIGTERM : "Job killed by signal: SIGTERM",
        ERR_SIGQUIT : "Job killed by signal: SIGQUIT",
        ERR_SIGSEGV : "Job killed by signal: SIGSEGV",
        ERR_SIGXCPU : "Job killed by signal: SIGXCPU",
        ERR_SIGUSR1 : "Job killed by signal: SIGUSR1",
        ERR_SIGBUS : "Job killed by signal: SIGBUS",
        ERR_NOPAYLOADOUTPUT : "No payload output",
        ERR_MISSINGINSTALLATION : "Missing installation",
        ERR_PAYLOADOUTOFMEMORY : "Payload ran out of memory",
        ERR_REACHEDMAXTIME : "Reached batch system time limit",
        ERR_DAFSNOTALLOWED : "Site does not allow requested direct access or file stager",
        ERR_NOTCPCONNECTION : "Failed to open TCP connection to localhost (worker node network problem)",
        ERR_NOPILOTTCPSERVER : "Pilot TCP server has died",
        ERR_CORECOUNTMISMATCH : "Mismatch between core count in job and queue definition",
        ERR_RUNEVENTEXC : "Exception caught by runEvent",
        ERR_UUIDGEN : "uuidgen failed to produce a guid",
        ERR_UNKNOWN : "Job failed due to unknown reason (consult log file)",
        ERR_FILEEXIST : "File already exist",
        ERR_GETKEYPAIR : "Failed to get security key pair",
        ERR_BADALLOC : "TRF failed due to bad_alloc",
        ERR_ESMERGERECOVERABLE : "Recoverable Event Service Merge error",
        ERR_ESRECOVERABLE: "Recoverable Event Service error",
        ERR_GLEXEC: "gLExec related error",
        ERR_ESATHENAMPDIED: "AthenaMP ended Event Service job prematurely",
        ERR_ESFATAL: "Fatal Event Service error",
        ERR_TEFATAL: "Fatal Token Extractor error",
        ERR_TEHOSTNAME: "Token Extractor error: Host name could not be resolved",
        ERR_TEBADURL: "Token Extractor error: Bad URL",
        ERR_TEINVALIDGUID: "Token Extractor error: Invalid GUID length",
        ERR_TEWRONGGUID: "Token Extractor error: No tokens for this GUID",
        ERR_EXECUTEDCLONEJOB: "Already executed clone job",
        ERR_PAYLOADEXCEEDMAXMEM: "Payload exceeded maximum allowed memory",
        ERR_FAILEDBYSERVER: "Failed by server",
        ERR_ESKILLEDBYSERVER: "Event Service job killed by server",
        ERR_NOEVENTS: "Event Service no available events",
        ERR_OVERSUBSCRIBEDEVENTS: "Event Service over subscribed events",
        ERR_ESMESSAGESERVER: "Event service message server error",
        ERR_ESOBJECTSTORESETUP: "Event service objectstore setup",
        ERR_CHKSUMNOTSUP: "Mover error: query checksum is not supported",
        ERR_ESPREFETCHERDIED: "Prefetcher ended Event Service job prematurely",
        ERR_NORELEASEFOUND: "No release candidates found",
        ERR_TOOFEWEVENTS: "Too few events, less events than minimal requirement",
    }

    # Numeric code groups used by the classification helpers below.
    getErrorCodes = [1097, 1099, 1100, 1103, 1107, 1113, 1130, 1145, 1151, 1164, 1167, 1168, 1171, 1175, 1178, 1179, 1180, 1182]
    putErrorCodes = [1101, 1114, 1122, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1140, 1141, 1152, 1154, 1155, 1181]
    recoverableErrorCodes = [0] + putErrorCodes

    # Error codes that will issue a Pilot-controlled resubmission
    PilotResubmissionErrorCodes = [1008, 1098, 1099, 1110, 1113, 1114, 1115, 1116, 1117, 1137, 1139, 1151, 1152, 1171, 1172, 1177, 1179, 1180, 1181, 1182, 1188, 1189, 1195, 1196, 1197, 1219]

    # Error codes used with FAX fail-over (only an error code in this list will allow FAX fail-over)
    PilotFAXErrorCodes = [1103] + PilotResubmissionErrorCodes

    def getPilotErrorDiag(self, code=0):
        """Return the description text for *code*, or a generic
        'Unknown pilot error code' message if the code is not known."""
        pilotErrorDiag = ""
        if code in self.pilotError.keys():
            pilotErrorDiag = self.pilotError[code]
        else:
            pilotErrorDiag = "Unknown pilot error code"
        return pilotErrorDiag

    def isGetErrorCode(self, code=0):
        """ Determine whether code is in the get error list or not """
        state = False
        if code in self.getErrorCodes:
            state = True
        return state

    def isPutErrorCode(self, code=0):
        """ Determine whether code is in the put error list or not """
        state = False
        if code in self.putErrorCodes:
            state = True
        return state

    @classmethod
    def isRecoverableErrorCode(self, code=0):
        """ Determine whether code is a recoverable error code or not """
        # NOTE(review): classmethod, so the first argument (named 'self')
        # is actually the class.
        return code in self.recoverableErrorCodes

    def isPilotResubmissionErrorCode(self, code=0):
        """ Determine whether code issues a Pilot-controlled resubmission """
        state = False
        if code in self.PilotResubmissionErrorCodes:
            state = True
        return state

    def isPilotFAXErrorCode(self, code=0):
        """ Determine whether code allows for a FAX fail-over """
        state = False
        if code in self.PilotFAXErrorCodes:
            state = True
        return state

    @classmethod
    def getErrorStr(self, code):
        """
        Avoids exception if an error is not in the dictionary.
        An empty string is returned if the error is not in the dictionary.
        """
        # NOTE(review): classmethod; first argument is the class.
        return self.pilotError.get(code, '')

    def getErrorName(self, code):
        """Reverse lookup: return the first class attribute name bound to
        the value *code* (e.g. 1008 -> 'ERR_GENERALERROR'), or None.

        NOTE(review): dict iteration order is arbitrary here; duplicated
        values (e.g. internal codes 1 and 2) may resolve ambiguously.
        """
        for k in self.__class__.__dict__.keys():
            if self.__class__.__dict__[k] == code:
                return k
        return None


class PilotException(Exception):
    """Exception carrying a PilotErrors code, a state string and the
    auto-derived code description."""

    def __init__(self, message, code=PilotErrors.ERR_GENERALERROR, state='', *args):
        self.code = code
        self.state = state
        self.message = message
        super(PilotException, self).__init__(*args)

    @property
    def code(self):
        """The numeric pilot error code."""
        return self._code

    @code.setter
    def code(self, code):
        # Setting the code also refreshes the cached description text.
        self._code = code
        self.code_description = PilotErrors.getErrorStr(code)

    def __str__(self):
        return "%s: %s: %s%s" % (self.__class__.__name__, self.code, self.message, ' : %s' % self.args if self.args else '')

    def __repr__(self):
        return "%s: %s: %s%s" % (self.__class__.__name__, repr(self.code), repr(self.message), ' : %s' % repr(self.args) if self.args else '')
Taught one of the owner's two sons and son-in-law how to fly. Great people. I still remember sitting around with the boss trying to figure out what kind of plane they should get to replace the Electras. The C-97s were too old, but there was talk of 737s with the gravel kit. Then one day a couple of C-130s showed up. Then another day we hear, "Hey, I bought another C-130!" That family loves to fly. My favorite plane was the Super Stinson on floats.
#!/usr/bin/env python
#
# Copyright 2014 Michele Filannino
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# author: Michele Filannino
# email: filannim@cs.man.ac.uk
#
# For details, see www.cs.man.ac.uk/~filannim/

import argparse
import codecs
import glob
import logging
import os

from mantime.mantime import ManTIME
from mantime.readers import TempEval3FileReader
from mantime.writers import TempEval3Writer
from mantime.attributes_extractor import FullExtractor


def main():
    """Train a model on, or annotate, every document in a folder.

    In 'train' mode the input folder is handed to ManTIME.train().
    In 'test' mode every '*.*' file in the folder is annotated and the
    result written to ./output/<name-without-extension>.
    """
    logging.basicConfig(format='%(asctime)s: %(message)s',
                        level=logging.DEBUG,
                        datefmt='%m/%d/%Y %I:%M:%S %p')

    # Parse input
    parser = argparse.ArgumentParser(
        description='ManTIME: temporal information extraction')
    parser.add_argument('mode', choices=['train', 'test'],
                        help='Train or Test mode?')
    parser.add_argument('input_folder', help='Input data folder path')
    parser.add_argument('model',
                        help='Name of the model to use (case sensitive)')
    # NOTE(review): '-v/--version' is parsed but never acted upon below.
    parser.add_argument('-v', '--version', help='show the version and exit',
                        action='store_true')
    parser.add_argument('-ppp', '--post_processing_pipeline',
                        action='store_true',
                        help='it uses the post processing pipeline.')
    args = parser.parse_args()

    # ManTIME
    mantime = ManTIME(reader=TempEval3FileReader(),
                      writer=TempEval3Writer(),
                      extractor=FullExtractor(),
                      model_name=args.model,
                      pipeline=args.post_processing_pipeline)

    if args.mode == 'train':
        # Training
        mantime.train(args.input_folder)
    else:
        # Testing.
        # BUG FIX: the original used 'assert os.path.exists(...)' with the
        # message 'Model not found.' although it checks the *input folder*;
        # asserts are also stripped under 'python -O', so validate
        # explicitly instead.
        if not os.path.exists(args.input_folder):
            raise SystemExit(
                'Input folder not found: {}'.format(args.input_folder))
        input_files = os.path.join(args.input_folder, '*.*')
        documents = sorted(glob.glob(input_files))
        if not documents:
            raise SystemExit('Input folder is empty.')
        # Robustness: make sure the output folder exists before writing.
        if not os.path.isdir('./output/'):
            os.makedirs('./output/')
        for index, doc in enumerate(documents, start=1):
            basename = os.path.basename(doc)
            writein = os.path.join('./output/', basename)
            position = '[{}/{}]'.format(index, len(documents))
            # Output file keeps the input name minus its extension
            # (os.path.splitext replaces the hand-rolled split/join).
            file_path = os.path.splitext(writein)[0]
            with codecs.open(file_path, 'w', encoding='utf8') as output:
                logging.info('{} Doc {}.'.format(position, basename))
                output.write(mantime.label(doc)[0])
                logging.info('{} Doc {} annotated.'.format(position,
                                                           basename))


if __name__ == '__main__':
    main()
For driving tuition covering all of Bury including Radcliffe, Prestwich & Ramsbottom, Bolton, Haslingden, Rawtenstall, Rochdale and North Manchester, call Heathcote Driver Training today on 0161 796 8118. Here at Heathcote Driver Training we are always striving to give the highest quality Driver Training to meet today’s standards and prepare the driver with the right attitude and high level of driving skills needed. Increasing difficulties in today’s road and traffic environments make it all the more important to gain ‘Thorough Driver Training’, with a large emphasis on the individual’s ‘Attitude to Driving’. Driving is a ‘Life Skill’ which should continue to improve after you have passed your test until the day you stop driving. We, at Heathcote Driver Training, will always give you our highest standards in training, from beginner to advanced, refresher to motorway courses, also Instructor and Fleet Training Courses. Always remember: ‘Safe Driving for Life!’ Big Enough to Cope - Small Enough to Care! If you are in the Bury, Bolton, Rochdale, Lancashire or Greater Manchester area and would like to learn how to drive a car then you have come to the right place – Heathcote Driver Training is a very professional and friendly Driver Training Company teaching you how to drive carefully and confidently and pass your practical and theory driving tests. With Heathcote Driver Training you will learn a skill for life. Driver Training covering all of Bury including Radcliffe, Prestwich, Ramsbottom, Bolton, Haslingden, Rawtenstall, Rochdale and North Manchester. We look forward to hearing from you.
# Demo program using ManGrating. # # Copyright (C) 2010-2011 Huang Xin # # See LICENSE.TXT that came with this file. """ USAGE: Move the mouse cursor to change the position of the grating. Scroll the mouse wheel to change the orientation. Press right arrow to increase the spatial frequency. Press left arrow to decrease the spatial frequency. Press up arrow to increase the temporal frequency. ... """ from __future__ import division from StimControl.LightStim.Core import DefaultScreen from StimControl.LightStim.LightData import dictattr from StimControl.LightStim.FrameControl import FrameSweep from StimControl.LightStim.ManGrating import ManGrating # Manual Grating experiment parameters, all must be scalars DefaultScreen(['control','left','right']) p = dictattr() # mask, one of: None, 'gaussian', or 'circle' p.mask = 'circle' p.maskSizeStepDeg = 0.5 # initial grating phase p.phase0 = 0 # grating mean luminance (0-1) p.ml = 0.5 # grating contrast (0-1) p.contrast = 1 # background brightness (0-1) p.bgbrightness = 0 # antialiase the bar? p.antialiase = True # flash the grating? p.flash = False # duration of each on period (sec) p.flashduration = 0.5 # duration of each off period (sec) p.flashinterval = 0.3 # factor to chage bar width and height by left/right/up/down key p.sizemultiplier = 1.02 # factor to change temporal freq by on up/down p.tfreqmultiplier = 1.01 # factor to change spatial freq by on left/right p.sfreqmultiplier = 1.01 # factor to change contrast by on +/- p.contrastmultiplier = 1.005 # orientation step size to snap to when scrolling mouse wheel (deg) p.snapDeg = 12 stimulus_control = ManGrating(disp_info=True, params=p, viewport='control') stimulus_left = ManGrating(disp_info=False, params=p, viewport='left') stimulus_right = ManGrating(disp_info=False, params=p, viewport='right') sweep = FrameSweep() sweep.add_stimulus(stimulus_control) sweep.add_stimulus(stimulus_left) sweep.add_stimulus(stimulus_right) sweep.go()
Innovation isn’t easy. It takes courage to experiment and advance a new idea. Europe has always excelled at this. Radio, television, and mobile communications advances all originated in Europe. But past success won’t ensure Europe’s long tradition of innovation continues. New technologies require more risk-taking and the ability to launch new products with speed and scale. In its manifesto for growth and jobs 2015-2020, TechUK sets out what the government must do to build on the progress already achieved to secure Britain’s digital potential. This includes the appointment of dedicated digital ministers in every department, a new chief privacy officer, and a new Foreign and Commonwealth Office ‘digital trade tsar’ to be a leading voice in Europe. It is no longer enough for tech startups to exist in silos of isolation. Tech businesses now need the energy, talent and diversity of the world’s megacities to thrive. Even in California, home to the Silicon Valley, startups are looking for the next thing and flocking to the big cities. The growth rate of Internet access in Latin America is among the highest in the world — only Africa and the Middle East are growing more rapidly. One reason for this is the proliferation of cheap smartphones. The award-winning alternative London, UK creative tech trade mission is back. Hackney House Austin 2014 opens on Friday 7 March and runs all weekend through to Monday 10 March. We are based at 721 Congress Avenue, corner of 8th Street, Austin, TX 78701. The UK “Centre for Entrepreneurs” think tank is to take on the running of the national enterprise campaign “StartUp Britain” when the founders step down next month. The official handover will be on Thursday March 27, creating future initiatives for real British entrepreneurs instead of the prior focus on small business retail and PopUp Shops. Mattel announced that an entrepreneur doll would join its “I Can Be” career line as its Career of the Year. 
The entrepreneur doll sports a sleek pink shift dress and carries a tiny tablet and smartphone. With nearly 8.6 million female business owners, it’s not a stretch that the new career for women is one she forges herself.
# Big Data Smart Socket # Copyright (C) 2016 Clemson University # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # import argparse import logging import os import traceback import tempfile import requests from ..config import client_destination, metadata_repository_url, dtn_host, dtn_user, dtn_path from ..transfer.base import Transfer from ..transfer.data import run_data_transfer from ..transfer.mechanisms import available_mechanisms from ..transfer.reporting import ReportsFile, send_report from ..util import run_subprocess cli_help = "Download data file(s)." 
logger = logging.getLogger("bdss")


def configure_parser(parser):
    """Register the 'transfer' subcommand's arguments on *parser*."""
    input_group = parser.add_mutually_exclusive_group(required=True)

    input_group.add_argument("manifest_file",
                             help="File containing a list of URLs to transfer",
                             nargs="?",
                             type=argparse.FileType("r"))

    input_group.add_argument("--urls", "-u",
                             dest="urls",
                             help="URL(s) of data files to transfer",
                             metavar="URL",
                             nargs="+")

    parser.add_argument("--destination", "-d",
                        dest="destination_directory",
                        default=os.getcwd(),
                        help="Path to directory to store transferred files in")

    parser.add_argument("--dry-run",
                        action="store_true",
                        help="Display available sources for files, but do not transfer")

    parser.add_argument("--transfer-report", "-t",
                        dest="report_file",
                        help="Path to write transfer report to",
                        type=argparse.FileType("w"))


def output_file_name(url):
    """Return the file name part of *url*: the basename of the path with
    any query string stripped."""
    return url.partition("?")[0].rpartition("/")[2]


def get_transfers(url, mechanisms):
    """Ask the metadata repository for alternative transfers of *url*.

    Always returns at least one Transfer: the original URL (with its
    default mechanism) is appended as a last-resort fallback.
    """
    transfers = []

    data = dict(
        available_mechanisms=mechanisms,
        url=url
    )
    if client_destination:
        data["destination"] = client_destination

    logger.info("Requesting transfers for %s", url)
    try:
        response = requests.post("%s/transfers" % metadata_repository_url,
                                 data=data,
                                 headers={"Accept": "application/json"})
        response = response.json()
        transfers = [Transfer(**r) for r in response["transfers"]]
        if not transfers:
            # FIX: logger.warn is a deprecated alias of logger.warning
            logger.warning("Received no transfers")
    except Exception:
        # FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit. Still deliberately broad: any
        # failure falls back to the original URL below.
        logger.warning("Request for transfers failed")
        logger.debug(traceback.format_exc())

    # As a last resort, fall back to original URL and its default mechanism
    # Defaults are defined in mechanisms/__init__ module
    default_transfer = Transfer(url)
    if default_transfer not in transfers:
        transfers.append(default_transfer)

    return transfers


def handle_dtn_action(args, parser, reports_file):
    """Run the transfer remotely on the configured DTN, copy the results
    back, then remove the remote scratch directory."""
    # FIX: the original created a NamedTemporaryFile purely to borrow its
    # local /tmp path as a remote directory name, leaking the open file
    # handle and embedding a local absolute path into the remote path.
    # A random token gives the same uniqueness without side effects.
    dest_dir = "%s/bdss-%s" % (dtn_path, os.urandom(8).hex())
    conn_str = "%s@%s" % (dtn_user, dtn_host)
    logger.info("Initiating transfer with DTN: %s", conn_str)

    # Download the files using the DTN by calling BDSS on that server instead.
    bdss_cmd = " ".join(['bdss', 'transfer',
                         '--urls', " ".join(args.urls),
                         '--destination', dest_dir])
    run_subprocess(["ssh", conn_str, bdss_cmd])

    # Move the files to where they should be.
    logger.info("Copying files from DTN...")
    run_subprocess(["scp", "-r", "%s:%s/*" % (conn_str, dest_dir),
                    args.destination_directory])

    # Finally delete the files on the remote server.
    # FIX: log message typo ("Removing from from DTN").
    logger.info("Removing files from DTN...")
    run_subprocess(["ssh", conn_str, "rm -rf %s" % dest_dir])


def handle_local_action(args, parser, reports_file):
    """Transfer each URL locally, trying alternative sources in order
    until one succeeds."""
    for url in args.urls:
        output_path = os.path.abspath(
            os.path.join(args.destination_directory, output_file_name(url)))
        if os.path.isfile(output_path):
            logger.warning("File at %s already exists at %s", url, output_path)
            continue

        transfers = get_transfers(url, available_mechanisms())
        logger.info("%d transfer(s) for %s", len(transfers), url)
        logger.info("------------------")
        for t in transfers:
            logger.info(str(t))

        if args.dry_run:
            continue

        transfer_success = False
        for t in transfers:
            report = run_data_transfer(t, output_path)
            if report.success:
                transfer_success = True
                logger.info("Transfer successful")
                logger.debug(report)
                send_report(report)
                if reports_file:
                    reports_file.write_report(report)
                break
            else:
                logger.warning("Transfer failed")
                send_report(report)

        if not transfer_success:
            logger.error("Failed to transfer file")


def handle_action(args, parser):
    """Entry point for the 'transfer' subcommand: resolve the URL list,
    ensure the destination exists and dispatch to DTN or local handling."""
    if args.manifest_file:
        args.urls = [line.strip() for line in args.manifest_file
                     if line.strip()]

    os.makedirs(args.destination_directory, exist_ok=True)

    reports_file = ReportsFile(args.report_file) if args.report_file else None

    if dtn_host:
        handle_dtn_action(args, parser, reports_file)
    else:
        handle_local_action(args, parser, reports_file)
The days are getting that little bit longer as we slowly edge towards summer. Just the small matter of spring in between, and once it’s over you can bring out the barbecue and deckchairs. But put them on hold for now and give your garden a little TLC to make your summer lounging more enjoyable. Before you start perfecting your outdoor space, remove any weeds and deadhead any of the early flowering shrubs that have flowered and gone already. It’s also beneficial to thin any older wood while pruning, which will help to improve future growth of plants. Although this stage may seem annoying, it gives you a clean canvas to create your horticultural haven. Make your garden a space for all the family to enjoy, and even get the kids involved in the process. Opt for forgiving bedding plants, like Sweet Peas, Busy Lizzies, or Lobelia, that are easy to plant — meaning they’re great for mini gardeners. Or how about placing these in hanging baskets, adding a splash of colour to all angles of your garden? Pop out bird or squirrel feeders and your garden may just become a little more exciting, with small visitors other than your own children! ‘Garden ornaments’ doesn’t necessarily have to mean covering your garden in gnomes, cute as they may sometimes be. How about beautiful butterfly mosaic ornaments, or ladybugs and toadstools? Be creative. There’s plenty to choose from. No British summer is complete without a barbecue or two (no matter how damp they might be), so invest in a barbecue that you don’t need to dispose of this year. It doesn’t need to break the budget: opt for a small ‘bucket barbecue’, which is low-cost and ideal for smaller spaces, or splash out and go for a larger, gas barbecue. Either way, the burgers will go down a treat! Transform your child-friendly garden into a relaxed space in which to unwind on a cool summer evening.
Coil cute, budget-friendly outdoor fairy lights around garden walls, fences, benches, or any object that would benefit from a little bit of twinkle. Or, how about some mosaic tea light holders, perfect for citronella tea lights to keep the midges at bay? Add a patio heater and those nights won’t seem quite so chilly. All that’s left to do is put your feet up and enjoy your beautiful garden under the starry, silver sky. Gardening is fun for the whole family, and those little green fingertips might just make this job a whole lot easier. Roll out the picnic blanket, and roll on summer! "Why Blogging About Your Job Is Not Your Best Career Move"
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""System for specifying garbage collection (GC) of path based data. This framework allows for GC of data specified by path names, for example files on disk. gc.Path objects each represent a single item stored at a path and may be a base directory, /tmp/exports/0/... /tmp/exports/1/... ... or a fully qualified file, /tmp/train-1.ckpt /tmp/train-2.ckpt ... A gc filter function takes and returns a list of gc.Path items. Filter functions are responsible for selecting Path items for preservation or deletion. Note that functions should always return a sorted list. 
For example, base_dir = "/tmp" # create the directories for e in xrange(10): os.mkdir("%s/%d" % (base_dir, e), 0o755) # create a simple parser that pulls the export_version from the directory def parser(path): match = re.match("^" + base_dir + "/(\\d+)$", path.path) if not match: return None return path._replace(export_version=int(match.group(1))) path_list = gc.get_paths("/tmp", parser) # contains all ten Paths every_fifth = gc.mod_export_version(5) print every_fifth(path_list) # shows ["/tmp/0", "/tmp/5"] largest_three = gc.largest_export_versions(3) print largest_three(all_paths) # shows ["/tmp/7", "/tmp/8", "/tmp/9"] both = gc.union(every_fifth, largest_three) print both(all_paths) # shows ["/tmp/0", "/tmp/5", # "/tmp/7", "/tmp/8", "/tmp/9"] # delete everything not in 'both' to_delete = gc.negation(both) for p in to_delete(all_paths): gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2", # "/tmp/3", "/tmp/4", "/tmp/6", """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import heapq import math import os from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated Path = collections.namedtuple('Path', 'path export_version') @deprecated('2017-06-30', 'Please use SavedModel instead.') def largest_export_versions(n): """Creates a filter that keeps the largest n export versions. Args: n: number of versions to keep. Returns: A filter function that keeps the n largest paths. """ def keep(paths): heap = [] for idx, path in enumerate(paths): if path.export_version is not None: heapq.heappush(heap, (path.export_version, idx)) keepers = [paths[i] for _, i in heapq.nlargest(n, heap)] return sorted(keepers) return keep @deprecated('2017-06-30', 'Please use SavedModel instead.') def one_of_every_n_export_versions(n): r"""Creates a filter that keeps one of every n export versions. 
Args: n: interval size. Returns: A filter function that keeps exactly one path from each interval [0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an interval the largest is kept. """ def keep(paths): keeper_map = {} # map from interval to largest path seen in that interval for p in paths: if p.export_version is None: # Skip missing export_versions. continue # Find the interval (with a special case to map export_version = 0 to # interval 0. interval = math.floor( (p.export_version - 1) / n) if p.export_version else 0 existing = keeper_map.get(interval, None) if (not existing) or (existing.export_version < p.export_version): keeper_map[interval] = p return sorted(keeper_map.values()) return keep @deprecated('2017-06-30', 'Please use SavedModel instead.') def mod_export_version(n): """Creates a filter that keeps every export that is a multiple of n. Args: n: step size. Returns: A filter function that keeps paths where export_version % n == 0. """ def keep(paths): keepers = [] for p in paths: if p.export_version % n == 0: keepers.append(p) return sorted(keepers) return keep @deprecated('2017-06-30', 'Please use SavedModel instead.') def union(lf, rf): """Creates a filter that keeps the union of two filters. Args: lf: first filter rf: second filter Returns: A filter function that keeps the n largest paths. """ def keep(paths): l = set(lf(paths)) r = set(rf(paths)) return sorted(list(l|r)) return keep @deprecated('2017-06-30', 'Please use SavedModel instead.') def negation(f): """Negate a filter. Args: f: filter function to invert Returns: A filter function that returns the negation of f. """ def keep(paths): l = set(paths) r = set(f(paths)) return sorted(list(l-r)) return keep @deprecated('2017-06-30', 'Please use SavedModel instead.') def get_paths(base_dir, parser): """Gets a list of Paths in a given directory. Args: base_dir: directory. 
parser: a function which gets the raw Path and can augment it with information such as the export_version, or ignore the path by returning None. An example parser may extract the export version from a path such as "/tmp/exports/100" an another may extract from a full file name such as "/tmp/checkpoint-99.out". Returns: A list of Paths contained in the base directory with the parsing function applied. By default the following fields are populated, - Path.path The parsing function is responsible for populating, - Path.export_version """ raw_paths = gfile.ListDirectory(base_dir) paths = [] for r in raw_paths: p = parser(Path(os.path.join(base_dir, r), None)) if p: paths.append(p) return sorted(paths)
This book presents a comprehensive overview of the most critical current political, economic, and social issues and challenges facing Serbia on its road towards integration into the European Union. The chapters in the book are written by highly renowned authorities in their respective research fields, including prominent scholars, academics, and researchers. The book provides a representative account of the most important current challenges and issues in Serbia. It can serve as an authoritative source of information on the covered topics for the general public, as well as for specialists in different fields who are interested in gaining a deeper insight into these topics.
#!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################

#
# Script to handle launching the query server process.
#
# usage: queryserver.py [start|stop|makeWinServiceDesc] [-Dhadoop=configs]
#
# NOTE: this is a Python 2 script (print statements, `print >>`, pipes).

import datetime
import getpass
import os
import os.path
import signal
import subprocess
import sys
import tempfile

# The third-party `daemon` package is optional; without it the 'start'
# and 'stop' subcommands are disabled, but foreground mode still works.
try:
    import daemon
    daemon_supported = True
except ImportError:
    # daemon script not supported on some platforms (windows?)
    daemon_supported = False

import phoenix_utils

phoenix_utils.setPath()

# --- Parse the subcommand (start/stop/makeWinServiceDesc), if any. ---
command = None

args = sys.argv

if len(args) > 1:
    if args[1] == 'start':
        command = 'start'
    elif args[1] == 'stop':
        command = 'stop'
    elif args[1] == 'makeWinServiceDesc':
        command = 'makeWinServiceDesc'

if command:
    # Pull off queryserver.py and the command
    args = args[2:]
else:
    # Just pull off queryserver.py
    args = args[1:]

# Re-quote the remaining args as a single shell-safe string that is later
# appended to the java command line.
if os.name == 'nt':
    args = subprocess.list2cmdline(args)
else:
    import pipes    # pipes module isn't available on Windows
    args = " ".join([pipes.quote(v) for v in args])

# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_config_path = phoenix_utils.hbase_conf_dir
hadoop_config_path = phoenix_utils.hadoop_conf
hadoop_classpath = phoenix_utils.hadoop_classpath

# Per-user log/out/pid file names, e.g. phoenix-alice-queryserver.log.
# TODO: add windows support
phoenix_file_basename = 'phoenix-%s-queryserver' % getpass.getuser()
phoenix_log_file = '%s.log' % phoenix_file_basename
phoenix_out_file = '%s.out' % phoenix_file_basename
phoenix_pid_file = '%s.pid' % phoenix_file_basename

# Load hbase-env.sh (posix) / hbase-env.cmd (windows) in a subshell and
# capture its resulting environment, to extract JAVA_HOME, HBASE_PID_DIR,
# HBASE_LOG_DIR.
hbase_env_path = None
hbase_env_cmd = None
if os.name == 'posix':
    hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
    hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
elif os.name == 'nt':
    hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
    hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
if not hbase_env_path or not hbase_env_cmd:
    print >> sys.stderr, "hbase-env file unknown on platform %s" % os.name
    sys.exit(-1)

# Parse the captured "KEY=VALUE" lines into a dict.
hbase_env = {}
if os.path.isfile(hbase_env_path):
    p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
    for x in p.stdout:
        (k, _, v) = x.partition('=')
        hbase_env[k.strip()] = v.strip()

# Prefer hbase-env's JAVA_HOME, then the process environment, then PATH.
java_home = hbase_env.get('JAVA_HOME') or os.getenv('JAVA_HOME')
if java_home:
    java = os.path.join(java_home, 'bin', 'java')
else:
    java = 'java'

tmp_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
# PHOENIX_QUERYSERVER_* env vars override hbase-env values; tmp_dir is the
# final fallback for pid/log directories.
opts = os.getenv('PHOENIX_QUERYSERVER_OPTS') or hbase_env.get('PHOENIX_QUERYSERVER_OPTS') or ''
pid_dir = os.getenv('PHOENIX_QUERYSERVER_PID_DIR') or hbase_env.get('HBASE_PID_DIR') or tmp_dir
log_dir = os.getenv('PHOENIX_QUERYSERVER_LOG_DIR') or hbase_env.get('HBASE_LOG_DIR') or tmp_dir
pid_file_path = os.path.join(pid_dir, phoenix_pid_file)
log_file_path = os.path.join(log_dir, phoenix_log_file)
out_file_path = os.path.join(log_dir, phoenix_out_file)

#    " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n " + \
#    " -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true" + \

# The command template; %(java)s, %(root_logger)s, %(log_dir)s and
# %(log_file)s are substituted per subcommand below.
# The command is run through subprocess so environment variables are
# automatically inherited
java_cmd = '%(java)s -cp ' + hbase_config_path + os.pathsep + hadoop_config_path + os.pathsep + \
    phoenix_utils.phoenix_client_jar + os.pathsep + phoenix_utils.phoenix_loadbalancer_jar + \
    os.pathsep + phoenix_utils.phoenix_queryserver_jar + os.pathsep + hadoop_classpath + \
    " -Dproc_phoenixserver" + \
    " -Dlog4j.configuration=file:" + os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
    " -Dpsql.root.logger=%(root_logger)s" + \
    " -Dpsql.log.dir=%(log_dir)s" + \
    " -Dpsql.log.file=%(log_file)s" + \
    " " + opts + \
    " org.apache.phoenix.queryserver.server.QueryServer " + args

if command == 'makeWinServiceDesc':
    # Emit a winsw-style XML service description to stdout and exit.
    cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA,console', 'log_dir': log_dir, 'log_file': phoenix_log_file}
    slices = cmd.split(' ')

    print "<service>"
    print "  <id>queryserver</id>"
    print "  <name>Phoenix Query Server</name>"
    print "  <description>This service runs the Phoenix Query Server.</description>"
    print "  <executable>%s</executable>" % slices[0]
    print "  <arguments>%s</arguments>" % ' '.join(slices[1:])
    print "</service>"
    sys.exit()

if command == 'start':
    if not daemon_supported:
        print >> sys.stderr, "daemon mode not supported on this platform"
        sys.exit(-1)

    # run in the background
    d = os.path.dirname(out_file_path)
    if not os.path.exists(d):
        os.makedirs(d)

    with open(out_file_path, 'a+') as out:
        # The PidFile lock doubles as an "already running" check.
        context = daemon.DaemonContext(
            pidfile = daemon.PidFile(pid_file_path, 'Query Server already running, PID file found: %s' % pid_file_path),
            stdout = out,
            stderr = out,
        )
        print 'starting Query Server, logging to %s' % log_file_path
        with context:
            # this block is the main() for the forked daemon process
            child = None
            cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA', 'log_dir': log_dir, 'log_file': phoenix_log_file}

            # notify the child when we're killed
            def handler(signum, frame):
                if child:
                    child.send_signal(signum)
                sys.exit(0)
            signal.signal(signal.SIGTERM, handler)

            print '%s launching %s' % (datetime.datetime.now(), cmd)
            child = subprocess.Popen(cmd.split())
            # Propagate the JVM's exit status as the daemon's exit status.
            sys.exit(child.wait())

elif command == 'stop':
    if not daemon_supported:
        print >> sys.stderr, "daemon mode not supported on this platform"
        sys.exit(-1)

    if not os.path.exists(pid_file_path):
        # No PID file means nothing to stop; treated as success.
        print >> sys.stderr, "no Query Server to stop because PID file not found, %s" % pid_file_path
        sys.exit(0)

    if not os.path.isfile(pid_file_path):
        print >> sys.stderr, "PID path exists but is not a file! %s" % pid_file_path
        sys.exit(1)

    pid = None
    with open(pid_file_path, 'r') as p:
        pid = int(p.read())
    if not pid:
        sys.exit("cannot read PID file, %s" % pid_file_path)

    # SIGTERM is caught by the daemon's handler above, which forwards it
    # to the JVM child before exiting.
    print "stopping Query Server pid %s" % pid
    with open(out_file_path, 'a+') as out:
        print >> out, "%s terminating Query Server" % datetime.datetime.now()
    os.kill(pid, signal.SIGTERM)

else:
    # run in the foreground using defaults from log4j.properties
    cmd = java_cmd % {'java': java, 'root_logger': 'INFO,console', 'log_dir': '.', 'log_file': 'psql.log'}
    # Because shell=True is not set, we don't have to alter the environment
    child = subprocess.Popen(cmd.split())
    sys.exit(child.wait())
This is that second book! A 27-page PDF download full of the Wizard's most important writing techniques. 1. Where Will You Show Your Mental Movie? Click the button below to add the Accidental Magic - The First 16 Chapters - PDF to your wish list.