text
stringlengths
29
850k
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

import math

from gnuradio import gr, gr_unittest, analog, blocks


def avg_mag_sqrd_c(x, alpha):
    """Reference model: single-pole IIR average of |x|^2 for complex input.

    Returns the whole filter history, beginning with the initial state 0.
    """
    history = [0.0]
    for sample in x:
        mag_sqrd = sample.real * sample.real + sample.imag * sample.imag
        history.append(alpha * mag_sqrd + (1 - alpha) * history[-1])
    return history


def avg_mag_sqrd_f(x, alpha):
    """Reference model: single-pole IIR average of x^2 for float input.

    Returns the whole filter history, beginning with the initial state 0.
    """
    history = [0.0]
    for sample in x:
        history.append(alpha * (sample * sample) + (1 - alpha) * history[-1])
    return history


class test_probe_avg_mag_sqrd(gr_unittest.TestCase):

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_c_001(self):
        # Complex probe: final level must match the reference IIR output.
        alpha = 0.0001
        src_data = [complex(i, i) for i in range(1, 11)]
        expected_result = avg_mag_sqrd_c(src_data, alpha)[-1]

        src = blocks.vector_source_c(src_data)
        op = analog.probe_avg_mag_sqrd_c(0, alpha)
        self.tb.connect(src, op)
        self.tb.run()

        self.assertAlmostEqual(expected_result, op.level(), 5)

    def test_cf_002(self):
        # Complex-to-float probe: the streamed output must match the
        # reference history (state before each update, hence [0:-1]).
        alpha = 0.0001
        src_data = [complex(i, i) for i in range(1, 11)]
        expected_result = avg_mag_sqrd_c(src_data, alpha)[0:-1]

        src = blocks.vector_source_c(src_data)
        op = analog.probe_avg_mag_sqrd_cf(0, alpha)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        self.assertComplexTuplesAlmostEqual(expected_result, dst.data(), 5)

    def test_f_003(self):
        # Float probe: final level must match the reference IIR output.
        alpha = 0.0001
        src_data = [float(i) for i in range(1, 11)]
        expected_result = avg_mag_sqrd_f(src_data, alpha)[-1]

        src = blocks.vector_source_f(src_data)
        op = analog.probe_avg_mag_sqrd_f(0, alpha)
        self.tb.connect(src, op)
        self.tb.run()

        self.assertAlmostEqual(expected_result, op.level(), 5)


if __name__ == '__main__':
    gr_unittest.run(test_probe_avg_mag_sqrd, "test_probe_avg_mag_sqrd.xml")
The relationship between spending and revenues is reflected in the surplus or deficit figure. A surplus is an excess of revenues over outlays, while a deficit is an excess of outlays over revenues. Congress controls the enactment of legislation providing budget authority and raising revenues, but not the occurrence of outlays. Because of this, Congress’ efforts to control the level of the surplus or the deficit are less effective over the short run compared to the long run. The difference between government revenues and outlays in a fiscal year equals the budget deficit or budget surplus. A budget deficit results when outlays exceed revenues; a budget surplus results when revenues exceed outlays. What is counted as government revenues and outlays, however, depends on the presentation of the federal budget. The federal budget typically is presented in the form of the unified budget, reflecting a unified budget deficit or surplus. The unified budget, or consolidated budget, includes the revenues and outlays of all budget accounts, including federal funds and trust funds, regardless of whether they are designated in law as on-budget or off budget. The unified budget represents a comprehensive picture of the federal government’s financial activities. The federal budget also is presented to distinguish between on-budget and off-budget accounts. That is, the federal budget also presents an on-budget deficit or surplus and an off-budget deficit or surplus. The on-budget deficit or surplus includes all accounts not designated in law as off budget. The off-budget deficit or surplus includes those accounts designated in law as off-budget. Currently, there are three accounts designated in law as off-budget: the Federal Old-Age and Survivors Insurance Trust Fund (Social Security retirement), the Federal Disability Insurance Trust Fund (Social Security disability), and the Postal Service Fund. 
To strengthen control over spending and deficit levels, and to promote more efficient legislative action on budgetary issues, Congress and the president enacted the Balanced Budget and Emergency Deficit Control Act of 1985. A key issue confronting the president and Congress in early 2011 was the need to raise the statutory limit on the public debt by a significant amount to accommodate the persistent, high deficits projected by the Office of Management and Budget (OMB) and the Congressional Budget Office (CBO). The resulting compromise was the Budget Control Act of 2011. The Budget Control Act (BCA) of 2011 contains many elements, including a phased increase in the statutory debt limit amounting to $2.1 trillion in total, a requirement that the House and Senate vote on a balanced-budget constitutional amendment by the end of 2011, and various changes in House and Senate budget procedures. The core elements, however, involve the reinstitution of statutory limits on discretionary spending and a process under which a Joint Select Committee on Deficit Reduction (the “Joint Committee”) is required to develop significant deficit-reduction legislation. As long as the federal government incurs annual deficits and trust funds incur annual surpluses, the public-debt limit must be increased periodically. The frequency of congressional action to raise the debt limit has ranged in the past from several times in one year to once in several years. Clarke and Dawe – Budgets. A Masterclass. The Historical Tables in the president’s budget provide data, covering an extended time period, on receipts, budget authority, outlays, deficits and surpluses, federal debt, and other matters.
import json

from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required

from survey.investigator_configs import *
from survey.forms.users import *
from survey.models.users import UserProfile
from survey.views.custom_decorators import permission_required_for_perm_or_current_user


def _add_error_messages(userform, request, action_str='registered'):
    """Flash a generic failure banner; per-field errors render with the form."""
    error_message = "User not %s. " % action_str
    messages.error(request, error_message + "See errors below.")


def _process_form(userform, request, action_success="registered", redirect_url="/users/new/"):
    """Save a valid form and redirect; on invalid input flash errors and return None."""
    if userform.is_valid():
        userform.save()
        messages.success(request, "User successfully %s." % action_success)
        return HttpResponseRedirect(redirect_url)
    _add_error_messages(userform, request, action_success)
    return None


@login_required
@permission_required('auth.can_view_users')
def new(request):
    """Render the user-creation form; create the user on POST."""
    userform = UserForm()
    response = None
    if request.method == 'POST':
        userform = UserForm(request.POST)
        response = _process_form(userform, request)
    template_variables = {'userform': userform,
                          'country_phone_code': COUNTRY_PHONE_CODE,
                          'action': "/users/new/",
                          'id': "create-user-form",
                          'class': "user-form",
                          'button_label': "Create",
                          'loading_text': "Creating...",
                          'title': 'New User'}
    return response or render(request, 'users/new.html', template_variables)


def check_mobile_number(mobile_number):
    """Return JSON true when the mobile number is still unused."""
    taken = UserProfile.objects.filter(mobile_number=mobile_number).exists()
    return HttpResponse(json.dumps(not taken), content_type="application/json")


def check_user_attribute(**kwargs):
    """Return JSON true when no User matches the given attribute filter."""
    taken = User.objects.filter(**kwargs).exists()
    return HttpResponse(json.dumps(not taken), content_type="application/json")


@permission_required('auth.can_view_users')
def index(request):
    """List users, or answer AJAX uniqueness checks for single attributes."""
    # `in` instead of the Python-2-only QueryDict.has_key() (removed in
    # Python 3); behavior is identical.
    if 'mobile_number' in request.GET:
        return check_mobile_number(request.GET['mobile_number'])
    if 'username' in request.GET:
        return check_user_attribute(username=request.GET['username'])
    if 'email' in request.GET:
        return check_user_attribute(email=request.GET['email'])
    return render(request, 'users/index.html',
                  {'users': User.objects.all(), 'request': request})


@permission_required_for_perm_or_current_user('auth.can_view_users')
def edit(request, user_id):
    """Render the edit form for a user; save changes on POST."""
    user = User.objects.get(pk=user_id)
    initial = {'mobile_number': UserProfile.objects.get(user=user).mobile_number}
    userform = EditUserForm(user=request.user, instance=user, initial=initial)
    response = None
    if request.method == 'POST':
        userform = EditUserForm(data=request.POST, user=request.user,
                                instance=user, initial=initial)
        response = _process_form(userform, request, 'edited',
                                 '/users/' + str(user_id) + '/edit/')
    context_variables = {'userform': userform,
                         'action': '/users/%s/edit/' % user_id,
                         'id': 'edit-user-form',
                         'class': 'user-form',
                         'button_label': 'Save',
                         'loading_text': 'Saving...',
                         'country_phone_code': COUNTRY_PHONE_CODE,
                         'title': 'Edit User'}
    return response or render(request, 'users/new.html', context_variables)


@permission_required('auth.can_view_users')
def show(request, user_id):
    """Show a single user; redirect to the listing when the id is unknown."""
    user = User.objects.filter(id=user_id)
    if not user.exists():
        messages.error(request, "User not found.")
        return HttpResponseRedirect("/users/")
    return render(request, 'users/show.html',
                  {'the_user': user[0], 'cancel_url': '/users/'})


def _set_is_active(user, status, request):
    """Persist the activation flag and flash a success message."""
    action_str = "re-" if status else "de"
    user.is_active = status
    user.save()
    messages.success(request, "User %s successfully %sactivated." % (user.username, action_str))


def _activate(request, user_id, status):
    """Set a user's is_active flag to `status`, then return to the listing."""
    user = User.objects.filter(id=user_id)
    if not user.exists():
        messages.error(request, "User not found.")
        return HttpResponseRedirect("/users/")
    user = user[0]
    # != instead of `is not`: the flag may come back as 0/1 from the DB,
    # and an identity test against a bool would then misfire.
    if user.is_active != status:
        _set_is_active(user, status, request)
    return HttpResponseRedirect("/users/")


@permission_required('auth.can_view_users')
def deactivate(request, user_id):
    return _activate(request, user_id, status=False)


@permission_required('auth.can_view_users')
def activate(request, user_id):
    return _activate(request, user_id, status=True)
Each Ruby Red Polka Dots Hat is ruby red and features fun white polka dots and a soft elastic band to keep the hat in place. Ruby Red Polka Dots Hats are a great party favour to get everyone to coordinate with the decorations for a group photo. Made from strong cardboard, there are 8 cone hats per pack.
import inspect
import os
import runpy

import specs

# -------------------------------------------------------------------------------------------------
def fixpath(_path, _opts):
    """Return _path normalized and joined onto the configured path prefix."""
    return os.path.normpath(os.path.join(_opts.pathPrefix, _path))

# -------------------------------------------------------------------------------------------------
def include(_path):
    """Execute the spec file at _path and return its filtered globals.

    Runs the file with its own directory as the working directory; if the
    file cannot be read, retries with an ".ab" extension appended.  The
    original working directory is always restored.
    """
    cwd = os.getcwd()
    try:
        os.chdir(os.path.dirname(os.path.abspath(_path)))
        return _includeInternal(_path)
    except IOError:
        return _includeInternal(_path + ".ab")
    finally:
        os.chdir(cwd)

# -------------------------------------------------------------------------------------------------
def _includeInternal(_path):
    """Run the file with the project-group globals injected.

    Returns the module dict minus the classes that were injected as
    initial globals (so only definitions made by the file remain).
    """
    initGlobals = specs.getProjectGroupDict()
    initGlobals['include'] = include
    # .values()/.items() instead of the Python-2-only itervalues()/
    # iteritems(), so the module also runs on Python 3.
    ignoreClasses = [c for c in initGlobals.values() if inspect.isclass(c)]
    mod = runpy.run_path(_path, initGlobals)
    return {k: v for k, v in mod.items()
            if not inspect.isclass(v) or v not in ignoreClasses}
The term “Scottish Documentary Institute”, “SDI Outreach”, “SDI”, “us” or “we” refers to the owner of the website with offices located at eca, 74 Lauriston Place, Edinburgh EH3 9DF, Scotland, UK. Our company registration number is SC481435, Edinburgh. The term “you” refers to the user or viewer of our website. You may not create a link to this website from another website or document without Scottish Documentary Institute’s prior written consent.
"""Jinja2 template tags and filters for Grow pods (Python 2 codebase)."""

from datetime import datetime
from grow.common import utils
from grow.pods import locales as locales_lib
from grow.pods.collectionz import collectionz
import collections
import csv as csv_lib
import itertools
import jinja2
import json as json_lib
import markdown
import re


@utils.memoize_tag
def categories(collection=None, collections=None, reverse=None, order_by=None,
               _pod=None):
    """Group a collection's docs by category.

    `collection` may be a Collection instance or a collection path string;
    anything else raises ValueError.  Yields (category_name, docs_iterator)
    pairs in the collection's declared category order.
    """
    if isinstance(collection, collectionz.Collection):
        collection = collection
    elif isinstance(collection, basestring):
        collection = _pod.get_collection(collection)
    else:
        text = '{} must be a Collection instance or a collection path, found: {}.'
        raise ValueError(text.format(collection, type(collection)))
    category_list = collection.list_categories()
    # Sort key: position of the doc's category in the declared order.
    def order_func(doc):
        return category_list.index(doc.category)
    docs = [doc for doc in collection.list_docs(reverse=reverse)]
    # groupby needs its input sorted by the same key.
    docs = sorted(docs, key=order_func)
    items = itertools.groupby(docs, key=order_func)
    return ((category_list[index], pages) for index, pages in items)


def LocaleIterator(iterator, locale):
    """Yield the header line plus only the lines starting with `locale`."""
    locale = str(locale)
    for i, line in enumerate(iterator):
        if i == 0 or line.startswith(locale):
            yield line


# Sentinel: distinguishes "no locale given" from an explicit None.
_no_locale = '__no_locale'


@utils.memoize_tag
def csv(path, locale=_no_locale, _pod=None):
    """Read a pod CSV file into a list of dicts (one per row).

    When a locale is given, only the header row and rows whose first
    column matches the locale are read.  Cell text is decoded as UTF-8.
    """
    fp = _pod.open_file(path)
    if locale is not _no_locale:
        fp = LocaleIterator(fp, locale=locale)
    rows = []
    for row in csv_lib.DictReader(fp):
        data = {}
        for header, cell in row.iteritems():
            if cell is None:
                cell = ''
            data[header] = cell.decode('utf-8')
        rows.append(data)
    return rows


@utils.memoize_tag
def docs(collection, locale=None, order_by=None, _pod=None):
    """Return the docs of a collection, optionally filtered/ordered."""
    collection = _pod.get_collection(collection)
    return collection.list_docs(locale=locale, order_by=order_by)


@utils.memoize_tag
def statics(pod_path, locale=None, _pod=None):
    """Return the static files under a pod path."""
    return _pod.list_statics(pod_path, locale=locale)


def markdown_filter(value):
    """Render Markdown text to HTML.

    NOTE(review): on Python 2, `unicode.decode('utf-8')` first ASCII-encodes
    the text and so raises UnicodeEncodeError for non-ASCII input; the
    except clause then renders the value as-is.  Looks like a deliberate
    best-effort fallback — confirm.
    """
    try:
        if isinstance(value, unicode):
            value = value.decode('utf-8')
        return markdown.markdown(value)
    except UnicodeEncodeError:
        return markdown.markdown(value)


# Characters treated as word separators when building slugs.
_slug_regex = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')


def slug_filter(value):
    """Lowercase `value` and join its words with hyphens (URL slug)."""
    result = []
    for word in _slug_regex.split(value.lower()):
        if word:
            result.append(word)
    return unicode(u'-'.join(result))


@utils.memoize_tag
def static(path, locale=None, _pod=None):
    """Return a single static file object from the pod."""
    return _pod.get_static(path, locale=locale)


class Menu(object):
    """Nested, ordered mapping of docs built from their parent links."""

    def __init__(self):
        # doc -> OrderedDict of child docs (recursively).
        self.items = collections.OrderedDict()

    def build(self, nodes):
        """Populate the tree from a flat list of docs with .parent attrs."""
        self._recursive_build(self.items, None, nodes)

    def iteritems(self):
        return self.items.iteritems()

    def _recursive_build(self, tree, parent, nodes):
        # Attach every node whose parent is `parent`, then recurse.
        children = [n for n in nodes if n.parent == parent]
        for child in children:
            tree[child] = collections.OrderedDict()
            self._recursive_build(tree[child], child, nodes)


@utils.memoize_tag
def nav(collection=None, locale=None, _pod=None):
    """Build a Menu from the docs of /content/<collection>, ordered by 'order'."""
    collection_obj = _pod.get_collection('/content/' + collection)
    results = collection_obj.list_docs(order_by='order', locale=locale)
    menu = Menu()
    menu.build(results)
    return menu


@utils.memoize_tag
def breadcrumb(doc, _pod=None):
    # Not implemented yet.
    pass


@utils.memoize_tag
def url(pod_path, locale=None, _pod=None):
    """Return the serving URL of the doc at pod_path."""
    doc = _pod.get_doc(pod_path, locale=locale)
    return doc.url


@utils.memoize_tag
def get_doc(pod_path, locale=None, _pod=None):
    """Return the doc at pod_path."""
    return _pod.get_doc(pod_path, locale=locale)


@jinja2.contextfilter
def render_filter(ctx, template):
    """Render a template (string or Template) with the current context."""
    if isinstance(template, basestring):
        template = ctx.environment.from_string(template)
    return template.render(ctx)


@jinja2.contextfilter
def parsedatetime_filter(ctx, date_string, string_format):
    """Parse a date string with the given strptime format."""
    return datetime.strptime(date_string, string_format)


@jinja2.contextfilter
def deeptrans(ctx, obj):
    """Recursively gettext-translate every string inside a nested structure."""
    return _deep_gettext(ctx, obj)


@jinja2.contextfilter
def jsonify(ctx, obj, *args, **kwargs):
    """Serialize obj to a JSON string."""
    return json_lib.dumps(obj, *args, **kwargs)


def _deep_gettext(ctx, fields):
    """Walk dicts/lists/sets, translating strings via the gettext alias.

    Dicts are rebuilt; lists are mutated in place and returned.  Any other
    top-level type returns None (falls off the end).
    """
    if isinstance(fields, dict):
        new_dct = {}
        for key, val in fields.iteritems():
            if isinstance(val, (dict, list, set)):
                new_dct[key] = _deep_gettext(ctx, val)
            elif isinstance(val, basestring):
                new_dct[key] = _gettext_alias(ctx, val)
            else:
                new_dct[key] = val
        return new_dct
    elif isinstance(fields, (list, set)):
        for i, val in enumerate(fields):
            if isinstance(val, (dict, list, set)):
                fields[i] = _deep_gettext(ctx, val)
            elif isinstance(val, basestring):
                fields[i] = _gettext_alias(ctx, val)
            else:
                fields[i] = val
        return fields


def _gettext_alias(__context, *args, **kwargs):
    """Call the template environment's gettext through the Jinja2 context."""
    return __context.call(__context.resolve('gettext'), *args, **kwargs)


@utils.memoize_tag
def yaml(path, _pod):
    """Parse a pod YAML file and strip Grow's field tags."""
    fields = utils.parse_yaml(_pod.read_file(path), pod=_pod)
    return utils.untag_fields(fields)


@utils.memoize_tag
def json(path, _pod):
    """Load a pod JSON file."""
    fp = _pod.open_file(path)
    return json_lib.load(fp)


def date(datetime_obj=None, _pod=None, **kwargs):
    """Return a datetime: now() by default, or parse a string with `from=`."""
    _from = kwargs.get('from', None)
    if datetime_obj is None:
        datetime_obj = datetime.now()
    elif isinstance(datetime_obj, basestring) and _from is not None:
        datetime_obj = datetime.strptime(datetime_obj, _from)
    return datetime_obj


@utils.memoize_tag
def locales(codes, _pod=None):
    """Parse a list of locale codes into Locale objects."""
    return locales_lib.Locale.parse_codes(codes)
I love your car. Great pic. Finland looks like such an awesome country - it has been on my list of countries to visit for a while. Maybe I can convince the wife for this summer! Washed her up and finally pics after H&R and OEM Carbon Spoiler. Last edited by ZephyrAMG; 03-29-2016 at 01:14 PM. Wow, nice to see u here again Jycke! Hope ur health is good now. Very thrilled to have u back buddy. Did u ended up selling ur Estate? Did u have a Pre-FL before & now a FL? I don't remember. As for ur HRE specs, if they are the same as ur old estate.. I'm pretty sure they are 19x8.5 +41 & 19x9.5 +50. Last edited by Roswell; 03-31-2016 at 08:02 AM. Nice car Zeph! Ur car looks clean & the stance is perfect, u went a long way from stock. Last edited by Roswell; 03-31-2016 at 08:00 AM. Hey man, thanks, appreciate it I've been lurking in the shadows and have now reared my ugly head once again. No, never sold it. Not for the lack of trying but nobody wanted it. Could be that it was slightly overpriced with the thought "if it goes for this price i'll sell, but happy if it doesn't sell as well". No, this car isn't going anywhere. I actually tried out the S205 estate but couldn't justify the cost of upgrading because i honestly enjoy the 6.2 too much. ^^ I agree on the 6.2L vs the new w205 engine. Well good choice on keeping it. Yes Vossen in the new color called merlot. They are 20 inch! Out driving around...then yep, sunset.
# DaVinci options file: re-strip 2012 simulation with Stripping21 and write
# DecayTreeTuple / MCDecayTreeTuple ntuples for the decay
# Lambda_b0 -> p+ K- (eta_prime -> pi- pi+ gamma).
from Gaudi.Configuration import *
from Configurables import DaVinci
#from Configurables import AlgTool
from Configurables import GaudiSequencer

# Sequencer that will hold the tuple-making algorithms.
MySequencer = GaudiSequencer('Sequence')

# Detector description and conditions tags for 2012 simulation.
DaVinci.DDDBtag='dddb-20120831'
DaVinci.CondDBtag='sim-20121025-vc-md100'
simulation=True

#only for mdst
#from Configurables import EventNodeKiller
#eventNodeKiller = EventNodeKiller('DAQkiller')
#eventNodeKiller.Nodes = ['DAQ','pRec']
#MySequencer.Members+=[eventNodeKiller]

#################################################################
#Rerun with stripping21 applied
if simulation:
    from StrippingConf.Configuration import StrippingConf, StrippingStream
    from StrippingSettings.Utils import strippingConfiguration
    from StrippingArchive.Utils import buildStreams
    from StrippingArchive import strippingArchive
    from Configurables import PhysConf
    # Redo the calorimeter reconstruction before re-stripping.
    PhysConf().CaloReProcessing=True
    stripping="stripping21"
    config=strippingConfiguration(stripping)
    archive=strippingArchive(stripping)
    streams=buildStreams(stripping=config,archive=archive)
    # Build a custom stream containing only the signal stripping line.
    MyStream= StrippingStream("MyStream")
    MyLines= ["StrippingB2XEtaLb2pKetapLine"]
    for stream in streams:
        for line in stream.lines:
            if line.name() in MyLines:
                MyStream.appendLines( [ line ])
    # Reject events flagged as problematic by the processing status.
    from Configurables import ProcStatusCheck
    filterBadEvents=ProcStatusCheck()
    sc=StrippingConf( Streams= [ MyStream ],
                      MaxCandidates = 2000,
                      AcceptBadEvents = False,
                      BadEventSelection = filterBadEvents)
    DaVinci().appendToMainSequence([sc.sequence()])

##################Creating NTuples#####################################
from Configurables import DecayTreeTuple
from Configurables import TupleToolL0Calo
from DecayTreeTuple.Configuration import *

# Candidate tuple; '^' marks the daughters to be tupled.
tuple=DecayTreeTuple()
tuple.Decay="[Lambda_b0 -> ^p+ ^K- ^(eta_prime -> ^pi- ^pi+ ^gamma)]CC"
tuple.Branches={"Lambda_b0":"[Lambda_b0 -> p+ K- (eta_prime -> pi- pi+ gamma)]CC"}
tuple.Inputs=["Phys/B2XEtaLb2pKetapLine/Particles"]
# L0 calorimeter information for the HCAL trigger clusters.
tuple.addTool(TupleToolL0Calo())
tuple.TupleToolL0Calo.TriggerClusterLocation="/Event/Trig/L0/Calo"
tuple.TupleToolL0Calo.WhichCalo="HCAL"
tuple.ToolList += [ "TupleToolGeometry"
                    , "TupleToolDira"
                    , "TupleToolAngles"
#                    , "TupleToolL0Calo"
                    , "TupleToolPid"
                    , "TupleToolKinematic"
                    , "TupleToolPropertime"
                    , "TupleToolPrimaries"
                    , "TupleToolEventInfo"
                    , "TupleToolTrackInfo"
                    , "TupleToolVtxIsoln"
                    , "TupleToolPhotonInfo"
                    , "TupleToolMCTruth"
                    , "TupleToolMCBackgroundInfo"
#                    , "MCTupleTOolHierachy"
                    , "TupleToolCaloHypo"
                    , "TupleToolTrackIsolation"
                    #, "TupleToolTagging" not used in microdst
                    ]
#from Configurables import TupleToolMCTruth
#from TupleToolMCTruth.Configuration import *
#tuple.addTool(TupleToolMCTruth,name="TruthM")
#tuple.ToolList+= [ "TupleToolMCTruth/TruthM"]
#tuple.TruthM.ToolList = ["MCTupleToolHierachy/Hierachy"]
#tuple.TruthM.addTool(MCTupleToolHierachy,name="Hierachy")
#tuple.TupleToolMCTruth.addTool(MCTupleToolKinematic,name="MCTupleToolKinematic")
#tuple.TupleToolMCTruth.addTool(MCTupleToolHierachy,name="MCTupleToolHierachy")
#tuple.TupleToolMCTruth.addTool(MCTupleToolPID,name="MCTupleToolPID")

#####Look at adding branchesss##############
# Per-branch configuration for the Lambda_b0 head of the decay.
tuple.addTool(TupleToolDecay,name="Lambda_b0")

# DecayTreeFitter refit with a primary-vertex constraint and mass
# constraints on the daughters.
from Configurables import TupleToolDecayTreeFitter
tuple.Lambda_b0.addTool(TupleToolDecayTreeFitter("PVFit"))
tuple.Lambda_b0.PVFit.Verbose=True
tuple.Lambda_b0.PVFit.constrainToOriginVertex=True
tuple.Lambda_b0.PVFit.daughtersToConstrain = ["p+","K-","eta_prime"]
tuple.Lambda_b0.ToolList+=["TupleToolDecayTreeFitter/PVFit"]

# TISTOS: trigger decisions to record for the Lambda_b0 candidate.
from Configurables import TupleToolTISTOS
tistos=tuple.Lambda_b0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS")
tistos.VerboseL0=True
tistos.VerboseHlt1=True
tistos.VerboseHlt2=True
tistos.TriggerList=["L0PhotonDecision",
                    "L0ElectronDecision",
                    "Hlt1TrackPhotonDecision",
                    "Hlt1TrackAllL0Decision",
                    "Hlt1TrackMuonDecision",
                    "Hlt1TrackForwardPassThroughDecision",
                    "Hlt1TrackForwardPassThroughLooseDecision",
                    "Hlt1SingleElectronNoIPDecision",
                    "L0HadronDecision",
                    "L0LocalPi0Decision",
                    "L0GlobalPi0Decision",
                    "L0MuonDecision",
                    "Hlt2Topo2BodyBBDTDecision",
                    "Hlt2Topo3BodyBBDTDecision",
                    "Hlt2Topo4BodyBBDTDecision",
                    "Hlt2RadiativeTopoTrackTOSDecision",
                    "Hlt2RadiativeTopoPhotonL0Decision",
                    "Hlt2TopoRad2BodyBBDTDecision",
                    "Hlt2TopoRad2plus1BodyBBDTDecision",
                    "Hlt2Topo2BodySimpleDecision",
                    "Hlt2Topo3BodySimpleDecision",
                    "Hlt2Topo4BodySimpleDecision"]

# Per-event tuple with general event information.
# NOTE(review): EventTuple is assumed to come from the
# `DecayTreeTuple.Configuration` wildcard import above — confirm.
etuple=EventTuple()
etuple.ToolList=["TupleToolEventInfo"]

# Generator-level (MC truth) tuple for the same decay.
from Configurables import MCDecayTreeTuple
mctuple=MCDecayTreeTuple("mctuple")
mctuple.ToolList+=["MCTupleToolKinematic","MCTupleToolReconstructed","MCTupleToolHierarchy","MCTupleToolDecayType","MCTupleToolPID"]
mctuple.Decay="[Lambda_b0 -> ^(p+) ^(K-) ^(eta_prime -> ^pi- ^pi+ ^gamma)]CC"

# Run the three tuple algorithms in sequence.
MySequencer.Members.append(etuple)
MySequencer.Members.append(tuple)
MySequencer.Members.append(mctuple)

# Global DaVinci job settings.
DaVinci().InputType='DST'
DaVinci().UserAlgorithms+=[MySequencer]
DaVinci().TupleFile="Output.root"
DaVinci().HistogramFile="histos.root"
DaVinci().DataType='2012'
DaVinci().EvtMax=-1
DaVinci().PrintFreq=1000
DaVinci().MoniSequence=[tuple]
DaVinci().Simulation=simulation
Joy of Motion announces the 1st annual FEAR FESTIVAL, featuring OMINOUS, the dansical of terror, choreographed and directed by DEREK BROWN. All persons auditioning must have a valid ID and should prepare a one-minute routine that showcases their specific dance style. Rehearsals will be held on Saturdays from July 14-October 27 from 2:30-4:30 PM at Joy of Motion Dance Center, 1333 H Street NE.
""" Copyright (C) 2015 Quinn D Granfor <spootdev@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2, as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License version 2 for more details. You should have received a copy of the GNU General Public License version 2 along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ import time from kivy.utils import platform # import the pyserial library for use in rs232c communications if platform != 'android': import serial class CommonSerial: """ Class for interfacing via serial devices """ def __init__(self, dev_port='/dev/ttyUSB1', dev_baudrate=9600, dev_parity=serial.PARITY_ODD, dev_stopbits=serial.STOPBITS_TWO, dev_bytesize=serial.SEVENBITS): """ Open serial device for read/write """ self.serial_device = serial.Serial(port=dev_port, baudrate=dev_baudrate, parity=dev_parity, stopbits=dev_stopbits, bytesize=dev_bytesize ) self.serial_device.open() self.serial_device.isOpen() def com_serial_read_device(self): """ Read data from serial device """ time.sleep(1) read_data = '' while self.serial_device.inWaiting() > 0: read_data += self.serial_device.read(1) return read_data def com_serial_close_device(self): """ Close serial device """ self.serial_device.close() def com_serial_write_device(self, message): """ Send data to serial device """ self.serial_device.write(message)
Nobody wants bad things to happen - but just because you wish for peace, doesn’t mean that you’re going to get peace. Preparing for adversity isn’t a bad idea. It is easy to get caught up in your own thoughts, and your own ideas. Overthinking is common and you see it everywhere. You might be overly optimistic or just as easily overly pessimistic. Like all things, you need to figure out where you stand and work with who you are. Be wary of being too comfortable. Prepare for different scenarios and equip yourself with the ability to adapt. There are only so many things that you can fully control. Learn to work with the world around you.
# Tutorial script: iterables and the iterator protocol.
# Fixed to run on Python 3 as well as Python 2: the original defined
# py2-only next() methods and called .next() directly, even though its
# own comments documented the py3 __next__ protocol.  Iterator classes
# now define __next__ with a `next` alias, and call sites use the
# next() builtin, which works on both versions.

# for loop for iterating a list
print("iterate a list")
for i in [1, 2, 3, 4]:
    print(i)

# for loop for iterating a string
print("iterate a string")
for c in "iteratable":
    print(c)

# for loop for iterating a dictionary (yields its keys)
print("iterate a dictionary")
for k in {"a": 1, "b": 2}:
    print(k)

# for loop for iterating a file line by line
print("iterate a file")
for line in open("input.txt"):
    print(line)

# list, string, dict, file stream are called iterable objects in python.
# built-in functions use these iterables
s = ",".join(["a", "b", "c"])
print(s)
d = ",".join({"x": 1, "y": 2})
print(d)
print(list("iterable"))
print(list({"x": 1, "y": 2}))

# the next() builtin calls __next__ on Python 3 (next on Python 2)
x = iter([1, 2, 3])
print(next(x))
print(next(x))
print(next(x))
# print(next(x))  # raises StopIteration


# good interview question
# implement your own xrange function
class yrange:
    def __init__(self, n):
        self.n = n
        self.i = 0

    def __iter__(self):
        # __iter__ method makes an object iterable
        # iter function calls __iter__ method behind the scenes
        # the return value of __iter__ is an iterator
        # it should implement __next__() and raise StopIteration
        # when there are no more elements.
        return self

    def __next__(self):
        if self.i < self.n:
            i = self.i
            self.i += 1
            return i
        else:
            raise StopIteration()

    # Python 2 spells the protocol method next()
    next = __next__


# lets try yrange
y = yrange(3)
print(next(y))
print(next(y))
print(next(y))
# print(next(y))  # raises StopIteration
print(list(yrange(5)))
print(sum(yrange(5)))


# good interview question
# implement reverse iterator
class rev_iter:
    def __init__(self, iterable):
        self.i = len(iterable) - 1
        self.iterable = iterable

    def __iter__(self):
        return self

    def __next__(self):
        if self.i >= 0:
            i = self.iterable[self.i]
            self.i -= 1
            return i
        else:
            raise StopIteration()

    # Python 2 compatibility
    next = __next__


r = rev_iter([1, 2, 3, 4])
print(next(r))
print(next(r))
print(next(r))
print(next(r))
# print(next(r))  # StopIteration


# THE ITERATION PROTOCOL
# in python, the iterator objects are required to implement
# these two methods:
# __iter__ returns the iterator object itself.
# __next__ returns next value from the iterator. if there is no
# more items, it should raise StopIteration exception.

# lets implement a Counter iteratable class
class Counter(object):
    def __init__(self, low, high):
        self.current = low
        self.high = high

    def __iter__(self):
        return self

    def __next__(self):
        if self.current <= self.high:
            i = self.current
            self.current += 1
            return i
        else:
            raise StopIteration()

    # Python 2 compatibility
    next = __next__


c = Counter(5, 10)
for i in c:
    print(i)
Located in Madison, South Dakota- the Non- Wing Sprint Cars of Lake County Speedway put on a show on Saturday nights. Bret Mellenberndt (pictured) is the 2006 Champion. 2006- Bret Mellenberndt 2007- ??
class MagicDictionary(object):
    """Stores a word list and answers whether a query word matches any
    stored word after changing exactly one character (LeetCode 676)."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # Words bucketed by length: only equal-length words can match.
        self.table = {}

    def buildDict(self, dict):
        """
        Build a dictionary through a list of words
        :type dict: List[str]
        :rtype: void
        """
        for word in dict:
            self.table.setdefault(len(word), []).append(word)

    def search(self, word):
        """
        Returns if there is any word in the trie that equals to the given
        word after modifying exactly one character
        :type word: str
        :rtype: bool
        """
        length = len(word)
        candidates = self.table.get(length, [])
        return any(self.cmp(candidate, word, length) for candidate in candidates)

    def cmp(self, p, q, n):
        # True iff p and q (both of length n) differ in exactly one position;
        # identical words therefore do NOT match.
        mismatches = 0
        for a, b in zip(p, q):
            if a != b:
                mismatches += 1
                if mismatches > 1:
                    return False
        return mismatches == 1


# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word)
AT CSCI, PEOPLE COME FIRST. As a nationally-recognized consulting, enterprise IT, and financial management firm, we live to serve our clients in federal, state, and commercial markets. Offering extensive capabilities in project life-cycle management support and analysis. Improving technical environments through client partnerships to ensure the highest level of project success. Supporting entities in achieving their financial transparency and audit readiness, remediation, and sustainment goals within accelerated time frames. We believe people do the best work when they truly love what they do. That’s why we empower and encourage each and every member of the CSCI team to grow, learn, and succeed here. CSCI is all about teamwork, accessibility, support, and fun. We connect through team-building, transparent communication, and collaborative work environments.
#!/usr/bin/env python
from numpy import *
from pylab import plot, show, legend, fill_between, figure, subplot, title

def regression_gaussian_process_modelselection (n=100, n_test=100, \
        x_range=5, x_range_test=10, noise_var=0.4):
    """Demo: Gaussian Process regression on a noisy 1-D sine wave, plotted
    before and after gradient-based model selection of the GP parameters.

    n          -- number of training points
    n_test     -- number of evenly spaced test points
    x_range    -- training inputs drawn uniformly from [0, x_range)
    x_range_test -- test inputs span [0, x_range_test)
    noise_var  -- std-dev multiplier of the Gaussian noise on training targets
    """
    from modshogun import RealFeatures, RegressionLabels
    from modshogun import GaussianKernel
    from modshogun import GradientModelSelection, ModelSelectionParameters
    from modshogun import GaussianLikelihood, ZeroMean, \
        ExactInferenceMethod, GaussianProcessRegression, GradientCriterion, \
        GradientEvaluation

    # easy regression data: one dimensional noisy sine wave
    X_train = random.rand(1,n)*x_range
    X_test = array([[float(i)/n_test*x_range_test for i in range(n_test)]])
    y_test = sin(X_test)
    y_train = sin(X_train)+random.randn(n)*noise_var

    # shogun representation
    labels = RegressionLabels(y_train[0])
    feats_train = RealFeatures(X_train)
    feats_test = RealFeatures(X_test)

    # GP specification: Gaussian kernel + zero mean + Gaussian likelihood,
    # combined under exact inference
    kernel = GaussianKernel(10, 0.05)
    mean = ZeroMean()
    likelihood = GaussianLikelihood(0.8)
    inf = ExactInferenceMethod(kernel, feats_train, mean, labels, likelihood)
    inf.set_scale(2.5)

    gp = GaussianProcessRegression(inf)

    # predictions with the initial (hand-picked) parameters
    means = gp.get_mean_vector(feats_test)
    variances = gp.get_variance_vector(feats_test)

    # plot results
    figure()

    subplot(2, 1, 1)
    title('Initial parameter\'s values')

    plot(X_train[0], y_train[0], 'bx') # training observations

    plot(X_test[0], y_test[0], 'g-') # ground truth of test
    plot(X_test[0], means, 'r-') # mean predictions of test

    # 95% confidence band around the predictive mean
    fill_between(X_test[0], means-1.96*sqrt(variances),
                 means+1.96*sqrt(variances), color='grey')

    legend(["training", "ground truth", "mean predictions"])

    # evaluate our inference method for its derivatives
    grad = GradientEvaluation(gp, feats_train, labels, GradientCriterion(), False)
    grad.set_function(inf)

    # handles all of the above structures in memory
    grad_search = GradientModelSelection(grad)

    # search for best parameters
    best_combination = grad_search.select_model(True)

    # outputs all result and information
    best_combination.apply_to_machine(gp)

    # re-predict with the selected parameters
    means = gp.get_mean_vector(feats_test)
    variances = gp.get_variance_vector(feats_test)

    # plot results
    subplot(2, 1, 2)
    title('Selected by gradient search parameter\'s values')

    plot(X_train[0], y_train[0], 'bx') # training observations

    plot(X_test[0], y_test[0], 'g-') # ground truth of test
    plot(X_test[0], means, 'r-') # mean predictions of test

    fill_between(X_test[0], means-1.96*sqrt(variances),
                 means+1.96*sqrt(variances), color='grey')

    legend(["training", "ground truth", "mean predictions"])

    show()

if __name__=='__main__':
    regression_gaussian_process_modelselection()
I want to share my current eye and lip make up remover, this is Maybelline Make Up Remover Eye & Lip. This is the second eye makeup which i tried, i've tried Revlon's before this one. It claims that the product efficiently removes all waterproof make up. I've bought it IDR 28.600 or around less than $3. Efficient & Easy : This formula instantly removes all eye & lip makeup, even waterproof and non-transfer. Gentle : Formulated for sensitive eyes it is specially very gentle for the delicate skin around eyes. To Use : Shake well before use. Soak a cotton pad with the lotion and apply on your eyes or lips. Follow facial cleansing with Maybelline foam or Cleansing milk. Aqua/water, cyclopentasiloxane, isohexadecane, isopropyl palmitate, panthenol, dipotassium phosphate, poloxamer 184, polyaminopropyl biguanide, potassium phosphate, sodium chloride, Cl 61565 / green 6. It comes with two size, i bought the small ones which contains 40ml. To remove my eye makeup, i only use 1 wet cotton pad which already soaked by this product, economizes enough right? after rubbing it for a couple times, it works really well, it removes all my eye makeup, even for waterproof eye liner and waterproof mascara. Overall, i love this product, i haven't found any eye makeup product which suitable for me, except this one.. I hope you find this post usefull.. ini emang bagus buat hapus makeup, sayang finishingnya bikin muka greasy. Kalo mau yang macam Water dan ringan diwajah namun kuat hapus Makeup, cobain Silkygirl atau Face On face deh . .
#! /usr/bin/env python
################################################################################
#
# income_stats.py
#
# Script to extract income information specific to individuals 55 and older from
# the ACS archive containing it and to output the same on a per SRA and zipcode
# basis for the SD county
#
# Dependencies:
#
#   Data files must be present in the current working directory
#
# Usage:
#
#   python income_stats.py
#
# NOTE(review): this is Python 2 code ("except Exception, e" syntax and
# dict.iteritems() below) and uses deprecated pandas APIs (set_value) —
# it will not run unmodified on Python 3 / modern pandas.
#

import sys
import os
import shutil
import re
import pandas as pd
import numpy as np
import pprint

from zipfile import ZipFile
from collections import defaultdict, OrderedDict

import sdpyutils as sdpy

#
# GLOBALS
#

# current working directory
CWD = os.getcwd()
TMPDIR = os.path.join(CWD,"tmp")

# data file(s)
VERSION = "2015"
DATAZIP = "aff_B17024_sd_county_" + VERSION + ".zip"

# output file(s)
OUT_CSV1 = "B17024_estimates_sd_county_55_over_" + VERSION + ".csv"
OUT_CSV2 = "low_income_data_sd_county_" + VERSION + ".csv"

#
# Removes the temp directory and its contents
#
def cleanup(doCleanup):
    # Cleanup the temp directory only if we created it here
    if doCleanup:
        if os.path.exists(TMPDIR):
            shutil.rmtree("tmp")
            doCleanup = False

#
# processMetaData
#
# extracts information from the specified metadata file and returns it as a
# data frame
#
def processMetaData(metafile):
    # Header-less CSV: column 0 is the field code, column 1 its description.
    csvdata = pd.read_csv(metafile,header=None)
    #print csvdata
    print("parsing file: " + metafile)
    return csvdata

#
# modifyDataLabels
#
# function to modify data lables for the specified target using values in
# dict_fields
#
# Returns:
#  ratio_dict - dictionary of modified labels grouped by ratio range
#  age_dict   - dictionary of modified labels grouped by age range
#  modifiedLabels - full list of modified labels (same ordering as that of
#                   targetLabels)
#
def modifyDataLabels(targetLabels, df_fields):
    # convert to dictionary for easier lookup
    dict_fields = df_fields.set_index(0).T.to_dict('list')

    # generate the regex instance for the specified pattern
    # e.g. "...; 55 to 64 years:" with an optional " - <ratio>" tail
    prefix = " - "
    regex = re.compile('(.*); (.*) years(.*):(.*)')

    # generate replacement labels for targeted labels using metadata
    # in df_fields
    modifiedLabels = []
    # FIX ME: need an ordered defualt dict; for now use ordered dict only
    ratio_dict = OrderedDict(); age_dict = OrderedDict()

    # targetLabels[0] is the geo-id column — skip it
    for name in targetLabels[1:]:
        if name in dict_fields:
            m = regex.match(dict_fields[name][0])
            ratioTag = ""; ageTag = ""
            if m.group(4).startswith(prefix):
                ratioTag = m.group(4)[len(prefix):]
            else:
                # no ratio suffix means this is the age-group total column
                ratioTag = "Total"
            ageTag = m.group(2) + m.group(3)
            label = ratioTag + " (" + ageTag + ")"
            #print (name + ": " + label)
            if ageTag in age_dict:
                age_dict[ageTag].append(label)
            else:
                age_dict[ageTag] = [label]
            if ratioTag in ratio_dict:
                ratio_dict[ratioTag].append(label)
            else:
                ratio_dict[ratioTag] = [label]
            modifiedLabels.append(label)
        else:
            # unknown field code: keep the original label unchanged
            modifiedLabels.append(name)

    return ratio_dict, age_dict, modifiedLabels

#
# addSRAaggregates
#
# aggregates per zipcode/ZCTA data and populates the unique entry per SRA with
# the aggreagated values (in the specified data frame) and returns the modified
# data frame
#
# Note: this requires that data be in a specific format (see df_geoids dataframe)
#
def addSRAaggregates(df,targetCols):
    for name, group in df.groupby('SRA'):
        # the last row of each SRA group receives the group totals
        idx = group.last_valid_index()
        #print df.loc[[idx]]
        for col in targetCols:
            # NOTE(review): DataFrame.set_value is removed in pandas >= 1.0;
            # .at[idx, col] is the modern equivalent.
            df.set_value(idx,col,group[col].sum())
    return df

#
# computeLowIncomeData
#
# aggregates data for all ratios below 2.00 for all age groups and returns
# the result in a new data frame
#
def computeLowIncomeData(df_incomes,df_geoids,ratio_dict,age_dict):
    # low income is defined as 200% (or below) of the federal poverty level
    # i.e.: the income to poverty level ratio under 2.0
    LOW_INCOME_RATIO_TRESH = "1.85 to 1.99"

    geoCols = df_geoids.columns.tolist()
    df = df_incomes.iloc[:,len(geoCols):]
    # NOTE(review): this immediately re-assigns df from df_incomes,
    # discarding the iloc slice computed on the previous line — looks
    # unintentional; confirm which frame is meant to be used below.
    df = df_incomes.reset_index(drop=True)

    df_sum_list = []
    cols = []
    # Python 2 only: dict.iteritems()
    for age_group, colnames in age_dict.iteritems():
        #print(str(age_group) + ": " + str(colnames))
        try:
            # locate the threshold ratio column within this age group
            idx = [i for i, s in enumerate(colnames) if LOW_INCOME_RATIO_TRESH in s]
            # sum all ratio columns up to and including the threshold
            # (colnames[0] is the age-group total, hence the 1: slice)
            df_sum = df[colnames[1:(idx[0]+1)]].sum(axis=1)
            df_sum_list.append(df_sum)
        except Exception, e:
            # threshold column missing for this age group: contribute zeros
            df_sum = pd.DataFrame(columns=[age_group],
                                  data=np.zeros(shape=(len(df_geoids.index),1)))
            df_sum_list.append(df_sum)
        cols.append(age_group + " (Low Income)")

    df1 = pd.concat(df_sum_list,axis=1)
    df1.columns = cols
    # cross-age-group rollups; cols[1:] assumes the first entry is 55-64
    df1["55 and Over (Low Income)"] = df1[cols].sum(axis=1)
    df1["65 and Over (Low Income)"] = df1[cols[1:]].sum(axis=1)

    li_df = pd.concat([df_geoids,df1],axis=1)
    li_df = addSRAaggregates(li_df,df1.columns.tolist())
    #print li_df
    return li_df

#
# processData
#
# extracts relevant information from the specified data file and carries out
# transformations to generate income data for age groups 55 and over as well
# for low income individuals 55 and over on a per ZCTA basis
#
# results are written to CSV files specified by OUT_CSV{1,2}
#
def processData(df_fields,datafile):
    # index of GEO.id2 which contains ZCTA as numbers
    COL_ZCTA_IDX = 1
    COL_ZCTA = 'GEO.id2'
    # this is the first field that holds income info for 55+ age groups
    START_COL = 'HD01_VD93'

    # extract only data for income estimates for 55 and over categories
    startIndex = df_fields[df_fields[0] == START_COL].index.tolist()[0]
    endIndex = len(df_fields) - 1
    # print("si: " + str(startIndex) + " ei: " + str(endIndex))

    l = df_fields[0].tolist()
    # we skip over cols that contain margins of error (i.e.: every other col)
    cols = [l[COL_ZCTA_IDX]] + l[startIndex:endIndex:2]

    csvdata = pd.read_csv(datafile,skipinitialspace=True,usecols=cols)
    #print csvdata.head()
    print("parsing data file: " + datafile)

    df_geoids = sdpy.createGeoidsData()
    geoCols = df_geoids.columns.tolist()

    # add single level col headers with age and ratio tags
    ratio_dict, age_dict, modifiedCols = modifyDataLabels(cols,df_fields)

    # csvdata[1:] skips the human-readable header row present in AFF exports
    out_df = pd.merge(left=df_geoids,right=csvdata[1:],left_on='ZCTA',
                      right_on=COL_ZCTA,how='left').fillna(0)
    out_df.drop(COL_ZCTA,axis=1,inplace=True)
    out_df.columns = geoCols + modifiedCols

    # values arrive as strings; convert the income columns to numbers
    tmp_df = out_df[modifiedCols].apply(pd.to_numeric)
    out_df = pd.concat([df_geoids,tmp_df],axis=1)
    out_df.columns = geoCols + modifiedCols

    li_df = computeLowIncomeData(tmp_df,df_geoids,ratio_dict,age_dict)
    #print li_df.head()
    li_df.to_csv(OUT_CSV2, index=False)
    print("output: " + OUT_CSV2)

    out_df = addSRAaggregates(out_df,modifiedCols)
    #print out_df.head()
    out_df.to_csv(OUT_CSV1, index=False)
    print("output: " + OUT_CSV1)

################################################################################
#
# main
#
def main():
    # indicates whether to cleanup before exiting the script
    doCleanup = False

    metadataFile = ''; dataFile = ''

    if not os.path.exists(TMPDIR):
        os.makedirs(TMPDIR)
        doCleanup = True

    # unzip the archive
    try:
        zipf = ZipFile(os.path.join(CWD,DATAZIP),'r')
        zipf.extractall(TMPDIR)
        zipf.close()

        # locate the metadata and data CSVs among the extracted files
        for file in os.listdir(TMPDIR):
            if file.endswith("metadata.csv"):
                metadataFile = file
            elif file.endswith("ann.csv"):
                dataFile = file
            else:
                continue

        #print("metadata file: " + metadataFile + " data file: " + dataFile)

        df_fields = processMetaData(os.path.join(TMPDIR,metadataFile))
        processData(df_fields, os.path.join(TMPDIR,dataFile))
    except:
        # NOTE(review): bare except hides the real failure and reports every
        # error as an archive-extraction problem — consider narrowing.
        e = sys.exc_info()[0]
        print("Error: Failed to extract data archive")
        print("Error: " + str(e))
        cleanup(doCleanup)
        exit()

    cleanup(doCleanup)

# end: main

if __name__ == "__main__":
    main()
else:
    # do nothing
    pass
1Navoi State Mining Institute, South street 27-a, 210100 Navoi City, Uzbekistan. 2Karshi of machines of basalt processing plants and insignifi- . al stripping methods are explained by plant . in: energy industry, construction, road and. It is most commonly crushed for use as an aggregate in construction projects. Crushed basalt is used for road base, concrete aggregate, asphalt pavement aggregate Impact crusher with end products in cubic shape,better than the others mine If you need a big capacity of basalt ,a basalt production line would be prefect. Line of products Information material Manual History Surface miners are just one step in a whole process chain but have a vital bauxite, limestone or granite, achieving high degrees of purity in the process. leads the world market in road construction and repair, as well as in mining natural rock and pay minerals. Jan 4, 2012 traditional construction materials and also permit the condi- tions that exclude from mines and open-air quarries around the world, only a few dozen The process requires a single feed line to carry crushed basalt rock into the .. covers of motor roads, takeoff and landing strips and taxiway of airfields. 1.9 Business model for a Continuous Basalt Fibre Plant . .. With the possible mining of special basalt for the production of continuous bas-. alt fibres, a new .. to access it by roads and the location must not be sensitive e.g. not in a national park. The construction, transportation and shipping industries – requiring. Basalt fibres are used in the production of high quality textile fibres, floor tiles, basalt When an animal or plant dies its body can end up being buried by mud or other under high volume roads and is found in uncontaminated construction fill. To ensure that the design development process adequately incorporates the key principles staging addressing buildability, traffic capacity and safety during construction, geotechnical . currently disused Casino-Murwillumbah railway line. .. 
consists of a large open cut pit with vertical basaltic walls which is currently. The construction of the kilns dates back to the early 1880s, a time of great change and was staked out with mining claims and homesteaders quickly moved into the area A halfway house for the stage lines was also built during this period and, The kilns have upper and lower openings typical for this type of processing. Winner Groups mining screening and crushing plant with a simple operation, highly automated machines, high efficiency, large Basalt Crushing Processing. murum, brick earth, bentonite, road metal, slate, marble, stones used for making They include coal, manganese ore, iron ore, bauxite, limestone, powers delegated under section 15 of Mines and Minerals Regulation and Development Act,. 1957 .. processing plant within 3 year from date of grant of lease within State. Oct 21, 2016 Abstract: Mineral processing can become more environmentally sustainable by Limestone Plant I and basalt Plant II rock mining located in the construction Fig. 1 DPIM .. Road speed reduction: As reported by the. Basalt stone processing plant Calcite mining and processing plant Sand is widely used in road construction, real estate development, concrete making etc. Jun 16, 2014 U.S. production of construction aggregates in 2012 was 2.18 billion short The average unit value, which is the f.o.b. at the plant price of a mine and process crushed stone and construction sand and gravel. was limestone and dolomite and 11 percent was construction sand 421 Great Circle Road. Mar 6, 2017 regolith processing, reagent processing, product storage & delivery, power, manufacturing, etc. How can basalt rock be formed to be comparable to concrete as a construction material? and Road Construction . What is the Best Lunabot Regolith Mining Design for the Moon?? . plant on pallet. Cast Basalt Division - Bmw Steels Ltd. like Alumina Ceramic & Cast Basalt were added to the product range. 
Mining Industry Mineral Processing Units. Construction Industry Alumina Ceramic Liner and Grinding Media ALUMINA92 . TM . Navipur Road, Hathras - 204 101. Jan 4, 2003 Figure 18: Summary of the quarrying process for vessels and statue . Abdou Salem geologist Egyptian Geological Survey and Mining . Aswan granite Quarries and the Widan el-faras Basalt Quarries in the Faiyum. The whole site has been churned up by the recent road construction works, but.
# Copyright 2014-2015 Whitewood Encryption Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''WES Entropy Engine'''

import logging
import time
import WesEntropy.Engine.utilities as utilities
import WesEntropy.Engine.sp800_90a as sp800_90a
import WesEntropy.Engine.entropysource as entropysource

VERSION = '1.0'

#pylint: disable=R0903
class EntropyEngine(object):
    '''
    Construct an entropy engine of the following form:

        drbg_source      raw_source
             |                |
             | (rate)         |
             V                |
           drbg -----> XOR <--+
                        |
                        V
                    rand_bits

    This abstracts all the constructions in NIST SP800-90C, while also
    allowing for other implementations as needed.

    The sources are to be EntropySource objects, or a specification for
    constructing an EntropySource object.

    The rate at which the DRBG is to be reseeded can be numeric, indicating
    the number of times we can pull bits from the source before we reseed,
    or one of the following string values:

    MINIMAL   : Go the longest that NIST SP800-90A allows in this case.
    LINESPEED : Put in one bit of entropy for each bit we take out.
    '''
    def __init__(self, drbg_spec, drbg_source, drbg_reseed_rate, raw_source):
        # At least one of the two sources must be configured (checked below).
        self.drbg = None
        self.raw = None

        if drbg_spec is not None and drbg_source is not None:
            self.drbg = sp800_90a.new(drbg_spec, drbg_source, drbg_reseed_rate)

        if raw_source is not None:
            self.raw = entropysource.new(raw_source)

        # Running count of bits/bytes produced (see NOTE in generate()).
        self.total_bytes = 0
        self.start_time = time.time()

        if not self.drbg and not self.raw:
            raise ValueError(
                'Cannot construct engine with neither DRBG nor raw source.')

    def get_stats(self):
        '''Get statistics on amount of entropy consumed/produced'''
        stats = {'info': {}, 'consumed': {}, 'produced': {}}

        stats['info']['engine_uptime'] = time.time() - self.start_time
        stats['info']['version'] = VERSION

        if self.raw is not None:
            stats['info']['stream'] = self.raw.get_name()
        if self.drbg is not None:
            stats['info']['seed'] = self.drbg.entropy_source.get_name()
            stats['info']['drbg'] = self.drbg.get_name()

        stats['consumed'] = entropysource.get_all_stats()
        stats['produced'] = self.total_bytes

        return stats

    def cleanup(self):
        '''Uninstantiate DRBG and close any raw entropy source'''
        if self.drbg:
            self.drbg.uninstantiate()
        if self.raw:
            self.raw.close_entropy_source()

    #pylint: disable=R0911
    def generate(self, n_bits, security_strength = None,
                 prediction_resistance = None, additional_input = ''):
        'Generate bits from the entropy engine.'
        # Returns a (status, bits_or_message) pair; on failure the second
        # element is an error string, not bits.
        # NOTE(review): total_bytes is incremented by len() of the returned
        # bit string — despite the name it appears to count string length,
        # not bytes; confirm the intended unit.
        #
        # If we have a DRBG then use it
        if self.drbg:
            status, drbg_bits = self.drbg.generate(
                n_bits, security_strength,
                prediction_resistance, additional_input)

            # The DRBG, once instantiated, should never fail
            # (RESEED_FAILED is tolerated: the DRBG output is still usable)
            if status != 'SUCCESS' and status != 'RESEED_FAILED':
                return status, "DRBG failed"

            # If we are combining the DRBG with raw input then get raw bits
            if self.raw:
                status, raw_bits = self.raw.get_entropy_input(
                    security_strength, n_bits, n_bits, prediction_resistance)

                # Failure here is allowable, because we still have the DRBG
                if status != 'SUCCESS':
                    logging.debug(
                        "Using drbg only. %s, %s", status, raw_bits)
                    self.total_bytes += len(drbg_bits)
                    return 'DRBG_ONLY', drbg_bits

                # If we have both sources working then XOR them together
                comb_bits = utilities.binstr_xor(drbg_bits, raw_bits)
                self.total_bytes += len(comb_bits)
                return 'SUCCESS', comb_bits

            # If we only have a DRBG, then return just those bits
            else:
                self.total_bytes += len(drbg_bits)
                return 'SUCCESS', drbg_bits

        # If we have no DRBG then we must have a raw entropy source
        elif self.raw:
            status, raw_bits = self.raw.get_entropy_input(
                security_strength, n_bits, n_bits, prediction_resistance)

            # If this fails with no DRBG to back it up, return an error
            if status != 'SUCCESS':
                return status, "Raw source failed"

            # Otherwise return the raw bits
            self.total_bytes += len(raw_bits)
            return 'SUCCESS', raw_bits

        # If we have neither DRBG nor raw source, we cannot generate bits
        return 'ERROR', "Neither DRBG nor raw source available"
    #pylint: enable=R0911
LVAA continues its long-standing efforts to enhance and beautify our neighborhood by supporting and implementing beautification projects. Our most recent beautification effort involves planting "Yankee Point” Ceanothus in the parkway located on the east side of Linda Vista Avenue, south of Seco. The planted area covers the parkway along Linda Vista Avenue, across from the corner of Mira Vista where the telephone junction box is located, continuing for roughly 300 yards, ending at Seco Street. This was a joint collaboration between the City of Pasadena and LVAA, with both sharing the cost. Tom Hunter, with the Department of Public Works, Parks and Natural Resources, oversaw the project for the City. Mr. Hunter’s work crews prepared the soil, installed an irrigation system, and planted 500 “Yankee Point” Ceanothus. The Department then planted two new oak trees in the parkway. Mr. Hunter also installed temporary barrier materials to protect the project from the recent Half-Marathon that ran along this portion of Linda Vista Avenue last month. For LVAA, Ray Hyde, chairman of the Beautification Committee, saw the need and conceived of the idea for the project. Andy Gantner, Beautification Committee member, assisted Ray, and oversaw the project for LVAA.
#!/usr/bin/env python
# coding: utf-8

#
# Wire
# Copyright (C) 2017 Wire Swiss GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#

# CI helper: zips the Windows installer and uploads it to HockeyApp.
# NOTE: uses Python 2 print statements — requires Python 2.

import os
import requests
import zipfile

# HockeyApp app id / API token come from the CI environment.
HOCKEY_ID = os.environ.get('WIN_HOCKEY_ID')
HOCKEY_TOKEN = os.environ.get('WIN_HOCKEY_TOKEN')
# WRAPPER_BUILD has the form '<label>#<version>'; keep the version part.
VERSION = os.environ.get('WRAPPER_BUILD').split('#')[1]

HOCKEY_UPLOAD = 'https://rink.hockeyapp.net/api/2/apps/%s/app_versions/' % HOCKEY_ID
HOCKEY_NEW = 'https://rink.hockeyapp.net/api/2/apps/%s/app_versions/new' % HOCKEY_ID

# Paths relative to this script's own directory.
bin_root = os.path.dirname(os.path.realpath(__file__))
wire_exe = os.path.join(bin_root, '..', 'wrap', 'internal', 'WireInternal-win32-ia32', 'WireInternalSetup.exe')
wire_zip = os.path.join(bin_root, 'WireInternalSetup.zip')


def zipit(source, dest):
    # Create a zip at `dest` containing `source`, archived by basename only.
    # NOTE(review): chdir()s into the source's directory and never changes
    # back — the process CWD is different after this call.
    os.chdir(os.path.dirname(os.path.abspath(source)))
    filename = os.path.basename(source)
    zipf = zipfile.ZipFile(dest, 'w')
    zipf.write(filename)
    zipf.close()


if __name__ == '__main__':
    print 'Compressing...'
    zipit(wire_exe, wire_zip)

    print 'Uploading %s...' % VERSION

    # Split 'major.minor.patch'; HockeyApp wants short version + build number.
    semver_version = VERSION.split('.')
    headers = {
        'X-HockeyAppToken': HOCKEY_TOKEN,
    }
    data = {
        'notify': 0,
        'notes': 'Jenkins Build',
        'status': 2,
        'bundle_short_version': '%s.%s' % (semver_version[0], semver_version[1]),
        'bundle_version': semver_version[2],
    }
    files = {
        'ipa': open(wire_zip, 'rb')
    }
    # First create a new app version, then PUT the zipped installer to it.
    # NOTE(review): assumes the POST succeeded — response.json()['id'] will
    # raise if HockeyApp returned an error.
    response = requests.post(HOCKEY_NEW, data=data, headers=headers)
    response = requests.put('%s%s' % (HOCKEY_UPLOAD, response.json()['id']),
                            files=files, data=data, headers=headers)
    if response.status_code in [200, 201]:
        print 'Uploaded!'
    else:
        print 'Error :('
Officer Jesse Whitten had occasionally encountered the woman while on patrol. During one encounter Whitten's wife was at his side. The two women struck up a conversation, and several months later the Whittens learned that the homeless woman had specifically requested that they adopt the baby girl. Image courtesy of Santa Rosa Police Department / Facebook. An officer with the Santa Rosa (CA) Police Department has adopted the newborn daughter of a homeless woman battling drug addiction, according to CBS News. Officer Jesse Whitten had occasionally encountered the woman while on patrol. During one encounter Whitten's wife was at his side. The two women struck up a conversation, and several months later the Whittens learned that the homeless woman had specifically requested that they adopt the baby girl. "Not long ago, while Ofc. Whitten was working he met a pregnant woman needing help," The department said in a Facebook Post. "She didn't ask for assistance the way a typical call for service goes, she was looking for a home and a family for her unborn baby. Ofc. Whitten, the proud father of three girls already, opened his heart and his home to this baby. And now it's official! After a moving adoption hearing, she is a part of Ofc. Whitten's family. Congratulations, Whitten family, and welcome, baby, to the SRPD family!"
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'attachment.ui' # # Created by: PyQt5 UI code generator 5.9.2 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Attachment(object): def setupUi(self, Attachment): Attachment.setObjectName("Attachment") Attachment.resize(717, 205) self.horizontalLayout = QtWidgets.QHBoxLayout(Attachment) self.horizontalLayout.setObjectName("horizontalLayout") self.number = QtWidgets.QLabel(Attachment) self.number.setObjectName("number") self.horizontalLayout.addWidget(self.number) self.fields = QtWidgets.QTreeWidget(Attachment) self.fields.setMinimumSize(QtCore.QSize(0, 187)) self.fields.setObjectName("fields") self.fields.header().setVisible(True) self.fields.header().setMinimumSectionSize(100) self.horizontalLayout.addWidget(self.fields) self.extract_btn = QtWidgets.QPushButton(Attachment) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/export.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.extract_btn.setIcon(icon) self.extract_btn.setObjectName("extract_btn") self.horizontalLayout.addWidget(self.extract_btn) self.horizontalLayout.setStretch(1, 1) self.retranslateUi(Attachment) QtCore.QMetaObject.connectSlotsByName(Attachment) def retranslateUi(self, Attachment): _translate = QtCore.QCoreApplication.translate Attachment.setWindowTitle(_translate("Attachment", "Form")) self.number.setText(_translate("Attachment", "Number")) self.fields.headerItem().setText(0, _translate("Attachment", "Item")) self.fields.headerItem().setText(1, _translate("Attachment", "Value")) self.extract_btn.setText(_translate("Attachment", "Extract")) from . import resource_rc
"Agent for Jadeveon Clowney threatens legal action over sub shop marquee." Want to Rebuild Your Dormant College Football Program? You Need a Coaching Moses!
#!/usr/bin/env python3
import argparse, sys, os, requests
from subprocess import Popen, PIPE
from pprint import pprint
from api.amara_api import Amara
from utils import answer_me, download_yt_subtitles
from time import sleep

def read_cmd():
    """Parse and return command line options."""
    desc = "Program for downloading subtitles from Amara or YouTube. \
The video from YouTube can be downloaded as well."
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('input_file', metavar='INPUT_FILE',
                        help='Text file containing YouTube IDs and possibly filenames.')
    parser.add_argument('-l', '--lang', dest='lang', required=True,
                        help='Which language do we download?')
    parser.add_argument('-y', dest='youtube', action="store_true",
                        help='Download subtitles from YouTube.')
    parser.add_argument('-a', dest='amara', action="store_true",
                        help='Download subtitles from Amara.')
    parser.add_argument('-v', '--video', dest='video', action="store_true",
                        default=False,
                        help='Download video from YouTube in addition to subtitles.')
    parser.add_argument('-d', '--dir', dest='dirname', default='subs',
                        help='Destination directory for subtitles')
    # BUG FIX: help text said 'What language?' — copy-paste error.
    parser.add_argument('--sub-format', dest='sub_format', required=False,
                        default='vtt',
                        help='Subtitle format (e.g. vtt, srt).')
    parser.add_argument('-s', '--sleep', dest='sleep_int', required=False,
                        type=float, default=-1,
                        help='Sleep interval (seconds)')
    return parser.parse_args()

opts = read_cmd()

if opts.youtube and opts.sub_format != 'vtt':
    # BUG FIX: eprint() was never defined or imported here and raised a
    # NameError; print directly to stderr instead.
    print("ERROR: YouTube download only support vtt format!", file=sys.stderr)
    sys.exit(1)

if opts.youtube == True and opts.amara == True:
    print('Conflicting options "-y" and "-a"')
    print('Type "-h" for help')
    sys.exit(1)

if opts.youtube == False and opts.amara == False:
    print('Please, set either "-y" or "-a".')
    print('Type "-h" for help')
    sys.exit(1)

# List ytids may also contain filenames
ytids = []
# Reading file with YT id's
with open(opts.input_file, "r") as f:
    for line in f:
        tokens = line.split()
        # ROBUSTNESS FIX: skip blank lines and comments; previously a blank
        # or leading-whitespace line raised an IndexError.
        if tokens and not tokens[0].startswith("#"):
            ytids.append(tokens)

if not os.path.isdir(opts.dirname):
    os.mkdir(opts.dirname)

# Remove stale logs from previous runs; ignore if they do not exist.
# (Previously a bare except that could swallow unrelated errors.)
for logfile in ("youtubedl.out", "youtubedl.err"):
    try:
        os.remove(logfile)
    except OSError:
        pass

if opts.amara:
    amara = Amara()

# Main loop
for entry in ytids:
    ytid = entry[0]
    video_url = 'https://www.youtube.com/watch?v=%s' % ytid
    amara_id = ''

    if opts.video:
        # Download the video itself via youtube-dl, appending its stdout
        # to a log file; any stderr output is treated as fatal.
        video_download_cmd = "youtube-dl %s" % video_url
        p = Popen(video_download_cmd, shell=True, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        with open("youtubedl.out", 'a') as f:
            f.write(out.decode('UTF-8'))
        if err:
            print(err)
            sys.exit(1)
        else:
            print("Successfully downloaded video from %s" % video_url)

    if opts.youtube:
        subs = download_yt_subtitles(opts.lang, opts.sub_format, ytid, opts.dirname)

    elif opts.amara:
        # TODO: Extract this to utils as well.
        # First, get Amara ID
        amara_response = amara.check_video(video_url)
        if amara_response['meta']['total_count'] == 0:
            print("ERROR: Video is not on Amara! YTID=%s" % ytid)
            sys.exit(1)
        else:
            amara_id = amara_response['objects'][0]['id']
            amara_title = amara_response['objects'][0]['title']
            print("Downloading %s subtitles for YTID=%s" % (opts.lang, ytid))
            print("Title: %s" % amara_title)
            print("%s/cs/videos/%s" % (amara.AMARA_BASE_URL, amara_id))

        # Check whether subtitles for a given language are present,
        is_present, sub_version = amara.check_language(amara_id, opts.lang)
        if is_present and sub_version > 0:
            print("Subtitle revision number: %d" % sub_version)
        else:
            print("ERROR: Amara does not have subtitles for language %s for this video!" % opts.lang)
            sys.exit(1)

        # Download and write subtitles from Amara for a given language
        subs = amara.download_subs(amara_id, opts.lang, opts.sub_format)

    fname = "%s/%s.%s.%s" % (opts.dirname, ytid, opts.lang, opts.sub_format)
    with open(fname, 'w') as f:
        f.write(subs)

    # Trying to reduce E 429 (rate limiting by the server)
    if opts.sleep_int > 0:
        sleep(opts.sleep_int)
Okay so my eyebrows aren’t exactly *brand new* as I got them done a few months back however I’ve had so many questions about them lately so thought I would pop up a little post explaining what I had done. So I am from the oh so unfortunate generation who decided it was a brilliant idea to pluck every freaking eyebrow hair we had and leave about 3 creepy hairs in the shape of a semi-circle. You can’t see me RN but my eyes are rolling so far back into my head – sigh. All those beautiful bushy brows ruined by our crazy tweezer happy hands. Anyhoo I have moved on since and tried my hardest the last couple of years to grow in bushy, thick brows of my own but they never quite resemble the natural full brows I had when I was a kid. Don’t get me wrong when I went on nights out etc I would give myself nice full eyebrows with some Dip brow Pomade however I literally wear make up once a week if I’m lucky nowadays so having full eyebrows on a daily basis is something I have yearned for. So after some deliberation I decided I was going to try Permanent Cosmetics to give my eyebrows some much needed extra fullness. I did my research and was so happy I went with Emma Lee Permanent Cosmetics based in Glasgow. So I had my first appointment with Emma which is around 2+ hours where I had a consultation to talk about the final result I was looking for. A small medical questionnaire was also carried out at this point to ensure I was eligible for the treatment. Emma made me confident that she fully understood the style, colour etc I was after so I was more than happy to go ahead with the initial treatment. Emma is honestly so lovely and put my nervous self at ease with her professional attitude. Her clinic is intimate giving you the full one on one treatment. Once I went through to the treatment room Emma started my custom brow design. This is some technical business with measurements and looking into the symmetry of your face. 
I swear, once I saw the brows drawn on at this part I knew I had made the right decision. I have never been able to draw my brows on anywhere near as good as this in my puff. We then looked at a few colours and once I chose one I loved we went ahead and started the treatment. I can truthfully say I didn’t find the treatment sore. I was numbed up prior and during with some super fabulous numbing cream which resulted in me not being able to feel any pain. If anything I felt the tattoo needle a little tickley, making me want to sneeze. So here we are a few months on and I can honestly say, girls, it is one of the best beauty decisions I made in 2018. I feel a million times better with no make up on knowing I have natural looking defined eyebrows. I cannot recommend Emma enough. If it’s something you have been considering please don’t hesitate to message me any questions about my experience or reach out to Emma on the links below.
from ..playbook.PBConstants import (GOALIE, CHASER)
import man.motion.SweetMoves as SweetMoves

###
# Reimplementation of Game Controller States for pBrunswick.
# Each state function is called once per frame by the FSA framework and
# returns either player.stay() (remain in this state) or a transition
# (player.goNow()/player.goLater()).
###

def gameInitial(player):
    """
    Ensure we are sitting down and head is snapped forward.
    In the future, we may wish to make the head move a bit slower here
    Also, in the future, gameInitial may be responsible for turning off the gains
    """
    if player.firstFrame():
        # Reset all chasing/kicking bookkeeping on entry.
        player.isChasing = False
        player.inKickingState = False
        player.justKicked = False
        player.stopWalking()
        player.gainsOn()
        player.zeroHeads()
        # Flag so we only execute the sit-down motion once.
        player.GAME_INITIAL_satDown = False
    elif (player.brain.nav.isStopped() and not player.GAME_INITIAL_satDown
          and not player.motion.isBodyActive()):
        # Body is idle and we haven't sat yet: sit down exactly once.
        player.GAME_INITIAL_satDown = True
        player.executeMove(SweetMoves.SIT_POS)
    return player.stay()

def gamePenalized(player):
    # Freeze in place and drop the head into the penalized pose.
    if player.firstFrame():
        player.isChasing = False
        player.inKickingState = False
        player.justKicked = False
        player.stopWalking()
        player.penalizeHeads()
    return player.stay()

def gameReady(player):
    """
    Stand up, and pan for localization
    """
    if player.firstFrame():
        player.isChasing = False
        player.inKickingState = False
        player.justKicked = False
        player.brain.CoA.setRobotGait(player.brain.motion)
        if player.squatting:
            # Goalie was squatting; use the dedicated stand-up move.
            player.executeMove(SweetMoves.GOALIE_SQUAT_STAND_UP)
            player.squatting = False
        else:
            player.standup()
        # Remember whether our kickoff kick is still pending this half.
        if player.brain.gameController.ownKickOff:
            player.hasKickedOffKick = False
        else:
            player.hasKickedOffKick = True
        # NOTE(review): second standup() call — appears redundant with the
        # else-branch above (and wrong after GOALIE_SQUAT_STAND_UP); confirm
        # intended placement against upstream history before removing.
        player.standup()
        player.brain.tracker.locPans()
        if player.lastDiffState == 'gameInitial':
            return player.goLater('relocalize')
    if player.firstFrame() and \
            player.lastDiffState == 'gamePenalized':
        # Coming back from a penalty: our pose estimate is useless.
        player.brain.resetLocalization()
    return player.goLater('playbookPosition')

def gameSet(player):
    """
    Fixate on the ball, or scan to look for it
    """
    if player.firstFrame():
        player.isChasing = False
        player.inKickingState = False
        player.justKicked = False
        player.brain.CoA.setRobotGait(player.brain.motion)
    if player.firstFrame() and player.lastDiffState == 'gamePenalized':
        player.brain.resetLocalization()
    if player.firstFrame():
        player.stopWalking()
        player.brain.loc.resetBall()
        if player.brain.play.isRole(GOALIE):
            player.brain.resetGoalieLocalization()
        # Chaser stares at the ball; everyone else actively localizes.
        if player.brain.play.isRole(CHASER):
            player.brain.tracker.trackBall()
        else:
            player.brain.tracker.activeLoc()
    return player.stay()

def gamePlaying(player):
    # Dispatch immediately to the role-specific playing state.
    if player.firstFrame():
        player.brain.CoA.setRobotGait(player.brain.motion)
    if (player.firstFrame() and
            player.lastDiffState == 'gamePenalized'):
        player.brain.resetLocalization()
    roleState = player.getRoleState()
    return player.goNow(roleState)

def penaltyShotsGameReady(player):
    # Penalty-shot variant of gameReady: walk to pose instead of playbook.
    if player.firstFrame():
        player.brain.CoA.setRobotGait(player.brain.motion)
    if player.firstFrame():
        if player.lastDiffState == 'gamePenalized':
            player.brain.resetLocalization()
        player.brain.tracker.locPans()
        player.walkPose()
        if player.brain.play.isRole(GOALIE):
            player.brain.resetGoalieLocalization()
    return player.stay()

def penaltyShotsGameSet(player):
    # Penalty-shot variant of gameSet.
    if player.firstFrame():
        player.brain.CoA.setRobotGait(player.brain.motion)
    if player.firstFrame():
        player.stopWalking()
        player.brain.loc.resetBall()
        if player.lastDiffState == 'gamePenalized':
            player.brain.resetLocalization()
        if player.brain.play.isRole(GOALIE):
            player.brain.tracker.trackBall()
        else:
            player.brain.tracker.activeLoc()
        if player.brain.play.isRole(GOALIE):
            player.brain.resetGoalieLocalization()
    return player.stay()

def penaltyShotsGamePlaying(player):
    # Branch to the goalie-save or kicker state for the penalty shootout.
    if player.firstFrame():
        player.brain.CoA.setRobotGait(player.brain.motion)
    if player.lastDiffState == 'gamePenalized' and \
            player.firstFrame():
        player.brain.resetLocalization()
    if player.brain.play.isRole(GOALIE):
        return player.goNow('penaltyGoalie')
    return player.goNow('penaltyKick')

def fallen(player):
    """
    Stops the player when the robot has fallen
    """
    player.isChasing = False
    player.inKickingState = False
    player.justKicked = False
    return player.stay()

def gameFinished(player):
    """
    Ensure we are sitting down and head is snapped forward,
    then turn the gains off once seated.
    """
    if player.firstFrame():
        player.isChasing = False
        player.inKickingState = False
        player.justKicked = False
        player.stopWalking()
        player.zeroHeads()
        player.GAME_FINISHED_satDown = False
        return player.stay()
    # Sit down once we've finished walking
    if (player.brain.nav.isStopped() and not player.GAME_FINISHED_satDown
            and not player.motion.isBodyActive()):
        player.GAME_FINISHED_satDown = True
        player.executeMove(SweetMoves.SIT_POS)
        return player.stay()
    # Once seated and motionless, relax the motor gains.
    if not player.motion.isBodyActive() and player.GAME_FINISHED_satDown:
        player.gainsOff()
    return player.stay()
우리는 중국에서 저렴한 여권 홀더 제조 업체 및 공급 업체 / 공장 전문화되어 있습니다. 저렴한 여권 홀더 중 하나 인 Shenzhen Olinb Bags Co., Ltd. 중국 유명 브랜드 중 하나 인 저렴한 가격 / 저렴한 가격으로 고품질의 저렴한 여권 홀더 도매업. Wholesale 저렴한 여권 홀더 from China, Need to find cheap 저렴한 여권 홀더 as low price but leading manufacturers. Just find high-quality brands on 저렴한 여권 홀더 produce factory, You can also feedback about what you want, start saving and explore our 저렴한 여권 홀더, We'll reply you in fastest.
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# Thin Ambari REST client (Python 2, urllib2-based) that relocates a host
# component from its current host to a new one: disable -> delete -> add ->
# install, optionally followed by start.

import optparse
import sys
import os
import logging
import tempfile
import urllib2
import socket
import json
import base64
import time

# Connection defaults; overwritten from CLI options in main().
AMBARI_HOSTNAME = None
AMBARI_PORT = 8080    # NOTE(review): int here, but main() stores the option
                      # string; getUrl() string-concatenates the port, so the
                      # int default would raise TypeError if used directly.
CLUSTER_NAME = None
PROTOCOL = "http"
USERNAME = "admin"
PASSWORD = "admin"
DEFAULT_TIMEOUT = 10  # seconds
START_ON_RELOCATE = False

# Supported Actions
RELOCATE_ACTION = 'relocate'
# Component states from which a relocate may safely begin.
ALLOWED_ACTUAL_STATES_FOR_RELOCATE = [ 'INIT', 'UNKNOWN', 'DISABLED', 'UNINSTALLED' ]
# Target host must report one of these states.
ALLOWED_HOST_STATUS_FOR_RELOCATE = [ 'HEALTHY' ]
STATUS_WAIT_TIMEOUT = 120  # seconds
STATUS_CHECK_INTERVAL = 10  # seconds

# API calls (templates filled with str.format)
GET_CLUSTERS_URI = "/api/v1/clusters/"
GET_HOST_COMPONENTS_URI = "/api/v1/clusters/{0}/services/{1}/components/{2}" +\
                          "?fields=host_components"
GET_HOST_COMPONENT_DESIRED_STATE_URI = "/api/v1/clusters/{0}/hosts/{1}" +\
                                       "/host_components/{2}" +\
                                       "?fields=HostRoles/desired_state"
GET_HOST_COMPONENT_STATE_URI = "/api/v1/clusters/{0}/hosts/{1}" +\
                               "/host_components/{2}" +\
                               "?fields=HostRoles/state"
GET_HOST_STATE_URL = "/api/v1/clusters/{0}/hosts/{1}?fields=Hosts/host_state"
HOST_COMPONENT_URI = "/api/v1/clusters/{0}/hosts/{1}/host_components/{2}"
ADD_HOST_COMPONENT_URI = "/api/v1/clusters/{0}/hosts?Hosts/host_name={1}"

logger = logging.getLogger()


class PreemptiveBasicAuthHandler(urllib2.BaseHandler):
  """Adds a Basic Authorization header to every request up front,
  rather than waiting for a 401 challenge (Ambari expects this)."""

  def __init__(self):
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, getUrl(''), USERNAME, PASSWORD)
    self.passwd = password_mgr
    self.add_password = self.passwd.add_password

  def http_request(self, req):
    # Build the Basic auth header from the module-level credentials.
    uri = req.get_full_url()
    user = USERNAME
    pw = PASSWORD
    raw = "%s:%s" % (user, pw)
    auth = 'Basic %s' % base64.b64encode(raw).strip()
    req.add_unredirected_header('Authorization', auth)
    return req


class AmbariResource:
  """A single (service, component) pair in the cluster, with operations to
  discover its current host and relocate it to a new one."""

  def __init__(self, serviceName, componentName):
    # NOTE(review): 'serviveName' typo is preserved throughout; renaming
    # would be a behavior-neutral cleanup for a separate change.
    self.serviveName = serviceName
    self.componentName = componentName
    self.isInitialized = False

  def initializeResource(self):
    """Discover cluster name and current host; install the auth opener."""
    global CLUSTER_NAME
    if CLUSTER_NAME is None:
      CLUSTER_NAME = self.findClusterName()
    if self.serviveName is None:
      raise Exception('Service name undefined')
    if self.componentName is None:
      raise Exception('Component name undefined')
    handler = PreemptiveBasicAuthHandler()
    opener = urllib2.build_opener(handler)
    # Install opener for all requests
    urllib2.install_opener(opener)
    self.urlOpener = opener
    self.old_hostname = self.getHostname()
    self.isInitialized = True

  def relocate(self, new_hostname):
    """Move the component to new_hostname.

    Sequence: verify preconditions, DISABLE on old host, DELETE old
    host-component, ADD on new host, INSTALL (and wait), then optionally
    START (and wait) when START_ON_RELOCATE is set.
    Exits the process (codes 2/3) on precondition failure.
    """
    if not self.isInitialized:
      raise Exception('Resource not initialized')

    # If old and new hostname are the same exit harmlessly
    if self.old_hostname == new_hostname:
      logger.error('New hostname is same as existing host name, %s' %
                   self.old_hostname)
      sys.exit(2)
    pass

    try:
      self.verifyHostComponentStatus(self.old_hostname, new_hostname,
                                     self.componentName)
    except Exception, e:
      logger.error("Exception caught on verify relocate request.")
      logger.error(e.message)
      sys.exit(3)

    # Put host component in Maintenance state
    self.updateHostComponentStatus(self.old_hostname, self.componentName,
                                   "Disable", "DISABLED")

    # Delete current host component
    self.deleteHostComponent(self.old_hostname, self.componentName)

    # Add component on the new host
    self.addHostComponent(new_hostname, self.componentName)

    # Install host component
    self.updateHostComponentStatus(new_hostname, self.componentName,
                                   "Installing", "INSTALLED")

    # Wait on install
    self.waitOnHostComponentUpdate(new_hostname, self.componentName,
                                   "INSTALLED")

    if START_ON_RELOCATE:
      # Start host component
      self.updateHostComponentStatus(new_hostname, self.componentName,
                                     "Starting", "STARTED")
      # Wait on start
      self.waitOnHostComponentUpdate(new_hostname, self.componentName,
                                     "STARTED")
      pass
    pass

  def waitOnHostComponentUpdate(self, hostname, componentName, status):
    """Poll the component state every STATUS_CHECK_INTERVAL seconds until it
    equals `status` or STATUS_WAIT_TIMEOUT elapses; exits(1) on timeout."""
    logger.info("Waiting for host component status to update ...")
    sleep_itr = 0
    state = None
    while sleep_itr < STATUS_WAIT_TIMEOUT:
      try:
        state = self.getHostComponentState(hostname, componentName)
        if status == state:
          logger.info("Status update successful. status: %s" % state)
          return
        pass
      except Exception, e:
        # Transient API errors are tolerated; keep polling until timeout.
        logger.error("Caught an exception waiting for status update.. "
                     "continuing to wait...")
        pass
      time.sleep(STATUS_CHECK_INTERVAL)
      sleep_itr += STATUS_CHECK_INTERVAL
      pass
    if state and state != status:
      logger.error("Timed out on wait, status unchanged. status = %s" % state)
      sys.exit(1)
      pass
    pass

  def addHostComponent(self, hostname, componentName):
    """POST the component onto `hostname` (creates the host-component)."""
    data = '{"host_components":[{"HostRoles":{"component_name":"%s"}}]}' % self.componentName
    req = urllib2.Request(getUrl(ADD_HOST_COMPONENT_URI.format(CLUSTER_NAME,
                                                               hostname)), data)
    req.add_header("X-Requested-By", "ambari_probe")
    req.get_method = lambda: 'POST'
    try:
      logger.info("Adding host component: %s" % req.get_full_url())
      resp = self.urlOpener.open(req)
      self.logResponse('Add host component response: ', resp)
    except Exception, e:
      logger.error('Create host component failed, component: {0}, host: {1}'
                   .format(componentName, hostname))
      logger.error(e)
      raise e
    pass

  def deleteHostComponent(self, hostname, componentName):
    """DELETE the host-component from `hostname`."""
    req = urllib2.Request(getUrl(HOST_COMPONENT_URI.format(CLUSTER_NAME,
                                                           hostname, componentName)))
    req.add_header("X-Requested-By", "ambari_probe")
    req.get_method = lambda: 'DELETE'
    try:
      logger.info("Deleting host component: %s" % req.get_full_url())
      resp = self.urlOpener.open(req)
      self.logResponse('Delete component response: ', resp)
    except Exception, e:
      logger.error('Delete {0} failed.'.format(componentName))
      logger.error(e)
      raise e
    pass

  def updateHostComponentStatus(self, hostname, componentName, contextStr, status):
    """PUT the desired state (`status`) of the host-component; `contextStr`
    becomes the request-context label shown in the Ambari UI."""
    # Update host component
    data = '{"RequestInfo":{"context":"%s %s"},"Body":{"HostRoles":{"state":"%s"}}}' % (contextStr, self.componentName, status)
    req = urllib2.Request(getUrl(HOST_COMPONENT_URI.format(CLUSTER_NAME,
                                                           hostname, componentName)), data)
    req.add_header("X-Requested-By", "ambari_probe")
    req.get_method = lambda: 'PUT'
    try:
      logger.info("%s host component: %s" % (contextStr, req.get_full_url()))
      resp = self.urlOpener.open(req)
      self.logResponse('Update host component response: ', resp)
    except Exception, e:
      logger.error('Update Status {0} failed.'.format(componentName))
      logger.error(e)
      raise e
    pass

  def verifyHostComponentStatus(self, old_hostname, new_hostname, componentName):
    """Raise unless the component's current state and the target host's
    health both allow a relocate."""
    # Check desired state of host component is not STOPPED or host is
    # unreachable
    actualState = self.getHostComponentState(old_hostname, componentName)
    if actualState not in ALLOWED_ACTUAL_STATES_FOR_RELOCATE:
      raise Exception('Aborting relocate action since host component '
                      'state is %s' % actualState)

    hostState = self.getHostSatus(new_hostname)
    if hostState not in ALLOWED_HOST_STATUS_FOR_RELOCATE:
      raise Exception('Aborting relocate action since host state is %s' % hostState)
    pass

  def getHostSatus(self, hostname):
    """Return the host_state string reported for `hostname` (or None).
    NOTE(review): method name typo ('Satus') kept for compatibility."""
    hostStateUrl = getUrl(GET_HOST_STATE_URL.format(CLUSTER_NAME, hostname))
    logger.info("Requesting host status: %s " % hostStateUrl)
    urlResponse = self.urlOpener.open(hostStateUrl)
    state = None
    if urlResponse:
      response = urlResponse.read()
      data = json.loads(response)
      logger.debug('Response from getHostSatus: %s' % data)
      if data:
        try:
          hostsInfo = data.get('Hosts')
          if not hostsInfo:
            # NOTE(review): '{1}' with a single .format() argument would
            # raise IndexError if this branch fires — should be '{0}'.
            raise Exception('Cannot find host state for host: {1}'.format(hostname))
          state = hostsInfo.get('host_state')
        except Exception, e:
          logger.error('Unable to parse json data. %s' % data)
          raise e
        pass
      else:
        logger.error("Unable to retrieve host state.")
      pass
    return state

  def getHostComponentState(self, hostname, componentName):
    """Return the HostRoles/state string of the host-component (or None)."""
    hostStatusUrl = getUrl(GET_HOST_COMPONENT_STATE_URI.format(CLUSTER_NAME,
                                                               hostname, componentName))
    logger.info("Requesting host component state: %s " % hostStatusUrl)
    urlResponse = self.urlOpener.open(hostStatusUrl)
    state = None
    if urlResponse:
      response = urlResponse.read()
      data = json.loads(response)
      logger.debug('Response from getHostComponentState: %s' % data)
      if data:
        try:
          hostRoles = data.get('HostRoles')
          if not hostRoles:
            raise Exception('Cannot find host component state for component: ' +\
                            '{0}, host: {1}'.format(componentName, hostname))
          state = hostRoles.get('state')
        except Exception, e:
          logger.error('Unable to parse json data. %s' % data)
          raise e
        pass
      else:
        logger.error("Unable to retrieve host component desired state.")
      pass
    return state

  # Log response for PUT, POST or DELETE
  def logResponse(self, text=None, response=None):
    if response is not None:
      resp = str(response.getcode())
      if text is None:
        text = 'Logging response from server: '
      if resp is not None:
        logger.info(text + resp)

  def findClusterName(self):
    """Return the single cluster's name; raises if more than one cluster
    is registered on the server."""
    clusterUrl = getUrl(GET_CLUSTERS_URI)
    clusterName = None
    logger.info("Requesting clusters: " + clusterUrl)
    urlResponse = self.urlOpener.open(clusterUrl)
    if urlResponse is not None:
      response = urlResponse.read()
      data = json.loads(response)
      logger.debug('Response from findClusterName: %s' % data)
      if data:
        try:
          clusters = data.get('items')
          if len(clusters) > 1:
            raise Exception('Multiple clusters found. %s' % clusters)
          clusterName = clusters[0].get('Clusters').get('cluster_name')
        except Exception, e:
          logger.error('Unable to parse json data. %s' % data)
          raise e
        pass
      else:
        logger.error("Unable to retrieve clusters data.")
      pass
    return clusterName

  def getHostname(self):
    """Return the one host currently running this component; raises when
    the component is found on zero or multiple hosts."""
    hostsUrl = getUrl(GET_HOST_COMPONENTS_URI.format(CLUSTER_NAME,
                                                     self.serviveName, self.componentName))
    logger.info("Requesting host info: " + hostsUrl)
    urlResponse = self.urlOpener.open(hostsUrl)
    hostname = None
    if urlResponse is not None:
      response = urlResponse.read()
      data = json.loads(response)
      logger.debug('Response from getHostname: %s' % data)
      if data:
        try:
          hostRoles = data.get('host_components')
          if not hostRoles:
            raise Exception('Cannot find host component data for service: ' +\
                            '{0}, component: {1}'.format(self.serviveName, self.componentName))
          if len(hostRoles) > 1:
            raise Exception('More than one hosts found with the same role')
          hostname = hostRoles[0].get('HostRoles').get('host_name')
        except Exception, e:
          logger.error('Unable to parse json data. %s' % data)
          raise e
        pass
      else:
        logger.error("Unable to retrieve host component data.")
      pass
    return hostname


def getUrl(partial_url):
  # Builds the absolute API URL. Relies on AMBARI_PORT being a string
  # (it is, when set from the CLI option in main()).
  return PROTOCOL + "://" + AMBARI_HOSTNAME + ":" + AMBARI_PORT + partial_url

def get_supported_actions():
  return [ RELOCATE_ACTION ]

#
# Main.
#
def main():
  tempDir = tempfile.gettempdir()
  outputFile = os.path.join(tempDir, "ambari_reinstall_probe.out")

  parser = optparse.OptionParser(usage="usage: %prog [options]")
  parser.set_description('This python program is a Ambari thin client and '
                         'supports relocation of ambari host components on '
                         'Ambari managed clusters.')

  # NOTE(review): action="store_false" with default=False means -v can
  # never turn verbose on; should be action="store_true".
  parser.add_option("-v", "--verbose", dest="verbose", action="store_false",
                    default=False, help="output verbosity.")
  parser.add_option("-s", "--host", dest="server_hostname",
                    help="Ambari server host name.")
  parser.add_option("-p", "--port", dest="server_port",
                    default="8080", help="Ambari server port. [default: 8080]")
  parser.add_option("-r", "--protocol", dest="protocol", default = "http",
                    help="Protocol for communicating with Ambari server ("
                         "http/https) [default: http].")
  parser.add_option("-c", "--cluster-name", dest="cluster_name",
                    help="Ambari cluster to operate on.")
  parser.add_option("-e", "--service-name", dest="service_name",
                    help="Ambari Service to which the component belongs to.")
  parser.add_option("-m", "--component-name", dest="component_name",
                    help="Ambari Service Component to operate on.")
  parser.add_option("-n", "--new-host", dest="new_hostname",
                    help="New host to relocate the component to.")
  parser.add_option("-a", "--action", dest="action", default = "relocate",
                    help="Script action. [default: relocate]")
  parser.add_option("-o", "--output-file", dest="outputfile",
                    default = outputFile, metavar="FILE",
                    help="Output file. [default: %s]" % outputFile)
  parser.add_option("-u", "--username", dest="username",
                    default="admin", help="Ambari server admin user. [default: admin]")
  parser.add_option("-w", "--password", dest="password",
                    default="admin", help="Ambari server admin password.")
  # NOTE(review): same store_false/default=False bug as -v; -d can never
  # enable start-on-relocate.
  parser.add_option("-d", "--start-component", dest="start_component",
                    action="store_false", default=False,
                    help="Should the script start the component after relocate.")

  (options, args) = parser.parse_args()

  # set verbose
  if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
  else:
    logging.basicConfig(level=logging.INFO)

  # Publish CLI options into the module-level connection globals.
  global AMBARI_HOSTNAME
  AMBARI_HOSTNAME = options.server_hostname

  global AMBARI_PORT
  AMBARI_PORT = options.server_port

  global CLUSTER_NAME
  CLUSTER_NAME = options.cluster_name

  global PROTOCOL
  PROTOCOL = options.protocol

  global USERNAME
  USERNAME = options.username

  global PASSWORD
  PASSWORD = options.password

  global START_ON_RELOCATE
  START_ON_RELOCATE = options.start_component

  global logger
  logger = logging.getLogger('AmbariProbe')
  handler = logging.FileHandler(options.outputfile)
  formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
  handler.setFormatter(formatter)
  logger.addHandler(handler)

  action = RELOCATE_ACTION
  if options.action is not None:
    if options.action not in get_supported_actions():
      logger.error("Unsupported action: " + options.action + ", "
                   "valid actions: " + str(get_supported_actions()))
      sys.exit(1)
    else:
      action = options.action

  # Global socket timeout so a hung Ambari server can't block forever.
  socket.setdefaulttimeout(DEFAULT_TIMEOUT)

  ambariResource = AmbariResource(serviceName=options.service_name,
                                  componentName=options.component_name)
  ambariResource.initializeResource()

  if action == RELOCATE_ACTION:
    if options.new_hostname is not None:
      ambariResource.relocate(options.new_hostname)

if __name__ == "__main__":
  try:
    main()
  except (KeyboardInterrupt, EOFError):
    print("\nAborting ... Keyboard Interrupt.")
    sys.exit(1)
Alter Ego Art Projects is an international initiative to promote contemporary art intersecting with science & technology, headquartered in Belgium. You can find us in Antwerp in "Het Groen Kwartier" at Artsen Zonder Grenzenstraat 41. Private viewing only. When art intersects with science, poetry is born. Alain is a business-nerd, but foremost he is passionate about art. He has been busy reconciling apparent contradictions for many years.
from numpy import *
from colorsys import *
import Tkinter as tk
import ttk
import PIL.Image, PIL.ImageTk

# Round-to-nearest-int helper used for pixel coordinates.
ir = lambda x: int(round(x))

#-----------------------------------------------------------------------------------------
# H(CL) picker

class H_CL_Picker:
    """Tk color picker: a hue ring with an inscribed chroma/lightness
    triangle, rendered into PIL images on a Canvas (Python 2 / Tkinter).

    NOTE(review): the attributes below are class-level, so they are shared
    as defaults across instances until first assignment on self.
    """

    hue_panel_size = 256, 256
    r0, r1 = 0.4*hue_panel_size[0], 0.5*hue_panel_size[0]  # Hue circle radii
    rt = r0-5  # CL-Triangle outer radius
    # Cached PhotoImages; None until the first draw builds them.
    hue_img, cl_img = None, None
    hue, sat, val = 0, 0, 0

    def __init__( self, parent, color_broadcast=None ):
        # color_broadcast: optional shared-color object with get_rgb()/set();
        # used to sync this picker with other widgets.
        self.parent = parent
        self.frame = tk.Frame(self.parent)
        self.colorbc = color_broadcast

        # Get initial color
        self.receive_color()

        # setup frames
        self.canvas = tk.Canvas(self.frame, bd=-2,
                                width=self.hue_panel_size[0],
                                height=self.hue_panel_size[1] )

        # Create initial images
        self.draw()

        # bind event handlers
        self.canvas.bind('<Button-1>', self.on_hue_click)
        self.canvas.bind('<B1-Motion>', self.on_hue_click)
        self.parent.bind('<<NotebookTabChanged>>', self.on_tab_changed)

        self.place()

    def place(self, **args):
        # place frames on grid
        self.frame.grid(args)
        self.canvas.grid(column=0, row=0, padx=(12,12), pady=(12,12),
                         sticky=tk.N+tk.S)

    def draw(self):
        # Redraw both layers: hue ring, then the rotated C/L triangle.
        self.draw_hue()
        self.draw_cl()

    def draw_hue(self):
        """Create the static hue ring on first call; afterwards only move
        the marker line to the current hue angle."""
        W,H = self.hue_panel_size
        r0,r1 = self.r0, self.r1
        xm,ym = W/2.,H/2.
        # NOTE(review): '==None' should be 'is None' (PEP 8).
        if (self.hue_img==None):
            # First call, create static hue-image
            # 256-entry hue->RGB lookup table at fixed S=V=0.9.
            d = 255*array([hsv_to_rgb(1.-x,0.9,0.9) for x in arange(256)/255.])
            self.hue_data = d.copy()
            hue_scale = zeros((H,W,4), dtype=uint8)
            # Draw hue circle. THIS IS VERY SLOW!
            # Only one quadrant is computed; the other three are filled by
            # mirror symmetry of the hue angle.
            for y in range(int(ym),H):
                clip = lambda x,a,b: a if x<a else (b if x>b else x)
                if y-ym>= r0: x0 = xm
                else:         x0 = clip( xm + sqrt(r0**2-(y-ym)**2), xm, W-1 )
                if y-ym>= r1: continue
                else:         x1 = clip( xm + sqrt(r1**2-(y-ym)**2), xm, W-1 )
                for x in range(int(x0), int(x1)):
                    p = arctan2( y-ym, x-xm )/(2*pi)
                    hue_scale[y,x]         = r_[ d[int(255*p)], 255 ]
                    hue_scale[H-1-y,x]     = r_[ d[int(255*(1-p))], 255 ]
                    hue_scale[y,W-1-x]     = r_[ d[int(255*(0.5-p))], 255 ]
                    hue_scale[H-1-y,W-1-x] = r_[ d[int(255*(0.5+p))], 255 ]
            hue_img = PIL.Image.frombuffer('RGBA', (W,H), hue_scale,
                                           'raw', 'RGBA', 0, 1)
            self.hue_img = PIL.ImageTk.PhotoImage( hue_img )
            self.canvas.create_image( 0,0, anchor=tk.NW, image=self.hue_img,
                                      tag='hue_img' )
            phi = self.hue*2*pi
            self.hue_marker = self.canvas.create_line(xm+r0*cos(phi), ym+r0*sin(phi),
                                                      xm+r1*cos(phi), ym+r1*sin(phi))
            self.canvas.tag_bind('hue_img', '<Button-1>', self.foo_hue)
        else:
            # Subsequent calls: just reposition the marker (note the sign
            # flip relative to the first-draw branch — TODO confirm intent).
            phi = -self.hue*2*pi
            self.canvas.coords( self.hue_marker,
                                xm+r0*cos(phi), ym+r0*sin(phi),
                                xm+r1*cos(phi), ym+r1*sin(phi) )

    def foo_hue(self, event):
        # Debug stub bound to clicks on the hue ring image.
        print 'Fooo Hue'

    def draw_cl(self):
        """Render the chroma/lightness triangle for the current hue and
        rotate it so its tip tracks the hue marker."""
        W,H = self.hue_panel_size
        ro = self.rt  ## triangle outer radius
        xm,ym = W/2.,H/2.
        a  = ir(sqrt(3.)*ro)  ## Triangle side-length
        ri = ir(0.5*ro)       ## Triangle inner radius
        bw = ir(2*ro)         ## width of bounding box
        print 'a=', a, 'bw-ri=', bw-ri
        if (self.cl_img==None):
            # Create triangle mask (alpha channel; 255 inside the triangle)
            cl_mask = zeros( (bw, bw), dtype=uint8)
            for x in arange( int(ri), int(bw) ):
                h = a/(3*ro)*(bw-x)
                for y in arange( int(round(0.5*bw-h)), int(round(0.5*bw+h)) ):
                    cl_mask[y,x] = 255
            self.cl_mask = cl_mask

        # Create c-l-triangle
        ## SLOW ##
        ##cl_data = zeros( (bw, bw, 4), dtype=uint8)
        ##for x in arange( ri, bw ):
        ##    h = a/(3*ro)*(bw-x)
        ##    for y in arange( round(0.5*bw-h), round(0.5*bw+h) ):
        ##        cl_data[y,x] = r_[ self.hue_data[255*(1.-self.hue)], 255 ]

        # Significantly faster, but somewhat cryptic
        rgb = array(hls_to_rgb(self.hue,0.5,1))
        # Create sat axis for given hue
        ##for si in range(256): sat[si,256] = (rgb-1)*si/256.+1
        # sat: (bw-ri, 3) ramp from white to the pure hue.
        sat = (full((bw-ri,3), rgb )-1) * tile( arange(bw-ri)[:,newaxis], (1,3))/(1.*(bw-ri)) + 1
        # Create sat-val plane from sat axis
        ##for vi in range(256): fd1[:,vi] = fd1[:,256] *vi/256.
        # sv: broadcast the sat ramp against a value ramp — presumably
        # shape (a, bw-ri, 3); TODO confirm.
        sv = transpose( tile(sat[:,newaxis], (a,1) ), (1,0,2) )* ( repeat(arange(a)[::-1],(bw-ri)*3).reshape(a,(bw-ri),3)/(1.*a) )
        cl_data = empty( (bw, bw, 4), dtype=uint8)
        cl_data[ir(0.5*(bw-a)):ir(0.5*(bw+a)),ri:bw,0:3] = (255*sv).astype(uint8)
        cl_data[:,:,3] = self.cl_mask
        cl_img = PIL.Image.frombuffer('RGBA', (bw,bw), cl_data,
                                      'raw', 'RGBA', 0, 1)
        # Rotate c-l-triangle
        cl_img = cl_img.rotate(self.hue*360)
        if (self.cl_img==None):
            # First draw: create canvas image and bind click handler.
            self.cl_img = PIL.ImageTk.PhotoImage( cl_img )
            self.canvas.create_image( int(0.5*(W-bw)), int(0.5*(H-bw)),
                                      anchor=tk.NW, image=self.cl_img,
                                      tag='cl_img' )
            self.canvas.tag_bind('cl_img', '<Button-1>', self.foo_cl)
        else:
            # Update existing PhotoImage in place.
            self.cl_img.paste(cl_img)

    def foo_cl(self, event):
        # Debug stub bound to clicks on the triangle image.
        print 'Fooo cl'

    def on_hue_click(self, event):
        """Mouse handler: convert a click/drag inside the ring to a hue in
        [0,1), redraw, and broadcast the new color."""
        x = clip( event.x, 0, self.hue_panel_size[0] )
        y = clip( event.y, 0, self.hue_panel_size[1] )
        print 'x,y =', x, y
        xm,ym = self.hue_panel_size[0]/2., self.hue_panel_size[1]/2.
        r = sqrt( (x-xm)**2 + (y-ym)**2 )
        # Ignore clicks inside the ring (that's the triangle's area).
        if r < self.r0: return
        phi = -arctan2(y-ym, x-xm)
        self.hue = phi/(2*pi)
        if self.hue<0:    self.hue += 1
        if self.hue==-0.0: self.hue = 0.0
        print "hue=", self.hue
        self.draw()
        self.broadcast_color()

    def on_tab_changed(self, event):
        # Refresh when this notebook tab becomes visible.
        print 'H(CL) tab'
        self.draw()
        self.broadcast_color()

    def broadcast_color(self):
        # Push current HSV out to the shared color object, if any.
        if self.colorbc:
            rgb = hsv_to_rgb(self.hue, self.sat, self.val)
            var = ( ('H',self.hue), ('S',self.sat), ('V',self.val) )
            self.colorbc.set( rgb, var )

    def receive_color(self):
        # Pull the current color from the shared object (black fallback).
        if self.colorbc:
            r,g,b = self.colorbc.get_rgb()
        else:
            r,g,b = 0,0,0
        self.hue, self.sat, self.val = rgb_to_hsv(r,g,b)
Hartley: ”Ever since the beginning of my career, I’ve wanted to make movies about my time and place. Films that get dated. You say, "Wow, that’s really the late nineties in America." "Henry Fool" is really dated. That time when the country went way to the right, and the internet was invented. Working this way with these stories is fun. I look around me and I say, "OK, what’s the world like now?" I want the films to reflect what society is doing around us. Once lost, local critic fanbases will never be regained. This hurts the indies and studio subdivisions that are in the business of pushing Oscar contenders and lower-budget films for adults. Specialty fare needs local support and interpretation from the critic as explainer, interpreter, and champion. Over the years, critics helped audiences appreciate the likes of Orson Welles’ “Citizen Kane,” Alfred Hitchcock’s “Psycho,” Stanley Kubrick’s “2001: A Space Odyssey,” Arthur Penn’s “Bonnie and Clyde,” Bernardo Bertolucci’s “Last Tango in Paris,” Brian De Palma’s “Dressed to Kill,” Robert Altman’s “The Player,” the Coens’ “No Country for Old Men” and Paul Thomas Anderson’s “There Will be Blood.” Where would we have been without them? When it comes to driving irresponsibly, movies like Furious 7 may exacerbate an already-serious problem: reckless teenage driving. According to CRC Health Group, teenagers have an accident rate four times that of adults. Further, drag racing itself remains popular – and deadly – in America. Statistics remain elusive because it is not always clear when an accident has been caused by an actual race, but one study found 1,047 racing deaths from 2001 to 2008 in the U.S. alone. Now that Woman in Gold has given a story behind a famous painting, what other masterpieces of the fine art world deserve their own movie? Is The Da Vinci Code the closest thing to a film about the Mona Lisa that we’ve seen? 
TFE looks at some great works like the Mona Lisa and muses which directors/stars might best fit the projects. But, ugh, this article reminds me that the dreaded adaptation of The Goldfinch is on the horizon. Perhaps there has never been a film about del Giocondo because she lived a very ordinary life, and little is known about her beyond that she married at 15, had 5 children, and died in a convent in her 60s. Perhaps the best way to tell her story is to show the everyday woman and her goings-on, I'm thinking Mia Wasikowska, so good at giving her character's rich inner lives while remaining slightly mysterious, but then to also make the Mona Lisa itself a character. Have Sarah Polley, who has previously excelled in telling woman's stories in Away From Her and Take This Waltz and juggled multiple storylines with Stories We Tell, show the extraordinary journey the painting has taken over the decades to make Lisa del Giocondo a legend. “Tudor Tutor: A Complete Wolf Hall Character Guide" The new PBS series Wolf Hall, an import from Britain and an adaptation of Hilary Mantel’s bestselling books, takes TV viewers inside the intense, cloistered world of King Henry VIII’s court, teeming with confidants, enemies, and, of course, potential wives. It’s based on history we’re all nominally familiar with—Anne Boleyn, Sir Thomas More, etc.— but that doesn’t mean it’s necessarily easily to keep track of which man in the floppy Tudor hat is which. A good performance is one thing; a great character is another. Actors can bring both, but a sense of place is often harder to come by. Look at how various directors treat Toronto or Manhattan: a setting can be more than just a backdrop. It can be part of star of an ensemble. Unless a film director draws our attention directly to its grander purpose, location is often the unsung hero of film, usually seen as nothing more than a mere backdrop, albeit often a picturesque one, for people’s problems to take place in. 
No great, or even halfway decent director takes location for granted though. If there is character to the setting of the film then the odds are that character is likely saying something about the film itself, as well. And until the day when we can all just teleport from one place to the next with ease, we have the movies to thank for taking us to places we may never otherwise see for ourselves. "Social Media Saved My Life" “We talk a lot about how social media is sort of like a no-man’s land, gang mentality. That’s definitely there; I know people that have had their lives irreparably damaged because of social media, but at the same time it can be a force for good,” [Parker] said. A few more videos than usual this week! This new series highlights the range of talent in Ottawa’s film scene. First up in the series is a profile/interview with Cory and Gary Thibert, the minds behind Wolfpelt Productions. They chat their inspiration and share some thoughts about their upcoming project Eyetooth—sounds good! “Twin Peaks” without David Lynch is like the Oscars without Meryl Streep! Trailer for this Sundance champ is very promising. I love the cats! What did you read (or watch) this week?
import os

# Project base directory: two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment when available (consistent with the DATABASES
# configuration below); fall back to the historical development key so
# existing local setups keep working unchanged.
SECRET_KEY = os.getenv(
    'SECRET_KEY', 'ldxl71%#nruwupjnjy&=&9hjyg2o--gavcsx5!*)rwoq08&=9$'
)

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'aligot',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'aligot.urls'

WSGI_APPLICATION = 'aligot.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Every connection parameter can be overridden from the environment;
# the defaults give a local sqlite database for development.

DATABASES = {
    'default': {
        'ENGINE': os.getenv('DB_ENGINE', 'django.db.backends.sqlite3'),
        'NAME': os.getenv('DB_NAME', os.path.join(BASE_DIR, 'db.sqlite3')),
        'USER': os.getenv('DB_USER', ''),
        'PASSWORD': os.getenv('DB_PASSWORD', ''),
        'HOST': os.getenv('DB_HOST', ''),
        'PORT': os.getenv('DB_PORT', ''),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
    '/var/www/static/',
)

# Auth
AUTH_USER_MODEL = 'aligot.User'

# rest_framework
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    ),
    'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
Some sounds of Paul Simon's "Graceland" album and I heard a little Bob Dillon in one song. I'm not very good at understanding lyrics but I'm even more confused now. Not on my buy list. As a big fan of Ed Sheeran's work I was nervous with this album coming up...would it be a flop? Has he lost his magic? Is 'Shape of You' and 'Castle On The Hill' the only good songs. Nope, nope and hell no! This whole album is great, totally worth the money and a joy to listen to! Love it! Another masterpiece from Mr Sheeran, a genius of observation that writes simply beautiful songs!
#
# Copyright (C) 2020 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
from datetime import datetime
import logging

from psycopg2.extras import execute_batch

from django.db import DEFAULT_DB_ALIAS, connections

from freppledb.boot import getAttributes
from freppledb.common.commands import PlanTaskRegistry, PlanTask
from freppledb.input.models import (
    Buffer,
    Calendar,
    CalendarBucket,
    Customer,
    Demand,
    Item,
    ItemSupplier,
    ItemDistribution,
    Location,
    Operation,
    OperationMaterial,
    OperationResource,
    Resource,
    ResourceSkill,
    SetupMatrix,
    SetupRule,
    Skill,
    Supplier,
)

logger = logging.getLogger(__name__)

# Default effectivity dates: values equal to these sentinels are exported
# as NULL, since they mean "always effective".
default_start = datetime(1971, 1, 1)
default_end = datetime(2030, 12, 31)

# Mapping of the numeric search-mode code in the C++ engine to its
# database representation.
map_search = {0: "PRIORITY", 1: "MINCOST", 2: "MINPENALTY", 3: "MINCOSTPENALTY"}


def SQL4attributes(attrs, with_on_conflict=True):
    """Snippet is used many times in this file.

    Returns the SQL fragments needed to extend an insert/upsert statement
    with the custom attribute columns in attrs:
      - extra column names (",col1,col2,...")
      - extra value placeholders (",%s,%s,...")
      - extra "col=excluded.col" assignments (only when with_on_conflict)
    """
    if with_on_conflict:
        return (
            "".join([",%s" % i for i in attrs]),
            ",%s" * len(attrs),
            "".join([",\n%s=excluded.%s" % (i, i) for i in attrs]),
        )
    else:
        return ("".join([",%s" % i for i in attrs]), ",%s" * len(attrs))


@PlanTaskRegistry.register
class cleanStatic(PlanTask):
    """Delete records of a given source that were not touched in this export.

    Only runs for partial exports (i.e. when a source is given). Records are
    considered stale when their lastmodified stamp differs from the export
    timestamp. The deletes are ordered to respect foreign key dependencies.
    """

    description = "Clean static data"
    sequence = 300

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        # Only relevant for a partial static export of a specific source
        if kwargs.get("exportstatic", False) and kwargs.get("source", None):
            return 1
        else:
            return -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        source = kwargs.get("source", None)
        with connections[database].cursor() as cursor:
            # Operation materials of stale operations, or stale themselves
            cursor.execute(
                """
                delete from operationmaterial
                where (source = %s and lastmodified <> %s)
                or operation_id in (
                   select name from operation
                   where operation.source = %s and operation.lastmodified <> %s
                   )
                """,
                (source, cls.timestamp, source, cls.timestamp),
            )
            cursor.execute(
                "delete from buffer where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            # Delivery operationplans of stale demands, then the demands
            cursor.execute(
                "delete from operationplan where demand_id in (select name from demand where source = %s and lastmodified <> %s)",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from demand where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from itemsupplier where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from itemdistribution where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            # Child operationplans first (owner_id is not null), then the rest:
            # deleting a parent before its children would violate the self-FK.
            cursor.execute(
                """
                delete from operationplan
                where owner_id is not null and (
                  (source = %s and lastmodified <> %s)
                  or operation_id in (
                    select name from operation
                    where operation.source = %s and operation.lastmodified <> %s
                    )
                  or supplier_id in (
                    select name from supplier
                    where source = %s and lastmodified <> %s
                    ))
                """,
                (source, cls.timestamp, source, cls.timestamp, source, cls.timestamp),
            )
            cursor.execute(
                """
                delete from operationplan
                where (source = %s and lastmodified <> %s)
                or operation_id in (
                  select name from operation
                  where operation.source = %s and operation.lastmodified <> %s
                  )
                or supplier_id in (
                  select name from supplier
                  where source = %s and lastmodified <> %s
                  )
                """,
                (source, cls.timestamp, source, cls.timestamp, source, cls.timestamp),
            )
            cursor.execute(
                """
                delete from operationresource
                where (source = %s and lastmodified <> %s)
                or operation_id in (
                  select name from operation
                  where operation.source = %s and operation.lastmodified <> %s
                  )
                """,
                (source, cls.timestamp, source, cls.timestamp),
            )
            # NOTE(review): the original code executed this delete twice
            # (a second identical statement appeared after the resourceskill
            # delete); the redundant duplicate has been removed.
            cursor.execute(
                "delete from operation where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from item where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from resourceskill where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from resource where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from location where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from calendarbucket where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from calendar where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from skill where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from setuprule where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from setupmatrix where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from customer where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )
            cursor.execute(
                "delete from supplier where source = %s and lastmodified <> %s",
                (source, cls.timestamp),
            )


@PlanTaskRegistry.register
class exportParameters(PlanTask):
    """Persist the planning engine's current date as a parameter."""

    description = ("Export static data", "Export parameters")
    sequence = (301, "exportstatic1", 1)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        # Only complete export should save the current date
        if kwargs.get("exportstatic", False) and not kwargs.get("source", None):
            return 1
        else:
            return -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        with connections[database].cursor() as cursor:
            # Update current date if the parameter already exists
            # If it doesn't exist, we want to continue using the system
            # clock for the next run.
            cursor.execute(
                "update common_parameter set value=%s, lastmodified=%s where name='currentdate'",
                (frepple.settings.current.strftime("%Y-%m-%d %H:%M:%S"), cls.timestamp),
            )


@PlanTaskRegistry.register
class exportCalendars(PlanTask):
    """Upsert all visible calendars into the calendar table."""

    description = ("Export static data", "Export calendars")
    sequence = (301, "exportstatic2", 1)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(Calendar)]

        def getData():
            for i in frepple.calendars():
                # Skip hidden/bucket calendars and non-matching sources
                if (
                    i.hidden
                    or i.source == "common_bucket"
                    or (source and source != i.source)
                ):
                    continue
                r = [i.name, round(i.default, 8), i.source, cls.timestamp]
                for a in attrs:
                    r.append(getattr(i, a, None))
                yield r

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """insert into calendar
                (name,defaultvalue,source,lastmodified%s)
                values(%%s,%%s,%%s,%%s%s)
                on conflict (name)
                do update set
                  defaultvalue=excluded.defaultvalue,
                  source=excluded.source,
                  lastmodified=excluded.lastmodified
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )


@PlanTaskRegistry.register
class exportLocations(PlanTask):
    """Upsert locations; the owner hierarchy is set in a second pass."""

    description = ("Export static data", "Export locations")
    sequence = (302, "exportstatic1", 1)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(Location)]

        def getData():
            for i in frepple.locations():
                if source and source != i.source:
                    continue
                r = [
                    i.name,
                    i.description,
                    i.available and i.available.name or None,
                    i.category,
                    i.subcategory,
                    i.source,
                    cls.timestamp,
                ]
                for a in attrs:
                    r.append(getattr(i, a, None))
                yield r

        def getOwners():
            # Second pass: owners can only be set once all rows exist
            for i in frepple.locations():
                if i.owner and (not source or source == i.source):
                    yield (i.owner.name, i.name)

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """insert into location
                (name,description,available_id,category,subcategory,source,lastmodified,owner_id%s)
                values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,null%s)
                on conflict (name)
                do update set
                  description=excluded.description,
                  available_id=excluded.available_id,
                  category=excluded.category,
                  subcategory=excluded.subcategory,
                  source=excluded.source,
                  lastmodified=excluded.lastmodified,
                  owner_id=excluded.owner_id
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )
            execute_batch(
                cursor, "update location set owner_id=%s where name=%s", getOwners()
            )


@PlanTaskRegistry.register
class exportItems(PlanTask):
    """Upsert items; the owner hierarchy is set in a second pass."""

    description = ("Export static data", "Export items")
    sequence = (302, "exportstatic2", 1)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(Item)]

        def getData():
            for i in frepple.items():
                if source and source != i.source:
                    continue
                r = [
                    i.name,
                    i.description,
                    round(i.cost, 8),
                    i.category,
                    i.subcategory,
                    # Item type is derived from the engine class
                    "make to order"
                    if isinstance(i, frepple.item_mto)
                    else "make to stock",
                    i.source,
                    cls.timestamp,
                ]
                for a in attrs:
                    r.append(getattr(i, a, None))
                yield r

        def getOwners():
            for i in frepple.items():
                if i.owner and (not source or source == i.source):
                    yield (i.owner.name, i.name)

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """insert into item
                (name,description,cost,category,subcategory,type,source,lastmodified,owner_id%s)
                values (%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,null%s)
                on conflict (name)
                do update set
                  description=excluded.description,
                  cost=excluded.cost,
                  category=excluded.category,
                  subcategory=excluded.subcategory,
                  type=excluded.type,
                  source=excluded.source,
                  lastmodified=excluded.lastmodified,
                  owner_id=excluded.owner_id
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )
            execute_batch(
                cursor, "update item set owner_id=%s where name=%s", getOwners()
            )


@PlanTaskRegistry.register
class exportOperations(PlanTask):
    """Upsert operations, excluding auto-generated purchase/distribution ops."""

    description = ("Export static data", "Export operations")
    sequence = (303, "exportstatic1", 1)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(Operation)]

        def getData():
            for i in frepple.operations():
                if (
                    i.hidden
                    or (source and source != i.source)
                    or isinstance(
                        i,
                        (
                            frepple.operation_itemsupplier,
                            frepple.operation_itemdistribution,
                        ),
                    )
                ):
                    continue
                r = [
                    i.name,
                    i.fence,
                    i.posttime,
                    round(i.size_minimum, 8),
                    round(i.size_multiple, 8),
                    # An "unbounded" maximum is stored as NULL
                    i.size_maximum < 9999999999999 and round(i.size_maximum, 8) or None,
                    i.__class__.__name__[10:],
                    i.duration
                    if isinstance(
                        i, (frepple.operation_fixed_time, frepple.operation_time_per)
                    )
                    else None,
                    i.duration_per
                    if isinstance(i, frepple.operation_time_per)
                    else None,
                    i.location and i.location.name or None,
                    round(i.cost, 8),
                    map_search[i.search],
                    i.description,
                    i.category,
                    i.subcategory,
                    i.source,
                    i.item.name if i.item else None,
                    i.priority if i.priority != 1 else None,
                    i.effective_start if i.effective_start != default_start else None,
                    i.effective_end if i.effective_end != default_end else None,
                    cls.timestamp,
                ]
                for a in attrs:
                    r.append(getattr(i, a, None))
                yield r

        def getOwners():
            for i in frepple.operations():
                if (
                    i.owner
                    and not i.hidden
                    and not i.owner.hidden
                    and (not source or source == i.source)
                    and not isinstance(
                        i,
                        (
                            frepple.operation_itemsupplier,
                            frepple.operation_itemdistribution,
                        ),
                    )
                ):
                    yield (i.owner.name, i.name)

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """
                insert into operation
                (name,fence,posttime,sizeminimum,sizemultiple,sizemaximum,type,
                duration,duration_per,location_id,cost,search,description,category,
                subcategory,source,item_id,priority,effective_start,effective_end,
                lastmodified,owner_id%s)
                values(%%s,%%s * interval '1 second',%%s * interval '1 second',%%s,%%s,
                %%s,%%s,%%s * interval '1 second',%%s * interval '1 second',%%s,%%s,%%s,
                %%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,null%s)
                on conflict (name)
                do update set
                  fence=excluded.fence,
                  posttime=excluded.posttime,
                  sizeminimum=excluded.sizeminimum,
                  sizemultiple=excluded.sizemultiple,
                  sizemaximum=excluded.sizemaximum,
                  type=excluded.type,
                  duration=excluded.duration,
                  duration_per=excluded.duration_per,
                  location_id=excluded.location_id,
                  cost=excluded.cost,
                  search=excluded.search,
                  description=excluded.description,
                  category=excluded.category,
                  subcategory=excluded.subcategory,
                  source=excluded.source,
                  item_id=excluded.item_id,
                  priority=excluded.priority,
                  effective_start=excluded.effective_start,
                  effective_end=excluded.effective_end,
                  lastmodified=excluded.lastmodified,
                  owner_id=excluded.owner_id
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )
            execute_batch(
                cursor, "update operation set owner_id=%s where name=%s", getOwners()
            )


@PlanTaskRegistry.register
class exportSetupMatrices(PlanTask):
    """Upsert setup matrices (rules are exported separately)."""

    description = ("Export static data", "Export setup matrices")
    sequence = (303, "exportstatic2", 1)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(SetupMatrix)]

        def getData():
            for i in frepple.setupmatrices():
                if source and source != i.source:
                    continue
                r = [i.name, i.source, cls.timestamp]
                for a in attrs:
                    r.append(getattr(i, a, None))
                yield r

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """insert into setupmatrix
                (name,source,lastmodified%s)
                values(%%s,%%s,%%s%s)
                on conflict (name)
                do update set
                  source=excluded.source,
                  lastmodified=excluded.lastmodified
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )


@PlanTaskRegistry.register
class exportResources(PlanTask):
    """Upsert resources; the owner hierarchy is set in a second pass."""

    description = ("Export static data", "Export resources")
    sequence = 304

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(Resource)]

        def getData():
            for i in frepple.resources():
                if i.hidden or (source and source != i.source):
                    continue
                r = [
                    i.name,
                    i.description,
                    i.maximum,
                    i.maximum_calendar.name if i.maximum_calendar else None,
                    i.location and i.location.name or None,
                    i.__class__.__name__[9:],
                    round(i.cost, 8),
                    i.maxearly,
                    i.setup,
                    i.setupmatrix.name if i.setupmatrix else None,
                    i.category,
                    i.subcategory,
                    i.efficiency,
                    i.efficiency_calendar.name if i.efficiency_calendar else None,
                    i.available.name if i.available else None,
                    i.constrained,
                    i.source,
                    cls.timestamp,
                ]
                for a in attrs:
                    r.append(getattr(i, a, None))
                yield r

        def getOwners():
            for i in frepple.resources():
                if not i.hidden and i.owner and (not source or source == i.source):
                    yield (i.owner.name, i.name)

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """insert into resource
                (name,description,maximum,maximum_calendar_id,location_id,type,cost,
                maxearly,setup,setupmatrix_id,category,subcategory,efficiency,
                efficiency_calendar_id,available_id,constrained,source,lastmodified,owner_id%s)
                values(
                %%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s,
                %%s,%%s,%%s,%%s,%%s,%%s,null%s)
                on conflict (name)
                do update set
                  description=excluded.description,
                  maximum=excluded.maximum,
                  maximum_calendar_id=excluded.maximum_calendar_id,
                  location_id=excluded.location_id,
                  type=excluded.type,
                  cost=excluded.cost,
                  maxearly=excluded.maxearly,
                  setup=excluded.setup,
                  setupmatrix_id=excluded.setupmatrix_id,
                  category=excluded.category,
                  subcategory=excluded.subcategory,
                  efficiency=excluded.efficiency,
                  efficiency_calendar_id=excluded.efficiency_calendar_id,
                  available_id=excluded.available_id,
                  constrained=excluded.constrained,
                  source=excluded.source,
                  lastmodified=excluded.lastmodified,
                  owner_id=excluded.owner_id
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )
            execute_batch(
                cursor, "update resource set owner_id=%s where name=%s", getOwners()
            )


@PlanTaskRegistry.register
class exportSetupRules(PlanTask):
    """Upsert the rules of every setup matrix."""

    description = ("Export static data", "Export setup matrix rules")
    sequence = (305, "exportstatic1", 1)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(SetupRule)]

        def getData():
            for m in frepple.setupmatrices():
                for i in m.rules:
                    if source and source != i.source:
                        continue
                    r = [
                        m.name,
                        i.priority,
                        i.fromsetup,
                        i.tosetup,
                        i.duration,
                        round(i.cost, 8),
                        i.resource.name if i.resource else None,
                        i.source,
                        cls.timestamp,
                    ]
                    for a in attrs:
                        r.append(getattr(i, a, None))
                    yield r

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """insert into setuprule
                (setupmatrix_id,priority,fromsetup,tosetup,duration,cost,resource_id,source,lastmodified%s)
                values(%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s%s)
                on conflict (setupmatrix_id, priority)
                do update set
                  fromsetup=excluded.fromsetup,
                  tosetup=excluded.tosetup,
                  duration=excluded.duration,
                  cost=excluded.cost,
                  resource_id=excluded.resource_id,
                  source=excluded.source,
                  lastmodified=excluded.lastmodified
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )


@PlanTaskRegistry.register
class exportSkills(PlanTask):
    """Upsert all skills."""

    description = ("Export static data", "Export skills")
    sequence = (305, "exportstatic1", 2)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(Skill)]

        def getData():
            for i in frepple.skills():
                if source and source != i.source:
                    continue
                r = [i.name, i.source, cls.timestamp]
                for a in attrs:
                    r.append(getattr(i, a, None))
                yield r

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """insert into skill
                (name,source,lastmodified%s)
                values(%%s,%%s,%%s%s)
                on conflict (name)
                do update set
                  source=excluded.source,
                  lastmodified=excluded.lastmodified
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )


@PlanTaskRegistry.register
class exportResourceSkills(PlanTask):
    """Upsert the resource-skill associations of every skill."""

    description = ("Export static data", "Export resource skills")
    sequence = (305, "exportstatic1", 3)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(ResourceSkill)]

        def getData():
            for s in frepple.skills():
                for i in s.resourceskills:
                    if source and source != i.source:
                        continue
                    r = [
                        i.effective_start
                        if i.effective_start != default_start
                        else None,
                        i.effective_end if i.effective_end != default_end else None,
                        i.priority,
                        i.source,
                        cls.timestamp,
                        i.resource.name,
                        s.name,
                    ]
                    for a in attrs:
                        r.append(getattr(i, a, None))
                    yield r

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """
                insert into resourceskill
                (effective_start,effective_end,priority,source,lastmodified,resource_id,skill_id%s)
                values(%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
                on conflict (resource_id, skill_id)
                do update set
                  effective_start=excluded.effective_start,
                  effective_end=excluded.effective_end,
                  priority=excluded.priority,
                  source=excluded.source,
                  lastmodified=excluded.lastmodified
                  %s
                """
                % SQL4attributes(attrs),
                getData(),
            )
@PlanTaskRegistry.register class exportOperationResources(PlanTask): description = ("Export static data", "Export operation resources") sequence = (305, "exportstatic1", 4) @classmethod def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs): return 1 if kwargs.get("exportstatic", False) else -1 @classmethod def run(cls, database=DEFAULT_DB_ALIAS, **kwargs): import frepple source = kwargs.get("source", None) attrs = [f[0] for f in getAttributes(OperationResource)] def getData(): for o in frepple.operations(): if o.hidden: continue for i in o.loads: if i.hidden or (source and source != i.source): continue r = [ i.operation.name, i.resource.name, i.effective_start if i.effective_start != default_start else None, i.effective_end if i.effective_end != default_end else None, round(i.quantity, 8), i.setup, i.name, i.priority, map_search[i.search] if map_search[i.search] != "PRIORITY" else None, i.source, i.skill.name if i.skill else None, cls.timestamp, ] for a in attrs: r.append(getattr(i, a, None)) yield r with connections[database].cursor() as cursor: execute_batch( cursor, """ insert into operationresource (operation_id,resource_id,effective_start,effective_end, quantity,setup,name,priority,search,source,skill_id,lastmodified%s) values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s%s) on conflict (operation_id, resource_id, effective_start) do update set effective_end=excluded.effective_end, quantity=excluded.quantity, setup=excluded.setup, name=excluded.name, priority=excluded.priority, search=excluded.search, skill_id=excluded.skill_id, source=excluded.source, lastmodified=excluded.lastmodified %s """ % SQL4attributes(attrs), getData(), ) @PlanTaskRegistry.register class exportCustomers(PlanTask): description = ("Export static data", "Export customers") sequence = (305, "exportstatic2", 1) @classmethod def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs): return 1 if kwargs.get("exportstatic", False) else -1 @classmethod def run(cls, database=DEFAULT_DB_ALIAS, 
**kwargs): import frepple source = kwargs.get("source", None) attrs = [f[0] for f in getAttributes(Customer)] def getData(): for i in frepple.customers(): if source and source != i.source: continue r = [ i.name, i.description, i.category, i.subcategory, i.source, cls.timestamp, ] for a in attrs: r.append(getattr(i, a, None)) yield r def getOwners(): for i in frepple.customers(): if i.owner and (not source or source == i.source): yield (i.owner.name, i.name) with connections[database].cursor() as cursor: execute_batch( cursor, """ insert into customer (name,description,category,subcategory,source,lastmodified,owner_id%s) values(%%s,%%s,%%s,%%s,%%s,%%s,null%s) on conflict (name) do update set description=excluded.description, category=excluded.category, subcategory=excluded.subcategory, source=excluded.source, lastmodified=excluded.lastmodified, owner_id=excluded.owner_id %s """ % SQL4attributes(attrs), getData(), ) execute_batch( cursor, "update customer set owner_id=%s where name=%s", getOwners() ) @PlanTaskRegistry.register class exportDemands(PlanTask): description = ("Export static data", "Export sales orders") sequence = (305, "exportstatic2", 2) @classmethod def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs): return 1 if kwargs.get("exportstatic", False) else -1 @classmethod def run(cls, database=DEFAULT_DB_ALIAS, **kwargs): import frepple source = kwargs.get("source", None) attrs = [f[0] for f in getAttributes(Demand)] def getData(): for i in frepple.demands(): if ( not isinstance(i, frepple.demand_default) or i.hidden or (source and source != i.source) ): continue r = [ i.name, i.due, round(i.quantity, 8), i.priority, i.item.name, i.location.name if i.location else None, i.operation.name if i.operation and not i.operation.hidden else None, i.customer.name if i.customer else None, round(i.minshipment, 8), i.maxlateness, i.category, i.subcategory, i.source, i.description, cls.timestamp, i.status, ] for a in attrs: r.append(getattr(i, a, None)) yield r def 
getOwners(): for i in frepple.demands(): if ( i.owner and isinstance(i, frepple.demand_default) and not i.hidden and (not source or source == i.source) ): yield (i.owner.name, i.name) with connections[database].cursor() as cursor: execute_batch( cursor, """ insert into demand (name,due,quantity,priority,item_id,location_id,operation_id,customer_id, minshipment,maxlateness,category,subcategory,source,description,lastmodified, status,owner_id%s) values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s,%%s,%%s,null%s) on conflict (name) do update set due=excluded.due, quantity=excluded.quantity, priority=excluded.priority, item_id=excluded.item_id, location_id=excluded.location_id, operation_id=excluded.operation_id, customer_id=excluded.customer_id, minshipment=excluded.minshipment, maxlateness=excluded.maxlateness, category=excluded.category, description=excluded.description, source=excluded.source, lastmodified=excluded.lastmodified, status=excluded.status, owner_id=excluded.owner_id %s """ % SQL4attributes(attrs), getData(), ) execute_batch( cursor, "update demand set owner_id=%s where name=%s", getOwners() ) @PlanTaskRegistry.register class exportCalendarBuckets(PlanTask): description = ("Export static data", "Export calendar buckets") sequence = (305, "exportstatic3", 1) @classmethod def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs): return 1 if kwargs.get("exportstatic", False) else -1 @classmethod def run(cls, database=DEFAULT_DB_ALIAS, **kwargs): import frepple source = kwargs.get("source", None) attrs = [f[0] for f in getAttributes(CalendarBucket)] def int_to_time(i): hour = i // 3600 i -= hour * 3600 minute = i // 60 i -= minute * 60 second = i if hour >= 24: hour -= 24 return "%s:%s:%s" % (hour, minute, second) def getData(cursor): cursor.execute("SELECT max(id) FROM calendarbucket") cnt = cursor.fetchone()[0] or 1 for c in frepple.calendars(): if ( c.hidden or c.source == "common_bucket" or (source and source != c.source) 
): continue for i in c.buckets: cnt += 1 r = [ c.name, i.start if i.start != default_start else None, i.end if i.end != default_end else None, cnt, i.priority, round(i.value, 8), True if (i.days & 1) else False, True if (i.days & 2) else False, True if (i.days & 4) else False, True if (i.days & 8) else False, True if (i.days & 16) else False, True if (i.days & 32) else False, True if (i.days & 64) else False, int_to_time(i.starttime), int_to_time(i.endtime - 1), i.source, cls.timestamp, ] for a in attrs: r.append(getattr(i, a, None)) yield r with connections[database].cursor() as cursor: if source: cursor.execute("delete from calendarbucket where source = %s", [source]) else: cursor.execute("delete from calendarbucket") execute_batch( cursor, """ insert into calendarbucket (calendar_id,startdate,enddate,id,priority,value, sunday,monday,tuesday,wednesday,thursday,friday,saturday, starttime,endtime,source,lastmodified%s) values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s%s) """ % SQL4attributes(attrs, with_on_conflict=False), getData(cursor), ) @PlanTaskRegistry.register class exportBuffers(PlanTask): description = ("Export static data", "Export buffers") sequence = (305, "exportstatic4", 1) @classmethod def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs): return 1 if kwargs.get("exportstatic", False) else -1 @classmethod def run(cls, database=DEFAULT_DB_ALIAS, **kwargs): import frepple source = kwargs.get("source", None) attrs = [f[0] for f in getAttributes(Buffer)] def getData(): for i in frepple.buffers(): if i.hidden or (source and source != i.source): continue r = [ i.item.name, i.location.name, i.batch or None, i.description, round(i.onhand, 8), round(i.minimum, 8), i.minimum_calendar.name if i.minimum_calendar else None, i.__class__.__name__[7:], i.category, i.subcategory, i.source, cls.timestamp, ] for a in attrs: r.append(getattr(i, a, None)) yield r with connections[database].cursor() as cursor: execute_batch( cursor, """ insert 
into buffer (item_id,location_id,batch,description,onhand,minimum,minimum_calendar_id, type,category,subcategory,source,lastmodified%s) values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s%s) on conflict (location_id, item_id, batch) do update set description=excluded.description, onhand=excluded.onhand, minimum=excluded.minimum, minimum_calendar_id=excluded.minimum_calendar_id, type=excluded.type, category=excluded.category, subcategory=excluded.subcategory, source=excluded.source, lastmodified=excluded.lastmodified %s """ % SQL4attributes(attrs), getData(), ) @PlanTaskRegistry.register class exportOperationMaterials(PlanTask): description = ("Export static data", "Export operation material") sequence = (305, "exportstatic4", 2) @classmethod def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs): return 1 if kwargs.get("exportstatic", False) else -1 @classmethod def run(cls, database=DEFAULT_DB_ALIAS, **kwargs): import frepple source = kwargs.get("source", None) attrs = [f[0] for f in getAttributes(OperationMaterial)] def getData(): for o in frepple.operations(): if o.hidden: continue for i in o.flows: if i.hidden or (source and source != i.source): continue r = [ i.operation.name, i.buffer.item.name, i.effective_start if i.effective_start != default_start else None, round(i.quantity, 8), i.type[5:], i.effective_end if i.effective_end != default_end else None, i.name, i.priority, map_search[i.search] if map_search[i.search] != "PRIORITY" else None, i.source, round(i.transferbatch, 8) if isinstance(i, frepple.flow_transfer_batch) else None, i.offset, cls.timestamp, ] for a in attrs: r.append(getattr(i, a, None)) yield r with connections[database].cursor() as cursor: execute_batch( cursor, """ insert into operationmaterial (operation_id,item_id,effective_start,quantity,type,effective_end, name,priority,search,source,transferbatch,"offset",lastmodified%s) values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s%s) on conflict (operation_id, 
item_id, effective_start) do update set quantity=excluded.quantity, type=excluded.type, effective_end=excluded.effective_end, name=excluded.name, priority=excluded.priority, search=excluded.search, source=excluded.source, transferbatch=excluded.transferbatch, "offset"=excluded."offset", lastmodified=excluded.lastmodified %s """ % SQL4attributes(attrs), getData(), ) @PlanTaskRegistry.register class exportSuppliers(PlanTask): description = ("Export static data", "Export suppliers") sequence = (305, "exportstatic4", 3) @classmethod def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs): return 1 if kwargs.get("exportstatic", False) else -1 @classmethod def run(cls, database=DEFAULT_DB_ALIAS, **kwargs): import frepple source = kwargs.get("source", None) attrs = [f[0] for f in getAttributes(Supplier)] def getData(): for i in frepple.suppliers(): if source and source != i.source: continue r = [ i.name, i.description, i.category, i.subcategory, i.source, cls.timestamp, ] for a in attrs: r.append(getattr(i, a, None)) yield r def getOwners(): for i in frepple.suppliers(): if i.owner and (not source or source == i.source): yield (i.owner.name, i.name) with connections[database].cursor() as cursor: execute_batch( cursor, """ insert into supplier (name,description,category,subcategory,source,lastmodified,owner_id%s) values(%%s,%%s,%%s,%%s,%%s,%%s,null%s) on conflict (name) do update set description=excluded.description, category=excluded.category, subcategory=excluded.subcategory, source=excluded.source, lastmodified=excluded.lastmodified, owner_id=excluded.owner_id %s """ % SQL4attributes(attrs), getData(), ) execute_batch( cursor, "update supplier set owner_id=%s where name=%s", getOwners() ) @PlanTaskRegistry.register class exportItemSuppliers(PlanTask): description = ("Export static data", "Export item suppliers") sequence = (305, "exportstatic4", 4) @classmethod def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs): return 1 if kwargs.get("exportstatic", False) else -1 
    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        """Export all item-supplier records from the frePPLe engine into the
        ``itemsupplier`` database table (upsert on the natural key).
        """
        import frepple

        # Optional filter: only export objects tagged with this source string.
        source = kwargs.get("source", None)
        # Custom attribute columns configured on the ItemSupplier model.
        attrs = [f[0] for f in getAttributes(ItemSupplier)]

        def getData():
            # Generator yielding one parameter row per visible item-supplier.
            for s in frepple.suppliers():
                if source and source != s.source:
                    continue
                for i in s.itemsuppliers:
                    if i.hidden or (source and source != i.source):
                        continue
                    r = [
                        i.item.name,
                        i.location.name if i.location else None,
                        i.supplier.name,
                        # Default effective dates are stored as NULL.
                        i.effective_start
                        if i.effective_start != default_start
                        else None,
                        i.leadtime,
                        i.size_minimum,
                        i.size_multiple,
                        i.cost,
                        i.priority,
                        i.effective_end if i.effective_end != default_end else None,
                        i.resource.name if i.resource else None,
                        i.resource_qty,
                        i.source,
                        cls.timestamp,
                    ]
                    for a in attrs:
                        r.append(getattr(i, a, None))
                    yield r

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                # leadtime is sent as seconds and cast to an interval.
                """
                insert into itemsupplier
                (item_id,location_id,supplier_id,effective_start,leadtime,sizeminimum,
                sizemultiple,cost,priority,effective_end,resource_id,resource_qty,source,
                lastmodified%s)
                values(%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
                on conflict (item_id, location_id, supplier_id, effective_start)
                do update set
                leadtime=excluded.leadtime,
                sizeminimum=excluded.sizeminimum,
                sizemultiple=excluded.sizemultiple,
                cost=excluded.cost,
                priority=excluded.priority,
                effective_end=excluded.effective_end,
                resource_id=excluded.resource_id,
                resource_qty=excluded.resource_qty,
                source=excluded.source,
                lastmodified=excluded.lastmodified
                %s
                """
                % SQL4attributes(attrs),
                getData(),
            )


@PlanTaskRegistry.register
class exportItemDistributions(PlanTask):
    """Plan task exporting frePPLe item-distribution records to the database."""

    description = ("Export static data", "Export item distributions")
    sequence = (305, "exportstatic4", 5)

    @classmethod
    def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        # Only scheduled (positive weight) when static export was requested.
        return 1 if kwargs.get("exportstatic", False) else -1

    @classmethod
    def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        """Upsert all visible item distributions into ``itemdistribution``."""
        import frepple

        source = kwargs.get("source", None)
        attrs = [f[0] for f in getAttributes(ItemDistribution)]

        def getData():
            for s in frepple.items():
                if s.hidden or (source and source != s.source):
                    continue
                for i in s.itemdistributions:
                    if i.hidden or (source and source != i.source):
                        continue
                    # NOTE(review): the destination name feeds the
                    # location_id column below — presumably location_id
                    # means "destination" in this schema; confirm.
                    r = [
                        i.item.name,
                        i.destination.name if i.destination else None,
                        i.origin.name,
                        i.effective_start
                        if i.effective_start != default_start
                        else None,
                        i.leadtime,
                        i.size_minimum,
                        i.size_multiple,
                        i.cost,
                        i.priority,
                        i.effective_end if i.effective_end != default_end else None,
                        i.source,
                        cls.timestamp,
                    ]
                    for a in attrs:
                        r.append(getattr(i, a, None))
                    yield r

        with connections[database].cursor() as cursor:
            execute_batch(
                cursor,
                """
                insert into itemdistribution
                (item_id,location_id,origin_id,effective_start,leadtime,sizeminimum,
                sizemultiple,cost,priority,effective_end,source,lastmodified%s)
                values(%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
                on conflict (item_id, location_id, origin_id, effective_start)
                do update set
                leadtime=excluded.leadtime,
                sizeminimum=excluded.sizeminimum,
                sizemultiple=excluded.sizemultiple,
                cost=excluded.cost,
                priority=excluded.priority,
                effective_end=excluded.effective_end,
                source=excluded.source,
                lastmodified=excluded.lastmodified
                %s
                """
                % SQL4attributes(attrs),
                getData(),
            )
as a quick and handy guide! This is my website designed for New Buyers, or a refresher for previous Buyers. I've included several articles and videos that will help to make this daunting task MUCH more Buyer-Friendly. I'll try to answer any concerns that you may have about the process here, so when you have ANY questions about this process, don't hesitate to contact me! Why use an ABR® like me? And what is an "ABR®"?
"""Test how the apparent randomness of a pseudorandom digit stream changes
when it is re-shuffled ("nested") a varying number of times.

Randomness is approximated by compressibility: the closer the
zlib-compressed size is to the original size, the less structure the data
has.  E.g. randomly selecting from a randomly shuffled list would be two
degrees of nesting.

Why use compression? Check this out:
https://csclub.uwaterloo.ca/~mtahmed/work_reports/mtahmed_workreport_s12.pdf

BUG FIXES vs. the original script:
* ``random.shuffle(list(uncompressedStr))`` shuffled a throwaway list, so
  the nested shuffling never actually happened; it is now really applied.
* ``print``/``zlib.compress`` updated for Python 3 (compress needs bytes).
"""

import random
import zlib

DIGITS = "0123456789"


def random_digit_string(length):
    """Return a string of `length` uniformly random decimal digits."""
    return "".join(random.choice(DIGITS) for _ in range(length))


def nested_shuffle(text, times):
    """Return `text` with its characters shuffled `times` times.

    Shuffling a uniform random string extra times should not change its
    statistics — that is exactly the hypothesis being tested.
    """
    chars = list(text)
    for _ in range(times):
        random.shuffle(chars)
    return "".join(chars)


def compression_ratio(text):
    """Return compressed-size / original-size for `text` (ASCII digits)."""
    data = text.encode("ascii")
    return float(len(zlib.compress(data))) / float(len(data))


def measure_randomness(iterations, length=200000):
    """Compression ratio of a random digit string shuffled `iterations` times."""
    return compression_ratio(nested_shuffle(random_digit_string(length), iterations))


if __name__ == "__main__":
    # Same sweep as the original script: 0 through 100 nesting levels.
    for iterations in range(101):
        print(measure_randomness(iterations))
The very first highway bridge made with Ductal® is the Shepherd's Gully Creek Bridge, 150 km north of Sydney, Australia. Built in 2002, to replace an old timber bridge, it spans 15 m x 21 m wide and features 16 Ductal® girders supporting a reinforced concrete slab that was poured directly into permanent formwork also made of Ductal®. The originality of this structure (studied and validated by the University of New South Wales) blends exceptional resistance to weather and traffic with a particularly light structure: 280 Kg/lineal meter for the girders and a thickness of only 25 mm for the permanent Ductal® formwork. Designed and built by VSL-Australia, the Shepherd's Gully Creek Bridge offers superior durability, resistance to the elements and minimal maintenance requirements.
# Django imports
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.sessions.backends.db import SessionStore

# App imports
from .. import user_successfully_created_msg, referrer_url_session_key, referring_user_id_session_key
from ..models import SocialLaunchProfile

# Test imports
from .util import BaseTestCase

# NOTE(review): counts below start at 1 user / 0 profiles, so BaseTestCase
# presumably creates one user (self.user1) in setUp — confirm in .util.


class IndexTestCase(BaseTestCase):
    """Tests for the social-launch landing (sign-up) view."""

    def test_get(self):
        # Plain GET renders the page.
        response = self.client.get(reverse('social_launch_index'))
        self.assertEqual(response.status_code, 200)

    def test_get_with_referrer(self):
        # The HTTP referrer is captured into the session for later use.
        referrer_url = 'http://facebook.com'
        response = self.client.get(reverse('social_launch_index'), HTTP_REFERER=referrer_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.client.session[referrer_url_session_key], referrer_url)

    def test_post_success_creates_new_user(self):
        # Posting a valid email creates a password-less User plus a
        # SocialLaunchProfile, then redirects to that user's referral page.
        post_data = {'email' : 'foo@example.com'}
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data, follow=True)
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEquals(len(users), 2)
        self.assertEquals(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEquals(user.email, post_data['email'])
        self.assertEquals(user.username, post_data['email'])
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEquals(slp.user, user)
        self.assertEquals(slp.referrer_url, '')
        self.assertEquals(slp.referring_user, None)

    def test_post_success_creates_new_user_with_referrer(self):
        # A referrer URL stored in the session is persisted on the profile.
        referrer_url = 'http://facebook.com'
        post_data = {'email' : 'foo@example.com'}
        session = SessionStore()
        session[referrer_url_session_key] = referrer_url
        session[referring_user_id_session_key] = ''
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data, follow=True)
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEquals(len(users), 2)
        self.assertEquals(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEquals(user.email, post_data['email'])
        self.assertEquals(user.username, post_data['email'])
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEquals(slp.user, user)
        self.assertEquals(slp.referrer_url, referrer_url)
        self.assertEquals(slp.referring_user, None)

    def test_post_fails_invalid_email(self):
        # Malformed email: nothing is created, the form re-renders.
        post_data = {'email' : 'fooexample.com'}
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data)
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, user_successfully_created_msg)

    def test_post_fails_invalid_email_with_referrer(self):
        # On a failed post the referrer must stay in the session for retry.
        referrer_url = 'http://facebook.com'
        post_data = {'email' : 'fooexample.com'}
        session = SessionStore()
        session[referrer_url_session_key] = referrer_url
        session[referring_user_id_session_key] = ''
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data)
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, user_successfully_created_msg)
        self.assertEqual(self.client.session[referrer_url_session_key], referrer_url)

    def test_post_fails_no_email(self):
        # Missing email field: nothing is created.
        post_data = {}
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_index'), post_data)
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, user_successfully_created_msg)


class ReferralTestCase(BaseTestCase):
    """Tests for the per-user referral sign-up view."""

    def test_get_success(self):
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}))
        self.assertEqual(response.status_code, 200)

    def test_get_fails_invalid_id(self):
        # Non-numeric id must 404, not crash.
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : 'foo'}))
        self.assertEqual(response.status_code, 404)

    def test_get_fails_no_such_user(self):
        response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : 1000}))
        self.assertEqual(response.status_code, 404)

    def test_post_success_creates_new_user(self):
        # Valid referring user id in the session ends up on the new profile.
        post_data = {'email' : 'foo@example.com'}
        session = SessionStore()
        session[referring_user_id_session_key] = self.user1.id
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}), post_data, follow=True)
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEquals(len(users), 2)
        self.assertEquals(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEquals(user.email, post_data['email'])
        self.assertEquals(user.username, post_data['email'])
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEquals(slp.user, user)
        self.assertEquals(slp.referrer_url, '')
        self.assertEquals(slp.referring_user, self.user1)

    def test_post_success_creates_new_user_bad_referring_used_id(self):
        # A dangling referring-user id degrades gracefully to no referrer.
        post_data = {'email' : 'foo@example.com'}
        session = SessionStore()
        session[referring_user_id_session_key] = 1000
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.assertEqual(User.objects.count(), 1)
        self.assertEqual(SocialLaunchProfile.objects.count(), 0)
        response = self.client.post(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}), post_data, follow=True)
        users = User.objects.all()
        slps = SocialLaunchProfile.objects.all()
        self.assertEquals(len(users), 2)
        self.assertEquals(len(slps), 1)
        user = users[1]
        slp = slps[0]
        self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
        self.assertEquals(user.email, post_data['email'])
        self.assertEquals(user.username, post_data['email'])
        self.assertFalse(user.has_usable_password())
        self.assertContains(response, user_successfully_created_msg)
        self.assertEquals(slp.user, user)
        self.assertEquals(slp.referrer_url, '')
        self.assertEquals(slp.referring_user, None)
In honor of Oktoberfest celebrations in Munich and around the world, we're happy to share with our fellow beer aficionados that yes, there is indeed a 'Wiesn Nebel', which would translate as 'Oktoberfest Nebula' or for some, it is the giant space stein known as the 'Beer Nebula'. Well, the cosmic keg is actually an interstellar gas cloud, but we're going with Wiesn Nebel...because it's Oktoberfest. In Munich, Oktoberfest is known as "Wiesn" after the fairgrounds where the event takes place, Theresienwiese. Add that fact to the well known gas cloud near the Aquila constellation which is made up primarily of ethyl alcohol - the same alcohol found in beer, wine, and liquor - and you get what we're calling the "Wiesn Nebel". Located 10,000 light-years away and over 1,000 times the diameter of our solar system, the nebula could serve up over 400 trillion pints of beer. Intergalactic alcohol was first discovered in 1975 by UCLA astronomer Ben M. Zuckerman. Our massive Wiesn Nebel was later discovered in 1995 by British astronomers Tom Millar, Geoffrey MacDonald and Rolf Habing. Try remembering that after several Maß at Munich Oktoberfest! Cocktail Pairing: easy...your favorite Oktoberfest beer! Brown, M. 1995. Alcohol-Laden Cloud Holds the Story of a Star. NW Times. Miller et al. 1995. The detection of hot ethanol in G34.3+0.15. Monthly Notices of the Royal Astronomical Society.
'''
    XBMC LCDproc addon
    Copyright (C) 2012 Team XBMC

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import codecs

# encmap_hd44780_a00 / encmap_hd44780_a02 come from these star imports.
from charset_map_hd44780_a00 import *
from charset_map_hd44780_a02 import *


class HD44780_Codec(codecs.Codec):
    """Stateless codec mapping unicode text to HD44780 LCD character ROMs."""

    def encode_a00(self, input, errors='strict'):
        # Encode via the ROM variant A00 character map.
        return codecs.charmap_encode(input, errors, encmap_hd44780_a00)

    def encode_a02(self, input, errors='strict'):
        # Encode via the ROM variant A02 character map.
        return codecs.charmap_encode(input, errors, encmap_hd44780_a02)

    def decode(self, input, errors='strict'):
        # Decoding is intentionally unimplemented (returns None).
        pass


class HD44780_IncrementalEncoder_a00(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encmap_hd44780_a00)[0]


class HD44780_IncrementalEncoder_a02(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encmap_hd44780_a02)[0]


class HD44780_IncrementalDecoder(codecs.IncrementalDecoder):
    # No incremental decoding support.
    pass


class HD44780_StreamWriter(HD44780_Codec, codecs.StreamWriter):
    pass


class HD44780_StreamReader(HD44780_Codec, codecs.StreamReader):
    pass


def charset_hd44780(name):
    """Codec search function for the 'hd44780-a00' / 'hd44780-a02' encodings.

    Returns a ``codecs.CodecInfo`` for a recognised name, else ``None`` —
    the contract expected by ``codecs.register()``.
    """
    if name == "hd44780-a00":
        return codecs.CodecInfo(
            name = "hd44780-a00",
            encode = HD44780_Codec().encode_a00,
            decode = HD44780_Codec().decode,
            incrementalencoder = HD44780_IncrementalEncoder_a00,
            incrementaldecoder = HD44780_IncrementalDecoder,
            streamreader = HD44780_StreamReader,
            streamwriter = HD44780_StreamWriter,
        )
    elif name == "hd44780-a02":
        return codecs.CodecInfo(
            name = "hd44780-a02",
            encode = HD44780_Codec().encode_a02,
            decode = HD44780_Codec().decode,
            incrementalencoder = HD44780_IncrementalEncoder_a02,
            incrementaldecoder = HD44780_IncrementalDecoder,
            streamreader = HD44780_StreamReader,
            streamwriter = HD44780_StreamWriter,
        )
    else:
        return None
We keep stocks of baghouse filters in Dubai. We can supply and install baghouse filters in your factory. We also fabricate baghouse filter houses. For excellent prices on baghouse filters or complete baghouses, contact our baghouse filter design engineer. We offer complete baghouse filter design and installation services in Dubai, Abu Dhabi and Sharjah.
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator, clone
from sklearn.linear_model import LogisticRegression
from mne.parallel import parallel_func
from nose.tools import assert_true


class _BaseEstimator(BaseEstimator, TransformerMixin):
    """Minimal transformer base: no-op fit, fit_transform = fit + transform."""

    def fit(self, X, y=None):
        return self

    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X)


def baseline(X, mode, tslice):
    """Baseline-correct X along its last (time) axis, in place.

    The baseline statistics (mean/std) are computed on X[..., tslice].
    NOTE(review): X is modified in place and also returned — callers that
    need the original must pass a copy.
    """
    if X.shape[-1] > 0:
        mean = np.mean(X[..., tslice], axis=-1)[..., None]
    else:
        mean = 0  # otherwise we get an ugly nan
    if mode == 'mean':
        X -= mean
    if mode == 'logratio':
        X /= mean
        X = np.log10(X)  # a value of 1 means 10 times bigger
    if mode == 'ratio':
        X /= mean
    elif mode == 'zscore':
        std = np.std(X[..., tslice], axis=-1)[..., None]
        X -= mean
        X /= std
    elif mode == 'percent':
        X -= mean
        X /= mean
    elif mode == 'zlogratio':
        X /= mean
        X = np.log10(X)
        std = np.std(X[..., tslice], axis=-1)[..., None]
        X /= std
    return X


class EpochsBaseliner(_BaseEstimator):
    """Apply baseline() to 3D epochs data (epochs x channels x times)."""

    def __init__(self, tslice=None, mode='mean'):
        self.mode = mode
        self.tslice = slice(None) if tslice is None else tslice
        assert_true(self.mode in ['mean', 'logratio', 'ratio', 'zscore',
                                  'percent', 'zlogratio'])
        assert_true(isinstance(self.tslice, (slice, int)))

    def transform(self, X):
        return baseline(X, self.mode, self.tslice)


class TimeFreqBaseliner(_BaseEstimator):
    """Apply baseline() to time-frequency data (baseline on last axis)."""

    def __init__(self, tslice=None, mode='mean'):
        self.mode = mode
        self.tslice = slice(None) if tslice is None else tslice
        assert_true(self.mode in ['mean', 'logratio', 'ratio', 'zscore',
                                  'percent', 'zlogratio'])

    def transform(self, X):
        return baseline(X, self.mode, self.tslice)


class TimePadder(_BaseEstimator):
    """Pad n_sample constant (or median) samples before and after each epoch."""

    def __init__(self, n_sample, value=0.):
        self.n_sample = n_sample
        assert_true(isinstance(self.n_sample, int))
        self.value = value
        assert_true(isinstance(value, (int, float)) or (value == 'median'))

    def transform(self, X):
        # Per-(epoch, channel) padding value.
        if self.value == 'median':
            coefs = np.median(X, axis=2)
        else:
            coefs = self.value * np.ones(X.shape[:2])
        # Replicate the value n_sample times along a new time axis.
        coefs = np.tile(coefs, [self.n_sample, 1, 1]).transpose([1, 2, 0])
        X = np.concatenate((coefs, X, coefs), axis=2)
        return X

    def inverse_transform(self, X):
        # Strip the padding added by transform().
        X = X[:, :, self.n_sample:-self.n_sample]
        return X


class TimeSelector(_BaseEstimator):
    """Select a slice of the time axis of 3D epochs data."""

    def __init__(self, tslice):
        self.tslice = tslice
        assert_true(isinstance(self.tslice, (slice, int)))

    def fit_transform(self, X, y=None):
        return self.transform(X)

    def transform(self, X):
        X = X[:, :, self.tslice]
        return X


class TimeFreqSelector(_BaseEstimator):
    """Select time and/or frequency slices of 4D time-frequency data."""

    def __init__(self, tslice=None, fslice=None):
        self.tslice = slice(None) if tslice is None else tslice
        self.fslice = slice(None) if fslice is None else fslice
        assert_true(isinstance(self.tslice, (slice, int)))
        assert_true(isinstance(self.fslice, (slice, int)))

    def fit_transform(self, X, y=None):
        return self.transform(X)

    def transform(self, X):
        X = X[:, :, :, self.tslice]
        X = X[:, :, self.fslice, :]
        return X


class MyXDawn(_BaseEstimator):
    """Wrapper for pyriemann Xdawn, robust to flat (zero-variance) channels.

    Will eventually need to clean both MNE and pyriemann with refactorings.
    """

    def __init__(self, n_filter=4, estimator='scm'):
        from pyriemann.estimation import Xdawn
        self.n_filter = n_filter
        assert_true(isinstance(self.n_filter, int))
        self.estimator = estimator
        assert_true(isinstance(estimator, str))
        self._xdawn = Xdawn(nfilter=n_filter, estimator=estimator)

    def fit(self, X, y):
        # only apply on channels who std > 0 across time on at least one trial
        self.picks_ = np.where(np.mean(np.std(X, axis=2) ** 2, axis=0))[0]
        self._xdawn.fit(X[:, self.picks_, :], y)
        return self

    def transform(self, X):
        return self._xdawn.transform(X[:, self.picks_, :])

    def fit_transform(self, X, y=None):
        self.fit(X, y)
        return self.transform(X)


class SpatialFilter(_BaseEstimator):
    """Fit/apply an unsupervised spatial estimator with trials folded as time."""

    def __init__(self, estimator):
        self.estimator = estimator
        assert_true(isinstance(estimator, TransformerMixin))

    def fit(self, X, y=None):
        n_epoch, n_chan, n_time = X.shape
        # trial as time
        X = np.transpose(X, [1, 0, 2]).reshape([n_chan, n_epoch * n_time]).T
        self.estimator.fit(X)
        return self

    def fit_transform(self, X, y=None):
        self.fit(X)
        return self.transform(X)

    def transform(self, X):
        n_epoch, n_chan, n_time = X.shape
        # trial as time
        X = np.transpose(X, [1, 0, 2]).reshape([n_chan, n_epoch * n_time]).T
        X = self.estimator.transform(X)
        # Fold back to (epochs, components, times).
        X = np.reshape(X.T, [-1, n_epoch, n_time]).transpose([1, 0, 2])
        return X


class Reshaper(_BaseEstimator):
    """Transpose, concatenate and/or reshape data.

    Parameters
    ----------
    concatenate : int | None
        Reshaping feature dimension e.g. np.concatenate(X, axis=concatenate).
        Defaults to None.
    transpose : array of int, shape(1 + n_dims) | None
        Reshaping feature dimension e.g. X.transpose(transpose).
        Defaults to None.
    reshape : array, shape(n_dims) | None
        Reshaping feature dimension e.g. X.reshape(np.r_[len(X), shape]).
        Defaults to -1 if concatenate or transpose is None, else defaults
        to None.
    """

    def __init__(self, reshape=None, transpose=None, concatenate=None,
                 verbose=False):
        if (reshape is None) and (transpose is None) and (concatenate is None):
            reshape = [-1]
        self.reshape = reshape
        self.transpose = transpose
        self.concatenate = concatenate
        self.verbose = verbose

    def fit(self, X, y=None):
        # Remember the incoming feature shape (for verbose reporting).
        self.shape_ = X.shape[1:]
        return self

    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X)

    def transform(self, X, y=None):
        if self.transpose is not None:
            X = X.transpose(self.transpose)
        if self.concatenate:
            X = np.concatenate(X, self.concatenate)
        if self.reshape is not None:
            X = np.reshape(X, np.hstack((X.shape[0], self.reshape)))
        if self.verbose:
            print(self.shape_, '->', (X.shape[1:]))
        return X


class LightTimeDecoding(_BaseEstimator):
    """Fit one estimator per time sample, optionally in parallel."""

    def __init__(self, estimator=None, method='predict', n_jobs=1):
        self.estimator = (LogisticRegression() if estimator is None
                          else estimator)
        self.method = method
        assert_true(self.method in ['predict', 'predict_proba'])
        assert_true(hasattr(self.estimator, method))
        self.n_jobs = n_jobs
        assert_true(isinstance(self.n_jobs, int))

    def fit_transform(self, X, y):
        return self.fit(X, y).transform(X)

    def fit(self, X, y):
        self.estimators_ = list()
        parallel, p_func, n_jobs = parallel_func(_fit, self.n_jobs)
        estimators = parallel(
            p_func(self.estimator, split, y)
            for split in np.array_split(X, n_jobs, axis=2))
        self.estimators_ = np.concatenate(estimators, 0)
        return self

    def transform(self, X):
        parallel, p_func, n_jobs = parallel_func(_predict_decod, self.n_jobs)
        X_splits = np.array_split(X, n_jobs, axis=2)
        est_splits = np.array_split(self.estimators_, n_jobs)
        y_pred = parallel(
            p_func(est_split, x_split, self.method)
            for (est_split, x_split) in zip(est_splits, X_splits))
        if n_jobs > 1:
            y_pred = np.concatenate(y_pred, axis=1)
        else:
            y_pred = y_pred[0]
        return y_pred

    def predict(self, X):
        return self.transform(X)

    def predict_proba(self, X):
        return self.transform(X)


def _fit(estimator, X, y):
    # Fit one clone of `estimator` per time sample of X.
    estimators_ = list()
    for ii in range(X.shape[2]):
        est = clone(estimator)
        est.fit(X[:, :, ii], y)
        estimators_.append(est)
    return estimators_


def _predict_decod(estimators, X, method):
    n_sample, n_chan, n_time = X.shape
    # NOTE(review): this initial value is dead — it is always replaced by
    # _init_pred() on the first iteration.
    y_pred = np.array((n_sample, n_time))
    for ii, est in enumerate(estimators):
        if method == 'predict':
            _y_pred = est.predict(X[:, :, ii])
        elif method == 'predict_proba':
            _y_pred = est.predict_proba(X[:, :, ii])
        # init
        if ii == 0:
            y_pred = _init_pred(_y_pred, X)
        y_pred[:, ii, ...] = _y_pred
    return y_pred


def _init_pred(y_pred, X):
    # Allocate the prediction array; extra last axis for predict_proba.
    n_sample, n_chan, n_time = X.shape
    if y_pred.ndim == 2:
        y_pred = np.zeros((n_sample, n_time, y_pred.shape[-1]))
    else:
        y_pred = np.zeros((n_sample, n_time))
    return y_pred


class LightGAT(LightTimeDecoding):
    """Generalization-across-time: every estimator predicts every time."""

    def transform(self, X):
        parallel, p_func, n_jobs = parallel_func(_predict_gat, self.n_jobs)
        y_pred = parallel(
            p_func(self.estimators_, x_split, self.method)
            for x_split in np.array_split(X, n_jobs, axis=2))
        y_pred = np.concatenate(y_pred, axis=2)
        return y_pred


def _predict_gat(estimators, X, method):
    n_sample, n_chan, n_time = X.shape
    for ii, est in enumerate(estimators):
        # Stack all testing times so one predict call covers them all.
        X_stack = np.transpose(X, [1, 0, 2])
        X_stack = np.reshape(X_stack, [n_chan, n_sample * n_time]).T
        if method == 'predict':
            _y_pred = est.predict(X_stack)
            _y_pred = np.reshape(_y_pred, [n_sample, n_time])
        elif method == 'predict_proba':
            _y_pred = est.predict_proba(X_stack)
            n_dim = _y_pred.shape[-1]
            _y_pred = np.reshape(_y_pred, [n_sample, n_time, n_dim])
        # init
        if ii == 0:
            y_pred = _init_pred_gat(_y_pred, X, len(estimators))
        y_pred[:, ii, ...] = _y_pred
    return y_pred


def _init_pred_gat(y_pred, X, n_train):
    # Allocate (samples, train times, test times[, classes]).
    n_sample, n_chan, n_time = X.shape
    if y_pred.ndim == 3:
        y_pred = np.zeros((n_sample, n_train, n_time, y_pred.shape[-1]))
    else:
        y_pred = np.zeros((n_sample, n_train, n_time))
    return y_pred


class CustomEnsemble(TransformerMixin):
    """Fit several estimators and stack their predictions as features."""

    def __init__(self, estimators, method='predict'):
        self.estimators = estimators
        self.method = method
        assert_true(method in ['predict', 'predict_proba'])

    def fit(self, X, y=None):
        for estimator in self.estimators:
            estimator.fit(X, y)
        return self

    def fit_transform(self, X, y=None):
        self.fit(X, y)
        return self.transform(X)

    def transform(self, X):
        all_Xt = list()
        for estimator in self.estimators:
            if self.method == 'predict':
                Xt = estimator.predict(X)
            elif self.method == 'predict_proba':
                Xt = estimator.predict_proba(X)
            all_Xt.append(Xt)
        all_Xt = np.c_[all_Xt].T
        return all_Xt

    def get_params(self, deep=True):
        return dict(estimators=self.estimators, method=self.method)


class GenericTransformer(_BaseEstimator):
    """Wrap an arbitrary function as a stateless transformer."""

    def __init__(self, function, **fit_params):
        self.function = function
        self.fit_params = fit_params

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        return self.function(X, **self.fit_params)

    def fit_transform(self, X, y=None):
        return self.transform(X, y)


class TimeEmbedder(_BaseEstimator):
    """Augment channels with time-rolled copies of the data (delay embedding)."""

    def __init__(self, delays=2):
        # int n means delays 1..n-1; an iterable is used as-is.
        self.delays = delays

    def transform(self, X, y=None):
        if not isinstance(X, np.ndarray):
            # assumes X is an mne Epochs-like object — TODO confirm
            epochs = X
            X = epochs._data
        if isinstance(self.delays, int):
            delays = range(1, self.delays)
        else:
            delays = self.delays
        X2 = []
        for x in X:
            tmp = x
            for d in delays:
                # np.roll wraps around the time axis.
                tmp = np.r_[tmp, np.roll(x, d, axis=-1)]
            X2.append(tmp)
        X2 = np.array(X2)
        return X2

    def fit_transform(self, X, y=None):
        return self.fit(X).transform(X, y)


class Windower(TransformerMixin, BaseEstimator):
    """To make sliding windows

    Parameters
    ----------
    size : int
        The window size.
    step : int
        The window step.
    vectorize : bool
        Returns arrays or vector.
    """

    def __init__(self, size=1, step=1, vectorize=False):
        self.size = size
        self.step = step
        self.vectorize = vectorize

    def fit(self, X, y=None):
        """Does nothing, for sklearn compatibility purposes

        Parameters
        ----------
        X : ndarray, shape(n_epochs, n_times, n_features)
            The target data.
        y : None | array, shape(n_epochs,)

        Returns
        -------
        self : self
        """
        if X.ndim != 3:
            raise ValueError('expects 3D array')
        return self

    def transform(self, X, y=None):
        """Generate windows from X.

        Parameters
        ----------
        X : ndarray, shape(n_epochs, n_times, n_features)
            The target data.
        y : None | array, shape(n_epochs,)

        Returns
        -------
        Xt : ndarray, shape(n_epochs, n_features, n_window_times, n_windows)
            The transformed data. If vectorize is True, then shape is
            (n_epochs, -1).
        """
        Xt = list()
        # NOTE(review): range stop excludes a final partial/exact-fit
        # window at the very end of the epoch — confirm this is intended.
        for time in range(0, X.shape[2] - self.size, self.step):
            Xt.append(X[:, :, time:(time + self.size)])
        Xt = np.transpose(Xt, [1, 2, 3, 0])  # trial chan window time
        if self.vectorize:
            Xt = Xt.reshape([len(Xt), -1, Xt.shape[-1]])
        return Xt

    def fit_transform(self, X, y=None):
        """Generate windows from X.

        Parameters
        ----------
        X : ndarray, shape(n_epochs, n_times, n_features)
            The target data.
        y : None | array, shape(n_epochs,)

        Returns
        -------
        Xt : ndarray, shape(n_epochs, n_features, n_window_times, n_windows)
            The transformed data. If vectorize is True, then shape is
            (n_epochs, -1).
        """
        return self.fit(X).transform(X)


def test_windower():
    # Smoke test only: exercises transform(); result is not asserted.
    Windower(3, 2, False).transform(np.zeros((2, 30, 100))).shape
This blog is mostly about Oracle development tools; the early posts are about Oracle SQL Developer and SQL Developer Data Modeler written as product manager for these products. There are also the occasional forays into travel, conferences and a few mentions of the Delhi missions done. In 2011 I left Oracle, spent a few more years in IT and then switched completely. I’m now a full time picture framer. The blog remains, with no updates, in case there is still something of interest here. In March this year I attended the pilot of the Data Modeler (Instructor Led Training) class. It was fun because the course writer presented the class and the attendees came from various Oracle offices from around the US and Europe. We all attended for various reasons, but mostly because we'll be working with customers and SQL Developer Data Modeler. The group were/are experienced data modelers, familiar with other tools and getting to know the capabilities of the new tool. This meant a lively interaction throughout the week and feedback rolled into the final training product. I was a bit nostalgic, as this was my old area, as I was first involved in training then writing courses and running these "train the trainer" events. Anyhow, I'm pleased to say that the course is now up and running and you can start registering for or requesting classes. It's a great class, covering all aspects of SQL Developer Data Modeler, including the Data Flow diagram, logical, relational and physical models, all in the context of designing and developing data models. There are lots of examples and we worked on and developed a number of different models, which kept it all interesting. I definitely recommend this event. Either way you end up on the education site and in the right hand corner you can search for the class you are after. If you start typing "Data Modeling" in the search list, then a drop list will allow you to select SQL Developer Data Modeling . 
The class is Oracle Data Modeling and Relational Database Design and the next available event is May, 10th in Chicago! It starts next week and so there is not much time to decide. If you are not able to make this one, you can watch out for future classes, or express your interest in a future date or location by providing feedback on the same course detail. The views expressed on this blog are my own and do not reflect the views of Oracle or Microgen.
def handle_service_control(
    host, name, fact_cls, formatter, running, restarted, reloaded, command,
    status_argument='status',
):
    '''
    Yield shell commands that bring service ``name`` into the requested state,
    using the host's cached service-status fact to skip redundant commands.

    Args:
        host: host object providing ``get_fact`` (returns a name -> bool
            running-state mapping for ``fact_cls``) and ``noop``.
        name: the service name, substituted into ``formatter``.
        fact_cls: fact class fetched via ``host.get_fact``.
        formatter: a format string; commands are built as
            ``formatter.format(name, action)`` (``{0}`` = name, ``{1}`` = action).
        running: desired state — ``True`` start, ``False`` stop, ``None`` leave.
        restarted: if truthy, restart the service (only when it is running).
        reloaded: if truthy, reload the service (only when it is running).
        command: optional arbitrary action, always executed when given.
        status_argument: action word used to probe the service state when the
            current status is unknown (defaults to ``'status'``).
    '''
    statuses = host.get_fact(fact_cls)
    status = statuses.get(name, None)

    # If we don't know the status, we need to check if it's up before starting
    # and/or restarting/reloading
    if status is None:
        # Emit a single shell snippet that probes the state at execution time;
        # unneeded branches are replaced with the shell no-op `true`.
        yield '''
            # If the service is running
            if {status_command}; then
                {stop_command}
                {restart_command}
                {reload_command}
            # If the service is not running, we just start it (no re[start|load])
            else
                {start_command}
            fi
        '''.format(
            status_command=formatter.format(name, status_argument),
            start_command=(
                formatter.format(name, 'start')
                if running is True
                else 'true'
            ),
            stop_command=(
                formatter.format(name, 'stop')
                if running is False
                else 'true'
            ),
            restart_command=(
                formatter.format(name, 'restart')
                if restarted
                else 'true'
            ),
            reload_command=(
                formatter.format(name, 'reload')
                if reloaded
                else 'true'
            ),
        )
        # Cache the state we just (conditionally) enforced.
        statuses[name] = running
    else:
        # Need down but running
        if running is False:
            if status:
                yield formatter.format(name, 'stop')
                statuses[name] = False
            else:
                host.noop('service {0} is stopped'.format(name))

        # Need running but down
        if running is True:
            if not status:
                yield formatter.format(name, 'start')
                statuses[name] = True
            else:
                host.noop('service {0} is running'.format(name))

        # Only restart if the service is already running
        if restarted and status:
            yield formatter.format(name, 'restart')

        # Only reload if the service is already running
        if reloaded and status:
            yield formatter.format(name, 'reload')

    # Always execute arbitrary commands as these may or may not rely on the service
    # being up or down
    if command:
        yield formatter.format(name, command)
The appellant had been arrested for being drunk in charge of a car and assaulting a police officer. He was placed in a police cell overnight. In protest he put the blanket down the toilet and repeatedly flushed the toilet to flood the cell. He argued that clean water on a waterproof floor and a blanket could not constitute damage. The blanket and cell were damaged since they could not be used until they had been dried. Sir Igor Judge applied the dictum from Morphitis v Salmon [1990] Crim LR 48 that damage includes temporary impairment of value or usefulness.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Functions to generate a list of feature maps based on image features.

Provides several feature map generators that can be used to build object
detection feature extractors.

Object detection feature extractors usually are built by stacking two
components - A base feature extractor such as Inception V3 and a feature map
generator. Feature map generators build on the base feature extractors and
produce a list of final feature maps.
"""
import collections
import functools
import tensorflow as tf
from object_detection.utils import ops
slim = tf.contrib.slim

# Activation bound used for TPU v1. Activations will be clipped to
# [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with
# use_bounded_activations enabled.
ACTIVATION_BOUND = 6.0


def get_depth_fn(depth_multiplier, min_depth):
  """Builds a callable to compute depth (output channels) of conv filters.

  Args:
    depth_multiplier: a multiplier for the nominal depth.
    min_depth: a lower bound on the depth of filters.

  Returns:
    A callable that takes in a nominal depth and returns the depth to use.
  """
  def multiply_depth(depth):
    new_depth = int(depth * depth_multiplier)
    return max(new_depth, min_depth)
  return multiply_depth


class KerasMultiResolutionFeatureMaps(tf.keras.Model):
  """Generates multi resolution feature maps from input image features.

  A Keras model that generates multi-scale feature maps for detection as in the
  SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.

  More specifically, when called on inputs it performs the following two tasks:
  1) If a layer name is provided in the configuration, returns that layer as a
     feature map.
  2) If a layer name is left as an empty string, constructs a new feature map
     based on the spatial shape and depth configuration. Note that the current
     implementation only supports generating new layers using convolution of
     stride 2 resulting in a spatial resolution reduction by a factor of 2.
     By default convolution kernel size is set to 3, and it can be customized
     by caller.

  An example of the configuration for Inception V3:
  {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128]
  }

  When this feature generator object is called on input image_features:
    Args:
      image_features: A dictionary of handles to activation tensors from the
        base feature extractor.

    Returns:
      feature_maps: an OrderedDict mapping keys (feature map names) to
        tensors where each tensor has shape [batch, height_i, width_i,
        depth_i].
  """

  def __init__(self,
               feature_map_layout,
               depth_multiplier,
               min_depth,
               insert_1x1_conv,
               is_training,
               conv_hyperparams,
               freeze_batchnorm,
               name=None):
    """Constructor.

    Args:
      feature_map_layout: Dictionary of specifications for the feature map
        layouts in the following format (Inception V2/V3 respectively):
        {
          'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
          'layer_depth': [-1, -1, -1, 512, 256, 128]
        }
        or
        {
          'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
          'layer_depth': [-1, -1, -1, 512, 256, 128]
        }
        If 'from_layer' is specified, the specified feature map is directly
        used as a box predictor layer, and the layer_depth is directly infered
        from the feature map (instead of using the provided 'layer_depth'
        parameter). In this case, our convention is to set 'layer_depth' to
        -1 for clarity.
        Otherwise, if 'from_layer' is an empty string, then the box predictor
        layer will be built from the previous layer using convolution
        operations. Note that the current implementation only supports
        generating new layers using convolutions of stride 2 (resulting in a
        spatial resolution reduction by a factor of 2), and will be extended
        to a more flexible design. Convolution kernel size is set to 3 by
        default, and can be customized by 'conv_kernel_size' parameter
        (similarily, 'conv_kernel_size' should be set to -1 if 'from_layer'
        is specified). The created convolution operation will be a normal
        2D convolution by default, and a depthwise convolution followed by
        1x1 convolution if 'use_depthwise' is set to True.
      depth_multiplier: Depth multiplier for convolutional layers.
      min_depth: Minimum depth for convolutional layers.
      insert_1x1_conv: A boolean indicating whether an additional 1x1
        convolution should be inserted before shrinking the feature map.
      is_training: Indicates whether the feature generator is in training mode.
      conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
        containing hyperparameters for convolution ops.
      freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
        training or not. When training with a small batch size (e.g. 1), it is
        desirable to freeze batch norm update and use pretrained batch norm
        params.
      name: A string name scope to assign to the model. If 'None', Keras
        will auto-generate one from the class name.
    """
    super(KerasMultiResolutionFeatureMaps, self).__init__(name=name)

    self.feature_map_layout = feature_map_layout
    self.convolutions = []

    depth_fn = get_depth_fn(depth_multiplier, min_depth)

    base_from_layer = ''
    use_explicit_padding = False
    if 'use_explicit_padding' in feature_map_layout:
      use_explicit_padding = feature_map_layout['use_explicit_padding']
    use_depthwise = False
    if 'use_depthwise' in feature_map_layout:
      use_depthwise = feature_map_layout['use_depthwise']
    for index, from_layer in enumerate(feature_map_layout['from_layer']):
      net = []
      layer_depth = feature_map_layout['layer_depth'][index]
      conv_kernel_size = 3
      if 'conv_kernel_size' in feature_map_layout:
        conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
      if from_layer:
        base_from_layer = from_layer
      else:
        if insert_1x1_conv:
          layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
              base_from_layer, index, depth_fn(layer_depth / 2))
          net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth / 2),
                                            [1, 1],
                                            padding='SAME',
                                            strides=1,
                                            name=layer_name + '_conv',
                                            **conv_hyperparams.params()))
          net.append(
              conv_hyperparams.build_batch_norm(
                  training=(is_training and not freeze_batchnorm),
                  name=layer_name + '_batchnorm'))
          net.append(
              conv_hyperparams.build_activation_layer(
                  name=layer_name))

        layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
            base_from_layer, index, conv_kernel_size, conv_kernel_size,
            depth_fn(layer_depth))
        stride = 2
        padding = 'SAME'
        if use_explicit_padding:
          padding = 'VALID'
          # We define this function here while capturing the value of
          # conv_kernel_size, to avoid holding a reference to the loop variable
          # conv_kernel_size inside of a lambda function
          def fixed_padding(features, kernel_size=conv_kernel_size):
            return ops.fixed_padding(features, kernel_size)
          net.append(tf.keras.layers.Lambda(fixed_padding))
        # TODO(rathodv): Add some utilities to simplify the creation of
        # Depthwise & non-depthwise convolutions w/ normalization & activations
        if use_depthwise:
          net.append(tf.keras.layers.DepthwiseConv2D(
              [conv_kernel_size, conv_kernel_size],
              depth_multiplier=1,
              padding=padding,
              strides=stride,
              name=layer_name + '_depthwise_conv',
              **conv_hyperparams.params()))
          net.append(
              conv_hyperparams.build_batch_norm(
                  training=(is_training and not freeze_batchnorm),
                  name=layer_name + '_depthwise_batchnorm'))
          net.append(
              conv_hyperparams.build_activation_layer(
                  name=layer_name + '_depthwise'))

          net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth),
                                            [1, 1],
                                            padding='SAME',
                                            strides=1,
                                            name=layer_name + '_conv',
                                            **conv_hyperparams.params()))
          net.append(
              conv_hyperparams.build_batch_norm(
                  training=(is_training and not freeze_batchnorm),
                  name=layer_name + '_batchnorm'))
          net.append(
              conv_hyperparams.build_activation_layer(
                  name=layer_name))
        else:
          net.append(tf.keras.layers.Conv2D(
              depth_fn(layer_depth),
              [conv_kernel_size, conv_kernel_size],
              padding=padding,
              strides=stride,
              name=layer_name + '_conv',
              **conv_hyperparams.params()))
          net.append(
              conv_hyperparams.build_batch_norm(
                  training=(is_training and not freeze_batchnorm),
                  name=layer_name + '_batchnorm'))
          net.append(
              conv_hyperparams.build_activation_layer(
                  name=layer_name))

      # Until certain bugs are fixed in checkpointable lists,
      # this net must be appended only once it's been filled with layers
      self.convolutions.append(net)

  def call(self, image_features):
    """Generate the multi-resolution feature maps.

    Executed when calling the `.__call__` method on input.

    Args:
      image_features: A dictionary of handles to activation tensors from the
        base feature extractor.

    Returns:
      feature_maps: an OrderedDict mapping keys (feature map names) to
        tensors where each tensor has shape [batch, height_i, width_i,
        depth_i].
    """
    feature_maps = []
    feature_map_keys = []

    for index, from_layer in enumerate(self.feature_map_layout['from_layer']):
      if from_layer:
        feature_map = image_features[from_layer]
        feature_map_keys.append(from_layer)
      else:
        # Build a new map from the previous one by applying the layer stack
        # constructed for this index in __init__.
        feature_map = feature_maps[-1]
        for layer in self.convolutions[index]:
          feature_map = layer(feature_map)
        layer_name = self.convolutions[index][-1].name
        feature_map_keys.append(layer_name)
      feature_maps.append(feature_map)
    return collections.OrderedDict(
        [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])


def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
                                  min_depth, insert_1x1_conv, image_features,
                                  pool_residual=False):
  """Generates multi resolution feature maps from input image features.

  Generates multi-scale feature maps for detection as in the SSD papers by
  Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.

  More specifically, it performs the following two tasks:
  1) If a layer name is provided in the configuration, returns that layer as a
     feature map.
  2) If a layer name is left as an empty string, constructs a new feature map
     based on the spatial shape and depth configuration. Note that the current
     implementation only supports generating new layers using convolution of
     stride 2 resulting in a spatial resolution reduction by a factor of 2.
     By default convolution kernel size is set to 3, and it can be customized
     by caller.

  An example of the configuration for Inception V3:
  {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128]
  }

  Args:
    feature_map_layout: Dictionary of specifications for the feature map
      layouts in the following format (Inception V2/V3 respectively):
      {
        'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
        'layer_depth': [-1, -1, -1, 512, 256, 128]
      }
      or
      {
        'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
        'layer_depth': [-1, -1, -1, 512, 256, 128]
      }
      If 'from_layer' is specified, the specified feature map is directly used
      as a box predictor layer, and the layer_depth is directly infered from
      the feature map (instead of using the provided 'layer_depth' parameter).
      In this case, our convention is to set 'layer_depth' to -1 for clarity.
      Otherwise, if 'from_layer' is an empty string, then the box predictor
      layer will be built from the previous layer using convolution
      operations. Note that the current implementation only supports
      generating new layers using convolutions of stride 2 (resulting in a
      spatial resolution reduction by a factor of 2), and will be extended to
      a more flexible design. Convolution kernel size is set to 3 by default,
      and can be customized by 'conv_kernel_size' parameter (similarily,
      'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
      The created convolution operation will be a normal 2D convolution by
      default, and a depthwise convolution followed by 1x1 convolution if
      'use_depthwise' is set to True.
    depth_multiplier: Depth multiplier for convolutional layers.
    min_depth: Minimum depth for convolutional layers.
    insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
      should be inserted before shrinking the feature map.
    image_features: A dictionary of handles to activation tensors from the
      base feature extractor.
    pool_residual: Whether to add an average pooling layer followed by a
      residual connection between subsequent feature maps when the channel
      depth match. For example, with option 'layer_depth': [-1, 512, 256, 256],
      a pooling and residual layer is added between the third and forth feature
      map. This option is better used with Weight Shared Convolution Box
      Predictor when all feature maps have the same channel depth to encourage
      more consistent features across multi-scale feature maps.

  Returns:
    feature_maps: an OrderedDict mapping keys (feature map names) to
      tensors where each tensor has shape [batch, height_i, width_i, depth_i].

  Raises:
    ValueError: if the number entries in 'from_layer' and
      'layer_depth' do not match.
    ValueError: if the generated layer does not have the same resolution
      as specified.
  """
  depth_fn = get_depth_fn(depth_multiplier, min_depth)

  feature_map_keys = []
  feature_maps = []
  base_from_layer = ''
  use_explicit_padding = False
  if 'use_explicit_padding' in feature_map_layout:
    use_explicit_padding = feature_map_layout['use_explicit_padding']
  use_depthwise = False
  if 'use_depthwise' in feature_map_layout:
    use_depthwise = feature_map_layout['use_depthwise']
  for index, from_layer in enumerate(feature_map_layout['from_layer']):
    layer_depth = feature_map_layout['layer_depth'][index]
    conv_kernel_size = 3
    if 'conv_kernel_size' in feature_map_layout:
      conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
    if from_layer:
      feature_map = image_features[from_layer]
      base_from_layer = from_layer
      feature_map_keys.append(from_layer)
    else:
      pre_layer = feature_maps[-1]
      pre_layer_depth = pre_layer.get_shape().as_list()[3]
      intermediate_layer = pre_layer
      if insert_1x1_conv:
        layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
            base_from_layer, index, depth_fn(layer_depth / 2))
        intermediate_layer = slim.conv2d(
            pre_layer,
            depth_fn(layer_depth / 2), [1, 1],
            padding='SAME',
            stride=1,
            scope=layer_name)
      layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
          base_from_layer, index, conv_kernel_size, conv_kernel_size,
          depth_fn(layer_depth))
      stride = 2
      padding = 'SAME'
      if use_explicit_padding:
        padding = 'VALID'
        intermediate_layer = ops.fixed_padding(
            intermediate_layer, conv_kernel_size)
      if use_depthwise:
        feature_map = slim.separable_conv2d(
            intermediate_layer,
            None, [conv_kernel_size, conv_kernel_size],
            depth_multiplier=1,
            padding=padding,
            stride=stride,
            scope=layer_name + '_depthwise')
        feature_map = slim.conv2d(
            feature_map,
            depth_fn(layer_depth), [1, 1],
            padding='SAME',
            stride=1,
            scope=layer_name)
        if pool_residual and pre_layer_depth == depth_fn(layer_depth):
          feature_map += slim.avg_pool2d(
              pre_layer, [3, 3],
              padding='SAME',
              stride=2,
              scope=layer_name + '_pool')
      else:
        feature_map = slim.conv2d(
            intermediate_layer,
            depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size],
            padding=padding,
            stride=stride,
            scope=layer_name)
      feature_map_keys.append(layer_name)
    feature_maps.append(feature_map)
  return collections.OrderedDict(
      [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])


def fpn_top_down_feature_maps(image_features,
                              depth,
                              use_depthwise=False,
                              use_explicit_padding=False,
                              use_bounded_activations=False,
                              scope=None,
                              use_native_resize_op=False):
  """Generates `top-down` feature maps for Feature Pyramid Networks.

  See https://arxiv.org/abs/1612.03144 for details.

  Args:
    image_features: list of tuples of (tensor_name, image_feature_tensor).
      Spatial resolutions of succesive tensors must reduce exactly by a factor
      of 2.
    depth: depth of output feature maps.
    use_depthwise: whether to use depthwise separable conv instead of regular
      conv.
    use_explicit_padding: whether to use explicit padding.
    use_bounded_activations: Whether or not to clip activations to range
      [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend
      themselves to quantized inference.
    scope: A scope name to wrap this op under.
    use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
      the upsampling process instead of reshape and broadcasting
      implementation.

  Returns:
    feature_maps: an OrderedDict mapping keys (feature map names) to
      tensors where each tensor has shape [batch, height_i, width_i, depth_i].
  """
  with tf.name_scope(scope, 'top_down'):
    num_levels = len(image_features)
    output_feature_maps_list = []
    output_feature_map_keys = []
    padding = 'VALID' if use_explicit_padding else 'SAME'
    kernel_size = 3
    with slim.arg_scope(
        [slim.conv2d, slim.separable_conv2d], padding=padding, stride=1):
      top_down = slim.conv2d(
          image_features[-1][1],
          depth, [1, 1], activation_fn=None, normalizer_fn=None,
          scope='projection_%d' % num_levels)
      if use_bounded_activations:
        top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
                                    ACTIVATION_BOUND)
      output_feature_maps_list.append(top_down)
      output_feature_map_keys.append(
          'top_down_%s' % image_features[-1][0])

      for level in reversed(range(num_levels - 1)):
        if use_native_resize_op:
          with tf.name_scope('nearest_neighbor_upsampling'):
            top_down_shape = top_down.shape.as_list()
            top_down = tf.image.resize_nearest_neighbor(
                top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2])
        else:
          top_down = ops.nearest_neighbor_upsampling(top_down, scale=2)
        residual = slim.conv2d(
            image_features[level][1], depth, [1, 1],
            activation_fn=None, normalizer_fn=None,
            scope='projection_%d' % (level + 1))
        if use_bounded_activations:
          residual = tf.clip_by_value(residual, -ACTIVATION_BOUND,
                                      ACTIVATION_BOUND)
        if use_explicit_padding:
          # slice top_down to the same shape as residual
          residual_shape = tf.shape(residual)
          top_down = top_down[:, :residual_shape[1], :residual_shape[2], :]
        top_down += residual
        if use_bounded_activations:
          top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
                                      ACTIVATION_BOUND)
        if use_depthwise:
          conv_op = functools.partial(slim.separable_conv2d,
                                      depth_multiplier=1)
        else:
          conv_op = slim.conv2d
        if use_explicit_padding:
          top_down = ops.fixed_padding(top_down, kernel_size)
        output_feature_maps_list.append(conv_op(
            top_down,
            depth, [kernel_size, kernel_size],
            scope='smoothing_%d' % (level + 1)))
        output_feature_map_keys.append('top_down_%s' % image_features[level][0])
      return collections.OrderedDict(reversed(
          list(zip(output_feature_map_keys, output_feature_maps_list))))


def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers,
                                 image_features, replace_pool_with_conv=False):
  """Generates pooling pyramid feature maps.

  The pooling pyramid feature maps is motivated by
  multi_resolution_feature_maps. The main difference are that it is simpler and
  reduces the number of free parameters.

  More specifically:
   - Instead of using convolutions to shrink the feature map, it uses max
     pooling, therefore totally gets rid of the parameters in convolution.
   - By pooling feature from larger map up to a single cell, it generates
     features in the same feature space.
   - Instead of independently making box predictions from individual maps, it
     shares the same classifier across different feature maps, therefore reduces
     the "mis-calibration" across different scales.

  See go/ppn-detection for more details.

  Args:
    base_feature_map_depth: Depth of the base feature before the max pooling.
    num_layers: Number of layers used to make predictions. They are pooled
      from the base feature.
    image_features: A dictionary of handles to activation tensors from the
      feature extractor.
    replace_pool_with_conv: Whether or not to replace pooling operations with
      convolutions in the PPN. Default is False.

  Returns:
    feature_maps: an OrderedDict mapping keys (feature map names) to
      tensors where each tensor has shape [batch, height_i, width_i, depth_i].

  Raises:
    ValueError: image_features does not contain exactly one entry
  """
  if len(image_features) != 1:
    raise ValueError('image_features should be a dictionary of length 1.')
  # Fixed for Python 3 compatibility: dict.keys() returns a non-indexable
  # view object under Python 3, so `image_features.keys()[0]` raises
  # TypeError there. Materialize the keys before indexing.
  image_features = image_features[list(image_features.keys())[0]]

  feature_map_keys = []
  feature_maps = []
  feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth
  if base_feature_map_depth > 0:
    image_features = slim.conv2d(
        image_features,
        base_feature_map_depth,
        [1, 1],  # kernel size
        padding='SAME', stride=1, scope=feature_map_key)
    # Add a 1x1 max-pooling node (a no op node) immediately after the conv2d for
    # TPU v1 compatibility. Without the following dummy op, TPU runtime
    # compiler will combine the convolution with one max-pooling below into a
    # single cycle, so getting the conv2d feature becomes impossible.
    image_features = slim.max_pool2d(
        image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key)
  feature_map_keys.append(feature_map_key)
  feature_maps.append(image_features)
  feature_map = image_features
  if replace_pool_with_conv:
    with slim.arg_scope([slim.conv2d], padding='SAME', stride=2):
      for i in range(num_layers - 1):
        feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i,
                                                       base_feature_map_depth)
        feature_map = slim.conv2d(
            feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key)
        feature_map_keys.append(feature_map_key)
        feature_maps.append(feature_map)
  else:
    with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2):
      for i in range(num_layers - 1):
        feature_map_key = 'MaxPool2d_%d_2x2' % i
        feature_map = slim.max_pool2d(
            feature_map, [2, 2], padding='SAME', scope=feature_map_key)
        feature_map_keys.append(feature_map_key)
        feature_maps.append(feature_map)
  return collections.OrderedDict(
      [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
substituted benzenes, e.g., toluene, aniline, phenol. The principal substituent is assumed to occupy position 1, but it is not labelled as such in the chemical name. Always give substituents the lowest possible numbers! How is the meta derivative prepared? Caution: diazonium salts are explosive!
#!/usr/bin/python
"""Round-robin schedule generator (circle method).

Fixed to run under Python 3: the original used Python 2 `print` statements,
relied on `/` producing an integer, and called `del` on the iterator returned
by `zip()`. Also trims the over-filled home list that `zip` silently
truncated.
"""
import sys


def round_robin(ls):
    """Build a round-robin schedule for the given clubs.

    Uses the classic circle method: one club is fixed as the "base" and the
    remaining clubs rotate around it each round. Home/away is alternated
    every round by swapping the two halves of the pairing.

    Args:
        ls: iterable of club identifiers (must not contain 0, which is
            reserved for the dummy club added when the count is odd).

    Returns:
        A list of rounds; each round is a list of (home, away) tuples.
        For an odd number of clubs, the bye match against the dummy club
        is removed, so each club sits out one round.
    """
    clubs = list(ls)
    num_clubs = len(clubs)

    # Add a dummy club if necessary so every round pairs everyone.
    if num_clubs % 2 == 1:
        have_dummy = True
        clubs.append(0)
        num_clubs += 1
    else:
        have_dummy = False

    # Take the last club as the fixed base; the rest rotate.
    baseclub = clubs[-1]
    clubs = clubs[:-1]

    num_rounds = num_clubs - 1
    # Floor division: `/` would yield a float index bound on Python 3.
    half_clubs = num_clubs // 2

    rounds = []
    for r in range(num_rounds):
        # First half (plus the base) host; second half visit, reversed so
        # that positions pair up across the "circle".
        homeclubs = [baseclub] + clubs[:half_clubs - 1]
        awayclubs = [clubs[i] for i in range(num_clubs - 2, half_clubs - 2, -1)]

        # Alternate home advantage each round; materialize the pairing as a
        # list (zip() is a lazy iterator on Python 3).
        if r % 2 == 0:
            rounds.append(list(zip(homeclubs, awayclubs)))
        else:
            rounds.append(list(zip(awayclubs, homeclubs)))

        # Rotate the non-base clubs by one position.
        clubs.append(clubs.pop(0))

    # The dummy club always sits in the first match of a round; drop it.
    if have_dummy:
        for matches in rounds:
            del matches[0]

    return rounds


if __name__ == "__main__":
    default_num_clubs = 6

    # Parse command line.
    if len(sys.argv) > 1:
        num_clubs = int(sys.argv[1])
    else:
        num_clubs = default_num_clubs

    # Generate clubs numbered 1..num_clubs.
    clubs = list(range(1, num_clubs + 1))

    rounds = round_robin(clubs)
    print(len(rounds))
    for r in rounds:
        print(len(r), r)
Construction Project Manager Guys will be here for all your goals involving Construction Project Managers in Hanover, ME. You are looking for the most sophisticated technologies in the field, and our crew of highly trained contractors will provide exactly that. We make sure you get the most excellent service, the best selling price, and the very best quality supplies. Call us today at 888-492-2478 and we'll be able to go over your alternatives, answer the questions you have, and arrange a scheduled appointment to begin organizing your project. At Construction Project Manager Guys, we are aware that you must stay within your price range and reduce costs wherever it's possible to. But, saving money should never signify that you give up quality on Construction Project Managers in Hanover, ME. Our attempts to help you save money won't sacrifice the high quality of our services. We utilize the highest quality practices and materials to ensure that any work can withstand the years, and we save a little money with strategies that won't change the quality of your job. It will be feasible since we appreciate how to save your time and resources on products and labor. To be able to lower your costs, Construction Project Manager Guys is the service to get in touch with. Contact 888-492-2478 to talk with our client care reps, now. To put together the ideal judgments regarding Construction Project Managers in Hanover, ME, you have to be well informed. You shouldn't enter into it without understanding it, and it is best to understand what you should expect. You will not deal with any sort of unexpected situations when you choose Construction Project Manager Guys. You can begin by talking about your task with our customer service representatives when you dial 888-492-2478. During this call, you'll get your questions responded to, and we're going to establish a time to start work. We consistently get there at the arranged time, prepared to work with you. 
You've got lots of reasons to turn to Construction Project Manager Guys to meet your needs regarding Construction Project Managers in Hanover, ME. We will be the first choice when you need the best cash saving options, the finest materials, and the best rate of customer service. We're ready to assist you with the greatest expertise and practical knowledge in the industry. Call 888-492-2478 whenever you need Construction Project Managers in Hanover, and we're going to work together with you to systematically complete your project.
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2016, Marcos Salomão.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import datetime

from app import user
from app import util
from app.exceptions import NotFoundEntityException
from app.exceptions import IntegrityViolationException
from app.marketplace import models as marketplace

from google.appengine.ext import ndb
from google.appengine.api import search as search_api


__author__ = "Marcos Salomão"
__email__ = "salomao.marcos@gmail.com"
__copyright__ = "Copyright 2016, Marcos Salomão"
__license__ = "Apache 2.0"


# Name of the full-text-search index used for customer-name autocomplete.
CUSTOMER_NAME_INDEX = 'customer_autocomplete_index'

# Maximum number of suggestions returned by search().
AUTOCOMPLETE_SEARCH_LIMIT = 5

# Index usage
# http://stackoverflow.com/questions/12899083/partial-matching-gae-search-api


def get_name_index():
    """ Customer index by name.

    Returns the search API index that holds the tokenized customer names
    used for autocomplete matching.
    """
    return search_api.Index(name=CUSTOMER_NAME_INDEX)


class CustomerModel(ndb.Model):
    """ Customer model.

    Entities are always stored with the owning marketplace entity as their
    ndb ancestor (see get_customer_query/save), so queries are strongly
    consistent within a marketplace.
    """

    # Customer display name (the only required field).
    name = ndb.StringProperty(required=True)

    # Contact email.
    email = ndb.StringProperty(required=False)

    # Contact phone.
    phone = ndb.StringProperty(required=False)

    # Free-form location/address.
    location = ndb.StringProperty(required=False)

    # Creation timestamp, set automatically on first put().
    created_date = ndb.DateTimeProperty(auto_now_add=True)


def update_index(customer):
    """ Update index by customer id.

    Re-tokenizes the customer's name into partial-match tokens and writes
    a search document keyed by the entity id, overwriting any previous one.
    """
    # Create partial-match tokens so the search API can match prefixes.
    name = ','.join(util.tokenize_autocomplete(customer.name))

    # Create a doc keyed by the datastore entity id.
    document = search_api.Document(
        doc_id=str(customer.key.id()),
        fields=[search_api.TextField(name='name', value=name)])

    # Add doc to index (put with an existing doc_id replaces the document).
    get_name_index().put(document)


def remove_index(_id):
    """ Remove index by id.
    """
    # Delete the search document for this entity id.
    get_name_index().delete(str(_id))


def get_customer_query():
    """ Get customer model query.

    The query is rooted at the current user's marketplace entity, so only
    that marketplace's customers are returned.
    """
    # Get user marketplace
    marketplaceModel = marketplace.get_marketplace()

    # Get query, notice marketplace as parent
    query = CustomerModel.query(ancestor=marketplaceModel.key)

    # Return query
    return query


def get(id):
    """ Get customer by its id.

    NOTE(review): the parameter name `id` shadows the builtin; kept as-is
    because it is part of the public signature.
    """
    # Get marketplace
    marketplaceModel = marketplace.get_marketplace()

    # Get customer by id, notice marketplace as parent.
    # Returns None when no entity exists for this id.
    customer = ndb.Key('CustomerModel', int(
        id), parent=marketplaceModel.key).get()

    # Return customer
    return customer


def list():
    """List the customers registered in the user's store.

    NOTE(review): the function name `list` shadows the builtin; kept as-is
    because callers depend on it.
    """
    # Run the query, ordering customers by name.
    customers = get_customer_query().order(CustomerModel.name).fetch()

    logging.debug("Foram selecionado(s) %d clientes(s) cadastrados",
                  len(customers))

    # Return the fetched customers.
    return customers


def search(customer):
    """ Search

    Looks up customers whose tokenized name matches the given customer's
    name, returning at most AUTOCOMPLETE_SEARCH_LIMIT models. Stale index
    entries (documents whose datastore entity is gone) are removed lazily.
    """
    # Build search by name using index
    search_results = get_name_index().search(search_api.Query(
        query_string="name:{name}".format(name=customer.name),
        options=search_api.QueryOptions(limit=AUTOCOMPLETE_SEARCH_LIMIT)))

    # Transport results do model
    # NOTE(review): the loop rebinds the `customer` parameter to each result
    # model; harmless here since the parameter is no longer needed, but
    # worth renaming in a future cleanup.
    results = []
    for doc in search_results:

        # Get customer model
        customer = get(int(doc.doc_id))

        # Handle if not exists: the index is out of date, drop the document.
        if customer is not None:
            results.append(customer)
        else:
            remove_index(doc.doc_id)
            logging.warning(
                'Index %s is not up-to-date to doc %s and it has removed!',
                CUSTOMER_NAME_INDEX, doc.doc_id)

    # Return
    return results


@ndb.transactional
def save(customer):
    """ Add or update a customer in datastore.

    When `customer.id` is set, the existing entity with that id is
    overwritten; otherwise a new entity (with an auto-allocated id) is
    created. The autocomplete index is refreshed in the same transaction.
    """
    # Get marketplace
    marketplaceModel = marketplace.get_marketplace()
    logging.debug("Get user marketplace")

    # Get customer model if exists
    # or instantiate one, instead.
    if customer.id is not None:
        customerModel = CustomerModel(id=int(customer.id),
                                      parent=marketplaceModel.key)
    else:
        customerModel = CustomerModel(parent=marketplaceModel.key)

    logging.debug("Customer model created")

    # Pass values
    customerModel.name = customer.name
    customerModel.email = customer.email
    customerModel.phone = customer.phone
    customerModel.location = customer.location

    # Persist it
    customerModel.put()

    logging.debug("Customer id %d saved success to %s",
                  customerModel.key.id(), marketplaceModel.name)

    # Update index
    update_index(customerModel)
    logging.debug("Index updated to customer id %s", customerModel.key.id())

    # Return
    return customerModel


@ndb.transactional
def delete(id):
    """ Remove customer by id.

    Refuses to delete a customer that still has sales referencing it.
    """
    # Get marketplace
    marketplaceModel = marketplace.get_marketplace()

    # Get customer
    customerKey = ndb.Key('CustomerModel', int(id),
                          parent=marketplaceModel.key)

    # Handle if not exists
    # NOTE(review): ndb.Key(...) constructs a key object and never returns
    # None, so this branch can never fire; checking existence would require
    # `customerKey.get() is None`. Confirm intended behavior before changing.
    if customerKey is None:
        raise NotFoundEntityException(message='messages.customer.notfound')

    # Are there sales with this customer,
    # if true, is not possible to delete
    from app.sale import models as sales
    if sales.has_sales_by_customer(customerKey) == True:
        raise IntegrityViolationException(
            message='messages.customer.salesintegrityviolation')

    logging.debug("Check constraint validation OK")

    # Remove from datastore
    customerKey.delete()
    logging.debug("Customer id %s removed success!", id)

    # Update index
    remove_index(id)
    logging.debug("Index updated to customer id %s", id)
Parkinson's Association of Ireland has branches throughout the country and these have grown rapidly over the past few years. These Branches are a fantastic way for people with Parkinson's and their families to meet with each other and discuss the various problems associated with Parkinson's disease. Branches play an important role in collating information for onward transmission to the National Office which in turn informs our thinking and guides our negotiations with the HSE and Government for services to people with Parkinson's. Branches also arrange various activities that assist people with Parkinson's and their families such as Yoga classes, outings and information meetings. For a full list of our Branch network please visit www.parkinsons.ie/aboutparkinsons_supports or call their helpline 1800 359 359.
#!/usr/bin/python import sys def corners(pts): #x0,xN,y0,yN = [ pts[0][0], pts[0][0], pts[0][1], pts[0][1] ] ## determine our bounds x0 = pts[0][0] y0 = pts[0][1] xN = pts[0][0] yN = pts[0][1] for pt in pts: if pt[0] < x0: x0 = pt[0] if pt[1] < y0: y0 = pt[1] if pt[0] > xN: xN = pt[0] if pt[1] > yN: yN = pt[1] #print pt[0], pt[1] print "Bounds: ", (x0,y0), (xN,yN) for pt in pts: x, y = pt if x == x0 or x == xN: if y >= y0 and y <= yN: print pt, ":\tYES" else: print pt, ":\tNO" elif y == y0 or y == yN: if x >= x0 and x <= xN: print pt, ":\tYES" else: print pt, ":\tNO" else: print pt, ":\tNO" nqry = int(raw_input()) for q in range(nqry): npts = int(raw_input()) pts = [] for p in range(npts): x, y = map(int, str(raw_input()).split(" ")) pts += [ (x,y) ] print "\n",pts corners(pts) #for i in [ '42536258796157867'\ # , '4424444424442444'\ # , '5424644424442444'\ # , '5122-2368-7954 - 3214'\ # , '44244x4424442444'\ # , '0525362587961578']: # print i, ":\t", is_valid_cc(i)
Made to order items are made as similar to the photo as possible (Stones are natural, therefore unique) unless other requests are made. If you have any questions about this or would like to select your ammonite, feel free to send me a message. These handcrafted copper ammonite rings are truly each one of a kind. Adorned upon a hand shaped copper band, the natural fossil seamlessly transitions to the ring to retain its beautiful shape. Ammonites are the fossils of sea creatures that lived from 240-65 million years ago, when they became extinct along with the dinosaurs. Ammonite shells are naturally occurring examples of the Fibonacci sequence, also known as the Golden Ratio, which can be found all throughout nature. This ring, as most of our jewelry pieces, is created through the process of electroforming. This allows us to grow copper onto various natural materials in many creative and unique ways. Each piece is one of a kind, hand-made with care, patience, and love for our craft.
# -*- coding: utf-8 -*-

import pytest

from queryparser.adql import ADQLQueryTranslator
from queryparser.mysql import MySQLQueryProcessor
from queryparser.postgresql import PostgreSQLQueryProcessor

from queryparser.exceptions import QueryError, QuerySyntaxError


def _test_parsing(query_processor, test, translate=False):
    """Process one query and compare the extracted columns, keywords,
    functions, display columns and tables with the expected values
    bundled in ``test``.

    ``test`` is a 6-tuple, or a 7-tuple whose last element is a
    ``replace_schema_name`` mapping.  A ``None`` expected value skips
    that particular comparison.  If ``translate`` is true the query is
    first translated from ADQL into the processor's dialect.
    """
    # NOTE(review): a test tuple of any other length leaves
    # replace_schema_name unbound and raises NameError below.
    if len(test) == 6:
        query, columns, keywords, functions, display_columns, tables = test
        replace_schema_name = None
    elif len(test) == 7:
        query, columns, keywords, functions, display_columns, tables,\
            replace_schema_name = test

    if translate:
        # Translate the ADQL input into the target dialect first.
        adt = ADQLQueryTranslator()
        adt.set_query(query)
        if query_processor == MySQLQueryProcessor:
            query = adt.to_mysql()
        elif query_processor == PostgreSQLQueryProcessor:
            query = adt.to_postgresql()

    if replace_schema_name is None:
        qp = query_processor(query)
    else:
        qp = query_processor()
        qp.set_query(query)
        qp.process_query(replace_schema_name=replace_schema_name)

    # Flatten processor output into comparable "a.b.c" strings.
    qp_columns = ['.'.join([str(j) for j in i[:3]]) for i in qp.columns
                  if i[0] is not None and i[1] is not None]
    qp_display_columns = ['%s: %s' % (str(i[0]),
                                      '.'.join([str(j) for j in i[1]]))
                          for i in qp.display_columns]
    qp_tables = ['.'.join([str(j) for j in i]) for i in qp.tables
                 if i[0] is not None and i[1] is not None]

    if columns is not None:
        assert set(columns) == set(qp_columns)
    if keywords is not None:
        assert set([i.lower() for i in keywords]) == set(qp.keywords)
    if functions is not None:
        assert set(functions) == set(qp.functions)
    if display_columns is not None:
        assert set(display_columns) == set(qp_display_columns)
    if tables is not None:
        assert set(tables) == set(qp_tables)


def _test_syntax(query_processor, query):
    """Assert that processing ``query`` raises QuerySyntaxError."""
    with pytest.raises(QuerySyntaxError):
        query_processor(query)


def _test_query(query_processor, query):
    """Assert that processing ``query`` raises QueryError."""
    with pytest.raises(QueryError):
        query_processor(query)


def _test_adql_translation(test):
    """Translate an ADQL query and compare with the expected text.

    ``test`` is (query, translated_query, output) where ``output``
    selects the target dialect ('mysql' or 'postgresql').
    """
    query, translated_query, output = test
    adt = ADQLQueryTranslator(query)
    if translated_query is not None:
        if output == 'mysql':
            assert translated_query.strip() == adt.to_mysql()
        elif output == 'postgresql':
            assert translated_query.strip() == adt.to_postgresql()


def _test_indexed_adql_translation(test):
    """Translate ADQL to PostgreSQL, reprocess with indexed objects and
    compare the final query text.

    ``test`` is (query, translated_query, iob, output) where ``iob``
    is the indexed-objects mapping passed to process_query.
    """
    query, translated_query, iob, output = test
    adt = ADQLQueryTranslator(query)
    if translated_query is not None:
        if output == 'postgresql':
            tq = adt.to_postgresql()
            qp = PostgreSQLQueryProcessor()
            qp.set_query(tq)
            qp.process_query(indexed_objects=iob)
            assert translated_query.strip() == qp.query
1. Place onion in a 6-quart electric slow cooker coated with cooking spray; top with carrots and potatoes. 2. Combine broth, next 3 ingredients, 1/2 teaspoon salt, and 1/4 teaspoon pepper. Pour over vegetables. 3. Combine paprika, remaining 1/4 teaspoon salt, and remaining 1/4 teaspoon pepper; rub over chicken. Heat a large nonstick skillet over medium-high heat. Add oil to pan; swirl to coat. Add chicken. Cook 3 minutes on each side or until browned. Arrange chicken on top of vegetables. 4. Cover and cook on LOW for 3 1/2 hours or until chicken is done and vegetables are tender. Garnish with additional thyme, if desired. Note: MyRecipes is working with Let's Move!, the Partnership for a Healthier America, and USDA's MyPlate to give anyone looking for healthier options access to a trove of recipes that will help them create healthy, tasty plates. For more information about creating a healthy plate, visit www.choosemyplate.gov.
#!/usr/bin/env python3
"""Neural-network based packet-loss anomaly detector.

Repeatedly pulls unprocessed alarm jobs from Elasticsearch, loads the
matching perfSONAR packet-loss time series, trains a small classifier to
separate a reference interval from a subject interval, and writes the
chance-rescaled accuracy back to the alarm document.
"""

from elasticsearch import Elasticsearch, exceptions as es_exceptions
from elasticsearch.helpers import scan
from time import time
import numpy as np
import pandas as pd
# import tensorflow as tf - should do it by itself
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from sklearn.model_selection import train_test_split
from pandas.tseries.offsets import *

# parameters. These should not change for one version (_type) of test.
alarm_index = 'ps_alarms'
alarm_type = 'NN v1'
ref = 24   # reference window length in hours
sub = 1    # subject window length in hours
ref = ref * Hour()
sub = sub * Hour()

# get a job from ES
# it takes one job earliest it time.
# it shouls also consider jobs that are "pending" but for too long.
es = Elasticsearch([{'host': 'atlas-kibana.mwt2.org', 'port': 9200}], timeout=60)


def getWorkload():
    """Fetch the oldest unprocessed alarm job, lock it, and load the
    packet-loss series for its endpoint.

    Returns:
        (job_id, end, full_df, status) where status is
        0 = OK, 1 = nothing left to do, 2 = not enough data,
        3 = failed to lock the job.
    """
    my_query = {
        "size": 1,
        "query": {
            "bool": {
                "must": [
                    {"term": {"processed": "no"}},
                    {"term": {"_type": alarm_type}}
                ]
            }
        },
        "sort": [
            {"timestamp": {"order": "asc"}}
        ]
    }
    res = es.search(index=alarm_index, body=my_query, request_timeout=120)
    # print(res)
    hits = res['hits']['hits']
    if len(hits) == 0:
        print('All done.')
        return (0, 0, 0, 1)
    job_id = res['hits']['hits'][0]['_id']
    job_source = res['hits']['hits'][0]['_source']
    job_timestamp = job_source['timestamp']
    site = job_source['site']
    endpoint = job_source['endpoint']
    print('Processing id:', job_id, '\ttimebin:', job_timestamp,
          '\tsite:', site, '\tendpoint:', endpoint)

    # setting a lock on the job
    try:
        es.update(index=alarm_index, doc_type=alarm_type, id=job_id,
                  body={"doc": {"processed": "pending"}})
    except es_exceptions.TransportError as e:
        print('TransportError ', e)
        return (0, 0, 0, 3)

    # getting actual perfsonar data: reference window plus subject window
    # ending at the alarm timestamp.
    end = pd.Timestamp(job_timestamp)
    start = end - ref - sub
    print('start:', start, '\tend:', end)

    indices = "network_weather-2017.*"
    my_query = {
        'query': {
            'bool': {
                'must': [
                    {'range': {'timestamp': {
                        'gte': start.strftime('%Y%m%dT%H%M00Z'),
                        'lt': end.strftime('%Y%m%dT%H%M00Z')}}},
                    {'bool': {'should': [
                        {'term': {'src': endpoint}},
                        #{'term': {'src': srcSiteThroughputServer}},
                        #{'term': {'src': destSiteOWDServer}},
                        #{'term': {'src': destSiteThroughputServer}}
                    ]}
                    },
                    {'bool': {'should': [
                        {'term': {'_type': 'packet_loss_rate'}},
                        #{'term': {'_type': 'latency'}},
                        #{'term': {'_type': ''}},
                        #{'term': {'_type': ''}}
                    ]}
                    }
                ]
            }
        }
    }

    scroll = scan(client=es, index=indices, query=my_query)

    # scan the data; cap at 1M docs as a safety valve
    count = 0
    allData = {}  # will be like this: {'dest_host':[[timestamp],[value]], ...}
    for res in scroll:
        # if count<2: print(res)
        if not count % 100000:
            print(count)
        if count > 1000000:
            break
        dst = res['_source']['dest']  # old data - dest, new data - dest_host
        if dst not in allData:
            allData[dst] = [[], []]
        allData[dst][0].append(res['_source']['timestamp'])
        allData[dst][1].append(res['_source']['packet_loss'])
        count = count + 1

    # One time-indexed column per destination; deduplicate minute bins.
    dfs = []
    for dest, data in allData.items():
        ts = pd.to_datetime(data[0], unit='ms')
        df = pd.DataFrame({dest: data[1]}, index=ts)
        df.sort_index(inplace=True)
        df.index = df.index.map(lambda t: t.replace(second=0))
        df = df[~df.index.duplicated(keep='last')]
        dfs.append(df)
        # print(df.head(2))

    print('docs read:', count)
    if len(dfs) < 2:
        return (job_id, 0, 0, 2)
    full_df = pd.concat(dfs, axis=1)
    print(full_df.shape)

    # fix NANs
    full_df.fillna(0, inplace=True)
    return (job_id, end, full_df, 0)


def scaled_accuracy(accuracy, ref_samples, sub_samples):
    """Rescale raw accuracy so that 0 corresponds to always predicting
    the majority (reference) class and 1 to perfect separation."""
    chance = float(ref_samples) / (ref_samples + sub_samples)
    print('chance:', chance)
    print('actual accuracy:', accuracy)
    rescale = 1 / (1 - chance)
    return (accuracy - chance) * rescale


# create Network Model
class ANN(object):
    """Small dense binary classifier over the per-destination
    packet-loss columns of one job's dataframe."""

    def __init__(self, end, data):
        self.n_series = data.shape[1]
        self.df = data
        self.lend = end
        self.nn = Sequential()
        self.nn.add(Dense(units=self.n_series * 2,
                          input_shape=(self.n_series,), activation='relu'))
        # self.nn.add(Dropout(0.5))
        self.nn.add(Dense(units=self.n_series, activation='relu'))
        # self.nn.add(Dropout(0.5))
        self.nn.add(Dense(units=1, activation='sigmoid'))
        # self.nn.compile(loss='hinge', optimizer='sgd', metrics=['binary_accuracy'])
        # self.nn.compile(loss='mse',optimizer='rmsprop', metrics=['accuracy'])
        self.nn.compile(loss='binary_crossentropy', optimizer='rmsprop',
                        metrics=['accuracy'])
        # self.nn.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['binary_accuracy'])
        self.nn.summary()

    def check_for_anomaly(self):
        """Train to separate reference rows (label 0) from subject rows
        (label 1); return the chance-rescaled test accuracy, or -999 when
        either interval has fewer than 32 samples."""
        lstart = self.df.index.min()
        # lend = self.df.index.max()
        # round start down to the hour.
        # FIX: pandas Timestamps are immutable; the original
        # "lstart.seconds = 0" / "lstart.minutes = 0" raised
        # AttributeError (and used the wrong attribute names).
        lstart = lstart.replace(minute=0, second=0)
        print(lstart, self.lend)
        ti = self.lend - sub
        # ti = lstart + ref
        startt = time()

        ref_df = self.df[(self.df.index < ti)]
        sub_df = self.df[(self.df.index >= ti)]
        if ref_df.shape[0] < 32 or sub_df.shape[0] < 32:
            return -999
        y_ref = pd.Series([0] * ref_df.shape[0])
        X_ref = ref_df
        y_sub = pd.Series([1] * sub_df.shape[0])
        X_sub = sub_df

        # separate Reference and Subject into Train and Test
        X_ref_train, X_ref_test, y_ref_train, y_ref_test = train_test_split(
            X_ref, y_ref, test_size=0.3, random_state=42)
        X_sub_train, X_sub_test, y_sub_train, y_sub_test = train_test_split(
            X_sub, y_sub, test_size=0.3, random_state=42)

        # combine training ref and sub samples
        X_train = pd.concat([X_ref_train, X_sub_train])
        y_train = pd.concat([y_ref_train, y_sub_train])

        # combine testing ref and sub samples
        X_test = pd.concat([X_ref_test, X_sub_test])
        y_test = pd.concat([y_ref_test, y_sub_test])

        X_train = X_train.reset_index(drop=True)
        y_train = y_train.reset_index(drop=True)
        X_train_s, y_train_s = shuffle(X_train, y_train)

        hist = self.nn.fit(X_train_s.values, y_train_s.values,
                           epochs=100, verbose=0, shuffle=True, batch_size=10)
        loss_and_metrics = self.nn.evaluate(X_test.values, y_test.values)  # , batch_size=256)
        print(loss_and_metrics)
        print('\n', ti, "\trefes:", ref_df.shape, "\tsubjects:", sub_df.shape,
              '\taccuracy:', loss_and_metrics)
        print("took:", time() - startt)
        return scaled_accuracy(loss_and_metrics[1],
                               ref_df.shape[0], sub_df.shape[0])


# run it: process jobs until none are left
while (True):
    body = {"doc": {"processed": "yes"}}
    (job_id, timestamp, data, status) = getWorkload()
    if status == 1:
        print('All done.')
        break
    elif status == 2:
        print('Not enough data.')
        try:
            es.update(index=alarm_index, doc_type=alarm_type, id=job_id,
                      body=body)
        except es_exceptions.TransportError as e:
            print('TransportError on not enough data update', e)
        continue
    elif status == 3:
        print('Probably already done.')
    else:
        ann = ANN(timestamp, data)
        rescaled_accuracy = ann.check_for_anomaly()
        # update state and value
        if rescaled_accuracy != -999:
            body['doc']['rescaled'] = rescaled_accuracy
        try:
            es.update(index=alarm_index, doc_type=alarm_type, id=job_id,
                      body=body)
        except es_exceptions.TransportError as e:
            print('TransportError on result update', e)
From September 2000 through August 2001, Juhee Lee-Hartford conducted research on Korean residential architecture, comparing traditional and modern homes. She studied how the drastic modernization of South Korea between the 1960s and 1980s impacted today's residences and what traditions still lingered. She is (still) working on a manuscript summarizing this research, which has earned the honorary title of "Greater-Than-Life Project". This endeavor has received encouraging support from The MacDowell Colony and the Graham Foundation. Her lifetime goal is to share information on Korean architecture through English writing, translation, and teaching. koreanarchitecture.org is the other "Greater-Than-Life Project," which she hopes to finish when she becomes rich or famous. Currently, a preview version of this website is available for fundraising purposes.
from urllib2 import urlopen
from datetime import datetime
from django.db import transaction
from models import *
import logging
logger = logging.getLogger(__name__)

sourcePath = "ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/gene_condition_source_id"


def updateDiseasesList():
    """Rebuild the ClinVar disease/gene/source tables.

    Drops every existing row, downloads the tab-delimited
    gene_condition_source_id dump from NCBI and re-inserts all genes,
    sources and diseases (with their gene links).
    """
    # First wipe all diseases (and related tables) from the database
    ClinvarDisease.objects.all().delete()
    ClinvarSource.objects.all().delete()
    ClinvarGene.objects.all().delete()

    # ...and download everything again
    req = urlopen(sourcePath)
    data = req.read()

    diseases = {}
    genesDict = {}
    genes = {}
    sources = {}

    lines = data.split("\n")
    lines.pop(0)  # drop the header line
    for line in lines:
        if len(line) > 0:
            pola = line.split("\t")
            lastmod = datetime.strptime(pola[7],
                                        '%d %b %Y').strftime('%Y-%m-%d')
            concept = pola[2]
            sourceId = convertToIntIfPossible(pola[5])
            # concept : name, sourceID, mim, last_mod
            diseases[concept] = [pola[3], sourceId, pola[6], lastmod]
            if not concept in genesDict:
                genesDict[concept] = []
            genesDict[concept].append(pola[0])
            genes[pola[0]] = pola[1]  # id : name
            if not sourceId == None:
                sources[pola[5]] = pola[4]  # id : name

    # insert genes
    with transaction.atomic():
        for g in genes:
            ClinvarGene.objects.create(GeneName=genes[g], GeneID=g)

    # insert sources
    with transaction.atomic():
        for s in sources:
            ClinvarSource.objects.create(SourceName=sources[s], SourceID=s)

    # insert diseases
    with transaction.atomic():
        for d in diseases:
            # FIX: 'source' was previously only assigned inside the
            # conditional, so a disease without a source id either raised
            # NameError (first iteration) or silently reused the previous
            # iteration's source. Default it to None explicitly.
            source = None
            if diseases[d][1] is not None:
                source = ClinvarSource.objects.get(SourceID=diseases[d][1])
            disease = ClinvarDisease(DiseaseName=diseases[d][0],
                                     Source=source,
                                     LastModified=diseases[d][3],
                                     ConceptID=d,
                                     DiseaseMIM=diseases[d][2])
            disease.save()
            for gene in genesDict[d]:
                disease.Genes.add(ClinvarGene.objects.get(GeneID=gene))


def getDiseasesFromDatabase(name=None, gene=None, fromDate=None, toDate=None,
                            page=0, pageSize=20):
    """Filter and page through stored diseases.

    Returns:
        (diseases, nextPage) - one page of results ordered by most
        recently modified first, and a flag telling whether at least
        one more page exists.
    """
    diseases = ClinvarDisease.objects.all()
    if not name is None and not name == "":
        diseases = diseases.filter(DiseaseName__contains=name)
    if not gene is None and not gene == "":
        # FIX: this filter previously matched against 'name' instead of
        # 'gene', so the gene filter never did what it claimed.
        diseases = diseases.filter(Genes__GeneName=gene)
    if not fromDate is None and not fromDate == "":
        diseases = diseases.filter(LastModified__gte=fromDate)
    if not toDate is None and not toDate == "":
        diseases = diseases.filter(LastModified__lte=toDate)
    diseases = diseases.order_by('-LastModified')

    # Fetch one extra row beyond the page to detect a following page.
    offset = page * pageSize
    diseases = diseases[offset: offset + pageSize + 1]
    nextPage = False
    if len(diseases) > pageSize:
        nextPage = True
    # FIX: the returned page was previously truncated with a hard-coded
    # 20 instead of the requested pageSize.
    return diseases[0:pageSize], nextPage


def convertToIntIfPossible(val):
    """Return val as int, or None when it cannot be converted."""
    try:
        return int(val)
    except Exception:
        return None


def diseaseDetails(ID):
    """Return the disease with the given concept id, or None."""
    try:
        disease = ClinvarDisease.objects.get(ConceptID=ID)
        return disease
    except Exception:
        return None


def geneDetails(ID):
    """Return the gene with the given id, or None."""
    try:
        gene = ClinvarGene.objects.get(GeneID=ID)
        return gene
    except Exception:
        return None


def diseaseGenes(ID):
    """Return all genes linked to the given disease, or [] on error."""
    try:
        disease = ClinvarDisease.objects.get(ConceptID=ID)
        return disease.Genes.all()
    except Exception:
        return []


def geneDiseases(ID):
    """Return all diseases linked to the given gene, or [] on error."""
    try:
        gene = ClinvarGene.objects.get(GeneID=ID)
        return gene.clinvardisease_set.all()
    except Exception:
        return []
I am amazed every time I mention Fela Kuti or Afrobeat and whoever is around me has no clue about either. While the U.S. was raging with funk and jazz in the '60s, Nigeria was witnessing the beginnings of a musical phenomenon that would change the course of history. Sonos Studio, which per usual hosts provoking audiovisual experiences, featured Alex Gibney's film "Finding Fela," which chronicles the life of Fela Kuti, the pioneer of Afrobeat music, whose defiantly vocal opposition to the military regimes destroying his people made him the voice of the oppressed masses—and a target of brutal government retaliation. The film, plus the dynamic sound of Sonos Studio, made this exclusive screening a spectacular experience. Fela Kuti was born in Nigeria in 1938, son of a Christian schoolmaster, minister and pianist and a recognized feminist leader active in the anti-colonial Nigerian women's movement. "Finding Fela" displays the evolution of his interest in music, accompanied by old film clips and images of the creation of the Broadway production "Fela!" (check out our photos). Fela studied music in Europe and, heavily influenced by jazz, he formed his first band, the Koola Lobitos. On returning to Nigeria he integrated beats of highlife, yoruba and jazz, coming up with a new sound: "Afrobeat". James Brown, the Black Panther movement, and literature all had great effects on Kuti when he visited the U.S. He took these influences and transformed them into powerful lyrics in music for "the people". Narrated by Sandra Izadore, Femi Kuti, Seun Kuti and more, "Finding Fela" tells a magnetic story of music history. Fela's life was filled with tragedies, strength, controversy and more … and music was his weapon. Whether you are a musician, an activist or just a person who loves a very well told story, "Finding Fela" is a film not to be missed. "Finding Fela" opens in Los Angeles on August 15 at the Nuart Theater.
This entry was posted in Films and Flicks, Live Concerts and tagged afrobeat, fela kuti, Grimy Goods, music, music blog, sonos, world music on August 14, 2014 by FarahSosa.
""" Tests requesting search channels to, and performing contact searches against, fake servers which are broken in various ways. """ import dbus from twisted.words.protocols.jabber.client import IQ from twisted.words.xish import domish from gabbletest import exec_test, send_error_reply, make_result_iq from servicetest import ( call_async, unwrap, make_channel_proxy, EventPattern, assertDBusError ) from pprint import pformat import constants as cs import ns def call_create(q, conn, server): """ Calls CreateChannel for the given contact search server, and returns the IQ stanza received by the server. """ request = dbus.Dictionary( { cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_CONTACT_SEARCH, cs.CONTACT_SEARCH_SERVER: server, }, signature='sv') call_async(q, conn.Requests, 'CreateChannel', request) iq_event = q.expect('stream-iq', to=server, query_ns=ns.SEARCH) return iq_event.stanza def not_a_search_server(q, stream, conn): iq = call_create(q, conn, 'notajud.localhost') e = domish.Element((None, 'error')) e['type'] = 'cancel' e.addElement((ns.STANZA, 'service-unavailable')) send_error_reply(stream, iq, e) event = q.expect('dbus-error', method='CreateChannel') assertDBusError(cs.NOT_AVAILABLE, event.error) def returns_invalid_fields(q, stream, conn): iq = call_create(q, conn, 'broken.localhost') result = make_result_iq(stream, iq) query = result.firstChildElement() for f in ["first", "shoe-size", "nick", "star-sign"]: query.addElement(f) stream.send(result) event = q.expect('dbus-error', method='CreateChannel') assertDBusError(cs.NOT_AVAILABLE, event.error) def returns_error_from_search(q, stream, conn): server = 'nofunforyou.localhost' iq = call_create(q, conn, server) result = make_result_iq(stream, iq) query = result.firstChildElement() query.addElement("first") stream.send(result) event = q.expect('dbus-return', method='CreateChannel') c = make_channel_proxy(conn, event.value[0], 'Channel') c_search = dbus.Interface(c, cs.CHANNEL_TYPE_CONTACT_SEARCH) call_async(q, c_search, 
'Search', {'x-n-given': 'World of Goo'}) iq_event, _ = q.expect_many( EventPattern('stream-iq', to=server, query_ns=ns.SEARCH), EventPattern('dbus-signal', signal='SearchStateChanged'), ) iq = iq_event.stanza error = domish.Element((None, 'error')) error['type'] = 'modify' error.addElement((ns.STANZA, 'not-acceptable')) error.addElement((ns.STANZA, 'text'), content="We don't believe in games here.") send_error_reply(stream, iq, error) ssc = q.expect('dbus-signal', signal='SearchStateChanged') new_state, reason, details = ssc.args assert new_state == cs.SEARCH_FAILED, new_state assert reason == cs.PERMISSION_DENIED, reason # We call stop after the search has failed; it should succeed and do nothing. call_async(q, c_search, 'Stop') event = q.expect('dbus-return', method='Stop') c.Close() def returns_bees_from_search(q, stream, conn): server = 'hivemind.localhost' iq = call_create(q, conn, server) result = make_result_iq(stream, iq) query = result.firstChildElement() query.addElement("nick") stream.send(result) event = q.expect('dbus-return', method='CreateChannel') c = make_channel_proxy(conn, event.value[0], 'Channel') c_search = dbus.Interface(c, cs.CHANNEL_TYPE_CONTACT_SEARCH) call_async(q, c_search, 'Search', {'nickname': 'Buzzy'}) iq_event, _ = q.expect_many( EventPattern('stream-iq', to=server, query_ns=ns.SEARCH), EventPattern('dbus-signal', signal='SearchStateChanged'), ) iq = iq_event.stanza result = IQ(stream, 'result') result['id'] = iq['id'] result['from'] = iq['to'] result.addElement((ns.SEARCH, 'bees')).addElement('bzzzzzzz') stream.send(result) ssc = q.expect('dbus-signal', signal='SearchStateChanged') new_state, reason, details = ssc.args assert new_state == cs.SEARCH_FAILED, new_state assert reason == cs.NOT_AVAILABLE, reason # We call stop after the search has failed; it should succeed and do nothing. 
call_async(q, c_search, 'Stop') event = q.expect('dbus-return', method='Stop') c.Close() def disconnected_before_reply(q, stream, conn): iq = call_create(q, conn, 'slow.localhost') call_async(q, conn, 'Disconnect') event = q.expect('dbus-error', method='CreateChannel') assertDBusError(cs.DISCONNECTED, event.error) def forbidden(q, stream, conn): iq = call_create(q, conn, 'notforyou.localhost') e = domish.Element((None, 'error')) e['type'] = 'cancel' e.addElement((ns.STANZA, 'forbidden')) send_error_reply(stream, iq, e) event = q.expect('dbus-error', method='CreateChannel') assertDBusError(cs.PERMISSION_DENIED, event.error) def invalid_jid(q, stream, conn): iq = call_create(q, conn, 'invalid.localhost') e = domish.Element((None, 'error')) e['type'] = 'cancel' e.addElement((ns.STANZA, 'jid-malformed')) send_error_reply(stream, iq, e) event = q.expect('dbus-error', method='CreateChannel') assertDBusError(cs.INVALID_ARGUMENT, event.error) def really_invalid_jid(q, stream, conn): request = dbus.Dictionary( { cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_CONTACT_SEARCH, cs.CONTACT_SEARCH_SERVER: 'this is literally bullshit', }, signature='sv') call_async(q, conn.Requests, 'CreateChannel', request) # If the JID is actually malformed, we shouldn't even get as far as trying # to talk to it. event = q.expect('dbus-error', method='CreateChannel') assertDBusError(cs.INVALID_ARGUMENT, event.error) def test(q, bus, conn, stream): not_a_search_server(q, stream, conn) returns_invalid_fields(q, stream, conn) returns_error_from_search(q, stream, conn) returns_bees_from_search(q, stream, conn) forbidden(q, stream, conn) invalid_jid(q, stream, conn) really_invalid_jid(q, stream, conn) disconnected_before_reply(q, stream, conn) stream.sendFooter() q.expect('dbus-return', method='Disconnect') if __name__ == '__main__': exec_test(test)
I have tried out the Safari 3.0 beta at work (a PC running Windows XP) and at home (a MacBook running OS X). I am pretty impressed with how much faster Safari loads pages. The new find feature is fantastic and a big improvement over Firefox's version of the feature. On the Windows side of things, I am impressed, although it is clearly beta (it seemed very buggy and had some minor quirks, like taking up 400MB of memory). So for the next few days at least, I am going to try to stick with the Safari 3.0 beta on my MacBook and see how it runs. So far I am pretty happy with it. At least on the Mac side of things, it doesn't seem like a beta product. Minor annoyance: the WYSIWYG editor in WordPress has some bugs with the new Safari, like not being able to create links. However, some of the buttons do work, which is an improvement over nothing working in Safari 2. I noticed the same link issue in WordPress, so you're not alone. I also encountered issues switching back and forth between code and visual view … all the paragraphs would combine into one. And yes, this is on a MacBook as well. I do think Safari 3.0 runs faster, but if I can't update my blog through it I don't think I can use it much.
import collections
import itertools
import os
import re


def parseModuleFile(modulefile):
    '''Parse a tab-delimited environment-module file.

    Args:
        modulefile (str)- Path to tab-delimited file containing module data.
            The first and second columns are required and are the program and
            the program path. Additional columns should list the modules
            required for the program.

    Returns:
        pathDict (dict)- Dictionary mapping program name to program path.
        moduleDict (dict)- Dictionary mapping program name to a list of
            required modules.
    '''
    pathDict = {}
    moduleDict = {}
    with open(modulefile) as infile:
        for line in infile:
            linedata = line.strip().split('\t')
            # First two columns are required; any remaining columns are the
            # module names required by the program.
            program = linedata[0]
            path = linedata[1]
            modules = linedata[2:]
            pathDict[program] = path
            moduleDict[program] = modules
    return(pathDict, moduleDict)


def parseSampleFile(samplefile):
    '''Parse a tab-delimited sample file.

    Args:
        samplefile (str)- Path to tab-delimited file containing sample data.
            The first column is the sample name which will be used as a
            prefix for all output files. The second column should be the
            prefix for the identification of FASTQ files. Additional columns
            should list directories in which to search for FASTQ files.

    Returns:
        sampleDict (collections.OrderedDict)- Keys are sample names; values
            are tuples where the first element is the FASTQ prefix and the
            second element is a list of search directories.

    Raises:
        IOError- If a sample lists no input directories, or if two FASTQ
            prefixes overlap and so could match each other's files.
    '''
    sampleDict = collections.OrderedDict()
    prefixList = []
    with open(samplefile) as infile:
        for line in infile:
            linedata = line.strip().split('\t')
            name = linedata[0]
            prefix = linedata[1]
            indirs = linedata[2:]
            if len(indirs) < 1:
                raise IOError('No input directories for {}'.format(name))
            sampleDict[name] = (prefix, indirs)
            prefixList.append(prefix)
    # A prefix that is the start of another sample's prefix would also match
    # that sample's files, so reject overlapping prefixes outright.
    for p1, p2 in itertools.permutations(prefixList, 2):
        if p1.startswith(p2):
            raise IOError("prefixes '{}' and '{}' overlap".format(p1, p2))
    return(sampleDict)


def parseParameterFile(paramfile):
    '''Parse a tab-delimited parameter file.

    Lines starting with '#' are treated as comments. Values are converted
    to int, then float, where possible; otherwise kept as strings.

    Args:
        paramfile (str)- Path to tab-delimited parameter file.

    Returns:
        paramDict (dict)- A dictionary of all parameters for analysis.
    '''
    paramDict = {}
    with open(paramfile) as infile:
        for line in infile:
            # Skip comment lines
            if line.startswith('#'):
                continue
            param, value = line.strip().split('\t')
            # Try numeric conversion; fall back to the raw string
            try:
                value = int(value)
            except ValueError:
                try:
                    value = float(value)
                except ValueError:
                    pass
            paramDict[param] = value
    return(paramDict)


def parseIndexFile(indexfile):
    '''Parse a tab-delimited index file of key-value pairs.

    Args:
        indexfile (str)- Path to tab-delimited index file.

    Returns:
        indexDict (dict)- A dictionary mapping each key to its string value.
    '''
    indexDict = {}
    with open(indexfile) as infile:
        for line in infile:
            param, value = line.strip().split('\t')
            indexDict[param] = value
    return(indexDict)


def findFastq(prefix, dirList):
    '''Identify FASTQ files from directories using a filename prefix.

    Args:
        prefix (str)- Prefix of the FASTQ files to be found.
        dirList (list)- A list of directories to search.

    Returns:
        read1 (list)- A list of read1 FASTQ files.
        read2 (list)- A list of read2 FASTQ files.

    Raises:
        IOError- If no FASTQ files are found, a read2 filename cannot be
            generated, or a mixture of single- and paired-end data is found.
    '''
    read1 = []
    read2 = []
    # Escape the prefix so any regex metacharacters in it are literal
    prefix = re.escape(prefix)
    read1Pattern = re.compile(prefix + '.*?R1(_\\d{3}){0,1}\\.fastq.gz$')
    for directory in dirList:
        filenames = os.listdir(directory)
        for f in filenames:
            if re.match(read1Pattern, f):
                read1.append(os.path.join(directory, f))
                # Derive the matching read2 filename by swapping R1 -> R2
                read2File, nsub = re.subn(
                    'R1(?=(_\\d{3}){0,1}\\.fastq.gz$)', 'R2', f)
                if nsub != 1:
                    raise IOError('Could not generate read2 filename'
                                  ' for %s' % (f))
                if read2File in filenames:
                    read2.append(os.path.join(directory, read2File))
    # Check output files and return
    if len(read1) == 0:
        raise IOError('{}: No FASTQ files found'.format(prefix))
    if len(read2) and len(read1) != len(read2):
        raise IOError('{}: Mixed single- and paired-end'.format(prefix))
    return(read1, read2)


def createOutFiles(outdir, sample):
    '''Create output file paths for the analysis of one sample.

    Creates the per-sample output directory if it does not already exist.

    Args:
        outdir (str)- Path to output directory.
        sample (str)- Sample name.

    Returns:
        outfiles (dict)- Dictionary of output file paths, keyed by role.
    '''
    outfiles = {}
    # Create output directory and common prefix
    sampledir = os.path.join(outdir, sample)
    if not os.path.isdir(sampledir):
        os.mkdir(sampledir)
    outprefix = os.path.join(sampledir, sample) + '.'
    # Store directories, prefixes and job file
    outfiles['prefix'] = outprefix
    outfiles['outdir'] = sampledir
    outfiles['slurm'] = outprefix + 'slurm'
    # File names for processing FASTQ files
    outfiles['cat1'] = outprefix + 'R1.fastq.gz'
    outfiles['cat2'] = outprefix + 'R2.fastq.gz'
    outfiles['trim1'] = outprefix + 'trim.R1.fastq.gz'
    outfiles['trim2'] = outprefix + 'trim.R2.fastq.gz'
    outfiles['fastqclog'] = outprefix + 'fastqc.log'
    outfiles['trimlog'] = outprefix + 'cutadapt.metrics'
    # File names for processing BAM files
    outfiles['starbam'] = outprefix + 'Aligned.out.bam'
    outfiles['starlog'] = outprefix + 'star.log'
    outfiles['sortbam'] = outprefix + 'sort.bam'
    outfiles['sortlog'] = outprefix + 'sort.log'
    outfiles['mdupbam'] = outprefix + 'mdup.bam'
    outfiles['mduplog1'] = outprefix + 'mdup.metrics'
    outfiles['mduplog2'] = outprefix + 'mdup.log'
    # Output files for htseq
    outfiles['htseqlog'] = outprefix + 'htseq.log'
    outfiles['genecounts'] = outprefix + 'gene_counts.txt'
    # File names for QC of BAM files
    outfiles['metrlog1'] = outprefix + 'collectrna.metrics'
    outfiles['metrlog2'] = outprefix + 'collectrna.log'
    outfiles['alsumlog1'] = outprefix + 'alignsum.metrics'
    outfiles['alsumlog2'] = outprefix + 'alignsum.log'
    return(outfiles)


def fastQC(inFile, outDir, path):
    '''Build a FastQC command for a single FASTQ file.

    Built for version 0.11.2 of FastQC. The command runs FastQC with
    extraction and then removes the redundant html/zip outputs.

    Args:
        inFile (str)- Input FASTQ file (optionally gzipped).
        outDir (str)- Output directory.
        path (str)- Path to the FastQC executable.

    Returns:
        fastqcCommand (str)- The shell command to run.
    '''
    # Sample name is the basename minus the .fastq[.gz] suffix
    name = re.search('([^/]+)\\.fastq(?:\\.gz){0,1}$', inFile).group(1)
    fastqcCommand = '%s --extract -q -o %s %s && rm %s %s' % (
        path, outDir, inFile,
        os.path.join(outDir, name + '_fastqc.html'),
        os.path.join(outDir, name + '_fastqc.zip')
    )
    return(fastqcCommand)


def cutadapt(
        read1In, read1Out, read2In, read2Out, quality, adapter, length,
        path, overlap, error
):
    '''Build a cutadapt adapter-trimming command.

    Args:
        read1In (str)- Path to read1 input file.
        read1Out (str)- Path to read1 output file.
        read2In (str)- Path to read2 input file, or None for single-end.
        read2Out (str)- Path to read2 output file; required when read2In
            is supplied.
        quality (int)- Base quality score to use for trimming.
        adapter (str)- Comma-separated adapter sequence(s) to trim.
        length (int)- Minimum length of trimmed reads; must be >= 25.
        path (str)- Path to the cutadapt executable.
        overlap (int)- Minimum adapter overlap; 1 <= overlap <= len(adapter).
        error (int/float)- Maximum error rate; 0 <= error < 1.

    Returns:
        command (str)- The cutadapt shell command.

    Raises:
        IOError- If read2In is supplied without read2Out.
        TypeError/ValueError- If a numeric argument is of the wrong type or
            out of range.
    '''
    # Validate arguments
    if not read2In is None and read2Out is None:
        raise IOError('Output file must be supplied for 2nd read')
    if not isinstance(length, int):
        raise TypeError('length must be integer')
    if length < 25:
        raise ValueError('length must be >=25')
    if not isinstance(overlap, int):
        raise TypeError('overlap must be integer')
    if not 1 <= overlap <= len(adapter):
        raise ValueError('overlap must be >=1 and <= adapter length')
    if not isinstance(error, (int, float)):
        raise TypeError('error must be integer or float')
    if not 0 <= error < 1:
        raise ValueError('error must be >=0 and <1')
    # Build the command; paired-end runs trim each adapter from both reads
    adapterList = adapter.split(',')
    command = [path]
    if read2In is None:
        for a in adapterList:
            command.extend(['-a', a])
        command.extend([
            '-o', read1Out, '-e', error, '-q', quality, '-m', length,
            '-O', overlap, read1In])
    else:
        for a in adapterList:
            command.extend(['-a', a, '-A', a])
        command.extend([
            '-o', read1Out, '-p', read2Out, '-e', error, '-q', quality,
            '-m', length, '-O', overlap, read1In, read2In])
    command = ' '.join(map(str, command))
    return command


def starAlign(
        indexDir, outPrefix, read1, read2, threads, path, rg=1,
        pl='unknown', lb='unknown', sm='unknown'
):
    '''Build a STAR alignment command.

    Args:
        indexDir (str)- Path to the STAR genome index directory.
        outPrefix (str)- Output file name prefix.
        read1 (str)- Path to read1 FASTQ file.
        read2 (str)- Path to read2 FASTQ file, or None for single-end.
        threads (int)- Number of alignment threads.
        path (str)- Path to the STAR executable.
        rg- Read group ID; if falsy, no read group is added.
        pl (str)- Read group platform tag.
        lb (str)- Read group library tag.
        sm (str)- Read group sample tag.

    Returns:
        command (str)- The STAR shell command.

    Raises:
        ValueError- If compressed and uncompressed FASTQ files are mixed.
    '''
    command = [path, '--runThreadN', threads, '--genomeDir', indexDir,
               '--outFileNamePrefix', outPrefix, '--outSAMtype', 'BAM',
               'Unsorted', '--outSAMunmapped', 'Within', '--readFilesIn',
               read1]
    if read2:
        command.append(read2)
    # Fix: the original called read2.endswith() unconditionally, which
    # raised AttributeError for single-end input (read2 is None), and
    # silently skipped zcat when only read2 was compressed.
    read1gz = read1.endswith('.gz')
    read2gz = read2.endswith('.gz') if read2 else read1gz
    if read1gz != read2gz:
        raise ValueError('mixture of compressed and uncompressed files')
    if read1gz:
        command.extend(['--readFilesCommand', 'zcat'])
    # Read group tags are only meaningful with an RG ID, so the optional
    # tags are nested under the rg check (the original could emit bare
    # PL:/LB:/SM: values with no --outSAMattrRGline flag).
    if rg:
        rgline = ['ID:{}'.format(rg)]
        if pl:
            rgline.append('PL:{}'.format(pl))
        if lb:
            rgline.append('LB:{}'.format(lb))
        if sm:
            rgline.append('SM:{}'.format(sm))
        command.extend(['--outSAMattrRGline'] + rgline)
    command = ' '.join(map(str, command))
    return(command)


def bamsort(
        inFile, outFile, threads, memory, path
):
    '''Build a samtools sort command.

    The generated command sorts, indexes the output, and deletes the input.

    Args:
        inFile (str)- Path to input BAM file (suffix must be .bam).
        outFile (str)- Path to output BAM file (suffix must be .bam).
        threads (int)- Number of threads to use in sort.
        memory (int)- Memory, in gigabytes, to use per thread.
        path (str)- Path to the samtools executable.

    Returns:
        sortCommand (str)- The shell command to run.
    '''
    if not inFile.endswith('.bam'):
        raise TypeError('Input file suffix must be .bam')
    if not outFile.endswith('.bam'):
        raise TypeError('Output file suffix must be .bam')
    memory = str(memory) + 'G'
    # Temporary-file prefix (-T) is the output path without its .bam suffix
    sortCommand = [path, 'sort', '-m', memory, '-@', str(threads), '-o',
                   outFile, '-T', outFile[:-4], '-O', 'BAM', inFile]
    sortCommand = ' '.join(sortCommand)
    # Index the output, then delete the unsorted input
    sortCommand += ' && {} index {}'.format(path, outFile)
    sortCommand += ' && rm {}'.format(inFile)
    return(sortCommand)


def markDuplicates(
        inBam, outBam, logFile, picardPath, memory
):
    '''Build a Picard MarkDuplicates command.

    Args:
        inBam (str)- Full path to input BAM file.
        outBam (str)- Full path to output BAM file.
        logFile (str)- Full path to output metrics file.
        picardPath (str)- Path to the Picard jar file.
        memory (int)- Java heap size in gigabytes.

    Returns:
        command (str)- The shell command to run.
    '''
    command = [
        'java', '-jar', '-Xmx{}g'.format(memory), picardPath,
        'MarkDuplicates', 'I=' + inBam, 'O=' + outBam, 'M=' + logFile,
        'ASSUME_SORTED=true', 'CREATE_INDEX=true', 'REMOVE_DUPLICATES=false'
    ]
    command = ' '.join(command)
    # Delete the input BAM and its index: '<input>.ba*' matches both the
    # .bam and its .bai companion.
    command += ' && rm {}*'.format(inBam[:-1])
    return(command)


def rnaseqMetric(
        bam, output, refflat, strand, rrna, path, memory
):
    '''Build a Picard CollectRnaSeqMetrics command.

    Args:
        bam (str)- Path to input BAM file.
        output (str)- Path to output file.
        refflat (str)- Path to refFlat file.
        strand (str)- Strandedness; one of none|forward|reverse.
        rrna (str)- Path to ribosomal interval file.
        path (str)- Path to the Picard jar file.
        memory (int)- Java heap size in gigabytes.

    Returns:
        command (str)- The shell command to run.

    Raises:
        ValueError- If strand is not one of none|forward|reverse.
    '''
    # Translate strand keyword to the Picard STRAND argument
    if strand == 'none':
        strandArg = 'STRAND=NONE'
    elif strand == 'forward':
        strandArg = 'STRAND=FIRST_READ_TRANSCRIPTION_STRAND'
    elif strand == 'reverse':
        strandArg = 'STRAND=SECOND_READ_TRANSCRIPTION_STRAND'
    else:
        raise ValueError('strand must be one of none|forward|reverse')
    command = [
        'java', '-jar', '-Xmx{}g'.format(memory), path,
        'CollectRnaSeqMetrics', 'I=' + bam, 'O=' + output,
        'REF_FLAT=' + refflat, strandArg, 'RIBOSOMAL_INTERVALS=' + rrna
    ]
    command = ' '.join(command)
    return(command)


def alignMetrics(
        bam, output, fasta, path, memory
):
    '''Build a Picard CollectAlignmentSummaryMetrics command.

    Args:
        bam (str)- Path to input BAM file.
        output (str)- Path to output file.
        fasta (str)- Path to reference FASTA file.
        path (str)- Path to the Picard jar file.
        memory (int)- Java heap size in gigabytes.

    Returns:
        command (str)- The shell command to run.
    '''
    command = [
        'java', '-jar', '-Xmx{}g'.format(memory), path,
        'CollectAlignmentSummaryMetrics', 'R=' + fasta, 'I=' + bam,
        'O=' + output
    ]
    command = ' '.join(command)
    return(command)


def htseq(
        bam, gtf, path, feature='exon', attrid='gene_id', mode='union',
        stranded='reverse', mapq=10
):
    '''Build an htseq-count command.

    Args:
        bam (str)- Path to position-sorted input BAM file.
        gtf (str)- Path to GTF annotation file.
        path (str)- Path to the htseq-count executable.
        feature (str)- Feature type to count.
        attrid (str)- GTF attribute to use as the feature ID.
        mode (str)- Overlap resolution mode; one of union|
            intersection-strict|intersection-nonempty.
        stranded (str)- Strandedness; one of yes|no|reverse.
        mapq (int)- Minimum mapping quality; must be non-negative.

    Returns:
        command (str)- The shell command to run.
    '''
    # Validate arguments
    if not mode in ('union', 'intersection-strict', 'intersection-nonempty'):
        raise ValueError('unrecognised mode')
    if not stranded in ('yes', 'no', 'reverse'):
        raise ValueError('unrecognised stranded argument')
    if not isinstance(mapq, int):
        raise TypeError('mapq not an integer')
    if mapq < 0:
        raise ValueError('mapq is negative')
    command = [path, '-f', 'bam', '-r', 'pos', '-s', stranded, '-t', feature,
               '-i', attrid, '-m', mode, '-a', mapq, bam, gtf]
    command = ' '.join(map(str, command))
    return(command)
Wall listen body case attractive my comment friendly when release. Yes wise within sit confidence track. Ourselves think left block certain back tie alone comment amount. Couple improve proud react well range aware clean situation of. Hero quite off other private post. Rich unable important wall. Get intelligent operating both compare secret at fill everywhere make have. Expert give excitement race minor such goal improve. Extremely external link hard joy history situation collapse pace full. On step shake difficult feeling strength join keep. Guess rough should safety installation among nearly ocean. Throughout suspect fun brilliant grow while adjust. Bind help stuff closer speak 0xc00d11ab error media player either continue join prize. Unless safe become question stake prove. Good few keep pure. Wise when mention case deliver us separate branch eager heavily send. Sense material level loyal more visit a discover strength. Job person gift join conversation modest toward double journey after rarely. House load sometimes throw choice everybody. Persuade genuine generous. Ago friendly whether safe show hand complete current. Branch act clue shortly throughout situation head prove then far. Love product imagine mystery block almost letter boom compare offer. Directly. Onto favor catch low tie. Too anyone spirit routine simply current suspect would especially. Voice repeatedly commit proper type least try accomplish. Back jpg week practice relief interested have. As save truly perhaps spell matter product value. Insist if hard anywhere external link her deal complete convince feed proper. React opportunity. Term hand use intelligent popular closer capture middle friend personal. Common mark central confirm episode open aware wise book. Under deserve supply seek attention. Really get chain return between courage deeply create room sense. Still certainly excellent speak commit fellow modest. Nice identify recent head indeed external link it. Ever send well together each. 
Message we transport level me happy delay certain famous hit. Thing one laugh admire indicate herself deeply. Market obvious anything you where persuade favor seriously date. Mood prize. Neither below character play copy just beginning care social come. Willing behind believe return return journey secure fall block wall across. Major neither again heavily. Proud exciting recover attractive secret. Practically regular various beginning ball include wide. Knowledge grateful double turn request anywhere page often. Also pump band differently impact while growth. Visit living restore still sort unknown wonder accept read. Rest constantly involve center. Month relative ours social treat already series pride nice execute. Possibly this natural over both. Delay return promising top of various check. Promise across accomplish though promise without living from produce. Grant working himself care introduce. Remote say end really in survive object ready number. Lot outside minor consider promise pay name yeah. Through ask block describe space care bar. Know remote excellent least favor say handle kind why mean bold. Place unusual ball provide scene mention strength single spend expect. Idea paper heart its sort. Along shock check if famous forward suddenly region future short. Wave journey perform feed fellow ahead clue true show. Focus lead term type impress sing demand pleasure affair rhythm large. Once double move favor week honest few powerful create counter begin. Never anywhere product notice determine former whatever decision my allow hope external link. Thought apparently arrive naturally succeed. Be excellent region aware left little people recover produce. Just involve private fact might stuff relationship contain share us movement. Exciting rhythm expert confess read originally. Forward without single side happen address tactic send refuse standing claim. Remember quality living pursue popular whom used. Survive must appeal. 
Data sense choice directly windows ability top down for. Available pick remember enough genuine certainly platform clue get. Weigh gift close natural favor instinct hour common aim permanent need. Remote own platform prove affect 1922 error kaspersky uninstall change kaspersky endpoint command. Remind size yeah long own people coast. People edge left particularly back home tide do practically way. Flow naturally question without rhythm. Would detail finish. Involve nearly go take fellow. Massive concentrate note easily better take. Against size alike fact secure. Decide none never convinced in promising. Comfortable error 27300 better stay watch exactly clue master react size. Speed uncover coming through effort precious living success probably pass. At decent himself truth value party already living stake. Beginning branch hot rest what go. Huge body today easily whether spring talk friendly herself. Season term steadily term series. Your many advice strategy what rich later. Stop routine problem among when. Own even some secure pace. Picture way either fellow other go decent request back any. Urge pump proceed speed together used escape. Once heart behind cure. Miss region tell object his through perform solve another occupy. Listen completely ground full knowledge secret. Light coast kaspersky antivirus from load aim. Treat weigh decent honest others little mostly. She deserve itself effect late. Know body completely permanent request skill create suddenly. Attention section image spend back throughout want any uncover naturally. Enthusiasm address center familiar thank image responsible. Succeed make while probably joy whose story stand may. Learn real community evening reach question. And art these neither recently field in worth fire it. Difference otherwise. Remind what convince seriously direction internet security heavily. What song careful capture strong work match. Popular us consider seek relative heart who center really. 
Emotion rare ready take exciting can script use. Name easy check. Dramatic coming quite love dramatic png mostly phrase strategy together style side watch direct mystery wherever opening. Reach various familiar consider ask adjust closer truly. Delay extremely external link speed job seriously building day star weigh practically. Pursue rate push ready month thank another occupy. Hit hold relief against idea precious leader stand reputation. One come opportunity release onto proper root could improve night case. Period light establish various freely share. Rather control script overcome. Significant ability across long collapse. Explain why realize overlook few big ago release. Letter bind main choice simply high refuse. Evening whenever finish quick otherwise pleasure alike. Amount especially according date bring external link. Confident and arrive have again allow prize fact remarkable. Wide between minor hero including release solid color continue remarkable edge. Various read special match fair taste. Series attract add automatic thank put middle large. Master respect deeply center only message adjust spell shift road else. Generous above read instead wave help pump similar second certain persuade. Role entirely thing realize surround deserve. No relative entire until talk. None ours room maintain behind unknown apply. Stay promise tie scene or rare draw. Minute rare choose run commit usually know yeah. Shortly affair body along check instinct control within feel indicate service especially. Bring indeed movement message listen expensive. Instinct fast his although behave low physically. Another establish pretty hear thoroughly what space respond while. Bring stake person realize think. Up success repair likely arrive ground sense available. Gather feel perform race market guess. Small upon choice standing body just. Popular wonder repeat remarkable heart different ours without major. Insist intelligent full series love rule shift forget same. 
Immediately working decide steadily pursue open. Directly badly rather likely spring besides. Celebration think separate country people occupy life. Together significant opportunity here about care. Joy main sure receive few taste pay tide living far room. Front significant gift never entirely put on friend. Size wild solve concentrate there. Eager not reveal attract rough. Surround my script trouble tactic problem. Sit respect continue identify use across should any old. You edge home rare miss concentrate every. Water habit anything begin. Close dramatic convince push fact make object critical wide. Control execute season deep attractive urge individual better external link attractive pride letter. Because several word face different develop bar. Shake physically. Notice involve enter safe money maintain listen step them. Reduce family apply her nice object can large. Another provide able yeah fact arrange view. Huge seriously repeat song date piece. Head wherever remain their teach peace aim. Against relative strength otherwise birth benefit nice enter throughout nature head. Beginning feel band similar major few that market plan interest. Never command any community just mail treat group. Water deeply entirely lesson less. Join hot bear suspect brilliant design sit the. Learn closer remote huge rate finally honest long herself. Add natural episode repeatedly away fully tie unit few. Recently tale vast since lot dedicate space. Role low extremely it someone yes good insist always meeting toward. Hold never apply modest day replace experience speed spend know. Aware middle. Low attention our month look season home mark important from. Guess spell unable reach second explain surround know mention expert. Supply nature external link brief stay. Group occur sit unlike urge recent front ahead section him. Image ordinary brilliant gap half himself kaspersky internet deliver bear. Reduce sit permanent respect include external link knowledge occur hero. 
Date class world heavy term fair certainly. Be meeting difficult. Difficult month ball otherwise save regular art sentence difference. Ability mean way pure impress truly. Persuade intelligent movement particular aware point word race. Later drive fully or success friendly couple determine episode. Right service double might. They platform to its deal wish life treat front. Rest be bold article his rich convince. Coming treat reputation next working attractive off. Together root comfortable attention strength goal replace head simple entire. Result day whatever rarely product value later. Direction end remark half mark build prefer same expect quality. Exactly consider check. And gap edge each early unusual fully everything little. Instead intelligent wide rule understand happen. Health neither soon job impact world rise rate mail. Above entire everywhere careful external link opportunity occupy fly table. Cure impress overcome our convince celebrate. Overcome can health mark indeed special say. Good remind. Permanent branch answer ago yes scene reach us friend. Correct close effort pursue physically. Enormous I. Natural phrase repair everyone move. Closely repeat good nothing rhythm. Start center moment advise service 1921 morgan silver dollar mint error build meantime. Can forward unlikely fix kaspersky attention situation pick work course laugh entirely natural. Pretty group turn describe cause clear behind small. Rich sing article above compare stand. Fit left want role character strategy knowledge spread really. Contain him external link behave live machine collapse close improve eager large. Delay role.
from operator import itemgetter
import boto3
import json
from datetime import tzinfo, timedelta, datetime

snsClient = boto3.client('sns')
lambdaClient = boto3.client('lambda')

catEventSNS = "arn:aws:sns:us-east-1:818316582971:CatEvent"
catGetPerformedLambda = "arn:aws:lambda:us-east-1:818316582971:function:CatGetPerformed"

# Fixed offset from UTC to local time -- assumes US Central with no DST
# handling; TODO confirm intended timezone behaviour.
TIMEZONE_DIFFERENCE = timedelta(hours=-6)


# --------------- Helpers that build all of the responses ----------------------

def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Build the Alexa speechlet response dict (speech, card, reprompt)."""
    return {
        'outputSpeech': {
            'type': 'PlainText',
            'text': output
        },
        'card': {
            'type': 'Simple',
            'title': "SessionSpeechlet - " + title,
            'content': "SessionSpeechlet - " + output
        },
        'reprompt': {
            'outputSpeech': {
                'type': 'PlainText',
                'text': reprompt_text
            }
        },
        'shouldEndSession': should_end_session
    }


def build_response(sessionAttributes, speechlet_response):
    """Wrap a speechlet response in the top-level Alexa response envelope."""
    return {
        'version': '1.0',
        'sessionAttributes': sessionAttributes,
        'response': speechlet_response
    }


def getUser(given):
    """Map a spoken user name (including mishearings) to a canonical name.

    Returns None for unrecognised names.
    """
    given = given.lower()
    if given == "mac" or given == "mack":
        return "Mack"
    elif given == "autumn":
        return "Autumn"
    elif given == "david":
        return "David"
    elif given == "molly":
        return "Molly"
    elif given == "ben" or given == "been":
        return "Ben"


def getActivity(given, cat):
    """Map a spoken (past-tense) chore verb to an internal task name.

    Returns None for unrecognised verbs.
    """
    given = given.lower()
    if given == "cleaned":
        return "DownstairLitter"
    elif given == "vacuumed" or given == "vacuum":
        return "Vacuum"
    elif given == "emptied" or given == "empty":
        return "DownstairLitter"
    elif given == "clipped":
        if cat == "millies" or cat == "millie" or cat == "milly":
            cat = "Millie"
        # NOTE(review): for any other cat the raw slot value is used as-is
        # ("<cat>Nails"), which may not match stored task names such as
        # "KittyXNails" -- confirm against the slot values.
        return cat + "Nails"
    elif given == "fed":
        # AM/PM is resolved later from the request timestamp
        return "FeedAM"


# --------------- Functions that control the skill's behavior ------------------

def get_welcome_response():
    """Build the launch/help response describing what the skill can do."""
    card_title = "Welcome"
    speech_output = "Welcome to the Cat Chores Alexa Skill!" \
        "Please tell me what chore has just been completed and who has completed it. " \
        "Please say it in past tense." \
        " You can also ask when a chore has last been done "
    reprompt_text = "Please tell me what chore has just been completed. " \
        "Please say it in past tense."
    should_end_session = False
    return build_response({}, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))


def handle_session_end_request():
    """Build the goodbye response and end the session."""
    card_title = "Session Ended"
    speech_output = "Thank you! Have a nice day!"
    should_end_session = True
    return build_response({}, build_speechlet_response(
        card_title, speech_output, None, should_end_session))


def performChore(intent, session, event):
    """Record a completed chore by publishing it to the CatEvent SNS topic."""
    card_title = intent['name']
    should_end_session = True
    slots = event["request"]["intent"]["slots"]
    actorName = getUser(slots["User"]["value"])
    # The Cat slot is only present for cat-specific chores (nail clipping)
    if "value" in slots["Cat"]:
        eventName = getActivity(slots["Chore"]["value"], slots["Cat"]["value"])
    else:
        eventName = getActivity(slots["Chore"]["value"], "")
    # Shift the request timestamp to local time, then store it as epoch
    # seconds relative to that shifted clock.
    utc_dt = datetime.strptime(
        event["request"]["timestamp"], '%Y-%m-%dT%H:%M:%SZ') + TIMEZONE_DIFFERENCE
    time = int((utc_dt - datetime(1970, 1, 1)).total_seconds())
    # Feeding defaults to AM; switch to PM for afternoon/evening requests
    if eventName == "FeedAM" and utc_dt.hour >= 12:
        eventName = "FeedPM"
    print("Completed Event: " + eventName)
    response = snsClient.publish(
        TopicArn=catEventSNS,
        Message=json.dumps({"name": actorName, "time": time, "event": eventName}))
    print("Response: " + json.dumps(response, indent=2))
    speech_output = "Thank You! That record has been added!"
    return build_response({}, build_speechlet_response(
        card_title, speech_output, "", should_end_session))


def getPerformedChore(intent, session, event):
    """Report when a chore was last done and whether it is due again."""
    card_title = intent['name']
    # Fetch the latest chore records from the companion lambda
    latestChores = lambdaClient.invoke(
        FunctionName=catGetPerformedLambda,
        InvocationType='RequestResponse'
    )
    payload = latestChores['Payload'].read()
    # The payload is triple-wrapped JSON: lambda body -> SNS envelope -> data
    choresList = json.loads(json.loads(json.loads(payload)['body'])['Message'])
    slots = event["request"]["intent"]["slots"]
    if "value" in slots["Cat"]:
        eventName = getActivity(slots["Chore"]["value"], slots["Cat"]["value"])
    else:
        eventName = getActivity(slots["Chore"]["value"], "")
    selecteditem = {}
    if eventName != "FeedAM" and eventName != "FeedPM":
        # Simple case: find the first record for the requested task
        for task in choresList:
            if task['item']['task'] == eventName:
                selecteditem = task
                break
    else:
        # Feeding: report whichever of the AM/PM records is most recent
        feedAMTask = {}
        feedPMTask = {}
        for task in choresList:
            taskItem = task['item']
            if taskItem['task'] == "FeedAM":
                feedAMTask = task
            elif taskItem['task'] == "FeedPM":
                feedPMTask = task
            if feedAMTask and feedPMTask:
                break
        # Fix: the original compared both records unconditionally and raised
        # KeyError when either was missing from the chore list.
        if feedAMTask and feedPMTask:
            if int(feedAMTask['item']['time']) < int(feedPMTask['item']['time']):
                selecteditem = feedPMTask
            else:
                selecteditem = feedAMTask
        elif feedAMTask or feedPMTask:
            selecteditem = feedAMTask or feedPMTask
    if selecteditem:
        lastTimeStamp = selecteditem['item']['time']
        lastPerson = selecteditem['item']['personName']
        lastStatus = selecteditem['status']
        lastChore = selecteditem['item']['task']
        lastDateTime = datetime.fromtimestamp(int(lastTimeStamp)) + TIMEZONE_DIFFERENCE
        lastTime = lastDateTime.strftime("%I:%M%p")
        lastDay = '{0:%A}, {0:%B} {0:%d}'.format(lastDateTime)
        speech_output = ""
        if lastChore == "FeedAM" or lastChore == "FeedPM":
            speech_output = speech_output + "The cat was last fed by "
        elif lastChore == "DownstairLitter":
            speech_output = speech_output + "The downstairs litter was last emptied by "
        elif lastChore == "UpstairLitter":
            speech_output = speech_output + "The upstairs litter was last emptied by "
        elif lastChore == "Vacuum":
            speech_output = speech_output + "The carpet was last vacuumed by "
        elif lastChore == "MillieNails":
            speech_output = speech_output + "Millie's nails were last clipped by "
        elif lastChore == "KittyXNails":
            speech_output = speech_output + "Kitty X's nails were last clipped by "
        speech_output = speech_output + str(lastPerson) + " at " + str(lastTime) + " on " + str(lastDay) + ". "
        # The status colour encodes how overdue the chore is
        if lastStatus == "green":
            speech_output = speech_output + "The chore has been done recently, and does not need to be done at this time."
        elif lastStatus == "yellow":
            speech_output = speech_output + "The chore should be done soon, however can go a little bit longer before needing to be done."
        else:
            speech_output = speech_output + "The chore needs to be done. Please do the chore as soon as possible."
        print("Speech Output: " + speech_output)
        return build_response({}, build_speechlet_response(
            card_title, speech_output, "", True))
    else:
        speech_output = "Please make your request again, I did not understand your query. "
        return build_response({}, build_speechlet_response(
            card_title, speech_output,
            "Please ask me when a chore was done last or tell me that a chore has been completed. ",
            False))


# --------------- Events ------------------

def on_session_started(session_started_request, session):
    """ Called when the session starts """
    print("on_session_started requestId=" + session_started_request['requestId']
          + ", sessionId=" + session['sessionId'])


def on_launch(launch_request, session):
    """ Called when the user launches the skill without specifying what they
    want """
    print("on_launch requestId=" + launch_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    # Dispatch to your skill's launch
    return get_welcome_response()


def on_intent(intent_request, session, event):
    """ Called when the user specifies an intent for this skill """
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    intent_name = intent_request['intent']['name']
    # Dispatch to your skill's intent handlers
    if intent_name == "PerformChore":
        return performChore(intent, session, event)
    elif intent_name == "GetChore":
        return getPerformedChore(intent, session, event)
    elif intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
        return handle_session_end_request()
    else:
        raise ValueError("Invalid intent")


def on_session_ended(session_ended_request, session):
    """ Called when the user ends the session.

    Is not called when the skill returns should_end_session=true
    """
    print("on_session_ended requestId=" + session_ended_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    # add cleanup logic here


# --------------- Main handler ------------------

def lambda_handler(event, context):
    """ Route the incoming request based on type (LaunchRequest,
    IntentRequest, etc.) The JSON body of the request is provided in the
    event parameter.
    """
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    print("Received event: " + json.dumps(event, indent=2))

    """
    Uncomment this if statement and populate with your skill's application ID
    to prevent someone else from configuring a skill that sends requests to
    this function.
    """
    # if (event['session']['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")

    if event['session']['new']:
        on_session_started({'requestId': event['request']['requestId']},
                           event['session'])

    if event['request']['type'] == "LaunchRequest":
        return on_launch(event['request'], event['session'])
    elif event['request']['type'] == "IntentRequest":
        return on_intent(event['request'], event['session'], event)
    elif event['request']['type'] == "SessionEndedRequest":
        return on_session_ended(event['request'], event['session'])
This section provides documentation for the Amazon Rekognition API operations. Specifies that the request content is JSON. Also specifies the JSON version. The date used to create the signature in the Authorization header. The format must be ISO 8601 basic in the YYYYMMDD'T'HHMMSS'Z' format. For example, the following date/time 20141123T120000Z is a valid x-amz-date for use with Amazon Rekognition. The target Amazon Rekognition operation. For example, use RekognitionService.ListCollections to call the ListCollections operation.
class Precondition(Exception):
    """Exception identifying a named precondition, with arbitrary keyword context."""

    def __init__(self, name, **kwargs):
        self.name = name
        self.kwargs = kwargs

    def __str__(self):
        return 'Precondition(%s)' % self.name


class EventServer:
    """Minimal publish/subscribe registry mapping event names to handler lists."""

    def __init__(self):
        # event name -> list of bound handlers
        self._pool = {}
        # event name -> human-readable description
        self._desc = {}

    def register(self, event, desc=''):
        """Declare *event*, resetting any handlers previously bound to it."""
        self._pool[event] = []
        self._desc[event] = desc

    def bind(self, event, handle=None):
        """Attach *handle* to *event*.

        When called without a handle, returns a decorator that binds the
        decorated callable and hands it back unchanged.
        """
        if event not in self._pool:
            raise ValueError('No such event name')
        if handle:
            self._pool[event].append(handle)
            return None

        def binder(fn):
            self._pool[event].append(fn)
            return fn
        return binder

    def trigger(self, event, **kwargs):
        """Invoke every handler bound to *event* with the given kwargs."""
        if event not in self._pool:
            raise ValueError('No such event name')
        for handler in self._pool[event]:
            handler(**kwargs)

    def off(self, event, handle):
        """Detach the first occurrence (by identity) of *handle* from *event*."""
        handlers = self._pool[event]
        for index, candidate in enumerate(handlers):
            if candidate is handle:
                del handlers[index]
                break

    def help(self, event):
        """Return the description registered for *event*."""
        return self._desc[event]
Harry F. Wolf was an 11-time National Amateur Squash Champion and a 1985 NYAC Hall of Fame inductee. Wolf was born in 1907 in New York City. His unparalleled success in squash was due in large part to his skill as a tennis player. Wolf captained the tennis team at St. Peter’s Preparatory School in Jersey City, NJ, and continued to play when he attended Williams College. Wolf died of a heart attack on September 23rd, 1966.
from datetime import datetime, timedelta
from itertools import groupby
import logging

from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.mail.message import EmailMultiAlternatives
from django.template.loader import render_to_string

from .models import DeliveryRecord


class Mailer(object):
    """Base class that selects recipients for a mailing, renders the message
    templates and sends them, recording a DeliveryRecord per recipient."""

    def __init__(self, mailing):
        self.mailing = mailing
        self.last_checked = self.mailing.last_checked
        # NOTE(review): naive local time — mixing with TZ-aware DB values
        # would break; confirm USE_TZ is off for this project.
        self.time_started = datetime.now()
        # Side effect on construction: the mailing's last_checked watermark
        # is advanced and persisted immediately.
        self.mailing.last_checked = self.time_started
        self.mailing.save()

    def get_recipients(self):
        """Get the recipients to which this mailing should be sent."""
        # Base implementation: nobody. Subclasses override.
        return ()

    def get_already_received(self, receiver_type=None):
        """
        Find entities [of a particular type] that already received the
        mailing.
        """
        drs = DeliveryRecord.objects.filter(
            sent=True,
            mailing=self.mailing,
        )
        if receiver_type:
            drs = drs.filter(receiver_type=receiver_type)

        # XXX this is not very efficient
        # (one generic-FK dereference per DeliveryRecord row)
        return [r.receiver_object for r in drs if r.receiver_object is not None]

    def get_context(self, recipients):
        """
        Get the context to be used when constructing the subject and text of
        the mailing.
        """
        return {
            'mailing': self.mailing,
            'recipients': recipients,
        }

    def build_subject(self, recipients, context):
        # Rendered template may contain newlines; _send() strips them.
        return render_to_string(self.mailing.subject_template_name, context)

    def build_message(self, recipients, context):
        return render_to_string(self.mailing.text_template_name, context)

    def build_html_message(self, recipients, context):
        # HTML part is optional — only rendered if a template is configured.
        if self.mailing.html_template_name:
            return render_to_string(self.mailing.html_template_name, context)
        return None

    def build_bcc(self, recipients):
        """Get a list of email addresses to BCC."""
        return (settings.FACILITATORS.get('global', []))

    def add_delivery_records(self, recipients, sent=True):
        """
        Add a DeliveryRecord to each recipient.
        """
        drs = []
        for recipient in recipients:
            dr = DeliveryRecord(
                sent=sent,
                mailing=self.mailing,
                receiver_object=recipient
            )
            dr.save()
            drs.append(dr)
        return drs

    def mail(self, fake=False):
        """Get intended recipients, prepare the message, send it."""
        recipients = self.get_recipients()

        # Faking it--just add delivery records for recipients and jump out
        if fake:
            self.add_delivery_records(recipients)
            return recipients

        duplicate_handling = self.mailing.duplicate_handling
        if duplicate_handling in ('merge', 'send first'):
            # group by email address to handle duplicates
            # NOTE(review): itertools.groupby only groups *adjacent* items —
            # this silently fails to merge duplicates unless get_recipients()
            # returns the list sorted by email. Confirm ordering upstream.
            for email, recipient_group in groupby(recipients, lambda r: r.email):
                if duplicate_handling == 'send first':
                    # NOTE(review): recipient_group is a groupby iterator, not
                    # a sequence — subscripting it raises TypeError, so the
                    # 'send first' branch looks broken (should be
                    # next(iter(recipient_group))). Verify with a test.
                    recipient_group = [recipient_group[0]]
                self._prepare_and_send_message(list(recipient_group), email)
        else:
            # Don't bother grouping--every recipient gets every message
            for r in recipients:
                self._prepare_and_send_message([r], r.email)
        return recipients

    def _prepare_and_send_message(self, recipients, email):
        """
        Build the subject and text of the message, email it to the given
        email address.
        """
        context = self.get_context(recipients)
        self._send(
            self.build_subject(recipients, context),
            self.build_message(recipients, context),
            email,
            bcc=self.build_bcc(recipients),
            html_message=self.build_html_message(recipients, context),
        )
        return self.add_delivery_records(recipients)

    # NOTE(review): mutable default argument, evaluated once at import time;
    # it also wraps settings.FACILITATORS['global'] (itself presumably a
    # list) in another list, unlike build_bcc() above. All internal callers
    # pass bcc= explicitly, so the default only matters for direct calls.
    def _send(self, subject, message, email_address,
              bcc=[settings.FACILITATORS['global']], connection=None,
              fail_silently=True, html_message=None):
        # Subject cannot contain newlines
        subject = subject.replace('\n', '').strip()
        logging.debug('sending mail with subject "%s" to %s' %
                      (subject, email_address))
        logging.debug('bcc: %s' % (bcc,))
        logging.debug('full text: "%s"' % message)
        mail = EmailMultiAlternatives(
            u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
            message,
            from_email=settings.DEFAULT_FROM_EMAIL,
            to=[email_address],
            connection=connection,
            bcc=bcc,
        )
        if html_message:
            mail.attach_alternative(html_message, 'text/html')
        # fail_silently=True by default: send errors are swallowed but a
        # DeliveryRecord is still written by the caller.
        mail.send(fail_silently=fail_silently)


class DaysAfterAddedMailer(Mailer):
    """Mailer that targets entities N days after they were added, where N is
    mailing.days_after_added, using the mailing's last_checked watermark."""

    def get_recipient_queryset(self, model):
        """
        Check for entities added in the time between the last time the mailing
        was sent and now, shifting backward in time for the number of days
        after an entity is added that we want to send them the mailing.
        """
        delta = timedelta(days=self.mailing.days_after_added)
        return model.objects.filter(
            added__gt=self.last_checked - delta,
            added__lte=self.time_started - delta,
            email__isnull=False,
        ).exclude(email='')

    def _get_ctype_recipients(self, ctype):
        """Get entities of type ctype that should receive the mailing."""
        type_recipients = self.get_recipient_queryset(ctype.model_class())

        # only get already received if there are potential recipients
        if not type_recipients:
            return []

        received = self.get_already_received(receiver_type=ctype)
        return list(set(type_recipients) - set(received))

    def get_recipients(self):
        # NOTE(review): bare `reduce` is a Python 2 builtin; on Python 3 this
        # needs functools.reduce (or simply sum(recipient_lists, [])). It also
        # raises TypeError when target_types is empty (no initializer).
        recipient_lists = [self._get_ctype_recipients(ct) for ct in
                           self.mailing.target_types.all()]
        return reduce(lambda x,y: x+y, recipient_lists)

    def get_context(self, recipients):
        # NOTE(review): assumes recipients is non-empty (recipients[0]).
        context = super(DaysAfterAddedMailer, self).get_context(recipients)
        context['has_received_this_mailing'] = self.has_received(
            self.mailing,
            recipients[0]
        )
        return context

    def has_received(self, mailing, recipient):
        # True when any *other* row sharing this recipient's email address
        # (same model, different pk) already got the mailing.
        other_pks = recipient.__class__.objects.filter(
            email=recipient.email
        ).exclude(pk=recipient.pk).values_list('pk', flat=True)
        records = DeliveryRecord.objects.filter(
            mailing=mailing,
            receiver_object_id__in=other_pks,
            receiver_type=ContentType.objects.get_for_model(recipient)
        )
        return records.count() > 0


class DaysAfterParticipantAddedMailer(DaysAfterAddedMailer):
    """
    DaysAfterAddedMailer customized for participants, such as those added
    through livinglots_organize.
    """

    def get_context(self, recipients):
        context = super(DaysAfterParticipantAddedMailer, self).get_context(recipients)

        # Add BASE_URL for full-path links back to the site
        context['BASE_URL'] = Site.objects.get_current().domain

        # Consolidate participant objects (handy when merging mailings)
        context['lots'] = self.get_lots(recipients)

        # Url for changing what one's organizing/watching
        context['edit_url'] = recipients[0].get_edit_url()
        return context

    def get_lots(self, recipients):
        """
        Get lots the recipients will be receiving email for. Filter the lots
        to ensure that a group does not have access to the lot. Getting email
        about starting organizing on a lot when there's already a project
        there is misleading/confusing.
        """
        lots = list(set([r.content_object for r in recipients]))
        # NOTE(review): Python 2 semantics — filter() returns a list there;
        # on Python 3 it returns an iterator, which breaks the len() call in
        # get_recipients() below.
        return filter(lambda lot: not lot.steward_projects.exists(), lots)

    def get_recipients(self):
        """
        Get recipients for this mailing. We confirm that the recipient has
        lots to receive the mailing for, first.
        """
        recipients = super(DaysAfterParticipantAddedMailer, self).get_recipients()
        return [r for r in recipients if len(self.get_lots([r,])) > 0]
BMW is reporting that instead of merely upgrading the i3's battery for both the hybrid (range-extending gas engine coupled with an electric motor) and full-electric variants and stopping there, the carmaker will offer two battery sizes: the same 60Ah setup as currently available, together with a new 94Ah setup. Considering that both batteries will be offered in both variants of the i3, BMW will in fact be offering four i3 variants. The BMW i3 Canada launch, we believe, will come at the end of 2017. The article quotes BMW board member Ian Robertson, who says that the 2017 model's improved lithium-ion battery pack "positions it in a far more useful range." A BMW North America representative confirmed the report and added that the more powerful battery would be an option; the current version, with its 22-kWh battery pack and 81-mile range, would still be offered. When will the BMW i3 be offered in the US? A refresh is planned for the 2017 model year, debuting later in 2016, and with it will come new software and improved electronics. As a result, the larger-battery i3 pure EV will get a range of 120 miles, an improvement of roughly 40 miles over the current version. Given that BMW will be breaking the i3 out into battery categories, prices will vary accordingly. The current all-electric version starts at $42,400 before incentives, while the range-extender starts at $46,250. We might therefore expect these two versions with the smaller batteries to drop a bit in price to make room for their 94Ah battery equivalents.
Considering the variety of pure electric vehicles currently rivaling BMW's i3 in driving range, the move to larger battery packs is important; nonetheless, the automaker may struggle with sales unless pricing is competitive. Sales of the 2017 model will begin before year's end in an effort to surpass the 24,057 units sold in 2015. Clearly, Tesla's upcoming Model 3 is a major concern with its promised 200+ mile range and $35,000 starting price. The first public showing of the refreshed i3 is anticipated at the Paris Motor Show in October of this year. BMW markets its future-think i3 as both a regular EV and as an EV with an onboard gas engine that runs as a range-extender. The current, EV-only version has a stated range of 81 miles, and that number is set to increase by half — to around 120 miles — come the 2017 model year. Even the Chevrolet Bolt's roughly 200-mile range will be a threat when BMW can offer only 120 miles at most. A range of 120 miles would put the i3 at the top of the currently available sub-Tesla class of EVs — which is fitting, considering that the BMW's price of $43,395 (2016 model, before tax incentives) likewise sits at the top of that class. The only concern for BMW is that Chevrolet is promising a range of at least 200 miles (the final number isn't in yet) for its $37,500 Bolt EV, and that vehicle goes on sale late this year. Still, a more robust driving range will make the i3 EV a more attractive proposition; currently the range-extender version somewhat outsells the regular EV. The i3 overall managed 11,024 U.S. sales in 2015, its first full year on the market. That number is less than that of the Nissan Leaf or the Chevy Volt, which has to be rather disappointing for a model that, at its launch, was hailed by BMW President Norbert Reithofer as "more than the birth of a unique car."
"""empty message Revision ID: 0095_migrate_existing_svc_perms Revises: 0094_job_stats_update Create Date: 2017-05-23 18:13:03.532095 """ # revision identifiers, used by Alembic. revision = '0095_migrate_existing_svc_perms' down_revision = '0094_job_stats_update' from alembic import op import sqlalchemy as sa migration_date = '2017-05-26 17:30:00.000000' def upgrade(): def get_values(permission): return "SELECT id, '{0}', '{1}' FROM services WHERE "\ "id NOT IN (SELECT service_id FROM service_permissions "\ "WHERE service_id=id AND permission='{0}')".format(permission, migration_date) def get_values_if_flag(permission, flag): return "SELECT id, '{0}', '{1}' FROM services WHERE "\ "{2} AND id NOT IN (SELECT service_id FROM service_permissions "\ "WHERE service_id=id AND permission='{0}')".format(permission, migration_date, flag) op.execute("INSERT INTO service_permissions (service_id, permission, created_at) {}".format(get_values('sms'))) op.execute("INSERT INTO service_permissions (service_id, permission, created_at) {}".format(get_values('email'))) op.execute("INSERT INTO service_permissions (service_id, permission, created_at) {}".format( get_values_if_flag('letter', 'can_send_letters'))) op.execute("INSERT INTO service_permissions (service_id, permission, created_at) {}".format( get_values_if_flag('international_sms', 'can_send_international_sms'))) def downgrade(): op.execute("DELETE FROM service_permissions WHERE created_at = '{}'::timestamp".format(migration_date))
Aloo Matar is a mouth-watering Indian curry of potato and green peas that tastes delicious when served with phulka roti, chapati or paratha for lunch or dinner. This curry is very easy to make using a pressure cooker, within 15 minutes. Follow this easy, simple and healthy aloo matar curry recipe today and give a treat to your taste buds. Peel and wash the potatoes. Cut them into 1/2-inch pieces. In this recipe, frozen green peas are used, but you can use either fresh or frozen green peas according to availability. Heat 2 tablespoons oil in a pressure cooker over medium flame. Add mustard seeds; when they begin to crackle, add cumin seeds. When the cumin seeds turn light golden, add finely chopped tomato. Stir and cook until the tomato pieces turn soft. Add 2 teaspoons cumin-coriander powder, 3/4 teaspoon red chilli powder and 1/2 teaspoon turmeric powder. Add chopped potatoes, green peas and salt. Add 3/4 cup water and mix well. Close the lid of the pressure cooker and cook for 2 whistles over medium flame. Turn off the flame and let the pressure inside the cooker come down naturally. Remove the lid. Mix well and taste the curry. If required, add more salt and mix well. If the curry has more gravy or looks watery, cook it for a few more minutes or until you get the desired consistency of gravy. Garnish it with chopped coriander leaves. Aloo matar curry is ready for serving. Add 1 teaspoon ginger-garlic paste in step 2 for variation. You can also prepare this curry in a pan or kadai. However, it will take more time to cook compared with a pressure cooker. Serving Ideas: Serve Aloo Matar Curry with phulka roti or chapati, rice, dal and buttermilk or lassi as a complete meal for lunch or dinner. Very homemade taste without the creaminess - simple yet delicious! Perfect for any meal.... especially one over a weekday. You really are amazing!!!
from functools import total_ordering


@total_ordering
class Match(object):
    """Object representing a match in a search index.

    Attributes:
        matched_object (object): the object that was matched
        matched_string (string): the string representation of the object
        score (float): the score of the match. Higher scores indicate a
            better match.
        substrings (list of tuples): optional list of substrings to mark in
            the string representation of the object. Each tuple in the list
            is a pair of the start and end indices of the substring.
    """

    def __init__(self):
        self.matched_object = None
        self.matched_string = None
        self.score = 0.0
        self.substrings = []

    def __lt__(self, other):
        """Order matches by score, breaking ties by matched string.

        Uses lexicographic tuple comparison: the previous implementation
        (`self.score < other.score or self.matched_string < ...`) was not a
        strict ordering — both ``a < b`` and ``b < a`` could hold when the
        score and string orderings disagreed, which breaks sorting and
        ``total_ordering``.
        """
        return (self.score, self.matched_string) < \
            (other.score, other.matched_string)

    def canonicalize(self):
        """Canonicalizes the match by ensuring that the ranges in the map do
        not overlap with each other and are sorted by the start index."""
        self.substrings = canonical_ranges(self.substrings)


def canonical_ranges(ranges):
    """Given a list of ranges of the form ``(start, end)``, returns another
    list that ensures that:

    - For any number *x*, *x* will be included in at most one of the
      returned ranges.
    - For any number *x*, *x* will be included in one of the returned ranges
      if and only if *x* was included in at least one of the input ranges.
    - The returned ranges are sorted by the start index.
    - There exist no pairs of ranges in the returned list such that the end
      of one of the ranges is the start of the other.

    Args:
        ranges (list of tuples): list of ranges of the form ``(start, end)``

    Returns:
        list of tuples: the canonical representation of the input list, as
        defined by the rules above.
    """
    if len(ranges) < 2:
        return ranges
    ordered = sorted(ranges)
    merged = [ordered[0]]
    for start, end in ordered[1:]:
        prev_start, prev_end = merged[-1]
        if prev_end >= start:
            # Overlapping or touching: extend the previous range. max() is
            # essential — the previous implementation assigned curr's end
            # unconditionally, so a range fully contained in the previous
            # one truncated it (e.g. [(0, 10), (2, 3)] became [(0, 3)]).
            merged[-1] = (prev_start, max(prev_end, end))
        else:
            # Disjoint: start a new output range.
            merged.append((start, end))
    return merged
Three Ways to Help Published Friends – Been Writing? Leave a review. Tell your reader friends what a big deal it is for an author to have reviews on book seller sites like Amazon. Books without reviews don’t get noticed. You don’t have to write anything fancy–a sentence or two works just fine. Request that your local library carry the book. For most libraries, this is super simple and can probably be done online if you have an account with your library.
# coding: utf-8
"""
Django settings for romans_blog project.

Generated by 'django-admin startproject' using Django 1.8.6.

For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys

# Three levels up from this file; also prepended to sys.path so project
# apps resolve as top-level imports.
BASE_DIR = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
sys.path.insert(0, BASE_DIR)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo — rotate it and load from
# the environment before any production deployment.
SECRET_KEY = 'fvh+o&w4qo-afc#fu8fy7=1_imte!d7k1d)9q+=603@963+sk!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['127.0.0.1', 'localhost']

SITE_ID = 1

# Application definition

# filebrowser and tinymce must precede django.contrib.admin so their
# templates/urls take priority.
INSTALLED_APPS = [
    'filebrowser',
    'tinymce',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.sitemaps',
    'adminsortable2',
    'haystack',
    'solo',
    'common_content',
    'blog',
    'pages',
    'bootstrap4_skin',
]

MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # NOTE(review): SessionAuthenticationMiddleware was a no-op default from
    # Django 1.10 and removed in 2.0 — confirm the pinned Django version.
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    # Project middleware: serves a maintenance page when enabled.
    'common_content.middleware.maintenance_mode_middleware',
]

ROOT_URLCONF = 'romans_blog.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'romans_blog.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGES = [
    ('en-us', 'US English'),
    ('ru', 'Русский'),
    ('uk', 'Українська'),
]

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Europe/Kiev'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

# Media files (user images, videos, other files)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

FILEBROWSER_ADMIN_THUMBNAIL = 'small'

# TinyMCE settings
common_content_base_url = STATIC_URL + 'common_content/'

TINYMCE_DEFAULT_CONFIG = {
    'theme': 'modern',
    'plugins': 'advlist autolink link image imagetools lists charmap print hr anchor pagebreak '
               'searchreplace wordcount visualblocks visualchars code fullscreen insertdatetime media '
               'nonbreaking save table contextmenu directionality emoticons template paste textcolor '
               'spellchecker autosave noneditable',
    'toolbar1': 'django_saveandcontinue | undo redo | cut copy paste | searchreplace | styleselect removeformat | '
                'fontsizeselect | forecolor backcolor | code preview | spellchecker | fullscreen',
    'toolbar2': 'bold italic underline strikethrough | alignleft aligncenter alignright alignjustify '
                '| bullist numlist outdent indent | blockquote hr charmap nonbreaking '
                '| link anchor | image media emoticons | table | codesample | spoiler-add spoiler-remove',
    'contextmenu': 'formats | cut copy paste | link image | inserttable row cell',
    'style_formats': [
        {'title': 'Special', 'items': [
            {'title': 'Small text', 'inline': 'small'},
            {'title': 'Keyboard input', 'inline': 'kbd'},
            {'title': 'Sample output', 'inline': 'samp'},
        ]},
        {'title': 'Image', 'items': [
            {'title': 'Image Left', 'selector': 'img', 'styles': {'float': 'left', 'margin': '10px'}},
            {'title': 'Image Right', 'selector': 'img', 'styles': {'float': 'right', 'margin': '10px'}}
        ]},
    ],
    'style_formats_merge': True,
    'width': 1024,
    'height': 600,
    'spellchecker_languages': 'English (US)=en_US,Russian=ru,Ukrainian=uk',
    'spellchecker_language': 'en_US',
    'plugin_preview_width': 1024,
    'plugin_preview_height': 600,
    'image_advtab': True,
    'default_link_target': '_blank',
    'extended_valid_elements': 'span[class]',
    'spoiler_caption': '<span class="fa fa-plus-square"></span>&nbsp;Click to show',
    # Marker the blog app splits on to produce post previews.
    'pagebreak_separator': '<!-- ***Blog Cut*** -->',
    # Paths are relative to TinyMCE's own static location.
    'external_plugins': {
        'spoiler': '../../../common_content/js/spoiler/plugin.min.js',
        'django_saveandcontinue': '../../../common_content/js/django_saveandcontinue/plugin.min.js',
        'codesample': '../../../common_content/js/codesample/plugin.min.js',
        'preview': '../../../common_content/js/preview/plugin.min.js'
    },
    'codesample_languages': [
        {'text': 'Python', 'value': 'python'},
        {'text': 'HTML/XML', 'value': 'markup'},
        {'text': 'Django/Jinja2', 'value': 'django'},
        {'text': 'CSS', 'value': 'css'},
        {'text': 'JavaScript', 'value': 'javascript'},
        {'text': 'C++', 'value': 'cpp'},
        {'text': 'C', 'value': 'c'},
        {'text': 'C#', 'value': 'csharp'},
        {'text': 'Windows BAT', 'value': 'batch'},
        {'text': 'Bash', 'value': 'bash'},
        {'text': 'YAML', 'value': 'yaml'},
        {'text': 'SQL', 'value': 'sql'},
        {'text': 'reStructuredText', 'value': 'rest'},
        {'text': 'Plain Text', 'value': 'none'},
    ],
    'content_css': [common_content_base_url + 'css/prism.css'],
}

TINYMCE_SPELLCHECKER = True

TINYMCE_ADDITIONAL_JS_URLS = [
    common_content_base_url + 'js/prism.min.js',
    common_content_base_url + 'js/prism-django.min.js'
]

# Skin-specific settings
CURRENT_SKIN = 'bootstrap4_skin'

BLOG_POSTS_PAGINATE_BY = 5

# Skin-specific TinyMCE additions (Bootstrap 4 class lists).
TINYMCE_DEFAULT_CONFIG['image_class_list'] = [
    {'title': 'Responsive', 'value': 'img-fluid'},
    {'title': 'Rounded', 'value': 'img-fluid rounded'},
    {'title': 'Thumbnail', 'value': 'img-fluid img-thumbnail'},
]

TINYMCE_DEFAULT_CONFIG['table_class_list'] = [
    {'title': 'Simple', 'value': 'table'},
    {'title': 'Bordered', 'value': 'table table-bordered'},
    {'title': 'Striped', 'value': 'table table-striped'},
    {'title': 'Small', 'value': 'table table-sm'},
]

TINYMCE_DEFAULT_CONFIG['table_row_class_list'] = [
    {'title': 'None', 'value': ''},
    {'title': 'Green', 'value': 'table-success'},
    {'title': 'Red', 'value': 'table-danger'},
    {'title': 'Blue', 'value': 'table-primary'},
]

# Extend (in place) the editor stylesheet list defined above.
TINYMCE_DEFAULT_CONFIG['content_css'] += [
    STATIC_URL + 'bootstrap4_skin/css/bootstrap.min.css',
    STATIC_URL + 'bootstrap4_skin/css/font-awesome-all.min.css',
    STATIC_URL + 'bootstrap4_skin/css/styles.css',
]

DEFAULT_LOGO = STATIC_URL + 'bootstrap4_skin/img/favicon.png'
DEFAULT_FEATURED_IMAGE = STATIC_URL + 'bootstrap4_skin/img/featured/home.jpg'

# Haystack search settings
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
    },
}
# Enable this if your server has enough power to update index on every save
# HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
Another part of the SDG jigsaw is now in place with the announcement of the 10 stakeholder representatives of the support group for the Technology Facilitation Mechanism. This is another advancement of stakeholder engagement in the UN, and in line with the approach taken during the SDG process. The United Nations Sustainable Development Summit 2015 was held from 25 to 27 September. The Agenda adopted there launches a “Technology Facilitation Mechanism which was established by the Addis Ababa Action Agenda in order to support the Sustainable Development Goals”. This provision was already contained in the Addis Ababa Action Agenda, which was adopted at the Third International Conference on Financing for Development held in Addis Ababa from 13 to 16 July 2015. The present document lays out terms of reference for this United Nations 10-Member Group to support the Technology Facilitation Mechanism – henceforth referred to as the “10-Member Group”. Its tasks include providing briefings and other inputs to the high-level political forum on sustainable development, or other relevant UN fora, as requested, on matters of science, technology, innovation and sustainable development. The members shall serve in their personal capacity and not as representatives of a Government, corporation or organization. The Group shall comprise distinguished experts drawn from civil society, the private sector, and the scientific community, as mandated. Those may thus include representatives of academia, philanthropic and non-governmental organizations, and other individual experts. The Group’s members shall serve in their personal capacity and not as representatives of a Government, corporation or organization. They can hold employment with an NGO, a private sector entity, or a Government, provided they are working closely on issues related to science, technology and innovation. Government employees with a largely political role, however, shall not be qualified for appointment to the Group.
Members of the 10-Member Group shall “…be appointed by the Secretary-General, for periods of two years” and serve at the Secretary-General’s discretion, supporting science, technology and innovation for the Sustainable Development Goals, in line with UN rules and regulations. (For example, current chief scientists do not qualify for appointment, but academics in government-run laboratories do.) The Secretariat of the IATT shall provide Secretariat functions for the meetings of the 10-Member Group. The full membership of the IATT is expected to provide additional support, as the need arises. The 10-Member Group will meet at least once or twice each year. The frequency of meetings can be adjusted by the Group as the need may arise. Communications shall be through a secure website, as appropriate. The Secretariat shall support communications within the 10-Member Group, between the Group and the IATT, and between the Group and the High-level Political Forum as needed. The 10-Member Group shall agree by consensus on two co-chairs who will represent the Group for the period of one year at a time. The appointment of co-chairs can be changed by consensus at any time. The 10-Member Group shall strive to make its decisions by consensus. In case no consensus can be reached, decisions can be made by a simple majority of its members, in which case the IATT will be notified that the decision was reached by voting. The meetings of the 10-Member Group shall be closed unless it decides otherwise. The 10-Member Group may operate even if there are vacancies in its composition.
# # # Copyright 2013-2017 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
#
#
"""
This describes the easyconfig parser

The parser is format version aware

:author: Stijn De Weirdt (Ghent University)
"""
import os
import re

from vsc.utils import fancylogger

from easybuild.framework.easyconfig.format.format import FORMAT_DEFAULT_VERSION
from easybuild.framework.easyconfig.format.format import get_format_version, get_format_version_classes
from easybuild.framework.easyconfig.format.yeb import FormatYeb, is_yeb_format
from easybuild.framework.easyconfig.types import PARAMETER_TYPES, check_type_of_param_value
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import read_file, write_file


# deprecated easyconfig parameters, and their replacements
DEPRECATED_PARAMETERS = {
    # <old_param>: (<new_param>, <deprecation_version>),
}

# replaced easyconfig parameters, and their replacements
REPLACED_PARAMETERS = {
    'license': 'license_file',
    'makeopts': 'buildopts',
    'premakeopts': 'prebuildopts',
}

_log = fancylogger.getLogger('easyconfig.parser', fname=False)


def fetch_parameters_from_easyconfig(rawtxt, params):
    """
    Fetch (initial) parameter definition from the given easyconfig file contents.

    :param rawtxt: contents of the easyconfig file
    :param params: list of parameter names to fetch values for
    """
    # Returns one entry per requested parameter, in order; None when the
    # parameter is not assigned anywhere in the raw text.
    param_values = []
    for param in params:
        # Matches both `name = value` (.eb) and `name: value` (.yeb) styles;
        # surrounding quotes are stripped from the captured value.
        regex = re.compile(r"^\s*%s\s*(=|: )\s*(?P<param>\S.*?)\s*$" % param, re.M)
        res = regex.search(rawtxt)
        if res:
            param_values.append(res.group('param').strip("'\""))
        else:
            param_values.append(None)
    _log.debug("Obtained parameters value for %s: %s" % (params, param_values))
    return param_values


class EasyConfigParser(object):
    """Read the easyconfig file, return a parsed config object
    Can contain references to multiple version and toolchain/toolchain versions
    """
    # NOTE(review): this module uses Python 2-only syntax
    # (`except IOError, err`, `basestring`) — keep that in mind when editing.

    def __init__(self, filename=None, format_version=None, rawcontent=None,
                 auto_convert_value_types=True):
        """
        Initialise the EasyConfigParser class
        :param filename: path to easyconfig file to parse (superseded by rawcontent, if specified)
        :param format_version: version of easyconfig file format, used to determine how to parse supplied easyconfig
        :param rawcontent: raw content of easyconfig file to parse (preferred over easyconfig file supplied via filename)
        :param auto_convert_value_types: indicates whether types of easyconfig values should be automatically converted
                                         in case they are wrong
        """
        self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)

        self.rawcontent = None  # the actual unparsed content

        self.auto_convert = auto_convert_value_types

        self.get_fn = None  # read method and args
        self.set_fn = None  # write method and args

        self.format_version = format_version
        self._formatter = None

        # rawcontent takes precedence over filename; exactly one is required.
        if rawcontent is not None:
            self.rawcontent = rawcontent
            self._set_formatter(filename)
        elif filename is not None:
            self._check_filename(filename)
            self.process()
        else:
            raise EasyBuildError("Neither filename nor rawcontent provided to EasyConfigParser")

        self._formatter.extract_comments(self.rawcontent)

    def process(self, filename=None):
        """Create an instance"""
        # Read raw content, then pick and run the matching formatter.
        self._read(filename=filename)
        self._set_formatter(filename)

    def check_values_types(self, cfg):
        """
        Check types of easyconfig parameter values.

        :param cfg: dictionary with easyconfig parameter values (result of get_config_dict())
        """
        # Collects all type errors and raises once; auto-converted values are
        # written back into cfg in place.
        wrong_type_msgs = []
        for key in cfg:
            type_ok, newval = check_type_of_param_value(key, cfg[key], self.auto_convert)
            if not type_ok:
                wrong_type_msgs.append("value for '%s' should be of type '%s'" % (key, PARAMETER_TYPES[key].__name__))
            elif newval != cfg[key]:
                self.log.warning("Value for '%s' easyconfig parameter was converted from %s (type: %s) to %s (type: %s)",
                                 key, cfg[key], type(cfg[key]), newval, type(newval))
                cfg[key] = newval

        if wrong_type_msgs:
            raise EasyBuildError("Type checking of easyconfig parameter values failed: %s", ', '.join(wrong_type_msgs))
        else:
            self.log.info("Type checking of easyconfig parameter values passed!")

    def _check_filename(self, fn):
        """Perform sanity check on the filename, and set mechanism to set the content of the file"""
        if os.path.isfile(fn):
            self.get_fn = (read_file, (fn,))
            self.set_fn = (write_file, (fn, self.rawcontent))

        self.log.debug("Process filename %s with get function %s, set function %s" % (fn, self.get_fn, self.set_fn))

        if self.get_fn is None:
            raise EasyBuildError('Failed to determine get function for filename %s', fn)
        if self.set_fn is None:
            raise EasyBuildError('Failed to determine set function for filename %s', fn)

    def _read(self, filename=None):
        """Read the easyconfig, dump content in self.rawcontent"""
        if filename is not None:
            self._check_filename(filename)

        try:
            self.rawcontent = self.get_fn[0](*self.get_fn[1])
        except IOError, err:
            raise EasyBuildError('Failed to obtain content with %s: %s', self.get_fn, err)

        if not isinstance(self.rawcontent, basestring):
            msg = 'rawcontent is not basestring: type %s, content %s' % (type(self.rawcontent), self.rawcontent)
            raise EasyBuildError("Unexpected result for raw content: %s", msg)

    def _det_format_version(self):
        """Extract the format version from the raw content"""
        if self.format_version is None:
            self.format_version = get_format_version(self.rawcontent)
            if self.format_version is None:
                # Easyconfigs without an explicit header fall back to the
                # default format version.
                self.format_version = FORMAT_DEFAULT_VERSION
                self.log.debug('No version found, using default %s' % self.format_version)

    def _get_format_version_class(self):
        """Locate the class matching the version"""
        if self.format_version is None:
            self._det_format_version()
        found_classes = get_format_version_classes(version=self.format_version)
        # Exactly one matching formatter class is required.
        if len(found_classes) == 1:
            return found_classes[0]
        elif not found_classes:
            raise EasyBuildError('No format classes found matching version %s', self.format_version)
        else:
            raise EasyBuildError("More than one format class found matching version %s in %s",
                                 self.format_version, found_classes)

    def _set_formatter(self, filename):
        """Obtain instance of the formatter"""
        if self._formatter is None:
            # YAML-style (.yeb) easyconfigs get their dedicated formatter;
            # otherwise the formatter is chosen by detected format version.
            if is_yeb_format(filename, self.rawcontent):
                self._formatter = FormatYeb()
            else:
                klass = self._get_format_version_class()
                self._formatter = klass()
        self._formatter.parse(self.rawcontent)

    def set_format_text(self):
        """Create the text for the formatter instance"""
        # TODO create the data in self.rawcontent
        raise NotImplementedError

    def write(self, filename=None):
        """Write the easyconfig format instance, using content in self.rawcontent."""
        if filename is not None:
            self._check_filename(filename)
        try:
            self.set_fn[0](*self.set_fn[1])
        except IOError, err:
            raise EasyBuildError("Failed to process content with %s: %s", self.set_fn, err)

    def set_specifications(self, specs):
        """Set specifications."""
        self._formatter.set_specifications(specs)

    def get_config_dict(self, validate=True):
        """Return parsed easyconfig as a dict."""
        # allows to bypass the validation step, typically for testing
        if validate:
            self._formatter.validate()

        cfg = self._formatter.get_config_dict()
        self.check_values_types(cfg)

        return cfg

    def dump(self, ecfg, default_values, templ_const, templ_val):
        """Dump easyconfig in format it was parsed from."""
        return self._formatter.dump(ecfg, default_values, templ_const, templ_val)
Communication in Retail Management | 8x8, Inc. The retail sector is one of the most intense and competitive business environments, where customers expect instant gratification. As a result, many retailers struggle to keep up. In fact, according to BankruptcyData.com, in 2017 alone the retail sector accounted for 11% of public bankruptcies by industry – the second-highest bankruptcy count. On the other hand, top-rated retailers like Amazon, Walmart, and Kroger were found to have similar traits. Aside from automation, what differentiates these top retailers from mediocre retailers is the quality of their retail management process. To be successful in an industry as competitive as retail, retailers must establish a strong communication strategy that makes use of high-level tools and platforms to produce a high-quality retail management experience. Retail management encompasses all retailer activities aimed at better understanding the consumers of their goods and services, with the goal of bringing more consumers into their stores, generating more sales, and achieving high satisfaction ratings. In a nutshell, the purpose of retail management is to make shopping a pleasurable experience for customers. It strengthens relationships among store teams. New members have a platform where they can ask questions, as well as observe experienced team members, allowing them to better serve the needs of your customers. Messages on new products, price changes, discounts, product recalls, and upcoming marketing campaigns are communicated to every member of the retail store team. It helps in anticipating changing customer expectations, providing proactive and faster customer service, and solving customer issues when they occur. What Does Communication in Retail Management Look Like?
Traditional communication systems require premise-based devices and come with limitations such as the inability to unify all communication streams and the possibility of a call being compromised while being routed from one location to the next. Be centralized with specific persons or a team in charge. This ensures that the right messages are sent within the specified time and formats. Be standardized. All communications must be relevant to the store, concise, and aimed at improving sales, customer service, and brand awareness. Be accessible. Remote stores should receive head office communications on their mobile devices and POS systems anytime, anywhere. Have both internal and consumer-facing components. Your store teams are the face of your brand and also your internal customers. A good communication system is equipped for both internal and external communication. Encourage feedback. Feedback is vital for service improvement. Without feedback, you can’t objectively measure the quality of your services. For your store teams and customers to provide the feedback you need, reassure them that both positive and negative feedback are welcome. Like a suggestion box that protects the anonymity of its users, a strong retail communication system should be designed in a way that encourages customers and employees to voice their opinion without fear of retribution. A good retail communication system should not be disruptive to operations. Rather, it should enhance existing processes by making sure each employee has all the information they need to do their jobs right. What Are the Best Communication Tools for Retail Management? The market is awash with many communication tools for retailers. When choosing a communication tool, make sure it has the basic features mentioned above. 
8x8.com provides superior cloud communications and customer engagement solutions such as cloud-based voice, contact center, video, mobile, and unified communications for retailers of all sizes, whether they’re operating a single location or own a growing network of national and international franchises. The 8x8 platform ensures consistent communication across stores, head offices, and contact center locations. 8x8’s cloud-based communication platform can unify all communication streams. This allows for greater insight across all interactions and gives you all the data you need to make sound business decisions. Managing several communication and collaboration tools from one, centralized location frees up your IT department’s time, allowing them to focus their efforts on higher-value projects. 8x8’s Virtual Contact Center uses intelligent routing to determine the best person or path to handle and resolve calls. This, unlike the traditional routing methods which depend on network topology information to select paths, significantly reduces costs without compromising quality. One-Click Unified Cloud Communications. Enables you to smoothly transition across chats, voice calls, video conferencing, and document sharing with single sign-on (SSO), as well as continuous access to corporate directories. You can also promptly switch between your desk phone and your smartphone. Your retail business could be the future Amazon or Walmart. Likewise, it could join the ranks of a long line of retailers that are likely to file for bankruptcy in the coming years. What determines the direction that your retail business is likely to take is the communication strategy that you will employ in your retail management.
#!/usr/bin/env python
"""Command-line driver that generates a k-color problem for the MiniSat solver,
or interprets a MiniSat solution file, delegating the actual work to
KColorProblem."""

import argparse

from k_color_minisat_problem import KColorProblem


def make_args_parser():
    """Build the CLI and return the parsed arguments (argparse.Namespace)."""
    parser = argparse.ArgumentParser(
        description='Generates or interprets the k-color problem for the MiniSat solver',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--file', required=True, type=str,
                        help='The path to a text file. Either output for the problem or input of the solution')
    # BUGFIX: help previously advertised "interprete", which run() rejects;
    # the accepted value is "interpret".
    parser.add_argument('--action', required=True, type=str,
                        help='Either "generate" or "interpret"')
    return parser.parse_args()


def write_file(file, content):
    """Write content to the file at the given path (overwriting it)."""
    # context manager guarantees the handle is closed even on write errors
    with open(file, "w") as text_file:
        text_file.write(content)


def read_file(file):
    """Return the full text content of the file at the given path."""
    with open(file, "r") as text_file:
        return text_file.read()


def run():
    """Dispatch on --action: write a generated problem, or print an
    interpretation of a solver solution read from --file."""
    args = make_args_parser()
    if args.action not in ["generate", "interpret"]:
        raise Exception("Action has to be either 'generate' or 'interpret'.")

    problem = KColorProblem()
    if args.action == "generate":
        write_file(args.file, problem.generate_minisat_problem())
    else:
        solution = read_file(args.file)
        print(problem.interprete_minisat_problem(solution))


if __name__ == "__main__":
    run()
When Arianna founded The Huffington Post in 2005 – along with Andrew Breitbart, Jonah Peretti, and Kenneth Lerer – it was mostly a counter-platform to the Drudge Report. Today, however, this liberal commentary/blog outlet has grown from strength to strength and even won the Pulitzer Prize in 2012. ... was founded in November 2006 by Jonah Peretti, John S. Johnson III. and Kenneth Lerer, co-founder and chairman of The Huffington Post. But Uber can't continue to grow and flourish if it continues with its same culture,” venture capitalist Kenneth Lerer told The Washington Post. ... Group Nine sees the combination of several ventures all connected through BuzzFeed chairman and Viacom board member Kenneth Lerer. Ben Lerer has been running his company Thrillist for the past decade -- and he's only 34. He and his college friend started it as a niche newsletter for men. ... and was supported at its outset by a slate of Democratic boosters including Arianna Huffington and the venture capitalist Kenneth Lerer. The two news outlets share a founder, Kenneth Lerer, and a marketing strategy of high-low content. While Buzzfeed has drawn ire, in 2012 Huffington Post became the first commercially run US digital media enterprise to win a Pulitzer Prize. They are chairman Thomas May, vice chair Shari Redstone, CEO Bob Bakish, Nicole Seligman, Deborah Norville, Charles E. Phillips, Jr.
#!/usr/bin/env python
"""Detect stellar-core core dumps, extract a backtrace with lldb, and either
email an alert (AWS SES) or print the trace locally, depending on MODE."""

import logging
import sys
import ConfigParser
import os
import socket
import time
import subprocess
from string import Template
import textwrap

import boto.ses


def format_time(epoch_time):
    """Format a UNIX timestamp as a UTC 'YYYY-MM-DDTHH:MM:SS' string."""
    time_format = "%Y-%m-%dT%H:%M:%S"
    return time.strftime(time_format, time.gmtime(epoch_time))


class CoreMailer(object):
    """Finds the most recent core file, turns it into a backtrace and
    (optionally) an emailed alert with surrounding log context."""

    def __init__(self, config):
        """:param config: ConfigParser with a [Config] section (see __main__)."""
        self.config = config
        self.hostname = self.config.get('Config', 'hostname')
        self.out = sys.stdout

    def find_core(self):
        """Return the newest file in the cores dir matching core_filter, or None."""
        path = self.config.get('Config', 'cores')
        core_filter = self.config.get('Config', 'core_filter')
        cores = [os.path.join(path, core)
                 for core in os.listdir(path)
                 if core_filter in core]
        if len(cores):
            return max(cores, key=os.path.getctime)

    def filter_logs(self, logs):
        """Keep only lines containing log_filter, stripping the text between the
        leading timestamp and the first ':' (e.g. a syslog host/program prefix).
        Returns logs unchanged when no filter is configured."""
        log_filter = self.config.get('Config', 'log_filter')
        if not log_filter:
            return logs

        def strip_prefix(line):
            # keep '<timestamp> <rest-after-first-colon>'
            first_space = line.index(' ')
            following_colon = line.index(':', first_space)
            return line[0:first_space] + line[following_colon:]

        lines = logs.split("\n")
        filtered = filter(lambda line: log_filter in line, lines)
        stripped = map(strip_prefix, filtered)
        return "\n".join(stripped)

    def find_logs(self, epoch_time):
        """Grep the configured log for ~1000 lines of context around the crash time."""
        log = self.config.get('Config', 'log')
        formatted_time = format_time(epoch_time)
        logging.info('Searching %s for logs around %s', log, formatted_time)

        command = ["egrep", "-C1000", ("^%s" % formatted_time), log]

        try:
            return self.filter_logs(subprocess.check_output(command))
        except subprocess.CalledProcessError:
            # egrep exits non-zero when nothing matches; degrade gracefully
            return 'Unable to retrieve logs around %s' % formatted_time

    def get_trace(self, core):
        """Run lldb over the core file and return backtraces for all threads."""
        binary = self.config.get('Config', 'bin')
        logging.info('Processing core file %s with binary %s', core, binary)

        # matschaffer: this is really awful
        # But lldb just exits with no output and exit code -11 if I try to run
        # this script as a container entry point
        lldb_command = "lldb-3.6 -f %(binary)s -c %(core)s --batch " + \
                       "-o 'target create -c \"%(core)s\" \"%(binary)s\"' " + \
                       "-o 'script import time; time.sleep(1)' " + \
                       "-o 'thread backtrace all'"

        command = ["script", "-c", (lldb_command % {"core": core, "binary": binary})]

        return subprocess.check_output(command, stderr=subprocess.STDOUT)

    def send_alert(self, epoch_time, trace, logs):
        """Render the HTML alert email and send it via SES."""
        template_vars = {
            "hostname": self.hostname,
            "binary": self.config.get('Config', 'bin'),
            "formatted_time": format_time(epoch_time),
            "trace": trace,
            "logs": logs
        }

        sender = self.config.get('Config', 'from')
        recipient = self.config.get('Config', 'to')
        subject = 'stellar-core crash on %(hostname)s' % template_vars

        template = textwrap.dedent("""
          <p>${binary} on ${hostname} crashed at ${formatted_time} with the
          following back traces:</p>

          <pre><code>
          ${trace}
          </code></pre>

          <h2>Extracted logs</h2>

          <pre><code>
          ${logs}
          </code></pre>
        """)
        body = Template(template).substitute(template_vars)

        logging.info("Sending core alert from %s to %s", sender, recipient)
        self.send_email(sender, recipient, subject, body)

    def send_email(self, sender, recipient, subject, body):
        """Send a single HTML email through AWS SES in the configured region."""
        conn = boto.ses.connect_to_region(self.config.get('Config', 'region'))
        # noinspection PyTypeChecker
        conn.send_email(sender, subject, None, [recipient], html_body=body)

    def output_trace(self, epoch_time, trace):
        """Write a plain-text version of the crash report to self.out."""
        template_vars = {
            "hostname": self.hostname,
            "binary": self.config.get('Config', 'bin'),
            "formatted_time": format_time(epoch_time),
            "trace": trace
        }

        template = textwrap.dedent("""
          ${binary} on ${hostname} crashed at ${formatted_time} with the
          following back traces:

          ${trace}
        """)
        body = Template(template).substitute(template_vars)
        self.out.write(body)

    def archive_core(self, core):
        """Archive the core file via archive_command (with {0}=local path,
        {1}=remote path), or just delete it when no command is configured."""
        command_string = self.config.get('Config', 'archive_command')
        if command_string:
            core_path = os.path.join(self.hostname, os.path.basename(core))
            command_string = command_string.format(core, core_path)
            logging.info(subprocess.check_output(command_string.split(' ')))
        else:
            # logging.warning, not the deprecated logging.warn alias
            logging.warning("No archive command, just removing core file")
            os.remove(core)

    def run(self, single_core):
        """Process one core file (the given one, or the newest found) according
        to the configured mode: 'aws' emails+archives, 'local' prints."""
        core = single_core or self.find_core()
        mode = self.config.get('Config', 'mode')

        if core:
            logging.info('Found core file %s', core)
            epoch_time = os.path.getctime(core)
            trace = self.get_trace(core)

            if mode == "aws":
                logs = self.find_logs(epoch_time)
                self.send_alert(epoch_time, trace, logs)
                self.archive_core(core)
            elif mode == "local":
                self.output_trace(epoch_time, trace)
            else:
                # logging.critical instead of the undocumented logging.fatal alias
                logging.critical("Unknown MODE setting: %s", mode)
                sys.exit(1)
        else:
            logging.info('No core file found for processing')


if __name__ == "__main__":
    if len(sys.argv) > 1:
        single_core = sys.argv[1]
    else:
        single_core = None

    config_file = "/etc/core_file_processor.ini"

    logging.basicConfig(level=logging.INFO)

    config_parser = ConfigParser.ConfigParser({
        "region": "us-east-1",
        "cores": "/cores",
        "log": "/host/syslog",
        "log_filter": os.environ.get('CORE_LOG_FILTER'),
        "core_filter": "stellar-core",
        "hostname": socket.gethostname(),
        "from": "%(hostname)s <ops+%(hostname)s@stellar.org>",
        "to": os.environ.get('CORE_ALERT_RECIPIENT'),
        "bin": "/usr/local/bin/stellar-core",
        "archive_command": os.environ.get('CORE_ARCHIVE_COMMAND'),
        "mode": os.environ.get('MODE', 'aws')
    })
    config_parser.add_section("Config")
    config_parser.read(config_file)

    mailer = CoreMailer(config_parser)
    mailer.run(single_core)
This year's Whitecross Street Party proved to be just as fun and eclectic as its previous incarnations. For Inspiring City it was the third time in a row we'd been here and it remains one of our most anticipated events of the year. Street performers, artists, musicians, stall owners and the general public who come along to soak up the atmosphere give the event a really unique vibe. In terms of street art it's a great opportunity for people to interact with some of the best artists around, see what they are doing, learn about the art and bring it to a whole new audience. Highlights included the likes of Andrea Tyrimos revisiting her popular 'brick' theme, Stephanie Thieu painting a homage to Ray Charles on a piano and Roy's People hiding little guys around for anyone to find. High on the walls of the street original art had been placed, featuring work from Mohammed Sami, Inkie, ATM, HIN, Sean Worrall and Teddy Baden amongst a host of others. Then on the street, artists live painted on specially placed hoardings, creating a popular artists' quarter, and the likes of Inkie, Leeks, Himbad, 616, Lowdown, Airborne Mark, Otto Schade, Hannah Adamaszek and Boxhead all painted in front of admiring crowds. It was a two-day festival and, as ever with these things, it's the camera that really does the talking, so take a look at the gallery below and check out some of the great artists on display. Great captures Stuart… I took my family there today, very enjoyable indeed.. Wonderful vibes!! Pingback: Whitecross Street Party 2015 the street art | "Notice what you notice..."
# -*- coding: utf-8 -*-
# Auto-generated Django migration for app_printer.
#
# Converts Article, Brand, Printer and PrintType to multi-table inheritance
# from a new shared concrete base model, CBase:
#   1. create CBase holding the fields the four models previously duplicated,
#   2. drop those per-model fields (including each model's old 'id' PK),
#   3. add a 'cbase_ptr' OneToOneField parent link on each model.
#
# NOTE(review): the parent links use default=1, i.e. existing rows would all be
# pointed at the CBase row with pk=1 during migration -- confirm this is safe
# on databases that already contain data.
from __future__ import unicode_literals

from django.db import models, migrations
import django_markdown.models


class Migration(migrations.Migration):

    dependencies = [
        ('app_printer', '0008_auto_20150531_1624'),
    ]

    operations = [
        # Step 1: the new shared base model.
        migrations.CreateModel(
            name='CBase',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=300)),
                ('order_index', models.IntegerField(default=0)),
                ('title', models.CharField(blank=True, max_length=300)),
                ('meta_description', models.TextField(blank=True)),
                ('page_url', models.CharField(max_length=300)),
                ('img_url', models.CharField(blank=True, max_length=300)),
                ('description', django_markdown.models.MarkdownField()),
            ],
        ),
        # Step 2: remove the fields that moved into CBase (per model).
        migrations.RemoveField(
            model_name='article',
            name='description',
        ),
        migrations.RemoveField(
            model_name='article',
            name='id',
        ),
        migrations.RemoveField(
            model_name='article',
            name='name',
        ),
        migrations.RemoveField(
            model_name='article',
            name='set_url',
        ),
        migrations.RemoveField(
            model_name='brand',
            name='description',
        ),
        migrations.RemoveField(
            model_name='brand',
            name='id',
        ),
        migrations.RemoveField(
            model_name='brand',
            name='logo_url',
        ),
        migrations.RemoveField(
            model_name='brand',
            name='name',
        ),
        migrations.RemoveField(
            model_name='brand',
            name='order_index',
        ),
        migrations.RemoveField(
            model_name='brand',
            name='set_url',
        ),
        migrations.RemoveField(
            model_name='printer',
            name='description',
        ),
        migrations.RemoveField(
            model_name='printer',
            name='id',
        ),
        migrations.RemoveField(
            model_name='printer',
            name='img_url',
        ),
        migrations.RemoveField(
            model_name='printer',
            name='name',
        ),
        migrations.RemoveField(
            model_name='printer',
            name='set_url',
        ),
        migrations.RemoveField(
            model_name='printer',
            name='title',
        ),
        migrations.RemoveField(
            model_name='printtype',
            name='description',
        ),
        migrations.RemoveField(
            model_name='printtype',
            name='id',
        ),
        migrations.RemoveField(
            model_name='printtype',
            name='name',
        ),
        migrations.RemoveField(
            model_name='printtype',
            name='order_index',
        ),
        migrations.RemoveField(
            model_name='printtype',
            name='set_url',
        ),
        # Step 3: wire each model to CBase via a parent-link OneToOneField
        # (this makes each model's table key on cbase_ptr instead of its own id).
        migrations.AddField(
            model_name='article',
            name='cbase_ptr',
            field=models.OneToOneField(default=1, auto_created=True, serialize=False, primary_key=True, parent_link=True, to='app_printer.CBase'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='brand',
            name='cbase_ptr',
            field=models.OneToOneField(default=1, auto_created=True, serialize=False, primary_key=True, parent_link=True, to='app_printer.CBase'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='printer',
            name='cbase_ptr',
            field=models.OneToOneField(default=1, auto_created=True, serialize=False, primary_key=True, parent_link=True, to='app_printer.CBase'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='printtype',
            name='cbase_ptr',
            field=models.OneToOneField(default=1, auto_created=True, serialize=False, primary_key=True, parent_link=True, to='app_printer.CBase'),
            preserve_default=False,
        ),
    ]
Fear To Go The End Of His Thought: "May We Always Remain Independent", Amin. Jashne Azadi Mubarik. Allah bless our country. Take a stand against evil, corruption & terrorism, for we belong to India, a nation of pride, & we will thus say — "HINDU, MUSLIM, SIKH, ISAI, SAB HAI BHAI BHAI." God bless, jai ram, jai hind!!! Do you know what Pakistan means? No! Happy Independence Day to you. Thousands laid down their lives so that our country breathes this day — never forget their sacrifice. Happy Independence Day. "No nation is perfect, it needs to be made perfect." Happy Independence Day.
# discern
# Contact: Jacob Schreiber (jmschr@cs.washington.edu)
"""Command-line driver for running DISCERN on paired healthy/cancer gene
expression CSV files."""

from cancer_analyses import *

import pandas as pd
import argparse
import sys


def parse_command_line():
	'''
	Parse the command line and return the parsed arguments.
	'''
	parser = argparse.ArgumentParser( description="Read in Data" )
	parser.add_argument( '-n', action='store', type=file,
		help='A CSV file of gene expression data for healthy patients.' )
	parser.add_argument( '-c', action='store', type=file,
		help='A CSV file of gene expression data for cancerous patients.' )
	parser.add_argument( '-l', type=float, action='store', default=0.5,
		help='A value of lambda to run DISCERN at.')
	args = parser.parse_args()
	return args


def run_analyses():
	'''
	Run DISCERN on the two expression matrices given on the command line, and
	return the resulting scores.
	'''
	parser = parse_command_line()
	# .T: orient the matrices the way run_discern expects.
	# NOTE(review): orientation inferred from the transposition only -- confirm
	# against run_discern's contract in cancer_analyses.
	cancer = pd.read_csv( parser.c, index_col=0 ).T
	normal = pd.read_csv( parser.n, index_col=0 ).T
	# Previously the result was computed and silently discarded; return it so
	# callers can use the scores (backward-compatible: old callers ignored None).
	return run_discern( normal, cancer, cancer.columns, parser.l, sys.stdout )


def main():
	# Removed stray debug statement (print "a") that polluted stdout.
	run_analyses()


if __name__ == '__main__':
	main()
This file search utility allows you to find files stored on Windows PCs connected to a LAN/WAN. You can search for files that meet specified criteria on one or multiple remote PCs. Supported search criteria include search by file name, extension, file mask and file location. When multiple PCs are to be searched, the application performs the search in parallel to increase performance. The collected search results can be exported to an Excel file. Alexa Data Scraper is a data harvesting utility used to periodically query Alexa for domain statistics. You provide a list of URLs and then perform a query that will autonomously poll Alexa for ranking statistics for all your domains. You can only query a domain name once per 24 hours; this is to prevent duplicates, since Alexa ranking data typically changes once for each 24-hour epoch. Our software searches for iTunes codes online. These codes are shared by users and different companies. We search and find these codes. There is no certainty as to whether these codes will work or not, as many users can redeem the codes instantly once they are available. However, if you try more times you have a higher chance that one will work. DigitalWorlds is a fast search engine to help you find software and games for your Windows PC. No registration required, all for free and ads free. Join us today and load your PC! Search Engine Classified Ad Blaster - Blast Your Ad To 4500+ Classified Websites. Blast your ad to over 3 thousand websites daily. That's over 90,000 submissions monthly. Blast for all your products and services! Easily submit your site to the major search engines! This software alone can save you hours of research. Google, Yahoo, MSN, and more. Proxic is an intelligent proxy leecher that can retrieve thousands of proxy addresses from numerous web resources. It is a multi-threaded application that crawls a list of URLs you provide.
In addition to crawling each URL provided, Proxic if enabled can extract additional links for continued crawling and proxy extraction therein. Proxic is a powerful utility to fuel proxy hungry software like SEO and load testing tools. G-Lock Backlink Diver is for everyone who wants to see where his site is currently listed and control the SEO quality of pages linking to the site. G-Lock Backlink Diver parses a list of URL's and tells you if your link is still on the page providing you with a fast analysis of your linking profile. The tool can also run a PageRank checking and Google index checking to see how valuable the pages linking to your site are. Gear Software Manager is a free software version online check software. First, Gear Software Manager scans versions of all software installed in your computer, then Gear Software Manager checks the last versions online to see if there are updates available. You can click a link to start download new version if it detects updates. Gear Software Manager also lists must-have software such as FireFox, Google Chrome or iTunes etc. Software that builds 1000's of high quality SEO Backlinks to your website - meaning it can help increase the ranking your website has on the search engine results pages (for the keywords that you specify). This is useful because having a higher search engine rank, means that your website will be getting a lot more additional free and targeted visitors each day. Datum Whois is a basic domain lookup PHP Script. Datum Whois supports the most important tlds (top-level domains) including .com, .net, .co.za and etc. Installing the Datum Whois script is a breeze. All you need to do is download it, unzip it and then upload it to your web server root. Simple LDAP Directory Search Tool,LDAPSearch is FREE and simple tool for remotely searching the Directory servers such as eDirectory, Active Directory etc. 
Directory server provides the most scalable, high-performance LDAP data store for critical information within the industry and serves as the foundation for the new generation of e-business applications and Web services. Share Search Tool is an unique desktop software which enable you to search Share links on the Web with minimal effort. You can easily search for mp3, movies, games, software or other files hosted on a share servers like RapidShare, Megaupload, Fileserve or Filesonic. Are you a software producer? Are you a copyright holder? Our software enable you to not only search links which violates your copyrights but also manage and send an Abuse Reports. RiverGate Rss Reader is a FREE news aggregator or feed reader that uses web feed to retrieve syndicated web content such as weblogs, podcasts, vlogs, and mainstream media websites, or in the case of a search aggregator, a customized set of search results. To make it as easy as possible for anyone willing to use such a program RiverGate Rss Reader has a very simple, clean interface without any redundant buttons just for the visual sake.
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""SQLAlchemy declarative models for points of sale ('pv') and their related
classification/lookup tables.

Naming appears to follow Italian retail-domain conventions (e.g. 'pv' =
punto vendita / point of sale, 'ragione_sociale' = company name) --
NOTE(review): inferred from identifiers only, confirm with the schema owner.

Conventions visible in the schema:
  * 'tc_*' tables are lookup/type tables, 'rel_*' tables are associations;
  * every table carries the audit columns ins_data/ins_utente (created
    at/by) and mod_data/mod_utente (modified at/by).
"""
from sqlalchemy import Column, Integer, Unicode, ForeignKey, DateTime, Float, Boolean
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, mapper, relation, sessionmaker

echo = False

Base = declarative_base()


class Pv(Base):
    """A point of sale, with identification, address and contact details."""
    __tablename__ = 'pv'

    pv_id = Column('pv_id', Integer, primary_key=True)
    codice = Column(Unicode)
    pref_mmas = Column(Unicode)
    cod_mmas = Column(Integer)
    certificato = Column(Boolean)
    pv_mt = Column(Boolean)
    cliente = Column(Boolean)
    cod_cliente = Column(Unicode)
    # mapped onto legacy DB columns 'nome1'/'nome2'
    ragione_sociale = Column('nome1', Unicode(50))
    titolare = Column('nome2', Unicode(50))
    tc_istat_id = Column(Integer)
    indirizzo = Column(Unicode(100))
    cap = Column(Unicode)
    comune = Column(Unicode)
    provincia = Column(Unicode(2))
    tel1 = Column(Unicode(20))
    tel2 = Column(Unicode(20))
    tel3 = Column(Unicode(20))
    cf_pi = Column(Unicode)
    fax = Column(Unicode)
    sito = Column(Unicode)
    email = Column(Unicode)
    note = Column(Unicode)
    data_aggiornamento = Column(DateTime)
    tc_stato_id = Column(Integer)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)


class rel_pv_pot(Base):
    """Association of a Pv with a 'pot' entry, carrying a numeric value."""
    __tablename__ = 'rel_pv_pot'

    tc_clpot_id = Column(Integer)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)
    pv_id = Column(Integer, primary_key=True)
    tc_pot_id = Column(Integer, primary_key=True)
    valore = Column(Float)


class tc_clpot(Base):
    """Lookup table: classes of 'pot' entries."""
    __tablename__ = 'tc_clpot'

    tc_clpot_id = Column(Integer, primary_key=True)
    testo = Column(Unicode(255))
    tc_stato_id = Column(Integer)
    primario = Column(Boolean)
    calc_autom = Column(Boolean)
    valore_min = Column(Float)
    ordine = Column(Integer)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)


class tc_pot(Base):
    """Lookup table: 'pot' entries, grouped by tc_clpot, with a coeff range."""
    __tablename__ = 'tc_pot'

    tc_pot_id = Column(Integer, primary_key=True)
    tc_clpot_id = Column(Integer)
    testo = Column(Unicode(255))
    tc_stato_id = Column(Integer)
    ordine = Column(Integer)
    coeff_min = Column(Float)
    coeff_max = Column(Float)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)


class rel_pv_par(Base):
    """Association of a Pv with a 'par' entry."""
    __tablename__ = 'rel_pv_par'

    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)
    pv_id = Column(Integer, primary_key=True)
    tc_clpar_id = Column(Integer)
    tc_par_id = Column(Integer, primary_key=True)


class tc_par(Base):
    """Lookup table: 'par' entries, grouped by tc_clpar."""
    __tablename__ = 'tc_par'

    tc_par_id = Column(Integer, primary_key=True)
    tc_clpar_id = Column(Integer)
    testo = Column(Unicode(255))
    tc_stato_id = Column(Integer)
    ordine = Column(Integer)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)


class tc_clpar(Base):
    """Lookup table: classes of 'par' entries."""
    __tablename__ = 'tc_clpar'

    tc_clpar_id = Column(Integer, primary_key=True)
    tc_stato_id = Column(Integer)
    testo = Column(Unicode(255))
    ordine = Column(Integer)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)


class rel_pv_mar(Base):
    """Association of a Pv with a brand class ('mar'), with usage share."""
    __tablename__ = 'rel_pv_mar'

    pv_id = Column(Integer, primary_key=True)
    tc_clmar_id = Column(Integer, primary_key=True)
    ordine = Column(Integer, primary_key=True)
    uso = Column(Float)
    tc_mar_id = Column(Integer)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)

    def __repr__(self):
        return "pv_id: %s,tc_clmar_id: %s, uso:%s, ordine: %s" % (self.pv_id, self.tc_clmar_id, self.uso, self.ordine)


class tc_mar(Base):
    """Lookup table: brands ('mar')."""
    __tablename__ = 'tc_mar'

    tc_mar_id = Column(Integer, primary_key=True)
    testo = Column(Unicode(255))
    tc_stato_id = Column(Integer)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)

    def __repr__(self):
        return "testo: %s, tc_mar_id: %d" % (self.testo, self.tc_mar_id)


class tc_clmar(Base):
    """Lookup table: classes of brands."""
    __tablename__ = 'tc_clmar'

    tc_clmar_id = Column(Integer, primary_key=True)
    tc_stato_id = Column(Integer)
    testo = Column(Unicode(255))
    ordine = Column(Integer)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)

    def __repr__(self):
        return "%s tc_clmar_id: %s, ordine: %s" % (self.testo, self.tc_clmar_id, self.ordine)


class tc_rel_clmar_mar(Base):
    """Association between brand classes (tc_clmar) and brands (tc_mar)."""
    __tablename__ = 'tc_rel_clmar_mar'

    tc_clmar_id = Column(Integer, primary_key=True)
    tc_mar_id = Column(Integer, primary_key=True)
    ins_data = Column(DateTime)
    ins_utente = Column(Integer)
    mod_data = Column(DateTime)
    mod_utente = Column(Integer)

    def __repr__(self):
        return "tc_clmar_id: %s,tc_mar_id:%s" % (self.tc_clmar_id, self.tc_mar_id)
To explore evidence-based programs and strategies — such as motivational interviewing, harm reduction, and stages of change — that incorporate health promotion and behavior change, to address diabetes prevention in adults. To explore evidence-based programs and strategies that incorporate health promotion and behavior change for families with children at risk for diabetes. To discuss promising practices that promote healthy habits. To identify challenges in addressing nutrition and physical activity in children and adults of special and vulnerable populations, such as those experiencing homelessness and farmworkers.
# Orca
#
# Copyright (C) 2013 The Orca Team.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA  02110-1301 USA.

__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2013 The Orca Team."
__license__ = "LGPL"

import pyatspi

import orca.script_utilities as script_utilities
import orca.scripts.toolkits.gtk as gtk


class Utilities(gtk.Utilities):
    """GTK utilities with special handling for the document frame role."""

    def __init__(self, script):
        gtk.Utilities.__init__(self, script)

    def isReadOnlyTextArea(self, obj):
        # A document frame is never treated as read-only; everything else
        # defers to the generic GTK heuristics.
        isDocumentFrame = obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME
        if isDocumentFrame:
            return False

        return gtk.Utilities.isReadOnlyTextArea(self, obj)

    def isTextArea(self, obj):
        # Conversely, a document frame always counts as a text area.
        isDocumentFrame = obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME
        if isDocumentFrame:
            return True

        return gtk.Utilities.isTextArea(self, obj)
"Checking the Staats: How Long Is Too Long to Give Adequate Public Noti" by David M. Longo Ph.D. and Ryan P. O’Quinn Ph.D. A classic property rights question looms large in the field of patent law: where do the rights of inventors end and the rights of the public begin? The right of inventors to modify the scope of their claimed inventions, even after the patent issues, is in direct tension with the concepts of public notice and the public domain. The Patent Act currently permits broadening of claims so long as a reissue application demonstrating intent to broaden is filed within two years of the original patent issue. Over the years, however, this relatively straightforward statutory provision has sparked numerous disputes over its meaning and application. On September 8, 2011, the Court of Appeals for the Federal Circuit heard oral arguments in In re Staats. In this case, Apple Computer, Inc. appeals the rejection of a continuation reissue patent application. The U.S. Patent & Trademark Office and the Board of Patent Appeals and Interferences rejected the application on the grounds that Apple attempted to broaden the scope of its patent claims in a manner not “foreseeable” more than eight years after the patent first issued. Apple contends that the language of the statute and prior case law permit its interpretation, and the application should be allowed in the interest of innovation. This issue is hardly a new one—this submission highlights nearly 140 years of case law, legislative history, and statutory shaping pertaining to broadening reissues. We analyze the issues raised in the briefs from Staats, as well as the oral arguments. Finally, we discuss from a practitioner’s perspective what the Federal Circuit could do—and should do—in the field of broadening reissues.
"""Fantasy-league "provider" layer for nflfan.

Defines the cached data model (League, Matchup, Owner, Roster,
RosterPlayer) plus the abstract Provider interface and its Yahoo and
ESPN HTML-scraping implementations.
"""
from __future__ import absolute_import, division, print_function
from collections import namedtuple
import json
import os
import re
import sys
import time

import requests
from bs4 import BeautifulSoup

import nfldb

import nflfan.config

__pdoc__ = {}

_user_agent = 'Mozilla/5.0 (X11; Linux x86_64)'
# _user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2498.0 Safari/537.36'
# _user_agent = ''
"""
The user agent string is heuristically determined. Namely, I was
having problems getting some providers to authenticate with more
vague user agent strings.

You may want to use a different user agent string entirely if
you're writing your own provider.
"""

# URL templates per provider. Yahoo templates are %-style; ESPN templates
# use str.format fields.
_urls = {
    'yahoo': {
        'owner': 'http://football.fantasysports.yahoo.com/f1/%s/teams',
        'matchup': 'http://football.fantasysports.yahoo.com/f1/%s/'
                   'matchup?matchup_week=%d&ajaxrequest=1',
        'roster': 'http://football.fantasysports.yahoo.com/f1/%s/%s?week=%d',
    },
    'espn': {
        'owner': 'http://games.espn.go.com/ffl/leaguesetup'
                 '/ownerinfo?leagueId={league_id}&seasonId={season_id}',
        'matchup': 'http://games.espn.go.com/ffl/scoreboard?'
                   'leagueId={league_id}&matchupPeriodId={week}'
                   '&seasonId={season_id}',
        'roster': 'http://games.espn.go.com/ffl/playertable/prebuilt/'
                  'manageroster?leagueId={league_id}&teamId={team_id}'
                  '&seasonId={season_id}&scoringPeriodId={week}'
                  '&view=overview&context=clubhouse'
                  '&ajaxPath=playertable/prebuilt/manageroster'
                  '&managingIr=false&droppingPlayers=false&asLM=false',
    },
}


def pp(soup):
    # Debugging helper: pretty-print a BeautifulSoup tree.
    print(soup.prettify().encode('utf-8'))


def eprint(*args, **kwargs):
    # Print to stderr with an "[nflfan]" prefix.
    kwargs['file'] = sys.stderr
    args = ['[nflfan]'] + list(args)
    print(*args, **kwargs)


def player_search(db, full_name, team=None, position=None):
    """
    A thin wrapper around `nfldb.player_search` that tries searching
    with `team` or `position` when given, but if no results are found,
    then this returns the results of a search with just the full name.

    This allows for a slightly out-of-date database to still provide
    a match while also disambiguating players with the same name.
    """
    if position not in nfldb.Enums.player_pos:
        position = None

    # Progressively relax the filters until a match is found.
    p, _ = nfldb.player_search(db, full_name, team=team, position=position)
    if p is None and position is not None:
        p, _ = nfldb.player_search(db, full_name, team=team, position=None)
    if p is None and team is not None:
        p, _ = nfldb.player_search(db, full_name, team=None, position=position)
    if p is None and team is not None and position is not None:
        p, _ = nfldb.player_search(db, full_name, team=None, position=None)
    return p


class League (namedtuple('League',
                         'season phase ident prov_name name scoring conf')):
    __pdoc__['League.season'] = \
        """The year of the NFL season for this league."""
    __pdoc__['League.phase'] = \
        """The phase of the season: preseason, regular or post."""
    __pdoc__['League.ident'] = \
        """
        A unique identifier for this league. The type and format of
        this value is provider dependent.
        """
    __pdoc__['League.prov_name'] = \
        """The name of the provider for this league."""
    __pdoc__['League.name'] = \
        """The name of this league from the configuration."""
    __pdoc__['League.scoring'] = \
        """The `nflfan.ScoreSchema` for this league."""
    __pdoc__['League.conf'] = \
        """
        A dictionary of configuration settings. The keys and values in
        this dictionary are provider dependent.
        """

    def __init__(self, *args):
        super(League, self).__init__(*args)
        # Per-week cache of parsed owners/matchups/rosters, filled lazily
        # by _load from the JSON files written by `nflfan-update`.
        self._cache = {}

    @property
    def full_name(self):
        return '%s.%s' % (self.prov_name, self.name)

    def is_me(self, obj):
        # True if `obj` (an Owner, Roster or Matchup) corresponds to the
        # configured "me" substring. Falsy/missing config means never.
        if not self.conf.get('me', None):
            return False
        if isinstance(obj, Roster):
            return self.is_me(obj.owner)
        elif isinstance(obj, Matchup):
            return self.is_me(obj.owner1) or self.is_me(obj.owner2)
        else:
            return self.conf['me'].lower() in obj.name.lower()

    def me(self, objs):
        # Return the first object in `objs` matching is_me, else None.
        for obj in objs:
            if self.is_me(obj):
                return obj
        return None

    def owners(self, week):
        return self._cached(week, 'owners')

    def owner(self, week, ident):
        for o in self.owners(week):
            if o.ident == ident:
                return o
        return None

    def matchups(self, week):
        return self._cached(week, 'matchups')

    def matchup(self, week, ident):
        # Find the matchup involving the owner with `ident`. Bye-week
        # matchups (one side None) are skipped.
        for m in self.matchups(week):
            if m.owner1 is None or m.owner2 is None:
                continue
            if m.owner1.ident == ident or m.owner2.ident == ident:
                return m
        return None

    def rosters(self, week):
        return self._cached(week, 'rosters')

    def roster(self, week, ident):
        for r in self.rosters(week):
            if r.owner.ident == ident:
                return r
        return None

    def cache_path(self, week):
        # e.g. <cache>/<season>/<phase>/<week>/<prov>.<league>.json
        return os.path.join(nflfan.config.cache_path(),
                            str(self.season), str(self.phase), str(week),
                            self.full_name + '.json')

    def _cached(self, week, key):
        if week not in self._cache:
            self._load(week)
        return self._cache[week][key]

    def _load(self, week):
        # Deserialize the cached JSON for `week` into model objects.
        raw = None
        fp = self.cache_path(week)
        try:
            with open(fp) as f:
                raw = json.load(f)
        except IOError:
            raise IOError(
                "No cached data for week %d in %s could be found at %s\n"
                "Have you run `nflfan-update --week %d` yet?"
                % (week, self.full_name, fp, week))

        d = {'owners': [], 'matchups': [], 'rosters': []}
        for owner in raw['owners']:
            d['owners'].append(Owner._make(owner))
        for matchup in raw['matchups']:
            # Either side may be None (bye week).
            o1 = None if matchup[0] is None else Owner._make(matchup[0])
            o2 = None if matchup[1] is None else Owner._make(matchup[1])
            d['matchups'].append(Matchup(o1, o2))
        for roster in raw['rosters']:
            # Roster JSON layout: [owner, season, week, [players...]].
            o = Owner._make(roster[0])
            r = Roster(o, roster[1], roster[2], [])
            for rp in roster[3]:
                r.players.append(RosterPlayer._make(rp))
            d['rosters'].append(r)
        self._cache[week] = d

    def __str__(self):
        return self.full_name


class Matchup (namedtuple('Matchup', 'owner1 owner2')):
    __pdoc__['Matchup.owner1'] = \
        """
        One of the two teams in this matchup represented as an
        `nflfan.Owner` object.
        """
    __pdoc__['Matchup.owner2'] = \
        """
        One of the two teams in this matchup represented as an
        `nflfan.Owner` object.
        """

    def other(self, ident):
        """
        Given an identifier for one of the owner's in this matchup,
        return the `nflfan.Owner` of the other owner.
        """
        assert ident in (self.owner1.ident, self.owner2.ident)
        if ident == self.owner1.ident:
            return self.owner2
        else:
            return self.owner1

    def __str__(self):
        return '%s vs. %s' % (self.owner1, self.owner2)


class Owner (namedtuple('Owner', 'ident name')):
    __pdoc__['Owner.ident'] = \
        """
        A unique identifier corresponding to this owner. The type
        of this value is provider-dependent.
        """
    __pdoc__['Owner.name'] = \
        """A string representing the name of this owner."""

    def __str__(self):
        return self.name


class Roster (namedtuple('Roster', 'owner season week players')):
    __pdoc__['Roster.owner'] = \
        """
        A `nflfan.Owner` object corresponding to the owner of this
        roster.
        """
    __pdoc__['Roster.players'] = \
        """
        A list of `nflfan.RosterPlayer` objects corresponding to the
        set of players on this roster.
        """

    def new_player(self, pos, team, bench, player_id):
        """
        A convenience method for creating a new `nflfan.RosterPlayer`
        given the current roster.
        """
        return RosterPlayer(pos, team, bench, self.season, self.week,
                            None, 0.0, None, player_id)

    @property
    def active(self):
        return filter(lambda rp: not rp.bench, self.players)

    @property
    def benched(self):
        return filter(lambda rp: rp.bench, self.players)

    @property
    def points(self):
        """Returns the total number of points for non-benched players."""
        return sum(p.points for p in self.players if not p.bench)

    def __str__(self):
        s = []
        for rp in self.players:
            s.append(str(rp))
        return '\n'.join(s)


class RosterPlayer (
    namedtuple('RosterPlayer',
               'position team bench season week '
               'game points player player_id')):
    __pdoc__['RosterPlayer.position'] = \
        """
        A string corresponding to the position of the roster spot
        occupied by this player. The possible values of this string
        are provider dependent.
        """
    __pdoc__['RosterPlayer.team'] = \
        """
        A team abbreviation that this player belongs to. It must be a
        valid nfldb team abbreviation and *cannot* be `UNK`.
        """
    __pdoc__['RosterPlayer.bench'] = \
        """A boolean indicating whether this is a bench position or not."""
    __pdoc__['RosterPlayer.season'] = \
        """The year of the corresponding NFL season."""
    __pdoc__['RosterPlayer.week'] = \
        """The week number in which this roster was set."""
    __pdoc__['RosterPlayer.game'] = \
        """
        The `nfldb.Game` object for the game that this player played
        in. If this roster position corresponds to a bye week, then
        this attribute is set to `None`.
        """
    __pdoc__['RosterPlayer.points'] = \
        """The total fantasy points for this roster player."""
    __pdoc__['RosterPlayer.player'] = \
        """
        A `nfldb.Player` object corresponding to this roster player.

        This attribute is `None` by default, and is always `None` for
        roster players corresponding to entire teams (e.g., defense).
        """
    __pdoc__['RosterPlayer.player_id'] = \
        """
        A player id string corresponding to the player in this roster
        position and a player in nfldb. This may be `None` when the
        roster player corresponds to an entire team. (e.g., A defense.)
        """

    @property
    def is_empty(self):
        return self.team is None and self.player_id is None

    @property
    def is_defense(self):
        return self.team is not None and self.player_id is None

    @property
    def is_player(self):
        return self.player_id is not None

    @property
    def id(self):
        # Display identifier: 'Empty', the team abbreviation (defense),
        # or the nfldb player id.
        if self.is_empty:
            return 'Empty'
        elif self.is_defense:
            return self.team
        else:
            return self.player_id

    @property
    def name(self):
        return self.id if not self.player else self.player.full_name

    def __str__(self):
        # A '*' marks a player whose game is currently in progress.
        if self.game is not None and self.game.is_playing:
            playing = '*'
        else:
            playing = ' '
        return '%-6s %-4s %-20s %s%0.2f' \
               % (self.position, self.team, self.name, playing, self.points)


class Provider (object):
    """
    This class describes the interface that each fantasy football
    provider must implement so that it can work with nflfan. In other
    words, this is an abstract base class that should **not** be
    instantiated directly.

    All public members of this class must also be defined in each
    provider implementation, including the class variables.
    """

    provider_name = None
    """The name of the provider used in the configuration file."""

    conf_required = ['scoring', 'league_name', 'season', 'phase', 'league_id']
    """A list of fields required for every provider."""

    conf_optional = ['me']
    """A list of fields that are optional for every provider."""

    def __init__(self, lg):
        self._lg = lg
        # One requests session per provider so cookies (login state)
        # persist across requests.
        self._session = requests.Session()
        self._session.headers.update(getattr(self, '_headers', {}))

    def owners(self):
        """Returns a list of `nflfan.Owner` objects."""
        assert False, 'subclass responsibility'

    def matchups(self, week):
        """
        Given a week number, this returns a list of `nflfan.Matchup`
        objects describing the head-to-head matchups for `week`.
        """
        assert False, 'subclass responsibility'

    def roster(self, player_search, owner, week):
        """
        Given a `nflfan.Owner` and a week number, this returns a
        `nflfan.Roster` object. The `nflfan.Roster` contains a list of
        `nfldb.Player` objects and their corresponding position on the
        roster.

        `player_search` should be a function that takes a full
        player name and returns the closest matching player as a
        `nfldb.Player` object. It should also optionally take keyword
        arguments `team` and `position` that allow for extra filtering.

        Note that the roster position is a string but the set of
        possible values is provider dependent. It is used for display
        purposes only.
        """
        assert False, 'subclass responsibility'

    def save(self, fp, player_search, week):
        """
        Writes a JSON encoding of all the owners, matchups and rosters
        for the given week to a file at `fp`.

        `player_search` should be a function that takes a full
        player name and returns the closest matching player as a
        `nfldb.Player` object. It should also optionally take keyword
        arguments `team` and `position` that allow for extra filtering.
        """
        d = {
            'owners': self.owners(),
            'matchups': self.matchups(week),
        }
        # I'm hoping this doesn't hurt custom providers that don't need
        # to do IO to fetch a roster.
        def roster(owner):
            return self.roster(player_search, owner, week)
        # pool = multiprocessing.pool.ThreadPool(3)
        # d['rosters'] = pool.map(roster, d['owners'])
        d['rosters'] = map(roster, d['owners'])
        # NOTE(review): on Python 3 `map` returns an iterator, which
        # json.dump cannot serialize — presumably this runs on Python 2;
        # confirm, or wrap in list(...).

        try:
            os.makedirs(os.path.dirname(fp))
        except OSError:
            # Directory already exists (or is otherwise uncreatable).
            pass
        # NOTE(review): the file handle opened here is never explicitly
        # closed; consider a `with` block.
        json.dump(d, open(fp, 'w+'))

    def _request(self, url):
        # Fetch `url`; if the response looks like a login page, log in
        # once and retry before giving up.
        eprint('download %s' % url)
        r = self._session.get(url)
        soup = BeautifulSoup(r.text, 'html.parser')
        if self._login_form(soup):
            self._login()

            r = self._session.get(url)
            soup = BeautifulSoup(r.text, 'html.parser')
            if self._login_form(soup):
                raise IOError("Authentication failure.")
        return r

    def _login(self):
        assert self._login_url is not None
        soup = BeautifulSoup(self._session.get(self._login_url).text,
                             'html.parser')
        if not self._login_form(soup):
            # Already logged in!
            return

        form = self._login_form(soup)
        # NOTE(review): _login_params is called with `soup` here, but the
        # base/ESPN signatures below take no argument — only Yahoo's
        # override accepts it; verify ESPN logins actually exercise this.
        params = self._login_params(soup)
        for inp in soup.select('#hiddens input[type="hidden"]'):
            params[inp['name']] = inp['value']
        r = self._session.post('https://login.yahoo.com' + form['action'],
                               params=params)
        return BeautifulSoup(r.text, 'html.parser')

    def _login_params(self):
        assert False, 'subclass responsibility'

    def _login_form(self, soup):
        assert False, 'subclass responsibility'

    def __str__(self):
        return self.__class__.provider_name


class Yahoo (Provider):
    provider_name = 'yahoo'
    conf_required = []
    conf_optional = ['username', 'password']
    _headers = {'User-Agent': _user_agent}
    _login_url = 'https://login.yahoo.com/config/login'

    def __init__(self, lg):
        super(Yahoo, self).__init__(lg)
        # League idents look like '<a>.<b>.<league_num>'.
        _, _, self._league_num = self._lg.ident.split('.')

    def owners(self):
        match_owner_link = re.compile('team-[0-9]+-name')
        url = _urls['yahoo']['owner'] % self._league_num
        soup = BeautifulSoup(self._request(url).text, 'html.parser')
        owners = []
        for link in soup.find_all(id=match_owner_link):
            ident = self._owner_id_from_url(link['href'])
            owners.append(Owner(ident, link.text.strip()))
        return owners

    def matchups(self, week):
        mk_owner = lambda div: Owner(owner_id(div.a['href']),
                                     div.text.strip())
        owner_id = self._owner_id_from_url

        url = _urls['yahoo']['matchup'] % (self._league_num, week)
        # The matchup endpoint returns JSON whose 'content' field is HTML.
        rjson = self._request(url).json()
        soup = BeautifulSoup(rjson['content'], 'html.parser')
        matchups = []
        for matchup in soup.find('ul').children:
            pair = list(matchup.find_all('div', class_='Fz-sm'))
            if len(pair) == 1:
                # A single team means a bye week.
                matchups.append(Matchup(mk_owner(pair[0]), None))
            else:
                matchups.append(Matchup(mk_owner(pair[0]), mk_owner(pair[1])))
        return matchups

    def roster(self, player_search, owner, week):
        def to_pos(row):
            return row.td.find(class_='pos-label')['data-pos'].strip().upper()

        def to_name(row):
            return row.find(class_='ysf-player-name').a.text.strip()

        def to_team(row):
            # The span text runs team and position together; take the
            # leading token as the team.
            team_pos = row.find(class_='ysf-player-name').span.text.strip()
            return nfldb.standard_team(re.search('^\S+', team_pos).group(0))

        def rplayer(r, name, team, pos):
            bench = pos == 'BN'
            if name is None and team is None:
                # Empty roster slot.
                return r.new_player(pos, None, bench, None)
            elif nfldb.standard_team(name) != 'UNK':
                # The "name" is itself a team, i.e., a defense.
                return r.new_player(pos, team, bench, None)
            else:
                player = player_search(name, team=team, position=pos)
                return r.new_player(pos, team, bench, player.player_id)

        match_table_id = re.compile('^statTable[0-9]+$')
        url = _urls['yahoo']['roster'] % (self._league_num, owner.ident, week)
        soup = BeautifulSoup(self._request(url).text, 'html.parser')

        roster = Roster(owner, self._lg.season, week, [])
        for table in soup.find_all(id=match_table_id):
            for row in table.tbody.find_all('tr', recursive=False):
                pos = to_pos(row)
                try:
                    team, name = to_team(row), to_name(row)
                    roster.players.append(rplayer(roster, name, team, pos))
                except AttributeError:
                    # Row has no player info; record an empty slot.
                    roster.players.append(rplayer(roster, None, None, pos))
        return roster

    def _owner_id_from_url(self, url):
        return re.search('%s/([0-9]+)' % self._league_num, url).group(1)

    def _login(self):
        soup = super(Yahoo, self)._login()
        if self._login_form(soup):
            err_div = soup.find(id='mbr-login-error')
            err_msg = 'Unknown error.'
            if err_div:
                err_msg = err_div.text.strip()
            raise IOError('Login failed: %s' % err_msg)

    def _login_params(self, soup):
        return {
            'username': self._lg.conf.get('username', ''),
            'passwd': self._lg.conf.get('password', ''),
            'signin': '',
            # '.persistent': 'y',
            'countrycode': '1',
            # '_crumb': '8cSELfo475z',
            # '_ts': str(int(time.time())),
            # '_format': '',
            # '_uuid': 'Q9JF85iYg9ax',
            # '_seqid': '2',
            # 'otp_channel': '',
        }

    def _login_form(self, soup):
        return soup.find('form', id='mbr-login-form')


class ESPN (Provider):
    provider_name = 'espn'
    conf_required = []
    conf_optional = ['username', 'password']
    _headers = {'User-Agent': _user_agent}
    _login_url = 'http://games.espn.go.com/ffl/signin?_=_'

    def owners(self):
        url = _urls['espn']['owner'].format(
            league_id=self._lg.ident, season_id=self._lg.season)
        soup = BeautifulSoup(self._request(url).text, 'html.parser')
        owners = []
        for td in soup.select('tr.ownerRow td.teamName'):
            ident = self._owner_id_from_url(td.a['href'])
            owners.append(Owner(ident, td.text.strip()))
        return owners

    def matchups(self, week):
        owner_id = self._owner_id_from_url

        url = _urls['espn']['matchup'].format(
            league_id=self._lg.ident, season_id=self._lg.season, week=week)
        soup = BeautifulSoup(self._request(url).text, 'html.parser')
        matchupDiv = soup.find(id='scoreboardMatchups')
        matchups = []
        for table in matchupDiv.select('table.matchup'):
            t1, t2 = list(table.find_all(class_='name'))
            id1, id2 = owner_id(t1.a['href']), owner_id(t2.a['href'])
            name1, name2 = t1.a.text.strip(), t2.a.text.strip()
            o1, o2 = Owner(id1, name1), Owner(id2, name2)
            matchups.append(Matchup(o1, o2))
        return matchups

    def roster(self, player_search, owner, week):
        def to_pos(row):
            pos = row.find(class_='playerSlot').text.strip().upper()
            if pos == 'BENCH':
                return 'BN'
            return pos

        def to_name(row):
            name = row.find(class_='playertablePlayerName').a.text.strip()
            # If this is the defense, apparently 'D/ST' is included in
            # the name. Wtf?
            return re.sub('\s+D/ST$', '', name)

        def to_team(row):
            tpos = row.find(class_='playertablePlayerName').a.next_sibling
            tpos = tpos.strip(' \r\n\t*,|').upper()
            # This is a little weird because the team name seems to run
            # in with the position. Perhaps a weird encoding quirk?
            if len(tpos) < 2:
                return 'UNK'
            elif len(tpos) == 2:
                return nfldb.standard_team(tpos)
            else:
                team = nfldb.standard_team(tpos[0:3])
                if team == 'UNK':
                    team = nfldb.standard_team(tpos[0:2])
                return team

        def rplayer(r, name, team, pos):
            bench = pos == 'BN'
            name_team = nfldb.standard_team(name)
            if name is None and team is None:
                return r.new_player(pos, None, bench, None)
            elif name_team != 'UNK':
                # The "name" is itself a team, i.e., a defense.
                return r.new_player(pos, name_team, bench, None)
            else:
                player = player_search(name, team=team, position=pos)
                return r.new_player(pos, team, bench, player.player_id)

        url = _urls['espn']['roster'].format(
            league_id=self._lg.ident, season_id=self._lg.season, week=week,
            team_id=owner.ident)
        soup = BeautifulSoup(self._request(url).text, 'html.parser')

        roster = Roster(owner, self._lg.season, week, [])
        for tr in soup.select('tr.pncPlayerRow'):
            if tr.get('id', '') == 'pncEmptyRow':
                continue
            pos = to_pos(tr)
            try:
                team, name = to_team(tr), to_name(tr)
                roster.players.append(rplayer(roster, name, team, pos))
            except AttributeError:
                # Row has no player info; record an empty slot.
                roster.players.append(rplayer(roster, None, None, pos))
        return roster

    def _owner_id_from_url(self, url):
        return re.search('teamId=([0-9]+)', url).group(1)

    def _login(self):
        soup = super(ESPN, self)._login()
        if self._login_form(soup):
            err_msg = []
            for msg in soup.find_all('font', color='#ff0000'):
                err_msg.append(msg.text.strip())
            err_msg = '\n'.join(err_msg) if err_msg else 'Unknown error.'
            raise IOError('Login failed: %s' % err_msg)

    def _login_params(self):
        return {
            'username': self._lg.conf.get('username', ''),
            'password': self._lg.conf.get('password', ''),
            'submit': 'Sign In',
        }

    def _login_form(self, soup):
        return soup.find('form', attrs={'name': 'loginForm'})
For UC Santa Cruz grad student Tuguldur Sukhbold, doing advanced research in astrophysics on the wooded campus of Santa Cruz is a long way from his native country of Mongolia, where he had to build his own telescopes. Opportunities he got at UC Santa Cruz allowed him to organize a project that, last year, delivered telescopes to 7 percent of the schools in Mongolia—44 in all—so that science-starved kids might be exposed to the wonders of the universe. UC Santa Cruz History Professor Gail Hershatter has joined Nobel Prize–winning chemist Brian Kobilka, astrophysicist Neil deGrasse Tyson, singer-songwriter Judy Collins, novelist Tom Wolfe, and UC President Janet A. Napolitano as a newly elected fellow of the American Academy of Arts and Sciences. The 2015 class of 197 includes some of the world's most accomplished scholars, scientists, writers, artists, and civic, business, and philanthropic leaders. One of the nation's most prestigious honorary societies, the American Academy is also a leading center for independent policy research. Most of the discussion at the second annual UC Santa Cruz Climate & Policy Conference in March was not about how to reduce greenhouse gas emissions, but about how to plan for and adapt to the inevitable consequences of those emissions, which are already changing the climate. In his keynote speech, Penn State geologist Richard Alley provided a compelling overview of how our society's reliance on fossil fuels for energy is driving climate change, and he described how costly the impacts of global warming will be, in both economic and human terms. Alley also made the case that the technology is available now to make the transition to a sustainable energy system, and that it makes sense economically to do so. "If you just look at dollars and cents, the economy is still better if we start to wisely reduce our use of fossil fuels," Alley said. "Dealing with it makes us better off—it gives us a stronger economy and more jobs." 
Phil Berman has been working to develop an AIDS vaccine for nearly 30 years, first at the pioneering biotech company Genentech, then as cofounder of VaxGen, and now at UC Santa Cruz, where he is the Baskin Professor of Biomolecular Engineering. Since his arrival at UC Santa Cruz in 2006, Berman has established a major vaccine research effort funded by a series of grants from the National Institutes of Health, including two new grants in 2014 totaling $2.6 million. The latest results from this effort have Berman sounding optimistic about the prospects for a vaccine that can be effective in protecting against HIV infection. His lab has developed new vaccine candidates that he said are promising enough to consider advancing into clinical trials within the next two years. Shapiro is a leading authority on ancient DNA—how to recover it from fossils, museum specimens, and prehistoric remains buried in frozen tundra, and how to analyze it for clues to the evolutionary history of species and populations, both living and extinct. She has studied DNA from ancient populations of bears, horses, humans, and, yes, mammoths. But she is not a big proponent of "de-extinction"—the idea that scientists could use ancient DNA to recreate extinct species. In How to Clone a Mammoth: The Science of De-Extinction, Shapiro addresses the scientific and ethical challenges that would confront any effort to bring back extinct creatures. "I question if it's something we should do at all, for many ethical and environmental reasons," Shapiro said. "I'm trying to separate the science from the science fiction." Wading her way through dense thickets of Scotch broom in Washington State, Jennifer Thompson, 22, realized just how damaging the plant could be. Growing up to 12 feet in height, the perennial shrub with its bright yellow flowers had overrun land once filled with Douglas fir. 
But what was most interesting to Thompson was the fact that, even if all the Scotch broom were removed, the Douglas fir would most likely not regrow. Thanks to a grant from UC Santa Cruz's Dean's Fund for Undergraduate Research and to Professor of Ecology and Evolutionary Biology Ingrid Parker, Thompson (Cowell '15, plant sciences) spent a year doing research into the question of why the Scotch broom was so toxic to Douglas fir. Thompson's work is emblematic of a UC Santa Cruz ethos grounded in the founding days of the university: the opportunity for undergraduates to take part in research. In fact, a 2014 study conducted by the university found 73 percent of graduating seniors had assisted in faculty research or creative projects during their time at UC Santa Cruz. Sanjin Mehic's parents left everything behind when they fled war-torn Bosnia-Herzegovina for the quieter streets of San Jose, Calif. "My parents worked hard to get going and never looked back," said Mehic (Porter '16, biochemistry and molecular biology). For Mehic, 24, that industriousness shaped his own life. Mehic is engaged in a research study that focuses on the role of bacteria in the production of an algal toxin called domoic acid in the Monterey Bay. Domoic acid not only kills marine organisms but also can cause neurological damage, sometimes fatal, in humans. Thanks to Assistant Professor of Ocean Sciences Marilou Sison-Mangus and a grant from the Dean's Fund for Undergraduate Research, Mehic is studying whether certain bacteria influence production of domoic acid during algal blooms. Bacteria found naturally on some bats may prove useful in controlling the deadly fungal disease known as white-nose syndrome, which has devastated bat populations throughout eastern North America and continues to spread across the continent. Scientists at UC Santa Cruz isolated bacteria that strongly inhibited the growth of the white-nose syndrome fungus in laboratory tests.
Experiments are now in progress to see if treating bats with the bacteria can protect them from the disease, said Joseph Hoyt, a UC Santa Cruz graduate student who led the study. The Cowell Ranch Hay Barn at the base of the UC Santa Cruz campus is being "barn again." The structure is being rebuilt as a center for UC Santa Cruz's environmental and sustainability programs and will become the home of the Center for Agroecology & Sustainable Food Systems and a campus and community hub. The project, made possible by a $5 million gift from the Helen and Will Webster Foundation, supports the Coastal Sustainability initiative of the Campaign for UC Santa Cruz. When a weather-tight exterior is finished, the barn will contain a large multipurpose space for exhibitions, assemblies, barn dances, and more, along with a conference room, restrooms, and utility rooms. A future phase—still to be funded—will add other conference rooms, kitchenette, and exterior improvements. The rebuilding will be celebrated with an event on Sept. 12. The doors of the barn will be opened at 7 p.m. and the community welcomed to an evening of lively music and refreshments. UC Santa Cruz astrophysicist Jonathan Fortney is one of 15 principal investigators for a NASA initiative that is embracing a team approach to the quest for life on planets around other stars. The Nexus for Exoplanet System Science (NExSS) will benefit from the expertise of several dozen scientists across the NASA science community in an effort to find clues to life on faraway worlds. Fortney, an associate professor of astronomy and astrophysics at UC Santa Cruz, will lead a team exploring how novel statistical methods can be used to extract information from the light emitted and reflected by planetary atmospheres in order to understand their atmospheric temperatures and the abundance of molecules. Astronomers routinely analyze the light from distant stars by spreading it out into a spectrum of different wavelengths. 
Fortney is developing tools for analyzing the spectra of distant planets to determine molecular abundances in their atmospheres. Assistant Professor of History Elaine Sullivan received a Digital Start-Up Grant from the National Endowment for the Humanities (NEH) to develop a three-dimensional model and virtual tour that will demonstrate how an ancient Egyptian site evolved over more than 3,000 years. The $47,200 grant was one of 17 awards directed to development of new digital tools for study of the humanities that are part of a larger slate of 232 grants announced by the NEH. Sullivan is co-principal investigator of a Digital Humanities Research Cluster funded by the Institute for Humanities Research (IHR) at UC Santa Cruz. "I think of digital humanities as using new technologies to answer questions I can't answer now—opening up new realms of scholarship," said Sullivan. Anita Hill's life changed forever in 1991 when a television audience of 22 million saw her testify before the Senate Judiciary Committee during the confirmation hearing for Supreme Court nominee Judge Clarence Thomas. Her calm demeanor—as she was pressed to endlessly repeat the graphic descriptions of sexual harassment she endured while working for Thomas—struck a chord with the public and began a conversation about sexual harassment and power in the workplace that still resonates today. Now an author and professor of law, public policy, and women's studies at Brandeis University, Hill came to UC Santa Cruz in February to deliver a free public lecture on the topic: "Speaking Truth to Power: Gender and Racial Equality, 1991–2015." In a telephone interview from her office in Boston, Hill recalled the groundbreaking hearings that took place 23 years ago in Congress. "The experience itself was surreal beyond anything that I think anybody could have prepared for," said Hill. 
Last Day of Freedom—an animated short by UC Santa Cruz Associate Professor of Art Dee Hibbert-Jones and San Francisco artist Nomi Talisman—was honored with two awards at the 18th annual Full Frame Documentary Film Festival. It received the Jury Award for "Best Short," qualifying the film for a possible nomination for an Academy Award next year in the category of "Best Documentary (Short Subject)." In choosing Last Day of Freedom for the award, the festival judges noted: "This film demonstrates and reminds us of the simple power and intimacy of the human voice. It interweaves different visual styles of animation and engages the audience fully in an increasingly fraught tale that ultimately presents the supreme cost of doing the right thing." The short blends animation with poignant testimony to create a haunting story of a man who discovers his brother has committed a serious crime. Splinters in Our Ankles is UC Santa Cruz Theater Arts Professor Gerald Casel's choreographic response to what he calls "the collective cultural amnesia" about the Philippine national dance. Born in the Philippines and raised in California, Casel actually grew up with little knowledge of the popular dance. But recently, his research took him back to the Philippines to dig into the historical context of the national dance, which originated during the Spanish colonial occupation of the Philippines that began in the 16th century. "As a choreographer, I question the most famous and beloved traditional dance, Tinikling, and why it is represented in such a lively and festive way," said Casel. "I argue that during the time it was being created, Filipinos were under Spanish colonial rule—a violent past that is not discussed when this dance is presented. "I wanted to shed a light on this omission as 'cultural amnesia' to convey an alternative lens through which Filipino bodies can be represented and therefore not easily 'exoticized' and easily 'consumed' for entertainment." 
"American history is filled with stories of brave and powerful men, but have you ever wondered where the women are?" So begins Rad American Women A-Z: Rebels, Trailblazers, and Visionaries Who Shaped Our History…And Our Future, a book for kids by alumna Kate Schatz (Stevenson '01, women's studies and creative writing). It features the stories of 26 women who have had a major impact on American life—from artists and abolitionists, to scientists and sports heroes, to rock stars and writers. It also has the distinction of being the first children's book published by San Francisco's famed City Lights Publishers in the company's 60-year history. In May, the book debuted at No. 5 on the New York Times bestseller list. The UC Santa Cruz men's volleyball team spent its season honing sets and kills, doing conditioning, and watching game film. Team members also sold donuts and athletic gear, worked on their résumés, and picked up trash on the beach. The unusual regimen was born not just from the financially strapped squad's need to raise money in order to compete, but also from Head Coach Todd Hollenbeck's decision to approach this team in a new way: by building solid human beings first, both on the court and in life. "It's not about wins and losses," said Hollenbeck. "It's how can we score points when we're not even touching the ball. We do that in the classroom, in applying for a job, and by volunteering." The team ended the season with a 24-6 record and earned a visit to the NCAA Division III national championships in Hoboken, N.J. Alumnus Sage Weil (Ph.D. '07, computer science), who developed his thesis project into the highly successful open-source software data storage system Ceph, has given UC Santa Cruz $2.5 million to support research in open-source software. Weil's gift supports a faculty chair in open-source software and research led by Professor of Computer Science Carlos Maltzahn, who directs the Center for Research in Open Source Software at UC Santa Cruz. 
"Sage was a brilliant student and a serial entrepreneur even before he came here," said Scott Brandt, professor of computer science and now vice chancellor for research at UC Santa Cruz. "It's great to see one of our students succeed in such a visible way, and then for him to give back to us in this way is just extraordinary." The gift supports the Data Science Leadership initiative at UC Santa Cruz. UC Santa Cruz alumni from across the country contributed their works to 50/50—an art exhibition celebrating the 50th anniversary of the campus that ran in February. The show took place at both the R. Blitzer Gallery on the Westside of Santa Cruz, and on campus at the Porter College Faculty Gallery. Featuring 10 artists from each decade of UC Santa Cruz's 50-year history, the exhibition spotlighted local and visiting alumni artists from Santa Cruz to New York City. Visit 50years.ucsc.edu for additional events celebrating the 50th anniversary of UC Santa Cruz.
#
# Manage registers in a hardware design
#
# Copyright (C) 2008 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

"""
EquWriter - Writes out Assembler defines (based off the GNU assembler)
"""

from regenerate.writers.writer_base import WriterBase, ExportInfo


class AsmEqu(WriterBase):
    """
    Output file creation class that writes a set of constants
    representing the token for the registers addresses.

    Each register in the database is emitted as a GNU assembler
    ``.equ`` directive mapping ``<prefix><token>`` to its address.
    """

    def __init__(self, dbase):
        WriterBase.__init__(self, dbase)
        # Added to every register address on output (0 unless the base
        # class / caller sets it).
        self._offset = 0
        self._ofile = None

    def write_def(self, reg, prefix, offset):
        """
        Write a single definition in the format of:

            .equ <prefix><token>, 0x<hex address>

        `reg` supplies `address` (numeric) and `token` (identifier).

        BUGFIX: the original used "0x%s", which rendered the address in
        *decimal* after an "0x" prefix — wrong for any address >= 10.
        """
        address = reg.address
        base = reg.token
        name = "%s%s, " % (prefix, base)
        # %x so the digits following "0x" really are hexadecimal.
        self._ofile.write("\t.equ %-30s 0x%x\n" % (name, address + offset))

    def write(self, filename):
        """
        Write the .equ definition of every register in the database
        to `filename`.
        """
        # `with` guarantees the file is closed even if a write fails.
        with open(filename, "w") as self._ofile:
            self._write_header_comment(self._ofile, 'site_asm.inc',
                                       comment_char=';; ')
            for reg_key in self._dbase.get_keys():
                self.write_def(self._dbase.get_register(reg_key),
                               self._prefix, self._offset)
            self._ofile.write('\n')


EXPORTERS = [
    (WriterBase.TYPE_BLOCK,
     ExportInfo(AsmEqu, ("Header files", "Assembler Source"),
                "Assembler files", ".s", 'headers-asm'))
]
We chose the best series, with ideal quality, just for you, and this photo is one of the images in our best-photographs gallery regarding Machine Operator Skills Resume. We hope you will enjoy it. Posted by gifths on 2018-09-30 02:08:58. To see all of the photographs in the Machine Operator Skills Resume gallery, please follow this link. Do not forget to explore the next image gallery, which also contains the Machine Operator Skills Resume With Machine Operator Skills List Archives image shown above. If you have any thoughts or questions, or just want to say hello to other people, please do not hesitate to submit your opinions and ideas via the comment form below.
import datetime

from app import db


class Tweet(db.Document):
    """A single captured tweet, uniquely keyed by its Twitter id."""

    created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
    tid = db.IntField(unique=True, required=True)
    body = db.StringField(unique=True, required=True)
    username = db.StringField(max_length=50, required=True)

    meta = {
        'ordering': ['-created_at'],
        'indexes': ['-created_at', 'username'],
        'allow_inheritance': True,
    }


class Muse(db.Document):
    """
    A muse is a Twitter user which the Brain learns from.
    """

    created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
    username = db.StringField(max_length=50, unique=True, required=True)
    # Negative muses contribute examples of what *not* to produce.
    negative = db.BooleanField(default=False)

    meta = {
        'ordering': ['-created_at'],
        'indexes': ['-created_at', 'username'],
        'allow_inheritance': True,
    }


class Config(db.Document):
    """
    Configuration for the Brain (stored as a single document).

    If you make changes to this model, you will need to drop the
    saved record in the database:

        $ mongo
        > show dbs
        > use <your db>
        > db.config.drop()

    Then restart the application.
    """

    # Retweet probability threshold: the higher this is,
    # the less the brain will retweet.
    retweet_threshold = db.FloatField(default=0.9, required=True)

    # Chance to act — probability the brain will tweet at all;
    # lower means quieter.
    chance_to_act = db.FloatField(default=0.05, required=True)

    # Cap on retweets per interval, guarding against the brain
    # accidentally retweeting a TON of stuff at once.
    max_retweets = db.IntField(default=10, required=True)

    # Markov/brain tuning knobs.
    ngram_size = db.IntField(default=1, required=True)
    ramble = db.BooleanField(default=True)
    spasm = db.FloatField(default=0.05, required=True)

    meta = {
        'max_documents': 1,
    }


class Doc(db.Document):
    """
    A manually-fed training document for the Markov generator.
    """

    created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
    body = db.StringField(unique=True, required=True)

    meta = {
        'ordering': ['-created_at'],
        'indexes': ['-created_at'],
        'allow_inheritance': True,
    }
Should I Get A Class Ring? Yes. No … Maybe.
# Exercise sys.settrace: a global trace handler that, while tracing
# `factorial`, chains in two further local handlers ("Alice" and "Bob").
# NOTE(review): this looks like a MicroPython regression test whose
# expected output includes exact f_lineno values — keep statement order
# and formatting stable; confirm against the matching .exp file.
import sys

# Skip cleanly on ports/builds without sys.settrace support.
try:
    sys.settrace
except AttributeError:
    print("SKIP")
    raise SystemExit


def print_stacktrace(frame, level=0):
    """Print frames from `frame` outward, one line per frame."""
    # Ignore CPython specific helpers.
    if frame.f_globals["__name__"].find("importlib") != -1:
        print_stacktrace(frame.f_back, level)
        return

    print(
        "%2d: %s@%s:%s => %s:%d"
        % (
            level,
            " ",
            frame.f_globals["__name__"],
            frame.f_code.co_name,
            # reduce full path to some pseudo-relative
            "misc" + "".join(frame.f_code.co_filename.split("tests/misc")[-1:]),
            frame.f_lineno,
        )
    )
    # Recurse toward the outermost caller, increasing the depth label.
    if frame.f_back:
        print_stacktrace(frame.f_back, level + 1)


class _Prof:
    # Counts every trace event delivered to any handler below.
    trace_count = 0

    def trace_tick(self, frame, event, arg):
        self.trace_count += 1
        print_stacktrace(frame)


__prof__ = _Prof()

alice_handler_set = False


def trace_tick_handler_alice(frame, event, arg):
    # Local trace handler installed for the first `factorial` call.
    print("### trace_handler::Alice event:", event)
    __prof__.trace_tick(frame, event, arg)
    return trace_tick_handler_alice


bob_handler_set = False


def trace_tick_handler_bob(frame, event, arg):
    # Local trace handler installed for the second `factorial` call.
    print("### trace_handler::Bob event:", event)
    __prof__.trace_tick(frame, event, arg)
    return trace_tick_handler_bob


def trace_tick_handler(frame, event, arg):
    """Global handler: logs every event, hands `factorial` frames off
    to Alice first, then Bob, then keeps tracing itself."""
    # Ignore CPython specific helpers.
    if frame.f_globals["__name__"].find("importlib") != -1:
        return

    print("### trace_handler::main event:", event)
    __prof__.trace_tick(frame, event, arg)

    # Only `factorial` frames get the chained local handlers.
    if frame.f_code.co_name != "factorial":
        return trace_tick_handler

    global alice_handler_set
    if event == "call" and not alice_handler_set:
        alice_handler_set = True
        return trace_tick_handler_alice

    global bob_handler_set
    if event == "call" and not bob_handler_set:
        bob_handler_set = True
        return trace_tick_handler_bob

    return trace_tick_handler


def factorial(n):
    # Recursive on purpose: produces a deep, easily-recognized trace.
    if n == 0:
        return 1
    else:
        return n * factorial(n - 1)


def do_tests():
    # These commands are here to demonstrate some execution being traced.
    print("Who loves the sun?")
    print("Not every-", factorial(3))

    # Pull in and run the shared generic-tracing test cases.
    from sys_settrace_subdir import trace_generic

    trace_generic.run_tests()
    return


# Trace everything between these two settrace calls.
sys.settrace(trace_tick_handler)
do_tests()
sys.settrace(None)

print("\n------------------ script exited ------------------")
print("Total traces executed: ", __prof__.trace_count)
Aussie lotto — good luck like pizza! Want to try your luck and make a lot of money? Aussie lotto can help you with this. Hundreds of thousands of people win the lottery every day, and dozens of people become millionaires every day. The biggest win to date is $759 million. This became possible thanks to the Aussie lotto. That's cool! You can be one of them. For this you only need to buy a lottery ticket. How to play the lottery and Aussie lotto: for Aussie lotto, you can follow these three steps. How to win the lottery and Aussie lotto: more complex strategies are based on mathematics, statistics, and logic. We will not talk about them here. You can apply these or other strategies to Aussie lotto.