text
stringlengths
29
850k
# This file is part of Mylar. # # Mylar is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Mylar is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Mylar. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import re import time import threading import platform import urllib, urllib2 from xml.dom.minidom import parseString, Element from xml.parsers.expat import ExpatError import requests import mylar from mylar import logger, db, cv from mylar.helpers import multikeysort, replace_all, cleanName, listLibrary, listStoryArcs import httplib mb_lock = threading.Lock() def patch_http_response_read(func): def inner(*args): try: return func(*args) except httplib.IncompleteRead, e: return e.partial return inner httplib.HTTPResponse.read = patch_http_response_read(httplib.HTTPResponse.read) if platform.python_version() == '2.7.6': httplib.HTTPConnection._http_vsn = 10 httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0' def pullsearch(comicapi, comicquery, offset, type): cnt = 1 for x in comicquery: if cnt == 1: filterline = '%s' % x else: filterline+= ',name:%s' % x cnt+=1 PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + filterline + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,deck,description,first_issue,last_issue&format=xml&sort=date_last_updated:desc&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page #all these imports are standard on most modern python implementations #logger.info('MB.PULLURL:' + PULLURL) 
#new CV API restriction - one api request / second. if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2: time.sleep(2) else: time.sleep(mylar.CONFIG.CVAPI_RATE) #download the file: payload = None try: r = requests.get(PULLURL, params=payload, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS) except Exception as e: logger.warn('Error fetching data from ComicVine: %s' % e) return try: dom = parseString(r.content) #(data) except ExpatError: if u'<title>Abnormal Traffic Detected' in r.content: logger.error('ComicVine has banned this server\'s IP address because it exceeded the API rate limit.') else: logger.warn('[WARNING] ComicVine is not responding correctly at the moment. This is usually due to some problems on their end. If you re-try things again in a few moments, it might work properly.') return except Exception as e: logger.warn('[ERROR] Error returned from CV: %s' % e) return else: return dom def findComic(name, mode, issue, limityear=None, type=None): #with mb_lock: comicResults = None comicLibrary = listLibrary() comiclist = [] arcinfolist = [] commons = ['and', 'the', '&', '-'] for x in commons: cnt = 0 for m in re.finditer(x, name.lower()): cnt +=1 tehstart = m.start() tehend = m.end() if any([x == 'the', x == 'and']): if len(name) == tehend: tehend =-1 if not all([tehstart == 0, name[tehend] == ' ']) or not all([tehstart != 0, name[tehstart-1] == ' ', name[tehend] == ' ']): continue else: name = name.replace(x, ' ', cnt) originalname = name if '+' in name: name = re.sub('\+', 'PLUS', name) pattern = re.compile(ur'\w+', re.UNICODE) name = pattern.findall(name) if '+' in originalname: y = [] for x in name: y.append(re.sub("PLUS", "%2B", x)) name = y if limityear is None: limityear = 'None' comicquery = name if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None: logger.warn('You have not specified your own ComicVine API key - this is a requirement. 
Get your own @ http://api.comicvine.com.') return else: comicapi = mylar.CONFIG.COMICVINE_API if type is None: type = 'volume' #let's find out how many results we get from the query... searched = pullsearch(comicapi, comicquery, 0, type) if searched is None: return False totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText logger.fdebug("there are " + str(totalResults) + " search results...") if not totalResults: return False if int(totalResults) > 1000: logger.warn('Search returned more than 1000 hits [' + str(totalResults) + ']. Only displaying first 1000 results - use more specifics or the exact ComicID if required.') totalResults = 1000 countResults = 0 while (countResults < int(totalResults)): #logger.fdebug("querying " + str(countResults)) if countResults > 0: offsetcount = countResults searched = pullsearch(comicapi, comicquery, offsetcount, type) comicResults = searched.getElementsByTagName(type) body = '' n = 0 if not comicResults: break for result in comicResults: #retrieve the first xml tag (<tag>data</tag>) #that the parser finds with name tagName: arclist = [] if type == 'story_arc': #call cv.py here to find out issue count in story arc try: logger.fdebug('story_arc ascension') names = len(result.getElementsByTagName('name')) n = 0 logger.fdebug('length: ' + str(names)) xmlpub = None #set this incase the publisher field isn't populated in the xml while (n < names): logger.fdebug(result.getElementsByTagName('name')[n].parentNode.nodeName) if result.getElementsByTagName('name')[n].parentNode.nodeName == 'story_arc': logger.fdebug('yes') try: xmlTag = result.getElementsByTagName('name')[n].firstChild.wholeText xmlTag = xmlTag.rstrip() logger.fdebug('name: ' + xmlTag) except: logger.error('There was a problem retrieving the given data from ComicVine. 
Ensure that www.comicvine.com is accessible.') return elif result.getElementsByTagName('name')[n].parentNode.nodeName == 'publisher': logger.fdebug('publisher check.') xmlpub = result.getElementsByTagName('name')[n].firstChild.wholeText n+=1 except: logger.warn('error retrieving story arc search results.') return siteurl = len(result.getElementsByTagName('site_detail_url')) s = 0 logger.fdebug('length: ' + str(names)) xmlurl = None while (s < siteurl): logger.fdebug(result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName) if result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName == 'story_arc': try: xmlurl = result.getElementsByTagName('site_detail_url')[s].firstChild.wholeText except: logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.') return s+=1 xmlid = result.getElementsByTagName('id')[0].firstChild.wholeText if xmlid is not None: arcinfolist = storyarcinfo(xmlid) logger.info('[IMAGE] : ' + arcinfolist['comicimage']) comiclist.append({ 'name': xmlTag, 'comicyear': arcinfolist['comicyear'], 'comicid': xmlid, 'cvarcid': xmlid, 'url': xmlurl, 'issues': arcinfolist['issues'], 'comicimage': arcinfolist['comicimage'], 'publisher': xmlpub, 'description': arcinfolist['description'], 'deck': arcinfolist['deck'], 'arclist': arcinfolist['arclist'], 'haveit': arcinfolist['haveit'] }) else: comiclist.append({ 'name': xmlTag, 'comicyear': arcyear, 'comicid': xmlid, 'url': xmlurl, 'issues': issuecount, 'comicimage': xmlimage, 'publisher': xmlpub, 'description': xmldesc, 'deck': xmldeck, 'arclist': arclist, 'haveit': haveit }) logger.fdebug('IssueID\'s that are a part of ' + xmlTag + ' : ' + str(arclist)) else: xmlcnt = result.getElementsByTagName('count_of_issues')[0].firstChild.wholeText #here we can determine what called us, and either start gathering all issues or just limited ones. 
if issue is not None and str(issue).isdigit(): #this gets buggered up with NEW/ONGOING series because the db hasn't been updated #to reflect the proper count. Drop it by 1 to make sure. limiter = int(issue) - 1 else: limiter = 0 #get the first issue # (for auto-magick calcs) iss_len = len(result.getElementsByTagName('name')) i=0 xmlfirst = '1' xmllast = None try: while (i < iss_len): if result.getElementsByTagName('name')[i].parentNode.nodeName == 'first_issue': xmlfirst = result.getElementsByTagName('issue_number')[i].firstChild.wholeText if '\xbd' in xmlfirst: xmlfirst = '1' #if the first issue is 1/2, just assume 1 for logistics elif result.getElementsByTagName('name')[i].parentNode.nodeName == 'last_issue': xmllast = result.getElementsByTagName('issue_number')[i].firstChild.wholeText if all([xmllast is not None, xmlfirst is not None]): break i+=1 except: xmlfirst = '1' if all([xmlfirst == xmllast, xmlfirst.isdigit(), xmlcnt == '0']): xmlcnt = '1' #logger.info('There are : ' + str(xmlcnt) + ' issues in this series.') #logger.info('The first issue started at # ' + str(xmlfirst)) cnt_numerical = int(xmlcnt) + int(xmlfirst) # (of issues + start of first issue = numerical range) #logger.info('The maximum issue number should be roughly # ' + str(cnt_numerical)) #logger.info('The limiter (issue max that we know of) is # ' + str(limiter)) if cnt_numerical >= limiter: cnl = len (result.getElementsByTagName('name')) cl = 0 xmlTag = 'None' xmlimage = "cache/blankcover.jpg" xml_lastissueid = 'None' while (cl < cnl): if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'volume': xmlTag = result.getElementsByTagName('name')[cl].firstChild.wholeText #break if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'image': xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'last_issue': xml_lastissueid = result.getElementsByTagName('id')[cl].firstChild.wholeText 
cl+=1 if (result.getElementsByTagName('start_year')[0].firstChild) is not None: xmlYr = result.getElementsByTagName('start_year')[0].firstChild.wholeText else: xmlYr = "0000" yearRange = [] tmpYr = re.sub('\?', '', xmlYr) if tmpYr.isdigit(): yearRange.append(tmpYr) tmpyearRange = int(xmlcnt) / 12 if float(tmpyearRange): tmpyearRange +1 possible_years = int(tmpYr) + tmpyearRange for i in range(int(tmpYr), int(possible_years),1): if not any(int(x) == int(i) for x in yearRange): yearRange.append(str(i)) logger.fdebug('[RESULT][' + str(limityear) + '] ComicName:' + xmlTag + ' -- ' + str(xmlYr) + ' [Series years: ' + str(yearRange) + ']') if tmpYr != xmlYr: xmlYr = tmpYr if any(map(lambda v: v in limityear, yearRange)) or limityear == 'None': xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText idl = len (result.getElementsByTagName('id')) idt = 0 xmlid = None while (idt < idl): if result.getElementsByTagName('id')[idt].parentNode.nodeName == 'volume': xmlid = result.getElementsByTagName('id')[idt].firstChild.wholeText break idt+=1 if xmlid is None: logger.error('Unable to figure out the comicid - skipping this : ' + str(xmlurl)) continue publishers = result.getElementsByTagName('publisher') if len(publishers) > 0: pubnames = publishers[0].getElementsByTagName('name') if len(pubnames) >0: xmlpub = pubnames[0].firstChild.wholeText else: xmlpub = "Unknown" else: xmlpub = "Unknown" #ignore specific publishers on a global scale here. if mylar.CONFIG.BLACKLISTED_PUBLISHERS is not None and any([x for x in mylar.CONFIG.BLACKLISTED_PUBLISHERS if x.lower() == xmlpub.lower()]): logger.fdebug('Blacklisted publisher [' + xmlpub + ']. Ignoring this result.') continue try: xmldesc = result.getElementsByTagName('description')[0].firstChild.wholeText except: xmldesc = "None" #this is needed to display brief synopsis for each series on search results page. 
try: xmldeck = result.getElementsByTagName('deck')[0].firstChild.wholeText except: xmldeck = "None" xmltype = None if xmldeck != 'None': if any(['print' in xmldeck.lower(), 'digital' in xmldeck.lower(), 'paperback' in xmldeck.lower(), 'one shot' in re.sub('-', '', xmldeck.lower()).strip(), 'hardcover' in xmldeck.lower()]): if all(['print' in xmldeck.lower(), 'reprint' not in xmldeck.lower()]): xmltype = 'Print' elif 'digital' in xmldeck.lower(): xmltype = 'Digital' elif 'paperback' in xmldeck.lower(): xmltype = 'TPB' elif 'hardcover' in xmldeck.lower(): xmltype = 'HC' elif 'oneshot' in re.sub('-', '', xmldeck.lower()).strip(): xmltype = 'One-Shot' else: xmltype = 'Print' if xmldesc != 'None' and xmltype is None: if 'print' in xmldesc[:60].lower() and all(['print edition can be found' not in xmldesc.lower(), 'reprints' not in xmldesc.lower()]): xmltype = 'Print' elif 'digital' in xmldesc[:60].lower() and 'digital edition can be found' not in xmldesc.lower(): xmltype = 'Digital' elif all(['paperback' in xmldesc[:60].lower(), 'paperback can be found' not in xmldesc.lower()]) or 'collects' in xmldesc[:60].lower(): xmltype = 'TPB' elif 'hardcover' in xmldesc[:60].lower() and 'hardcover can be found' not in xmldesc.lower(): xmltype = 'HC' elif any(['one-shot' in xmldesc[:60].lower(), 'one shot' in xmldesc[:60].lower()]) and any(['can be found' not in xmldesc.lower(), 'following the' not in xmldesc.lower()]): i = 0 xmltype = 'One-Shot' avoidwords = ['preceding', 'after the special', 'following the'] while i < 2: if i == 0: cbd = 'one-shot' elif i == 1: cbd = 'one shot' tmp1 = xmldesc[:60].lower().find(cbd) if tmp1 != -1: for x in avoidwords: tmp2 = xmldesc[:tmp1].lower().find(x) if tmp2 != -1: xmltype = 'Print' i = 3 break i+=1 else: xmltype = 'Print' if xmlid in comicLibrary: haveit = comicLibrary[xmlid] else: haveit = "No" comiclist.append({ 'name': xmlTag, 'comicyear': xmlYr, 'comicid': xmlid, 'url': xmlurl, 'issues': xmlcnt, 'comicimage': xmlimage, 'publisher': 
xmlpub, 'description': xmldesc, 'deck': xmldeck, 'type': xmltype, 'haveit': haveit, 'lastissueid': xml_lastissueid, 'seriesrange': yearRange # returning additional information about series run polled from CV }) #logger.fdebug('year: %s - constraint met: %s [%s] --- 4050-%s' % (xmlYr,xmlTag,xmlYr,xmlid)) else: #logger.fdebug('year: ' + str(xmlYr) + ' - contraint not met. Has to be within ' + str(limityear)) pass n+=1 #search results are limited to 100 and by pagination now...let's account for this. countResults = countResults + 100 return comiclist def storyarcinfo(xmlid): comicLibrary = listStoryArcs() arcinfo = {} if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None: logger.warn('You have not specified your own ComicVine API key - this is a requirement. Get your own @ http://api.comicvine.com.') return else: comicapi = mylar.CONFIG.COMICVINE_API #respawn to the exact id for the story arc and count the # of issues present. ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,publisher,name,first_appeared_in_issue,deck,image&format=xml&offset=0' #logger.fdebug('arcpull_url:' + str(ARCPULL_URL)) #new CV API restriction - one api request / second. 
if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2: time.sleep(2) else: time.sleep(mylar.CONFIG.CVAPI_RATE) #download the file: payload = None try: r = requests.get(ARCPULL_URL, params=payload, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS) except Exception as e: logger.warn('While parsing data from ComicVine, got exception: %s' % e) return try: arcdom = parseString(r.content) except ExpatError: if u'<title>Abnormal Traffic Detected' in r.content: logger.error('ComicVine has banned this server\'s IP address because it exceeded the API rate limit.') else: logger.warn('While parsing data from ComicVine, got exception: %s for data: %s' % (e, r.content)) return except Exception as e: logger.warn('While parsing data from ComicVine, got exception: %s for data: %s' % (e, r.content)) return try: logger.fdebug('story_arc ascension') issuedom = arcdom.getElementsByTagName('issue') issuecount = len( issuedom ) #arcdom.getElementsByTagName('issue') ) isc = 0 arclist = '' ordernum = 1 for isd in issuedom: zeline = isd.getElementsByTagName('id') isdlen = len( zeline ) isb = 0 while ( isb < isdlen): if isc == 0: arclist = str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum) else: arclist += '|' + str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum) ordernum+=1 isb+=1 isc+=1 except: logger.fdebug('unable to retrive issue count - nullifying value.') issuecount = 0 try: firstid = None arcyear = None fid = len ( arcdom.getElementsByTagName('id') ) fi = 0 while (fi < fid): if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue': if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid: logger.fdebug('hit it.') firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText break # - dont' break out here as we want to gather ALL the issue ID's since it's here fi+=1 logger.fdebug('firstid: ' + str(firstid)) if firstid is not None: firstdom = cv.pulldetails(comicid=None, 
type='firstissue', issueid=firstid) logger.fdebug('success') arcyear = cv.Getissue(firstid,firstdom,'firstissue') except: logger.fdebug('Unable to retrieve first issue details. Not caclulating at this time.') try: xmlimage = arcdom.getElementsByTagName('super_url')[0].firstChild.wholeText except: xmlimage = "cache/blankcover.jpg" try: xmldesc = arcdom.getElementsByTagName('desc')[0].firstChild.wholeText except: xmldesc = "None" try: xmlpub = arcdom.getElementsByTagName('publisher')[0].firstChild.wholeText except: xmlpub = "None" try: xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText except: xmldeck = "None" if xmlid in comicLibrary: haveit = comicLibrary[xmlid] else: haveit = "No" arcinfo = { #'name': xmlTag, #theese four are passed into it only when it's a new add #'url': xmlurl, #needs to be modified for refreshing to work completely. #'publisher': xmlpub, 'comicyear': arcyear, 'comicid': xmlid, 'issues': issuecount, 'comicimage': xmlimage, 'description': xmldesc, 'deck': xmldeck, 'arclist': arclist, 'haveit': haveit, 'publisher': xmlpub } return arcinfo
I am happy to announce the availability of TamiFlex 2.0. This new release includes a couple of bug fixes but also some important changes and new features. The Play-Out Agent is now extensible: you can add your own instrumentation probes if you like. By default, the agent can now log many more reflective calls than before. The probes, as well as other options, are now configured through configuration files. Neither agent accepts command-line options any longer. We highly recommend reading our documentation on those files. Also we make available, for the first time, an Eclipse Plugin that allows you to play-out and play-in directly from within Eclipse. The Play-In Agent and Booster did not change much.
import datetime import time import string import urllib2 import math import redis import json import eto import py_cf import os import base64 import load_files import watch_dog class SprinklerQueueElementControl( ): def __init__(self,redis,io_control,alarm_queue,counter_devices): self.redis = redis self.alarm_queue = alarm_queue self.io_control = io_control self.counter_devices = counter_devices self.app_files = load_files.APP_FILES(redis) self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",0) def check_for_excessive_flow_rate( self,*args ): flow_value = float( check_redis_value( "global_flow_sensor_corrected" ) ) max_flow = float( check_redis_value( "FLOW_CUT_OFF")) if max_flow == 0: return # feature is not turned on compact_data = self.redis.lindex( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE",0 ) json_string = base64.b64decode(compact_data) json_object = json.loads(json_string) run_time = int( json_object["run_time"]) elasped_time = int(json_object["elasped_time"]) schedule_step = int(json_object["step"]) step_number = json_object["step"] schedule_name = json_object["schedule_name"] if elasped_time < 3 : return # let flow stabilize if flow_value > max_flow: over_load_time = int(self.redis.hget("CONTROL_VARIBALES","MAX_FLOW_TIME")) +1 if over_load_time > 2: self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","ON") self.alarm_queue.store_past_action_queue("IRRIGATION:FLOW_ABORT","RED", { "schedule_name":json_object["schedule_name"],"step_number":json_object["step"], "flow_value":flow_value,"max_flow":max_flow } ) self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",0) else: self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",over_load_time) else: self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",0) def check_redis_value( self,key): value = redis.hget( "CONTROL_VARIABLES",key ) if value == None: value = 0 return value def check_current(self,*args): compact_data = self.redis.lindex( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE",0 ) json_string = 
base64.b64decode(compact_data) #print "json_string",json_string json_object = json.loads(json_string) temp = float( self.redis.hget( "CONTROL_VARIABLES","coil_current" )) print "check_current temp",temp if temp > 24: self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","ON") self.clean_up_irrigation_cell( json_object ) self.alarm_queue.store_past_action_queue("IRRIGATION:CURRENT_ABORT","RED", { "schedule_name":json_object["schedule_name"],"step_number":json_object["step"] } ) return "RESET" else: return "DISABLE" def start(self, *args ): #print "start ------------------------------------------------->" self.redis.hset("CONTROL_VARIBALES","MAX_FLOW_TIME",0) compact_data = self.redis.lindex( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE",0 ) json_string = base64.b64decode(compact_data) json_object = json.loads(json_string) if self.check_redis_value("SUSPEND") == "ON": #self.log_start_step( schedule_name, json_object["step"]) #self.io_control.turn_off_io(json_object["io_setup"]) #self.io_control.disable_all_sprinklers() return # System is not processing commands right now #print "start --- #1" self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF") #print "made it here" run_time = int( json_object["run_time"]) elasped_time = int(json_object["elasped_time"]) schedule_step = int(json_object["step"]) step_number = json_object["step"] schedule_name = json_object["schedule_name"] #print "run_time",run_time if json_object["eto_enable"] == True: run_time = self.eto_update( run_time , json_object["io_setup"] ) #print "start ---#2 runtime",run_time if run_time == 0: self.clean_up_irrigation_cell(json_object) json_object["run_time"] = 0 self.alarm_queue.store_past_action_queue("IRRIGATION:START:ETO_RESTRICTION","YELLOW", json_object ) return "RESET" self.io_control.load_duration_counters( run_time ) #print "made it here" self.io_control.turn_on_master_valves() self.io_control.turn_on_io( json_object["io_setup"] ) station_by_pass = 0 elasped_time = 1 self.redis.hset( 
"CONTROL_VARIABLES","sprinkler_ctrl_mode","AUTO") self.redis.hset( "CONTROL_VARIABLES","schedule_name", schedule_name ) self.redis.hset( "CONTROL_VARIABLES","schedule_step_number", step_number ) self.redis.hset( "CONTROL_VARIABLES","schedule_step", schedule_step ) self.redis.hset( "CONTROL_VARIABLES","schedule_time_count", elasped_time ) self.redis.hset( "CONTROL_VARIABLES","schedule_time_max", run_time ) self.log_start_step( schedule_name, json_object["step"]) #print "current_log",self.current_log_object #print "flow_log", self.flow_log_object json_object["elasped_time"] = elasped_time json_object["run_time"] = run_time json_string = json.dumps( json_object ) compact_data = base64.b64encode(json_string) #print "start #end json string ",json_string self.redis.lset( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE", 0, compact_data ) return "DISABLE" def monitor( self, *args ): #print "monitor --------------->" # check to see if something is in the queue length = self.redis.llen( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" ) #print "length",length if length == 0 : return "CONTINUE" compact_data = self.redis.lindex( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE",0 ) json_string = base64.b64decode(compact_data) json_object = json.loads(json_string) run_time = int( json_object["run_time"]) elasped_time = int(json_object["elasped_time"]) schedule_step = int(json_object["step"]) step_number = json_object["step"] schedule_name = json_object["schedule_name"] if (self.check_redis_value("SUSPEND") == "ON") : #self.io_control.turn_off_io(json_object["io_setup"]) #self.io_control.disable_all_sprinklers() return "HALT" # System is not processing commands right now elasped_time = elasped_time +1 self.log_sensors( schedule_name, schedule_step) if json_object["eto_enable"] == True: self.update_eto_queue_a( 1, json_object["io_setup"] ) if (elasped_time <= run_time ) and ( self.check_redis_value("SKIP_STATION") != "ON" ): self.io_control.turn_on_io( json_object["io_setup"] ) 
self.io_control.turn_on_master_valves() self.redis.hset( "CONTROL_VARIABLES","schedule_time_count", elasped_time ) json_object["elasped_time"] = elasped_time json_string = json.dumps( json_object ) compact_data = base64.b64encode(json_string) self.redis.lset( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE", 0, compact_data ) return_value = "RESET" else: #print "normal end" self.log_step_stop() self.clean_up_irrigation_cell(json_object) return_value = "DISABLE" #print "cell returnValue is ",return_value return return_value def clean_up_irrigation_cell( self ,json_object ): #print "made it to cleanup" self.redis.delete("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE") self.redis.hset("CONTROL_VARIABLES", "schedule_name","offline" ) self.redis.hset("CONTROL_VARIABLES", "schedule_step_number",0 ) self.redis.hset("CONTROL_VARIABLES", "schedule_step",0 ) self.redis.hset("CONTROL_VARIABLES", "schedule_time_count",0 ) self.redis.hset( "CONTROL_VARIABLES","schedule_time_max",0 ) self.redis.hset( "CONTROL_VARIABLES","sprinkler_ctrl_mode","AUTO") self.redis.hset( "CONTROL_VARIABLES","SKIP_STATION","OFF") self.io_control.turn_off_io(json_object["io_setup"]) self.io_control.disable_all_sprinklers() self.io_control.clear_duration_counters() self.io_control.turn_off_master_valves() def log_sensors(self, schedule_name,step): if hasattr(self, 'current_log_object') == False: self.current_log_object = self.initialize_object( "current_log",schedule_name,step) if hasattr(self, 'flow_log_object') == False: self.flow_log_object = self.initialize_object( "flow_log",schedule_name,step ) coil_current = float( self.redis.hget( "CONTROL_VARIABLES","coil_current" )) self.log_coil_current ( coil_current ) for i in self.counter_devices.keys(): sensor_name = i flow_value = self.redis.lindex("QUEUES:SPRINKLER:FLOW:"+str(i),0) self.log_flow_rate( sensor_name, flow_value ) def log_flow_rate( self, sensor_name, flow_value ): if self.flow_log_object["fields"].has_key( sensor_name ) == False: self.initialize_field( 
self.flow_log_object, sensor_name ) temp = self.flow_log_object["fields"][ sensor_name ] temp["count"] = temp["count"]+1 temp["data"].append( flow_value) if flow_value > temp["max"] : temp["max"] = flow_value if flow_value < temp["min"] : temp["min"] = flow_value def log_coil_current ( self,coil_current ): if self.current_log_object["fields"].has_key( "coil_current" ) == False: self.initialize_field( self.current_log_object, "coil_current") temp = self.current_log_object["fields"]["coil_current"] temp["count"] = temp["count"]+1 temp["data"].append( coil_current ) if coil_current > temp["max"] : temp["max"] = coil_current if coil_current < temp["min"] : temp["min"] = coil_current def log_start_step( self, schedule_name, step): #print "made it log start step" self.current_log_object = self.initialize_object( "current_log",schedule_name,step) self.flow_log_object = self.initialize_object( "flow_log",schedule_name,step ) self.alarm_queue.store_event_queue( "start_step", { "schedule":schedule_name, "step":step } ) def log_step_stop( self ): if hasattr(self, 'flow_log_object') == False: return # case when eto abort obj = self.flow_log_object self.alarm_queue.store_past_action_queue("IRRIGATION:END","GREEN", { "schedule_name":obj["schedule_name"],"step_name":obj["step"] } ) self.store_object( self.current_log_object, "coil" ) self.store_object( self.flow_log_object, "flow" ) obj = {} obj["coil"] = self.current_log_object obj["flow"] = self.flow_log_object self.alarm_queue.store_event_queue( "irrigatation_store_object", obj ) self.current_log_object = None self.flow_log_object = None def store_object( self, obj ,queue_type ): if obj == None: return #self.add_limits(obj, queue_type ) self.compute_object_statistics( obj ) queue = "log_data:"+queue_type+":"+obj["schedule_name"]+":"+str(obj["step"]) json_string = json.dumps(obj) compact_data = base64.b64encode(json_string) self.redis.lpush( queue, json_string ) self.redis.ltrim( queue,0,100) def initialize_object( self, 
name,schedule_name,step ): obj = {} obj["name"] = name obj["time"] = time.time() obj["schedule_name"] = schedule_name obj["step"] = step obj["fields"] = {} return obj def initialize_field( self, obj ,field): if obj["fields"].has_key(field) == False: obj["fields"][field] = {} obj["fields"][field]["max"] = -1000000 obj["fields"][field]["min"] = 1000000 obj["fields"][field]["count"] = 0 obj["fields"][field]["data"] = [] def compute_object_statistics( self, obj ): #print "compute object statistics", obj for j in obj["fields"] : temp = obj["fields"][j] temp["total"] = 0 count = 0 for m in temp["data"]: m = float(m) count = count +1 if count > 5: temp["total"] = temp["total"] + m #print "count ",count if count > 5: temp["average"] = temp["total"]/(count -5) else: temp["average"] = 0 temp["std"] = 0 count = 0 for m in temp["data"]: m = float(m) count = count +1 if count > 5 : temp["std"] = temp["std"] + (m -temp["average"])*(m-temp["average"]) temp["std"] = math.sqrt(temp["std"]/(count-5)) else: temp["std"] = 0 ## 1 gallon is 0.133681 ft3 ## assuming a 5 foot radius ## a 12 gallon/hour head 0.2450996343 inch/hour ## a 14 gallon/hour head 0.2859495733 inch/hour ## a 16 gallon/hour head 0.3267995123 inch/hour ## ## ## ## ## capacity of soil ## for silt 2 feet recharge rate 30 % recharge inches -- .13 * 24 *.3 = .936 inch ## for sand 1 feet recharge rate 30 % recharge inches -- .06 * 12 *.3 = .216 inch ## ## recharge rate for is as follows for 12 gallon/hour head: ## sand 1 feet .216/.245 which is 52 minutes ## silt 2 feet recharge rate is 3.820 hours or 229 minutes ## ## {"controller":"satellite_1", "pin": 9, "recharge_eto": 0.216, "recharge_rate":0.245 }, ## eto_site_data def eto_update( self, schedule_run_time, io_list ): self.eto_site_data = self.app_files.load_file( "eto_site_setup.json" ) manage_eto = self.redis.hget( "CONTROL_VARIABLES","ETO_MANAGE_FLAG" ) if manage_eto == None: manage_eto = 1 self.redis.hset("CONTROL_VARIABLES", "ETO_MANAGE_FLAG",manage_eto) 
manage_eto = int( manage_eto ) if manage_eto == 1: sensor_list = self.find_queue_names( io_list ) if len(sensor_list) != 0: run_time = self.find_largest_runtime( schedule_run_time, sensor_list ) if run_time < schedule_run_time : schedule_run_time = run_time return schedule_run_time def find_queue_names( self, io_list ): eto_values = [] for j in io_list: controller = j["remote"] bits = j["bits"] bit = bits[0] index = 0 for m in self.eto_site_data: if (m["controller"] == controller) and (m["pin"] == bit): queue_name = controller+"|"+str(bit) data = self.redis.hget( "ETO_RESOURCE", queue_name ) eto_values.append( [index, data, queue_name ] ) index = index +1 #print "eto values ",eto_values return eto_values def find_largest_runtime( self, run_time, sensor_list ): runtime = 0 for j in sensor_list: index = j[0] deficient = float(j[1]) eto_temp = self.eto_site_data[index] recharge_eto = float( eto_temp["recharge_eto"] ) recharge_rate = float(eto_temp["recharge_rate"]) if float(deficient) > recharge_eto : runtime_temp = (deficient /recharge_rate)*60 if runtime_temp > runtime : runtime = runtime_temp #print "run time",runtime return runtime def update_eto_queue_a( self, run_time, io_list ): self.eto_site_data = self.app_files.load_file( "eto_site_setup.json" ) manage_eto = self.redis.hget( "CONTROL_VARIABLES","ETO_MANAGE_FLAG" ) if manage_eto == None: manage_eto = 1 self.redis.hset( "CONTROL_VARIABLES","ETO_MANAGE_FLAG",manage_eto) manage_eto = int( manage_eto ) if manage_eto == 1: sensor_list = self.find_queue_names( io_list ) if len(sensor_list) != 0: self.update_eto_queue(run_time,sensor_list) def update_eto_queue( self, run_time, sensor_list ): for l in sensor_list: j_index = l[0] queue_name = l[2] j = self.eto_site_data[ j_index ] deficient = self.redis.hget("ETO_RESOURCE", queue_name ) if deficient == None: deficient = 0 else: deficient = float(deficient) recharge_rate = float(j["recharge_rate"]) deficient = deficient - (recharge_rate/60)*run_time if deficient < 0 : 
deficient = 0 self.redis.hset( "ETO_RESOURCE", queue_name, deficient ) class SprinklerQueueControl(): def __init__(self,alarm_queue,redis): self.alarm_queue = alarm_queue self.redis = redis # # This function takes data from the IRRIGATION QUEUE And Transferrs it to the IRRIGATION_CELL_QUEUE # IRRIGATION_CELL_QUEUE only has one element in it # def load_irrigation_cell(self,chainFlowHandle, chainObj, parameters,event ): #print "load irrigation cell ######################################################################" ## if queue is empty the return ## this is for resuming an operation length = self.redis.llen("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" ) #print "made it here cell ", length if length > 0: return "RESET" length = self.redis.llen("QUEUES:SPRINKLER:IRRIGATION_QUEUE") #print "length queue ",length if length == 0: return "RESET" if self.redis.hget("CONTROL_VARIABLES","SUSPEND") == "ON": return "RESET" compact_data = self.redis.rpop( "QUEUES:SPRINKLER:IRRIGATION_QUEUE" ) json_string = base64.b64decode(compact_data) json_object = json.loads(json_string) if json_object["type"] == "RESISTANCE_CHECK": chainFlowHandle.enable_chain_base( ["resistance_check"]) self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON") return "RESET" if json_object["type"] == "CHECK_OFF": chainFlowHandle.enable_chain_base( ["check_off_chain"]) self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON") return "RESET" if json_object["type"] == "CLEAN_FILTER": chainFlowHandle.enable_chain_base( ["clean_filter_action_chain"]) self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON") return "RESET" if json_object["type"] == "IRRIGATION_STEP": #print "irrigation step" self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE", compact_data ) ''' if json_object["type"] == "START_SCHEDULE" : self.redis.set( "schedule_step_number", json_object["step_number"] ) self.store_event_queue( "irrigation_schedule_start", json_object ) if json_object["type"] == "END_SCHEDULE" : self.store_event_queue( 
"irrigation_schedule_stop", json_object ) ''' #print "load irrigation cell CONTINUE" return "DISABLE" class SprinklerControl(): def __init__(self, irrigation_control,alarm_queue,redis): self.irrigation_control = irrigation_control self.alarm_queue = alarm_queue self.redis = redis self.commands = {} self.commands["OFFLINE"] = self.go_offline self.commands["QUEUE_SCHEDULE"] = self.queue_schedule self.commands["QUEUE_SCHEDULE_STEP"] = self.queue_schedule_step self.commands["QUEUE_SCHEDULE_STEP_TIME"] = self.queue_schedule_step_time self.commands["RESTART_PROGRAM"] = self.restart_program #tested self.commands["NATIVE_SCHEDULE"] = self.queue_schedule_step_time self.commands["NATIVE_SPRINKLER"] = self.direct_valve_control self.commands["CLEAN_FILTER"] = self.clean_filter #tested self.commands["OPEN_MASTER_VALVE"] = self.open_master_valve #tested self.commands["CLOSE_MASTER_VALVE"] = self.close_master_valve #tested self.commands["RESET_SYSTEM"] = self.reset_system #tested self.commands["CHECK_OFF"] = self.check_off #tested self.commands["SUSPEND"] = self.suspend #tested self.commands["RESUME" ] = self.resume #tested self.commands["SKIP_STATION"] = self.skip_station self.commands["RESISTANCE_CHECK"] = self.resistance_check self.app_files = load_files.APP_FILES(redis) def dispatch_sprinkler_mode(self,chainFlowHandle, chainObj, parameters,event): #try: length = self.redis.llen( "QUEUES:SPRINKLER:CTRL") #print length if length > 0: data = self.redis.rpop("QUEUES:SPRINKLER:CTRL") data = base64.b64decode(data) object_data = json.loads(data ) #print object_data["command"] print "object_data",object_data if self.commands.has_key( object_data["command"] ) : self.commands[object_data["command"]]( object_data,chainFlowHandle, chainObj, parameters,event ) else: self.alarm_queue.store_past_action_queue("Bad Irrigation Command","RED",object_data ) raise #except: #print "exception in dispatch mode" #quit() def suspend( self, *args ): 
self.alarm_queue.store_past_action_queue("SUSPEND_OPERATION","YELLOW" ) self.irrigation_control.turn_off_master_valves() self.irrigation_control.disable_all_sprinklers() self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON") def resume( self, *args ): self.alarm_queue.store_past_action_queue("RESUME_OPERATION","GREEN" ) self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF") def skip_station( self, *args ): self.alarm_queue.store_past_action_queue("SKIP_STATION","YELLOW" ,{"skip: on"} ) self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","ON" ) def resistance_check( self, object_data, chainFlowHandle, chainObj, parameters, event ): json_object = {} json_object["type"] = "RESISTANCE_CHECK" json_string = json.dumps( json_object) compact_data = base64.b64encode(json_string) self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data ) alarm_queue.store_past_action_queue( "RESISTANCE_CHECK", "GREEN", { "action":"start" } ) def check_off( self,object_data,chainFlowHandle, chainObj, parameters,event ): json_object = {} json_object["type"] = "CHECK_OFF" json_string = json.dumps( json_object) compact_data = base64.b64encode(json_string) self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data ) alarm_queue.store_past_action_queue( "CHECK_OFF", "GREEN", { "action":"start" } ) def clean_filter( self, object_data,chainFlowHandle, chainObj, parameters,event ): json_object = {} json_object["type"] = "CLEAN_FILTER" json_string = json.dumps( json_object) compact_data = base64.b64encode(json_string) self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data ) alarm_queue.store_past_action_queue( "CLEAN_FILTER", "GREEN", { "action":"start" } ) def go_offline( self, object_data,chainFlowHandle, chainObj, parameters,event ): self.alarm_queue.store_past_action_queue("OFFLINE","RED" ) self.redis.hset("CONTROL_VARIABLES","sprinkler_ctrl_mode","OFFLINE") self.irrigation_control.turn_off_master_valves() self.irrigation_control.disable_all_sprinklers() 
        # (go_offline continued) clear queues and status, suspend, and
        # restart the job-queue monitor chain
        self.clear_redis_sprinkler_data()
        self.clear_redis_irrigate_queue()
        self.redis.hset( "CONTROL_VARIABLES","schedule_name","OFFLINE")
        self.redis.hset( "CONTROL_VARIABLES","current_log_object", None )
        self.redis.hset( "CONTROL_VARIABLES","flow_log_object", None ) ### not sure of
        self.redis.hset( "CONTROL_VARIABLES","SUSPEND","ON")
        chainFlowHandle.disable_chain_base( ["monitor_irrigation_job_queue","monitor_irrigation_cell"])
        chainFlowHandle.enable_chain_base( ["monitor_irrigation_job_queue"])

    def queue_schedule( self, object_data,chainFlowHandle, chainObj, parameters,event ):
        # Queue every step of a named schedule (with ETO adjustment enabled).
        self.schedule_name = object_data["schedule_name"]
        self.load_auto_schedule(self.schedule_name)
        #self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
        self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")
        self.alarm_queue.store_past_action_queue("QUEUE_SCHEDULE","GREEN",{ "schedule":self.schedule_name } )

    def queue_schedule_step( self, object_data,chainFlowHandle, chainObj, parameters,event ):
        # Queue a single step of a named schedule, using the run time stored
        # in the schedule (ETO adjustment enabled).
        self.schedule_name = object_data["schedule_name"]
        self.schedule_step = object_data["step"]
        self.schedule_step = int(self.schedule_step)
        self.alarm_queue.store_past_action_queue("QUEUE_SCHEDULE_STEP","GREEN",{ "schedule":self.schedule_name,"step":self.schedule_step } )
        #print "queue_schedule",self.schedule_name,self.schedule_step
        self.load_step_data( self.schedule_name, self.schedule_step ,None,True )
        #self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
        self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")

    def queue_schedule_step_time( self, object_data,chainFlowHandle, chainObj, parameters,event ):
        # Diagnostics entry: run one schedule step for an explicit run_time,
        # preempting anything currently active (ETO adjustment disabled).
        self.schedule_name = object_data["schedule_name"]
        self.schedule_step = object_data["step"]
        self.schedule_step_time = object_data["run_time"]
        self.alarm_queue.store_past_action_queue("DIAGNOSTICS_SCHEDULE_STEP_TIME","YELLOW" , {"schedule_name":self.schedule_name, "schedule_step":self.schedule_step,"schedule_time":self.schedule_step_time})
        self.schedule_step = int(self.schedule_step)
        self.schedule_step_time = int(self.schedule_step_time)
        # preempt whatever is currently running
        self.irrigation_control.turn_off_master_valves()
        self.irrigation_control.disable_all_sprinklers()
        self.clear_redis_sprinkler_data()
        self.clear_redis_irrigate_queue()
        self.load_step_data( self.schedule_name, self.schedule_step, self.schedule_step_time,False )
        self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
        self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")

    def direct_valve_control( self, object_data,chainFlowHandle, chainObj, parameters,event ):
        # Manually run one valve (controller + pin) for run_time minutes,
        # preempting anything currently active.
        remote = object_data["controller"]
        pin = object_data["pin"]
        schedule_step_time = object_data["run_time"]
        pin = int(pin)
        schedule_step_time = int(schedule_step_time)
        self.alarm_queue.store_past_action_queue("DIRECT_VALVE_CONTROL","YELLOW" ,{"remote":remote,"pin":pin,"time":schedule_step_time })
        #print "made it here",object_data
        self.irrigation_control.turn_off_master_valves()
        self.irrigation_control.disable_all_sprinklers()
        self.clear_redis_sprinkler_data()
        self.clear_redis_irrigate_queue()
        #print "direct_valve_control",remote,pin,schedule_step_time
        self.load_native_data( remote,pin,schedule_step_time)
        self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
        self.redis.hset("CONTROL_VARIABLES","SKIP_STATION","OFF")

    def open_master_valve( self, object_data,chainFlowHandle, chainObj, parameters,event ):
        # Open master valves and start the web watchdog chain that will close
        # them again after a timeout.
        self.alarm_queue.store_past_action_queue("OPEN_MASTER_VALVE","YELLOW" )
        self.irrigation_control.turn_on_master_valves()
        chainFlowHandle.enable_chain_base([ "monitor_master_on_web"])

    def close_master_valve( self, object_data,chainFlowHandle, chainObj, parameters,event ):
        # Close master valves and stop the chains that keep them open.
        self.alarm_queue.store_past_action_queue("CLOSE_MASTER_VALVE","GREEN" )
        chainFlowHandle.disable_chain_base( ["manual_master_valve_on_chain"])
        chainFlowHandle.disable_chain_base( ["monitor_master_on_web"])
        self.irrigation_control.turn_off_master_valves()

    def reset_system( self, *args ):
        # Log, record the mode, and reboot the host (continues past this line).
        self.alarm_queue.store_past_action_queue("REBOOT","RED" )
        self.redis.hset( "CONTROL_VARIABLES","sprinkler_ctrl_mode","RESET_SYSTEM")
os.system("reboot") def restart_program( self, *args ): self.alarm_queue.store_past_action_queue("RESTART","RED" ) self.redis.hset( "CONTROL_VARIABLES","sprinkler_ctrl_mode","RESTART_PROGRAM") quit() def clear_redis_irrigate_queue( self,*args ): #print "clearing irrigate queue" self.redis.delete( "QUEUES:SPRINKLER:IRRIGATION_QUEUE" ) self.redis.delete( "QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE") def clear_redis_sprinkler_data(self): self.redis.hset("CONTROL_VARIABLES", "sprinkler_ctrl_mode","OFFLINE") self.redis.hset( "CONTROL_VARIABLES","schedule_name","offline" ) self.redis.hset("CONTROL_VARIABLES", "schedule_step_number",0 ) self.redis.hset("CONTROL_VARIABLES", "schedule_step",0 ) self.redis.hset("CONTROL_VARIABLES", "schedule_time_count",0 ) self.redis.hset( "CONTROL_VARIABLES","schedule_time_max",0 ) def load_auto_schedule( self, schedule_name): schedule_control = self.get_json_data( schedule_name ) step_number = len( schedule_control["schedule"] ) ### ### load schedule start ### ### #json_object = {} #json_object["type"] = "START_SCHEDULE" #json_object["schedule_name"] = schedule_name #json_object["step_number"] = step_number #json_string = json.dumps( json_object) #self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", json_string ) ### ### load step data ### ### for i in range(1,step_number+1): self.load_step_data( schedule_name, i ,None,True ) ### ### load schedule end ### ### #json_object = {} #json_object["type"] = "END_SCHEDULE" #json_object["schedule_name"] = schedule_name #json_object["step_number"] = step_number #json_string = json.dumps( json_object) #self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", json_string ) # note schedule_step_time can be None then use what is in the schedule def load_step_data( self, schedule_name, schedule_step, schedule_step_time ,eto_flag ): #print "load step data schedule name ----------------->",schedule_name, schedule_step, schedule_step_time temp = self.get_schedule_data( schedule_name, schedule_step) if temp != 
None : schedule_io = temp[0] schedule_time = temp[1] if schedule_step_time == None: schedule_step_time = schedule_time json_object = {} json_object["type"] = "IRRIGATION_STEP" json_object["schedule_name"] = schedule_name json_object["step"] = schedule_step json_object["io_setup"] = schedule_io json_object["run_time"] = schedule_step_time json_object["elasped_time"] = 0 json_object["eto_enable"] = eto_flag json_string = json.dumps( json_object) compact_data = base64.b64encode(json_string) #print "load step data ===== step data is queued" self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data ) else: self.store_event_queue( "non_existant_schedule", json_object ) raise # non schedule # this is for loading user specified data def load_native_data( self, remote,bit,time ): json_object = {} json_object["type"] = "IRRIGATION_STEP" json_object["schedule_name"] = "MANUAL" json_object["step"] = 1 json_object["io_setup"] = [{ "remote":remote, "bits":[bit] }] json_object["run_time"] = time json_object["elasped_time"] = 0 json_object["eto_enable"] = False json_string = json.dumps( json_object) compact_data = base64.b64encode(json_string) #print "native load",json_string self.redis.lpush( "QUEUES:SPRINKLER:IRRIGATION_QUEUE", compact_data) #print self.redis.llen("QUEUES:SPRINKLER:IRRIGATION_QUEUE") def get_schedule_data( self, schedule_name, schedule_step): schedule_control = self.get_json_data( schedule_name ) if schedule_control != None: io_control = schedule_control["schedule"][schedule_step -1] m = io_control[0] schedule_time = m[2] # format io_control new_io_control = [] for i in io_control: temp = { } temp["remote"] = i[0] temp["bits"] = i[1] new_io_control.append(temp) return [ new_io_control, schedule_time ] return None def get_json_data( self, schedule_name ): #print("get json data ",schedule_name) sprinkler_ctrl = self.app_files.load_file("sprinkler_ctrl.json") for j in sprinkler_ctrl : if j["name"] == schedule_name: 
json_data=open("app_data_files/"+j["link"]) json_data = json.load(json_data) #print "json data",json_data return json_data return None class Monitor(): # # Measures current and flow rate every minute # Up # def __init__(self, redis, basic_io_control,counter_devices,analog_devices, gpio_bit_input_devices, alarm_queue, udp_servers ): self.redis = redis self.basic_io_control = basic_io_control self.counter_devices = counter_devices self.analog_devices = analog_devices self.gpio_inputs = gpio_bit_input_devices self.alarm_queue = alarm_queue self.counter_time_ref = time.time() self.udp_servers = udp_servers def log_clean_filter( self,*args): self.redis.hset self.alarm_queue.store_past_action_queue("CLEAN_FILTER","GREEN" ) self.redis.hset("CONTROLLER_STATUS","clean_filter",time.time() ) def set_suspend( self, *args): self.redis.hset("CONTROL_VARIABLES","SUSPEND","ON") def set_resume( self,*args): self.redis.hset("CONTROL_VARIABLES","SUSPEND","OFF") def verify_resume( self, *args): if self.redis.hget("CONTROL_VARIABLES","SUSPEND") == "OFF": return "DISABLE" else: return "HALT" def clear_cleaning_sum(self, *args): redis.hset("CONTROL_VARIABLES","cleaning_sum",0) def check_to_clean_filter( self, chainFlowHandle, chainObj, parameters,event ): cleaning_interval = redis.hget("CONTROL_VARIABLES","CLEANING_INTERVAL") flow_value = float( check_redis_value( "global_flow_sensor_corrected" ) ) cleaning_sum = float( check_redis_value( "cleaning_sum") ) cleaning_sum = cleaning_sum + flow_value redis.hset("CONTROL_VARIABLES","cleaning_sum",cleaning_sum) if cleaning_interval == 0 : return # no cleaning interval active if cleaning_sum > cleaning_interval : chainFlowHandle.enable_chain_base(["clean_filter_action_chain"]) def update_modbus_statistics( self, *args ): servers = [] for i in self.udp_servers: temp = modbus_control.get_all_counters(i) if temp[0] == True: servers.append(i) data = json.loads(temp[1]) for j in data.keys(): if redis.hexists("MODBUS_STATISTICS:"+i,j) == False: 
self.redis.hset("MODBUS_STATISTICS:"+i,j,json.dumps(data[j])) else: temp_json = redis.hget("MODBUS_STATISTICS:"+i,j) temp_value = json.loads(temp_json) temp_value["address"] = j temp_value["failures"] = int(temp_value["failures"]) +int(data[j]["failures"]) temp_value["counts"] = int(temp_value["counts"]) + int(data[j]["counts"]) temp_value["total_failures"] = int(temp_value["total_failures"]) +int(data[j]["total_failures"]) temp_json = json.dumps(temp_value) self.redis.hset("MODBUS_STATISTICS:"+i,j,temp_json) modbus_control.clear_all_counters(i) self.redis.set("MODBUS_INTERFACES",json.dumps(servers)) def clear_modbus_statistics( self,*args): interfaces_json = self.redis.get("MODBUS_INTERFACES") interfaces_value = json.loads(interfaces_json) for i in interfaces_value: self.redis.delete("MODBUS_STATISTICS:"+i) def update_time_stamp( self, *args): self.alarm_queue.update_time_stamp() def measure_input_gpio( self, *args ): for i in self.gpio_inputs: self.basic_io_control.get_gpio_bit(i) # need to store values def measure_flow_rate ( self, *args ): deltat = time.time()-self.counter_time_ref self.counter_time_ref = time.time() for i in counter_devices.keys(): flow_value = self.basic_io_control.measure_counter(deltat,i) self.redis.lpush("QUEUES:SPRINKLER:FLOW:"+str(i),flow_value ) self.redis.ltrim("QUEUES:SPRINKLER:FLOW:"+str(i),0,800) if i == "main_sensor": self.redis.hset("CONTROL_VARIABLES","global_flow_sensor",flow_value ) conversion_rate = counter_devices[i]["conversion_factor"] self.redis.hset("CONTROL_VARIABLES","global_flow_sensor_corrected",flow_value*conversion_rate ) def measure_current( self, *args ): for i in analog_devices.keys(): current = self.basic_io_control.get_analog( i ) self.redis.lpush( "QUEUES:SPRINKLER:CURRENT:"+i,current ) self.redis.ltrim( "QUEUES:SPRINKLER:CURRENT:"+i,0,800) self.redis.hset( "CONTROL_VARIABLES",i, current ) def measure_current_a( self, *args ): for i in analog_devices.keys(): current = self.basic_io_control.get_analog( i ) 
            self.redis.hset( "CONTROL_VARIABLES",i, current )

class PLC_WATCH_DOG():
    # Thin wrapper over the PLC watchdog interface; every method returns
    # "DISABLE" so it can be used directly as a one-shot chain step.
    def __init__(self, redis, alarm_queue,watch_dog_interface ):
        self.redis = redis
        self.alarm_queue = alarm_queue
        self.watch_dog_interface = watch_dog_interface

    def read_wd_flag( self,*arg ):
        # Best-effort read; communication failures are deliberately ignored.
        try:
            return_value = self.watch_dog_interface.read_wd_flag()
            #print "read_wd_flag",return_value
        except:
            pass
        return "DISABLE"

    def write_wd_flag( self,value,*arg ):
        # Best-effort watchdog pat (always writes 1; `value` is unused).
        try:
            self.watch_dog_interface.write_wd_flag(1)
        except:
            pass
        return "DISABLE"

    def read_mode_switch( self,value,*arg ):
        return_value = self.watch_dog_interface.read_mode_switch()
        #print "read_mode_switch",return_value
        return "DISABLE"

    def read_mode( self,value,*arg ):
        return_value = self.watch_dog_interface.read_mode()
        #print "read_mode_switch",return_value
        return "DISABLE"

if __name__ == "__main__":
    import datetime
    import time
    import string
    import urllib2
    import math
    import redis
    import json
    import eto
    import py_cf
    import os
    import base64
    import io_control_backup.alarm_queue
    import io_control_backup.modbus_UDP_device
    import io_control_backup.click
    import io_control_backup.basic_io_control
    import io_control_backup.irrigation_ctl
    import io_control_backup.new_instrument
    import watch_dog
    #ir_ctl = Irrigation_Control("/media/mmc1/app_data_files","/media/mmc1/system_data_files")
    # pulls in device tables: remote_devices, irrigation_io, analog_devices,
    # counter_devices, gpio_bit_input_devices, master_valve_list, load_files...
    from data_management.configuration import *
    # NOTE: rebinds the name `redis` from the module to a client instance
    redis = redis.StrictRedis( host = '192.168.1.84', port=6379, db = 0 )
    app_files = load_files.APP_FILES(redis)
    sys_files = load_files.SYS_FILES(redis)
    # logical-name -> redis-key map handed to the IO layer
    redis_dict = {}
    redis_dict["GPIO_BITS"] = "GPIO_BITS"
    redis_dict["GPIO_REGS"] = "GPIO_REGS"
    redis_dict["GPIO_ADC"] = "GPIO_ADC"
    redis_dict["COUNTER"] = "COUNTER"
    redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
    # count process (re)starts
    redis.hincrby("CONTROLLER_STATUS", "irrigation_resets")
    alarm_queue = io_control_backup.alarm_queue.AlarmQueue( redis,"cloud_alarm_queue" )
    io_server = io_control_backup.modbus_UDP_device.ModbusUDPDeviceClient(remote_devices , "192.168.1.84")
    plc_click = io_control_backup.click.PLC_Click( alarm_queue, io_server, redis, redis_dict )
    modbus_control = io_control_backup.modbus_UDP_device.ModbusUDPDeviceClient( [], "192.168.1.84")
    plc_map = { "CLICK":plc_click }
    basic_io_control = io_control_backup.basic_io_control.BasicIo( redis_dict = redis_dict, redis_server=redis, plc_interface=plc_map , gpio_bit_input_devices=gpio_bit_input_devices, gpio_bit_output_devices= None, gpio_reg_input_devices=None, gpio_reg_output_devices= None, analog_devices=analog_devices, counter_devices=counter_devices )
    irrigation_io_control = io_control_backup.irrigation_ctl.IrrigationControl( irrigation_io, master_valve_list, plc_map, redis )
    plc_watch_dog_interface = io_control_backup.irrigation_ctl.WatchDogControl( remote_devices, plc_map )
    plc_watch_dog = PLC_WATCH_DOG( redis, alarm_queue,plc_watch_dog_interface )
    monitor = Monitor(redis, basic_io_control, counter_devices, analog_devices, gpio_bit_input_devices,alarm_queue, ["192.168.1.84"] )
    monitor.update_modbus_statistics()
    wd_client = watch_dog.Watch_Dog_Client(redis, "irrigation_ctrl","irrigation control")
    sprinkler_control = SprinklerControl(irrigation_io_control,alarm_queue,redis)
    # NOTE(review): SprinklerQueueElementControl is not defined in this chunk
    sprinkler_element = SprinklerQueueElementControl(redis,irrigation_io_control,alarm_queue,counter_devices )
    sprinkler_queue = SprinklerQueueControl( alarm_queue, redis )

    def check_redis_value( key):
        # Read a CONTROL_VARIABLES field, mapping missing (None) to 0.
        value = redis.hget( "CONTROL_VARIABLES",key )
        if value == None:
            value = 0
        return value

    def clear_counters(*args):
        # Reset the hardware counters on every known remote device.
        for i,j in remote_devices.items():
            ip = j["UDP"]
            io_server.clear_all_counters(ip)

    def check_off ( *args ):
        # Verify no flow with all valves closed; flag an alarm and suspend
        # when flow is detected.
        temp = float(redis.hget( "CONTROL_VARIABLES","global_flow_sensor_corrected" ))
        redis.hset("CONTROLLER_STATUS", "check_off",temp )
        if temp > 1.:
            # NOTE(review): this branch writes key "ALARM" while the success
            # branch writes "ALARMS" -- one of the two is wrong; confirm which
            # hash the alarm readers consume before changing it.
            redis.hset("ALARM","check_off",True)
            redis.hset("CONTROL_VARIABLES","SUSPEND","ON")
            alarm_queue.store_past_action_queue( "CHECK_OFF", "RED", { "action":"bad","flow_rate":temp } )
            return_value = "DISABLE"
        else:
            redis.hset("CONTROL_VARIABLES","SUSPEND","OFF")
redis.hset("ALARMS","check_off",False) alarm_queue.store_past_action_queue( "CHECK_OFF", "GREEN", { "action":"good","flow_rate":temp } ) return_value = "DISABLE" return return_value def detect_on_switch_on( self,*args): for i in master_switch_keys: try: value = int(redis.hget("GPIO_BITS",i)) except: value = 0 if value != 0: print "++++++++++",value return "DISABLE" return "RESET" def detect_off_switches(*args): #print "detect off", master_reset_keys for i in master_reset_keys: try: value = int(redis.hget("GPIO_BITS",i)) except: value = 0 if value != 0: print "-------",value return True return False def clear_redis_set_keys( *args): for i in master_switch_keys: redis.hset("GPIO_BITS",i,0) def clear_redis_clear_keys( *args): for i in master_reset_keys: redis.hset("GPIO_BITS",i,0) def detect_switch_off( chainFlowHandle, chainObj, parameters, event ): returnValue = "RESET" if detect_off_switches() == True: clear_redis_clear_keys() returnValue = "DISABLE" return returnValue def check_for_uncompleted_sprinkler_element( chainFlowHandle,chainObj,parameters,event ): #alarm_queue.store_past_action_queue("START_UP","RED" ) length = redis.llen("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" ) if length > 0: #print "enabling chain" chainFlowHandle.enable_chain_base( ["monitor_irrigation_cell"]) def check_irrigation_queue( chainFlowHandle,chainObj,parameters,event ): #alarm_queue.store_past_action_queue("START_UP","RED" ) length = redis.llen("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" ) if length > 0: print "Jobs in Queue" return "TERMINATE" else: return "DISABLE" def add_resistance_entry( remote_dictionary, pin_dictionary, remote, pin ): if ( remote not in remote_dictionary ) or ( pin not in pin_dictionary ): remote_dictionary.union( remote) pin_dictionary.union(pin) json_object = [ remote,pin] json_string = json.dumps(json_object) print "json_string",json_string queue_object = base64.b64encode(json_string) redis.lpush( "QUEUES:SPRINKLER:RESISTANCE_CHECK_QUEUE",queue_object ) def 
update_entry( remote_dictionary, pin_dictionary, remote,pin, schedule , dictionary ): if dictionary.has_key( remote ) == False: dictionary[remote] = {} if dictionary[remote].has_key( pin ) == False: dictionary[remote][pin] = list(set()) dictionary[remote][pin] = set( dictionary[remote][pin]) dictionary[remote][pin].union(schedule) dictionary[remote][pin] = list( dictionary[remote][pin]) add_resistance_entry( remote_dictionary, pin_dictionary, remote, pin ) def assemble_relevant_valves( *args): remote_dictionary = set() pin_dictionary = set() dictionary = {} print "assemble relevant valves" redis.delete( "QUEUES:SPRINKLER:RESISTANCE_CHECK_QUEUE" ) sprinkler_ctrl = app_files.load_file("sprinkler_ctrl.json") for j in sprinkler_ctrl: schedule = j["name"] json_data =app_files.load_file(j["link"]) for i in json_data["schedule"]: for k in i: remote = k[0] pin = str(k[1][0]) update_entry( remote_dictionary, pin_dictionary, remote,pin, schedule , dictionary ) master_valve = sys_files.load_file("master_valve_setup.json") for j in master_valve: remote = j[0] pin = str(j[1][0]) update_entry( remote_dictionary, pin_dictionary, remote,pin, schedule , dictionary ) remote = j[2] pin = str(j[3][0]) update_entry( remote_dictionary, pin_dictionary, remote,pin, schedule , dictionary ) json_string = json.dumps(dictionary) queue_object = base64.b64encode(json_string) redis.set( "SPRINKLER_RESISTANCE_DICTIONARY",queue_object) def test_individual_valves( chainFlowHandle,chainObj,parameters,event ): returnValue = "HALT" if event["name"] == "INIT" : parameters[1] = 0 # state variable else: if event["name"] == "TIME_TICK": if parameters[1] == 0: if redis.llen( "QUEUES:SPRINKLER:RESISTANCE_CHECK_QUEUE" ) == 0: returnValue = "DISABLE" else: compact_data = redis.rpop( "QUEUES:SPRINKLER:RESISTANCE_CHECK_QUEUE" ) json_string = base64.b64decode(compact_data) json_object = json.loads(json_string) print "json object",json_object irrigation_io_control.disable_all_sprinklers() 
                        # (test_individual_valves continued: energize the
                        # queued valve for one minute and remember it)
                        irrigation_io_control.load_duration_counters( 1 ) # 1 minute
                        irrigation_io_control.turn_on_valve( [{"remote": json_object[0], "bits":[int(json_object[1])]}] ) # {"remote":xxxx,"bits":[] }
                        parameters[1] = 1
                        parameters[2] = json_object[0]
                        parameters[3] = json_object[1]
                else:
                    # second tick: sample the coil current for the active
                    # valve and log it for the web server and the cloud
                    monitor.measure_current()
                    try:
                        coil_current = float( redis.hget( "CONTROL_VARIABLES","coil_current" ))
                        print "coil current",coil_current
                        queue = "log_data:resistance_log:"+parameters[2]+":"+parameters[3]
                        redis.lpush(queue, coil_current ) # necessary for web server
                        redis.ltrim(queue,0,10)
                        queue = "log_data:resistance_log_cloud:"+parameters[2]+":"+parameters[3]
                        redis.lpush(queue, json.dumps( { "current": coil_current, "time":time.time()} )) #necessary for cloud
                        redis.ltrim(queue,0,10)
                    except:
                        raise #should not happen
                    irrigation_io_control.disable_all_sprinklers()
                    parameters[1] = 0
        return returnValue

    #
    # Adding chains
    #
    # NOTE(review): several chains reuse link names (e.g. "link_1" appears
    # multiple times inside clean_filter_action_chain) -- confirm whether
    # py_cf requires link names to be unique within a chain.
    cf = py_cf.CF_Interpreter()

    # one-shot startup chain: clear switch state and resume any in-flight cell
    cf.define_chain("reboot_message", True) #tested
    cf.insert_link( "link_1", "One_Step", [ clear_redis_set_keys ] )
    cf.insert_link( "link_2", "One_Step", [ clear_redis_clear_keys ] )
    cf.insert_link( "link_2", "One_Step", [ plc_watch_dog.read_mode ] )
    cf.insert_link( "link_3", "One_Step", [ plc_watch_dog.read_mode_switch ] )
    cf.insert_link( "link_3", "One_Step", [ irrigation_io_control.disable_all_sprinklers ] )
    cf.insert_link( "link_4", "One_Step" ,[ check_for_uncompleted_sprinkler_element ] )
    cf.insert_link( "link_5", "Terminate", [] )

    # sample flow and current once a minute
    cf.define_chain( "monitor_flow_rate", True) #tested
    cf.insert_link( "link_1", "WaitEvent", [ "MINUTE_TICK" ] )
    cf.insert_link( "link_2", "One_Step", [ monitor.measure_flow_rate ] )
    cf.insert_link( "link_3", "One_Step", [ monitor.measure_current ] )
    cf.insert_link( "link_5", "Reset", [] )

    cf.define_chain("measure_input_gpio", False )# TBD
    cf.insert_link( "link_1", "WaitTime", [30,0,0,0] )
    cf.insert_link( "link_2", "One_Step", [ monitor.measure_input_gpio ] )
    cf.insert_link( "link_3", "Reset", [] )

    cf.define_chain("update_time_stamp", True) #tested
    cf.insert_link( "link_1", "WaitTime", [10,0,0,0] )
    cf.insert_link( "link_3", "One_Step", [ monitor.update_time_stamp ] )
    cf.insert_link( "link_4", "Reset", [] )

    cf.define_chain("watch_dog_thread",True) #tested
    cf.insert_link( "link_0", "Log", ["Watch dog thread"] )
    cf.insert_link( "link_1", "WaitEvent", [ "MINUTE_TICK" ] )
    cf.insert_link( "link_3", "One_Step", [ wd_client.pat_wd ])
    cf.insert_link( "link_5", "Reset", [])

    # time-of-day window that would trigger a daily filter clean (disabled)
    cf.define_chain("clean_filter_chain", False) #tested
    cf.insert_link( "link_1", "WaitTod", ["*",17,"*","*"] )
    #cf.insert_link( "link_2", "Enable_Chain", [["clean_filter_action_chain"]] )
    cf.insert_link( "link_3", "WaitTod", ["*",18,"*","*" ] )
    cf.insert_link( "link_4", "Reset", [] )

    # actual backflush sequence: suspend, sequence the cleaning/master
    # valves with settling delays, then resume and log
    cf.define_chain("clean_filter_action_chain", False) #tested
    cf.insert_link( "link_0", "Code", [ check_irrigation_queue ] )
    cf.insert_link( "link_1", "Log", ["Clean Step 1"] )
    cf.insert_link( "link_2", "One_Step", [ monitor.set_suspend ])
    cf.insert_link( "link_3", "One_Step", [ irrigation_io_control.disable_all_sprinklers ] )
    cf.insert_link( "link_4", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn off cleaning valve
    cf.insert_link( "link_5", "One_Step", [ irrigation_io_control.turn_on_master_valves ] )# turn turn on master valve
    cf.insert_link( "link_6", "WaitTime", [120,0,0,0] )
    cf.insert_link( "link_1", "Log", ["Clean Step 3"] )
    cf.insert_link( "link_7", "One_Step", [ irrigation_io_control.turn_on_cleaning_valves ] )# turn on cleaning valve
    cf.insert_link( "link_8", "One_Step", [ irrigation_io_control.turn_off_master_valves ] )# turn turn off master valve
    cf.insert_link( "link_9", "WaitTime", [30,0,0,0] )
    cf.insert_link( "link_1", "Log", ["Clean Step 4"] )
    cf.insert_link( "link_10", "One_Step", [ irrigation_io_control.turn_on_master_valves ] )# turn turn on master valve
    cf.insert_link( "link_11", "WaitTime", [10,0,0,0] )
    cf.insert_link( "link_1", "Log", ["Clean Step 5"] )
    cf.insert_link( "link_12", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn turn off master valve
    cf.insert_link( "link_13", "One_Step", [ irrigation_io_control.turn_off_master_valves ] )# turn turn off cleaning valve
    cf.insert_link( "link_14", "One_Step", [ irrigation_io_control.disable_all_sprinklers ] )
    cf.insert_link( "link_15", "One_Step", [ monitor.clear_cleaning_sum ] )
    cf.insert_link( "link_16", "One_Step", [ monitor.set_resume ])
    cf.insert_link( "link_17", "One_Step", [ monitor.log_clean_filter ] )
    cf.insert_link( "link_17", "Terminate", [] )

    # daily window that would trigger the leak check (disabled)
    cf.define_chain("check_off", False ) # tested
    cf.insert_link( "link_1", "WaitTod", ["*",16,"*","*"] )
    #cf.insert_link( "link_2", "Enable_Chain", [["check_off_chain"]] )
    cf.insert_link( "link_3", "WaitTod", ["*",17,"*","*" ] )
    cf.insert_link( "link_4", "Reset", [] )

    # leak check: close everything, open masters, wait, then verify no flow
    cf.define_chain("check_off_chain", False ) #tested
    #cf.insert_link( "link_1", "Log", ["check off is active"] )
    cf.insert_link( "link_16", "One_Step", [ monitor.set_suspend ] )
    cf.insert_link( "link_2", "One_Step", [ irrigation_io_control.disable_all_sprinklers ] )
    cf.insert_link( "link_3", "WaitTime", [15,0,0,0] )
    cf.insert_link( "link_4", "One_Step", [ irrigation_io_control.turn_on_master_valves ] )# turn turn on master valve
    cf.insert_link( "link_5", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn turn off master valve
    cf.insert_link( "link_6", "WaitTime", [300,0,0,0] )
    cf.insert_link( "link_7", "Code", [ check_off ] )
    cf.insert_link( "link_16", "One_Step", [ monitor.set_resume ])
    cf.insert_link( "link_8", "One_Step", [ irrigation_io_control.turn_off_master_valves ] )# turn turn on master valve
    cf.insert_link( "link_9", "Terminate", [] )

    # keeps the master valve open while the web UI holds it open
    cf.define_chain("manual_master_valve_on_chain",False) #tested
    #cf.insert_link( "link_1", "Log", ["manual master"] )
    cf.insert_link( "link_2", "Code", [ monitor.verify_resume ])
    cf.insert_link( "link_3", "One_Step", [ irrigation_io_control.turn_on_master_valves ] )
    cf.insert_link( "link_4", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn turn off master valve
    cf.insert_link( "link_5", "WaitTime", [ 5,0,0,0] ) # wait 1 seconds
    cf.insert_link( "link_6", "Reset", [] )

    cf.define_chain("monitor_master_on_switch",False) #TBD
    #cf.insert_link("link_1", "WaitTime", [5,0,0,0] )
    #cf.insert_link("link_2", "Code", [ detect_on_switch_on ] )
    #cf.insert_link("link_3", "One_Step", [ clear_redis_set_keys ] )
    #cf.insert_link("link_4", "Enable_Chain", [["manual_master_valve_on_chain"]] )
    #cf.insert_link("link_5", "Enable_Chain", [["manual_master_valve_off_chain"]] )
    #cf.insert_link("link_6", "WaitTime", [3600*8,0,0,0] ) # wait 8 hours
    #cf.insert_link("link_7", "Disable_Chain", [["manual_master_valve_on_chain"]] )
    #cf.insert_link("link_8", "One_Step", [ irrigation_io_control.turn_off_master_valves ])
    #cf.insert_link("link_9", "Reset", [])
    cf.insert_link("link_9", "Halt", [])

    # web-triggered master valve: auto-close after 8 hours
    cf.define_chain("monitor_master_on_web",False) #TBD
    cf.insert_link( "link_0", "Log", ["monitor master on web"] )
    cf.insert_link("link_1", "Enable_Chain", [["manual_master_valve_on_chain"]] )
    cf.insert_link("link_2", "WaitTime", [ 3600*8,0,0,0] ) # wait 8 hour
    cf.insert_link("link_3", "Enable_Chain", [["manual_master_valve_on_chain"]] )
    cf.insert_link("link_4", "Disable_Chain", [["manual_master_valve_off_chain"]] )
    cf.insert_link("link_5", "One_Step", [ irrigation_io_control.turn_off_master_valves ])
    cf.insert_link("link_6", "Disable_Chain", [["monitor_master_on_web"]] )

    cf.define_chain("manual_master_valve_off_chain",False ) #TBD
    cf.insert_link("link_1", "WaitTime", [5,0,0,0] )
    #cf.insert_link("link_1", "Code", [ detect_switch_off ] )
    #cf.insert_link("link_2", "One_Step", [ clear_redis_clear_keys ] )
    #cf.insert_link("link_3", "One_Step", [ clear_redis_set_keys ] )
    #cf.insert_link("link_4", "Enable_Chain", [["monitor_master_on_switch"]] )
    #cf.insert_link("link_5", "Disable_Chain", [["manual_master_valve_on_chain"]] )
    #cf.insert_link("link_6", "Disable_Chain", [["monitor_master_on_web"]] )
    #cf.insert_link("link_7", "One_Step", [ irrigation_io_control.turn_off_master_valves ] )# turn turn on master valve
    #cf.insert_link("link_8", "One_Step", [ irrigation_io_control.turn_off_cleaning_valves ] )# turn turn off master valve
    cf.insert_link("link_6", "Disable_Chain", [["manual_master_valve_off_chain"]] )

    # volume-based filter cleaning trigger (see Monitor.check_to_clean_filter)
    cf.define_chain("gpm_triggering_clean_filter",True) #TBDf
    cf.insert_link( "link_1", "WaitEvent", [ "MINUTE_TICK" ] )
    #cf.insert_link( "link_1", "Log", ["check to clean filter"] )
    cf.insert_link( "link_2", "One_Step", [ monitor.check_to_clean_filter ] )
    cf.insert_link( "link_3", "Reset", [] )

    cf.define_chain("update_modbus_statistics",True) #tested
    #cf.insert_link( "link_1", "Log", ["update modbus statistics"] )
    cf.insert_link( "link_2", "One_Step", [ monitor.update_modbus_statistics ] )
    cf.insert_link( "link_3", "WaitTime", [ 15,25,0,0] ) # wait 15 minutes
    cf.insert_link( "link_4", "Reset", [] )

    # clear the modbus stats nightly between 1 and 2 am
    cf.define_chain("clear_modbus_statistics",True) #tested
    cf.insert_link( "link_1", "WaitTod", ["*",1,"*","*"] )
    #cf.insert_link( "link_2", "Log", ["clear modbus statistics"] )
    cf.insert_link( "link_3", "One_Step", [ monitor.clear_modbus_statistics ] )
    cf.insert_link( "link_4", "WaitTod", ["*",2,"*","*"] )
    cf.insert_link( "link_5", "Reset", [] )

    # valve coil resistance sweep (see test_individual_valves above)
    cf.define_chain("resistance_check",False) #not tested
    cf.insert_link( "link_1", "Log", ["resistance check"] )
    cf.insert_link( "link_2", "One_Step", [ monitor.set_suspend ])
    cf.insert_link( "link_3", "One_Step", [ assemble_relevant_valves ] )
    cf.insert_link( "link_4", "Code", [ test_individual_valves,0,0,0 ] )
    cf.insert_link( "link_5", "One_Step", [ monitor.set_resume ])
    cf.insert_link( "link_6", "Disable_Chain", [["resistance_check"]] )

    cf.define_chain("plc_watch_dog", True ) #TBD
    #cf.insert_link( "link_1", "Log", ["plc watch dog thread"] )
    #cf.insert_link( "link_2", "One_Step", [ plc_watch_dog.read_mode ] )
    #cf.insert_link( "link_3", "One_Step", [ plc_watch_dog.read_mode_switch ] )
    cf.insert_link( "link_4", "One_Step", [ plc_watch_dog.read_wd_flag ] )
    cf.insert_link( "link_5", "One_Step", [ plc_watch_dog.write_wd_flag ] )
    cf.insert_link( "link_1", "WaitTime", [ 30,0,0,0] ) # wait 1 seconds
    cf.insert_link( "link_7", "Reset", [] )

    # poll the sprinkler command queue every second
    cf.define_chain( "plc_monitor_control_queue", True ) #tested
    cf.insert_link( "link_1", "WaitTime", [ 1,0,0,0] ) # wait 1 seconds
    cf.insert_link( "link_2", "One_Step", [ sprinkler_control.dispatch_sprinkler_mode ] )
    cf.insert_link( "link_3", "Reset", [] )

    # main irrigation loop: move a job to the cell queue, start it, verify
    # current draw, then hand off to the per-minute cell monitor
    cf.define_chain("monitor_irrigation_job_queue", True ) # tested
    cf.insert_link( "link_1", "WaitTime", [ 5,0,0,0] ) # wait 5 seconds
    cf.insert_link( "link_2", "Code", [ sprinkler_queue.load_irrigation_cell ] )
    cf.insert_link( "link_3", "Code", [ sprinkler_element.start] )
    cf.insert_link( "link_4", "WaitTime", [ 1,0,0,0] ) # wait 1 seconds
    cf.insert_link( "link_5", "One_Step", [ monitor.measure_current ] )
    cf.insert_link( "link_6", "Code", [ sprinkler_element.check_current ] )
    cf.insert_link( "link_7", "Enable_Chain", [["monitor_irrigation_cell","monitor_current_sub" ]])
    cf.insert_link( "link_8", "WaitEvent", ["CELL_DONE" ] )
    cf.insert_link( "link_9", "Reset", [] )

    # 15-second current sampling while a cell runs
    cf.define_chain("monitor_current_sub", False )
    cf.insert_link( "link_0", "Log" , [["monitor_current_sub chain is working"]])
    cf.insert_link( "link_1", "WaitTime", [ 15,0,0,0] ) # wait 15 second
    cf.insert_link( "link_2", "One_Step", [ monitor.measure_current_a ] )
    cf.insert_link( "link_3", "One_Step", [ sprinkler_element.check_current ] )
    cf.insert_link( "link_4", "Reset", [] )

    # per-minute supervision of the running cell
    cf.define_chain("monitor_irrigation_cell", False ) #Tested
    cf.insert_link( "link_1", "WaitEvent", [ "MINUTE_TICK" ] )
    cf.insert_link( "link_2", "One_Step", [ sprinkler_element.check_current ] )
    cf.insert_link( "link_3", "One_Step", [ sprinkler_element.check_for_excessive_flow_rate ] )
    cf.insert_link( "link_3", "Code", [ sprinkler_element.monitor ] )
    cf.insert_link( "link_4", "SendEvent", ["CELL_DONE"] )
    cf.insert_link( "link_5",
"Disable_Chain", [["monitor_irrigation_cell","monitor_current_sub" ]]) length = redis.llen("QUEUES:SPRINKLER:IRRIGATION_CELL_QUEUE" ) cf_environ = py_cf.Execute_Cf_Environment( cf ) cf_environ.execute()
Four western North Dakota colleges celebrated the completion of a six-year-long $24 million grant that resulted in thousands of trained individuals ready to fill crucial workforce needs in North Dakota. In 2012 Bismarck State College (BSC), Sitting Bull College (SBC), Turtle Mountain Community College (TMCC) and Williston State College (WSC) partnered to form the TREND Consortium, or Training for Regional Energy in North Dakota. The four shared in two rounds of grants from the U.S. Department of Labor, $14.6 million in 2012 and $9.9 million in 2014. These grants, provided under the Trade Adjustment Assistance Community College and Career Training (TAACCCT) program, aimed to assist institutions in improving their delivery of education and career training programs that could be completed in two years or less. BSC was the lead institution for the grant, which President Larry C. Skogen said allowed schools to purchase equipment and upgrade facilities that will continue to benefit future students for years to come. More than 2,300 students received training in the 2012-2016 round of grant funding, and more than 2,200 students benefited from the 2014-2018 round. TMCC was able to begin a Commercial Driver’s License (CDL) training program, with more than 100 people obtaining their CDL through the program. “They’re probably all over the country driving now,” said Dr. Jim Davis, president of TMCC. He also emphasized the importance of the collaboration between tribal colleges and colleges in the North Dakota University System. “If they hadn’t opened their doors, we wouldn’t be standing here,” he said. Dr. John Miller, president of WSC, agreed. “They provided the critical mass and leadership for us to bring this project home,” he said. Miller says the grant enabled WSC to double the enrollments in the college’s technical programs and provided funding necessary for the development of expensive new technical labs. 
“The grant for Williston State College was transformational,” he said. He added the grant also helped the school transform numerous internal processes and connections with students resulting in an updated advising system for all students at WSC. SBC Vice President Dr. Koreen Ressler said the grant enabled them to recruit more males into programs, increasing male enrollment at the school to 40% of students, up from 30% before the grants. Ressler says grant-funded programs at SBC focused on construction and trades and the school plans to continue those programs into the future. All four campus presidents spoke about the desire to continue their collaboration into the future.
# -*- coding:utf-8 -*- from __future__ import print_function import os import time import shutil import traceback from report_crawler.spiders.__Global_function import get_localtime from report_crawler.spiders.__Global_variable import REPORT_SAVEDIR now_time = get_localtime(time.strftime("%Y-%m-%d", time.localtime())) DATADIR = REPORT_SAVEDIR + '/' + str(now_time) class Spider_starter(object): def crawl(self): self.X001() def run_spider(self, spider_name): dirname = REPORT_SAVEDIR + '/' + str(now_time) + '/' + spider_name[len(spider_name)-3:] + '/' + spider_name[0:len(spider_name)-3] # If the dir is exist, clear the dir(today) if os.path.exists(dirname): shutil.rmtree(dirname, True) # If one of the spiders has error, the print_exc() function will tell us which is criminal try: if not os.path.exists(DATADIR): os.makedirs(DATADIR) os.system('scrapy crawl ' + spider_name) except: traceback.print_exc() def X001(self): spider_list = { 'B': ['BNU001', 'BUAA001'], 'C': ['CSU001', 'CUMT001'], 'E': ['ECNU001'], 'H': ['HNU001'], 'J': ['JLU001'], 'N': ['NCU001', 'NKU001', 'NWSUAF001'], 'P': ['PKU001'], 'S': ['SCU001', 'SDU001', 'SEU001', 'SHU001', 'SUDA001', 'SWJTU001', 'SWU001', 'SYSU001'], 'T': ['THU001', 'TYUT001'], 'U': ['UESTC001'], 'W': ['WHU001'], 'Z': ['ZZU001'] } for key in spider_list.keys(): for spider in spider_list[key]: self.run_spider(spider) if __name__ == '__main__': starter = Spider_starter() starter.crawl()
Howard S. Zelbo is a partner of Cleary Gottlieb Steen & Hamilton LLP. Howard Zelbo’s practice focuses on international arbitration and complex commercial litigation. Howard joined the firm in 1988 and became a partner in 1994.
#!/usr/bin/python import sys import os from eclipse2buck import decorator def check_force_flag(lines): for line in lines: tag = "plugin.build.force=" if line.startswith(tag): line = line.strip('\r\n') if (line[len(tag):] == "true"): return True return False def extract_include(lines): if check_force_flag(lines): for line in lines: tag = "plugin.build.include=" if line.startswith(tag): line = line.strip('\r\n') for item in line[len(tag):].split(" "): if len(item) > 0: print "\'" + item + "'," @decorator.var("SECONDARY_DEX_PATTERN_LIST") def print_secondary_pattern(folder): for dirname in os.listdir(folder): if os.path.isdir(folder+dirname) and (not dirname.startswith('.')): filename = folder+dirname + "/plugin.properties" if os.path.isfile(filename): with open(filename) as fd: extract_include(fd.readlines()) def dump_secondary_pattern(folder, outfile='./SECONDARY_DEX_PATTERN_LIST'): with open(outfile, 'w') as out: terminal = sys.stdout sys.stdout = out print_secondary_pattern(folder) sys.stdout = terminal if __name__ == "__main__": if len(sys.argv) > 1: root = sys.argv[1] else: root = "./" print_secondary_pattern(root)
Sinja city is located in Sudan at the 13.15, 33.9333 coordinates. Distances from Sinja to other cities are listed below; there are also 5 sub-cities within Sinja — click on a city name to find the distance between them. The distance from Sinja to Khartoum is 307 km, to Port Sudan 800 km, and to Kassala 369 km. The green marker indicates the location of Sinja. List of distances from Sinja to Sudanese cities. List of distances from Sinja to sub-cities (sub-divisions). Choose a city to calculate the distance from Sinja to major cities in the world.
from django import template
from django.template.loader import render_to_string

from primer.utils import get_request
from primer.comments.forms import CommentForm, StatusForm, TimelineForm, get_comment_form
from primer.comments.utils import get_content_types_hash, get_content_types_list

register = template.Library()


@register.simple_tag(takes_context=True)
def comments(context, target, **kwargs):
    """
    This renders a comments list

    Arguments see setup_comment_data
    """
    comment_data = setup_comment_data('comments', target, **kwargs)
    return render_to_string('comments/base_comments.html', comment_data, context)


@register.simple_tag(takes_context=True)
def wall(context, target, **kwargs):
    """Renders a wall (status-style comments) for the given target."""
    comment_data = setup_comment_data('wall', target, **kwargs)
    return render_to_string('comments/base_comments.html', comment_data, context)


@register.simple_tag(takes_context=True)
def timeline(context, target, position='center', **kwargs):
    """Renders a timeline for the target; `position` picks the CSS variant."""
    kwargs['css_class_name'] = 'comments-timeline-%s' % position
    comment_data = setup_comment_data('timeline', target, **kwargs)
    return render_to_string('comments/base_comments.html', comment_data, context)


def setup_comment_data(comments_type, target, placeholder = None, stream = (), limit = 10, reversed = 0, read_only = 0, forms = None, tab_class = 'nav-pills', login_required = True, css_class_name=''):
    """
    Sets up comment data for walls, comment lists, timelines, etc

    Arguments
        comments_type: comments, wall, or timeline
        target: a single db object that the comments belong to
        placeholder : the placeholder text for the comment input box
        stream: extra objects whose comments are merged into this view
        limit: the number of comments per page
        reversed: 0 or 1 as boolean. Reverses the direction of the list and
            renders the form at the top or bottom
        read_only: 0 or 1 as boolean. Whether or not the wall is read only
        forms: comma-separated form names overriding the default form
    """
    if comments_type == 'comments':
        css_class_name = 'comments-list %s' % css_class_name
    else:
        css_class_name = ' comments-%s %s' % (comments_type, css_class_name)

    if not forms:
        if comments_type == 'wall':
            comment_forms = [StatusForm(target, comments_type = comments_type)]
        elif comments_type == 'timeline':
            comment_forms = [TimelineForm(target, comments_type = comments_type)]
        else:
            comment_forms = [CommentForm(target, comments_type = comments_type)]
    else:
        comment_forms = [get_comment_form(form)(target, comments_type = comments_type) for form in forms.replace(' ', '').split(',')]

    # setup reversed properly, we only allow reversed for comments
    if comments_type != 'comments':
        reversed = False

    # optionally overwrite the placeholder text that gets passed in.
    # BUG FIX: the original referenced an undefined name `comment_form`
    # (NameError when `placeholder` was supplied); apply it to every form.
    if placeholder:
        for comment_form in comment_forms:
            comment_form.fields['comment'].widget.attrs['placeholder'] = placeholder

    # add this set of data to the session and get the comment hash.
    # note: default `stream` is an immutable tuple so the argument default
    # can never be mutated across calls.
    stream = list(stream)
    stream.append(target)
    comment_hash = add_to_session(stream, read_only)

    return {
        'target' : target,
        'comment_forms' : comment_forms,
        'comment_hash' : comment_hash,
        'limit' : limit,
        'comments_type' : comments_type,
        'read_only' : read_only,
        'css_class_name' : css_class_name,
        'tab_class' : tab_class,
        'login_required' : login_required,
        'is_reversed' : reversed
    }


def add_to_session(target, read_only):
    """
    This adds a hash that identifies the contents of a wall of comments_list
    in the session. This hash will get checked against when loading more
    comments, to make sure they are allowed to load the content they are
    asking for. The hashing algorithm is sha224

    Arguments
        target: the target(s) that are getting hashed
    """
    # for security, store a hash of this comments contents in the users session
    request = get_request()

    # create our list if nonexistant
    if not 'primer_comment_hashes' in request.session:
        request.session['primer_comment_hashes'] = {}

    # convert the stream to a serialized list of content_types and pks
    target_list = get_content_types_list(target)
    comment_hash = get_content_types_hash(target_list)

    # add it to the session (debug leftovers removed: a stray 'blah' key and
    # a `print read_only` statement)
    request.session['primer_comment_hashes'][comment_hash] = {
        'content_types' : target_list,
        'read_only' : bool(read_only),
    }
    request.session.save()

    return comment_hash
Did you just get a wedding ring over the holiday season? Are you engaged and looking to talk with a wedding photographer? I would love to talk with you about your big day and hear how I can help you curate this special day of your lives. Please feel free to email me at info@kschulzphotography or call 630-222-9124. I look forward to hearing from you. What an inspiring blog! Beautiful ring photography. And what a great idea with the newspaper and the wedding date. I love unique photos! Love that you have incorporated each couple's wedding theme into a beautiful portrait of their wedding band. So fun. The ducks are my favorite. Lovely! I love how you capture something so special in such a manner that is unique to each couple!
from __future__ import division
from ndex.networkn import NdexGraph
import data_model as dm
import operator
from math import sqrt

# TODO: only one edge between each pair of nodes. Take the best one.


def create_similarity_map_from_enrichment_files(map_name, directory, e_set_name, min_subsumption, max_edges=3, ext_link_paths=None):
    """Load one enrichment set from `directory` and build its similarity map.

    Thin wrapper around create_similarity_map(); see that function for the
    meaning of min_subsumption / max_edges / ext_link_paths.
    """
    e_data = dm.EnrichmentData(directory)
    e_data.load_one_eset(e_set_name)
    e_set = e_data.get_e_set(e_set_name)
    return create_similarity_map(map_name, e_set, min_subsumption, max_edges=max_edges, ext_link_paths=ext_link_paths)


def create_similarity_map(name, e_set, min_subsumption, id_attribute="genes", max_edges=5, ext_link_paths=None):
    """Build an NdexGraph in which each node is a network from `e_set` and
    edges connect networks whose id sets overlap.

    min_subsumption -- minimum overlap/target-set-size ratio for an edge to
        be added beyond the first (most similar) one per source.
    id_attribute -- node attribute name holding each network's id set.
    max_edges -- cap on outgoing edges considered per source network.
    ext_link_paths -- locations used to build download links on each node;
        defaults to the NCI PID 2016/2017 archive paths below.
    """
    similarity_graph = NdexGraph()
    similarity_graph.set_name(name)
    if ext_link_paths is None:
        ext_link_paths = {
            'BiopaxFile': 'NCI_PID_BIOPAX_2016-06-08-PC2v8-API',
            'GSEAFile': 'NCI_PID_GSEA_2017-04-06',
            'serverSubDomain': 'public'
        }
    set_name_to_node_id_map = {}
    id_sets = {}
    remove_super_nodes = []
    # Pass 1: create one node per network, decorated with gene count,
    # display width, and internal/external download links.
    for network_id in e_set.id_set_map:
        id_set_object = e_set.id_set_map[network_id]
        network_name = id_set_object.name
        id_set = id_set_object.set
        id_sets[network_id] = id_set
        att = {id_attribute: list(id_set)}
        node_id = similarity_graph.add_new_node(network_name, att)
        gene_count = float(len(id_set_object.gene_set))
        similarity_graph.set_node_attribute(node_id, "gene count", gene_count)
        # sqrt scaling keeps node area (rather than diameter) roughly
        # proportional to the gene count
        similarity_graph.set_node_attribute(node_id, "width", sqrt(gene_count))
        similarity_graph.set_node_attribute(node_id, "ndex:internalLink", "[%s](%s)" % ("<i class='fa fa-eye' aria-hidden='true'></i>&nbsp;&nbsp;&nbsp;View network<br />",network_id))
        if ext_link_paths is not None:
            externalLink1 = "[%s](%s)" %("<i class='fa fa-download' aria-hidden='true'></i>&nbsp;&nbsp;&nbsp;BioPAX3 file (.owl)<br />","ftp://ftp.ndexbio.org/" + ext_link_paths.get('BiopaxFile') + "/" + network_name.replace(" ", "%20") + ".owl.gz")
            externalLink2 = "[%s](%s)" % ("<i class='fa fa-download' aria-hidden='true'></i>&nbsp;&nbsp;&nbsp;GSEA gene set (.grp)<br />","ftp://ftp.ndexbio.org/" + ext_link_paths.get('GSEAFile') + "/" + network_name.replace(" ", "%20") + ".grp.gz")
            externalLink3 = "[%s](%s)" % ("<i class='fa fa-download' aria-hidden='true'></i>&nbsp;&nbsp;&nbsp;CX file (.cx)","http://" + ext_link_paths.get('serverSubDomain') + ".ndexbio.org/v2/network/" + network_id + "?download=true")
            similarity_graph.set_node_attribute(node_id, "ndex:externalLink", [externalLink1, externalLink2, externalLink3])
        # This umbrella network overlaps everything; remember its node so it
        # can be dropped at the very end.
        if(network_name == "NCI Pathway Interaction Database - Final Revision"):
            remove_super_nodes.append(node_id)
        set_name_to_node_id_map[network_id] = node_id
    # Pass 2: for every ordered pair of networks compute the overlap and a
    # "subsumes" score = |overlap| / |target set|.
    source_similarities = {}
    for network_id_1 in id_sets.keys():
        source_node_id = set_name_to_node_id_map[network_id_1]
        list_1 = list(id_sets[network_id_1])
        set_1_size = len(list_1)
        similarities = []
        for network_id_2 in id_sets.keys():
            if network_id_1 != network_id_2:
                set_1 = id_sets[network_id_1]
                set_2 = id_sets[network_id_2]
                target_node_id = set_name_to_node_id_map[network_id_2]
                list_2 = list(id_sets[network_id_2])
                set_2_size = len(list_2)
                overlap = list(set_1.intersection(set_2))
                size_overlap=len(overlap)
                if size_overlap != 0:
                    subsumes = size_overlap/set_2_size
                    #subsumes_1 = size_overlap/set_2_size
                    #subsumes_2 = size_overlap/set_1_size
                    #subsumes = min(subsumes_1, subsumes_2)
                    if size_overlap > 3:
                        print "overlap: %s %s" % (size_overlap, overlap)
                    similarity = {"source_node_id": source_node_id, "target_node_id": target_node_id, "subsumes": subsumes}
                    similarity["atts"] = {"subsumes": subsumes, "overlap": overlap, "overlap_size": size_overlap}
                    similarities.append(similarity)
                else:
                    print "no overlap"
        # rank the similarities, best (highest subsumes) first
        similarities = sorted(similarities, key=operator.itemgetter('subsumes'), reverse=True)
        source_similarities[network_id_1] = similarities
    # Pass 3a: build a scratch copy holding every candidate edge; it is
    # consulted in pass 3b to detect when the converse edge would exist.
    temp_similarity_graph = similarity_graph.copy()
    for network_id, similarities in source_similarities.iteritems():
        count = 0
        for similarity in similarities:
            if count >= max_edges:
                break
            if count == 0 or similarity["subsumes"] > min_subsumption:
                atts = similarity["atts"]
                source_node_id = similarity["source_node_id"]
                target_node_id = similarity["target_node_id"]
                edge_id = temp_similarity_graph.add_edge_between(source_node_id, target_node_id, attr_dict=atts)
            count = count + 1
    # Pass 3b: add the real edges.
    # always include the most similar node to make sure that each node has
    # at least one edge and the graph is connected
    # don't connect more than max_edges
    for network_id, similarities in source_similarities.iteritems():
        count = 0
        for similarity in similarities:
            if count >= max_edges:
                break
            if count == 0 or similarity["subsumes"] > min_subsumption:
                atts = similarity["atts"]
                source_node_id = similarity["source_node_id"]
                source_gene_count = similarity_graph.node[source_node_id].get("gene count")
                target_node_id = similarity["target_node_id"]
                target_gene_count = similarity_graph.node[target_node_id].get("gene count")
                edge_overlap = float(atts["overlap_size"])
                # If the edge is pointing from low gene count to high gene count we proceed.
                # if the edge is pointing from high gene count to low count we check
                # the edge map to see if the converse edge exists. If so we skip adding and
                # let the converse edge populate
                # if there is no acceptable edge we switch the source and target and proceed
                if(target_gene_count > source_gene_count):
                    # Jaccard-style metric: overlap / union of the two gene counts
                    edge_function = edge_overlap / (source_gene_count + target_gene_count - edge_overlap)
                    edge_id = similarity_graph.add_edge_between(source_node_id, target_node_id, attr_dict=atts, interaction='shares genes with')
                    similarity_graph.set_edge_attribute(edge_id, "overlap metric", similarity["subsumes"])
                    similarity_graph.set_edge_attribute(edge_id, "edge function", edge_function)
                    if(similarity["subsumes"] > 0.4):
                        similarity_graph.set_edge_attribute(edge_id, "strength", "high")
                    else:
                        similarity_graph.set_edge_attribute(edge_id, "strength", "low")
                elif(target_gene_count == source_gene_count):
                    # Equal gene counts: add only if neither direction exists yet.
                    if(source_node_id not in similarity_graph[target_node_id] and target_node_id not in similarity_graph[source_node_id]):
                        edge_function = edge_overlap / (source_gene_count + target_gene_count - edge_overlap)
                        edge_id = similarity_graph.add_edge_between(source_node_id, target_node_id, attr_dict=atts, interaction='shares genes with')
                        similarity_graph.set_edge_attribute(edge_id, "overlap metric", similarity["subsumes"])
                        similarity_graph.set_edge_attribute(edge_id, "edge function", edge_function)
                        if(similarity["subsumes"] > 0.4):
                            similarity_graph.set_edge_attribute(edge_id, "strength", "high")
                        else:
                            similarity_graph.set_edge_attribute(edge_id, "strength", "low")
                else:
                    if(source_node_id in temp_similarity_graph[target_node_id]):
                        print "Converse edge exists. Skipping " + str(source_node_id) + ", " + str(target_node_id)
                    else:
                        edge_function = edge_overlap / (source_gene_count + target_gene_count - edge_overlap)
                        edge_id = similarity_graph.add_edge_between(target_node_id, source_node_id, attr_dict=atts, interaction='shares genes with')
                        similarity_graph.set_edge_attribute(edge_id, "overlap metric", similarity["subsumes"])
                        similarity_graph.set_edge_attribute(edge_id, "edge function", edge_function)
                        if(similarity["subsumes"] > 0.4):
                            similarity_graph.set_edge_attribute(edge_id, "strength", "high")
                        else:
                            similarity_graph.set_edge_attribute(edge_id, "strength", "low")
            count = count + 1
    # Drop the umbrella node(s) recorded in pass 1.
    for remove_this_node in remove_super_nodes:
        similarity_graph.remove_node(remove_this_node)
    return similarity_graph
BIFIDSAN is a synbiotic product: it combines prebiotics (ingredients that we are not able to digest and that stimulate the growth of intestinal flora) with probiotics (live microorganisms that help the development of microbial flora in the gut). Published studies confirm that the strains used in BIFIDSAN are able to improve intestinal transit in healthy individuals with disorders of evacuation. Inulin, a prebiotic product present in BIFIDSAN, contributes to the increase and to the implantation of live microorganisms in the gastrointestinal system. Syrup to Reduce Water Retention Manufacturer: Pinisan Presentation: Bottle of 500 ml. Dosage: 30 ml. diluted in half a liter of water to drink during the day. Manufacturer: Natysal Active Ingredients: dry sage leaves and fruits of Vitex extract. Directions: Take 1 capsule daily. Presentation: Package of 30 capsules.
import sys from pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import List, Optional, Sequence # Shim to wrap setup.py invocation with setuptools # # We set sys.argv[0] to the path to the underlying setup.py file so # setuptools / distutils don't take the path to the setup.py to be "-c" when # invoking via the shim. This avoids e.g. the following manifest_maker # warning: "warning: manifest_maker: standard file '-c' not found". _SETUPTOOLS_SHIM = ( "import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};" "f=getattr(tokenize, 'open', open)(__file__);" "code=f.read().replace('\\r\\n', '\\n');" "f.close();" "exec(compile(code, __file__, 'exec'))" ) def make_setuptools_shim_args( setup_py_path, # type: str global_options=None, # type: Sequence[str] no_user_config=False, # type: bool unbuffered_output=False # type: bool ): # type: (...) -> List[str] """ Get setuptools command arguments with shim wrapped setup file invocation. :param setup_py_path: The path to setup.py to be wrapped. :param global_options: Additional global options. :param no_user_config: If True, disables personal user configuration. :param unbuffered_output: If True, adds the unbuffered switch to the argument list. """ args = [sys.executable] if unbuffered_output: args += ["-u"] args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)] if global_options: args += global_options if no_user_config: args += ["--no-user-cfg"] return args def make_setuptools_bdist_wheel_args( setup_py_path, # type: str global_options, # type: Sequence[str] build_options, # type: Sequence[str] destination_dir, # type: str ): # type: (...) -> List[str] # NOTE: Eventually, we'd want to also -S to the flags here, when we're # isolating. Currently, it breaks Python in virtualenvs, because it # relies on site.py to find parts of the standard library outside the # virtualenv. 
args = make_setuptools_shim_args( setup_py_path, global_options=global_options, unbuffered_output=True ) args += ["bdist_wheel", "-d", destination_dir] args += build_options return args def make_setuptools_clean_args( setup_py_path, # type: str global_options, # type: Sequence[str] ): # type: (...) -> List[str] args = make_setuptools_shim_args( setup_py_path, global_options=global_options, unbuffered_output=True ) args += ["clean", "--all"] return args def make_setuptools_develop_args( setup_py_path, # type: str global_options, # type: Sequence[str] install_options, # type: Sequence[str] no_user_config, # type: bool prefix, # type: Optional[str] home, # type: Optional[str] use_user_site, # type: bool ): # type: (...) -> List[str] assert not (use_user_site and prefix) args = make_setuptools_shim_args( setup_py_path, global_options=global_options, no_user_config=no_user_config, ) args += ["develop", "--no-deps"] args += install_options if prefix: args += ["--prefix", prefix] if home is not None: args += ["--home", home] if use_user_site: args += ["--user", "--prefix="] return args def make_setuptools_egg_info_args( setup_py_path, # type: str egg_info_dir, # type: Optional[str] no_user_config, # type: bool ): # type: (...) -> List[str] args = make_setuptools_shim_args( setup_py_path, no_user_config=no_user_config ) args += ["egg_info"] if egg_info_dir: args += ["--egg-base", egg_info_dir] return args def make_setuptools_install_args( setup_py_path, # type: str global_options, # type: Sequence[str] install_options, # type: Sequence[str] record_filename, # type: str root, # type: Optional[str] prefix, # type: Optional[str] header_dir, # type: Optional[str] home, # type: Optional[str] use_user_site, # type: bool no_user_config, # type: bool pycompile # type: bool ): # type: (...) 
-> List[str] assert not (use_user_site and prefix) assert not (use_user_site and root) args = make_setuptools_shim_args( setup_py_path, global_options=global_options, no_user_config=no_user_config, unbuffered_output=True ) args += ["install", "--record", record_filename] args += ["--single-version-externally-managed"] if root is not None: args += ["--root", root] if prefix is not None: args += ["--prefix", prefix] if home is not None: args += ["--home", home] if use_user_site: args += ["--user", "--prefix="] if pycompile: args += ["--compile"] else: args += ["--no-compile"] if header_dir: args += ["--install-headers", header_dir] args += install_options return args
Every so often Jetwhine is lucky enough to unexpectedly receive an important work of prose from another professional mind. Today we have one from Frank Froman, a St. Louis psychologist, a man with a secret dream, but a man who also knows how to solve a problem when he’s confronted with it. It’s men like Frank that just might hold some of the solutions the airlines are seeking … maybe! Ladies and Gentlemen, welcome to flight 2020A flying today from St. Louis to New York’s airport, whichever one we can get into. We hope this is a comfortable trip for you. And do we have a pilot on board? No? Ouch. Anyone who has ever flown a 737 here maybe? No? How about anyone with dual engine experience? Mmmm. Anyone with single engine and instrument flying experience? How about a visual-only pilot? That’s when I stood up. It seemed only reasonable to assist in these tight eco-times. July 20, 1969: Where Were you? This entry was posted on Monday, July 14th, 2008 at 7:53 pm and is filed under Airlines, Airports, Aviation Marketing, Blogging, The Buzz. You can follow any responses to this entry through the RSS 2.0 feed. Both comments and pings are currently closed.
import logging
from functools import wraps

from sqlalchemy import Column, String, ForeignKey, Integer
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker

from pynYNAB.exceptions import BudgetNotFound, WrongPushException
from pynYNAB.schema import Base, Catalog, Budget, Knowledge, Payee, Transaction

LOG = logging.getLogger(__name__)


def operation(expected_delta):
    """Decorator for mutating client methods.

    Runs the wrapped method, then pushes to the server, asserting that
    exactly ``expected_delta`` entities were changed.
    """
    def operation_decorator(fn):
        @wraps(fn)
        def wrapped(self, *args, **kwargs):
            fn(self, *args, **kwargs)
            LOG.debug('push after ' + fn.__name__)
            self.push(expected_delta)
        return wrapped
    return operation_decorator


class nYnabClient_(Base):
    """ORM-mapped nYNAB client holding a catalog and one budget."""

    __tablename__ = "nynabclients"
    id = Column(String, primary_key=True)
    catalog_id = Column(ForeignKey('catalog.id'))
    catalog = relationship('Catalog')
    budget_id = Column(ForeignKey('budget.id'))
    budget = relationship('Budget')
    budget_version_id = Column(String)
    budget_name = Column(String)
    starting_device_knowledge = Column(Integer, default=0)
    ending_device_knowledge = Column(Integer, default=0)

    # Set externally after construction (not ORM-mapped).
    connection = None
    session = None

    @property
    def user_id(self):
        # The client id doubles as the user id.
        return self.id

    def add_missing(self):
        """Create empty catalog/budget containers and persist them."""
        self.catalog = Catalog()
        self.catalog.knowledge = Knowledge()
        self.budget = Budget()
        self.budget.knowledge = Knowledge()
        self.session.add(self.catalog)
        self.session.add(self.budget)
        self.session.commit()

    def sync(self, update_keys=None):
        """Pull catalog and budget state from the server."""
        LOG.debug('Client.sync')
        self.catalogClient.sync(update_keys)
        self.select_budget(self.budget_name)
        self.budgetClient.sync(update_keys)
        self.catalogClient.clear_changed_entities()
        self.budgetClient.clear_changed_entities()
        if self.budget_version_id is None and self.budget_name is not None:
            raise BudgetNotFound()

    def push(self, expected_delta=1):
        """Push local changes, verifying the change count.

        ``ending - starting`` device knowledge represents the number of
        modification rounds that have been applied to the data.
        """
        LOG.debug('Client.push')
        catalog_changed_entities = self.catalogClient.get_changed_apidict()
        budget_changed_entities = self.budgetClient.get_changed_apidict()
        delta = sum(len(l) for k, l in catalog_changed_entities.items()) + \
            sum(len(l) for k, l in budget_changed_entities.items())
        if delta != expected_delta:
            raise WrongPushException(expected_delta, delta)
        if any(catalog_changed_entities) or any(budget_changed_entities):
            self.ending_device_knowledge = self.starting_device_knowledge + 1
        self.catalogClient.push()
        self.budgetClient.push()
        self.catalogClient.clear_changed_entities()
        self.budgetClient.clear_changed_entities()
        self.starting_device_knowledge = self.ending_device_knowledge
        self.session.commit()

    @operation(3)
    def add_account(self, account, balance, balance_date):
        """Add an account plus its transfer payee and starting balance."""
        payee = Payee(
            entities_account_id=account.id,
            enabled=True,
            auto_fill_subcategory_enabled=True,
            auto_fill_memo_enabled=False,
            auto_fill_amount_enabled=False,
            rename_on_import_enabled=False,
            name="Transfer : %s" % account.account_name
        )
        immediateincomeid = next(
            s.id for s in self.budget.be_subcategories
            if s.internal_name == 'Category/__ImmediateIncome__')
        startingbalanceid = next(
            p.id for p in self.budget.be_payees
            if p.internal_name == 'StartingBalancePayee')
        transaction = Transaction(
            accepted=True,
            amount=balance,
            entities_subcategory_id=immediateincomeid,
            cash_amount=0,
            cleared='Cleared',
            date=balance_date,
            entities_account_id=account.id,
            credit_amount=0,
            entities_payee_id=startingbalanceid,
            is_tombstone=False
        )
        self.budget.be_accounts.append(account)
        self.budget.be_payees.append(payee)
        self.budget.be_transactions.append(transaction)

    @operation(1)
    def delete_account(self, account):
        self.budget.be_accounts.remove(account)

    @operation(1)
    def add_transaction(self, transaction):
        self.budget.be_transactions.append(transaction)

    def add_transactions(self, transaction_list):
        """Add many transactions in one push of len(transaction_list)."""
        @operation(len(transaction_list))
        def _add_transactions_method(self, tr_list):
            for tr in tr_list:
                self.budget.be_transactions.append(tr)
        # BUG FIX: the decorated helper expects (self, tr_list); the original
        # passed only the list, which bound it to ``self`` and dropped the
        # transactions argument entirely.
        return _add_transactions_method(self, transaction_list)

    @operation(1)
    def delete_transaction(self, transaction):
        self.budget.be_transactions.remove(transaction)

    @operation(1)
    def delete_budget(self, budget_name):
        for budget in self.catalog.ce_budgets:
            if budget.budget_name == budget_name:
                self.catalog.ce_budgets.remove(budget)

    def select_budget(self, budget_name):
        """Resolve ``budget_name`` to a budget version id, or raise."""
        self.budget_version_id = None
        for budget_version in self.catalog.ce_budget_versions:
            if budget_version.version_name == budget_name:
                self.budget_version_id = budget_version.id
        if self.budget_version_id is None:
            raise BudgetNotFound()

    def create_budget(self, budget_name):
        """Create a new budget on the server with default US formats."""
        import json
        currency_format = dict(
            iso_code='USD',
            example_format='123,456.78',
            decimal_digits=2,
            decimal_separator='.',
            symbol_first=True,
            group_separator=',',
            currency_symbol='$',
            display_symbol=True
        )
        date_format = dict(
            format='MM/DD/YYYY'
        )
        self.connection.dorequest(opname='CreateNewBudget',
                                  request_dic={
                                      "budget_name": budget_name,
                                      "currency_format": json.dumps(currency_format),
                                      "date_format": json.dumps(date_format)
                                  })
Software Listing of Author : "Nvidia" Cg Toolkit is a handy and reliable application designed to help software developers add stunning and interactive effects within 3D applications and share them between Cg programs. The application provides users with a compiler in order to write and run Cg programs and support for CgFX effect files. With flexible effect formats and hardware profiles, Cg Toolkit helps you maximize the graphic possibilities. The CUDA Developer SDK provides examples with source code to help you get started with CUDA. NVIDIA CUDA technology is the world's only C language environment that enables programmers and developers to write software to solve complex computational problems in a fraction of the time by tapping into the many-core parallel processing power of GPUs. With millions of CUDA-capable GPUs already deployed, thousands of software programmers are already using the free CUDA software tools to accelerate applications-from video and audio encoding to oil and gas exploration, product design, medical imaging, and scientific research. Glowball: Tegra 3 Only is an interactive demo created to showcase the power of the NVIDIA Tegra 3 chip. Tegra 3 runs effortlessly at top speed; efficiently delivering the power needed for nuanced and complex 3D graphics. The Glowball: Tegra 3 Only environment is rich in geometric complexity, texture detail, and graphic effects and could not have been built with previous generation hardware. All four cores are kept busy with PhysX® collisions, multiple cloth simulations, particle effects, and dynamic scene lighting. Step right up, folks. The GPU Computing SDK package provides examples with source code, utilities, and white papers to help you get started writing GPU Computing software. The full SDK includes dozens of code samples covering a wide range of applications. The OpenCL applications in the NVIDIA GPU Computing SDK require a GPU with CUDA Compute Architecture to run properly. 
After installing the SDK, open the SDK Browser from the Start Menu by clicking on "NVIDIA GPU Computing SDK Browser" in the NVIDIA GPU Computing folder within the NVIDIA Corporation program group installed in the Windows Start Menu. - Each installed SDK sample program is shown along with links for running the executable and viewing the source code files. - Some of the samples additionally present a link to a Whitepaper describing the sample in detail. NOVEMBER 18, 2014 UPDATE: NVIDIA will continue to run our old GRID Beta app for a few more months to give you time to finish your current games. We recommend all users switch to the new GRID Gaming Service http://shield.nvidia.com/grid-game-streaming which is now integrated into the SHIELD Hub, expands the service regions, and includes more games. NVIDIA and Adhesive have teamed up to bring you this live wallpaper. Are you excited to rampage in a 20 ton mech in Hawken? NVIDIA and Adhesive have teamed up to bring you an amazing Hawken experience using PhysX Particle and APEX Turbulence effects. In anticipation of playing the game themselves, NVIDIA designers developed this live wallpaper and made it available for Tegra users. HAWKEN is a free-to-play, online, multiplayer, mech-based first-person shooter that puts you in the pilot seat of a giant robotic war machine. Customize and upgrade your mechs the way you want, then join your friends on the battlefield to rain destruction across HAWKEN's beautiful and immersive alien landscapes. Best of all, Hawken is free to play! Experience everything. Sacrifice nothing. Gear up with GeForce GTX. NVIDIA CUDA technology is the world's only C language environment that enables developers and programmers to write software to solve complex computational problems in a fraction of the time by tapping into the many-core parallel processing power of GPUs. 
NVIDIA CUDA-enabled GPUs power millions of desktops, notebooks, workstations, and supercomputers around the world, accelerating computationally-intensive tasks for consumers, professionals, scientists, and researchers. NVIDIA DDS Utilities is a package that brings you several tools that will enable you to handle DirectDraw Surface (DDS) format files easily. Among the included command-line applications you will find one for extracting Mipmap (MIP) levels from any DDS file and another tool for merging those levels to form again a DirectDraw Surface graphics file. NVIDIA Encode SDK is a powerful toolset that includes all the components and libraries for taking advantage of NVIDIA's latest set of GPUs in application development. With the help of this technology, the stress on the CPU is much reduced since the GPU can take much of the load off it. Also, the power consumption should decrease as compared to CUDA's similar encoder. NVIDIA Guard Service is a handy application that was especially designed to to help you address some security vulnerabilities that your display driver may have. As soon as you deploy NVIDIA Guard Service, it will scan the system and in case such problems are identified, the service will be installed and will enable you to fix the issues. NVIDIA Maximus is a lightweight solution that provides you with convenient GPU processing controls. The application perfectly combines the interactive design of NVIDIA Quadro GPUs and the high-performance power of NVIDIA Tesla driver in a single workstation. NVIDIA Maximus allows you to optimize your workstation with the best graphics and GPU performance. This way, you can perform simultaneous CAE or rendering analysis on the same system. NVIDIA NPP is a library of functions for performing CUDA accelerated processing. The initial set offunctionality in the library focuses on imaging and video processing and is widely applicable for developers in these areas. 
NPP will evolve over time to encompass more of the compute heavy tasks in a variety of problem domains. The NPP library is written to maximize flexibility, while maintaining high performance. * A cooperative library for interoperating with a developer's GPU code efficiently. NVIDIA OptiX SDK is a scalable framework designed for building ray tracing based applications. The application enables the assignment of CUDA functions and allows efficient sharing of data between OptiX and other CUDA applications. Together, these components provide users with low-level support for "raw ray tracing". Moreover, the OptiX engine enables software developers to accelerate as many ray tracing tasks as they want. We are working on TegraZone 3.0 and we'd like to hear from you! NVIDIA TegraZone: Find the best Android games. Finding the best Android games just got easier. With the TegraZone app for your NVIDIAA* Tegra*-powered mobile device, you can easily find games that are optimized to exploit the full potential of the Tegra 2 and Tegra 3 mobile processor. TegraZone also works with the new Google Nexus 7. Discover unique, premium games for Android that deliver more visually-stunning graphics and smoother gameplay, so you can get the most out of your Tegra-powered mobile device. Texture Atlas Tools consists of a set of applications that enable you to easily build a custom texture atlas. The package includes a utility for viewing and interpreting texture atlases and another tool for compressing textures and thus generating atlas files.
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

import webob

from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _

authorize = extensions.extension_authorizer('compute', 'console_output')


class ConsoleOutputController(wsgi.Controller):
    """Server action controller exposing the text console log."""

    def __init__(self, *args, **kwargs):
        super(ConsoleOutputController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    @wsgi.action('os-getConsoleOutput')
    def get_console_output(self, req, id, body):
        """Get text console output."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            instance = self.compute_api.get(context, id)
        except exception.NotFound:
            raise webob.exc.HTTPNotFound(_('Instance not found'))

        try:
            # ``length`` is optional; a missing/malformed action body is a
            # client error.
            length = body['os-getConsoleOutput'].get('length')
        except (TypeError, KeyError):
            raise webob.exc.HTTPBadRequest(_('os-getConsoleOutput malformed '
                                             'or missing from request body'))

        if length is not None:
            try:
                # NOTE(maurosr): cast length into a string before cast into an
                # integer to avoid thing like: int(2.5) which is 2 instead of
                # raise ValueError like it would when we try int("2.5"). This
                # can be removed once we have api validation landed.
                int(str(length))
            except ValueError:
                raise webob.exc.HTTPBadRequest(_('Length in request body must '
                                                 'be an integer value'))

        try:
            output = self.compute_api.get_console_output(context,
                                                         instance,
                                                         length)
        except exception.NotFound:
            raise webob.exc.HTTPNotFound(_('Unable to get console'))
        except exception.InstanceNotReady as e:
            raise webob.exc.HTTPConflict(explanation=e.format_message())

        # XML output is not correctly escaped, so remove invalid characters
        remove_re = re.compile('[\x00-\x08\x0B-\x1F]')
        output = remove_re.sub('', output)

        return {'output': output}


class Console_output(extensions.ExtensionDescriptor):
    """Console log output support, with tailing ability."""

    name = "ConsoleOutput"
    alias = "os-console-output"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "os-console-output/api/v2")
    updated = "2011-12-08T00:00:00+00:00"

    def get_controller_extensions(self):
        controller = ConsoleOutputController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
Made with PS CS5 and Wacom Bamboo tablet. Copyright© 2007-2013 EowynRus / Elena Nizaeva. My work is © and may not be reproduced, copied, tubed, uploaded, edited, redistributed, altered, built upon, published or transmited without my prior written permission, or re-uploaded with the intention of claiming it as your own. Failure to follow the above may end into legal actions. And believe me, you will have to face the consequences.
# -*- coding: utf-8 -*-

from flask.ext import wtf
from google.appengine.ext import ndb
import flask

import auth
import model
import util

from main import app


###############################################################################
# User List
###############################################################################
@app.route('/_s/user/', endpoint='user_list_service')
@app.route('/user/')
@auth.admin_required
def user_list():
    """Admin view: paginated user listing, with JSON variant under /_s/."""
    user_dbs, more_cursor = util.retrieve_dbs(
        model.User.query(),
        limit=util.param('limit', int),
        cursor=util.param('cursor'),
        order=util.param('order') or '-created',
        name=util.param('name'),
        admin=util.param('admin', bool),
    )

    if flask.request.path.startswith('/_s/'):
        return util.jsonify_model_dbs(user_dbs, more_cursor)

    return flask.render_template(
        'user/user_list.html',
        html_class='user-list',
        title='User List',
        user_dbs=user_dbs,
        more_url=util.generate_more_url(more_cursor),
        has_json=True,
    )


###############################################################################
# User Update
###############################################################################
class UserUpdateForm(wtf.Form):
    username = wtf.StringField('Username',
        [wtf.validators.required(), wtf.validators.length(min=3)],
        filters=[util.email_filter],
    )
    name = wtf.StringField('Name',
        [wtf.validators.required()],
        filters=[util.strip_filter],
    )
    email = wtf.StringField('Email',
        [wtf.validators.optional(), wtf.validators.email()],
        filters=[util.email_filter],
    )
    admin = wtf.BooleanField('Admin')
    active = wtf.BooleanField('Active')


@app.route('/user/<int:user_id>/update/', methods=['GET', 'POST'])
@auth.admin_required
def user_update(user_id):
    """Admin view: update a single user, validating username uniqueness."""
    user_db = model.User.get_by_id(user_id)
    if not user_db:
        flask.abort(404)

    form = UserUpdateForm(obj=user_db)
    if form.validate_on_submit():
        if not util.is_valid_username(form.username.data):
            form.username.errors.append('This username is invalid.')
        elif not is_username_available(form.username.data, user_db):
            form.username.errors.append('This username is taken.')
        else:
            form.populate_obj(user_db)
            if auth.current_user_id() == user_db.key.id():
                # Admins cannot demote or deactivate themselves.
                user_db.admin = True
                user_db.active = True
            user_db.put()
            return flask.redirect(flask.url_for('user_list', order='-modified'))

    if flask.request.path.startswith('/_s/'):
        return util.jsonify_model_db(user_db)

    return flask.render_template(
        'user/user_update.html',
        title=user_db.name,
        html_class='user-update',
        form=form,
        user_db=user_db,
    )


###############################################################################
# User Delete
###############################################################################
@app.route('/_s/user/delete/', methods=['DELETE'])
@auth.admin_required
def user_delete_service():
    """Admin service: delete the users given by URL-safe keys."""
    user_keys = util.param('user_keys', list)
    user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys]
    delete_user_dbs(user_db_keys)
    return flask.jsonify({
        'result': user_keys,
        'status': 'success',
    })


@ndb.transactional(xg=True)
def delete_user_dbs(user_db_keys):
    ndb.delete_multi(user_db_keys)


###############################################################################
# Helpers
###############################################################################
def is_username_available(username, self_db=None):
    """Return True if ``username`` is unused (ignoring ``self_db`` itself)."""
    user_dbs, more_cursor = util.retrieve_dbs(
        model.User.query(),
        username=username,
        limit=2,
    )
    c = len(user_dbs)
    return not (c == 2 or c == 1 and self_db and self_db.key != user_dbs[0].key)
Webcam Platja del Migjorn (Formentera), Spain - 176.2 miles from Bazouka: Beach and Sea - A view over beach and sea from the Blue Bar Formentera. Webcam Es Pujols (Formentera), Spain - 178.5 miles from Bazouka: Es Pujols Beach - View of Es Pujols beach in Formentera. Webcam La Savina (Formentera), Spain - 179.6 miles from Bazouka: Estany des Peix Lagoon - View on the Estany des Peix lagoon near the Ses Illetes and Llevant beaches, pearls of Balearic Islands. Webcam La Manga del Mar Menor, Spain - 189.1 miles from Bazouka: Faro de Cabo de Palos - Live view of Cala de Hierro and Faro de Cabo de Palos in Costa Cálida. Webcam Ibiza Town, Spain - 191.4 miles from Bazouka: View over Ibiza Town - A nice panorama view over Ca'n Escandell in Ibiza Town (Ciutat d'Eivissa). Webcam Ibiza Town, Spain - 191.4 miles from Bazouka: HD Stream Port - A nice livestream over the port of Ibiza Town. Webcam Ibiza Town, Spain - 191.4 miles from Bazouka: Port of Ibiza - A beautiful livestream over the port of Ibiza. Webcam Cala Vadella (Ibiza), Spain - 194.5 miles from Bazouka: Livestream Beach and Harbor - Ibiza, view on the beach and the natural harbor of Cala Vadella. Webcam Santa Eulària (Ibiza), Spain - 195.5 miles from Bazouka: Beach of Santa Eulària - A nice livestream from Ibiza HEUTE. Webcam Cala Tarida (Ibiza), Spain - 196.1 miles from Bazouka: Beach View - A view of the beach of Cala Tarida, Ibiza.
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DLRM implementation with REDACTED embeddings via TPUEstimator."""

import os
import timeit

import REDACTED
from absl import app as absl_app
from absl import flags
import tensorflow.compat.v1 as tf

from REDACTED.tensorflow.contrib.tpu.python.tpu import async_checkpoint
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import dataloader
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import dlrm
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm import feature_config as fc
from REDACTED.tensorflow_models.mlperf.models.rough.dlrm_tf2 import common

FLAGS = flags.FLAGS

flags.DEFINE_string("master", default=None, help="Address of the master.")
flags.DEFINE_string(name="model_dir", default=None, help="Model directory.")


def create_tpu_estimator_columns(feature_columns, params, iters_per_loop=200):
  """Creates TPU estimator using feature columns.

  Args:
    feature_columns: Feature columns to use.
    params: Hparams for the model.
    iters_per_loop: Number of iterations to use per device loop invocation.

  Returns:
    An instance of TPUEstimator to use when training model.
  """
  dlrm_tpu_config = tf.estimator.tpu.TPUConfig(
      iterations_per_loop=iters_per_loop,
      per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
      .PER_HOST_V2)
  run_config = tf.estimator.tpu.RunConfig(
      master=FLAGS.master, tpu_config=dlrm_tpu_config)
  embedding_config_spec = tf.estimator.tpu.experimental.EmbeddingConfigSpec(
      feature_columns=feature_columns,
      optimization_parameters=tf.tpu.experimental
      .StochasticGradientDescentParameters(learning_rate=FLAGS.learning_rate),
      pipeline_execution_with_tensor_core=FLAGS.pipeline_execution,
      partition_strategy=FLAGS.partition_strategy)

  # Key "batch_size" is reserved by TPUEstimator.
  tpu_params = {k: v for k, v in params.items() if k != "batch_size"}
  return tf.estimator.tpu.TPUEstimator(
      model_fn=dlrm.create_model_fn(),
      config=run_config,
      use_tpu=True,
      train_batch_size=params["batch_size"],
      params=tpu_params,
      model_dir=FLAGS.model_dir,
      embedding_config_spec=embedding_config_spec)


def create_tpu_estimator_dicts(feature_to_config_dict,
                               table_to_config_dict,
                               params,
                               iters_per_loop=200):
  """Creates TPU estimator using feature config dicts.

  Args:
    feature_to_config_dict: Feature config dicts using TableConfig values.
    table_to_config_dict: Feature config dicts using FeatureConfig values.
    params: Hparams for the model.
    iters_per_loop: Number of iterations to use per device loop invocation.

  Returns:
    An instance of TPUEstimator to use when training model.
  """
  per_host_train = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2
  # SLICED: hangs - not supported with REDACTED?
  # PER_HOST_V1: the batch dimension of the dense inputs is not sharded
  # per_host_eval = tf.estimator.tpu.InputPipelineConfig.SLICED
  per_host_eval = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V1
  # per_host_eval = tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2

  dlrm_tpu_config = tf.estimator.tpu.TPUConfig(
      iterations_per_loop=iters_per_loop,
      per_host_input_for_training=per_host_train,
      eval_training_input_configuration=per_host_eval,
      experimental_host_call_every_n_steps=FLAGS.summary_every_n_steps)

  run_config = tf.estimator.tpu.RunConfig(
      master=FLAGS.master,
      model_dir=FLAGS.model_dir,
      # Disable checkpointing and use async checkpointing instead.
      save_checkpoints_steps=None,
      save_checkpoints_secs=None,
      log_step_count_steps=FLAGS.summary_every_n_steps,
      tpu_config=dlrm_tpu_config
  )

  embedding_config_spec = tf.estimator.tpu.experimental.EmbeddingConfigSpec(
      table_to_config_dict=table_to_config_dict,
      feature_to_config_dict=feature_to_config_dict,
      optimization_parameters=tf.tpu.experimental
      .StochasticGradientDescentParameters(learning_rate=FLAGS.learning_rate),
      pipeline_execution_with_tensor_core=FLAGS.pipeline_execution,
      # (for quality) gradient_multiplier
      partition_strategy=FLAGS.partition_strategy,
  )

  # Key "batch_size" is reserved by TPUEstimator.
  tpu_params = {k: v for k, v in params.items() if k != "batch_size"}
  return tf.estimator.tpu.TPUEstimator(
      model_fn=dlrm.create_model_fn(),
      config=run_config,
      use_tpu=True,
      train_batch_size=params["batch_size"],
      eval_batch_size=params["eval_batch_size"],
      params=tpu_params,
      embedding_config_spec=embedding_config_spec)


def load_global_step_from_checkpoint_dir(checkpoint_dir):
  """Returns the saved global step, or 0 if no checkpoint is readable."""
  try:
    # BUG FIX: the original used ``tf.training.*``, which does not exist
    # (the resulting AttributeError was swallowed by the bare except below,
    # so this always returned 0). The TF1 checkpoint APIs live in tf.train.
    checkpoint_reader = tf.train.NewCheckpointReader(
        tf.train.latest_checkpoint(checkpoint_dir))
    return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
  except:  # pylint: disable=bare-except
    return 0


def main(_):
  params = common.get_params()
  feature_to_config_dict, table_to_config_dict = fc.get_feature_tbl_config(
      params)

  # Builds an estimator using FeatureConfig and TableConfig, as defined in
  # third_party/tensorflow/python/tpu/tpu_embedding.py
  estimator = create_tpu_estimator_dicts(
      feature_to_config_dict,
      table_to_config_dict,
      params,
      iters_per_loop=FLAGS.summary_every_n_steps)

  train_input_fn = dataloader.CriteoTsvReader(
      file_path="/REDACTED/mb-d/home/tpu-perf-team/tayo/criteo/terabyte_mlperf/rs=6.3/train/terabyte_train*",
      is_training=True,
      use_synthetic_data=params["use_synthetic_data"])
  eval_input_fn = dataloader.CriteoTsvReader(
      file_path="/readahead/128M/REDACTED/iz-d/home/tpu-perf-team/tayo/criteo/terabyte_mlperf/rs=6.3/eval/terabyte_eval*",
      is_training=False,
      use_synthetic_data=params["use_synthetic_data"])

  if FLAGS.mode == "eval":
    # From Pytorch logging:
    #   num eval batches: 1361, each 64K
    #   num train batches: 64014, each 64K
    #   64013*4 @ 16k
    # From other source:
    #   num_train_samples = 4195197692
    if params["terabyte"]:
      # TODO(tayo): The following number drops remainder.
      num_eval_records = 89128960
      num_eval_steps = num_eval_records // FLAGS.eval_batch_size

    cycle_idx = 0
    # Run evaluation when there appears a new checkpoint.
    for ckpt in tf.train.checkpoints_iterator(FLAGS.model_dir, timeout=None):
      try:
        tf.logging.info("Beginning eval iteration {}.".format(cycle_idx + 1))
        cycle_idx = cycle_idx + 1

        start_time = timeit.default_timer()
        eval_metrics = estimator.evaluate(
            input_fn=eval_input_fn,
            steps=num_eval_steps,
            checkpoint_path=ckpt
            # checkpoint_path="/REDACTED/mb-d/home/tpu-perf-team/tayo/dlrm/model_dir_full_precision_0/model.ckpt-256000",
        )
        tf.logging.info(
            "Eval results: {}. Elapsed eval time: {:.4f}".format(
                eval_metrics, timeit.default_timer() - start_time))

        # Terminate eval job when final checkpoint is reached
        current_step = int(os.path.basename(ckpt).split("-")[1])
        if current_step >= FLAGS.train_steps:
          tf.logging.info(
              "Evaluation finished after training step %d", current_step)
          break
      except tf.errors.NotFoundError:
        # Checkpoints can be garbage-collected between listing and reading.
        tf.logging.info(
            "Checkpoint %s no longer exists, skipping checkpoint", ckpt)
  else:  # FLAGS.mode == "train"
    current_step = load_global_step_from_checkpoint_dir(FLAGS.model_dir)
    tf.logging.info("Training for {} steps at batch_size {}.".format(
        FLAGS.train_steps, FLAGS.batch_size))
    start_time = timeit.default_timer()
    hooks = []
    hooks.append(
        async_checkpoint.AsyncCheckpointSaverHook(
            checkpoint_dir=FLAGS.model_dir, save_steps=128000))
    estimator.train(
        input_fn=train_input_fn,
        max_steps=FLAGS.train_steps,
        hooks=hooks
    )


if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.disable_v2_behavior()
  common.define_dlrm_flags()
  absl_app.run(main)
Jeffrey Hollender is the co-founder and former CEO of the sustainable consumer product company Seventh Generation. Hollender is a professor of corporate sustainability at NYU Stern. He is also the co-CEO of Sustain Natural which he runs with his daughter. Hollender says sustainability is not just about the environment. It's about thinking long-term and considering the impact a business has on the planet and society. Hollender says treating employees well leads to faster growth and better financial performance. Sara Silverstein: So you created a sustainable business. Can you tell me what sustainability means to you? Jeffrey Hollender: Well sustainability means a systematic approach to thinking about the total impact a business has on the planet, as well as society. And we often are overly focused on the environmental dimension and it's critical that we think about how people are treated, people that work at the company, people in the supply chain, and issues like employee ownership are as much a part of sustainability as lowering CO2 emissions. The Vermont-based company Seventh Generation is a mission-driven producer of natural homecare products. Silverstein: And why is that important? Why does that make a company more sustainable, is it from a business perspective or for humanity's? Hollender: For both really, I mean, yes it's good for humanity, but it's better for business, so if you treat your employees well, if you allow them to be owners of the company, they're more loyal, they're more productive, there's less turnover, and Harvard has done a study that actually shows that this translates all the way down to earnings per share in companies that are sustainable that treat their employees better. Silverstein: And what is the main thing that you would impart on companies that are maximizing for shareholder profit? What's the one thing that they should really change to be more sustainable from a business perspective? 
Hollender: Yeah, well sustainability is going to improve your financial performance and that's what people don't understand. People think it's a trade-off: am I gonna be a nice, good citizen and treat the environment well or am I gonna maximize profits? And the truth is you will perform better financially by doing things like having a great sustainability program, by having women on your board and in your senior management and by treating your employees well and ensuring that they're owners of the company. Those things translate into better financial performance. Silverstein: We see a lot of different types of sustainable or socially responsible business. There's B Corps now, we have TOMS One for One, Seventh Generation is built on environmentally sustainable products. What different business models work and which ones are the most effective? Hollender: Sure, so I think the gold standard is B Corporations. Sustain Natural is a B Corporation, Seventh Generation is a B Corporation. There's thousands of companies that have gone through this process, so that's really really, I think, the best gold standard. But there's also organizations like The American Sustainable Business Council that has hundreds of thousands of members and that is a terrific organization to be part of if you want to influence public policy because, remember, there's only so much companies can do. We also have to change the business landscape that change things like such short-term focus on investing, if we have, you know, rules where you get long-term capital gains treatment after 12 months, that's not helping from a sustainability perspective because we need to think in a much longer term fashion. Silverstein: And what would you like to see change the most of America, in America policy-wise to help businesses think long-term? Hollender: Well I think we need a variety of things. 
We need a price on carbon because it doesn't work to try to encourage companies to emit less carbon when they can emit it for free. I also think that we need to see higher minimum wages. I think moving towards $15 is the right thing to do, it's good for business, it's good for our country. We have to deal with issues like wealth inequality. We have a tremendous inequality in this country, much more than people realize. One out of six people use food stamps, so we've got to take care of our workforce better than we do. We also need to deal with things like, you know, creating a longer-term framework, so we need to take the way capital gains are treated and give better incentives to people that make 10-, 20-, 30-year investments. Making a one-year investment is not what we should consider long-term. Silverstein: And for people that want to create socially responsible companies, that aren't necessarily nonprofits or focused only on social responsibility. How do they do that? What advice do you have for them? Books written by Jeffrey Hollender. Hollender: Again, there's great tools like B Corporation and the American Sustainable Business Council. There's also organizations like the Social Venture Network and Business for Social Responsibility. These are all organizations and tools that will help companies move down the road towards becoming more responsible and more sustainable. I've written a bunch of books, seven to be exact, and "The Responsibility Revolution" talks about what are the best practices and how to put them into work. Silverstein: And you're working on an eighth book about having a net positive impact. Can you explain to me what your thesis is? 
Hollender: Sure, you know, we've become very focused in the world of sustainability about being less bad, less CO2 emissions, less water pollution, less waste, and what we really need to do is turn that on its head and think about, what does it mean to be a truly good company where you leave the world every day better off than it was the day before, not how we minimize our negative impact. Silverstein: And can I ask what's the — what do you teach in the first day of your class at NYU Stern? Hollender: Well I start with this whole question of values because I think if you're going to be a responsible, sustainable business leader, you really need to be clear about what your values are and what means the most to you and what you're going to use to steer the direction of your business and you know, it's interesting, but you know, college students are just in the beginning phase of formulating those ideas so we start with values and philosophy and beliefs. We then also talk about systems thinking because we think about things in a very, very compartmentalized way. We don't think about the inner relationship between things and so we also do a session on systems thinking. And that's where we start, we get to business plans and financial forecasts and all those other things, but we need to lay the groundwork first for what is required to be a real responsible, sustainable leader. Silverstein: And what does that mean exactly, systems thinking? Hollender: Systems thinking means, I mean, if we think about many decisions that companies make, they make a decision that makes sense within the narrow guardrails that they're thinking and often those guardrails are shareholder performance. You know, what is good for my shareholders this quarter? And the problem is that often leads to very bad decisions. If we think about what's happening with corporate stock buybacks, we have a raging market where companies are buying back their stock. 
They're spending more money buying back their stock than they are on investing in research and development. That's not a good trend from a long-term perspective. That will not leave us a strong country with great competitive advantages against other businesses. Stock buybacks are great for pushing up the stock price in the short term, pushing up options so that people can exercise them and make money, but those are not things that companies should be spending that much money on if they're thinking about making long-term decisions.
"""Our traits must be able to deal with Absent values, for two reasons. First, the fact that we don't specify an optional value does not imply that the resulting resource will have a default. Second, when we do modification (PATCH) operations, we only specify the values we want to change. In practice, this means that all traits are optional. Mandatory entries are only enforced when creating new resources or setting from scratch.""" import traitlets as _traitlets HasTraits = _traitlets.HasTraits TraitError = _traitlets.TraitError Absent = _traitlets.Sentinel("Absent", "tornadowebapi.traitlets") class Int(_traitlets.Int): """An int trait, with support for lack of specified value""" default_value = Absent def validate(self, obj, value): if value == Absent: return value return super().validate(obj, value) class Unicode(_traitlets.Unicode): default_value = Absent def info(self): qualifiers = [] if self.metadata.get("strip", False): qualifiers.append("strip") if not self.metadata.get("allow_empty", True): qualifiers.append("not empty") text = ", ".join(qualifiers) if len(text): return self.info_text + "("+text+")" else: return self.info_text def validate(self, obj, value): if value == Absent: return value value = super().validate(obj, value) if self.metadata.get("strip", False): value = value.strip() if not self.metadata.get("allow_empty", True) and len(value) == 0: self.error(obj, value) return value class Label(Unicode): """A label is a string that is not none and is automatically stripped""" def __init__(self): super().__init__(allow_empty=False, strip=True) class Enum(_traitlets.Enum): default_value = Absent def validate(self, obj, value): if value == Absent: return value return super().validate(obj, value) class Bool(_traitlets.Bool): default_value = Absent def validate(self, obj, value): if value == Absent: return value return super().validate(obj, value) class Float(_traitlets.Float): default_value = Absent def validate(self, obj, value): if value == Absent: 
return value return super().validate(obj, value) class List(_traitlets.List): def make_dynamic_default(self): return Absent def validate(self, obj, value): if value == Absent: return value return super().validate(obj, value) class Dict(_traitlets.Dict): def make_dynamic_default(self): return Absent def validate(self, obj, value): if value == Absent: return value return super().validate(obj, value) class OneOf(_traitlets.Instance): """Marks a one to one relationship with a resource or resourcefragment.""" def make_dynamic_default(self): return Absent def validate(self, obj, value): if value == Absent: return value return super().validate(obj, value)
When some of our California kids were here this summer the dinner conversation turned to Pizookies. At first when my step-son Bryan said Pizookie I wasn’t sure what I heard him say. I said, “will you spell that for me?” He did one better and Googled it for me. This is a pizza cookie all rolled into one single serving baking dish. He and our grandson Sean went on to poetically describe one of their favorite desserts from BJ’s Restaurant. The more they talked the more excited they got. They tossed around words like chock-full of chocolate chips, warm and gooey, topped with quickly melting vanilla ice cream, deep dish cookie, served warm from the oven. They had me at warm and gooey. Before dinner was over I was ready to tackle this project and give my boys a taste test! Pizookie is a BJ’s copyrighted dessert and they serve it in many flavors. But my kids wanted their favorite – pure chocolate chip cookie, no caramel, no added salt, just the pure thing. I have these cute little personal 4″ round springform pans. Perfect! Many recipes can be found all over the internet but I chose to use a favorite chocolate chip cookie recipe loaded with half milk chocolate Ghirardelli light and dark chips. The cookie is slightly “under-baked” and still soft in the middle. Oh how I wish you could taste this warm, chocolatey mess. It is off the charts wonderful. When the ice cream melts it is truly a cookies and cream moment. As soon as my sous-chef dish-washing mom sees this blog post she is going to want me to make these again. Guess we have Pizookies in our near future! When serving my Chief Pizookie eater a little chocolate syrup topped his dessert! Don’t forget the Wicked Women of the Bible book giveaway on Tuesday, the 29th! Inspired by BJ's Restaurant Pizookie dessert ... a pizza cookie. This is pure and simple cookies and cream at its best ... warm and gooey! In a large bowl beat butter with an electric mixer until creamy.
Add the sugars and beat until light and fluffy, about 2 minutes. Add the vanilla extract and egg and beat well. Add in the flour, salt, and baking soda. Mix on low speed until dry ingredients are incorporated. Mix in chocolate chips by hand. Divide the batter evenly between 6 lightly greased ramekins or springform pans. Spread evenly through each pan. Place ramekins or small pans onto a baking sheet and bake for 15 minutes or until golden-brown on the edges and still a little golden and soft in the middle. Remove from oven and cool for at least 5 minutes before serving. Serve with or without a scoop of vanilla ice cream. Cookie recipe adapted from thekitchn recipe. Ghirardelli chocolate is an excellent choice for this recipe, but your favorite chocolate works well too! I am thrilled with my new index. My only problem is I don’t have enough time to play with it and see all it will do! This is so easy and really good. Just don’t over-cook!
""" Runtime options. """ class Option(object): """ Shell option. """ def __init__(self, values, current_value): self.values = values self._value = current_value self._callbacks = [] def on_change(self, callback): self._callbacks.append(callback) def get(self): return self._value def set(self, value): self._value = value for c in self._callbacks: c() class BooleanOption(Option): def __init__(self, current_value): assert isinstance(current_value, bool) Option.__init__(self, ['on', 'off'], 'on' if current_value else 'off') def get_value_as_bool(self): return self._value == 'on' class Options(object): def __init__(self): self._options = { 'keep-panes-open': BooleanOption(False), # Other options to implement: # 'colorscheme': Option(['dark_background', 'light_background'], 'dark_background'), # 'interactive': BooleanOption(True), # 'interactive': BooleanOption(True), } def __getitem__(self, name): return self._options[name] def items(self): return self._options.items()
Oh my goodness, these pictures of Charlotte are gorgeous!! She is so beautiful and blossoming along with the rest of God's creation! You are such a great mommy. Sometimes Charlotte barely resembles the child you brought home a year ago. I am so proud of you and Jason...and happy for you, too! Korea is so very beautiful in the spring! Charlotte's outfit in this post is so adorable! I love seeing her grow into such a little lady!
#!/usr/bin/env python3 """ Script to trangle over a directory full of wikipedia dumps as produced by WikiExtractor.py and add them to the database. """ import glob import os import constants import model import sqlalchemy from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from model import Document from model import Sentence from model import Tag engine = create_engine(constants.THEDB) Session = sessionmaker(bind=engine) Base = model.Base Base.metadata.create_all(engine) def save_document(title, session): """Create a new document; return it.""" ## XXX(alexr): need to handle source languages... document = Document(title, "bob", "en") session.add(document) session.commit() print("added document:", document) return document def get_tag(tagname, session): """Return or create a Tag object for this tag name.""" tag = session.query(Tag).filter_by(text=tagname).first() if not tag: tag = Tag(tagname) session.add(tag) session.commit() return tag def iterate_through_file(fn): docid = None session = Session() with open(fn) as infile: for line in infile: line = line.strip() if line.startswith("###"): splitted = line[3:].split("|||") title = splitted[0] ## save document, get docid. document = save_document(title, session) docid = document.id tagnames = splitted[1:] ## tag that document with these tags. for tagname in tagnames: tag = get_tag(tagname, session) document.tags.append(tag) continue ## Otherwise, we have a sentence. assert docid, "We're not currently in a document??" sent = Sentence(line, docid) session.add(sent) session.commit() def main(): import sys document_dir = sys.argv[1] fns = sorted(glob.glob("{0}/wiki*".format(document_dir))) print("going through {0} files, each with many articles.".format(len(fns))) for fn in fns: iterate_through_file(fn) if __name__ == "__main__": main()
We are equipped with 3 modern and efficient printing machines which enable us to react quickly and flexibly to even the most demanding requirements of our clients. We are able to deal with a high volume of orders of different kinds in a very short period of time. We can handle both high and low volume orders, offering not only great quality but also a very affordable price. Our Company’s flagship is an 8-colour sheet-fed printing press KBA Rapida 75-8 with the option for duplex printing. This technology takes us to a higher level when it comes to the quality and quantity of the services we provide. Our printing house has been offering complex typographic services in the area of sheet-fed offset printing for more than 20 years. We use new and modern technologies for offset printing and book binding. We keep investing in advanced technologies, which results in improvement of our products and expansion of the services we provide. We have been working with VALEUR since 2006. For us, they are our reliable partner in the area of offset printing. They take care of everything, from production to logistics, which means I don’t have to worry about anything.
from itertools import cycle import random import sys from Tilecoder import numTilings, tilecode, numTiles, tiles import pygame from pygame.locals import * from pylab import * import random FPS = 30 SCREENWIDTH = 280 SCREENHEIGHT = 512 # amount by which base can maximum shift to left PIPEGAPSIZE = 100 # gap between upper and lower part of pipe BASEY = SCREENHEIGHT * 0.79 # image, sound and hitmask dicts IMAGES, SOUNDS, HITMASKS = {}, {}, {} SHARED = {} # list of all possible players (tuple of 3 positions of flap) PLAYERS_LIST = ( # red bird ( 'assets/sprites/redbird-upflap.png', 'assets/sprites/redbird-midflap.png', 'assets/sprites/redbird-downflap.png', ), # blue bird ( # amount by which base can maximum shift to left 'assets/sprites/bluebird-upflap.png', 'assets/sprites/bluebird-midflap.png', 'assets/sprites/bluebird-downflap.png', ), # yellow bird ( 'assets/sprites/yellowbird-upflap.png', 'assets/sprites/yellowbird-midflap.png', 'assets/sprites/yellowbird-downflap.png', ), ) # list of backgrounds BACKGROUNDS_LIST = ( 'assets/sprites/background-day.png', 'assets/sprites/background-night.png', ) # list of pipes PIPES_LIST = ( 'assets/sprites/pipe-green.png', 'assets/sprites/pipe-red.png', ) def start(): global SCREEN, FPSCLOCK pygame.init() FPSCLOCK = pygame.time.Clock() SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT)) pygame.display.set_caption('Flappy Bird') # numbers sprites for score display IMAGES['numbers'] = ( pygame.image.load('assets/sprites/0.png').convert_alpha(), pygame.image.load('assets/sprites/1.png').convert_alpha(), pygame.image.load('assets/sprites/2.png').convert_alpha(), pygame.image.load('assets/sprites/3.png').convert_alpha(), pygame.image.load('assets/sprites/4.png').convert_alpha(), pygame.image.load('assets/sprites/5.png').convert_alpha(), pygame.image.load('assets/sprites/6.png').convert_alpha(), pygame.image.load('assets/sprites/7.png').convert_alpha(), pygame.image.load('assets/sprites/8.png').convert_alpha(), 
pygame.image.load('assets/sprites/9.png').convert_alpha() ) # game over sprite IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha() # message sprite for welcome screen IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha() # base (ground) sprite IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha() # sounds if 'win' in sys.platform: soundExt = '.wav' else: soundExt = '.ogg' SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt) SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt) SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt) SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt) SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt) # select random background sprites randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1) IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert() # select random player sprites randPlayer = random.randint(0, len(PLAYERS_LIST) - 1) IMAGES['player'] = ( pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(), pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(), ) # select random pipe sprites pipeindex = random.randint(0, len(PIPES_LIST) - 1) IMAGES['pipe'] = ( pygame.transform.rotate( pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180), pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), ) # hismask for pipes HITMASKS['pipe'] = ( getHitmask(IMAGES['pipe'][0]), getHitmask(IMAGES['pipe'][1]), ) # hitmask for player HITMASKS['player'] = ( getHitmask(IMAGES['player'][0]), getHitmask(IMAGES['player'][1]), getHitmask(IMAGES['player'][2]), ) movementInfo = showWelcomeAnimation() SHARED['score'] = 0 SHARED['playerIndex'] = 0 SHARED['loopIter'] = 0 SHARED['playerIndexGen'] = movementInfo['playerIndexGen'] SHARED['playerx'] = int(SCREENWIDTH * 0.2) SHARED['playery'] = 
movementInfo['playery'] SHARED['basex'] = movementInfo['basex'] SHARED['baseShift'] = IMAGES['base'].get_width() - IMAGES['background'].get_width() SHARED['newPipe'] = getRandomPipe() SHARED['upperPipes'] = [{'x': SCREENWIDTH + 20, 'y': SHARED['newPipe'][0]['y']}] SHARED['lowerPipes'] = [{'x': SCREENWIDTH + 20, 'y': SHARED['newPipe'][1]['y']}] SHARED['pipeVelX'] = -5 SHARED['playerVelY'] = -9 SHARED['playerMaxVelY'] = 20 SHARED['playerAccY'] = 3 SHARED['playerFlapAcc'] = -7 SHARED['playerFlapped'] = False SHARED['flap'] = 1 SHARED['birdY'] = 244 SHARED['pipeX'] = 300 SHARED['pipeY'] = 0 SHARED['reward'] = 1 SHARED['state'] = (None,None) return movementInfo def mainGame(movementInfo,SHARED): score = SHARED['score'] playerIndex = SHARED['playerIndex'] loopIter = SHARED['loopIter'] playerIndexGen = SHARED['playerIndexGen'] playerx, playery = SHARED['playerx'],SHARED['playery'] basex = SHARED['basex'] baseShift = SHARED['baseShift'] # get 2 new pipes to add to upperPipes lowerPipes list newPipe = SHARED['newPipe'] pipeVelX = SHARED['pipeVelX'] # player velocity, max velocity, downward accleration, accleration on flap playerVelY = SHARED['playerVelY'] # player's velocity along Y, default same as playerFlapped playerMaxVelY = SHARED['playerMaxVelY'] # max vel along Y, max descend speed playerAccY = SHARED['playerAccY'] # players downward accleration playerFlapAcc = SHARED['playerFlapAcc'] # players speed on flapping playerFlapped = SHARED['playerFlapped'] # True when player flaps flap = SHARED['flap'] # while True: if(flap==1): if playery > -2 * IMAGES['player'][0].get_height(): playerVelY = playerFlapAcc playerFlapped = True SHARED['playerVelY'] = playerVelY SHARED['playerFlapped'] = True # check for crash here crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex}, SHARED['upperPipes'], SHARED['lowerPipes']) if crashTest[0]: SHARED['reward'] = -1000#-abs(SHARED['playery']-SHARED['pipeY']) return { 'y': playery, 'groundCrash': crashTest[1], 'basex': 
basex, 'upperPipes': SHARED['upperPipes'], 'lowerPipes': SHARED['lowerPipes'], 'score': score, 'playerVelY': playerVelY, } # check for score playerMidPos = playerx + IMAGES['player'][0].get_width() / 2 for pipe in SHARED['upperPipes']: pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2 if pipeMidPos <= playerMidPos < pipeMidPos + 4: score += 1 SHARED['score'] += 1 # SOUNDS['point'].play() # playerIndex basex change if (loopIter + 1) % 3 == 0: playerIndex = playerIndexGen.next() SHARED['playerIndex'] = playerIndex loopIter = (loopIter + 1) % 30 basex = -((-basex + 100) % baseShift) SHARED['loopIter'] = loopIter SHARED['basex'] = basex # player's movement if playerVelY < playerMaxVelY and not playerFlapped: playerVelY += playerAccY SHARED['playerVelY'] = playerVelY if playerFlapped: playerFlapped = False SHARED['playerFlapped'] = False playerHeight = IMAGES['player'][playerIndex].get_height() playery += min(playerVelY, BASEY - playery - playerHeight) playery = max(0,playery) SHARED['playery'] = playery # move pipes to left for uPipe, lPipe in zip(SHARED['upperPipes'], SHARED['lowerPipes']): uPipe['x'] += pipeVelX lPipe['x'] += pipeVelX # add new pipe when first pipe is about to touch left of screen if 0 < SHARED['upperPipes'][0]['x'] < 10: SHARED['newPipe'] = getRandomPipe() SHARED['upperPipes'].append(newPipe[0]) SHARED['lowerPipes'].append(newPipe[1]) # remove first pipe if its out of the screen if SHARED['upperPipes'][0]['x'] < -IMAGES['pipe'][0].get_width(): SHARED['upperPipes'].pop(0) SHARED['lowerPipes'].pop(0) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) for uPipe, lPipe in zip(SHARED['upperPipes'], SHARED['lowerPipes']): SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y'])) SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y'])) SCREEN.blit(IMAGES['base'], (basex, BASEY)) # print score so player overlaps the score showScore(score) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery)) SHARED['birdY'] = playery SHARED['pipeX'] = 
lPipe['x'] SHARED['pipeY'] = lPipe['y'] #print flap pygame.display.update() FPSCLOCK.tick(FPS) SHARED['reward'] = 1 SHARED['state'] = (SHARED['pipeX'],SHARED['pipeY']-SHARED['playery']) return 1 def showWelcomeAnimation(): """Shows welcome screen animation of flappy bird""" # index of player to blit on screen playerIndex = 0 playerIndexGen = cycle([0, 1, 2, 1]) # iterator used to change playerIndex after every 5th iteration loopIter = 0 playerx = int(SCREENWIDTH * 0.2) playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2) messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2) messagey = int(SCREENHEIGHT * 0.12) basex = 0 # amount by which base can maximum shift to left baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width() # player shm for up-down motion on welcome screen playerShmVals = {'val': 0, 'dir': 1} return {'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen,} while True: for event in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): pygame.quit() sys.exit() if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP): # make first flap sound and return values for mainGame SOUNDS['wing'].play() return { 'playery': playery + playerShmVals['val'], 'basex': basex, 'playerIndexGen': playerIndexGen, } # adjust playery, playerIndex, basex if (loopIter + 1) % 5 == 0: playerIndex = playerIndexGen.next() loopIter = (loopIter + 1) % 30 basex = -((-basex + 4) % baseShift) playerShm(playerShmVals) # draw sprites SCREEN.blit(IMAGES['background'], (0,0)) SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery + playerShmVals['val'])) SCREEN.blit(IMAGES['message'], (messagex, messagey)) SCREEN.blit(IMAGES['base'], (basex, BASEY)) pygame.display.update() FPSCLOCK.tick(FPS) def playerShm(playerShm): """oscillates the value of playerShm['val'] between 8 and -8""" if abs(playerShm['val']) == 8: playerShm['dir'] *= -1 if 
playerShm['dir'] == 1: playerShm['val'] += 1 else: playerShm['val'] -= 1 def getRandomPipe(): """returns a randomly generated pipe""" # y of gap between upper and lower pipe gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE)) gapY += int(BASEY * 0.2) pipeHeight = IMAGES['pipe'][0].get_height() pipeX = SCREENWIDTH + 10 return [ {'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe {'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe ] def showScore(score): """displays score in center of screen""" scoreDigits = [int(x) for x in list(str(score))] totalWidth = 0 # total width of all numbers to be printed for digit in scoreDigits: totalWidth += IMAGES['numbers'][digit].get_width() Xoffset = (SCREENWIDTH - totalWidth) / 2 for digit in scoreDigits: SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1)) Xoffset += IMAGES['numbers'][digit].get_width() def checkCrash(player, upperPipes, lowerPipes): """returns True if player collders with base or pipes.""" pi = player['index'] player['w'] = IMAGES['player'][0].get_width() player['h'] = IMAGES['player'][0].get_height() # if player crashes into ground if player['y'] + player['h'] >= BASEY - 1: return [True, True] else: playerRect = pygame.Rect(player['x'], player['y'], player['w'], player['h']) pipeW = IMAGES['pipe'][0].get_width() pipeH = IMAGES['pipe'][0].get_height() for uPipe, lPipe in zip(upperPipes, lowerPipes): # upper and lower pipe rects uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH) lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH) # player and upper/lower pipe hitmasks pHitMask = HITMASKS['player'][pi] uHitmask = HITMASKS['pipe'][0] lHitmask = HITMASKS['pipe'][1] # if bird collided with upipe or lpipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if uCollide or lCollide: return [True, False] return [False, False] def pixelCollision(rect1, rect2, hitmask1, hitmask2): """Checks if two 
objects collide and not just their rects""" rect = rect1.clip(rect2) if rect.width == 0 or rect.height == 0: return False x1, y1 = rect.x - rect1.x, rect.y - rect1.y x2, y2 = rect.x - rect2.x, rect.y - rect2.y for x in xrange(rect.width): for y in xrange(rect.height): if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]: return True return False def getHitmask(image): """returns a hitmask using an image's alpha.""" mask = [] for x in range(image.get_width()): mask.append([]) for y in range(image.get_height()): mask[x].append(bool(image.get_at((x,y))[3])) return mask def actionTileCode(F,S,A): tilecqode(S[0],S[1],F) F = [x + A*(numTilings*tiles*tiles) for x in F] return F def getExpected(q): expectedVal = 0 a = argmax(q) for i in range(2): if(a==i): expectedVal = expectedVal + (1 - (epsilon/2))*q[i] else: expectedVal = expectedVal + (epsilon/2)*q[i] return expectedVal def eligibilityTrace(zerovec,F): zerovec = alpha*lmbda*zerovec zerovec[F] = 1 return zerovec episodeNum = 100000 count = 0 alpha = 0.1/numTilings gamma = 1 lmbda = 0.5 epsilon = 0.1 n = numTiles * 2 F = [-1]*numTilings w = -0.01*rand(n) returnSum = 0 while(count<episodeNum): moveInfo = start() crashInfo = mainGame(moveInfo, SHARED) S = SHARED['state'] zerovec = zeros(n) G = 0 A = 0 F = actionTileCode(F,S,A) zerovec[F] = 1 while(crashInfo == 1): crashInfo = mainGame(moveInfo,SHARED) S = SHARED['state'] R = SHARED['reward'] G = G + R delta = R - sum(w[F]) q = zeros(2) if(crashInfo == 1): for a in range(2): F = actionTileCode(F,S,a) q[a] = sum(w[F]) else: w = w + alpha*delta*zerovec break expected_q = getExpected(q) delta = delta + expected_q A = argmax(q) if rand() >= epsilon else random.choice([0,1]) SHARED['flap'] = A w = w + alpha*delta*zerovec F = actionTileCode(F,S,A) zerovec = eligibilityTrace(zerovec,F) count += 1 returnSum = returnSum + G print 'Return from Episode ', count, 'is ', G if(G>1000): break print returnSum/episodeNum pygame.quit()
Saw an interesting opinion written by George F. Will recently in the newspaper. He had an interesting perspective he called Easterbrook's "Law of Doomsaying" where he noted that those successful at the subject predicted dire events to occur between 5 and 10 years into the future. The predicted global calamities should be near enough to invoke serious concern but distant enough that errors can be overlooked. Very interesting. Sounds a lot like global warming, and it was especially appropriate that he mentioned the widely accepted view of the 1970's that a new Ice Age was imminent. This was viewed as a "likely source of wholesale death and misery" and that the world's climatologists "are agreed that we must prepare for the next ice age". The article mentions one other point that I thought very interesting. The unstated premise of such eco-pessimism is that "environmental conditions are, or recently were, optimal". Thus they must be preserved, right? (Shake your head up and down to signify yes here.) Brings to mind another saying: those who don't remember history are doomed to repeat it. Interesting, huh. In geological terms we may well be close to the next ice age, in human terms we are much closer to a man made global warming. I've no clue as to which side of the debate you stand. Is naturally occurring change fine but manmade change just automatically bad? Actually, I might be generally agreeable with that, but I need real proof and not lots of people saying (shouting) that of course there is a global warming problem. High CO2 levels and a warmer earth have many beneficial effects. Global warming is a natural and very useful physical phenomenon. If it didn't exist, the earth would look like a big snowball. By orders of magnitude, the biggest contributor to this natural phenomenon is water. Did I hear someone ask how water vapor in the atmosphere will be affected by increased levels of CO2?
There are lots of topics to explore in this subject, but very few of the topics include the expressions "of course" or "for sure". Now, when you say "we are much closer to a man made global warming", then I say "prove it". A bunch of government paid people studying the problem might produce the correct answer. But for now there's very little that they say that I would accept as indisputable. Again, show me the model that accurately includes historically known past global warm and cold periods. That's all I'm asking for. Don't have it? Then preface your remarks with "some speculate" rather than "It's known". That's how science should work. Edited by djack77494, 06 January 2010 - 05:49 PM. I must admit I didn't realize there still was any debate about global warming. Is man made change bad? Of course it is if it has an adverse impact on the lives of millions (and potentially billions) of people, not to mention the extinction of many plant and animal species. If it had no adverse impact we wouldn't care about it. The phenomenon you're referring to is actually the Greenhouse effect, and yes this is essential to keep the planet livable. On the other hand Global warming in this context is a man-made global increase in (or in some parts decrease in) temperature. A model that predicts every past change is a naive and unrealistic expectation; what you can easily show is that increasing CO2 increases the greenhouse effect, which we have already agreed is a real and proven concept. The existing models do take into account the net change in water vapor in the atmosphere. It is known that an increase in CO2 will increase the greenhouse effect and will increase the global average temperature; additionally, there is no other plausible explanation for the recent rises in atmospheric temperature and the evidence is, in the majority of people's opinion, overwhelming. Whether you choose to believe it or not is down to the individual.
#------------------------------------------------------------------------------- # simple client to test REST API of Job Launcher import httplib import json from cStringIO import StringIO #------------------------------------------------------------------------------- authority = "localhost:8181" #------------------------------------------------------------------------------- def callGetJobStatus( joblist ): conn = httplib.HTTPConnection( authority ) # encode the request params params = { 'joblist': joblist, } payload = json.dumps( params, ensure_ascii=False ) payload.encode( 'utf-8' ) # define the params encoding headers = { 'Content-Type': 'application/json; charset=utf-8'} # call the remote service cleanURI = '/jobs' conn.request( 'GET', cleanURI, body=payload, headers=headers ) # get the result retValues = {} response = conn.getresponse() if response.status == 200: try: retValues = json.loads( response.read() ) except ValueError: print "error: can't decode json response" else: print "error :", response.status, response.reason print retValues #------------------------------------------------------------------------------- def callRunJob( user, params ): conn = httplib.HTTPConnection( authority ) # encode the request params params = { 'user': user, 'program': 'trufa', 'params': params, } payload = json.dumps( params, ensure_ascii=False ) payload.encode( 'utf-8' ) # define the params encoding headers = { 'Content-Type': 'application/json; charset=utf-8'} # call the remote service cleanURI = '/jobs' conn.request( 'PUT', cleanURI, body=payload, headers=headers ) # get the result retValues = {} response = conn.getresponse() if response.status == 200: try: retValues = json.loads( response.read() ) except ValueError: print "error: can't decode json response" else: print "error :", response.status, response.reason print retValues #------------------------------------------------------------------------------- def callJobStatus( jobid ): conn = httplib.HTTPConnection( 
authority ) # call the remote service cleanURI = '/jobs/'+str(jobid) conn.request( 'GET', cleanURI ) # get the result retValues = {} response = conn.getresponse() if response.status == 200: try: retValues = json.loads( response.read() ) except ValueError: print "error: can't decode json response" else: print "error :", response.status, response.reason print retValues #------------------------------------------------------------------------------- def callCancelJob( jobid ): conn = httplib.HTTPConnection( authority ) # encode the request params params = { 'cancel': True, } payload = json.dumps( params, ensure_ascii=False ) payload.encode( 'utf-8' ) # define the params encoding headers = { 'Content-Type': 'application/json; charset=utf-8'} # call the remote service cleanURI = '/jobs/'+str(jobid) conn.request( 'POST', cleanURI, body=payload, headers=headers ) # get the result retValues = {} response = conn.getresponse() if response.status == 200: try: retValues = json.loads( response.read() ) except ValueError: print "error: can't decode json response" else: print "error :", response.status, response.reason print retValues #-------------------------------------------------------------------------------
Apple doesn't provide an acknowledgement loop for email attachments sent by Mail Drop. Several readers over just a couple of weeks asked about Mail Drop, a feature added in OS X 10.10 Yosemite to upload large email attachments from Apple’s Mail app to iCloud rather than pass them through your email host’s servers. That’s because many mail hosts put a size limit (say, 10MB) on attachments, both on the sending and receiving end. With Mail Drop, a recipient receives a link to one or more attachments that they can click to download. How do I confirm the recipient of my file has received it or downloaded it, other than by contacting them? Short answer: You can’t! This may be due to age: Apple seemingly hasn’t revisited the system since its introduction, and it already feels a bit creaky. The email message and Web interface to handle links and downloads is very thin and feels longer out of date than it is. The Mail app will prompt about a large attachment if you haven’t set the default behavior for an acount to use Mail Drop. Apple could redesign the system, because the email links already take a recipient to a Web page at which they click to start retrieving the file. That page could have an opt-in button that lets the downloader also agree to inform the sender. Recipients have 30 days to download a Mail Drop attachment, and the link sent in email remains active until then. If you need to be sure someone downloads a file, you can opt instead for a service that informs you when that happens, like WeTransfer, which allows sending files up to 2GB each at no cost, or up to 20GB with a paid account. Another reader asked, conversely, how you retrieve a file from Mail Drop. When you send a message Mail identifies as containing attachments greater than your mail service can handle, it either prompts you to confirm uploading or automatically uploads the attachments to iCloud, depending on your configuration. 
The recipient gets the email with links to download the attachment; the sender doesn’t see those links at all. You can only download files from Mail Drop if you receive those file links.
""" Spherical Harmonics http://www.sjbrown.co.uk/?article=sharmonics """ import numpy import math factorial = lambda n:reduce(lambda a,b:a*(b+1),range(n),1) def evaluate_SH(angles, degree, dl=1): theta = angles[0] phi = angles[1] if (dl==2): coeff_length = (degree+1)*(degree+2)/2 B = numpy.zeros([1,coeff_length]) Btheta = numpy.zeros([1,coeff_length]) Bphi = numpy.zeros([1,coeff_length]) elif (dl==1): coeff_length = (degree+1)*(degree+1) B = numpy.zeros([1,coeff_length]) Btheta = numpy.zeros([1,coeff_length]) Bphi = numpy.zeros([1,coeff_length]) for l in range(0,degree+1,dl): if (dl==2): center = (l+1)*(l+2)/2 - l elif (dl==1): center = (l+1)*(l+1) - l lconstant = math.sqrt((2*l + 1)/(4*math.pi)) center = center - 1 Plm,dPlm = P(l,0,theta) B[0,center] = lconstant*Plm Btheta[0,center] = lconstant * dPlm Bphi[0,center] = 0 for m in range(1,l+1): precoeff = lconstant * math.sqrt(2.0)*math.sqrt(factorial(l - m)/(factorial(l + m)*1.0)) if (m % 2 == 1): precoeff = -precoeff Plm,dPlm = P(l,m,theta) pre1 = precoeff*Plm pre2 = precoeff*dPlm B[0,center + m] = pre1*math.cos(m*phi) B[0,center - m] = pre1*math.sin(m*phi) Btheta[0,center+m] = pre2*math.cos(m*phi) Btheta[0,center-m] = pre2*math.sin(m*phi) Bphi[0,center+m] = -m*B[0,center-m] Bphi[0,center-m] = m*B[0,center+m] return B,Btheta,Bphi def real_spherical_harmonics(angles, coeff, degree, dl=1): """ Given a real-valued spherical function represented by spherical harmonics coefficients, this function evaluates its value and gradient at given spherical angles SYNTAX: [f, g] = real_spherical_harmonics(angles, coeff, degree, dl); INPUTS: angles - [theta,phi] are colatitude and longitude, respectively coeff - real valued coefficients [a_00, a_1-1, a_10, a_11, ... 
] degree - maximum degree of spherical harmonics ; dl - {1} for full band; 2 for even order only OUTPUTS: f - Evaluated function value f = \sum a_lm*Y_lm g - derivatives with respect to theta and phi """ B,Btheta,Bphi = evaluate_SH(angles, degree, dl) f = sum(-numpy.dot(B,coeff)) g = numpy.array((-sum(numpy.dot(Btheta,coeff)), -sum(numpy.dot(Bphi,coeff)))) return f,g def P(l,m,theta): """ The Legendre polynomials are defined recursively """ pmm = 1 dpmm = 0 x = math.cos(theta) somx2 = math.sin(theta) fact = 1.0 for i in range(1,m+1): dpmm = -fact * (x*pmm + somx2*dpmm) pmm = pmm*(-fact * somx2) fact = fact+2 # No need to go any further, rule 2 is satisfied if (l == m): Plm = pmm dPlm = dpmm return Plm,dPlm # Rule 3, use result of P(m,m) to calculate P(m,m+1) pmmp1 = x * (2 * m + 1) * pmm dpmmp1 = (2*m+1)*(x*dpmm - somx2*pmm) # Is rule 3 satisfied? if (l == m + 1): Plm = pmmp1 dPlm = dpmmp1 return Plm, dPlm # Finally, use rule 1 to calculate any remaining cases pll = 0 dpll = 0 for ll in range(m + 2,l+1): # Use result of two previous bands pll = (x * (2.0 * ll - 1.0) * pmmp1 - (ll + m - 1.0) * pmm) / (ll - m) dpll = ((2.0*ll-1.0)*( x*dpmmp1 - somx2*pmmp1 ) - (ll+m-1.0)*dpmm) / (ll - m) # Shift the previous two bands up pmm = pmmp1 dpmm = dpmmp1 pmmp1 = pll dpmmp1 = dpll Plm = pll dPlm = dpll return Plm,dPlm
Whilst in Italy last October, my father Vince made contact with the producer of a show called “Cara Francesca”. The show is based in Italy but is broadcast internationally on RAI INTERNATIONAL which is the equivalent of our ABC. Cara Francesca tells stories of Italians around the world that have contributed to the wealth of Italy from a cultural and social perspective. They recognise the contributions that the Italian Migration has made to the Italian Economy. In the post-war period, thousands of Italians – mainly from Sicily, Calabria, Campagna (Napoli) as well as from the North (Veneto, Piemonte) emigrated to countries such as Australia, USA, Argentina, Brazil and Canada. The ‘burden’ on the ‘State’ was reduced – post-war Italy had less stress on the social security system due to the population working overseas (many Italians worked in Switzerland and Belgium and still paid taxes in Italy). Brand “Italy” was exported to the world and Food, Fashion, Cars and Machinery created a global demand for Italian products – from Fusilli to Ferraris!! Vince’s story is a typical migrant story and has been recognised by the show Cara Francesca where he tells his story of post-war Italy – the hardships and the joys of building a business and raising a family. Please see the link below for Vince’s story (in Italian). Vince’s book is still available (in English), so please ask us for a copy and we will gladly send you one!!!
# This file is part of EsperaDesespera.
#
# Copyright (C) 2011 by Alberto Ariza, Juan Elosua & Marta Alonso
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is provided 'as-is', without any express or implied warranty.
# In no event will the authors be held liable for any damages arising from the
# use of this software.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Python 2 script: parses a list of hospital waiting-list CSV reports
# (Galician health service) and writes normalized fact/definition CSVs.

import csv, re

# Dictionary names.
# NOTE(review): 'DictTipo' looks like a typo for 'dictTipo', and this
# tuple is never referenced below — confirm it can be removed.
dicts = ('dictHosp','dictServ','DictTipo','dictFecha','dictInte')
# Dictionary of the different hospitals (name -> generated id string)
dictHosp = {}
# Dictionary of the different service types
dictTipo = {}
# Dictionary of the different services
dictServ = {}
# Dictionary of the different analysis dates
dictFecha = {}
# Dictionary of the waiting-time intervals to take into account
dictInte = {'0 - 3 m': '1', '3 - 6 m' : '2', '6 - 12 m' : '3', 'más 12 m' : '4'}
# Marker for the start of the interval columns
s0a3 = '0 - 3 m'
# Marker for the end of the interval columns
total = 'TOTAL'
# "Average waiting time" column header (Galician)
tempo = 'Tempo medio de espera'
#fIndex = 'prueba.txt'
# Index file: one CSV path per line; chars 4-6 of each path encode the type
fIndex = 'CsvfilesIndex.txt'
# CEX (outpatient) reports changed format starting in 2006
CEXServChangeDate = '2006'
# From this date (YYYYMM) a per-hospital totals report was added
reportChangeDate = 200912

# Return True when the field parses as a number (decimal comma not handled
# here; callers pre-replace ',' with '.').
def is_numeric(val):
    try:
        float(val)
    except ValueError, e:
        return False
    return True

# Process the per-hospital CSVs: emits one TotHosp row per hospital.
# l is a flattened CSV row list; tipo is the 3-char service-type code.
def procesarHosp(l,tipo):
    try:
        print 'procesarHosp'
        #print 'longitud de la lista: ', len(l)
        # Locate the analysis date (dd/mm/20yy) inside the header cell.
        i = l[1].index('/20')
        fechaAnalisis = l[1][i-5:i+5]
        if (not dictFecha.has_key(fechaAnalisis)):
            dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
        if (not dictTipo.has_key(tipo)):
            dictTipo[tipo] = str(len(dictTipo)+1)
        i = l.index(s0a3)
        j = l.index(total)
        """La longitud de cada fila hay que sumarle 1 para incluir ambos extremos y 2 más para la cabecera y tiempo medio"""
        # (The string above explains: row length = interval span + 1 to
        # include both endpoints, + 2 more for the header and mean time.)
        longfila = (j-i+3)
        ifila = j+2
        ffila = ifila+longfila
        # Walk the flattened table row by row; skip the TOTAL summary row.
        while (ffila <= len(l)):
            if (l[ifila] != total):
                if (not dictHosp.has_key(l[ifila])):
                    dictHosp[l[ifila]] = str(len(dictHosp)+1)
                # Columns: date id, hospital id, type id, patients, mean wait
                wr1.writerow([dictFecha[fechaAnalisis], dictHosp[l[ifila]],dictTipo[tipo],l[ffila-2].replace('.',''),l[ffila-1].replace(',','.')])
            ifila = ffila
            ffila += longfila
    except Exception, e:
        print 'Error', e
        return False
    return True

# Process the per-service CSVs: emits TotServ rows (and TotTipoServ for
# the TOTAL summary row).  Pre-2006 CEX reports use a shorter 3-column
# layout keyed on the 'Pacientes' header.
def procesarServ(l,tipo):
    try:
        print 'procesarServ'
        #print 'longitud de la lista: ', len(l)
        # Locate the analysis date.
        i = l[1].index('/20')
        fechaAnalisis = l[1][i-5:i+5]
        if (not dictFecha.has_key(fechaAnalisis)):
            dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
        if (not dictTipo.has_key(tipo)):
            dictTipo[tipo] = str(len(dictTipo)+1)
        if (l[1][i+1:i+5] <= CEXServChangeDate and tipo == 'CEX'):
            j = l.index('Pacientes')
            longfila = 3
        else:
            i = l.index(s0a3)
            j = l.index(total)
            longfila = (j-i+3)
        ifila = j+2
        ffila = ifila+longfila
        while (ffila <= len(l)):
            if (l[ifila] != total):
                if (not dictServ.has_key(l[ifila])):
                    dictServ[l[ifila]] = str(len(dictServ)+1)
                wr2.writerow([dictFecha[fechaAnalisis], dictServ[l[ifila]],dictTipo[tipo], l[ffila-2].replace('.',''),l[ffila-1].replace(',','.')])
            else:
                # TOTAL row: aggregated per service type instead.
                wr3.writerow([dictFecha[fechaAnalisis],dictTipo[tipo], l[ffila-2].replace('.',''),l[ffila-1].replace(',','.')])
            ifila = ffila
            ffila += longfila
    except Exception, e:
        print e
        return False
    return True

# Process the diagnostic-test CSVs.  The 31/03/2009 report uses a
# different column layout (extra column), handled in the else branch.
def procesarPrueb(l,tipo):
    try:
        print 'procesarPrueb'
        #print 'longitud de la lista: ', len(l)
        # Locate the analysis date.
        i = l[1].index('/20')
        fechaAnalisis = l[1][i-5:i+5]
        if (not dictFecha.has_key(fechaAnalisis)):
            dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
        if (not dictTipo.has_key(tipo)):
            dictTipo[tipo] = str(len(dictTipo)+1)
        if (fechaAnalisis != '31/03/2009'):
            # Re-join the row and split on the interval header to get one
            # chunk per table embedded in the report.
            s = '|'.join(l)
            l2 = s.split('|0 - 3 m|')
            l2=l2[1:]
            for chunk in l2:
                laux=chunk.split('|')
                j = laux.index(total)
                # Row length derived from the computed cut position
                longfila = j+4
                # Position at the first data row
                ifila = j+2
                ffila = ifila+longfila
                while (ffila <= len(laux)):
                    if (laux[ifila] != total):
                        if (not dictServ.has_key(laux[ifila])):
                            dictServ[laux[ifila]] = str(len(dictServ)+1)
                        wr2.writerow([dictFecha[fechaAnalisis], dictServ[laux[ifila]],dictTipo[tipo], laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
                    else:
                        wr3.writerow([dictFecha[fechaAnalisis],dictTipo[tipo], laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
                    ifila = ffila
                    ffila += longfila
        else:
            # Special one-off layout for the 31/03/2009 report.
            i = l.index(s0a3)
            j = l.index(total)
            longfila = (j-i+4)
            ifila = j+3
            ffila = ifila+longfila
            while (ffila <= len(l)):
                if (l[ifila] != total):
                    if (not dictServ.has_key(l[ifila])):
                        dictServ[l[ifila]] = str(len(dictServ)+1)
                    wr2.writerow([dictFecha[fechaAnalisis],dictServ[l[ifila]], dictTipo[tipo],l[ffila-3].replace('.',''),l[ffila-2].replace(',','.')])
                else:
                    wr3.writerow([dictFecha[fechaAnalisis], dictTipo[tipo],l[ffila-3].replace('.',''),l[ffila-2].replace(',','.')])
                ifila = ffila
                ffila += longfila
    except Exception, e:
        print e
        return False
    return True

# Process the hospital-and-service CSVs: one embedded table per hospital.
# Emits TotHospServ rows, per-interval DatosPacien rows, and (pre-200912)
# hospital totals into TotHosp.
def procesarHospServ(l, tipo):
    try:
        print 'procesarHospServ'
        # Locate the analysis date.
        i = l[1].index('/20')
        fechaAnalisis = l[1][i-5:i+5]
        if (not dictFecha.has_key(fechaAnalisis)):
            dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
        if (not dictTipo.has_key(tipo)):
            dictTipo[tipo] = str(len(dictTipo)+1)
        s = '|'.join(l)
        if (l[1][i+1:i+5] <= CEXServChangeDate and tipo == 'CEX'):
            # Old CEX layout: tables are delimited by the 'Pacientes' header.
            l2 = s.split('|Pacientes|')
            hospital = ''
            for chunk in l2:
                laux=chunk.split('|')
                # Chunks without the mean-time header only carry the name
                # of the hospital for the NEXT table.
                if (laux.count(tempo) == 0):
                    hospital = laux[len(laux)-2]
                    if (not dictHosp.has_key(hospital)):
                        dictHosp[hospital] = str(len(dictHosp)+1)
                    continue
                j = laux.index(tempo)
                # Row length derived from the computed cut position
                longfila = j+3
                # Position at the first data row
                ifila = j+1
                ffila = ifila+longfila
                while (ffila <= len(laux)):
                    if (laux[ifila] != total):
                        if (not dictServ.has_key(laux[ifila])):
                            dictServ[laux[ifila]] = str(len(dictServ)+1)
                        # Only totals are available here, no per-interval values
                        wr4.writerow([dictFecha[fechaAnalisis],dictHosp[hospital],dictServ[laux[ifila]], dictTipo[tipo],laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
                    else:
                        wr1.writerow([dictFecha[fechaAnalisis],dictHosp[hospital], dictTipo[tipo],laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
                    ifila = ffila
                    ffila += longfila
                # Trailing cells of this chunk name the next hospital.
                hospital = laux[len(laux)-2]
                if (not chunk == l2[len(l2)-1]):
                    if (not dictHosp.has_key(hospital)):
                        dictHosp[hospital] = str(len(dictHosp)+1)
        else:
            # Modern layout: tables delimited by the first interval header.
            l2 = s.split('|0 - 3 m|')
            hospital = ''
            for chunk in l2:
                laux=chunk.split('|')
                # Chunks with no TOTAL only carry the next hospital's name.
                if (laux.count(total) == 0):
                    hospital = laux[len(laux)-2]
                    if (not dictHosp.has_key(hospital)):
                        dictHosp[hospital] = str(len(dictHosp)+1)
                    continue
                j = laux.index(total)
                # Row length derived from the computed cut position
                longfila = j+4
                # Position at the first data row
                ifila = j+2
                ffila = ifila+longfila
                while (ffila <= len(laux)):
                    if (laux[ifila] != total):
                        if (not dictServ.has_key(laux[ifila])):
                            dictServ[laux[ifila]] = str(len(dictServ)+1)
                        # Drop the first column and the last two (totals)
                        longdatos = longfila-3
                        for i in range(longdatos):
                            wr5.writerow([dictFecha[fechaAnalisis], str(i+1),dictHosp[hospital],dictServ[laux[ifila]],dictTipo[tipo],laux[ifila+1+i].replace('.','')])
                        wr4.writerow([dictFecha[fechaAnalisis], dictHosp[hospital],dictServ[laux[ifila]],dictTipo[tipo], laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
                    else:
                        # Only write hospital totals while no dedicated
                        # per-hospital report existed (before 200912).
                        if(int(fechaAnalisis[6:10]+fechaAnalisis[3:5]) < reportChangeDate):
                            wr1.writerow([dictFecha[fechaAnalisis],dictHosp[hospital], dictTipo[tipo],laux[ffila-2].replace('.',''),laux[ffila-1].replace(',','.')])
                    ifila = ffila
                    ffila += longfila
                hospital = laux[len(laux)-2]
                if (not chunk == l2[len(l2)-1]):
                    if (not dictHosp.has_key(hospital)):
                        dictHosp[hospital] = str(len(dictHosp)+1)
    except Exception, e:
        print e
        return False
    return True

# Process the hospital-and-diagnostic-test CSVs.  The 31/03/2009 report
# shifts every column by one (offset).
def procesarHospPrueb(l, tipo):
    try:
        print 'procesarHospPrueb'
        # Locate the analysis date.
        i = l[1].index('/20')
        fechaAnalisis = l[1][i-5:i+5]
        if (not dictFecha.has_key(fechaAnalisis)):
            dictFecha[fechaAnalisis] = str(len(dictFecha)+1)
        if (not dictTipo.has_key(tipo)):
            dictTipo[tipo] = str(len(dictTipo)+1)
        if (fechaAnalisis == '31/03/2009'):
            offset = 1
        else:
            offset = 0
        s = '|'.join(l)
        l2 = s.split('|0 - 3 m|')
        hospital = ''
        for chunk in l2:
            laux=chunk.split('|')
            # Chunks with no TOTAL only carry the next hospital's name.
            if (laux.count(total) == 0):
                hospital = laux[len(laux)-3+offset]
                if (not dictHosp.has_key(hospital)):
                    dictHosp[hospital] = str(len(dictHosp)+1)
                continue
            j = laux.index(total)
            # Row length derived from the computed cut position
            longfila = j+4+offset
            # Position at the start of the first row
            ifila = j+2+offset
            ffila = ifila+longfila
            while (ffila <= len(laux)):
                if (laux[ifila] != total):
                    if (not dictServ.has_key(laux[ifila])):
                        dictServ[laux[ifila]] = str(len(dictServ)+1)
                    # Drop the first column and the last two (totals)
                    longdatos = longfila-3
                    for i in range(longdatos):
                        wr5.writerow([dictFecha[fechaAnalisis],str(i+1), dictHosp[hospital],dictServ[laux[ifila]],dictTipo[tipo],laux[ifila+1+i].replace('.','')])
                    wr4.writerow([dictFecha[fechaAnalisis],dictHosp[hospital],dictServ[laux[ifila]], dictTipo[tipo],laux[ffila-(2+offset)].replace('.',''),laux[ffila-(1+offset)].replace(',','.')])
                ifila = ffila
                ffila += longfila
            # The trailing cell names the next hospital only when it is
            # not numeric (otherwise it is a data value).
            h =laux[len(laux)-3+offset].replace(',','.')
            if (not is_numeric(h)):
                hospital = laux[len(laux)-3+offset]
            if (not chunk == l2[len(l2)-1]):
                if (not dictHosp.has_key(hospital)):
                    dictHosp[hospital] = str(len(dictHosp)+1)
    except Exception, e:
        print e
        return False
    return True

# ---- main script -----------------------------------------------------------

# Input file: index of CSV report paths
fileInput = open(fIndex,'r')
# Data output files (fact tables)
fileOutput1 = open('TotHosp.csv','wb')
wr1 = csv.writer(fileOutput1, quoting=csv.QUOTE_ALL)
fileOutput2 = open('TotServ.csv','wb')
wr2 = csv.writer(fileOutput2, quoting=csv.QUOTE_ALL)
fileOutput3 = open('TotTipoServ.csv','wb')
wr3 = csv.writer(fileOutput3, quoting=csv.QUOTE_ALL)
fileOutput4 = open('TotHospServ.csv','wb')
wr4 = csv.writer(fileOutput4, quoting=csv.QUOTE_ALL)
fileOutput5 = open('DatosPacien.csv','wb')
wr5 = csv.writer(fileOutput5, quoting=csv.QUOTE_ALL)
# Definition output files (dimension tables)
fileOutput6 = open('DefHosp.csv','wb')
wr6 = csv.writer(fileOutput6, quoting=csv.QUOTE_ALL)
fileOutput7 = open('DefServ.csv','wb')
wr7 = csv.writer(fileOutput7, quoting=csv.QUOTE_ALL)
fileOutput8 = open('DefTipoServ.csv','wb')
wr8 = csv.writer(fileOutput8, quoting=csv.QUOTE_ALL)
fileOutput9 = open('DefFecha.csv','wb')
wr9 = csv.writer(fileOutput9, quoting=csv.QUOTE_ALL)
fileOutput10 = open('DefInte.csv','wb')
wr10 = csv.writer(fileOutput10, quoting=csv.QUOTE_ALL)

# Dispatch each listed CSV to the right parser based on its title cell
# (Galician report headings).  Order matters: more specific first.
for line in fileInput.readlines():
    try:
        tipo = line[4:7]
        ifile = open(line.rstrip(), "rb")
        reader = csv.reader(ifile)
        for row in reader:
            if (row[0].find('por hospitais e servizos') != -1):
                print line.rstrip()
                procesarHospServ(row,tipo)
            elif (row[0].find('por hospitais e probas') != -1 or row[0].find('probas diagnósticas por hospitais') != -1):
                print line.rstrip()
                procesarHospPrueb(row,tipo)
            elif (row[0].find('por servizos') != -1):
                print line.rstrip()
                procesarServ(row,tipo)
            elif (row[0].find('probas diagnósticas') != -1):
                print line.rstrip()
                procesarPrueb(row,tipo)
            elif (row[0].find('por hospitais') != -1):
                print line.rstrip()
                procesarHosp(row,tipo)
            else:
                print 'Categoria no esperada ', row[0]
        ifile.close()
    except Exception, e:
        print 'Error: ', e

# Dump the accumulated dimension dictionaries.
try:
    l = dictHosp.keys()
    for data in l:
        wr6.writerow([dictHosp[data],data,data])
    l = dictServ.keys()
    for data in l:
        wr7.writerow([dictServ[data],data,data])
    l = dictTipo.keys()
    for data in l:
        wr8.writerow([dictTipo[data],data,data,data])
    l = dictFecha.keys()
    for data in l:
        wr9.writerow([dictFecha[data],data,data,' '])
    l = dictInte.keys()
    for data in l:
        wr10.writerow([dictInte[data],data,data])
except Exception, e:
    print e

#print 'diccionario de fechas: ', dictFecha
#print 'diccionario de hospitales: ', dictHosp
#print 'diccionario de servicios: ', dictServ
#print 'diccionario de tipos: ', dictTipo
#print 'diccionario de intervalos: ', dictInte

fileOutput1.close()
fileOutput2.close()
fileOutput3.close()
fileOutput4.close()
fileOutput5.close()
fileOutput6.close()
fileOutput7.close()
fileOutput8.close()
fileOutput9.close()
fileOutput10.close()
fileInput.close()
What a wonderful weekend away with my two loves. This was our first actual “family” vacation— just us three. I think the images speak for themselves but we had a wonderful time. It was my-kind-of perfect: laid-back, sans makeup, sunshine, lazy beach days, catching up on sleep, building sandcastles, lots of snuggle time, poolside reading, yummy seafood and just good, quality time together. If you’re like me, you make very little time for white space. My day is packed full of to-do lists, phone calls, errands and appointments. I always feel like there are never enough hours in the day to finish it all, and then 4pm rolls around and I don’t know where the day went. So…I’m trying to make more time for the “white space” in my life…to say “no” to a design project when my plate is too full, to stop trying to be everything to everyone and everywhere at once…because that white space is so critical to balance and joy. That white space is where the things that matter happen and where our relationships grow. Well, our little 4-day getaway was definitely full of white space! It was a relaxing, quiet, non-event-filled, long weekend—which was just what we needed. How nice to slow down for a little while and let time stand still.
# valueIterationAgents.py
# -----------------------
# Licensing Information:  You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).

import mdp, util

from learningAgents import ValueEstimationAgent

class ValueIterationAgent(ValueEstimationAgent):
    """
        * Please read learningAgents.py before reading this.*

        A ValueIterationAgent takes a Markov decision process
        (see mdp.py) on initialization and runs value iteration
        for a given number of iterations using the supplied
        discount factor.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100):
        """
          Your value iteration agent should take an mdp on
          construction, run the indicated number of iterations
          and then act according to the resulting policy.

          Some useful mdp methods you will use:
              mdp.getStates()
              mdp.getPossibleActions(state)
              mdp.getTransitionStatesAndProbs(state, action)
              mdp.getReward(state, action, nextState)
              mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # A Counter is a dict with default 0

        # Batch value iteration: each sweep reads the previous sweep's
        # values (self.values) and writes into a fresh counter (Vnew).
        AllStates = mdp.getStates()
        for k in range(iterations):
            Vnew = util.Counter()
            for State in AllStates:
                AllActions = mdp.getPossibleActions(State)
                if len(AllActions) > 0:
                    # Q-value of every action available at this state.
                    ExpectedValueofAction = util.Counter()
                    for Action in AllActions:
                        # List of (nextState, probability) pairs for (s, a)
                        Pssa = mdp.getTransitionStatesAndProbs(State, Action)
                        for Transition in Pssa:  # sum over all possible s'
                            StatePrime = Transition[0]
                            Probability = Transition[1]
                            Reward = mdp.getReward(State, Action, StatePrime)
                            Vprime = self.values[StatePrime]
                            ExpectedValueofAction[Action] += Probability*(Reward + discount*Vprime)
                    # V(s) <- max_a Q(s, a); sortedKeys() is ordered by
                    # decreasing value, so index 0 is the argmax.
                    SortedActions = ExpectedValueofAction.sortedKeys()
                    OptimalAction = SortedActions[0]
                    Vnew[State] = ExpectedValueofAction[OptimalAction]
                # else: no legal action (terminal state) -> value stays at
                # the Counter default of 0.
            self.values = Vnew

    def getValue(self, state):
        """
          Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def computeQValueFromValues(self, state, action):
        """
          Compute the Q-value of action in state from the
          value function stored in self.values:
          Q(s, a) = sum_s' T(s, a, s') * (R(s, a, s') + gamma * V(s')).
        """
        Pssa = self.mdp.getTransitionStatesAndProbs(state, action)
        ExpectedValueofAction = 0
        for Transition in Pssa:  # sum over all possible s'
            StatePrime = Transition[0]
            Probability = Transition[1]
            Reward = self.mdp.getReward(state, action, StatePrime)
            Vprime = self.values[StatePrime]
            ExpectedValueofAction += Probability*(Reward + self.discount*Vprime)
        return ExpectedValueofAction

    def computeActionFromValues(self, state):
        """
          The policy is the best action in the given state
          according to the values currently stored in self.values.

          You may break ties any way you see fit.  Note that if
          there are no legal actions, which is the case at the
          terminal state, you should return None.
        """
        AllActions = self.mdp.getPossibleActions(state)
        if len(AllActions) > 0:
            # Q-value of each action available at this state.
            ExpectedValueofAction = util.Counter()
            for Action in AllActions:
                ExpectedValueofAction[Action] = self.computeQValueFromValues(state, Action)
            OptimalAction = ExpectedValueofAction.argMax()
        else:
            # BUG FIX: the original returned the STRING 'None' here, which
            # is truthy and breaks any `if action is None` check downstream.
            OptimalAction = None
        return OptimalAction

    def getPolicy(self, state):
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        return self.computeQValueFromValues(state, action)
Special attention should be paid to designer Ernesto Esposito himself who has successfully collaborated with such famous fashion houses like Sergio Rossi, Chloe, Louis Vuitton, Sonia Rykiel, Fendi and Marc Jacobs for his 30 year career, and his portraits and photographs have been illustrated in the pieces of work of cult artists and photographers from the whole world: Andy Warhol, Helmut Newton, Jack Pierson and Mario Testino. In addition, Ernesto is now working on a new collection of shoes for the Missoni brand, as well as the legendary Royal brand – Rayne with Princess Diana and Queen of England Elizabeth among its passionate admirers. Meeting of Zhanna and Ernesto, as well as Zhanna’s love of beautiful shoes resulted in creation of a namesake brand – Zhanna Badoeva that fully embodies the life philosophy of the TV hostess. Zhanna Badoeva shoes are made in the Italian factory in Venice where high-quality shoes for the world’s leading brands are produced. Each pair of the Zhanna Badoeva collection developed by a creative duo is a real work of art. Combination of different textures of silk, leather and fur sets the collection apart and makes it unique. The color palette of the collection includes bright, saturated shades of red, black and green, with drawings and prints.
import re

from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.core.files import File
from django.conf import settings
from django.db import transaction

from taiga.base.utils.iterators import iter_queryset


# Example of the kind of old-domain URL this command rewrites.
url = """
https://api-taiga.kaleidos.net/attachments/446/?user=8&amp;token=9ac0fc593e9c07740975c6282e1e501189578faa
"""


class Command(BaseCommand):
    help = "Parses all objects and try replace old attachments url with one new"

    # Captures (full attachment URL, attachment pk) on the old api-taiga domain.
    trx = r"((?:https?)://api-taiga\.kaleidos\.net/attachments/(\d+)/[^\s\"]+)"

    @transaction.atomic
    def handle(self, *args, **options):
        # Point MEDIA_URL at the new host so re-saved files get new-domain URLs.
        settings.MEDIA_URL = "https://media.taiga.io/"

        self.move_user_photo()
        self.move_attachments()
        self.process_userstories()
        self.process_issues()
        self.process_wiki()
        self.process_tasks()
        self.process_history()

    def move_attachments(self):
        """Re-save every attachment file so it lands in the new storage location.

        Attachments whose backing file is missing on disk are deleted.
        """
        print("Moving all attachments to new location")
        Attachment = apps.get_model("attachments", "Attachment")
        qs = Attachment.objects.all()
        for item in iter_queryset(qs):
            try:
                with transaction.atomic():
                    old_file = item.attached_file
                    item.attached_file = File(old_file)
                    item.save()
            except FileNotFoundError:
                item.delete()

    def move_user_photo(self):
        """Re-save every user photo; users with a missing file are skipped."""
        print("Moving all user photos to new location")
        User = apps.get_model("users", "User")
        qs = User.objects.all()
        for item in iter_queryset(qs):
            try:
                with transaction.atomic():
                    old_file = item.photo
                    item.photo = File(old_file)
                    item.save()
            except FileNotFoundError:
                pass

    def get_attachment_real_url(self, pk):
        """Return the current (new-domain) URL of attachment *pk*."""
        if isinstance(pk, str):
            pk = int(pk)
        Attachment = apps.get_model("attachments", "Attachment")
        return Attachment.objects.get(pk=pk).attached_file.url

    def replace_matches(self, data):
        """Replace every old-domain attachment URL in *data* with the
        attachment's current URL and return the rewritten text.

        BUG FIXES vs the previous version:
        - the attachment lookup (which can raise, e.g. DoesNotExist) is now
          inside the try block; before, only the non-raising str.replace was;
        - the lookup runs once per match instead of twice;
        - removed `assert data != original_data`, which crashed the whole
          migration whenever a replacement was a no-op (and is stripped
          under `python -O` anyway).
        """
        matches = re.findall(self.trx, data)
        if len(matches) == 0:
            return data

        for url, attachment_id in matches:
            try:
                new_url = self.get_attachment_real_url(attachment_id)
            except Exception as e:
                # Best-effort migration: keep the old URL if the attachment
                # is gone, but make the skip visible in the log.
                print("Exception found but ignoring:", e)
                continue
            print("Match {} replaced by {}".format(url, new_url))
            data = data.replace(url, new_url)
        return data

    def process_userstories(self):
        """Rewrite attachment URLs in all user-story descriptions."""
        UserStory = apps.get_model("userstories", "UserStory")
        qs = UserStory.objects.all()
        for item in iter_queryset(qs):
            description = self.replace_matches(item.description)
            UserStory.objects.filter(pk=item.pk).update(description=description)

    def process_tasks(self):
        """Rewrite attachment URLs in all task descriptions."""
        Task = apps.get_model("tasks", "Task")
        qs = Task.objects.all()
        for item in iter_queryset(qs):
            description = self.replace_matches(item.description)
            Task.objects.filter(pk=item.pk).update(description=description)

    def process_issues(self):
        """Rewrite attachment URLs in all issue descriptions."""
        Issue = apps.get_model("issues", "Issue")
        qs = Issue.objects.all()
        for item in iter_queryset(qs):
            description = self.replace_matches(item.description)
            Issue.objects.filter(pk=item.pk).update(description=description)

    def process_wiki(self):
        """Rewrite attachment URLs in all wiki page contents."""
        WikiPage = apps.get_model("wiki", "WikiPage")
        qs = WikiPage.objects.all()
        for item in iter_queryset(qs):
            content = self.replace_matches(item.content)
            WikiPage.objects.filter(pk=item.pk).update(content=content)

    def process_history(self):
        """Rewrite attachment URLs in history comments (raw and rendered)."""
        HistoryEntry = apps.get_model("history", "HistoryEntry")
        qs = HistoryEntry.objects.all()
        for item in iter_queryset(qs):
            comment = self.replace_matches(item.comment)
            comment_html = self.replace_matches(item.comment_html)
            HistoryEntry.objects.filter(pk=item.pk).update(comment=comment,
                                                           comment_html=comment_html)
Plan 365 specializes in marketing planning and program implementation services for high tech clients. We tailor and scale field-level program models and services to your specific needs through the development, implementation and measurement of go-to-market strategies. From field enablement through demand generation and closed sales, our clients trust us to turn great ideas into action, and action into results. Our suite of marketing program models can function individually or as an integrated series to build momentum and increase sales. Our innovative models align seamlessly with existing marketing and sales processes, and we tailor service models to your needs. Co-marketing and market development fund (MDF) investments can be a challenge to manage. We support all aspects of the management and rollout of cooperative marketing program models to optimize your return on investment. We offer a complete suite of logistics management and implementation support services to help you advance your sales and marketing goals through carefully planned project and budget management, logistics handling and performance tracking and reporting. We take a customized approach to getting programs resourced and implemented as effectively as possible. Our services and service models are designed to travel. Many of our direct sales, channel enablement and channel acceleration programs, such as marketing asset management and sales incentives, can be deployed to support global market requirements. These programs offer central management control with local administration flexibility. Let us put Plan 365 to work for you. We are here to discuss, plan and get started, when you’re ready.
# -------------------------------------------------------------------------------- # Split Wordpress XML (using Element Tree) # -------------------------------------------------------------------------------- import sys, os, re, codecs sys.path.append('/usr/local/lib/python2.7/site-packages/') from lxml import etree as ET #xmldata = 'input/dmclubcustomerblog.wordpress.2014-10-29.xml' xmldata = 'input/wp.xml' # Register Wordpress XML namespaces namespaces = { 'wp' : 'http://wordpress.org/export/1.2/', 'excerpt' : 'http://wordpress.org/export/1.2/excerpt/', 'content' : 'http://purl.org/rss/1.0/modules/content/', 'wfw' : 'http://wellformedweb.org/CommentAPI/', 'dc' : 'http://purl.org/dc/elements/1.1/', } """ REGISTER NAMESPACE WHEN WRITING ONLY for prefix, uri in namespaces.iteritems(): ET.register_namespace(prefix, uri) """ def write_utf8_file(fp, ustr): f = codecs.open(fp, 'w', 'utf-8'); f.write(ustr) f.close() # Parse the XML using ElementTree's streaming SAX-like parser for event, elem in ET.iterparse(xmldata, tag='item', strip_cdata=False, remove_blank_text=True): title = elem.find('title').text type = elem.find('wp:post_type', namespaces=namespaces).text name = elem.find('wp:post_name', namespaces=namespaces).text print '{:15s} {:100s} {:100s}'.format(type, title, name) content = elem.find('content:encoded', namespaces=namespaces) excerpt = elem.find('excerpt:encoded', namespaces=namespaces) elem.remove(content) elem.remove(excerpt) if title is not None: dir_suffix = name if dir_suffix is None: dir_suffix = re.sub(r'[^\w]', '_', title.lower()) dir = os.getcwd()+'/output/'+type+'__'+dir_suffix if not os.path.exists(dir): os.makedirs(dir) xmlstr = ET.tostring(elem, pretty_print=True, encoding='unicode', method='xml') write_utf8_file(dir+'/meta.xml', xmlstr) write_utf8_file(dir+'/content.html', content.text) write_utf8_file(dir+'/excerpt.html', excerpt.text)
One of the last manufacturers of handy and popular midsize pickup trucks, Toyota has a winner in its rugged, off-road-capable 2014 Tacoma lineup. This downsized hauler, seriously challenged only by Nissan’s gussied-up Frontier, touts a couple of minor changes this year, but still offers three cab configurations: the 3-seat, 2-door Regular Cab; the 4-place, 4-door Access Cab with reverse-opening rear doors; and the 5-passenger Double-Cab, with 4 independently opening doors. A pair of bed lengths, the standard 6.1-foot bed and the Double-Cab-only 5-foot bed, are once again offered, while trim levels remain the entry-level Tacoma and the Tacoma PreRunner, both available in peppy V6 editions. As noted, there are a couple of tweaks evident in the 2014 Tacoma stable, including a new SR Package option that begets various upgrades to interior and exterior décor, as well as upsized and upgraded wheels and tinted headlights. The street-smart Tacoma X-Runner, meantime, is deleted this year. Of course, in keeping with tradition, this downsized pickup comes in standard rear-wheel drive (RWD), while all trim/cab editions remain available with part-time, shift-on-the-fly 4-wheel drive (4WD). Said 4WD system includes hi-lo gear selection using the unique push-and-turn dial and an automatic rear limited-slip differential, as well as auto-locking hubs and available descent control. The PreRunner trim level, by the way, is offered only in the RWD configuration. Again, a pair of engines are available to this capable midsize pickup, with the standard engine in both base Tacoma and PreRunner trim levels being a variable-valve-timed (VVT) 2.7-liter inline 4-cylinder (I4) powerplant that mates with either the delivered 5-speed manual transmission or an available 4-speed automatic transmission for 159 hp and 180 lb-ft of torque. Mileage with the 4-banger is estimated at 21 mpg city/25 highway in a RWD, stick-shift-equipped version or 18/21 in a 4WD variation with the 5-speed automatic. 
Should a bit more oomph be needed, those trims given the V6 moniker carry a standard VVT 4.0-liter V6 engine that remains managed by either a standard 5-speed automatic transmission or an available 6-speed manual gearbox to put out 236 hp and 266 lb-ft of torque. Mileage estimates in V6-toting Tacoma trims run 17/21 for RWD 5-speed-automatic-laden editions to 16/19 in those Tacomas bearing the stick-shift and 4WD. Towing with a properly equipped V6-packing Tacoma is maxed out at 6,500 pounds. Creature comforts are sparse in the basic Regular Cab Tacoma, with 15-inch wheels and a trailer hitch among the notable appearance features, and cloth upholstery, telescoping tilt-wheel steering, simulated alloy cabin accents and air conditioning adorning the interior. A 6.1-inch touchscreen display, meanwhile, interfaces with a single-CD player with 4 speakers in Regular Cab variants and 6 speakers in Access and Double Cab versions. Again, Bluetooth hands-free calling also remains standard. Those Access Cab and Double Cab trims equipped with 4WD add on standard 16-inch wheels and skid plates, while also boasting standard power door locks and windows, a fold-flat front passenger seatback and split-folding rear seatbacks. Optional equipment, depending on trim level, engine size and drivetrain configuration, includes a sliding rear window, cruise control, remote keyless entry, a rear-view camera, TRD (Toyota Racing Division) sport- and off-road-oriented parts, graphics and badges, heated, power-adjustable sport front seats, a rear-view camera, and upsized and upgraded wheels, all of which are offered in the form of various packages. A navigation system, meantime, is available as a standalone option, as are upgraded audio components, Entune infotainment, a tow hitch, various high-end interior and exterior accents, and Toyota’s touted VIP security system. 
Finally, standard safety equipment of note aboard the 2014 Tacoma includes 4-wheel antilock brakes (ABS), traction and stability control, front side-mounted airbags, front, and in Access and Double Cab trims, rear head airbags, and front head restraint whiplash protection. Daytime running lights are standard aboard all Tacoma iterations, while front fog/driving lights are available lineup-wide. What's your take on the 2014 Toyota Tacoma? Francesco Marcelo SR5, nice smooth drive, feels like a sedan. Have you driven a 2014 Toyota Tacoma? Traveling How Far Should I Go Before Stopping To Let My Engine Cool? I’m traveling in my 2014 Toyota Tacoma, 800 miles (12hrs) today and 800 miles (12hrs) tomorrow. How far should I travel before stopping to allow my engine to cool? Can I Take The Existing OBD II Out And Replace It With A HUM OBD II?
''' Code for processing images Copyright (c) 2014, Helen Ramsden All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
'''
__author__ = 'helenlramsden'
import os, re
import Image
import ImageChops, ImageOps
import numpy as np


def resizeimages(infolderpath, iseriesdict):
    '''
    Run image resize: for each image series, resize both the ISH image and
    its matching expression ('exp') image.
    :param infolderpath: folder containing the original 'ish' jpgs
    :param iseriesdict: dict keyed by image-series name (filename stem)
    :return: None (resized images are written by borderresize)
    '''
    for iseries in iseriesdict.keys():
        filename = iseries + '.jpg'
        filename_exp = re.sub('ish','exp',filename)
        borderresize(filename, infolderpath, 'ish')
        borderresize(filename_exp, re.sub('ish','exp',infolderpath), 'exp')


def borderresize(filename, infolderpath, runtype):
    '''
    Resize ISH images: crop a thin outer border, paste the image centred on a
    fixed-size canvas, then shrink the canvas by resizefactor and save into
    the matching Resize_<runtype> folder.
    :param filename: jpg file name
    :param infolderpath: source folder (an Original_<runtype> path)
    :param runtype: 'ish' or 'exp'; used to rewrite the output folder name
    :return: None
    '''
    newimagewidth = 6000/4   # Python 2 integer division -> 1500
    newimageheight = 4500/4  # -> 1125
    # 'ishfull' images go on a white canvas, everything else on black
    if 'ishfull' in filename:
        newimage = Image.new('L', (newimagewidth,newimageheight), (255) ) #white image
    else:newimage = Image.new('L', (newimagewidth,newimageheight), (0) ) #black image
    resizefactor = 1.25 # images 0.8 of original size
    try:
        ishimage = Image.open(infolderpath +filename).convert("L")
    except IOError:
        print filename, 'Image failed'
        return
    # Not used previously - remove outer border to get rid of any dark borders on original image
    ishimage = ImageOps.crop(ishimage, border=5)
    # Offsets that centre the cropped image on the canvas
    dwidth = newimagewidth - ishimage.size[0]
    dheight = newimageheight - ishimage.size[1]
    newimage.paste(ishimage, (dwidth/2,dheight/2,dwidth/2+ishimage.size[0],dheight/2+ishimage.size[1]))
    newimage = newimage.resize((int(float(newimagewidth)/resizefactor),int(float(newimageheight)/resizefactor)))
    newimage.save(re.sub('Original_%s' % runtype,'Resize_%s' % runtype, infolderpath) + filename)


def sb_images(infolderpath, outfolderpath,iseriesdict, outfile):
    '''
    Write ImageJ macro to subtract background from each image.
    :param infolderpath: folder of input jpgs (as seen by ImageJ)
    :param outfolderpath: folder the macro saves the _sb jpgs into
    :param iseriesdict: dict keyed by image-series name
    :param outfile: open handle the macro lines are appended to
    :return: None
    '''
    for iseries in iseriesdict.keys():
        filename = iseries + '.jpg'
        outfile.write('open("' + infolderpath + filename +'");\n')
        outfile.write('run("8-bit");\n')
        outfile.write('run("Subtract Background...", "rolling=1 light");\n') # changed from 3 for smaller images
        outfile.write('saveAs("Jpeg", "' +outfolderpath+ re.sub('.jpg','_sb.jpg',filename) + '");\n')
        outfile.write('run("Close All");\n')


def thresh_images(infolderpath, outfolderpath, iseriesdict, outfile):
    '''
    Write ImageJ macro to threshold images. Images are thresholded for use in
    registration, but not for segmentation. Performance could possibly be
    improved by thresholding prior to edge detection.
    :param infolderpath: folder of input _sb jpgs
    :param outfolderpath: folder the macro saves _sbthres jpgs into
    :param iseriesdict: dict keyed by image-series name
    :param outfile: open handle the macro lines are appended to
    :return: None
    '''
    for iseries in iseriesdict.keys():
        filename = iseries + '.jpg'
        outfile.write('open("' + infolderpath + filename +'");\n')
        outfile.write('run("Auto Threshold", "method=MinError(I) white");\n')
        outfile.write('saveAs("Jpeg", "' + outfolderpath + re.sub('_sb.jpg','_sbthres.jpg',filename) + '");\n')
        outfile.write('run("Close All");\n')


def find_seg_edges(infilepath,outfilepath, iseriesdict, outfile):
    '''
    Write ImageJ macro to detect the edges (FeatureJ) of each image.
    :param infilepath: folder of input jpgs
    :param outfilepath: folder the macro saves _edges jpgs into
    :param iseriesdict: dict keyed by image-series name
    :param outfile: open handle the macro lines are appended to
    :return: None
    '''
    for iseries in iseriesdict.keys():
        filename = iseries + '.jpg'
        outfile.write('open("' + infilepath + filename +'");\n')
        outfile.write('selectWindow("' + filename + '");\n')
        outfile.write('run("8-bit");\n')
        outfile.write('run("FeatureJ Edges", "compute smoothing=10 lower=[] higher=[]");\n') # check size for resized images
        outfile.write('selectWindow("' + filename + ' edges");\n')
        outfile.write('saveAs("Jpeg", "' + outfilepath+ re.sub('.jpg','_edges.jpg',filename) + '");\n')
        outfile.write('run("Close All");\n')


def create_seg_mask(infilepath, outfilepath, iseriesdict, outfile, thresh):
    '''
    Write ImageJ macro to create a segmentation mask from the edge images
    (threshold -> fill holes -> watershed -> particle analysis).
    :param infilepath: unused here; paths are derived from outfilepath
    :param outfilepath: Segmented/ output root (rewritten to sibling folders)
    :param iseriesdict: dict keyed by image-series name
    :param outfile: open handle the macro lines are appended to
    :param thresh: unused in this version
    :return: None
    '''
    # Derive the sibling input/output folders from the Segmented/ root
    edgefilepath = re.sub('Segmented/','Edges/',outfilepath)
    origfilepath = re.sub('Segmented/','Resize_ish/',outfilepath)
    segorigfilepath = re.sub('Segmented/','Segmented/SegmentedOrig/',outfilepath)
    segmaskfilepath = re.sub('Segmented/','Segmented/SegmentedMask/',outfilepath)
    for iseries in iseriesdict.keys():
        filename = iseries + '.jpg'
        outfile.write('open("' + edgefilepath + filename + '");\n')
        outfile.write('run("Auto Threshold", "method=Li white");\n') # white ensures white on black background
        outfile.write('run("Fill Holes");\n')
        outfile.write('run("Watershed");\n')
        outfile.write('run("Analyze Particles...", "size=210000-Infinity circularity=0.00-1.00 show=Masks display clear summarize add");\n') # size needs altering for resized images
        outfile.write('selectWindow("Mask of ' + filename +'"); \n')
        outfile.write('saveAs("Jpeg", "' + segmaskfilepath + re.sub('_edges.jpg' , '_mask.jpg', filename) + '");\n')
        outfile.write('selectWindow("' + filename +'");\n')
        outfile.write('run("Close All");\n')


def apply_seg_mask(infilepath, outfilepath, iseriesdict):
    '''
    Apply the segmentation mask to the other images (thresholded, original
    and expression) of every series via seg_mask.
    :param infilepath: Segmented/SegmentedMask/ input root
    :param outfilepath: Segmented/ output root (rewritten to sibling folders)
    :param iseriesdict: dict keyed by image-series name
    :return: None
    '''
    sbinfilepath = re.sub('Segmented/SegmentedMask/','Thresh/',infilepath)
    origfilepath = re.sub('Segmented/','Resize_ish/',outfilepath)
    expfilepath = re.sub('Segmented/','Resize_exp/',outfilepath)
    segorigfilepath = re.sub('Segmented/','Segmented/SegmentedOrig/',outfilepath)
    segmaskfilepath = re.sub('Segmented/','Segmented/SegmentedMask/',outfilepath)
    segsbfilepath =re.sub('Segmented/','Segmented/SegmentedThresh/',outfilepath)
    segexpfilepath = re.sub('Segmented/','Segmented/SegmentedExp/',outfilepath)
    for iseries in iseriesdict.keys():
        seg_mask(iseries, sbinfilepath, segmaskfilepath, segsbfilepath,origfilepath,expfilepath,segexpfilepath,segorigfilepath)


def seg_mask(iseries, sbinfilepath, segmaskfilepath, segsbfilepath,origfilepath,expfilepath,segexpfilepath,segorigfilepath):
    # Apply one series' segmentation mask to its thresholded, original and
    # expression images, saving each masked result to its own folder.
    #iseries is a filename, without jpg on the end and with sb on the end
    # First, apply mask to sb image - mask is black (or grey) on white background
    filename = re.sub('_mask','',iseries) + '.jpg' #this is the sb image
    # print 'Initial', filename
    maskim = Image.open(segmaskfilepath+ re.sub('.jpg','_mask.jpg',filename)).convert("L")
    # Mask not always black so first make sure it is
    threshold = 141
    # point(): pixels above threshold -> 255, others -> 0 (False)
    maskim = maskim.point(lambda p: p > threshold and 255)
    threshfilename = re.sub('_sb','_sbthres', filename)
    sbim = Image.open(sbinfilepath + threshfilename)
    try:
        # print 'Get thresh'
        # lighter(): the white mask background blanks out masked-off pixels
        seg_sb = ImageChops.lighter(sbim,maskim)
        seg_sb.save(segsbfilepath+ re.sub('.jpg','_seg.jpg',threshfilename) )
    except IOError:
        print 'error in file'
    #Now open the original image - get rid of sb from filename
    filename = re.sub('_sb','', filename)
    origim = Image.open(origfilepath + filename).convert("L")
    seg_orig = ImageChops.lighter(origim,maskim)
    seg_orig.save(segorigfilepath+ re.sub('.jpg','_seg_orig.jpg',filename))
    #Now open the exp image and apply mask
    # First make mask white on black
    maskim = ImageChops.invert(maskim)
    # Now extract all the pixels that are white and make this region a transparent region on the mask
    maskim = maskim.convert('LA')
    datas = maskim.getdata()
    newData = list()
    for item in datas:
        if item[0] == 255:
            newData.append((255, 0))  # fully transparent where mask is white
        else:
            newData.append(item)
    maskim.putdata(newData)
    #img.save("img2.png", "PNG")
    l,a = maskim.split()
    # Check that exp file exists
    if os.path.exists(expfilepath + re.sub('ish','exp',filename)):
        #seg_exp = ImageChops.logical_and(expim,maskim)
        expim = Image.open(expfilepath + re.sub('ish','exp',filename)).convert("LA") # should be a grayscale image
        # paste the mask using its alpha channel so only opaque (masked-off)
        # regions overwrite the expression image
        expim.paste(maskim, mask = a)
        expim = expim.convert("L")
        expim.save(segexpfilepath+ re.sub('.jpg','_seg_exp.tif',filename))
    else:
        print 'N'


def getallfiledict(filefolder, filelist, filetype, fileend='jpg'):
    '''
    Function finds all the files within a folder and returns a dictionary of
    their image series (val) and full filename (key).
    :param filefolder: folder to list
    :param filelist: template path of the txt file used to cache the listing
    :param filetype: grep pattern selecting which files to include
    :param fileend: extension stripped off to form the iseries key
    :return: (allfilesdict, iseriesdict)
    '''
    ffilelist = re.sub('.txt',filefolder.split('/')[-2] + '.txt',filelist)
    # NOTE(review): shells out to `ls | grep`; assumes a POSIX environment
    os.system("ls %s | grep %s > %s" % (filefolder, filetype, ffilelist))
    allfilesdict = dict((line.strip(),line.strip().split('_')[0]) for line in open(ffilelist, 'r')) # key = whole filename, val = iseries
    iseriesdict = dict((line.strip().split('\t')[0].split('.' + fileend)[0], line.strip().split('\t')) for line in open(ffilelist,'r')) # key = filename without jpg, filename (replace tif with jpg)
    return allfilesdict, iseriesdict


def getnewdict(outfilefolder, filelist, preiseriesdict,fileendout, fileendin, fileend='jpg'):
    '''
    Get dictionary of images in a particular file. If file already present,
    don't overwrite: returns only the entries of preiseriesdict whose output
    file does not yet exist in outfilefolder.
    '''
    print outfilefolder, fileendin,len(preiseriesdict.keys())
    [gotfilesdict,gotiseriesdict] = getallfiledict(outfilefolder, filelist, fileendout,fileend)
    # Map the already-produced output names back to their input names
    gotfileskeys = [re.sub(fileendout,fileendin,g) for g in gotiseriesdict.keys()]
    print len(gotfileskeys)
    try:
        print list(preiseriesdict.keys())[0],len(gotfileskeys),gotfileskeys[0],list(preiseriesdict.keys())[0]
    except IndexError:
        print 'Empty list'
    allfiles = set(preiseriesdict.keys()).difference(gotfileskeys)
    print 'Files to be processed: ', len(allfiles)
    iseriesdict = dict((k,preiseriesdict[k]) for k in allfiles)
    return iseriesdict


def moveimages(originfolder, origfilename, maskinfolder, maskfilename,expinfolder, expfilename,outorigfolder,outexpfolder):
    '''
    Function for moving images into the centre - we didn't use this.
    Recentres the segmented original/expression pair on a 1200x900 canvas
    using the mask's centre of mass.
    '''
    try:
        segorigimage = Image.open(originfolder +origfilename).convert("L")
        segexpimage = Image.open(expinfolder +expfilename).convert("L")
        maskim = Image.open(maskinfolder + maskfilename).convert("L") # need to convert to 8 bit (not rgb)
    except IOError:
        print origfilename, 'Image failed'
        return
    threshold = 141
    maskim = maskim.point(lambda p: p > threshold and 255)
    maskim = ImageChops.invert(maskim)
    # NOTE(review): `ndimage` is not imported in this module (scipy.ndimage?)
    # -- calling this function would raise NameError; confirm before use.
    com = ndimage.measurements.center_of_mass(np.array(maskim))
    dwidth = int(com[1] - 525) # centre of mass - 600 (so leftwards will be negative)
    dheight = int(com[0] - 430) # centre of mass - 450 (so upwards will be negative)
    newsegimage = Image.new('L', (1200,900), (255) ) # white image for seg orig
    newexpimage = Image.new('L', (1200,900), (0) ) # black image for seg orig
    print dwidth, dheight
    # Crop box (le, up, ri, lo) shifted by the centre-of-mass offset
    le = up = 0; ri = segorigimage.size[0]; lo = segorigimage.size[1];left = upper = 0
    if dwidth > 0:le = int(dwidth)
    else: ri = segorigimage.size[0] + int(dwidth); left = -dwidth
    if dheight > 0: up = int(dheight)
    else: lo = segorigimage.size[1] + int(dheight); upper = -dheight
    box = (le, up, ri, lo)
    newsegorigimage = segorigimage.crop(box)
    newsegexpimage = segexpimage.crop(box)
    newsegimage.paste(newsegorigimage, (left,upper,left + newsegorigimage.size[0],upper + newsegorigimage.size[1])) # left, upper, right, lower
    newsegimage.save(outorigfolder + origfilename)
    newexpimage.paste(newsegexpimage, (left,upper,left + newsegexpimage.size[0],upper + newsegexpimage.size[1])) # left, upper, right, lower
    newexpimage.save(outexpfolder + expfilename)
Are dental surgical handpieces (DSH) really pre-disinfected before their treatment in central sterilization unit? Corrosion and Stains on Instruments - What can we do? What do we have to do? How much does a cleaning and sterilization cycle cost? Protocol of Change Evaluation of the steam sterilization process developed in the HSL. Torn packs: At what stages of the process do they occur?
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SyntaxNet lexicon utils."""

import os.path
import tensorflow as tf

from syntaxnet import task_spec_pb2
from syntaxnet.ops import gen_parser_ops


def create_lexicon_context(path):
  """Construct a SyntaxNet TaskContext file for standard lexical resources."""
  resource_names = ('word-map', 'tag-map', 'tag-to-category', 'lcword-map',
                    'category-map', 'char-map', 'char-ngram-map', 'label-map',
                    'prefix-table', 'suffix-table')
  context = task_spec_pb2.TaskSpec()
  for resource_name in resource_names:
    # Each resource lives in a single file named after itself under |path|.
    resource = context.input.add(name=resource_name)
    resource.part.add(file_pattern=os.path.join(path, resource_name))
  return context


def build_lexicon(output_path,
                  training_corpus_path,
                  tf_master='',
                  training_corpus_format='conll-sentence',
                  morph_to_pos=False,
                  **kwargs):
  """Constructs a SyntaxNet lexicon at the given path.

  Args:
    output_path: Location to construct the lexicon.
    training_corpus_path: Path to CONLL formatted training data.
    tf_master: TensorFlow master executor (string, defaults to '' to use the
      local instance).
    training_corpus_format: Format of the training corpus (defaults to CONLL;
      search for REGISTER_SYNTAXNET_DOCUMENT_FORMAT for other formats).
    morph_to_pos: Whether to serialize morph attributes to the tag field,
      combined with category and fine POS tag.
    **kwargs: Forwarded to the LexiconBuilder op.
  """
  context = create_lexicon_context(output_path)

  if morph_to_pos:
    # Enable the three parameters that fold morphology into the POS tag.
    for parameter_name in ('join_category_to_pos', 'add_pos_as_attribute',
                           'serialize_morph_to_pos'):
      context.parameter.add(name=parameter_name, value='true')

  # Register the training corpus as an input resource of the context.
  corpus = context.input.add(name='corpus')
  corpus.record_format.extend([training_corpus_format])
  corpus.part.add(file_pattern=training_corpus_path)

  # Run the lexicon builder op against the configured context.
  with tf.Session(tf_master) as sess:
    sess.run(
        gen_parser_ops.lexicon_builder(
            task_context_str=str(context),
            corpus_name='corpus',
            **kwargs))
The same study concluded that mortality rates are higher in coal-mining communities than in other areas of the country or region and that there are 313 deaths in West Virginia from coal-mining pollution every year. The study accounted for restricted access to healthcare, higher than average smoking rates, and lower than average income levels. Chapter 13 Environmental Science: Mineral Resources and Mining DO NOT FORGET TO MEMORIZE THE ROCK CYCLE AS WELL AS BE FAMILIAR WITH THE PROCESS OF MINING...good luck guys! Hope this helps. The cell wall (CW) as a first line of defense against biotic and abiotic stresses is of primary importance in plant biology. The proteins associated with cell walls play a significant role in determining a plant's resistance to adverse environmental conditions. Introduction to Mineral Processing. ... froth flotation cells. ... The exact range at any given plant is optimized for the ore at that site.
import store
import tile
import math
import controls
import level
import pyglet.sprite
from pyglet.window import mouse
from pyglet.window import key


class Player(tile.Gameobject):
    """Player-controlled game object: movement, pathing, building, inventory."""

    def __init__(self, coor, img, name='Player', inv=None):
        super(Player, self).__init__(img)
        self.coor = coor                       # [x, y] tile coordinate
        self.loc = store.findtile(self.coor)   # tile object the player stands on
        self.name = name
        # BUGFIX style: avoid sharing a mutable default; each player gets its
        # own inventory list unless one is supplied.
        self.inv = [] if inv is None else inv
        self.itemcount = 0
        # Sprite names for the facing directions; indexed via coor+dir in
        # moveone(), so index -1 ('pcharR') is reached on purpose.
        self.faces = ['pcharF', 'pchar', 'pcharB', 'pcharR']
        self.look = 'pchar'
        self.mrange = 5  # movement range in tiles per turn
        self.sp = pyglet.sprite.Sprite(x=store.ct(self.coor[0]),
                                       y=store.ct(self.coor[1]),
                                       img=store.getim(self),
                                       batch=store.player_bt)
        store.add(self, 'gp')
        store.add(self.sp, 'spo')

    def build(self, buildmenu):
        """Build (or overlay) the currently selected structure on this tile."""
        if not self.cols():
            if buildmenu.c[1] == 1:
                buildmenu.overlay(store.findtile(self.coor),
                                  buildmenu.blist[buildmenu.c[0]][0],
                                  self.coor)
            else:
                buildmenu.build(store.findtile(self.coor),
                                buildmenu.blist[buildmenu.c[0]][0],
                                self.coor)
            controls.turn()

    def updateols(self):
        """Keep carried item sprites glued to the player sprite."""
        for item in self.inv:
            item.sp.x = self.sp.x
            item.sp.y = self.sp.y

    def updateitems(self):
        """Sync carried items' logical coordinates with the player's."""
        for item in self.inv:
            item.x = self.coor[0]
            item.y = self.coor[1]
            item.loc = self.loc

    def cols(self, coor=None):
        """Return True if the tile at `coor` is impassable.

        BUGFIX: the original signature took no argument, yet
        player_bordering() called it with a coordinate pair, which raised
        TypeError. `coor` defaults to the player's own tile, so existing
        no-argument callers are unchanged.
        """
        if coor is None:
            coor = self.coor
        for g_tile in store.store['gt']:
            if not g_tile.passable and g_tile.coor == coor:
                return True
        return False

    def distance(self, target):
        """Return [dx, dy] absolute tile distance to `target`."""
        return [abs(self.coor[0] - target.coor[0]),
                abs(self.coor[1] - target.coor[1])]

    def player_bordering(self):
        """Return the neighbouring (8-connected) tiles that are not blocked.

        Rewritten from an 8-branch if/elif chain. NOTE(review): the original
        passed its collision coordinate as [y, x] while tile coordinates are
        [x, y]; this version checks the tile's own [x, y] coordinate, which
        is the evident intent — confirm against tile.coor layout.
        """
        x, y = self.coor[0], self.coor[1]
        neighbours = [[nx, ny]
                      for nx in (x - 1, x, x + 1)
                      for ny in (y - 1, y, y + 1)
                      if [nx, ny] != [x, y]]
        bordering = []
        for g_tile in store.store['gt']:
            if g_tile.coor in neighbours and not self.cols(g_tile.coor):
                bordering.append(g_tile)
        return bordering

    def pathing(self):
        """Search paths to Path.goal and mark the cheapest one with sprites."""
        self.checkmv(self.loc, True, pat=True)
        Path.tagged = list(set(Path.tagged))
        if Path.pl:
            costlist = [path.cost for path in Path.pl]
            Path.cpath = Path.pl[costlist.index(min(costlist))]
            for node in Path.cpath.nodes:
                waypoint = pyglet.sprite.Sprite(x=store.ct(node.coor[0]),
                                                y=store.ct(node.coor[1]),
                                                img=store.image['marker2'],
                                                batch=store.debug_bt)
                Path.wp.append(waypoint)

    def moveg(self):
        """Highlight all reachable tiles and start pathing if hovered."""
        Path.clean_Path()
        self.checkmv(self.loc, True)
        Path.tagged = list(set(Path.tagged))
        # Renamed loop variable: the original `tile` shadowed the imported
        # `tile` module.
        for reach_tile in Path.tagged:
            marker = pyglet.sprite.Sprite(x=store.ct(reach_tile.coor[0]),
                                          y=store.ct(reach_tile.coor[1]),
                                          img=store.image['marker'],
                                          batch=store.debug_bt)
            Path.tags.append(marker)
        for tagged in Path.tagged:
            Path.ptagged.append(tagged)
        if level.ontiles([store.cursor.mposx, store.cursor.mposy], Path.ptagged):
            Path.clean_Path(tags=False)
            Path.goal = store.findtile(store.cursor.coor)
            store.cplayer.pathing()

    def checkmv(self, tchk, first=False, pat=False, f=None):
        """Recursive flood-fill of tiles reachable within self.mrange.

        With pat=True, also records every path that reaches Path.goal into
        Path.pl. `f` is the tile we came from (skipped to avoid immediate
        backtracking). Uses Path's class-level accumulators as shared state.
        """
        checkdirs = [tchk.dirs[-1], tchk.dirs[1], tchk.dirs[0], tchk.dirs[2]]
        if f:
            checkdirs.pop(checkdirs.index(f))
        if first:
            # Root call: recurse into each direction, restoring the shared
            # cost/tag state between branches.
            st_cost = Path.cost
            st_tagged = len(Path.tagged)
            for ccheck in checkdirs:
                Path.cost = st_cost
                for _ in range(len(Path.tagged) - st_tagged):
                    if pat:
                        Path.tagged.pop()
                if pat:
                    self.checkmv(ccheck, pat=True, f=tchk)
                else:
                    self.checkmv(ccheck, f=tchk)
        if not first and tchk.passable and Path.cost + 1 <= self.mrange:
            Path.cost += 1
            Path.tagged.append(tchk)
            st_cost = Path.cost
            st_tagged = len(Path.tagged)
            if pat and tchk.coor == Path.goal.coor:
                # Reached the goal: snapshot the current node list as a path.
                p = Path(Path.cost, [])
                for node in Path.tagged:
                    p.nodes.append(node)
                Path.pl.append(p)
            if Path.cost != self.mrange:
                for ccheck in checkdirs:
                    Path.cost = st_cost
                    for _ in range(len(Path.tagged) - st_tagged):
                        if pat:
                            Path.tagged.pop()
                    if pat:
                        self.checkmv(ccheck, pat=True, f=tchk)
                    else:
                        self.checkmv(ccheck, f=tchk)

    def moveone(self, coor, dir, fixcoor):
        """Move one tile along axis `coor` (0=x, 1=y) in direction `dir` (+/-1)."""
        if not self.coll(coor + dir):
            self.coor[coor] += dir
            self.look = self.faces[coor + dir]
            self.sp.image = store.image[self.look]
            if coor == 0:
                self.sp.x = store.ct(self.coor[0])
            else:
                self.sp.y = store.ct(self.coor[1])
            self.loc = store.findtile(self.coor)
            controls.turn()
            self.updateols()
            self.updateitems()

    def coll(self, direc):
        """Return True if the adjacent tile in direction `direc` is blocked."""
        return not self.loc.dirs[direc].passable

    def pmove(self, path, step):
        """Advance along `path`; snap to the goal once all steps are consumed."""
        if Path.step < len(path):
            Path.node = path[step]
            Path.anim = True
        else:
            Path.step = 0
            self.coor[0] = Path.goal.coor[0]
            self.coor[1] = Path.goal.coor[1]
            self.loc = store.findtile(self.coor)
            self.sp.x = store.ct(self.coor[0])
            self.sp.y = store.ct(self.coor[1])
            controls.turn()

    def addplayer(self):
        """Spawn a new player one tile up-right; registration happens in __init__."""
        Player(coor=[self.coor[0] + 1, self.coor[1] + 1], img='pchar')
        store.cid += 1

    def cloak(self):
        """Toggle between the normal and cloaked sprite."""
        if not self.cols():
            if self.img == 'pchar':
                self.sp.image = store.image['pchar_1b']
                self.img = 'pchar_1b'
            else:
                self.sp.image = store.image['pchar']
                self.img = 'pchar'
            controls.turn()

    def hasitem_name(self, name):
        """Return True if the inventory holds an item with the given name."""
        for item in self.inv:
            if item.name == name:
                return True
        return False


class Path(object):
    """Class-level scratch state for path search plus event handlers."""

    cost = 0        # running cost of the branch being explored
    tagged = []     # tiles reached by the current search
    ptagged = []    # reachable tiles kept while the move UI is active
    tags = []       # highlight sprites for reachable tiles
    wp = []         # waypoint sprites for the chosen path
    goal = None     # target tile
    pl = []         # all complete paths found
    cpath = None    # cheapest path
    anim = False
    step = 0

    @staticmethod
    def clean_Path(tags=True):
        """Reset search state; optionally also delete the highlight sprites."""
        Path.cost = 0
        Path.tagged[:] = []
        Path.pl[:] = []
        if Path.wp:
            for wp in Path.wp:
                # BUGFIX: was `wp.delete` (a bare method reference, never
                # invoked), leaking waypoint sprites.
                wp.delete()
            del Path.wp[:]
        if tags:
            for tag in Path.tags:
                tag.delete()
            del Path.tags[:]

    @staticmethod
    def on_key_press(symbol, modifiers):
        if symbol == key.ESCAPE:
            store.clevel.pop_handlers()
            store.handleraltered = False
            Path.clean_Path()
            del Path.ptagged[:]
            return True

    @staticmethod
    def on_mouse_motion(x, y, dx, dy):
        # Only react once the mouse leaves the previously-hovered tile.
        if (x + store.ats > store.cursor.mposx + store.ts
                or x + store.ats < store.cursor.mposx
                or y + store.ats > store.cursor.mposy + store.ts
                or y + store.ats < store.cursor.mposy):
            if level.ontiles([x, y], Path.ptagged):
                store.cursor.xcoor = math.floor(x / store.ts)
                store.cursor.ycoor = math.floor(y / store.ts)
                store.cursor.cursor = pyglet.sprite.Sprite(
                    x=store.ct(store.cursor.xcoor),
                    y=store.ct(store.cursor.ycoor),
                    img=store.image['cursor'])
                store.cursor.mposx = x
                store.cursor.mposy = y
                store.cursor.coor = [store.cursor.xcoor, store.cursor.ycoor]
                store.cursor.onarea = 'm'
                Path.clean_Path(tags=False)
                Path.goal = store.findtile(store.cursor.coor)
                store.cplayer.pathing()
        return True

    @staticmethod
    def on_mouse_press(x, y, button, modifiers):
        if button == mouse.LEFT:
            if level.ontiles([x, y], Path.ptagged):
                Path.clean_Path()
                Path.goal = store.findtile(store.cursor.coor)
                store.cplayer.pathing()
                store.cplayer.pmove(Path.cpath.nodes, Path.step)
                del Path.ptagged[:]
                Path.clean_Path()
                store.clevel.pop_handlers()
                store.handleraltered = False
        return True

    def __init__(self, cost, nodes):
        self.cost = cost
        self.nodes = nodes
Take the ramp eleveator to the tunnel level ("T") and the tunnel will lead you straight ahead into the St. Anthony Main building where we’re located, right next to the Aster Café. Take I-35W (Northbound) EXIT onto the 3RD STREET U of M EXIT #17C. Stay in the right lane to the WASHINGTON AVE, U of M WEST BANK OFF RAMP. Turn LEFT onto WASHINGTON AVE. Continue to WASHINGTON AVE. Turn RIGHT onto HENNEPIN AVE crossing the bridge. Turn Right onto 2nd Ave SE. Take I-94 (West) to the 5TH ST EXIT #234B. Continuing on 5TH ST EXIT bear RIGHT to DOWTOWN VIA S. WASHINGTON. Turn RIGHT onto S. 11TH AVE. Continue on WASHINGTON AVE. To Ramp entrance on your Right. Take I-394 (East) downtown to the 3RD AVE. N. /N. WASHINGTON AVE. EXIT #9C. Turn RIGHT onto N. WASHINGTON AVE. Continue on N. WASHINGTON AVE. Turn LEFT onto HENNEPIN AVE crossing the bridge. Take I-94 (East) to the 4TH ST. N. EXIT #230. Continue on N. 4TH ST. Turn LEFT onto HENNEPIN AVE. Follow HENNEPIN AVE across the bridge. Explore the supernatural among the ghosts and legends of Minneapolis' original and only real ghost tour. Maybe it's the location on the shores of the fabled Mississippi or our long winter nights by a flickering fire. Perhaps it is the history of native tribes whose Spirit Island once rose from the river and has now disappeared forever. Or the legacy of fortune seekers who flocked to the area in the 1800s, some of whom found the wealth they sought and many of whom died trying. For over three centuries, men, women, and children with hopes, fears, and dreams have lived and died here. But their stories—and perhaps their spirits—remain. Nowhere is their existence more felt than in the oldest parts of the city, along the river where Spirit Island once stood. This is the site of the original St. Anthony Falls, which drew engineers and entrepreneurs, loggers and lumber pirates, saloonkeepers, floozies, and scoundrels of every stripe. 
Today, it is still the site of some of the city's most historic buildings and, allegedly, its best-known ghosts. High-tech ghost hunters and spiritualists alike attest to their continued presence—the weeping Ojibway woman, the lost children, the Wolfman of Nicollet Island, the builders, destroyers, homicides, and suicides. This is where, to this day, the ghost train runs where no tracks can be found. Below your feet, caverns, both natural and manmade, honeycomb the limestone and are home to strange things, also natural and manmade. The Real Ghost Tour™ takes you there. This is no dry history lesson or haunted house, jack-in-the-box, gore-fest. Rather, it is a thoroughly researched dramatization, drawing on real legends and real history in the actual places in which the people lived and died, the events took place, and the ghosts have been seen. Today's most modern technology brings their stories to life in an indoor, outdoor, underground tour that you, your friends, and your family will never forget. Believers and skeptics alike will find themselves swept up in the re-creations of actual events and actual sightings in the venues in which they occurred. Open your mind. Spend 90 fascinating minutes seeing what scientific and spiritual explorers alike tell us is here. See it for yourself. Where every story has a ghost, and every ghost has a story. View the live ghost cam! This webcam points at a room that serves as a terminus of the spirit river than runs under St. Anthony Main - and where William Satterlee can interact via the EMF reader and computer. View. Have you experienced the strange, the unexplainable, the paranormal? You are not alone. The supernatural still holds a powerful place in our world. The Real Ghost Tour has worked with medium, Kathleen Cotter and the West Metro Paranormal Society to examine evidence of real ghosts in the area, and employs the techniques of science to explore the spirit world. 
Our resident ghost hunter will show you how parapsychologists employ both ancient skills and modern technology to study the unknown. Have you felt the presence of a departed loved one or some wandering soul? Open your mind, and learn the ancient arts from the best in the field. Visit The Other Side — Beneath your feet is a world few ever see. What secrets do these 19th century limestone walls hold? What lies beneath today's bustling Mill City? Step into the past with Real Ghost Tours of Minneapolis, Minnesota. Join a real ghost hunter on an expedition into the city's shadowlands, to meet the colorful characters of bygone days, where technology, theater, and a bit of the supernatural bring lingering souls to life. Join us at 125 Main Street SE Minneapolis to touch, and be touched by, the past ... if you dare. Visit us at our blog for further information & updated tour times. An EMF meter reads the fluctuation in electromagnetic fields. They help us track down spirits at every turn. How Does Our EMF Detector Work? An electromagnetic field (EMF) meter can detect fluctuations in the amount of electromagnetic energy in an area, and unusual fluctuations frequently indicate paranormal activity. Over the last few decades, as electronic devices and their fields have proliferated, the EMF meter has become an essential ghost-hunting tool for detecting paranormal disturbances. At Real Ghost Tours, we have meters set up in our building's EMF hot spot. When paranormal activity is detected, our meters post the EMF readings to Twitter (pat. pending). For more information on our findings, visit realghosthunting.com. An Iowa farmer with a sideline in shoddy mattresses, he moved with his family to Minneapolis in 1877 & established what would eventually be the Salisbury & Satterlee Mattress Factory. He has a bench of his own in St. Anthony Main & his ghost is said to visit the ladies room on the second floor. 
The son of a teetotaling, fire-breathing Wisconsin preacher, Gene Satterlee moved to Minneapolis & began working in the Salisbury, Rolph & Co. Mattress Factory, eventually being made partner of the firm & becoming a prominent Minneapolis businessman. His spirit is the most communicative in St. Anthony Main. A leading businessman of the city, he built one of the earliest still-standing structures in Minneapolis. At times successful, his businesses met with many of the tragedies common in the early days of the city: his lumberyard burned to the ground & his steamer sunk in the Mississippi. Perhaps his survivor's guilt, of being the captain of a ship with only one other survivor, kept him from crossing over. Little is known of this spirit, except that one medium sensed her as a show girl above Denell’s (formerly Pracna’s) bar. © 2019 Real Ghost Tours.com is a division of Mobile Entertainment, LLC. All rights reserved.
#!env/python3
# coding: utf-8
try:
    import ipdb
except ImportError:
    pass

import os
import json
import aiohttp
import aiohttp_jinja2
import datetime
import time

from aiohttp import web
from urllib.parse import parse_qsl

from config import *
from core.framework.common import *
from core.model import *
from api_rest.rest import *


class EventHandler:
    """REST handlers for listing, searching, creating, editing and deleting events."""

    @user_role('Authenticated')
    def list(self, request):
        """ Get list of last 100 events """
        return rest_success(core.events.list())

    @user_role('Authenticated')
    async def search(self, request):
        """ Get events list corresponding to provided filters """
        data = await request.json()
        try:
            # The body may arrive as a JSON-encoded string; normalize to a dict.
            data = json.loads(data) if isinstance(data, str) else data
            return rest_success(core.events.list(**data))
        except Exception as ex:
            return rest_error("Error occurred when retrieving list of requested events. {}".format(ex), ex=ex)

    @user_role('Authenticated')
    def get(self, request):
        """ Get details about the event """
        event_id = request.match_info.get('event_id', -1)
        event = core.events.get(event_id)
        if not event:
            return rest_error("Unable to find the event (id={})".format(event_id))
        return rest_success(event)

    @user_role('Authenticated')
    async def new(self, request):
        """ Create new event with provided data """
        data = await request.json()
        data = json.loads(data) if isinstance(data, str) else data
        message = check_string(data.pop("message")) if "message" in data else None
        # BUGFIX: the key check was misspelled "detals", so the details field
        # was silently dropped on every request.
        details = check_string(data.pop("details")) if "details" in data else None
        author_id = 1  # TODO: retrieve author_id from session
        date = check_date(data.pop("date")) if "date" in data else datetime.datetime.now()
        # Create the event
        try:
            event = core.events.new(author_id, date, message, details, data)
        except Exception as ex:
            return rest_error("Error occurred when creating the new event. {}".format(ex), ex=ex)
        if event is None:
            return rest_error("Unable to create a new event with provided information.")
        return rest_success(event)

    @user_role('Authenticated')
    async def edit(self, request):
        """ Edit event data """
        data = await request.json()
        data = json.loads(data) if isinstance(data, str) else data
        # BUGFIX: event_id was referenced below but never defined, raising
        # NameError on every edit request.
        event_id = request.match_info.get('event_id', -1)
        message = check_string(data.pop("message")) if "message" in data else None
        author_id = 1  # TODO: retrieve author_id from session
        date = check_date(data.pop("date")) if "date" in data else datetime.datetime.now()
        # Edit the event
        try:
            event = core.events.edit(author_id, event_id, date, message, data)
        except Exception as ex:
            # Message fixed: this path edits an event, it does not create one.
            return rest_error("Error occurred when editing the event. {}".format(ex), ex=ex)
        if event is None:
            return rest_error("Unable to edit the event with provided information.")
        return rest_success(event)

    @user_role('Authenticated')
    def delete(self, request):
        """ Delete the event """
        event_id = request.match_info.get('event_id', -1)
        event = core.events.delete(event_id)
        if not event:
            return rest_error("Unable to delete the event (id={})".format(event_id))
        return rest_success(event)
Hekla sings tuneful, ethereal, otherworldly, minimal yet essential whispers — icy stories of a sort; spooky and singular. While a handful of reference points share a similar ground to Á — Colleen’s interplay of voice and instrumentation; the richly immersive film-score work of the sadly departed fellow Icelander Jóhann Jóhannsson; “grandmother of theremin” Clara Rockmore’s close relationship with such a singular instrument; Julia Holter’s intelligent and classically-aligned songwriting — Hekla’s music still exists singularly. A one-off talent, emerging from no particular scene, ascribing to no particular rules.
from setuptools import setup, find_packages
import sys, os

# Package release number.
version = '0.2.2'

# Trove classifiers; get strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
_classifiers = [
    "Programming Language :: Python :: 2.5",
    "Topic :: Communications :: Chat",
    "Development Status :: 4 - Beta",
    "License :: OSI Approved :: GNU General Public License (GPL)",
    "Intended Audience :: Other Audience",
    "Operating System :: POSIX :: Linux",
]

# Runtime dependencies.
_install_requires = [
    'twisted',
    'feedparser',
    'chardet',
    'simplejson',
]

setup(
    name='baas',
    version=version,
    description="'Buddy as a Service' is a xmpp / wavelet robot using Yahoo YQL API, Google API and other services to do searches (web, news, reviews, wikipedia, wiktionary, imdb) and some other stuff (translations, weather forecast, etc) for you.",
    long_description="""\
The XMPP bot also runs on the google appengine. BaaS is easy extensible through plugins. No API Keys required! \

See http://mborho.github.com/baas for more infos.
""",
    classifiers=_classifiers,
    keywords='xmpp',
    author='Martin Borho',
    author_email='martin@borho.net',
    url='http://mborho.github.com/baas',
    license='GNU General Public License (GPL)',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    #data_files=[('conf',"conf/baas.conf")],
    include_package_data=True,
    zip_safe=False,
    install_requires=_install_requires,
    entry_points="""
    [console_scripts]
    baas_bot = baas.scripts.bot:main
    """,
)
Good Game Programmer Resume Personal Qualities Fill Online, Printable . Great Nice Video Game Designer Resume Ideas The Best Curriculum Vitae . Web Developer Resumes. Game Developer Resume Resume Makeover Junior . High Quality Research And Development Resume Research And Development Resume Objectives . 6 | P A G E ; 7. Research And Development Resume R Research And Development Resume Objectives . Real Estate Agent Resume Real Estate Agent Cover Letter Flash Game Developer Cover Letter Design Synthesis . Game Programming Resume Developer Resume Examples Example Resume Game Programmer Developer Resume Examples Example Resume Game . To Make Your Task Even Easier, Weu0027ve Created A Number Of Versions Of This Checklist, Including An HTML Version, Markdown, PDF And Evernote.
import networkx as nx
import community as co
from edgesense.network.utils import extract_dpsg
from datetime import datetime


def extract_content_metrics(nodes_map, posts_map, comments_map, ts, timestep, timestep_window):
    """Compute cumulative and sliding-window content metrics at timestamp `ts`.

    Args:
        nodes_map: dict of user records; each must have 'created_ts', 'team'
            and 'team_ts' fields.
        posts_map: dict of post records with 'created_ts' and 'team'.
        comments_map: dict of comment records with 'created_ts', 'team',
            'author_id' and 'recipient_id'.
        ts: end of the measured period (epoch seconds).
        timestep: length of one timestep, in the same unit as `ts`.
        timestep_window: number of timesteps included in the sliding window.

    Returns:
        dict mapping '<scope>:<metric>' names to float values ('full' = all,
        'user' = non-team contributors, 'team' = team members; 'ts_' prefixed
        metrics cover only the sliding window).
    """
    # Start of the sliding window; hoisted out of the per-item loops
    # (was recomputed as `ts - timestep*timestep_window` on every record).
    window_start = ts - timestep * timestep_window
    ts_metrics = {
        'ts': ts,
        'full:users_count': 0.,          # Number of users total
        'user:users_count': 0.,          # Number of non-team users
        'team:users_count': 0.,          # Number of team users
        'full:posts_count': 0.,          # Number of Posts total
        'user:posts_count': 0.,          # - Number of Posts by contributors
        'team:posts_count': 0.,          # - Number of Posts by team
        'user:posts_share': 0.,          # - Share of User Generated Posts
        'user:team_posts_share': 0.,     # - Share of Team/User Posts
        'full:ts_posts_count': 0.,       # - Number of Posts in period
        'user:ts_posts_count': 0.,       # - Number of Posts by contributors in period
        'team:ts_posts_count': 0.,       # - Number of Posts by team in period
        'user:ts_posts_share': 0.,       # - Share of User Generated Posts in period
        'user:ts_team_posts_share': 0.,  # - Share of Team/User Posts in period
        'full:comments_count': 0.,       # - Number of Comments total
        'user:comments_count': 0.,       # - Number of Comments by contributors
        'team:comments_count': 0.,       # - Number of Comments by team
        'user:comments_share': 0.,       # - Share of Team/User Generated Comments
        'user:team_comments_share': 0.,  # - Share of User Generated Comments
        'full:ts_comments_count': 0.,    # - Number of Comments total in period
        'user:ts_comments_count': 0.,    # - Number of Comments by contributors in period
        'team:ts_comments_count': 0.,    # - Number of Comments by team in period
        'user:ts_comments_share': 0.,    # - Share of User Generated Comments in period
        'user:ts_team_comments_share': 0.,  # - Share of Team/User Generated Comments in period
        'user:active_count': 0.,
        'user:noteam_active_count': 0.,
        'user:active_share': 0.,
        'user:conversations': 0.,
        'user:noteam_conversations': 0.,
        'user:conversations_share': 0.
    }
    # Users count metrics
    for u in nodes_map.values():
        if int(u['created_ts']) <= ts:
            ts_metrics['full:users_count'] += 1
            if u['team']:
                ts_metrics['team:users_count'] += 1
            else:
                ts_metrics['user:users_count'] += 1
    # Posts Count metrics
    for p in posts_map.values():
        if p['created_ts'] <= ts:
            ts_metrics['full:posts_count'] += 1
            if p['team']:
                ts_metrics['team:posts_count'] += 1
            else:
                ts_metrics['user:posts_count'] += 1
        if p['created_ts'] <= ts and p['created_ts'] >= window_start:
            ts_metrics['full:ts_posts_count'] += 1
            if p['team']:
                ts_metrics['team:ts_posts_count'] += 1
            else:
                ts_metrics['user:ts_posts_count'] += 1
    if ts_metrics['full:posts_count'] > 0:
        ts_metrics['user:posts_share'] = float(ts_metrics['user:posts_count'])/float(ts_metrics['full:posts_count'])
    if ts_metrics['user:posts_count'] > 0:
        ts_metrics['user:team_posts_share'] = float(ts_metrics['team:posts_count'])/float(ts_metrics['user:posts_count'])
    if ts_metrics['full:ts_posts_count'] > 0:
        ts_metrics['user:ts_posts_share'] = float(ts_metrics['user:ts_posts_count'])/float(ts_metrics['full:ts_posts_count'])
    if ts_metrics['user:ts_posts_count'] > 0:
        ts_metrics['user:ts_team_posts_share'] = float(ts_metrics['team:ts_posts_count'])/float(ts_metrics['user:ts_posts_count'])
    # Comments Count metrics
    for c in comments_map.values():
        if c['created_ts'] <= ts:
            ts_metrics['full:comments_count'] += 1
            if c['team']:
                ts_metrics['team:comments_count'] += 1
            else:
                ts_metrics['user:comments_count'] += 1
        if c['created_ts'] <= ts and c['created_ts'] >= window_start:
            ts_metrics['full:ts_comments_count'] += 1
            if c['team']:
                ts_metrics['team:ts_comments_count'] += 1
            else:
                ts_metrics['user:ts_comments_count'] += 1
    if ts_metrics['full:comments_count'] > 0:
        ts_metrics['user:comments_share'] = float(ts_metrics['user:comments_count'])/float(ts_metrics['full:comments_count'])
    if ts_metrics['user:comments_count'] > 0:
        ts_metrics['user:team_comments_share'] = float(ts_metrics['team:comments_count'])/float(ts_metrics['user:comments_count'])
    if ts_metrics['full:ts_comments_count'] > 0:
        ts_metrics['user:ts_comments_share'] = float(ts_metrics['user:ts_comments_count'])/float(ts_metrics['full:ts_comments_count'])
    if ts_metrics['user:ts_comments_count'] > 0:
        ts_metrics['user:ts_team_comments_share'] = float(ts_metrics['team:ts_comments_count'])/float(ts_metrics['user:ts_comments_count'])
    # - User counts
    actives = set()
    noteam_actives = set()
    conversations = set()
    noteam_conversations = set()
    for c in comments_map.values():
        # FIX: dict.has_key() is Python-2-only; `in` works on both 2 and 3.
        if c['created_ts'] <= ts and c['author_id'] in nodes_map and c['recipient_id'] in nodes_map:
            a = nodes_map[c['author_id']]
            r = nodes_map[c['recipient_id']]
            cnv = '-'.join(sorted([str(c['author_id']), str(c['recipient_id'])]))
            # NOTE(review): the author feeds `actives` while the recipient
            # feeds `noteam_actives`; the asymmetry looks suspicious but is
            # preserved as-is — confirm the intended semantics upstream.
            if not (a['team'] and a['team_ts'] <= ts):
                actives.add(c['author_id'])
                conversations.add(cnv)
            if not (r['team'] and r['team_ts'] <= ts):
                noteam_actives.add(c['recipient_id'])
                noteam_conversations.add(cnv)
    ts_metrics['user:active_count'] = len(actives)
    ts_metrics['user:noteam_active_count'] = len(noteam_actives)
    if ts_metrics['user:active_count'] > 0:
        ts_metrics['user:active_share'] = float(ts_metrics['user:noteam_active_count'])/float(ts_metrics['user:active_count'])
    ts_metrics['user:conversations'] = len(conversations)
    ts_metrics['user:noteam_conversations'] = len(noteam_conversations)
    if ts_metrics['user:conversations'] > 0:
        ts_metrics['user:conversations_share'] = float(ts_metrics['user:noteam_conversations'])/float(ts_metrics['user:conversations'])
    return ts_metrics
Our premier Alan Hannah and MiaMia stockist, Boo Bridal Boutique in Hartley Wintney Hants is hosting a double designer day with both Alan Hannah and MiaMia on 25th April. Call Anna on 01252 849556. You can see a large selection of both collections, and each bride will receive a personal sketch of their dress and a piece of Alan Hannah jewellery. Catwalk launch of 2016 collection (trade event). 11th July 2015. Designer day at the wonderful Sam Cox Boutique in Plymouth. Marguerite and Alan will be present with 3 collections, Alan Hannah MiaMia and Rita Mae. All brides will have an individual consultation with the designer present and will also receive a personal sketch of their dress and a piece of Alan Hannah branded jewellery. Please call 01752 228451 or email samcoxbridalwear@btconnect.com for an appointment. We are very excited to introduce our new stockist, Mode Bridal in Hove, Sussex. Jana has created a beautiful but friendly boutique which is a heavenly experience for any bride to be. You will see a collection of the most sublime gowns and we are so pleased that Alan Hannah is going to be represented here. To introduce the collection Jana is hosting an Alan Hannah designer event and launch 15th -17th October. Although Mode will have some of the collection by the end of July, on these special days you will be able to see the whole our new Watercolours collection and every bride finding their dream dress will receive a gift of Alan Hannah jewellery and a personalised original sketch of their dress. Call Jana on 01273 930 990 for more details and to make an appointment. 23rd and 24th October 2015. Boo Bridal Boutique Hants. See the whole Alan Hannah Watercolours collection, the 2016 MiaMia collection and the brand new Rita Mae short wedding dresses. All brides will receive a sketch of their dress and a piece of Alan Hannah jewellery. Telephone 01252 849556 to book your place. Designer event at Love Bridal Altrincham Cheshire. 
This truly beautiful and intimate boutique is hosting the whole Watercolours collection on the 14th of November. Each bride will receive a signed sketch of her chosen dress and a piece of Alan Hannah Jewellery. Call Emma on 0161 941 6446 or email hello@lovebridalboutique.co.uk for more details and to make an appointment.
from teafacto.core.base import Block, Var, Val, param, tensorops as T
from IPython import embed

# TODO: INPUT MASK !!!!!!!! and attention etc
# TODO: what about memory mask?
# TODO: MEMORY POSITION EMBEDDINGS/ENCODINGS

# SYMBOLIC OUTPUT MEMORY ENABLED SEQ2SEQ
# - can place attention over all of temporary created output sequence
# - can write to any time step of output (write/erase interface)
# - can do multiple attention steps without actual output (change scalars)
# -> loss is placed over the symbolic output memory
class BulkNN(Block):
    """Seq2seq block with a symbolic output memory.

    A recurrent "thinker" core attends over both the encoded input and a
    soft output memory, and at each step emits address/write/erase/change
    signals that update the memory in place.  All sub-components (encoders,
    attentions, extractors, generators) are injected via the constructor;
    this class only wires them together.
    """

    def __init__(self, inpencoder=None, memsampler=None,
                 memembmat=None, memencoder=None, memlen=None,
                 mem_pos_repr=None, inp_pos_repr=None,
                 inp_attention=None, mem_attention=None,
                 inp_addr_extractor=None, mem_addr_extractor=None,
                 write_addr_extractor=None, write_addr_generator=None,
                 write_value_generator=None, write_value_extractor=None,
                 mem_erase_generator=None, mem_change_generator=None,
                 nsteps=100, core=None, **kw):
        super(BulkNN, self).__init__(**kw)
        # Optional positional representation for memory slots, materialized
        # once for the fixed memory length.
        if mem_pos_repr is not None:
            self._memposvecs = mem_pos_repr(memlen)
        else:
            self._memposvecs = None
        self._inp_pos_repr = inp_pos_repr
        self._nsteps = nsteps          # number of "thinking" steps to scan
        self._memlen = memlen          # number of output-memory slots
        self._inpencoder = inpencoder
        self._inp_att = inp_attention
        self._memencoder = memencoder
        self._mem_att = mem_attention
        self._memembmat = memembmat    # (memvocsize, memembdim) embedding matrix
        self._memsampler = memsampler
        self._core = core              # recurrent "thinker" core
        # extractors from top core state:
        self._inp_addr_extractor = inp_addr_extractor
        self._mem_addr_extractor = mem_addr_extractor
        self._write_addr_extractor = write_addr_extractor
        self._write_addr_generator = write_addr_generator
        self._write_value_extractor = write_value_extractor
        self._write_value_generator = write_value_generator
        self._mem_change_generator = mem_change_generator
        self._mem_erase_generator = mem_erase_generator

    def apply(self, inpseq):    # int-(batsize, seqlen)
        """Encode `inpseq`, run `nsteps` memory-update steps, return final memory.

        Returns a pair (final memory state, full scan output); the scan output
        also carries mem_0/h_0 as extra outputs for debugging.
        """
        inpenco = self._inpencoder(inpseq)  # may carry mask, based on encoder's embedder
        batsize = inpenco.shape[0]
        outvocsize = self._memembmat.shape[0]
        # Initial memory: near-one-hot on symbol 0 for every slot, then
        # softmax-normalized into a proper distribution per slot.
        mem_0 = T.concatenate([
            T.ones((batsize, self._memlen, 1), dtype="float32") * 0.95,
            T.ones((batsize, self._memlen, outvocsize-1), dtype="float32") * 0.05,
            ], axis=2)      # (batsize, outseqlen, outvocsize)
        mem_0 = T.softmax(mem_0)
        core_init_states = self._core.get_init_info(batsize)
        core_state_spec = self._core.get_statespec(flat=False)
        assert(len(core_state_spec) == len(core_init_states))
        h_0 = None  # take last output of core states as initial state
        c = 0
        # NOTE(review): walks the state spec looking for an "output" state;
        # the fallback assignment keeps h_0 pointing at the last visited
        # state when no output state was seen yet — confirm against
        # get_statespec()'s layout.
        for ss in core_state_spec:
            h_0_isout = False
            for sss in ss:
                if sss[0] == "output":
                    h_0_isout = True
                    h_0 = core_init_states[c]
                if not h_0_isout:
                    h_0 = core_init_states[c]
                c += 1
        if self._inp_pos_repr is not None:
            # Append (broadcast) positional vectors to the encoded input,
            # preserving the original mask.
            inpposvecs = self._inp_pos_repr(inpseq.shape[1])
            inpposvecs = T.repeat(inpposvecs.dimadd(0), batsize, axis=0)
            inpenc = T.concatenate([inpenco, inpposvecs], axis=2)
            inpenc.mask = inpenco.mask
        else:
            inpenc = inpenco
        outputs = T.scan(fn=self.rec,
                         outputs_info=[None, mem_0, h_0] + core_init_states,
                         n_steps=self._nsteps,
                         non_sequences=inpenc)
        ret = outputs[0]
        ret.push_extra_outs({"mem_0": mem_0, "h_0": h_0})      # DEBUGGING
        return ret[-1], ret

    def rec(self, mem_tm1, h_tm1, *args):
        """One scan step: read input+memory, update core state, rewrite memory."""
        inpenc = args[-1]          # non-sequence passed by T.scan
        states_tm1 = args[:-1]     # remaining core states from previous step
        batsize = inpenc.shape[0]
        # mem_tm1: f(batsize, outseqlen, outvocsize)
        # h_tm1: f(batsize, thinkerdim)
        # inpenc: f(batsize, inplen, inpencdim)
        # summarize memory
        mem_tm1_sam = self._memsample(mem_tm1)      # sample from mem
        mem_tm1_embsum = T.dot(mem_tm1_sam, self._memembmat)   # f(batsize, outseqlen, memembdim)
        mem_tm1_sum = self._memencode(mem_tm1_embsum)          # f(batsize, outseqlen, memsumdim)
        if self._memposvecs is not None:
            # Append per-slot positional vectors to the memory summary.
            memposvecs = T.repeat(self._memposvecs.dimadd(0), batsize, axis=0)
            mem_tm1_sum = T.concatenate([mem_tm1_sum, memposvecs], axis=2)
        # input and memory read attentions
        inp_ctx_t = self._get_inp_ctx(h_tm1, inpenc)       # (batsize, inpencdim)
        mem_ctx_t = self._get_mem_ctx(h_tm1, mem_tm1_sum)  # (batsize, memsumdim)
        # update thinker state
        i_t = T.concatenate([inp_ctx_t, mem_ctx_t], axis=1)
        rnuret = self._core.rec(i_t, *states_tm1)
        h_t = rnuret[0]
        states_t = rnuret[1:]
        # memory change interface
        mem_t_addr = self._get_addr_weights(h_t, mem_tm1_sum)  # float-(batsize, outseqlen)
        mem_t_write = self._get_write_weights(h_t)             # (batsize, memvocsize)
        e_t = self._get_erase(h_t)      # (0..1)-(batsize,)
        c_t = self._get_change(h_t)     # (0..1)-(batsize,)
        # memory change
        can_mem_t = mem_tm1 - T.batched_dot(e_t, mem_tm1 * mem_t_addr.dimshuffle(0, 1, 'x'))    # erase where we addressed
        can_mem_t = can_mem_t + T.batched_tensordot(mem_t_addr, mem_t_write, axes=0)            # write new value
        mem_t = T.batched_dot(1 - c_t, mem_tm1) + T.batched_dot(c_t, can_mem_t)  # interpolate between old and new value
        mem_t = T.softmax(mem_t)        # normalize to probabilities
        # First element is the per-step output collected by scan; memory and
        # thinker state are carried to the next step.
        return (mem_t, mem_t, h_t) + tuple(states_t)

    def _memsample(self, mem):
        """Optionally sample from the soft memory (identity when no sampler)."""
        if self._memsampler is None:
            return mem
        else:
            return self._memsampler(mem)

    def _memencode(self, mem):
        """Optionally encode the embedded memory (identity when no encoder)."""
        if self._memencoder is None:
            return mem
        else:
            return self._memencoder(mem)

    def _get_inp_ctx(self, h, inpenc):
        """Attend over the encoded input using a criterion extracted from h."""
        crit = self._inp_addr_extractor(h)
        return self._inp_att(crit, inpenc)

    def _get_mem_ctx(self, h, mem):
        """Attend over the memory summary using a criterion extracted from h."""
        crit = self._mem_addr_extractor(h)
        return self._mem_att(crit, mem)

    def _get_addr_weights(self, h, mem):
        """Produce write-address weights over memory slots from core state h."""
        crit = self._write_addr_extractor(h)
        return self._write_addr_generator(crit, mem)

    def _get_write_weights(self, h):
        """Produce the value distribution to write, from core state h."""
        crit = self._write_value_extractor(h)
        return self._write_value_generator(crit)  # generate categorical write distr

    def _get_erase(self, h):
        """Scalar erase gate in (0..1), from core state h."""
        return self._mem_erase_generator(h)

    def _get_change(self, h):
        """Scalar change/interpolation gate in (0..1), from core state h."""
        return self._mem_change_generator(h)


from teafacto.blocks.seq.rnn import SeqEncoder, MakeRNU, RecStack, RNNWithoutInput
from teafacto.blocks.seq.rnu import GRU
from teafacto.blocks.match import CosineDistance
from teafacto.blocks.seq.attention import Attention, AttGen
from teafacto.blocks.basic import MatDot, Linear, Forward, SMO
from teafacto.blocks.activations import GumbelSoftmax
from teafacto.core.base import asblock
from teafacto.util import issequence
class SimpleBulkNN(BulkNN):
    """ Parameterized simple interface for BulkNN that builds defaults for
        every subcomponent (encoders, attentions, extractors, generators)
        that the caller does not supply explicitly. """

    def __init__(self, inpvocsize=None, inpembdim=None, inpemb=None,
                 inpencinnerdim=None, bidir=False, maskid=None,
                 dropout=False, rnu=GRU, inpencoder=None,
                 memvocsize=None, memembdim=None, memembmat=None,
                 memencinnerdim=None, memencoder=None,
                 inp_att_dist=CosineDistance(), mem_att_dist=CosineDistance(),
                 inp_attention=None, mem_attention=None,
                 coredims=None, corernu=GRU, core=None, explicit_interface=False,
                 scalaraggdim=None, write_value_dim=None, nsteps=100,
                 posvecdim=None, mem_pos_repr=None, inp_pos_repr=None,
                 inp_addr_extractor=None, mem_addr_extractor=None,
                 write_addr_extractor=None, write_addr_generator=None,
                 write_addr_dist=CosineDistance(),
                 write_value_generator=None, write_value_extractor=None,
                 mem_erase_generator=None, mem_change_generator=None,
                 memsampler=None, memsamplemethod=None, memsampletemp=0.3,
                 **kw):
        # INPUT ENCODING: build a default RNN encoder over the input sequence
        # unless one is provided; track the dimension of its last layer.
        if inpencoder is None:
            inpencoder = SeqEncoder.RNN(indim=inpvocsize, inpembdim=inpembdim,
                                        inpemb=inpemb, innerdim=inpencinnerdim,
                                        bidir=bidir, maskid=maskid,
                                        dropout_in=dropout, dropout_h=dropout,
                                        rnu=rnu).all_outputs()
            lastinpdim = inpencinnerdim if not issequence(inpencinnerdim) \
                else inpencinnerdim[-1]
        else:
            lastinpdim = inpencoder.block.layers[-1].innerdim
        # MEMORY ENCODING: embedding matrix + RNN encoder over memory symbols.
        if memembmat is None:
            memembmat = param((memvocsize, memembdim),
                              name="memembmat").glorotuniform()
        if memencoder is None:
            memencoder = SeqEncoder.RNN(inpemb=False, innerdim=memencinnerdim,
                                        bidir=bidir, dropout_in=dropout,
                                        dropout_h=dropout, rnu=rnu,
                                        inpembdim=memembdim).all_outputs()
            lastmemdim = memencinnerdim if not issequence(memencinnerdim) \
                else memencinnerdim[-1]
        else:
            lastmemdim = memencoder.block.layers[-1].innerdim
        # POSITION VECTORS: optional learned positional representations for
        # input and memory; they add xtra_dim to the attention criteria.
        if posvecdim is not None and inp_pos_repr is None:
            inp_pos_repr = RNNWithoutInput(posvecdim, dropout=dropout)
        if posvecdim is not None and mem_pos_repr is None:
            mem_pos_repr = RNNWithoutInput(posvecdim, dropout=dropout)
        xtra_dim = posvecdim if posvecdim is not None else 0
        # CORE RNN - THE THINKER: consumes [input ctx | memory ctx | positions].
        if core is None:
            corelayers, _ = MakeRNU.fromdims(
                [lastinpdim + lastmemdim + xtra_dim * 2] + coredims,
                rnu=corernu, dropout_in=dropout, dropout_h=dropout,
                param_init_states=True)
            core = RecStack(*corelayers)
        lastcoredim = core.get_statespec()[-1][0][1][0]
        # ATTENTIONS: both memory and input attention default to the given
        # distance measures; write addressing gets its own attention generator.
        if mem_attention is None:
            mem_attention = Attention(mem_att_dist)
        if inp_attention is None:
            inp_attention = Attention(inp_att_dist)
        if write_addr_generator is None:
            write_addr_generator = AttGen(write_addr_dist)
        # WRITE VALUE
        if write_value_generator is None:
            write_value_generator = WriteValGenerator(write_value_dim,
                                                      memvocsize,
                                                      dropout=dropout)
        # MEMORY SAMPLER: a sampler and a sample method are mutually exclusive.
        if memsampler is not None:
            assert (memsamplemethod is None)
        if memsamplemethod is not None:
            assert (memsampler is None)
            memsampler = GumbelSoftmax(temperature=memsampletemp)
        ################ STATE INTERFACES #################
        if not explicit_interface:
            # Learned projections from the core state to each interface.
            if inp_addr_extractor is None:
                inp_addr_extractor = Forward(lastcoredim,
                                             lastinpdim + xtra_dim,
                                             dropout=dropout)
            if mem_addr_extractor is None:
                # BUG FIX: this branch previously assigned inp_addr_extractor,
                # leaving mem_addr_extractor at None and overwriting the input
                # extractor with memory-sized dimensions.
                mem_addr_extractor = Forward(lastcoredim,
                                             lastmemdim + xtra_dim,
                                             dropout=dropout)
            # WRITE INTERFACE
            if write_addr_extractor is None:
                write_addr_extractor = Forward(lastcoredim,
                                               lastmemdim + xtra_dim,
                                               dropout=dropout)
            if write_value_extractor is None:
                write_value_extractor = Forward(lastcoredim, write_value_dim,
                                                dropout=dropout)
            # MEM UPDATE INTERFACE
            if mem_erase_generator is None:
                mem_erase_generator = StateToScalar(lastcoredim, scalaraggdim)
            if mem_change_generator is None:
                mem_change_generator = StateToScalar(lastcoredim, scalaraggdim)
        else:
            # Explicit interface: the core state itself is sliced into the
            # six interface vectors (two trailing scalars for erase/change).
            inp_addr_extractor, mem_addr_extractor, write_addr_extractor, \
                write_value_extractor, mem_erase_generator, mem_change_generator = \
                make_vector_slicers(0, lastinpdim + xtra_dim,
                                    lastmemdim + xtra_dim,
                                    lastmemdim + xtra_dim,
                                    write_value_dim, 1, 1)
        super(SimpleBulkNN, self).__init__(
            inpencoder=inpencoder, memembmat=memembmat,
            memencoder=memencoder, inp_attention=inp_attention,
            mem_attention=mem_attention, core=core, memsampler=memsampler,
            nsteps=nsteps, inp_addr_extractor=inp_addr_extractor,
            mem_addr_extractor=mem_addr_extractor,
            write_addr_extractor=write_addr_extractor,
            write_addr_generator=write_addr_generator,
            mem_erase_generator=mem_erase_generator,
            mem_change_generator=mem_change_generator,
            write_value_generator=write_value_generator,
            write_value_extractor=write_value_extractor,
            inp_pos_repr=inp_pos_repr, mem_pos_repr=mem_pos_repr, **kw)


class WriteValGenerator(Block):
    """ Maps a write-value criterion vector to a categorical distribution
        over the memory vocabulary via optional intermediate layers. """

    def __init__(self, dim, vocsize, interdims=tuple(), dropout=False, **kw):
        super(WriteValGenerator, self).__init__(**kw)
        self.dims = (dim,) + interdims
        self.vocsize = vocsize
        self.layers = []
        for i in range(len(self.dims) - 1):
            layer = Forward(self.dims[i], self.dims[i + 1], dropout=dropout)
            self.layers.append(layer)
        self.smo = SMO(self.dims[-1], outdim=self.vocsize)

    def apply(self, x):
        for layer in self.layers:
            x = layer(x)
        ret = self.smo(x)
        return ret


class StateToScalar(Block):
    """ Squashes a state vector to a single scalar in (0, 1) by a learned
        projection followed by a learned aggregation and a sigmoid. """

    def __init__(self, dim, outdim, **kw):
        super(StateToScalar, self).__init__(**kw)
        self.block = Forward(dim, outdim)
        self.agg = param((outdim,), name="scalartostate_agg").uniform()

    def apply(self, x):
        # NOTE(review): T.dot(x, self.block) dots against the Forward block
        # object rather than calling self.block(x) — confirm teafacto
        # supports blocks as T.dot operands, else this should be a call.
        y = T.dot(x, self.block)
        z = T.dot(y, self.agg)      # (batsize,)
        ret = T.nnet.sigmoid(z)
        return ret


def make_vector_slicers(*sizes):
    """ Yields Slicer blocks cutting consecutive chunks from a vector's last
        axis. The first argument is the starting offset (usually 0); each
        following argument is the width of the next chunk. """
    boundaries = [sizes[0]]
    for size in sizes[1:]:
        boundaries.append(boundaries[-1] + size)
    for left, right in zip(boundaries[:-1], boundaries[1:]):
        yield Slicer(left, right)


class Slicer(Block):
    """ Slices [a, b) from the last axis of its input; a width-1 slice is
        squeezed to a scalar along that axis. """

    def __init__(self, a, b, **kw):
        super(Slicer, self).__init__(**kw)
        self.a = a
        self.b = b

    def apply(self, x):
        attrs = [slice(None, None, None)] * x.ndim
        if self.b - self.a == 1:
            attrs[-1] = self.a
        else:
            attrs[-1] = slice(self.a, self.b, None)
        ret = x[attrs]
        return ret


if __name__ == "__main__":
    m = RNNWithoutInput(3, 2)
    out = m(5)
    print(out.eval().shape)
    print(out.eval())
Jefferson’s luxury lodging choice! This century-old house has been completely renovated inside and out for your comfort and privacy, offering three unique guest accommodations. Rooms feature solid oak flooring, crown molding, king beds, luxurious baths, designer décor, cable TV, individual climate control, refrigerators, and CD players. Each room /suite accommodates two persons. Jefferson’s newest choice for luxury lodging. This circa 1870’s cottage has been sumptuously renovated throughout and offers two distinctive guest accommodations for the discriminating. Rooms feature solid wood floors, king beds, designer detailing, cable TV/VCR/DVD, crown moldings, individual zoned climate control, refrigerators, CD players, ceiling fans, and in-room coffee service. Each room accommodates 2 persons. Adding to the comfort of your stay in the “Memories Cottage” are the grandest baths in Jefferson! Each spacious ceramic tiled bathroom offers a palatial two-person soaking tub, separate marble shower for two and five feet of counter space.
import nose.tools from foldkin.scipy_optimizer import ScipyOptimizer from foldkin.coop.coop_collection import CoopCollectionFactory from foldkin.coop.coop_model_parameter_set import CoopModelParameterSet from foldkin.fold_rate_judge import CoopCollectionJudge from foldkin.fold_rate_predictor import FoldRateCollectionPredictor,\ FoldRatePredictor from foldkin.fold_rate_target_data import FoldRateCollectionTargetData from foldkin.file_archiver import CoopCollectionFileArchiver @nose.tools.istest class TestFitManyFoldRates(object): def make_score_fcn(self, model_factory, parameter_set, judge, data_predictor, target_data): def f(current_parameter_array): parameter_set.update_from_array(current_parameter_array) current_model = model_factory.create_model(parameter_set) score, prediction = judge.judge_prediction(current_model, data_predictor, target_data, noisy=False) # print score, parameter_set return score return f @nose.tools.istest def predicted_rate_similar_to_true_rate(self): '''This example fits coop model to experimental rate of one protein. 
''' target_data = FoldRateCollectionTargetData() target_data.load_data('N') feature_range = range(1,31) id_list = range(len(feature_range)) model_factory = CoopCollectionFactory(id_list, 'N', feature_range) initial_parameters = CoopModelParameterSet() initial_parameters.set_parameter_bounds('log_k0', 5.5, 5.7) judge = CoopCollectionJudge() data_predictor = FoldRateCollectionPredictor(FoldRatePredictor) score_fcn = self.make_score_fcn(model_factory, initial_parameters, judge, data_predictor, target_data) optimizer = ScipyOptimizer(maxfun=5) results = optimizer.optimize_parameters(score_fcn, initial_parameters) new_params, score, num_iterations = results optimized_model = model_factory.create_model(new_params) score, prediction = judge.judge_prediction(optimized_model, data_predictor, target_data) print new_params print score archiver = CoopCollectionFileArchiver() archiver.save_results(target_data, prediction, "test_many_markov_results.txt")
Planned Giving is a Win/Win scenario. Are you interested in empowering your favorite organizations, without compromising your promise to family and loved ones? “I was introduced to Tom Ligare and the Legacy Life Giving program over four years ago at its inception. The program was brought to me because of my background in non-profit organizations and my background in marketing for major companies. "The Legacy Life Giving Program introduces to on-line donors a creative way to leverage a modest gift into a much larger legacy gift by using life insurance. It is a definite win~win for the donor, the nonprofit, and the development professional." © 2017 Planned Giving Marketing Solutions. All Rights Reserved.
# Copyright Collab 2014-2016 # See LICENSE for details. """ Widgets. """ from __future__ import unicode_literals from itertools import chain from django import forms from django.utils.safestring import mark_safe class MediaDisplayWidget(forms.SelectMultiple): """ Widget for displaying media in admin forms. """ class Media: js = ("encode/js/media.js",) def render(self, name, value, attrs=None, choices=()): paths = [] script = '' if value is not None: for option_value, option_label in chain(self.choices, choices): if option_value in [int(x) for x in value]: try: from encode import models path = models.MediaFile.objects.get( title=option_label).file.url paths.append(path) except models.MediaFile.DoesNotExist: pass script = '''<script type="text/javascript"> $(document).ready(function() { var elem = $('#id_%(name)s'); var widget = new collab.PreviewWidget(elem, %(paths)s); }); </script>''' % {'name': name, 'paths': paths} if attrs is None: attrs = {} output = super(MediaDisplayWidget, self).render(name, value, attrs, choices) return mark_safe(output + script)
Whether you are looking for cheap hotels in Lamarque-Pontacq, best family-friendly hotel for children and elderlies in Lamarque-Pontacq, getaway hotels in Lamarque-Pontacq for a large group, Hotels.com makes hotel hunting quick and easy for a memorable trip ahead. If you are planning for a family trip to Lamarque-Pontacq, a special and romantic hotel stay for couples in Lamarque-Pontacq, a relaxing or quick getaway over the weekend in Lamarque-Pontacq, or even a corporate business function in Lamarque-Pontacq, Hotels.com suggests the best accommodations that fit your exact wishlist. 2. Provide insights on staying experience with 3977 genuine reviews. The detailed location mapping allows you to find your ideal Lamarque-Pontacq hotel closest to tourist attractions. Navigate the map image of this page or results from search to identify the must-visit places and landmarks in Lamarque-Pontacq followed by hotels in that area. You can further refine your searches for specific neighborhood and transport options such as train stations, airports or public transport to help you travel around with ease. Start booking Lamarque-Pontacq hotels with Hotels.com today.
import sys, os, pdb, time import numpy as np import scipy.linalg import matplotlib import matplotlib.pyplot as plt PERTURB = 1e-4#1e-3 def random_draw( gp_obj, xmesh=None, emesh=None, conditioned=True, perturb=PERTURB, ndraws=5, \ plot_draws=True, mesh_dim=0, lw=3 ): """ SUMMARY Draws one or more random realisations from the gp and (optionally) plots them, along with the mean function (black dashed line) and 1- and 2-sigma uncertainty regions (shaded grey regions). CALLING draws = random_draw( gp_obj, xmesh=None, emesh=None, conditioned=True, perturb=PERTURB, \ ndraws=5, plot_draws=True, mesh_dim=0, lw=3 ) INPUTS 'xmesh' [KxD array] - input locations for the random draw points; if set to None (default), a fine grid spanning the xtrain range will be used. 'emesh' [float] - white noise value for the random draw points; if set to None (default) or zero, then this will be set to the value of the perturb variable for numerical stability. 'conditioned' [bool] - if set to True (default), the GP will be trained on the training data stored in the object; otherwise, it will be drawn from the unconditioned prior. 'perturb' [float] - small perturbation to be added to the covariance diagonal for numerical stability if the white noise errors are set to None/zero. 'ndraws' [integer] - the number of random draws to be made. 'plot_draws' [bool] - if set to True, the random draws will be plotted. 'mesh_dim' [integer] - for cases where D>1 (i.e. multidimensional input), a single input dimension must be specified for the mesh to span; the other input variables will be held fixed to the corresponding median values in the training data set. 'lw' [integer] - thickness of plot lines. OUTPUT 'draws' [list] - a list containing the separate random draws from the GP. 
""" xtrain = gp_obj.xtrain dtrain = gp_obj.dtrain etrain = gp_obj.etrain n = np.shape( xtrain )[0] d = np.shape( xtrain )[1] if xmesh==None: nmesh = 1000 xmesh_i = np.r_[ xtrain[:,mesh_dim].min() : xtrain[:,mesh_dim].max() : 1j*nmesh ] xmesh = np.zeros( [ nmesh, d ] ) for i in range( d ): if i!=mesh_dim: xmesh[:,i] = np.median( xtrain[:,i] ) else: xmesh[:,i] = xmesh_i else: nmesh = np.shape( xmesh )[0] if conditioned==True: print( '\nDrawing from GP posterior (i.e. after being trained on data set)' ) title_str = 'posterior (i.e. trained)' else: print( '\nDrawing from GP prior (i.e. not trained on any data set)' ) title_str = 'prior (i.e. untrained)' mu, cov = meancov( gp_obj, xnew=xmesh, enew=emesh, conditioned=conditioned, perturb=perturb ) sig = np.sqrt( np.diag( cov ).flatten() ) mu = mu.flatten() sig = sig.flatten() xmesh_i = xmesh[:,mesh_dim].flatten() if plot_draws==True: fig = plt.figure() ax = fig.add_axes( [ 0.05, 0.05, 0.9, 0.9 ] ) zorder0 = 0 ax.fill_between( xmesh_i, mu-2*sig, mu+2*sig, color=[ 0.8, 0.8, 0.8 ], zorder=zorder0 ) zorder0 = 1 ax.fill_between( xmesh_i, mu-1*sig, mu+1*sig, color=[ 0.6, 0.6, 0.6 ], zorder=zorder0 ) zorder0 = 2 ax.plot( xmesh_i, mu, ls='--', c='g', lw=2, zorder=zorder0 ) ax.set_title('%i random GP draws - %s' % ( ndraws, title_str ) ) # Draw random samples from the GP: colormap = matplotlib.cm.cool colormap = plt.cm.ScalarMappable( cmap=colormap ) colormap.set_clim( vmin=0, vmax=1 ) line_colors = np.r_[ 0.05 : 0.95 : 1j*ndraws ] ax.set_xlim( [ xmesh_i.min(), xmesh_i.max() ] ) draws = [] for i in range( ndraws ): print( ' drawing %i of %i on a mesh of %i points' % ( i+1, ndraws, nmesh ) ) # The following step can be a computation bottleneck if there are too # many points on the mesh: draw = np.random.multivariate_normal( mu, cov ) draws += [ draw ] if plot_draws==True: color = colormap.to_rgba( line_colors[i] ) zorder0 = 3 ax.plot( xmesh_i, draw, ls='-', c=color, lw=lw, zorder=1 ) if ( plot_draws==True )*( conditioned==True ): 
dtrain = dtrain.flatten() zorder0 = 4 xtrain_i = xtrain[:,mesh_dim].flatten() if n<1000: marktype = 'o' elif n<2000: marktype = '.' else: marktype = ',' if ( np.all( etrain==0 ) )+( np.all( etrain==None ) )+( n>=2000 ): ax.plot( xtrain_i, dtrain, marktype, mec='k', mfc='k', zorder=zorder0 ) else: errs = etrain + np.zeros( n ) ax.errorbar( xtrain_i, dtrain, yerr=errs, fmt=marktype, mec='k', mfc='k', ecolor='k', \ capsize=0, elinewidth=2, barsabove=True, zorder=zorder0 ) return draws def meancov( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB ): """ SUMMARY Returns the mean and full covariance of a gp at the locations of xnew, with random errors enew. If conditioned==True, the gp will be conditioned on the training data stored in the gp_obj. If etrain==None or etrain==0 (stored within gp_obj), a perturbation term of magnitude perturb will be added to the diagonal entries of the training covariance matrix before it is inverted for numerical stability. CALLING: mu, cov = meancov( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB ) INPUTS 'gp_obj' [gp class object], containing: 'mfunc', 'cfunc' [functions] - mean and covariance functions. 'mpars', 'cpars' [dictionaries] - mean and covariance function parameters. 'xtrain' [NxD array] - training data input locations. 'dtrain' [Nx1 array] - training data values. 'etrain' [float] - white noise value for the training data points. 'xnew' [PxD array] - input locations for the mean and covariance to be evaluated at; if set to None (default), the values for xtrain will be used. 'enew' [float] - white noise value to be incorporated into the covariance diagonal; if set to None (default) or zero, it will be set to the value of the perturb variable for numerical stability. 'conditioned' [bool] - if set to True (default), the gp will be trained on the training data stored in the object. 
'perturb' [float] - small perturbation to be added to the covariance diagonal for numerical stability if the white noise errors are set to None/zero. OUTPUT 'mu' [Px1 array] - gp mean function values. 'cov' [PxP array] - gp covariance values. """ # Unpack the variables stored in the GP object: mfunc = gp_obj.mfunc mpars = gp_obj.mpars cfunc = gp_obj.cfunc cpars = gp_obj.cpars xtrain = gp_obj.xtrain xinduc = gp_obj.xinduc dtrain = gp_obj.dtrain etrain = gp_obj.etrain n = np.shape( xtrain )[0] m = np.shape( xinduc )[0] if xnew==None: xnew = xtrain conditioned = False p = np.shape( xnew )[0] # Ensure that etrain is formatted as an array # and any zero entries replaced with jitter: if np.ndim( etrain )==0: if ( etrain==None )+( etrain==0 ): etrain = perturb*np.ones( n ) else: ixs = ( etrain==None ) etrain[ixs] = perturb ixs = ( etrain==0 ) etrain[ixs] = perturb # Do the same for enew: if np.ndim( enew )==0: if ( enew==None ): enew = np.zeros( p ) else: ixs = ( enew==None ) enew[ixs] = perturb ixs = ( enew==0 ) enew[ixs] = perturb if mfunc==None: mfunc = zero_mfunc if mpars==None: mpars = {} if cpars==None: cpars = {} # Calculate the unconditioned mean and covariance values # at the new input locations: mnew = mfunc( xnew, **mpars ) Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m ) Kmp = cfunc( xinduc, xnew, **cpars ) Kmn = cfunc( xinduc, xtrain, **cpars ) knn = cfunc( xtrain, None, **cpars ).flatten() kpp = cfunc( xnew, None, **cpars ).flatten() Lm = np.linalg.cholesky( Km ) # The following lines calculate the pxp low-rank projection matrix: # Qp = (Kmp^T)*(Km^-1)*(Kmp) Vmp = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmp ) Qp = np.array( np.matrix( Vmp ).T * Vmp ) qpp = np.diag( Qp ) Deltap = np.diag( kpp - qpp ) sig2Ip = ( enew**2. 
) * np.eye( p ) # If we are using the unconditioned GP, we are finished: if conditioned==False: mu = np.array( mnew.flatten() ) cov = np.array( Qp + Deltap + sig2Ip ) # If we want to use the conditioned GP, we still have work to do: else: mtrain = mfunc( xtrain, **mpars ) resids = dtrain.flatten() - mtrain.flatten() # The following lines calculate the diagonal of the nxn Gamma matrix, # as given by Eq C.1. To do this, we make use of the Cholesky identity # given by Eq B.8. Note that: # sig2*Gamma = Deltan + sig2*I # where Deltan is the NxN diagonal matrix used in Eq 2.12. Lm = np.linalg.cholesky( Km ) Vmn = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmn ) gnn = 1. + ( knn.flatten() - np.sum( Vmn**2., axis=0 ).flatten() ) / ( etrain**2. ) # To make things more concise, we will divide the rows of the Vmn and # resids arrays by the square root of the corresponding entries on the # Gamma matrix diagonal. # Vmn --> Vmn * (Gamma^-0.5) # resids --> (Gamma^-0.5) * resids Vmn = np.matrix( Vmn / np.tile( np.sqrt( gnn ).flatten(), [ m, 1 ] ) ) resids = resids.flatten() / np.sqrt( gnn.flatten() ) resids = np.matrix( np.reshape( resids, [ n, 1 ] ) ) Vmn_resids = np.array( Vmn * resids ) # Now we need to calculate the term involving B^-1 in Eq 2.12, which # we do using two Cholesky decompositions: W = np.array( np.linalg.cholesky( ( enew**2. ) * np.eye( m ) + np.array( Vmn*Vmn.T ) ) ) Y = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn_resids ) H = np.linalg.lstsq( Lm, Kmp )[0] J = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), H ) # Finally, we use Eqs 2.9 and 2.12 to calculate the predictive mean and # covariance matrix of the GP: mu = np.array( mnew.flatten() + np.array( np.matrix( J ).T * np.matrix( Y ) ).flatten() ) KmpTBinvKmp = ( enew**2. 
) * np.array( np.matrix( J ).T * np.matrix( J ) ) cov = np.array( Deltap + sig2Ip + KmpTBinvKmp ) mu = np.reshape( mu, [ p, 1 ] ) cov = np.reshape( cov, [ p, p ] ) return mu, cov def predictive( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB ): """ SUMMARY Returns the predictive mean and standard deviation of a gp. If conditioned==True, the gp will be conditioned on the training data stored in the gp_obj. If etrain==None or etrain==0 (stored within gp_obj), a perturbation term of magnitude perturb will be added to the diagonal entries of the training covariance matrix before it is inverted for numerical stability. This routine is very similar to meancov, except that it only calculates the diagonal entries of the conditioned gp's covariance matrix to save time. CALLING: mu, sig = predictive( gp_obj, xnew=None, enew=None, conditioned=True, perturb=PERTURB ) INPUTS: 'gp_obj' [gp class object], containing: 'mfunc', 'cfunc' [functions] - mean and covariance functions. 'mpars', 'cpars' [dictionaries] - mean and covariance function parameters. 'xtrain' [NxD array] - training data input locations. 'dtrain' [Nx1 array] - training data values. 'etrain' [float] - white noise value for the training data points. 'xnew' [PxD array] - input locations for the mean and covariance to be evaluated at; if set to None (default), the values for xtrain will be used. 'enew' [float] - white noise value to be incorporated into the covariance diagonal; if set to None (default) or zero, it will be set to the value of the perturb variable for numerical stability. 'conditioned' [bool] - if set to True (default), the gp will be trained on the training data stored in the object. 'perturb' [float] - small perturbation to be added to the covariance diagonal for numerical stability if the white noise errors are set to None/zero. OUTPUT: 'mu' [Px1 array] - gp mean function values. 'sig' [Px1 array] - 1-sigma marginalised uncertainties, i.e. 
the square roots of the entries along the diagonal of the full covariance matrix. """ # Unpack the variables stored in the GP object: mfunc = gp_obj.mfunc mpars = gp_obj.mpars cfunc = gp_obj.cfunc cpars = gp_obj.cpars xtrain = gp_obj.xtrain xinduc = gp_obj.xinduc dtrain = gp_obj.dtrain etrain = gp_obj.etrain n = np.shape( xtrain )[0] m = np.shape( xinduc )[0] p = np.shape( xnew )[0] if mfunc==None: mfunc = zero_mfunc if mpars==None: mpars = {} if cpars==None: cpars = {} if xnew==None: xnew = xtrain conditioned = False # Ensure that etrain is formatted as an array # and any zero entries replaced with jitter: if np.ndim( etrain )==0: if ( etrain==None )+( etrain==0 ): etrain = perturb*np.ones( n ) else: ixs = ( etrain==None ) etrain[ixs] = perturb ixs = ( etrain==0 ) etrain[ixs] = perturb # Do the same for enew: if np.ndim( enew )==0: if ( enew==None ): enew = np.zeros( p ) else: ixs = ( enew==None ) enew[ixs] = perturb ixs = ( enew==0 ) enew[ixs] = perturb # Calculate the unconditioned mean and covariance values # at the new input locations: mnew = mfunc( xnew, **mpars ) kpp = cfunc( xnew, None, **cpars ).flatten() # If we are using the unconditioned GP, we are finished: if conditioned==False: mu = mnew.flatten() sig = np.sqrt( kpp.flatten() + ( enew**2. ) ) # If we want to use the conditioned GP, we still have work to do: else: mtrain = mfunc( xtrain, **mpars ) Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m ) Kmn = cfunc( xinduc, xtrain, **cpars ) Kmp = cfunc( xinduc, xnew, **cpars ) knn = cfunc( xtrain, None, **cpars ).flatten() resids = dtrain.flatten() - mtrain.flatten() # The following lines calculate the diagonal of the NxN Gamma matrix, # as given by Eq C.1. To do this, we make use of the Cholesky identity # given by Eq B.8. Note that: # sig2*Gamma = Delta + sig2*I # where Delta is the diagonal matrix used in Eq 2.12. 
Lm = np.linalg.cholesky( Km ) Vmn = scipy.linalg.lu_solve( scipy.linalg.lu_factor( Lm ), Kmn ) # Diagonal of QN: Qnn_diag = np.sum( Vmn**2., axis=0 ).flatten() # Diagonal of the D=sig2*Gamma matrix: D_diag = knn - Qnn_diag + etrain**2. # To make things more concise, we will divide the rows of the Vmn and # resids arrays by the square root of the corresponding entries on the # Gamma matrix diagonal. # Vmn --> Vmn * (Gamma^-0.5) # resids --> (Gamma^-0.5) * resids Vmn = np.matrix( Vmn / np.tile( np.sqrt( D_diag ).flatten(), [ m, 1 ] ) ) resids = resids.flatten() / np.sqrt( D_diag.flatten() ) resids = np.matrix( np.reshape( resids, [ n, 1 ] ) ) Vmn_resids = np.array( Vmn * resids ) # Now we need to calculate the terms involving B^-1 in Eq 2.12, which # we do using two Cholesky decompositions: W = np.array( np.linalg.cholesky( np.eye( m ) + np.array( Vmn*Vmn.T ) ) ) Y = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn_resids ) H = np.linalg.lstsq( Lm, Kmp )[0] J = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), H ) # Finally, we use Eq 2.12 to calculate the predictive mean and standard # deviation of the GP: mu = mnew.flatten() + np.array( np.matrix( J ).T * np.matrix( Y ) ).flatten() sig = np.sqrt( kpp.flatten() + ( enew**2. ) \ - np.sum( H**2., axis=0 ).flatten() \ + np.sum( J**2., axis=0 ).flatten() ) # Note that: # np.sum( H**2., axis=0 ) = diagonal of (H^T)*H # np.sum( J**2., axis=0 ) = diagonal of (J^T)*J mu = np.reshape( mu, [ p, 1 ] ) sig = np.reshape( sig, [ p, 1 ] ) return mu, sig def logp_builtin( gp_obj, perturb=None ): """ Uses the contents of the gp object to calculate its log likelihood. The logp() routine is actually used to perform the calculation. Note that the latter can be called directly if for some reason it is preferable to do the precomputations separately outside the routine. 
""" xtrain = gp_obj.xtrain dtrain = gp_obj.dtrain etrain = gp_obj.etrain xinduc = gp_obj.xinduc mfunc = gp_obj.mfunc mpars = gp_obj.mpars cfunc = gp_obj.cfunc cpars = gp_obj.cpars n = np.shape( dtrain )[0] m = np.shape( xinduc )[0] if mpars==None: mpars = {} if cpars==None: cpars = {} # Ensure that etrain is formatted as an array # and any zero entries replaced with jitter: if np.ndim( etrain )==0: if ( etrain==None )+( etrain==0 ): etrain = perturb*np.ones( n ) else: ixs = ( etrain==None ) etrain[ixs] = perturb ixs = ( etrain==0 ) etrain[ixs] = perturb if mfunc==None: mfunc = zero_mfunc mu = mfunc( xtrain, **mpars ) resids = dtrain.flatten() - mu.flatten() resids = np.reshape( resids, [ n, 1 ] ) if xinduc==None: print( 'Must specify inducing inputs (xinduc)' ) pdb.set_trace() Km = cfunc( xinduc, xinduc, **cpars ) Kmn = cfunc( xinduc, xtrain, **cpars ) knn = cfunc( xtrain, None, **cpars ) loglikelihood = logp( resids, Km, Kmn, knn, etrain, perturb=perturb ) return loglikelihood def logp( resids=None, Km=None, Kmn=None, knn=None, sigw=None, perturb=PERTURB ): """ SUMMARY Evaluates the log likelihood of residuals that are assumed to be generated by a gp with a specified covariance. The mean and covariance are passed directly into the function as inputs, to allow flexibility in how they are actually computed. This can be useful when repeated evaluations of logp are required (eg. likelihood maximisation or MCMC), as it may be possible to optimise how these precomputations are done outside the function. 
The loglikelihood is calculated according to: loglikelihood = -0.5*n*np.log( 2*np.pi ) - 0.5*L1 - 0.5*L2 where 'n' is the number of data points and: L1 = logdet[ (Kmm^-1)*( Kmm+Kmn*(W^-1)*(Kmn^T) ) ] - logdet(W) L2 = norm[ V*r ]^2 - norm[ (U^-1)*Kmn*(W^-1)*r ]^2 W = diag[ Knn - (Kmn^T)*(Km^-1)*Kmn ] + (sigw^2)*I V*(V^T) = W U*(U^T) = (Kmn^T)*(Km^-1)*Kmn + W CALLING loglikelihood = logp( resids, Kn, sigw, perturb=PERTURB ) INPUTS 'resids' [Nx1 array] - residuals between the training data and the gp mean function. 'Kn' [NxN array] - the covariance matrix between the training inputs. 'sigw' [Nx1 array or float] - white noise value to be incorporated into the covariance diagonal; if set to None or zero, it will be set to the value of the perturb variable for numerical stability. 'perturb' [float] - small perturbation to be added to the covariance diagonal for numerical stability if the white noise errors are set to None/zero. OUTPUT 'loglikelihood' [float] - the gp log likelihood. """ # Convert sigw to an array and replace any zero # entries with jitter: if np.ndim( sigw )==0: if ( sigw==None )+( sigw==0 ): sigw = perturb*np.ones( n ) else: ixs = ( sigw==None ) sigw[ixs] = perturb ixs = ( sigw==0 ) sigw[ixs] = perturb # Unpack and prepare: n = np.shape( Kmn )[1] # number of data points m = np.shape( Kmn )[0] # number of inducing variables Km = np.matrix( Km + ( perturb**2. ) * np.eye( m ) ) Kmn = np.matrix( Kmn ) knn = ( knn + perturb**2. ).flatten() r = np.reshape( resids, [ n, 1 ] ) Sig2_diag = sigw**2. 
    # Calculate the diagonal entries of the Qnn matrix, where:
    #      Qnn = (Kmn^T)*(Kmm^-1)*Kmn
    H = np.linalg.cholesky( Km )
    V = np.array( scipy.linalg.lu_solve( scipy.linalg.lu_factor( H ), Kmn ) )
    Qnn_diag = np.sum( V**2., axis=0 )
    # Generate an array holding the diagonal entries of the D matrix, where:
    #      D = Qnn + diag[ Knn - Qnn ]
    D_diag = ( knn - Qnn_diag + Sig2_diag ).flatten()
    # Convert V to V*(D^-0.5) and compute V*(D^-1)*V:
    V = np.matrix( V/np.tile( np.sqrt( D_diag ), [ m, 1 ] ) )
    VVT = V*V.T
    # Convert r to (D^-0.5)*r and compute (r^T)*(D^-1)*r:
    r = np.matrix( np.reshape( r.flatten()/np.sqrt( D_diag ), [ n, 1 ] ) )
    # To obtain L1, compute:
    #      L1 = 0.5*logdet(B) + 0.5*logdet(D)
    # where:
    #      B*(B^T) = I + V*(V^T)
    #              = I + (H^-1)*Kmn*(D^-1)*(Kmn^T)*(H^-T)
    #              = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(H^-T)
    #              = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H
    #      det[ (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H ] = prod[ diag(B)^2 ]
    #      (this is a standard result of the Cholesky decomposition)
    #      --> logdet[ ( Kmm + Kmn*(D^-1)*(Kmn^T) )*(Km^-1) ] = 2*sum[ diag(B) ]
    #      (using standard results det[ X*Y ]=det[X]*det[Y] and det[X^-1]=1/det[X])
    B = np.linalg.cholesky( np.matrix( np.eye( m ) ) + VVT )
    logdetB = 2*np.sum( np.log( np.diag( B ) ) )
    logdetD = np.sum( np.log( D_diag ) )
    L1 = 0.5*( logdetB + logdetD )
    # To obtain L2, compute:
    #      L2 = 0.5*(r^T)*r - 0.5*(Y^T)*Y
    # where:
    #      (Y^T)*Y = (r^T)*(D^-0.5)*(Z^T)*Z*(D^0.5)*r
    #      Z = (B^-1)*V*(D^-0.5)
    #        = (B^-1)*(H^-1)*Kmn*(D^-0.5)
    #        = (B^-1)*(H^-1)*Kmn*(D^-0.5)
    #      Z^T = (D^-0.5)*(Kmn^T)*(H^-T)*(B^-T)
    # so that:
    #      (Y^T)*Y = (r^T)*(D^-1)*(Kmn^T)*(H^-T)*(B^-T)*(B^-1)*(H^-1)*Kmn*(D^-1)*r
    #              = norm[ H*B*Kmn*(D^-1)*r ]^2
    # as it can be verified that:
    #      (H*B)*[(H*B)^T] = Kmm + Kmn*(D^-1)*(Kmn^T)
    # so that:
    #      (H^-T)*(B^-T)*(B^-1)*(H^-1) = (Kmm + Kmn*(D^-1)*(Kmn^T))^-1
    rTr = float( r.T*r )
    Z = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( B ), V ) )
    Y = Z*r
    YTY = float( Y.T*Y )
    L2 = 0.5*( rTr - YTY )
    # Constant normalisation term of the Gaussian log likelihood.
    L3 = 0.5*n*np.log( 2*np.pi )
    # Return the NEGATIVE log likelihood.
    return -float( L1 + L2 + L3 )


def prep_fixedcov( gp_obj, perturb=PERTURB ):
    """
    Prepares a dictionary containing variables that remain unchanged in calculating
    the log likelihood when the covariance parameters are fixed. The usage of this
    routine is along the lines of:

      >> resids = data - model
      >> kwpars = gp.prep_fixedcov()
      >> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )

    The returned dictionary holds the precomputed terms 'L1' (log-determinant
    part of the likelihood), 'L3' (constant normalisation term), 'Z' (whitened
    projection matrix) and 'sqrt_D_diag' (per-point scale factors), all of which
    depend only on the covariance — not on the residuals.
    """
    # Unpack the variables stored in the GP object:
    mfunc = gp_obj.mfunc
    mpars = gp_obj.mpars
    cfunc = gp_obj.cfunc
    cpars = gp_obj.cpars
    xtrain = gp_obj.xtrain
    xinduc = gp_obj.xinduc
    dtrain = gp_obj.dtrain
    sigw = gp_obj.etrain
    Kmn = cfunc( xinduc, xtrain, **cpars )
    n = np.shape( Kmn )[1] # number of data points
    m = np.shape( Kmn )[0] # number of inducing variables
    # Jitter (perturb**2) is added to the diagonals for numerical stability
    # of the Cholesky factorisations below:
    Km = cfunc( xinduc, xinduc, **cpars ) + ( perturb**2. ) * np.eye( m )
    knn = cfunc( xtrain, None, **cpars ).flatten()
    knn = ( knn + perturb**2. ).flatten()
    # Convert sigw to an array and replace any zero
    # entries with jitter:
    if np.ndim( sigw )==0:
        if ( sigw==None )+( sigw==0 ):
            sigw = perturb*np.ones( n )
    else:
        ixs = ( sigw==None )
        sigw[ixs] = perturb
        ixs = ( sigw==0 )
        sigw[ixs] = perturb
    Sig2_diag = sigw**2.
    # Calculate the diagonal entries of the Qnn matrix, where:
    #      Qnn = (Kmn^T)*(Kmm^-1)*Kmn
    H = np.linalg.cholesky( Km )
    V = np.array( scipy.linalg.lu_solve( scipy.linalg.lu_factor( H ), Kmn ) )
    Qnn_diag = np.sum( V**2., axis=0 )
    # Generate an array holding the diagonal entries of the D matrix, where:
    #      D = Qnn + diag[ Knn - Qnn ]
    D_diag = ( knn - Qnn_diag + Sig2_diag ).flatten()
    # CHECK THIS IS DOING THE RIGHT THING:
    # Convert V to V*(D^-0.5) and compute V*(D^-1)*V:
    V = np.matrix( V/np.tile( np.sqrt( D_diag ), [ m, 1 ] ) )
    VVT = V*V.T
    # To obtain L1, compute:
    #      L1 = 0.5*logdet(B) + 0.5*logdet(D)
    # where:
    #      B*(B^T) = I + V*(V^T)
    #              = I + (H^-1)*Kmn*(D^-1)*(Kmn^T)*(H^-T)
    #              = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(H^-T)
    #              = (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H
    #      det[ (H^-1)*[ Kmm + Kmn*(D^-1)*(Kmn^T) ]*(Km^-1)*H ] = prod[ diag(B)^2 ]
    #      (the above is a standard result of the Cholesky decomposition)
    #      --> logdet[ ( Kmm + Kmn*(D^-1)*(Kmn^T) )*(Km^-1) ] = 2*sum[ diag(B) ]
    #      (using standard results det[ X*Y ]=det[X]*det[Y] and det[X^-1]=1/det[X])
    B = np.linalg.cholesky( np.matrix( np.eye( m ) ) + VVT )
    logdetB = 2*np.sum( np.log( np.diag( B ) ) )
    logdetD = np.sum( np.log( D_diag ) )
    L1 = 0.5*( logdetB + logdetD )
    # Z maps (whitened) residuals into the inducing-point space; it is reused
    # for every call to logp_fixedcov():
    Z = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( B ), V ) )
    L3 = 0.5*n*np.log( 2*np.pi )
    sqrt_D_diag = np.reshape( np.sqrt( D_diag ), [ n, 1 ] )
    kwpars = { 'L1':L1, 'L3':L3, 'Z':Z, 'sqrt_D_diag':sqrt_D_diag }
    return kwpars


def logp_fixedcov( resids=None, kwpars=None ):
    """
    Calculates the log likehood using a specific dictionary of arguments that
    are generated using the prep_fixedcov() routine. This routine is used to
    avoid re-calculating the components of the log likelihood that remain
    unchanged if the covariance parameters are fixed, which can potentially
    save time for things like type-II maximum likelihood.

    The usage of this routine is along the lines of:

      >> resids = data - model
      >> kwpars = gp.prep_fixedcov()
      >> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )

    Only the residual-dependent term L2 is computed here; L1 and L3 are taken
    directly from kwpars.
    """
    L1 = kwpars['L1']
    L3 = kwpars['L3']
    Z = kwpars['Z']
    sqrt_D_diag = kwpars['sqrt_D_diag']
    # Whiten the residuals, i.e. r = (D^-0.5)*resids:
    r = np.matrix( resids/sqrt_D_diag )
    # rTr should be rT*(D^(-1))*r
    rTr = float( r.T*r )
    Y = Z*r
    YTY = float( Y.T*Y )
    L2 = 0.5*( rTr - YTY )
    # Return the log likelihood (negative of the summed penalty terms):
    return -float( L1 + L2 + L3 )


def prep_fixedcov_OLD( gp_obj, perturb=PERTURB ):
    """
    Prepares a dictionary containing variables that remain unchanged in calculating
    the log likelihood when the covariance parameters are fixed. The usage of this
    routine is along the lines of:

      >> resids = data - model
      >> kwpars = gp.prep_fixedcov()
      >> logp = gp.logp_fixedcov( resids=resids, kwpars=kwpars )

    NOTE(review): legacy implementation, superseded by prep_fixedcov() above.
    It appears broken as written: `n` is used before it is assigned (defined
    only further down from Kmn's shape), and `enew`/`p` are never defined in
    this scope — the "Do the same for enew" branch would raise NameError if
    executed. Looks like a copy-paste from a prediction routine; confirm
    before reusing.
    """
    # Ensure that etrain is formatted as an array
    # and any zero entries replaced with jitter:
    etrain = gp_obj.etrain
    if np.ndim( etrain )==0:
        if ( etrain==None )+( etrain==0 ):
            # NOTE(review): `n` is not yet defined at this point.
            etrain = perturb*np.ones( n )
    else:
        ixs = ( etrain==None )
        etrain[ixs] = perturb
        ixs = ( etrain==0 )
        etrain[ixs] = perturb
    # Do the same for enew:
    # NOTE(review): `enew` and `p` are undefined in this function.
    if np.ndim( enew )==0:
        if ( enew==None ):
            enew = np.zeros( p )
    else:
        ixs = ( enew==None )
        enew[ixs] = perturb
        ixs = ( enew==0 )
        enew[ixs] = perturb
    Km = gp_obj.cfunc( gp_obj.xinduc, gp_obj.xinduc, **gp_obj.cpars )
    Kmn = gp_obj.cfunc( gp_obj.xinduc, gp_obj.xtrain, **gp_obj.cpars )
    knn = gp_obj.cfunc( gp_obj.xtrain, None, **gp_obj.cpars )
    n = np.shape( Kmn )[1]
    m = np.shape( Kmn )[0]
    Km = np.matrix( Km + ( perturb**2. ) * np.eye( m ) )
    Kmn = np.matrix( Kmn )
    knn = np.matrix( knn + perturb**2. )
    L = np.linalg.cholesky( Km )
    Vmn = np.matrix( scipy.linalg.lu_solve( scipy.linalg.lu_factor( L ), Kmn ) )
    gnn = 1. + ( knn.flatten() - np.sum( np.power( Vmn, 2. ), axis=0 ) ) / ( etrain**2. )
    gnn = np.reshape( gnn, [ n, 1 ] )
    Vmn = Vmn / np.tile( np.sqrt( gnn ).T, [ m, 1 ] )
    VmnVmnT = Vmn * Vmn.T
    W = np.linalg.cholesky( np.matrix( ( etrain**2. ) * np.eye( m ) ) + VmnVmnT )
    Z = scipy.linalg.lu_solve( scipy.linalg.lu_factor( W ), Vmn )
    Z = np.matrix( Z )
    L1 = 0.5 * ( 2 * np.sum( np.log( np.diag( W ) ) ) + np.sum( np.log( gnn ) ) \
         + ( n-m ) * np.log( gp_obj.etrain**2. ) )
    L3 = 0.5*n*np.log( 2*np.pi )
    kwpars = { 'L1':L1, 'L3':L3, 'gnn':gnn, 'Z':Z, 'sigw':etrain }
    return kwpars


def zero_mfunc( x, **kwargs ):
    """
    A simple zero mean function, used whenever mfunc==None in any of the
    above routines. It takes an [NxD] array as input and returns an [Nx1]
    array of zeros.
    """
    n = np.shape( x )[0]
    return np.zeros( [ n, 1 ] )
Home / Decor / decorations / Home / Home decoration / living room / living room decor / HOME DECOR :: BEAUTIFUL COLORFUL LIVING ROOM IDEAS. Let's check out how you can add colour — au tuseme tu, how you can make your living room more colorful. This is perfect kwa wale wanaopenda rangi. Here are ideas showing you living rooms with different colors kuanzia red, yellow, green, purple and so on. Be inspired!
import mimetypes import requests from smqtk.representation import DataElement __author__ = "paul.tunison@kitware.com" MIMETYPES = mimetypes.MimeTypes() class DataUrlElement (DataElement): """ Representation of data loadable via a web URL address. """ @classmethod def is_usable(cls): # have to be able to connect to the internet try: # using github because that's where this repo has been hosted. r = requests.get('http://github.com') _ = r.content return True except Exception, ex: cls.logger().warning( "DataUrlElement not usable, cannot connect to " "http://github.com" ) return False def __init__(self, url_address): """ :raises requests.exceptions.HTTPError: URL address provided does not resolve into a valid GET request. :param url_address: Web address of element :type url_address: str """ super(DataUrlElement, self).__init__() self._url = url_address # make sure that url has a http:// or https:// prefix if not (self._url[:7] == "http://" or self._url[:8] == "https://"): self._url = "http://" + self._url # Check that the URL is valid, i.e. actually points to something requests.get(self._url).raise_for_status() def get_config(self): return { "url_address": self._url } def content_type(self): """ :return: Standard type/subtype string for this data element, or None if the content type is unknown. :rtype: str or None """ return requests.get(self._url).headers['content-type'] def get_bytes(self): """ :return: Get the byte stream for this data element. :rtype: bytes :raises requests.exceptions.HTTPError: Error during request for data via GET. """ # Fetch content from URL, return bytes r = requests.get(self._url) r.raise_for_status() if r.ok: return r.content else: raise RuntimeError("Request response not OK. Status code returned: " "%d", r.status_code) DATA_ELEMENT_CLASS = DataUrlElement
On August 28th, 1991 Søren Schødt and Claus Johanson together with a group of large Danish companies founded the consultancy company Danish Travel Pool (DTP). From the very beginning, DTP offered expert advice on corporate travel. Two years later, DTP set up its own travel office as a counterpart to the established corporate travel agencies. This was the beginning of TravelpoolEurope as we know it today: a shared service centre for corporate travel – the way our members want it and on their terms. TPE now has travel and management activities in 31 countries and handles a travel budget of more than 260 million DKK. The demand for TPE’s global End to End solution is increasing among members. TPE implements new user-controlled member portal which provides the members with easy and quick access to travel booking, management and company-specific travel information. As a new feature, it is now also possible for member companies to upload and edit relevant news for their travellers. TPE offers a global credit card program as a part of the expense management solution and is thereby able to automate the management process for companies worldwide. As of May 1st, 2012 TPE has reached 32 members. In its 20th year of existence, TPE had 28 members and travel activities worth more than 194 million DKR in 2011. TPEs travel reimbursement solution, Expenture, is entering into a new agreement with Concur Technologies, the leading supplier of travel expense management solutions. Based on a thorough market analysis it is estimated that upgrading to the newest solution with Concur CE provides Expenture’s clients with a unique combination of Concur software and a complete BPO solution and thus the best terms on the market. Via its subsidiary company TMT, TPE developed software for ConTgo. ConTgo is specialists in mobile phone services aimed at the corporate travel industry. 
TPE consolidated its position in a time of financial crisis by having more member companies and organisations that remain strong during a slump. In 2009 TPE’s members included not only a number of large Danish companies but also trade, public and humanitarian organisations. In 2008 the members of TPE purchased more than 200 million DKK worth of travels. This makes TPE an even more powerful negotiating partner and counterpart to the traditional corporate travel agencies. TPE repurchased the travel reimbursement solution and strengthened its core business. The subsidiary company Expenture A/S was sold. TPE purchased the activities in BTC Data and assumed the rights to its back-office system, which is a data warehouse system. The acquisition included clients such as FCm Travel and Lego. TPE’s subsidiary company TMT ApS developed a programme, WebCrawler, which was able to scan more than 100 different websites. This programme considerably reduced the time it took to book a flight ticket and also helped automate the billing process. In 2005-2006 TPE entered into agreements with a number of Swedish and Norwegian suppliers of various travel related services (hotels, etc.) in order to consolidate its activities in Scandinavia further. TPE’s subsidiary company ePostTrip A/S changed the name to Expenture A/S and joined its investors in distributing the travel reimbursement solution to external clients as a complete, outsourced solution. First in Europe to introduce "direct booking" To meet with our members’ demand and help reduce their costs, TPE opened a department for group travel. The purpose of this was to professionalise the area and create a more transparent price structure. TPE established a knowledge centre for custom-made reports. The reports help companies bring their travel expenditure into focus. Coop Norden became a member of TPE and an increasing number of our existing members opened up branches in Sweden.
TPE opened a branch for our Swedish travellers and travel purchasing agents, and the employees in this branch speak Swedish. We changed our name from Danish Travel Pool (DTP) to TravelpoolEurope (TPE). The new name more aptly reflected our increase in activities all over Europe. DTP implemented travel reimbursement in all of Europe for one of its members, General Electric (GE). DTP and Andersen Consulting (later Deloitte) developed the subsidiary company ePostTrip A/S (later Expenture A/S). The subsidiary was put in charge of administration, development and sale of the DTP travel reimbursement solution to the existing member companies. Concur agreed to deliver software for DTP’s travel reimbursement solution. DTP attracted a lot of media attention thanks to the SAS case, and took advantage of this by drawing attention to the lack of competition in the Scandinavian air travel industry, and also attempted to get Danish politicians involved in the issue. DTP and the National Consumer Agency of Denmark attempted to put pressure on SAS in order to get SAS to cover the loss that DTP incurred as a result of the cartel-like collaboration between SAS and Maersk Air. It was estimated that the collaboration had cost the members of DTP several million due to artificially high ticket prices. Total travel budget of more than 100 million DKK. In its 10th year of existence, DTP had 21 members which all in all had 55,000 travellers and travel activities worth more than 100 million DKR annually. DTP added yet another supplier and also offered management of Brobizz, which reduced the amount of administrative work in the member companies significantly. DTP developed a method of ensuring accurate pricing in online booking, and this method was also used to report on deviations and lost savings. These reports helped members comply with the travel policy of their company. 
DTP started managing meetings and conferences as well, and entered into agreements with various Nordic businesses that offered meeting and conference facilities. DTP and GetThere launched a member portal where the members could book online and get advice on booking and travel expenditure. DTP was among the first in Denmark to implement using bonus points (that had been earned during a business trip) when purchasing tickets for a new business trip. The new element in this was that the bonus points went to the company rather than the employee. DTP opened a travel management office of its own. The travel office worked as a shared service centre with a central purchasing, counselling and control function. The aim was to help members save money on corporate travel via increased purchasing power, centralised know -how and managing the travel activities of member companies in a more efficient manner. Danish Travel Pool (DTP) was founded by managing director Søren Schødt and Claus Johansson as a response to the growing need for independent external consultancy in the area of corporate travel. DTP started out with six members, including Nordisk Re (owned by General Electric) which also contributed in identifying this need.
from django.db import models

from openslides.motions.models import Category, Motion, MotionPoll
from openslides.users.models import User
from openslides.utils.models import RESTModelMixin

from .access_permissions import (
    AbsenteeVoteAccessPermissions,
    VotingShareAccessPermissions,
    VotingProxyAccessPermissions,
)


class VotingShare(RESTModelMixin, models.Model):
    """
    Weighted voting power of a delegate within a motion category.

    One row per (delegate, category) pair; `shares` is the decimal weight
    the delegate's vote carries for motions of that category.
    """
    access_permissions = VotingShareAccessPermissions()

    delegate = models.ForeignKey(User, on_delete=models.CASCADE, related_name='shares')
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    # Decimal weight: up to 9 integer digits with 6 decimal places.
    shares = models.DecimalField(max_digits=15, decimal_places=6)

    class Meta:
        default_permissions = ()
        # A delegate can only hold one share record per category.
        unique_together = ('delegate', 'category')

    def __str__(self):
        return '%s, %s, %s' % (self.delegate, self.category, self.shares)


class VotingProxy(RESTModelMixin, models.Model):
    """
    Proxy assignment: `delegate` has authorised `proxy` to vote on their
    behalf. OneToOneField ensures a delegate has at most one proxy, while
    one user may act as proxy for several delegates (related_name 'mandates').
    """
    access_permissions = VotingProxyAccessPermissions()

    delegate = models.OneToOneField(User, on_delete=models.CASCADE)
    proxy = models.ForeignKey(User, on_delete=models.CASCADE, related_name='mandates')

    class Meta:
        default_permissions = ()
        # TODO: Review permissions.
        permissions = (
            ('can_manage', 'Can manage proxy voting'),
        )

    def __str__(self):
        return '%s >> %s' % (self.delegate, self.proxy)


class AbsenteeVote(RESTModelMixin, models.Model):
    """
    A vote cast in advance (in absence) by a delegate on a specific motion.
    `vote` is a single-character code; the exact encoding is defined by the
    voting logic elsewhere (presumably Y/N/A — confirm against consumers).
    """
    access_permissions = AbsenteeVoteAccessPermissions()

    motion = models.ForeignKey(Motion, on_delete=models.CASCADE)
    delegate = models.ForeignKey(User, on_delete=models.CASCADE)
    vote = models.CharField(max_length=1)

    class Meta:
        default_permissions = ()
        # One absentee vote per delegate per motion.
        unique_together = ('motion', 'delegate')

    def __str__(self):
        return '%s, %s, %s' % (self.motion, self.delegate, self.vote)


class MotionPollBallot(models.Model):
    """
    A single delegate's ballot within a motion poll. `vote` may be blank
    (ballot created but not yet cast).
    """
    poll = models.ForeignKey(MotionPoll, on_delete=models.CASCADE)
    delegate = models.ForeignKey(User, on_delete=models.CASCADE, related_name='delegate_set')
    # voter = models.ForeignKey(User, on_delete=models.CASCADE, related_name='voter_set')
    # keypad = models.IntegerField(default=0)
    vote = models.CharField(max_length=1, blank=True)
    # shares = models.DecimalField(max_digits=15, decimal_places=6)

    class Meta:
        default_permissions = ()
        # One ballot per delegate per poll.
        unique_together = ('poll', 'delegate')

    def __str__(self):
        return '%s, %s, %s' % (self.poll, self.delegate, self.vote)
Bills offensive coordinator Brian Daboll's coaching journey began when he suffered a career-ending injury playing for the University of Rochester. Brian Daboll was born in Welland, Ontario, but grew up in Buffalo. He attended the University of Rochester where he played football and earned a degree in economics. During more than 20 years as a coach, he earned five Super Bowl championship rings with the Patriots. Twenty-six autumns ago, John Snell was sitting in the stands under the Friday night lights at St. Francis High School outside Buffalo, searching for candidates to play football for the University of Rochester. Meaning, they usually had good players, and one he certainly liked was Brian Polian, the son of then-Buffalo Bills general manager Bill Polian. But that night, there was another Red Raider Snell liked even more. “I saw this skinny little safety who was tougher than nails,” said Snell, who, for the past 24 years has worked at Baldwin Wallace University, first as an assistant football coach, later for 15 years as the head coach, and now as the school’s assistant athletic director. Snell did indeed meet him, talk to him, and ultimately persuade him to come play for the Yellowjackets at Fauver Stadium. His name was Brian Daboll, who returns to Rochester for Buffalo Bills training camp this week in his new role as Buffalo’s offensive coordinator. Like anyone who has made football his life’s work — be it as a player, coach, or administrator — Daboll’s journey has taken him to so many places that it’s tough to keep track of it all. But make no mistake, the place where it all began was over on Wilson Boulevard on the banks of the Genesee River. Daboll played sparingly as a freshman, but he earned a starting role as a sophomore and he was in on 51 tackles and made two interceptions that year, a prelude to what took place on Sept. 
9, 1995, opening day of his junior year, when Daboll played his greatest game, one of the greatest games ever by a UR defensive player. In the season opener at Case Western, Daboll intercepted three passes, the last in the end zone with nine seconds remaining to secure a 9-5 victory. And what made it all the more memorable is that one of his best friends growing up in Buffalo, wide receiver Brian Flynn, caught the go-ahead touchdown pass with 54 seconds to go. Daboll doesn’t talk much about his playing days, but he did say of that game, “That was a good one." One that wasn’t so good occurred two months later, a 44-7 season-ending loss at Union, the game that turned out to be the last one Daboll ever played. “I got dinged up; I’ll leave it at that,” Daboll said, the memory still difficult to reflect on. While making a helmet-to-helmet tackle, Daboll suffered a neck injury that left him prone on the field. Thankfully, Daboll was able to get up and walk off, but after he was examined in the days after, the determination was made that it was no longer safe for him to play football because the next neck injury could result in paralysis. Football was Daboll’s passion, and to have it taken away so suddenly at the ripe old age of 20 was traumatic. Yet, in a way, one of the worst moments of his young life turned out to be the first step in what is now a two-decades plus coaching career, 17 of those years spent in the National Football League during which he has collected five Super Bowl rings with the Patriots and one college football national championship with Alabama. When he got hurt, Daboll still had a year of school remaining to complete his degree in economics, and though he couldn’t play, he felt he could still help the team. He had knowledge, energy and a burning competitive spirit, and there was no way he was going to waste it, so he asked Parrinello if he could work the 1996 season as a volunteer coach, and Parrinello agreed. 
Eric Thurley, who is a social studies teacher at Webster Schroeder, where he also serves as defensive coordinator of the Warriors football team, was a teammate of Daboll’s for two years at UR and he still can vividly recall the passion that permeated from Daboll. That year Daboll spent helping the Yellowjackets defense had an impact on Thurley, especially gameday mornings, when Daboll would call Thurley with heavy metal music blasting in the background to get the junior defensive captain properly fired up. Jeff Smith of Hilton could attest to that. When Smith made his college visit to UR, Daboll was his host. “Brian was one of the first guys I met at UR during my recruiting trip,” said Smith, who decided to quit football and went to the University of Maryland, but then realized he missed playing so he transferred to UR after his freshman year and joined the Yellowjackets the season Daboll was coaching. His one year of coaching was enough to convince Daboll that a job in the real world could wait, so after graduation, he asked Parrinello whether he could make a couple calls. “I didn’t have a set plan on what was going to happen, but I really enjoyed it and I thought there’s no better time to give it a try than when you’re young,” Daboll said. Parrinello recommended him to William & Mary College coach Jimmy Laycock, and off he went to Williamsburg, Virginia, to serve as a restricted-earnings coach (meaning about $2,000 for the season) for a team that had an undersized, tough-as-nails safety in the Daboll mold named Sean McDermott. At the end of 1997, Laycock kept the ball rolling for Daboll by calling Nick Saban to see if he had a spot on his Michigan State staff. After two years in the Big Ten working as a graduate assistant, Daboll got his big break when Saban suggested him to his pal Bill Belichick, who was taking over as head coach of the New England Patriots in 2000. 
Daboll started as a defensive assistant — he jokingly called it a 20-20 job, meaning 20 hours a day for about $20,000 per year — before getting promoted to receivers coach in 2002. After Smith graduated from UR, he moved from Hilton to take a job in Boston around the same time Daboll started with the Patriots. Smith remains a big Bills fan, even though he still lives in Patriots territory. However, it was tough to root against the Patriots when his buddy was coaching there, especially when that buddy brought him along to two of New England’s Super Bowls, Nos. 38 and 39 in Houston and Jacksonville, where they beat the Panthers and Eagles, respectively. “We had some good times, especially when they were going on their Super Bowl runs,” said Smith, who said the first playoff game he attended was the infamous Tuck Rule game when the Patriots defeated the Raiders in the snow thanks to one of the most controversial non-fumble rulings in NFL history. That ended when Daboll left after 2006 for New York to become quarterbacks coach of the Jets under his former Patriots colleague, Eric Mangini, with one of those years spent coaching Brett Favre. He then went with Mangini to Cleveland, where he served two years as offensive coordinator, followed by one year as OC for the Dolphins, one as OC with the Chiefs, and then a return to the Patriots for four more years with Belichick as tight ends coach. When Lane Kiffin quit as Saban’s offensive coordinator at the end of 2016, Saban asked Belichick to return the favor from many years earlier and send Daboll back to him for 2017, and as OC, Daboll helped the Crimson Tide win yet another national title. All of which has led Daboll back to his hometown as McDermott, at the end of his first season as the Bills’ head coach, fired Rick Dennison and hired Daboll. And you could count Daboll as one of those. 
He never knew his father, a Canadian who abandoned him after he was born in Welland, Ontario, in 1975, so his mother moved back to Buffalo to live with her parents. His grandfather was a farmer who also worked as a groundskeeper for the West Seneca school district, his grandmother took care of their home, and Daboll’s mother worked several jobs through the years, all three playing a key role in raising him. “A lot of respect for my grandparents and my mother trucking me back and forth to different places,” Daboll said. Usually, that meant sporting events because as a kid, sports were everything, and he played whatever was in season. Hockey was a particular favorite, and he was good enough to be on a travel team, a grinding right winger who scored “some junk goals” along the way. Eventually, he homed in on football because that was the sport he thought he’d have a chance to play in college. Buffalo was a great place to grow up, and now his own family will get to experience that. Daboll had two children with his first wife, and his second wife, Beth, had two children when she and Daboll got married in 2009. Those four are now all teenagers — the oldest, his son Mark, will be the first to head off to college in the fall — but Brian and Beth wanted children together, and they now have a 2-year-old and an 8-month-old as part of their brood. Throughout his football journey, Daboll always held out hope that he’d get to come back to Buffalo, and now that he has, he feels like this is right where he belongs. When he returns to Rochester, he’ll be busy at St. John Fisher College, but he hopes to have time to reconnect with some of his old friends from UR because the four years he spent there remain a special time in his life.
import hashlib
import urllib
import time

from django.template import Context, RequestContext, loader
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, HttpResponseServerError
from django.forms import widgets
from django.template.loader import get_template
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth import logout
from django.contrib.auth.models import User, UNUSABLE_PASSWORD
from django.contrib.sites.models import RequestSite
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.conf import settings

from twistranet.twistapp.signals import invite_user, reset_password, user_imported
from twistranet.twistapp.models import *
from twistranet.twistapp.forms import account_forms, registration_forms
from twistranet.twistapp.lib.slugify import slugify
from twistranet.twistapp.lib.log import log

from twistranet.actions import *
from twistranet.core.views import *


class UserAccountView(BaseWallView):
    """
    This is what is used as a base view for accounts
    """
    context_boxes = [
        'account/profile.box.html',
        'actions/context.box.html',
        'account/relations.box.html',
    ]

    template_variables = BaseWallView.template_variables + [
        "account",
        "n_communities",
        "n_network_members",
    ]

    model_lookup = UserAccount
    template = "account/view.html"
    title = None
    name = "account_by_id"

    def prepare_view(self, *args, **kw):
        """
        Add a few parameters for the view
        """
        # Regular creation
        super(UserAccountView, self).prepare_view(*args, **kw)
        # Fall back to the authenticated account when no explicit account
        # was resolved by the base view:
        if not hasattr(self, "useraccount"):
            self.useraccount = self.auth
        self.account = self.useraccount
        # `and/or` idiom: False when account is unset, else the counts.
        self.n_communities = self.account and self.account.communities.count() or False
        self.n_network_members = self.account and self.account.network.count() or False

        # Add a message for ppl who have no content (only on one's own
        # profile, and only for the default template):
        if self.template == UserAccountView.template:
            if self.account and self.auth and self.account.id == self.auth.id:
                if not Content.objects.filter(publisher = self.auth).exists():
                    messages.info(self.request, mark_safe(_("""<p>
                        It seems that you do not have created content yet. Maybe it's time to do so!
                        </p>
                        <p>
                        Creating content in twistranet is easy. For example, just tell what you're working on in the form below and click the "Send" button.
                        </p>
                        <p>
                        Want to learn about what you can do in twistranet? Just take a look here: [help]
                        </p>
                        """)))

    def get_objects_list(self,):
        # Activity feed for the viewed account (not just own publications).
        return Content.objects.getActivityFeed(self.object)

    def get_recent_content_list(self):
        """
        Retrieve recent content list for the given account.
        XXX TODO: Optimize this by adding a (first_twistable_on_home, last_twistable_on_home)
        values pair on the Account object. This way we can just query objects
        with id > last_twistable_on_home
        """
        nb_all = self.objects_list.count()
        batch = self.batch_list(nb_all)
        nb_from = batch[0]
        nb_to = batch[1]
        if nb_from < nb_all:
            # First fetch only ids for the batch window, then re-query with
            # select_related for the summary fields:
            objects_list = self.objects_list.order_by("-id").values_list('id', flat = True)[nb_from:nb_to]
            latest_list = Content.objects.__booster__.filter(id__in = tuple(objects_list)).select_related(*self.select_related_summary_fields).order_by("-created_at")
            return latest_list
        return []

    def get_title(self,):
        """
        We override get_title in a way that it could be removed easily in subclasses.
        Just define a valid value for self.title and this get_title() will keep
        the BaseView behaviour
        """
        if not self.title:
            return _("%(name)s's profile") % {'name': self.account.title}
        return super(UserAccountView, self).get_title()


class HomepageView(UserAccountView):
    """
    Special treatment for homepage.
    """
    name = "twistranet_home"
    title = _("Timeline")

    def get_objects_list(self):
        """
        Retrieve recent content list for the given account.
        XXX TODO: Optimize this by adding a (first_twistable_on_home, last_twistable_on_home)
        values pair on the Account object. This way we can just query objects
        with id > last_twistable_on_home
        """
        objects_list = None
        if not self.auth.is_anonymous:
            # Users with own content see their followed feed; others (and
            # anonymous visitors) fall through to the global feed below.
            if Content.objects.filter(publisher = self.auth).exists():
                objects_list = Content.objects.followed.exclude(model_name = "Comment")
        if objects_list is None:
            objects_list = Content.objects.exclude(model_name = "Comment")
        return objects_list

    def prepare_view(self, ):
        """
        We just have the account set as curently-auth account.
        """
        # Get the actual view instance. Not optimal, but, well, works.
        if not self.auth.is_anonymous:
            prep_id = self.auth.id
        else:
            prep_id = None
        super(HomepageView, self).prepare_view(prep_id)


class PublicTimelineView(UserAccountView):
    name = "timeline"
    title = _("Public timeline")

    def get_objects_list(self):
        """
        Just return all public / available content
        """
        return Content.objects.exclude(model_name = "Comment")


#                                                           #
#                       LISTING VIEWS                       #
#                                                           #


class AccountListingView(BaseView):
    """
    Todo: ALL accounts listing page.
    """
    title = _("Accounts")
    template = "account/list.html"
    template_variables = BaseView.template_variables + [
        "accounts",
    ]

    def prepare_view(self, ):
        super(AccountListingView, self).prepare_view()
        self.accounts = Account.objects.get_query_set()[:settings.TWISTRANET_COMMUNITIES_PER_PAGE]


class AccountNetworkView(AccountListingView, UserAccountView):
    """
    All networked accounts for an account page
    """
    template = AccountListingView.template
    template_variables = UserAccountView.template_variables + AccountListingView.template_variables

    def get_title(self,):
        if self.account.id == self.auth.id:
            return _("Your network")
        # NOTE(review): the '%' interpolation happens INSIDE _() here, so the
        # already-formatted string is looked up for translation — probably
        # intended as _("...") % {...} like get_title() above. Confirm.
        return _("%(name)s's network" % {'name': self.account.title} )

    def prepare_view(self, *args, **kw):
        super(AccountNetworkView, self).prepare_view()
        UserAccountView.prepare_view(self, *args, **kw)
        self.accounts = self.account.network


class AccountCommunitiesView(AccountListingView, UserAccountView):
    """
    All communities for an account.
    """
    template = AccountListingView.template
    template_variables = UserAccountView.template_variables + AccountListingView.template_variables

    def get_title(self,):
        if self.account.id == self.auth.id:
            return _("Your communities")
        # NOTE(review): same %-inside-_() pattern as AccountNetworkView.
        return _("%(name)s's communities" % {'name': self.account.title} )

    def prepare_view(self, *args, **kw):
        super(AccountCommunitiesView, self).prepare_view()
        UserAccountView.prepare_view(self, *args, **kw)
        self.accounts = self.account.communities


class AccountAdminCommunitiesView(AccountListingView, UserAccountView):
    """
    All communities administred by an account.
    """
    template = AccountListingView.template
    template_variables = UserAccountView.template_variables + AccountListingView.template_variables

    # XXX TODO
    def get_title(self,):
        if self.account.id == self.auth.id:
            return _("Your communities")
        return _("%(name)s's communities" % {'name': self.account.title} )

    def prepare_view(self, *args, **kw):
        # NOTE(review): super() is called with AccountCommunitiesView (a
        # sibling, not this class) — looks like a copy-paste slip; should
        # presumably be AccountAdminCommunitiesView. Confirm before changing,
        # as the MRO happens to make this mostly equivalent.
        super(AccountCommunitiesView, self).prepare_view(*args, **kw)
        UserAccountView.prepare_view(self, *args, **kw)
        self.accounts = self.account.communities


class PendingNetworkView(AccountListingView, UserAccountView):
    """
    All pending network relations for an account
    """
    template = AccountListingView.template
    template_variables = UserAccountView.template_variables + AccountListingView.template_variables
    title = _("Pending network requests")
    name = "account_pending_network"
    category = ACCOUNT_ACTIONS

    def as_action(self,):
        """Only return the action if there's pending nwk requests
        """
        if self.auth.is_anonymous:
            return
        req = self.auth.get_pending_network_requests()
        if not req:
            return
        # Decorate the default action label with the number of requests:
        action = BaseView.as_action(self)
        action.label = mark_safe(_('<span class="badge">%(number)d</span> Pending network requests') % {"number": len(req)})
        return action

    def prepare_view(self, *args, **kw):
        super(PendingNetworkView, self).prepare_view()
        # Always show the authenticated user's own pending requests:
        UserAccountView.prepare_view(self, self.auth.id)
        self.accounts = self.account.get_pending_network_requests()


#                                                           #
#                       ACTION VIEWS                        #
#                                                           #
class AccountDelete(BaseObjectActionView): """ Delete a community from the base """ model_lookup = UserAccount name = "account_delete" confirm = _("Do you really want to delete this account?<br />All content for this user WILL BE DELETED.") title = _("Delete account") def as_action(self): if not isinstance(getattr(self, "object", None), self.model_lookup): return None if not self.object.can_delete: return None # Can't delete myself ;) if self.object.id == Twistable.objects.getCurrentAccount(self.request).id: return None return super(AccountDelete, self).as_action() def prepare_view(self, *args, **kw): super(AccountDelete, self).prepare_view(*args, **kw) if not self.object.can_delete: raise ValueError("You're not allowed to delete this account") name = self.useraccount.title underlying_user = self.useraccount.user __account__ = SystemAccount.get() # self.useraccount.delete() underlying_user.delete() del __account__ messages.info( self.request, _("'%(name)s' account has been deleted.") % {'name': name}, ) raise MustRedirect(reverse("twistranet_home")) class AddToNetworkView(BaseObjectActionView): """ Add sbdy to my network, with or without authorization """ model_lookup = UserAccount name = "add_to_my_network" def as_action(self, ): """ as_action(self, ) => generate the proper action. """ if not hasattr(self, "object"): return None if not isinstance(self.object, UserAccount): return None # Networking actions if self.object.has_pending_network_request: return Action( label = _("Accept in your network"), url = reverse(self.name, args = (self.object.id, ), ), confirm = _( "Would you like to accept %(name)s in your network?<br />" "He/She will be able to see your network-only content." 
) % { "name": self.object.title }, category = MAIN_ACTION, ) elif self.object.can_add_to_my_network: return Action( label = _("Add to your network"), url = reverse(self.name, args = (self.object.id, ), ), confirm = _( "Would you like to add %(name)s to your network?<br />" "He/She will have to agree to your request." ) % {"name": self.object.title}, category = MAIN_ACTION, ) def prepare_view(self, *args, **kw): super(AddToNetworkView, self).prepare_view(*args, **kw) self.redirect = self.useraccount.get_absolute_url() self.useraccount.add_to_my_network() name = self.useraccount.title if self.useraccount in self.auth.network: messages.success( self.request, _("You're now connected with %(name)s.") % {'name': name} ) else: messages.info( self.request, _("A network request has been sent to %(name)s for approval.") % {'name': name} ) class RemoveFromNetworkView(BaseObjectActionView): """ Add sbdy to my network, with or without authorization """ model_lookup = UserAccount name = "remove_from_my_network" def as_action(self, ): if not isinstance(getattr(self, "object", None), self.model_lookup): return None if self.object.has_received_network_request: return Action( category = LOCAL_ACTIONS, label = _("Cancel your network request"), url = reverse(self.name, args = (self.object.id, ), ), confirm = _("Would you like to cancel your network request?"), ) if self.object.in_my_network: return Action( category = LOCAL_ACTIONS, label = _("Remove from your network"), url = reverse(self.name, args = (self.object.id, ), ), confirm = _("Would you like to remove %(name)s from your network?") % {"name": self.object.title}, ) def prepare_view(self, *args, **kw): super(RemoveFromNetworkView, self).prepare_view(*args, **kw) self.redirect = self.useraccount.get_absolute_url() was_in_my_network = self.useraccount in self.auth.network self.useraccount.remove_from_my_network() name = self.useraccount.title if was_in_my_network: messages.success( self.request, _("You're not connected with 
%(name)s anymore.") % {'name': name} ) else: messages.info( self.request, _("Your network request to %(name)s has been canceled.") % {'name': name} ) # # # Edition / Creation views # # # class UserAccountEdit(UserAccountView): """ Edit form for user account. Not so far from the view itself. """ template = "account/edit.html" form_class = account_forms.UserAccountForm content_forms = [] latest_content_list = [] name = "user_account_edit" category = LOCAL_ACTIONS def as_action(self,): """ Return action only if can_edit user """ if not self.is_model: return None if self.object.can_edit: return super(UserAccountEdit, self).as_action() def get_title(self,): """ Title suitable for creation or edition """ if self.title: return super(UserAccountEdit, self).get_title() if not getattr(self, 'object', None): return _("Create a user account") elif self.object.id == self.auth.id: return _("Edit your account") return _("Edit %(name)s" % {'name' : self.object.title }) class UserAccountInvite(UserAccountEdit): """ UserAccount invitation. Close to the edit class! """ context_boxes = [] form_class = account_forms.UserInviteForm title = _("Invite user") category = GLOBAL_ACTIONS name = "user_account_invite" def as_action(self): if not Account.objects.can_create: return None return BaseView.as_action(self) def prepare_view(self): """ Process additional form stuff. Here we've got a valid self.form object. """ super(UserAccountInvite, self).prepare_view() is_admin = UserAccount.objects.getCurrentAccount(self.request).is_admin if not is_admin: self.form.fields['make_admin'].widget = widgets.HiddenInput() if self.form_is_valid: # Double-check that user is not already registered email = self.form.cleaned_data['email'] if User.objects.filter(email = email).exists(): messages.error(self.request, _("This user already exists.")) self.form_is_valid = False if self.form_is_valid: # Generate the invitation link. # Invitation is in two parts: the verification hash and the email address. 
admin_string = "" if is_admin: if self.form.cleaned_data['make_admin']: admin_string = "?make_admin=1" h = "%s%s%s" % (settings.SECRET_KEY, email, admin_string) h = hashlib.md5(h).hexdigest() invite_link = reverse(AccountJoin.name, args = (h, urllib.quote_plus(email))) # Send the invitation (as a signal) invite_user.send( sender = self.__class__, inviter = UserAccount.objects.getCurrentAccount(self.request), invitation_uri = "%s" % (invite_link, ), target = email, message = self.form.cleaned_data['invite_message'], ) # Say we're happy and redirect if self.form_is_valid: messages.success(self.request, _("Invitation sent successfuly.")) raise MustRedirect(reverse(self.name)) # # # Account login/logout/join # # # class AccountJoin(UserAccountEdit): """ join TN """ template = "registration/join.html" form_class = account_forms.UserAccountCreationForm name = "account_join" title = _("Join") def prepare_view(self, check_hash, email): """ Render the join form. """ # Check if hash and email AND admin priviledge match is_admin = False admin_string = "?make_admin=1" h = "%s%s%s" % (settings.SECRET_KEY, email, admin_string) h = hashlib.md5(h).hexdigest() if check_hash == h: is_admin = True else: # Check if hash and email match. h = "%s%s" % (settings.SECRET_KEY, email) h = hashlib.md5(h).hexdigest() if not check_hash == h: raise ValidationError("Invalid email. This invitation has been manually edited.") # If user is already registered, return to login form if User.objects.filter(email = email).exists(): raise MustRedirect(reverse(AccountLogin.name)) # Call form processing. Prepare all arguments, esp. email and username username = email.split('@')[0] username = slugify(username) self.initial = { "email": email, "username": username, } super(AccountJoin, self).prepare_view() # Now save user info. 
But before, double-check that stuff is still valid if self.form_is_valid: cleaned_data = self.form.cleaned_data # Check password and username if not cleaned_data["password"] == cleaned_data["password_confirm"]: messages.warning(self.request, _("Password and confirmation do not match")) elif User.objects.filter(username = cleaned_data["username"]).exists(): messages.warning(self.request, _("A user with this name already exists.")) else: # Create user and set information __account__ = SystemAccount.get() u = User.objects.create( username = cleaned_data["username"], first_name = cleaned_data["first_name"], last_name = cleaned_data["last_name"], email = cleaned_data["email"], is_superuser = is_admin, is_active = True, ) u.set_password(cleaned_data["password"]) u.save() useraccount = UserAccount.objects.get(user = u) useraccount.title = u"%s %s" % (cleaned_data["first_name"], cleaned_data["last_name"]) useraccount.save() if is_admin: admin_community = AdminCommunity.objects.get() if not admin_community in useraccount.communities: admin_community.join(useraccount, is_manager = True) del __account__ # Display a nice success message and redirect to login page messages.success(self.request, _("Your account is now created. You can login to twistranet.")) raise MustRedirect(reverse(AccountLogin.name)) class AccountLogin(BaseView): template = "registration/login.html" name = "login" title = _("Login") template_variables = BaseView.template_variables + \ ['form', 'site', 'next', ] global_boxes = [ 'registration/introduction.box.html', ] def prepare_view(self,): """ request, template_name='registration/login.html', redirect_field_name=REDIRECT_FIELD_NAME, authentication_form=AuthenticationForm): Displays the login form and handles the login action. 
this is from django.contrib.auth.views """ from django.contrib.auth.views import REDIRECT_FIELD_NAME as redirect_field_name # = 'next' from django.contrib.auth.views import AuthenticationForm as authentication_form from django.contrib.auth.views import auth_login from django.contrib.sites.models import Site, RequestSite redirect_to = self.request.REQUEST.get(redirect_field_name, '') if self.request.method == "POST": self.form = authentication_form(data=self.request.POST) if self.form.is_valid(): # Light security check -- make sure redirect_to isn't garbage. if not redirect_to or ' ' in redirect_to: redirect_to = settings.LOGIN_REDIRECT_URL # Heavier security check -- redirects to http://example.com should # not be allowed, but things like /view/?param=http://example.com # should be allowed. This regex checks if there is a '//' *before* a # question mark. elif '//' in redirect_to and re.match(r'[^\?]*//', redirect_to): redirect_to = settings.LOGIN_REDIRECT_URL # Okay, security checks complete. Log the user in. auth_login(self.request, self.form.get_user()) setattr(self, redirect_field_name, redirect_to) if self.request.session.test_cookie_worked(): self.request.session.delete_test_cookie() raise MustRedirect(redirect_to) else: # Invalid user/password messages.warning(self.request, _("Sorry, that's not a valid username or password")) else: self.form = authentication_form(self.request) self.request.session.set_test_cookie() if Site._meta.installed: self.site = Site.objects.get_current() else: self.site = RequestSite(self.request) setattr(self, redirect_field_name, redirect_to) class AccountForgottenPassword(AccountLogin): """ Forgotten pwd. Sorry, this has yet to be implemented. 
""" name = "forgotten_password" title = _("Forgot your password") template = "registration/forgotten.html" template_variables = BaseView.template_variables + ['form', ] def prepare_view(self,): if self.request.method == "POST": self.form = registration_forms.ForgottenPasswordForm(data=self.request.POST) if self.form.is_valid(): # Generate the reset password link. # The link is in two parts: the verification hash and the email. # The verification hash is a combination of the server's secret key, user's email, # HASHED version of the user password and current date. # That way, we ensure that an email/site hash/password hash combination will # get a unique reset password link. email = self.form.cleaned_data['email'] user = User.objects.get(email = email) h = "%s%s%s%s" % (settings.SECRET_KEY, email, user.password, time.strftime("%Y%m%d")) h = hashlib.md5(h).hexdigest() reset_link = reverse(ResetPassword.name, args = (h, urllib.quote_plus(email))) # Send the invitation (as a signal) useraccount = UserAccount.objects.__booster__.get(user__id = user.id) reset_password.send( sender = self.__class__, target = useraccount, reset_password_uri = "%s" % (reset_link, ), ) # Say we're happy and redirect messages.success(self.request, _("We've sent you a password reset email.")) raise MustRedirect(reverse("twistranet_home")) else: self.form = registration_forms.ForgottenPasswordForm() class ResetPassword(AccountLogin): """ Provide a way for users to reset their password. Works with a hash generated in the AccountForgottenPassword view. """ name = "reset_password" title = _("Reset your password") template = "registration/reset_password.html" template_variables = BaseView.template_variables + ['form', ] def prepare_view(self, check_hash, email): if self.request.method == "POST": self.form = registration_forms.ResetPasswordForm(data=self.request.POST) if self.form.is_valid(): # Generate the reset password link. # The link is in two parts: the verification hash and the password hash. 
# That way, we ensure that an email/site hash/password hash combination will # get a unique reset password link. user = User.objects.get(email = email) if user.password == UNUSABLE_PASSWORD: raise ValidationError(_("Can't set password on this user.")) h = "%s%s%s%s" % (settings.SECRET_KEY, email, user.password, time.strftime("%Y%m%d")) h = hashlib.md5(h).hexdigest() if not h == check_hash: raise ValidationError("Attempt to access an invalid verification hash.") # Actually change password user.set_password(self.form.cleaned_data['password']) user.save() # Say we're happy and redirect messages.success(self.request, _("Your password is set to its new value. You can now login.")) raise MustRedirect(reverse("twistranet_home")) else: self.form = registration_forms.ResetPasswordForm() class ChangePassword(UserAccountEdit): """ Classic "change password" with former password validation. """ name = "change_password" title = _("Change your password") template = "account/edit.html" form_class = account_forms.ChangePasswordForm template_variables = UserAccountEdit.template_variables + ['form', ] def as_action(self,): """ Display this action only on current account, with user-settable backends. 
""" if not hasattr(self, "object"): return None if not self.auth.id == self.object.id: return None if self.auth.user.password == UNUSABLE_PASSWORD: return None return super(ChangePassword, self).as_action() def prepare_view(self, *args, **kw): super(ChangePassword, self).prepare_view(*args, **kw) if self.request.method == "POST": self.form = account_forms.ChangePasswordForm(data=self.request.POST) if self.form.is_valid(): # Actually change password user = self.useraccount.user user.set_password(self.form.cleaned_data['new_password']) user.save() # Say we're happy and redirect messages.success(self.request, _("New password set.")) raise MustRedirect(reverse("twistranet_home")) else: self.form = account_forms.ChangePasswordForm() class AccountLogout(BaseView): template = "registration/login.html" template_variables = BaseView.template_variables + ["justloggedout", ] name = "logout" title = _("Logged out") def prepare_view(self,): messages.info(self.request, mark_safe(_("You are now logged out.<br />Thanks for spending some quality time on Twistranet."))) self.justloggedout = True logout(self.request) import os, random, string from StringIO import StringIO import csv class CSVDialect(csv.excel): delimiter = ';' lineterminator = '\r\n' class AccountsImport(BaseView): """ import users view """ template="registration/import_accounts.html" name = "accounts_import" title = _("Import accounts") def prepare_view(self): """ Render the import form or do the import (from csv file posted). 
""" if self.request.method == "POST" and \ self.request.FILES.get("csv_file", None): csv_file = self.request.FILES.get("csv_file") reader = csv.reader(csv_file, dialect=CSVDialect) for line in reader: if not line: continue # firstname;lastname;email firstname = line[0].decode('utf8') lastname = line[1].decode('utf8') email = line[2] username = email.split('@')[0] username = slugify(username).replace('_','-') if User.objects.filter(username = username).exists(): u = User.objects.get(username = username) useraccount = UserAccount.objects.get(user = u) log.info( "User account '%s' already exixts" %useraccount.title ) else: # create user try: __account__ = SystemAccount.get() u = User.objects.create( username = username, first_name = firstname, last_name = lastname, email = email, is_superuser = False, is_active = True, ) chars = string.ascii_letters + string.digits random.seed = (os.urandom(1024)) password = ''.join(random.choice(chars) for i in range(6)) u.set_password(password) u.save() useraccount = UserAccount.objects.get(user = u) useraccount.title = u"%s %s" % (firstname, lastname) useraccount.save() log.info( "User account '%s' for %s %s (%s) created !" %(username, firstname, lastname, email)) # notify imported user (a mail is sent to prevent user) h = "%s%s%s%s" % (settings.SECRET_KEY, email, password, time.strftime("%Y%m%d")) h = hashlib.md5(h).hexdigest() reset_link = reverse(ResetPassword.name, args = (h, urllib.quote_plus(email))) user_imported.send( sender = self.__class__, target = useraccount, reset_password_url = reset_link, ) del __account__ except: log.warning( "Impossible to create account '%s' for %s %s (%s)" %(username, firstname, lastname, email)) continue community_title = line[3].decode('utf8') cid = slugify(community_title) if Community.objects.filter(slug = cid).exists(): log.info( "Community %s already exists !" 
%community ) else: c = Community.objects.create( slug = cid, title = community_title, permissions = "workgroup" ) c.save() com = Community.objects.get(slug= cid) com.join(account=useraccount) log.info( "user %s join the community %s !" %(useraccount.title, community_title) ) messages.info( self.request, u"import finished",)
Courts • Ronald E. Nehring has served on the state's high court for more than 11 years. Trent Nelson | The Salt Lake Tribune Utah Supreme Court Justice Ronald E. Nehring hears arguments. Associate Chief Justice Ronald E. Nehring asks a question during oral arguments at the Utah Supreme Court. Justice Ronald E. Nehring has announced his retirement after more than 11 years on the Utah Supreme Court bench. Nehring, who will leave his post in February, according to an announcement, had served eight years as a judge in 3rd District Court before he was appointed to the state's high court by Gov. Mike Leavitt in May 2003. At the time of his appointment, Nehring was the presiding judge of 3rd District Court. The court will be accepting applications for Nehring's replacement to the Supreme Court through Aug. 22. Once applications have been submitted, the Judicial Nominating Commission will submit seven names to the governor, at which point Gov. Herbert will have 30 days to make a selection. That nominee will then be subject to approval by the Utah State Senate. Nehring has served as the associate chief justice on the state's Supreme Court since 2012. A University of Utah law school graduate, Nehring began his career with Utah Legal Services. He has served as chair of the Board of District Court Judges, a member of the Supreme Court Advisory Committee on the Rules of Professional Conduct and has been a member of the Utah Judicial Council.
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Update the agasc_supplement.h5.

This file is a supplement to the stable AGASC to inform star selection
and star catalog checking.

Currently this script only has the capability to add a bad star to the bad
star table.  It might end up including functionality to automatically update
another table with effective mags based on acq / guide history.

For process instructions see: https://github.com/sot/mica/wiki/AGASC-supplement
"""

import os
import argparse
from pathlib import Path

import pyyaks.logger
from astropy.table import Table

SKA = Path(os.environ['SKA'])

logger = None  # Set via global in main()


def get_options(args=None):
    """Parse command-line options for the supplement updater.

    :param args: list of argument strings (default: ``sys.argv[1:]``)
    :returns: ``argparse.Namespace`` with parsed options
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--data-root",
                        default='.',
                        help=("Directory containing agasc_supplement.h5 (default='.')"))
    parser.add_argument("--bad-star-id",
                        type=int,
                        help="AGASC ID of star to add to bad-star list")
    parser.add_argument("--bad-star-source",
                        type=int,
                        help=("Source identifier indicating provenance (default=max "
                              "existing source + 1)"))
    parser.add_argument("--log-level",
                        default=20,
                        help="Logging level (default=20 (info))")
    parser.add_argument("--dry-run",
                        action="store_true",
                        help="Dry run (no actual file or database updates)")
    opt = parser.parse_args(args)
    return opt


def main(args=None):
    """Entry point: parse options and apply the requested supplement updates.

    :param args: list of argument strings (default: ``sys.argv[1:]``)
    :raises IOError: if ``agasc_supplement.h5`` is not found under ``--data-root``
    """
    global logger

    # Setup for updating the sync repository
    opt = get_options(args)

    # Set up logging
    loglevel = int(opt.log_level)
    logger = pyyaks.logger.get_logger(name='mica_update_agasc_supplement',
                                      level=loglevel,
                                      format="%(message)s")

    data_root = Path(opt.data_root)
    suppl_file = data_root / 'agasc_supplement.h5'
    if suppl_file.exists():
        logger.info(f'Updating agasc_supplement at {suppl_file}')
    else:
        raise IOError(f'file {suppl_file.absolute()} not found')

    # Explicit None check: the previous ``if opt.bad_star_id:`` truthiness test
    # would silently skip a (falsy but representable) ID of 0 given on the
    # command line.  argparse leaves the option as None when it is absent.
    if opt.bad_star_id is not None:
        add_bad_star(opt.bad_star_id, opt.bad_star_source, suppl_file, opt.dry_run)


def add_bad_star(bad_star_id, bad_star_source, suppl_file, dry_run):
    """Append ``bad_star_id`` to the 'bad' table of the supplement file.

    :param bad_star_id: AGASC ID of the star to mark as bad
    :param bad_star_source: source identifier for provenance; if None, use
        max existing source + 1
    :param suppl_file: Path to agasc_supplement.h5
    :param dry_run: if True, log what would happen but do not write the file
    """
    bad_star_id = int(bad_star_id)
    dat = Table.read(str(suppl_file), format='hdf5', path='bad')
    if bad_star_source is None:
        bad_star_source = dat['source'].max() + 1
    else:
        bad_star_source = int(bad_star_source)
    dat.add_row((bad_star_id, bad_star_source))

    logger.info(f'Appending {bad_star_id} with source={bad_star_source} to {suppl_file}')
    logger.info('')
    logger.info('IMPORTANT:')
    logger.info('Edit following if source ID is new:')
    logger.info('  https://github.com/sot/mica/wiki/AGASC-supplement')
    logger.info('')
    logger.info('The wiki page also includes instructions for test, review, approval')
    logger.info('and installation.')

    if not dry_run:
        dat.write(str(suppl_file), format='hdf5', path='bad', append=True, overwrite=True)


if __name__ == '__main__':
    main()
The Internet is a wonderful place to find information on all kinds of things. If your needs include the installation or monitoring services of an alarm system, you can find a number of good companies to consider by performing a Google search with the term "alarm companies Harbor, CA". This will provide you with a list of companies that cover this area. You will most likely find companies which have helpful websites. These will most likely contain good information on their products and services. If you are considering the purchase of an alarm system, many details can be provided on the site. The site can also list some of the other services which that company might provide. For example, many people require central monitoring services. Check into that company's capabilities in this area, since it's an important consideration. For example, you should find a company which has well-maintained centers staffed by highly trained personnel. The equipment should be redundant in case there is some type of equipment failure. Some companies can transfer calls to other centers in case a problem occurs at the local one. And the Internet can also provide you with information on the reliability and reputation of the company. Check out their history of complaints from the Better Business Bureau website. There is very helpful information pertaining to the integrity and reliability of the company there. There are also some forums which have discussions pertaining to alarm companies. You may be able to find information concerning the reputation of a particular company you are considering. In this way you can find a good alarm company which effectively serves the local area and provides a high degree of excellent products and services at fair prices.
from pyramid.traversal import find_root from snovault import ( calculated_property, collection, load_schema, ) from snovault.util import Path from .base import ( ALLOW_SUBMITTER_ADD, Item, paths_filtered_by_status, SharedItem ) from .dataset import Dataset from .shared_calculated_properties import ( CalculatedAssaySynonyms, CalculatedAssayTermID, CalculatedVisualize, CalculatedBiosampleSummary, CalculatedSimpleSummary, CalculatedReplicates, CalculatedAssaySlims, CalculatedAssayTitle, CalculatedCategorySlims, CalculatedTypeSlims, CalculatedObjectiveSlims, CalculatedReplicationType ) from .assay_data import assay_terms @collection( name='experiments', unique_key='accession', properties={ 'title': 'Experiments', 'description': 'Listing of Experiments', }) class Experiment(Dataset, CalculatedAssaySynonyms, CalculatedAssayTermID, CalculatedVisualize, CalculatedBiosampleSummary, CalculatedSimpleSummary, CalculatedReplicates, CalculatedAssaySlims, CalculatedAssayTitle, CalculatedCategorySlims, CalculatedTypeSlims, CalculatedObjectiveSlims, CalculatedReplicationType): item_type = 'experiment' schema = load_schema('encoded:schemas/experiment.json') embedded = Dataset.embedded + [ 'biosample_ontology', 'files.platform', 'files.analysis_step_version.analysis_step', 'files.analysis_step_version.analysis_step.pipelines', 'files.quality_metrics', 'related_series', 'replicates.antibody', 'replicates.library', 'replicates.library.biosample.biosample_ontology', 'replicates.library.biosample.submitted_by', 'replicates.library.biosample.source', 'replicates.library.biosample.applied_modifications', 'replicates.library.biosample.organism', 'replicates.library.biosample.donor', 'replicates.library.biosample.donor.organism', 'replicates.library.biosample.part_of', 'replicates.library.biosample.part_of.donor', 'replicates.library.biosample.part_of.treatments', 'replicates.library.biosample.treatments', 'replicates.library.construction_platform', 'replicates.library.treatments', 
'possible_controls', 'target.genes', 'target.organism' ] audit_inherit = [ 'original_files', 'original_files.replicate', 'original_files.platform', 'target', 'files.analysis_step_version.analysis_step.pipelines', 'revoked_files', 'revoked_files.replicate', 'submitted_by', 'lab', 'award', 'default_analysis', 'documents', 'replicates.antibody.characterizations.biosample_ontology', 'replicates.antibody.characterizations', 'replicates.antibody.targets', 'replicates.library', 'replicates.library.documents', 'replicates.library.biosample', 'replicates.library.biosample.biosample_ontology', 'replicates.library.biosample.organism', 'replicates.library.biosample.treatments', 'replicates.library.biosample.applied_modifications', 'replicates.library.biosample.donor.organism', 'replicates.library.biosample.donor', 'replicates.library.biosample.treatments', 'replicates.library.biosample.originated_from', 'replicates.library.biosample.originated_from.biosample_ontology', 'replicates.library.biosample.part_of', 'replicates.library.biosample.part_of.biosample_ontology', 'replicates.library.biosample.pooled_from', 'replicates.library.biosample.pooled_from.biosample_ontology', 'replicates.library.spikeins_used', 'replicates.library.treatments', 'target.organism', ] set_status_up = [ 'original_files', 'replicates', 'documents', 'target', 'analyses', ] set_status_down = [ 'original_files', 'replicates', 'analyses', ] rev = Dataset.rev.copy() rev.update({ 'replicates': ('Replicate', 'experiment'), 'related_series': ('Series', 'related_datasets'), 'superseded_by': ('Experiment', 'supersedes') }) @calculated_property(schema={ "title": "Related series", "type": "array", "items": { "type": ['string', 'object'], "linkFrom": "Series.related_datasets", }, "notSubmittable": True, }) def related_series(self, request, related_series): return paths_filtered_by_status(request, related_series) @calculated_property(schema={ "title": "Superseded by", "type": "array", "items": { "type": ['string', 
'object'], "linkFrom": "Experiment.supersedes", }, "notSubmittable": True, }) def superseded_by(self, request, superseded_by): return paths_filtered_by_status(request, superseded_by) @calculated_property(schema={ "title": "Protein tags", "description": "The protein tags introduced through the genetic modifications of biosamples investigated in the experiment.", "comment": "Do not submit. This field is calculated through applied_modifications.", "type": "array", "notSubmittable": True, "minItems": 1, "items": { "title": "Protein tag", "description": "The protein tag introduced in the modification.", "type": "object", "additionalProperties": False, "properties": { "name": { "title": "Tag name", "type": "string", "enum": [ "3xFLAG", "6XHis", "DsRed", "eGFP", "ER", "FLAG", "GFP", "HA", "mCherry", "T2A", "TagRFP", "TRE", "V5", "YFP", "mAID-mClover", "mAID-mClover-NeoR", "mAID-mClover-Hygro" ] }, "location": { "title": "Tag location", "type": "string", "enum": [ "C-terminal", "internal", "N-terminal", "other", "unknown" ] }, "target": { "title": "Tagged protein", "type": "string", "linkTo": "Target", } } } }) def protein_tags(self, request, replicates=None): protein_tags = [] if replicates is not None: for rep in replicates: replicateObject = request.embed(rep, '@@object?skip_calculated=true') if replicateObject['status'] in ('deleted', 'revoked'): continue if 'library' in replicateObject: libraryObject = request.embed(replicateObject['library'], '@@object?skip_calculated=true') if libraryObject['status'] in ('deleted', 'revoked'): continue if 'biosample' in libraryObject: biosampleObject = request.embed(libraryObject['biosample'], '@@object') if biosampleObject['status'] in ('deleted', 'revoked'): continue genetic_modifications = biosampleObject.get('applied_modifications') if genetic_modifications: for gm in genetic_modifications: gm_object = request.embed(gm, '@@object?skip_calculated=true') if gm_object.get('introduced_tags') is None: continue if 
gm_object.get('introduced_tags'): for tag in gm_object.get('introduced_tags'): tag_dict = {'location': tag['location'], 'name': tag['name']} if gm_object.get('modified_site_by_target_id'): tag_dict.update({'target': gm_object.get('modified_site_by_target_id')}) protein_tags.append(tag_dict) if len(protein_tags) > 0: return protein_tags @calculated_property(schema={ "title": "Life stage and age summary", "description": "Life stage and age display summary to be used for the mouse development matrix.", "type": "string", "notSubmittable": True, }) def life_stage_age(self, request, replicates=None): biosample_accessions = set() all_life_stage = set() all_age_display = set() life_stage_age = '' if replicates is not None: for rep in replicates: replicateObject = request.embed(rep, '@@object?skip_calculated=true') if replicateObject['status'] in ('deleted', 'revoked'): continue if 'library' in replicateObject: libraryObject = request.embed(replicateObject['library'], '@@object?skip_calculated=true') if libraryObject['status'] in ('deleted', 'revoked'): continue if 'biosample' in libraryObject: biosampleObject = request.embed(libraryObject['biosample'], '@@object') if biosampleObject['status'] in ('deleted', 'revoked'): continue if biosampleObject['accession'] not in biosample_accessions: biosample_accessions.add(biosampleObject['accession']) life_stage = biosampleObject.get('life_stage') if life_stage: all_life_stage.add(life_stage) age_display = biosampleObject.get('age_display') if age_display: all_age_display.add(age_display) # Only return life_stage_age if all biosamples have the same life_stage and age_display if len(all_life_stage) == 1 and len(all_age_display) == 1: life_stage_age = ''.join(all_life_stage) + ' ' + ''.join(all_age_display) return life_stage_age @calculated_property(schema={ "title": "Perturbed", "description": "A flag to indicate whether any biosamples have been perturbed with treatments or genetic modifications.", "type": "boolean", }) def 
perturbed(self, request, replicates=None): if replicates is not None: bio_perturbed = set() for rep in replicates: replicateObject = request.embed(rep, '@@object?skip_calculated=true') if replicateObject['status'] in ('deleted', 'revoked'): continue if 'library' in replicateObject: libraryObject = request.embed(replicateObject['library'], '@@object?skip_calculated=true') if libraryObject['status'] in ('deleted', 'revoked'): continue if 'biosample' in libraryObject: biosampleObject = request.embed(libraryObject['biosample'], '@@object') if biosampleObject['status'] in ('deleted', 'revoked'): continue bio_perturbed.add(biosampleObject['perturbed']) return any(bio_perturbed) return False matrix = { 'y': { 'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name'], 'label': 'Biosample', }, 'x': { 'group_by': 'assay_title', 'label': 'Assay', }, } sescc_stem_cell_matrix = { 'y': { 'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name'], 'label': 'Biosample', }, 'x': { 'group_by': ['assay_title', 'target.label'], 'label': 'Assay', }, } chip_seq_matrix = { 'y': { 'group_by': [ 'replicates.library.biosample.donor.organism.scientific_name', 'target.label', ], 'label': 'Target', }, 'x': { 'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name', ('protein_tags.name', 'no_protein_tags')], 'label': 'Term Name', }, } summary_matrix = { 'x': { 'group_by': 'status' }, 'y': { 'group_by': ['replication_type'] } } reference_epigenome = { 'y': { 'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name'], 'label': 'Biosample', }, 'x': { 'group_by': ['assay_title', 'target.label'], 'label': 'Assay', }, } entex = { 'y': { 'group_by': ['biosample_ontology.classification', 'biosample_ontology.term_name'], 'label': 'Biosample', }, 'x': { 'group_by': ['assay_title', ('target.label', 'no_target'), 'replicates.library.biosample.donor.sex', 'replicates.library.biosample.donor.accession'], 'label': 
'Assay', }, } mouse_development = { 'y': { 'group_by': ['biosample_ontology.term_name', 'life_stage_age'], 'label': 'Biosample', }, 'x': { 'group_by': ['assay_title', 'target.label'], 'label': 'Assay', }, } encore_matrix = { 'y': { 'group_by': ['target.label'], 'label': 'Target', }, 'x': { 'group_by': ['assay_title', 'biosample_ontology.term_name'], 'label': 'Assay', }, } encore_rna_seq_matrix = { 'y': { 'group_by': [('replicates.library.biosample.subcellular_fraction_term_name', 'no_term_name')], 'label': 'Subcellular localization', }, 'x': { 'group_by': ['assay_title', 'biosample_ontology.term_name'], 'label': 'Assay', }, } audit = { 'audit.ERROR.category': { 'group_by': 'audit.ERROR.category', 'label': 'Error' }, 'audit.INTERNAL_ACTION.category': { 'group_by': 'audit.INTERNAL_ACTION.category', 'label': 'Internal Action'}, 'audit.NOT_COMPLIANT.category': { 'group_by': 'audit.NOT_COMPLIANT.category', 'label': 'Not Compliant' }, 'audit.WARNING.category': { 'group_by': 'audit.WARNING.category', 'label': 'Warning' }, 'x': { 'group_by': 'assay_title', 'label': 'Assay' } } @collection( name='replicates', acl=ALLOW_SUBMITTER_ADD, properties={ 'title': 'Replicates', 'description': 'Listing of Replicates', }) class Replicate(Item): item_type = 'replicate' schema = load_schema('encoded:schemas/replicate.json') embedded = [ 'antibody', 'experiment', 'library', 'library.biosample', 'library.biosample.donor', 'library.biosample.donor.organism', ] set_status_up = [ 'library', 'antibody', ] set_status_down = [] def unique_keys(self, properties): keys = super(Replicate, self).unique_keys(properties) value = u'{experiment}/{biological_replicate_number}/{technical_replicate_number}'.format( **properties) keys.setdefault('replicate:experiment_biological_technical', []).append(value) return keys def __ac_local_roles__(self): properties = self.upgrade_properties() root = find_root(self) experiment = root.get_by_uuid(properties['experiment']) return experiment.__ac_local_roles__()
Stylish, unique, and functional. BoxWave's Cyclone Crystal Slip for the Apple iPhone 5 is an ultra low profile rubberized case that slips and grips onto your Apple iPhone 5. Precision constructed with anti-slip and cushioned material, the Cyclone Crystal Slip case provides reliable protection from accidental bumps. It comes in your choice of various colors, letting you customize the look of your Apple iPhone 5! Designed with cyclone debossed patterns on the back, the Cyclone Crystal Slip accentuates and adds character to your Apple iPhone 5.
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Numpy implementations of sets functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import numpy as np from tensorflow_probability.python.internal.backend.numpy import _utils as utils __all__ = [ 'difference', ] def _difference(a, b, aminusb=True, validate_indices=True): if not aminusb: raise NotImplementedError( 'Argument `aminusb != True` is currently unimplemented.') if not validate_indices: raise NotImplementedError( 'Argument `validate_indices != True` is currently unimplemented.') return np.setdiff1d(a, b) # --- Begin Public Functions -------------------------------------------------- # TODO(b/136555907): Add unit test. difference = utils.copy_docstring( 'tf.sets.difference', _difference)
H2One Ltd. is an Antigua based plumbing company that was established in 2015 and is your key partner in plumbing technology. We are a young, dynamic firm whose employees were not only trained in Germany, but have also acquired several years of working experience there. This means that we offer high quality installation of a standard that will be difficult to find anywhere else. Regardless of whether you have small or large projects, we are the right partner for high class installation. We complement this excellent service by using Swiss and German technologies, and are the first official reseller for GEBERIT and HANSGROHE products in the Caribbean. So you know you are getting quality you can see, from the installation, through the materials, to the finished product. Though the company is based in Antigua, we also offer our services to other Caribbean islands. The guarantees for the products provided by Geberit and Hansgrohe, and a 3 year warranty for our work upon installation, will of course be provided to you. So you can be secure in the knowledge that, even though maintenance is unlikely to be an issue, if you ever do need anything we will be there to take care of it.
# # This file is part of ROA-Analysis # # Author: Samir Al-Sheikh (Freie Universitaet, Berlin) # s.al-sheikh@fu-berlin.de # # MIT License # # Copyright (c) 2017 The ROA-Analysis authors # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
#
## Analyses the impact of RPKI validation on BGP: dumps BGPReader updates
## annotated by several RPKI collectors and plots, per collector, the
## differences of the notfound/invalid/valid announcement counts over time.
import os, re, matplotlib, calendar, types, numpy, pandas, copy, time, warnings, glob
from collections import Counter
from datetime import datetime
from subprocess import Popen
from utils.parse import parse_window, parse_shell, parse_cfg
from utils.util import cc_refactoring

# Configuration file located next to this script.
config_file = os.path.dirname(os.path.abspath(__file__)) + '/config.txt'

# Select a non-interactive backend when running over SSH (no X display);
# this must happen before pyplot is imported.
if parse_cfg(0).ssh_enabled: matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib as mpl

# getopt-style option string consumed by utils.parse.parse_shell
# (presumably: -i interval, -p project, -c collector, -m mode, -d dpi,
# -t tex flag — TODO confirm against parse_shell).
options = 'i:p:c:m:d:t'

## Note: Warnings ignored (Type 3 font error if tex mode is active)
warnings.filterwarnings('ignore')


def get_options():
    """ Return the getopt option string understood by this analysis. """
    return options


def start_bgpreader(interval, project, collectors, archive_an, cfg):
    """ Starts a BGPReader instance for multiple RPKI Cache Server for a given time interval

    interval    -- 'start,end' pair of unix timestamps (string)
    project     -- RPKI project name used in the -H annotation argument
    collectors  -- list of collector names joined into the -H argument
    archive_an  -- analysis archive directory receiving the CSV dump
    cfg         -- parsed configuration (bgpreader_path, bgp_project, ...)

    Returns the path of the CSV dump file, or -1 if the dump is empty.
    """
    ## Check whether LD_LIBRARY_PATH is set to prevent rtrlib linker errors
    rtr_env = os.environ.copy()
    if 'LD_LIBRARY_PATH' not in os.environ:
        exit("Warning: LD_LIBRARY_PATH is not set, add the rtrlib path\n")
    ## Start BGPReader and log errors to file
    dump_file = archive_an + '/' + cfg.bgp_project + \
        '_' + cfg.bgp_vantage_point + '_' + interval + '.csv'
    if not os.path.exists('LOG/'): os.makedirs('LOG/')
    log_file = 'LOG/' + 'plot_BGP_Impact_log.txt'
    print 'Info: Executing BGPReader...'
    # stdout of BGPReader is redirected into dump_file by the shell; stderr
    # is appended to the log file.
    with open(log_file, 'a') as f, open(os.devnull, 'w') as devnull:
        command = cfg.bgpreader_path + ' -t updates -p ' + cfg.bgp_project + ' -c ' + cfg.bgp_vantage_point + \
            ' -H ' + '\"1,0,0,' + project + ',' + (',' + project + ',').join(collectors) + '\"' + \
            ' -w ' + interval + ' > \"' + dump_file + '\"'
        print command
        p = Popen(['/bin/bash', '-c', command], env=rtr_env, stdout=devnull, stderr=f)
        p.communicate()
    print 'Info: Finished BGPReader'
    # An empty dump signals that BGPStream produced no output (treated as error).
    if not os.stat(dump_file).st_size: return -1
    return dump_file


def main():
    """ Dumps BGPReader output to file and analyses the different validation status """
    ## Parse shell arguments and config constants and build up file infrastructure
    args = parse_shell(options); cfg = parse_cfg(args)
    # A single collector string (instead of a list) means fewer than two
    # collectors were given; the differential analysis needs at least two.
    if isinstance(args.collector, types.StringTypes):
        exit('Error: Collector number should be minimal two')
    archive_an = cfg.archive_an + 'BGP_Impact/' + '_'.join(args.collector)
    if not os.path.exists(archive_an): os.makedirs(archive_an)
    ## Get dump files and convert argument interval to unix timestamps
    dump_files, dump_diff = ([] for i in range(2))
    for c in args.collector:
        collector_search = cfg.archive_cc + c + '/*/' + cfg.pf + '*' + cfg.sf
        dump_files.append([c, sorted([e.split('/')[-1] for e in glob.iglob(collector_search)])])
    start, end = args.interval.split(',')
    if(start == '0' or end == '-1'): exit('Error: Open interval is not allowed')
    start_unix = calendar.timegm(datetime.strptime(start, '%Y%m%d.%H%M').utctimetuple())
    end_unix = calendar.timegm(datetime.strptime(end, '%Y%m%d.%H%M').utctimetuple())
    inv_unix = str(start_unix) + ',' + str(end_unix)
    ## Start BGPReader and wait for the output
    if(args.mode == 'w'):
        dump_file = start_bgpreader(inv_unix, args.project, args.collector, archive_an, cfg)
        if(dump_file == -1): exit('Error: BGPStream caused an error')
        ## Create a output Dump containing only the updates with different validation results
        with open(dump_file, 'r') as f:
            for line in f:
                # Keep the interval marker lines so the dump stays splittable.
                if 'epoch_filetime' in line: dump_diff.append(line); continue
                # Only announcements ('A') are compared across collectors.
                if(line.split('|')[1] == 'A'):
                    c_count = []
                    for c in args.collector:
                        nf = line.count(c + ',notfound'); iv = line.count(c + ',invalid')
                        v = line.count(c + ',valid')
                        if c in line: c_count.append([nf, iv, v])
                    # A line is "different" when at least two collectors
                    # disagree on their (notfound, invalid, valid) counts.
                    if(len(set(tuple(c) for c in c_count)) > 1): dump_diff.append(line)
        with open(dump_file[:-4] + '_diff.csv' , 'w') as f:
            f.write(''.join(dump_diff))
    else:
        # Plot-only mode: re-use an existing dump and the exported CSVs.
        dump_file = archive_an + '/' + '-'.join(args.collector) + '_' + cfg.bgp_project + \
            '_' + cfg.bgp_vantage_point + '_' + inv_unix + '.csv'
        print dump_file
        if not os.path.exists(dump_file):
            exit('Error: BGP-Impact file does not exist, rerun with w-mode')
        ## Possible second only plot mode
        nf_diff = pandas.read_csv(archive_an + '/not_found.csv', encoding='utf8', delimiter=',')
        iv_diff = pandas.read_csv(archive_an + '/invalid.csv', encoding='utf8', delimiter=',')
        v_diff = pandas.read_csv(archive_an + '/valid.csv', encoding='utf8', delimiter=',')
        # Rebuild the timestamp index and a stable (sorted) column order for
        # each of the three dataframes read back from disk.
        v_diff.columns = ['Timestamp'] + list(v_diff.columns.values[1:])
        v_diff['Timestamp'] = pandas.to_datetime(v_diff['Timestamp'])
        v_diff.set_index('Timestamp', inplace=True)
        v_diff = v_diff.reindex_axis(sorted(v_diff.columns), axis=1)
        iv_diff.columns = ['Timestamp'] + list(iv_diff.columns.values[1:])
        iv_diff['Timestamp'] = pandas.to_datetime(iv_diff['Timestamp'])
        iv_diff.set_index('Timestamp', inplace=True)
        iv_diff = iv_diff.reindex_axis(sorted(iv_diff.columns), axis=1)
        nf_diff.columns = ['Timestamp'] + list(nf_diff.columns.values[1:])
        nf_diff['Timestamp'] = pandas.to_datetime(nf_diff['Timestamp'])
        nf_diff.set_index('Timestamp', inplace=True)
        nf_diff = nf_diff.reindex_axis(sorted(nf_diff.columns), axis=1)
        # Debug Print
        print nf_diff; print iv_diff; print v_diff
        plot(nf_diff, iv_diff, v_diff, args.collector, archive_an, dump_file, args.dpi, args.tex)
        return
    ## Open the output file and split it by interval
    df = pandas.read_csv(dump_file, sep='\n', names=['Line'], dtype=str)
    inds = df[df['Line'].str.contains('epoch_filetime')].index.values.tolist() + [df.index[-1]]
    # dumps: list of (epoch_filetime string, block of lines up to next marker).
    dumps = [(df['Line'][i].split('epoch_filetime: ')[1].split('\n')[0], \
        '\n'.join(df['Line'][i:k].tolist())) for i,k in zip(inds[0::], inds[1::])]
    timestamps = [d[0] for d in dumps]
    del df, inds
    ## Count the NFs, IVs, Vs for every collector and set the dataframes
    nf = pandas.DataFrame(columns=args.collector, index=timestamps).fillna(numpy.NaN)
    iv = copy.deepcopy(nf); v = copy.deepcopy(nf)
    for i,y in enumerate(dumps):
        nf_l, iv_l, v_l = ([] for i in range(3))
        for c in args.collector:
            # A collector that does not appear in this interval at all yields
            # NaN counts instead of zeros.
            if not len(re.findall(re.escape(c), dumps[i][1])):
                nf_l.append(numpy.NaN), iv_l.append(numpy.NaN), v_l.append(numpy.NaN)
            else:
                nf_l.append(len(re.findall(re.escape(c + ',notfound'), dumps[i][1])))
                iv_l.append(len(re.findall(re.escape(c + ',invalid'), dumps[i][1])))
                v_l.append(len(re.findall(re.escape(c + ',valid'), dumps[i][1])))
        nf.loc[dumps[i][0]] = nf_l; iv.loc[dumps[i][0]] = iv_l; v.loc[dumps[i][0]] = v_l
    # Convert the epoch strings into datetime objects for the plot axis.
    timestamps = [datetime.strptime(time.strftime('%Y%m%d.%H%M', time.gmtime(float(t))), \
        '%Y%m%d.%H%M') for t in timestamps]
    nf.index = timestamps; iv.index = timestamps; v.index = timestamps;
    # Differences of every further collector against the first (reference) one.
    nf_diff = nf[args.collector[1:]].sub(nf[args.collector[0]], axis=0)
    iv_diff = iv[args.collector[1:]].sub(iv[args.collector[0]], axis=0)
    v_diff = v[args.collector[1:]].sub(v[args.collector[0]], axis=0)
    nf_diff.columns = cc_refactoring(nf_diff.columns.values)
    nf_diff = nf_diff.reindex_axis(sorted(nf_diff.columns), axis=1)
    iv_diff.columns = cc_refactoring(iv_diff.columns.values)
    iv_diff = iv_diff.reindex_axis(sorted(iv_diff.columns), axis=1)
    v_diff.columns = cc_refactoring(v_diff.columns.values)
    v_diff = v_diff.reindex_axis(sorted(v_diff.columns), axis=1)
    ## Debug Print
    print nf_diff; print iv_diff; print v_diff
    ## Export dataframe to csv to enable only plot mode
    nf_diff.to_csv(archive_an + '/not_found.csv', sep=',', encoding='utf-8')
    iv_diff.to_csv(archive_an + '/invalid.csv', sep=',', encoding='utf-8')
    v_diff.to_csv(archive_an + '/valid.csv', sep=',', encoding='utf-8')
    plot(nf_diff, iv_diff, v_diff, args.collector, archive_an, dump_file, args.dpi, args.tex)
    return


def plot(nf_diff, iv_diff, v_diff, collector, archive_an, dump_file, dpi, tex):
    """ Plot the graph for differences for the BGP validation over time

    nf_diff/iv_diff/v_diff -- per-collector difference dataframes
                              (notfound / invalid / valid)
    collector  -- list of collector names; collector[0] is the reference
    archive_an -- analysis archive directory (plots go to its Plots/ subdir)
    dump_file  -- dump file path, reused for the PDF name
    dpi, tex   -- figure resolution and LaTeX-styling flag
    """
    ## Set figure properties
    figsize = (20, 10) if not int(tex) else (25, 16)
    mpl.rcParams['figure.figsize'] = figsize; mpl.rcParams['figure.dpi'] = dpi
    mpl.rcParams['figure.facecolor'] = 'w'; mpl.rcParams['figure.edgecolor'] = 'k'
    if int(tex): mpl.rc('font',family='serif',serif='Latin Modern Roman',size=24)
    ## Plotting
    # One stacked subplot per non-reference collector, sharing the y axis;
    # ax1 is an invisible frame used only for the common axis labels.
    fig, ax = plt.subplots(nrows=len(collector[1:]), ncols=1, sharey=True)
    ax1 = fig.add_subplot(111, frameon=False)
    plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
    start, end = nf_diff.index[0].strftime('%b %d '), nf_diff.index[-2].strftime('until %b %d, %Y')
    collectors = sorted(cc_refactoring(collector)); y = 1.02 if int(tex) else 1
    plt.title('Impact on BGP - Differences for '+ collectors[0].replace('Web', 'W') +' vs. '+ ', '.join([c.replace('Web', 'W').replace('RTR', 'R') for c in collectors[1:]]) + \
        ' - ' + start + end, y=y)
    names = list(nf_diff.columns.values)
    for i,c in enumerate(names):
        fmt = '%Y-%m-%d' if not int(tex) else '%m-%d'
        # Deduplicated date labels; only every third label is shown.
        lab = list(set([pandas.to_datetime(str(l)).strftime(fmt) for l in list(nf_diff.index.values)]))
        ax[i].set_xticklabels(sorted(lab)[::3]); ax[i].grid(axis='y')
        # Only the bottom subplot keeps its x tick labels.
        if(i != len(names)-1): ax[i].tick_params(labelbottom='off')
        nf_diff[c].plot(y=names, kind='line', ax=ax[i], label='notfound')
        iv_diff[c].plot(y=names, kind='line', ax=ax[i], label='invalid')
        v_diff[c].plot(y=names, kind='line', ax=ax[i], label='valid')
        ax_offset = 1.02 if int(tex) else 1.01
        ax[i].set_xlabel(c,rotation=270); ax[i].xaxis.set_label_coords(ax_offset,.90)
        plt.setp(ax[i].xaxis.get_majorticklabels(), rotation=0); ax[i].minorticks_off()
        for t in ax[i].xaxis.get_major_ticks():
            t.label1.set_horizontalalignment('center')
        lgd_y = -.8 if int(tex) else -.7
        lgd = ax[i].legend(bbox_to_anchor=(0., lgd_y, 1., lgd_y), loc=8, ncol=3, mode='expand', borderaxespad=0)
    y_offset = -.05 if int(tex) else -.03; ax1.grid(False); ax1.get_yaxis().set_label_coords(y_offset,0.5)
    plt.ylabel('Differences between the validation results of BGP announcements [#prefixes]')
    ax = plt.gca().get_xaxis().set_label_coords(0.5,-.05); plt.xlabel('Timestamp'); fig.tight_layout()
    ## Export Plot
    if not os.path.exists(archive_an + '/Plots/'): os.makedirs(archive_an + '/Plots/')
    dump_pdf = dump_file.split('/')[-1].replace('csv','pdf')
    print '\nPlot: ' + archive_an + '/Plots/' + 'plots_bgp_impact_' + dump_pdf
    fig.savefig(archive_an + '/Plots/' + 'plots_bgp_impact_' + dump_pdf, bbox_inches='tight')


if __name__ == '__main__':
    main()
• You can cancel your order within 14 working days. In order to do this we require written notification of your intent to cancel no later than 14 days after the order has been placed. This can be in the form of an email or letter and we will confirm receipt of this notification once received. • If goods have already been received and are being returned, please send them back along with clear information detailing who they were returned by (to include order number, name and contact details). These need to be returned to us within 14 days of you notifying us of your intention to cancel the order. • When cancelling an order you are responsible for the delivery cost incurred to return the goods – please note that aerosols may not be accepted by some carriers. • Products which are custom made (such as colour matched products) cannot be cancelled but please note that this does not affect your statutory rights (see below). Please contact our customer service team in the first instance. • You have the right to return your goods for a refund within 30 days of receiving them. • If you wish to return a product because you no longer want it you will need to cover the delivery costs associated with returning the item. The amount of the refund will be determined once we have confirmed that the product is in good condition once received back (items need to be unused and in their original packaging). Please note that aerosols, or kits containing aerosols, cannot be sent via Royal Mail. If you would like more information on this please call our customer service team. • Products which are custom made (such as colour matched products) cannot be refunded but please note that this does not affect your statutory rights (see below). Please contact our customer service team in the first instance. • Statutory rights – in the event that a product does not perform as stated, a full refund will be offered within 6 months of purchase.
If the goods are required to be returned the cost will be covered by Gliptone Leathercare. • Please note that if a product does not perform because it was used on the wrong type of leather then we are unable to take responsibility for this. If in any doubt our customer service team are always happy to advise on all aspects of leather care and which products are suitable.
# -*- coding: UTF-8 -*- import os import json import logging import time import copy import tornado from sqlalchemy.orm import query,aliased from sqlalchemy import and_,or_,desc,asc from sqlalchemy import func from sqlalchemy.orm.exc import NoResultFound import async import static_config import database import config import util import callback_lib import mm from controller import BaseHandler from controller import shell_lib from model.instance import Instance from model.host_group_var import Host,Group,GroupHost,HostVar,GroupVar from model.task import Task from model.services import Service from model.callback import CallBack app_log = logging.getLogger("tornado.application") class AdminHandler(BaseHandler): @tornado.web.authenticated def get(self): user = self.get_current_user(); if user['type'] != 0 : self.ret("error","this user is not admin") return self.render("admin.html") class AdminBackHandler(BaseHandler): @tornado.web.authenticated def get(self , path): user = self.get_current_user(); if user['type'] != 0 : self.ret("error","this user is not admin") return if hasattr(self, path) : fun = getattr(self, path); if callable(fun): apply(fun) else: self.ret("error","unsupport action") @tornado.web.authenticated def post(self , path): self.get(path) def user(self): user = self.get_current_user(); ret = {"user":user,"menus":static_config.adminmenus} self.ret("ok", "", ret); #获取所有服务的静态信息 def services_info(self): session = database.getSession() active = [] for service in session.query(Service): if service.status == Service.STATUS_ACTIVE : active.append(service.service) services_copy = copy.deepcopy(static_config.services) for temp in services_copy: if temp['name'] in active: temp['active'] = True; else: temp['active'] = False; #计算url if temp.has_key('web') : urls = [] for web in temp['web'] : port = "" for gv in session.query(GroupVar).filter(GroupVar.name == web['port']) : port = gv.value for instance in session.query(Instance).filter(Instance.role == web['role']) 
: url = {"role":web['role'],"host":instance.host,"port":port} urls.append(url) temp['urls'] = urls; else: temp['urls'] = [] #特殊规则 #根据dfs_namenode_support_allow_format 配置 控制是否放出format参数 if temp['name'] == 'hdfs' : should_format = database.get_service_conf(session,'hdfs','dfs_namenode_support_allow_format') if should_format != None and should_format != 'true' : wi = 0 find = False; for action in temp['actions']: if action['name'] == 'format': find = True break; wi = wi +1 if find: del temp['actions'][wi] ret = { "services" : services_copy , "role_check_map" : static_config.role_check_map } session.close() self.ret("ok", "", ret); def service_info(self): service = self.get_argument("service") ret = { "name": service,"instances" : self.get_instance(service),"summary":self.get_service_summary(service) } self.ret("ok", "", ret) def get_instance(self,service): session = database.getSession() instances = session.query(Instance).filter(Instance.service == service ) ret = [] for instance in instances: ret.append(instance.format()) session.close() return ret; def get_service_summary(self,service): session = database.getSession() ret = {} for role in static_config.get_role_from_service(service): ret[role] = {} for instance in session.query(Instance).filter(Instance.service==service): inst = instance.format() if not ret[inst["role"]].has_key(inst["health"]) : ret[inst["role"]][inst["health"]] = 0 ret[inst["role"]][inst["health"]] += 1 session.close() return ret; #获取所有的机器和组 def group_host_list(self): session = database.getSession() groups = session.query(Group) ret={} temp=[] for group in groups: temp.append( {"name" : group.group}); ret["groups"]=temp hosts = session.query(Host).filter(Host.status==Host.STATUS_READY) temp=[]; for host in hosts: temp.append( {"name" : host.hostname}); ret["hosts"]=temp session.close() self.ret("ok", "", ret); #获取配置变量的接口 兼容组变量和机器变量,机器变量不过滤机器名称 def conf_var(self): service = self.get_argument("service") group = self.get_argument("group","all") 
showType = self.get_argument("showType") temp = [] session = database.getSession() if showType=="group": groupVars = session.query(GroupVar).filter(and_( GroupVar.service == service , GroupVar.group == group ) ) for groupVar in groupVars: temp.append( groupVar.format() ); else: hostVars = session.query(HostVar).filter( HostVar.service == service ) for hostVar in hostVars: temp.append( hostVar.format() ); session.close() self.ret("ok", "", {"conf":temp}) #获取fair scheduler的信息 def fair_scheduler_config(self): session = database.getSession() #获取队列 queues = database.get_service_conf(session,"yarn","fair_scheduler_queues") yarn_app_mapreduce_am_resource_mb = database.get_service_conf(session,"yarn","yarn_app_mapreduce_am_resource_mb") mapreduce_map_memory_mb = database.get_service_conf(session,"yarn","mapreduce_map_memory_mb") mapreduce_reduce_memory_mb = database.get_service_conf(session,"yarn","yarn_app_mapreduce_am_resource_mb") #计算node nodes = 0; node = [] for instance in session.query(Instance).filter(Instance.role == "nodemanager"): nodes = nodes + 1 node.append(instance.host) session.query() node_memory = database.get_conf_from_host(session,node,"yarn","yarn_nm_resource_memory_mb") #计算host total_memory = 0; for (node,memory) in node_memory.items(): total_memory = total_memory + int(memory) self.ret("ok","",{"fair_scheduler_queues":queues,"yarn_app_mapreduce_am_resource_mb":yarn_app_mapreduce_am_resource_mb, "mapreduce_map_memory_mb":mapreduce_map_memory_mb,"mapreduce_reduce_memory_mb":mapreduce_reduce_memory_mb, "total_memory":total_memory,"nodes":nodes,"node_memory":node_memory }) #保存 修改 删除 分组变量或者机器变量 #TODO增加区分是否第一次插入 def save_conf_var(self): service = self.get_argument("service") showType = self.get_argument("showType") group = self.get_argument("group","") host = self.get_argument("host","") name = self.get_argument("name") value = self.get_argument("value") type = self.get_argument("type") text = self.get_argument("text","") showdel = 
self.get_argument("del","") self.save_var_todb(service,showType,group,host,name,value,type,text,showdel) self.ret("ok", "", {}) def save_var_todb(self,service,showType,group,host,name,value,type,text,showdel=""): value = str(value) session = database.getSession() if showType=="group": groupVar = GroupVar(group,service,name,value,type,text) if showdel=="del": for groupVar in session.query(GroupVar).filter( and_( GroupVar.service == service , GroupVar.group == group , GroupVar.name == name )) : session.delete(groupVar) session.commit() else: session.merge(groupVar) session.commit() else: hostVar = HostVar(host,service,name,value,type,text) if showdel=="del": for hostVar in session.query(HostVar).filter( and_( HostVar.service == service , HostVar.host == host , HostVar.name == name )) : session.delete(hostVar) session.commit() else: session.merge(hostVar) session.commit() session.close() # 提交一个执行任务 # 当前直接把收到start的instance,标记为start状态 # 当前直接把收到stop的instance,标记为stop状态 # TODO 加入starting stopping 状态进行检查 # def send_action(self): taskType = self.get_argument("taskType","ansible") service = self.get_argument("service") actionType = self.get_argument("actionType") instances = self.get_argument("instances","") taskName = self.get_argument("taskName") running_id = [] session = database.getSession() #在执行action之前,检查角色的数量是不是符合要求 #如果不符合,给出提示 ret_msg = [] #角色数量检查 (check,warn_msg) = self.check_role_num_by_service(session, service, "It could make the task fail.") if not check: ret_msg += warn_msg if actionType=="service": #针对服务的操作 self.update_with_service_action(session,service,taskName) taskid = database.build_task(session,taskType,service,"","",taskName) running_id.append(taskid) elif actionType=="instance": for instance in instances.split(","): (host,role) = Instance.split_instance_name(instance) if host != None and role != None : self.update_with_instance_action(session,service,host,role,taskName) taskid = database.build_task(session,taskType,service,host,role,taskName) 
running_id.append(taskid) else: self.ret("error","split instance name %s error" % instance) return else: self.ret("error", "unsport actionType") return session.commit() session.close() #发送消息到MQ msg = ','.join([str(rid) for rid in running_id]) if not mm.send(msg): ret_msg.append("send message to worker error") ret_msg_str = "" if len(ret_msg) != 0: ret_msg_str = ",".join(ret_msg) self.ret("ok", ret_msg_str, {"runningid": running_id}) #对某个task发送kill命令 def kill_task(self): taskid = self.get_argument("taskid") #发送消息到MQ if not mm.kill_task(int(taskid)): self.ret("error", "killing task failed") else: self.ret("ok", "") #尝试重跑某个失败的task def rerun_task(self): taskid = self.get_argument("taskid") session = database.getSession() try: task = session.query(Task).filter(Task.id == taskid).one() except NoResultFound: return self.ret("error", "Cant't find the task with id: %s" % taskid) newTaskid = database.build_task(session,task.taskType,task.service,task.host,task.role,task.task) for cb in session.query(CallBack).filter(CallBack.taskid == taskid): callback_lib.add_callback(session,newTaskid,cb.func,json.loads(cb.params) ) #发送消息到MQ retMsg = "" msg = str(newTaskid) if not mm.send(msg): retMsg = "send message to worker error" app_log.info("send msg to mq") session.close() self.ret("ok", retMsg, {"taskid":newTaskid} ) def update_with_service_action(self,session,service,taskName): ''' 收一个action,进行状态更新 进行状态管理 ''' if taskName == "start" : session.query(Instance).filter(Instance.service==service) \ .update({Instance.status:Instance.STATUS_START,\ Instance.uptime:int(time.time())}) session.commit(); elif taskName == "stop" : session.query(Instance).filter(Instance.service==service) \ .update({Instance.status:Instance.STATUS_STOP, Instance.uptime:0}) session.commit(); if taskName == "aux" and service == "hive" : upload_path = config.aux_upload_dir aux_list = [] for file in os.listdir(upload_path): if file.startswith('.'): continue file_path = os.path.join(upload_path,file) if 
os.path.isfile(file_path): aux_list.append("file://" + file_path) session.query(GroupVar).filter( and_((GroupVar.service==service),(GroupVar.name=="hive_aux_jars_path")) ) \ .update({GroupVar.value : ','.join(aux_list) }) session.commit(); def update_with_instance_action(self,session,service,host,role,taskName): if taskName == "start" : session.query(Instance).filter(and_(Instance.service==service, \ Instance.host == host, Instance.role == role )) \ .update({Instance.status:Instance.STATUS_START, Instance.uptime:int(time.time())}) session.commit(); elif taskName == "stop" : session.query(Instance).filter(and_(Instance.service==service, \ Instance.host == host, Instance.role == role )) \ .update({Instance.status:Instance.STATUS_STOP, Instance.uptime:0}) session.commit(); #添加一个机器 #端口 用户名 密码 等都是空的 在异步连接的时候会补充这个 def add_host(self): hosts = self.get_argument("hosts") port = self.get_argument("port","") user = self.get_argument("user","") passwd = self.get_argument("passwd","") sudopasswd = self.get_argument("sudopasswd","") host_array = hosts.split(",") (check,msg) = self.check_add_host(host_array) if not check: self.ret("error", msg) return id = async.async_setup() async.async_run(async.add_host,(id,host_array,(user,port,passwd,sudopasswd))) self.ret("ok", "", {"runningId": [id]}) def check_add_host(self,hostArray): session = database.getSession() for host in hostArray: num = session.query(Host).filter(Host.hostname==host).count() if util.look_like_ip(host) : return (False,host+" look like ip, please check") if num != 0 : return (False,host+" is already in host table") session.close() return (True,"") #查询进度 def query_progress(self): idList = self.get_argument("id") ids = json.loads(idList) progress = 0; progress_msg = ""; session = database.getSession() for nid in ids: (pg,msg) = self.query_id_process(session,nid) if nid < 0: progress_msg += "SyncTask taskid: (%d) %s \n" % (-nid,msg); else: progress_msg += "Task taskid:(%d) %s \n" % (nid,msg); progress += int(pg) 
session.close() progress /= len(ids) self.ret("ok", "", {"id": ids,"progress":progress,"progressMsg":progress_msg } ) def query_id_process(self,session,nid): if nid <0 : #同步任务 return (async.async_get(nid,"progress","0"),async.async_pop(nid,"progressMsg","")) else: #worker 任务 queryTask = session.query(Task).filter(Task.id==nid) if queryTask.count() == 0: return (0,str(id)+" isn't exist") else: nowTask = queryTask[0] return (nowTask.getProcess(),nowTask.msg) #获取机器列表 def hosts(self): session = database.getSession() hosts = session.query(Host) ret={} for host in hosts: ret[host.hostname]={"info":host.format()} session.close() self.ret("ok", "", {"hosts":ret}) def set_rack(self): hosts = self.get_argument("hosts") rack = self.get_argument("rack") session = database.getSession() session.query(Host).filter(Host.hostname.in_(hosts.split(","))).update( { Host.rack:rack },synchronize_session="fetch" ) session.commit() session.close() self.ret("ok","") def del_host(self): hosts = self.get_argument("hosts") session = database.getSession() (check,msg)=self.check_del_host(session,hosts) if not check: self.ret("error", msg) return #删除机器 queryHosts = session.query(Host).filter(Host.hostname.in_(hosts.split(","))) for host in queryHosts: session.delete(host) #删除分组信息 queryGH = session.query(GroupHost).filter(GroupHost.hostname.in_(hosts.split(","))) for gh in queryGH: session.delete(gh) session.commit() session.close() self.ret("ok", "") def check_del_host(self,session,hosts): num = session.query(Instance).filter(Instance.host.in_(hosts.split(","))).count() if num != 0 : return (False,"some host find in instance.please remove them first") return (True,""+str(num)) #查询机器和角色的关系 def host_role(self): session= database.getSession() active=[] for service in session.query(Service): if service.status == Service.STATUS_ACTIVE : active.append(service.service) roles = {}; for service in static_config.services: if service["name"] in active: roles[service["name"]] = service["role"] hostroles = 
{} doing=[] #补充所有的host列表 hosts = session.query(Host).filter(Host.status == Host.STATUS_READY) for host in hosts: hostname = host.hostname; hostroles[hostname]={}; hostroles[hostname]['role']=[] instances = session.query(Instance) for instance in instances: role = instance.role host = instance.host hostroles[host]['role'].append(role) if instance.status == Instance.STATUS_SETUP or instance.status == Instance.STATUS_REMOVING : doing.append({"host":host,"role":role,"status":instance.status}) session.close() self.ret("ok", "",{"roles":roles,"hostroles":hostroles,"doing":doing}) #查询正在进行的服务 def doing(self): doing = [] session = database.getSession() instances = session.query(Instance) for instance in instances: role = instance.role host = instance.host if instance.status == Instance.STATUS_SETUP or instance.status == Instance.STATUS_REMOVING : doing.append({"host":host,"role":role,"status":instance.status}) session.close() self.ret("ok", "",{"doing":doing}) #添加一个服务 def add_service(self): service = self.get_argument("service") add_args = self.get_argument("add") var_args = self.get_argument("vars","[]") add_instance = json.loads(add_args) #设定一些必要的变量 varArgs = json.loads(var_args) for var in varArgs: self.save_var_todb(var['service'],var['showType'],var['group'], var['host'],var['name'],var['value'],var['type'], var['text']) #开启服务 new_ser = Service(service,Service.STATUS_ACTIVE) session = database.getSession() session.merge(new_ser) session.commit() session.close() self.inner_add_del_instance(add_instance, []) def can_del_service(self): service = self.get_argument("service") session = database.getSession() instances = []; for instance in session.query(Instance).filter(Instance.service == service): instances.append(instance.get_instance_name(instance.host, instance.role)) session.close() if len(instances) == 0: self.ret("ok", "") else: self.ret("error","some instance is exist please remove then first. 
instances:"+(",".join(instances))) def del_service(self): service = self.get_argument("service") #关闭服务 new_ser = Service(service,Service.STATUS_INIT) session = database.getSession() session.merge(new_ser) session.commit() session.close() self.ret("ok", "") #添加删除实例instance #删除提交任务,并且轮询任务是否执行完成 #如果任务执行完成,就删除 def add_del_instance(self): add_args = self.get_argument("add","[]") del_args = self.get_argument("del","[]") var_args = self.get_argument("vars","[]") #设定一些必要的变量 var_args = json.loads(var_args) for var in var_args: self.save_var_todb(var['service'],var['showType'],var['group'], var['host'],var['name'],var['value'],var['type'], var['text']) add_instance = json.loads(add_args) del_instance = json.loads(del_args) self.inner_add_del_instance(add_instance,del_instance) def inner_add_del_instance(self,add_instance,del_instance): session = database.getSession() ret_msg = [] (check,msg) = self.check_add_del_instance( session, add_instance, del_instance) if not check: self.ret("error", msg); return; else: if msg != "" and isinstance(msg, list) : ret_msg += msg elif isinstance(msg, str) : ret_msg.append(msg) add_running_id = self.add_instance( add_instance ) del_running_id = self.del_instance( del_instance ) for taskid in add_running_id: callback_lib.add_callback(session,taskid,"dealAddInstance") for taskid in del_running_id: callback_lib.add_callback(session,taskid,"dealDelInstance") session.close() #发送消息到MQ msg = ','.join([str(id) for id in (add_running_id + del_running_id)]) if not mm.send(msg): ret_msg.append("send message to worker error") self.ret("ok", '\n'.join(ret_msg), {"addRunningId":add_running_id,"delRunningId":del_running_id}) def add_instance(self,addInstance): #将add插入到instance表 session = database.getSession() for add_inst in addInstance: temp_service = static_config.get_service_from_role(add_inst["role"]) new_in = Instance(temp_service,add_inst["host"],add_inst["role"]) new_in.status = Instance.STATUS_SETUP session.merge(new_in) session.commit() #提交活动 
running_id=[] for add_inst in addInstance: temp_service = static_config.get_service_from_role(add_inst["role"]) taskid = database.build_task(session,"ansible",temp_service,add_inst["host"],add_inst["role"],"setup") running_id.append(taskid) session.commit() session.close() return running_id def del_instance(self,delInstance): #更新instance表的对应状态为removing session = database.getSession() for delInst in delInstance: session.query(Instance).filter(and_(Instance.host==delInst["host"],Instance.role==delInst["role"])) \ .update({Instance.status:Instance.STATUS_REMOVING}) session.commit() #提交卸载活动 running_id=[] for delInst in delInstance: tempService = static_config.get_service_from_role(delInst["role"]) #newTask = Task("ansible",tempService,delInst["host"],delInst["role"],"remove") #session.add(newTask) #session.flush(); #running_id.append(newTask.id) new_taskid = database.build_task(session,"ansible",tempService,delInst["host"],delInst["role"],"remove") running_id.append(new_taskid) session.commit() session.close() return running_id def check_add_del_instance(self,session,add_instance,del_instance): if len(add_instance) == 0 and len(del_instance) == 0: self.ret("error", "no instance need to add or del"); return; #角色数量检查 role_num_query = session.query(Instance.role,func.count(Instance.id)).group_by(Instance.role) role_num = {} for record in role_num_query: role_num[record[0]] = record[1] add_del_num = {} for add_inst in add_instance: num = session.query(Instance).filter(and_(Instance.host == add_inst["host"], \ Instance.role == add_inst["role"])).count() if num == 1: return (False,"instance is exist (%s,%s) " % ( add_inst["host"], add_inst["role"]) ) if add_del_num.has_key( add_inst["role"] ) : add_del_num[add_inst["role"]] = add_del_num[add_inst["role"]] + 1 else: add_del_num[add_inst["role"]] = 1; for del_inst in del_instance: query = session.query(Instance).filter(and_(Instance.host == del_inst["host"], \ Instance.role == del_inst["role"])) num = query.count(); if num == 
0 or num > 1: return (False,"instance is not exist ( %s,%s) " % ( del_inst["host"] ,del_inst["role"] )) else: for instance in query: if instance.status != "stop": return (False,"instance's status is not stop (%s,%s) " % ( del_inst["host"], del_inst["role"]) ) if add_del_num.has_key( del_inst["role"] ) : add_del_num[del_inst["role"]] = add_del_num[del_inst["role"]] - 1 else: add_del_num[del_inst["role"]] = -1; #合并role_num和add_del_num,然后计算角色数量是否符合 warn_msg = [] for (role,new_num) in add_del_num.items(): old_num = 0; if role_num.has_key(role) : old_num = role_num[role] (check,msg) = self.check_role_num( role, old_num+new_num ) if not check : warn_msg.append(msg) return (True, warn_msg) def check_role_num_by_service(self, session, service, add_more_msg=""): #角色数量检查 role_num_query = session.query(Instance.role,func.count(Instance.id)).group_by(Instance.role) checkResult = True warnMsg = [] for record in role_num_query: (check,msg) = self.check_role_num( record[0], record[1], add_more_msg ) if not check: checkResult = False warnMsg.append(msg) return ( checkResult, warnMsg ) def check_role_num(self, role, new_num, add_more_msg=""): """ 检查这个角色的数量是不是符合要求 """ if static_config.role_check_map.has_key( role ) : temp = static_config.role_check_map[role] if temp.has_key("min") and new_num < temp["min"] : return (False, "role %s 's number %d shoule more than or equal %d.%s" % ( role, new_num, temp["min"], add_more_msg) ) if temp.has_key("max") and new_num > temp["max"] : return (False, "role %s 's number %d shoule less than or equal %d.%s" % ( role, new_num, temp["max"], add_more_msg) ) if temp.has_key("equal") and new_num != temp["equal"] : return (False, "role %s 's number %d shoule equal to %d.%s" % ( role, new_num, temp["equal"], add_more_msg) ) return (True,"") #查询任务 #dir=desc&limit=50&offset=0&orderby=id&search=aaa def tasks(self): search = self.get_argument("search","") orderby = self.get_argument("orderby","") dir = self.get_argument("dir","") offset = 
self.get_argument("offset","") limit = self.get_argument("limit","") session = database.getSession() query = session.query(Task) if search != "" : search='%'+search+'%' query = query.filter(or_(Task.id.like(search),Task.taskType.like(search),Task.service.like(search), \ Task.host.like(search),Task.role.like(search),Task.task.like(search), \ Task.status.like(search), Task.result.like(search))) total_task = query.count(); if dir=="asc": query = query.order_by(asc(orderby))[int(offset):int(offset)+int(limit)] else : query = query.order_by(desc(orderby))[int(offset):int(offset)+int(limit)] task_list=[] for task in query: task_list.append(task.format()) session.close() self.ret("ok", "", {"tasks":task_list,"totalTask":total_task}) #查询单个任务的详细 def task_detail(self): taskid = self.get_argument("taskid") session = database.getSession() task = session.query(Task).filter(Task.id==taskid).first() tf = task.format() tf['msg'] = task.msg session.close() self.ret("ok", "", {"task":tf}) #查询机器和组的对应关系 def host_group(self): session = database.getSession() groups = {} hostgroups = {} for host in session.query(Host).filter(Host.status == Host.STATUS_READY ): hostgroups[host.hostname]={} hostgroups[host.hostname]["group"]=['all'] for group in session.query(Group): groups[group.group]=group.text for gh in session.query(GroupHost): hostgroups[gh.hostname]["group"].append(gh.group) session.close() self.ret("ok","",{"groups":groups,"hostgroups":hostgroups}) #保存组 def save_group(self): name = self.get_argument("group") text = self.get_argument("text","") toDel = self.get_argument("del","") nowGroup = Group(name,text) session = database.getSession() if toDel=="del": for group in session.query(Group).filter(Group.group==name): session.delete(group) session.commit() else: session.merge(nowGroup) session.commit() session.close() self.ret("ok","") #修改机器和分组的关系 def setup_group(self): add_args = self.get_argument("add") del_args = self.get_argument("del") add_groups = json.loads(add_args) del_groups 
= json.loads(del_args) session = database.getSession() for addGroup in add_groups: gh = GroupHost(addGroup['group'],addGroup['host']) session.merge(gh) session.commit for delGroup in del_groups: query = session.query(GroupHost).filter(and_(GroupHost.hostname==delGroup['host'],GroupHost.group==delGroup['group'])) for gh in query: session.delete(gh) session.commit() session.close() self.ret("ok","") #****************************************************** #获取所有的template文件 def template_list(self): templates={} for dir in os.listdir(config.template_dir): if dir.startswith('.') : continue; dirPath = os.path.join(config.template_dir,dir) if os.path.exists(dirPath) and os.path.isdir(dirPath): templates[dir] = [] for file in os.listdir(dirPath): filePath = os.path.join(dirPath,file) app_log.info(filePath) if os.path.exists(filePath) and os.path.isfile(filePath): file = file.replace(".j2","") templates[dir].append(file); templates[dir].sort() self.ret("ok","",{"templates":templates}) #获取指定的文件内容 def template_file(self): dir = self.get_argument("dir") file = self.get_argument("file") file = file+".j2" filePath = os.path.join(config.template_dir,dir,file) if os.path.exists(filePath) and os.path.isfile(filePath): content = open(filePath, "r").read() self.ret("ok","",{"content":content,"row":self.get_content_row(content)}) else: self.ret("error","file not exist") def template_build_file(self): ''' 获取生成的配置文件 ''' dir = self.get_argument("dir") file = self.get_argument("file") file = file+".j2" host = self.get_argument("host") (content,output) = shell_lib.get_template_file(host,dir,file); if content != "": self.ret("ok","",{"content":content,"row":self.get_content_row(content) }) else: self.ret("error",output) def template_download_file(self): ''' 生成整个服务的配置文件 ''' dir = self.get_argument("dir") host = self.get_argument("host") (url,output) = shell_lib.download_template_file(host,dir); if url != None and url != "": self.ret("ok","",{"url" : url }) else: self.ret("error",output) def 
get_content_row(self,content): count = 0 ; for c in content: if c == "\n" : count = count+1 return count; def save_template_file(self): dir = self.get_argument("dir") file = self.get_argument("file") file = file+".j2" content = self.get_argument("content") filePath = os.path.join(config.template_dir,dir,file) fd = open(filePath,"w") fd.write(content.encode('utf8')); time.sleep(2) self.ret("ok","") #**************************************************************************************** #manual获取数据库的表 def manual_metadata(self): table={} models = database.get_all_models() temp = {} for model in models: temp = {} temp['column']=[] temp['primary']=[] for col in model.__table__.columns: if col.primary_key: temp['primary'].append(col.name) else: temp['column'].append(col.name) table[model.__tablename__]=temp self.ret("ok","",{"table":table}) def manual_query(self): sql = self.get_argument("sql") session = database.getSession() result = session.execute(sql) data = [] for record in result: temp = []; for value in record: temp.append(value) data.append(temp); session.close() self.ret("ok","",{"column":result.keys(),"data":data}) #修改数据库 直接使用merge进行合并 def manual_execute(self): sql = self.get_argument("sql") session = database.getSession() result = session.execute(sql) session.commit() session.flush() session.close() self.ret("ok","") #以下是aux 相关的配置 def aux_get(self): upload_path = config.aux_upload_dir file_list = [] if not os.path.exists(upload_path) : os.makedirs(upload_path) for file in os.listdir(upload_path): if file.startswith('.'): continue file_path = os.path.join(upload_path,file) if os.path.isfile(file_path): size = os.path.getsize(file_path) file_list.append({"name":file,"size":size}) self.ret("ok","",{"files":file_list}) def aux_upload(self): upload_path = config.aux_upload_dir file_metas = self.request.files['file'] result = {} for meta in file_metas: filename = meta['filename'] filepath = os.path.join(upload_path,filename) with open(filepath,'wb') as up: 
up.write(meta['body']) result[filename] = "ok" self.ret("ok", "", {"result":result}) def aux_delete(self): upload_path = config.aux_upload_dir file_name = self.get_argument("filename") file_path = os.path.join(upload_path, file_name) try: os.remove(file_path) self.ret("ok","") except: self.ret("error","delete file %s error" % file_path)
To disable your entire profile, click on "Disable my account" in your settings: https://www.duolingo.com/settings/account. Please be careful as this will disable all your Duolingo-related accounts! Your account will be fully deactivated, but you will be able to re-activate it yourself by logging in again. If you deactivate your account, your username and email associated with your account will be reserved so that nobody else can sign up with those. Any messages in the forum posted by you under this account will appear as [deactivated user] unless you re-activate the account. Be advised that if you are a Duolingo Plus subscriber via the mobile apps, disabling your account will not suspend charges, as those are done by Apple or Google. Please refer to this article if you need to adjust your Plus payment settings on Apple and Google. If someone used your email address to sign up for Duolingo without your consent, please read "Someone created a Duolingo account with my email. How can I remove it and unsubscribe?" If you are looking to completely delete all your information and account, please read How can I delete my account and data? (your username will not be reserved for you if you choose this option). Someone created a Duolingo account with my email. How can I remove it and unsubscribe?
import importlib
from os import path, listdir

from app.plugins import logger

from .platform_extension import PlatformExtension

# Module-level cache: platform plugins are discovered on disk only once per
# process; subsequent calls to get_all_platforms() reuse this list.
platforms = None


def get_all_platforms():
    """Return all platform plugins, loading them from disk on first call."""
    global platforms
    if platforms is None:  # fix: compare to None with `is`, not `==` (PEP 8)
        platforms = read_platform_from_files()
    return platforms


def get_platform_by_name(platform_name):
    """Return the platform whose ``name`` equals *platform_name*, or None."""
    for platform in get_all_platforms():
        if platform.name == platform_name:
            return platform
    return None


def get_all_extended_platforms():
    """Return only the platforms implementing the PlatformExtension interface."""
    return [platform for platform in get_all_platforms()
            if isinstance(platform, PlatformExtension)]


def read_platform_from_files():
    """Instantiate one ``Platform`` object per script in the platforms dir.

    Each python script must define a class called ``Platform``. Scripts that
    fail to import or instantiate are skipped with a warning so one broken
    plugin cannot take down the whole loader.
    """
    platforms_path = get_platforms_path()
    platforms = []
    for platform_script in [platform for platform in listdir(platforms_path)
                            if path.isfile(path.join(platforms_path, platform))]:
        try:
            module_path = "plugins.cloudimport.platforms.{}".format(
                path.splitext(platform_script)[0])
            module = importlib.import_module(module_path)
            platform = (getattr(module, "Platform"))()
            platforms.append(platform)
        except Exception as e:
            logger.warning("Failed to instantiate platform {}: {}".format(
                platform_script, e))
    assert_all_platforms_are_called_differently(platforms)
    return platforms


def assert_all_platforms_are_called_differently(platforms):
    """Warn when two platforms share a name (name-based lookup would clash)."""
    platform_names = set()  # set membership instead of O(n) list scans
    for platform in platforms:
        if platform.name in platform_names:
            # ToDo: throw an error
            logger.warning('Found multiple platforms with the name {}. '
                           'This will cause problems...'.format(platform.name))
        else:
            platform_names.add(platform.name)


def get_platforms_path():
    """Return the absolute path of the ``platforms`` dir next to this file."""
    current_path = path.dirname(path.realpath(__file__))
    return path.abspath(path.join(current_path, 'platforms'))
US soldiers extinguish flames in a burning vehicle from a truck bomb explosion outside of the Rashad police station July 24, 2005, in the eastern neighborhood of Mashtal, Baghdad. It was the same lunatic corkscrew landing in the same little Lebanese plane, barreling down into the sandstorm of Baghdad airport. Piloting his 20-passenger twin-prop aircraft – from Flying Carpet Airlines, no less – Captain Hussam has three things on his mind: American helicopters, pilotless reconnaissance drones and incoming missiles. So we all scan the dun-coloured runway and terminals and the grotty slums beside the airport road for the tell-tale pink flame surviving pilots have sometimes caught sight of. But we landed safely and a scruffy bus took us to the terminal where I bid the customs officer Salaam Aleikum and he cheerfully asked me if I was a Muslim. “English,” I replied, which seemed to be good enough for him. He couldn’t break the airline security string on my bag so he waved me through. Then there came The Airport Road. We all need to put this in capitals these days. As my Iraqi fixer put it very well: “It’s really just a matter of luck.” Sometimes you glide safely across to the city, sometimes you get caught up in a firefight, sometimes – like poor Marla Ruzicka, the American girl who tried to count casualties – you are too close to a suicide attack. “I’m alive,” she cried just before she died. So we concentrate very hard on The Airport Road. The Americans have put a squadron of Bradley Fighting Vehicles on the central reservation and Iraqi army units on each side of the highway. But they still get bombed. Got Yer Licence There, Drive? So I’m not keen on stopping for Iraqi checkpoints. We drive across the Tigris, waved through by a policeman in a hood – cops and insurgents both wear hoods which makes life a little tiring – and arrive at the grim little hotel where The Independent has its office. Extra security now.
More armed men on the gates – most are Kurdish – and a guard who wants to search my bag. He, too, cannot cut the airline security string on my bag and waves me through. So a piece of string twice stopped my baggage being searched. Very comforting. My Iraqi fixer offers to buy groceries for me but I decide I’ve got to buy them myself. Once you let Iraqis buy your food on the streets, tell you what people are saying, come back to you with their observations, you have entered the pointless hothouse of hotel journalism, the reporter with the mobile phone trapped in his room who might as well be broadcasting or writing from Co Mayo. So we slink off down side streets to the Warda grocery store in Karada. It’s a broad street with lots of men languishing on the pavements, many holding mobiles. That’s how it’s done these days. A guy with a mobile sees an American patrol, a police unit, a foreigner, and squeezes the dial pad and a bunch of gunmen in a car not far away roar round to blow themselves up or kidnap the stranger – for money, for execution, for politics. The Egyptian diplomat murdered last month had stopped at a newspaper stand. So we say, “10 minutes”. That’s all I’ve got in the grocery store. Sugar, Arabic bread – a big queue so I squeeze through and grab two loaves and hear someone mutter ajnabi (foreigner) and I go for the Perrier bottles, the tinned fruits, the sardines, and I push up to the counter. Eight minutes. “Change in Iraqi money?” Doesn’t matter. Wrong reply. Too desperate. Should have said “Iraqi”. Three boxes of bottled water. Nine minutes. Your time is up. Out into the oven-like heat, into the car, a sharp turn to the right, into another alleyway. Ten minutes. Made it.
#! /usr/bin/env python3

"""Test bio_utils' entry_verifier

Copyright:

    test_entry_verifier.py test bio_utils' entry_verifier
    Copyright (C) 2015  William Brazelton, Alex Hyer

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

from bio_utils.verifiers import entry_verifier
from bio_utils.verifiers import FormatError
import os

__author__ = 'Alex Hyer'
__email__ = 'theonehyer@gmail.com'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '1.0.0'


def test_entry_verifier():
    """Test bio_utils' entry_verifier with multiple regex and subjects"""

    # Case 1: a well-formed entry. This call must complete without raising
    # a FormatError; if entry_verifier is broken, the exception propagates
    # and fails the test.
    regex_solo = r'^>.+{0}[ACGTU]+{0}$'.format(os.linesep)
    subject_solo = '>entry1{0}AGGAATTCGGCTAGCTTGAC{0}'.format(os.linesep)
    delimiter_solo = r'{0}'.format(os.linesep)
    entry_verifier([subject_solo], regex_solo, delimiter_solo)

    # Case 2: tab-delimited entries where part 1 of the first subject holds
    # an invalid base ('Z'); entry_verifier must pinpoint that exact part.
    regex_set = r'^@.+\t[ACGTURYKMSWBDHVNX]+\t\+.*\t.+\t$'
    subject_set = [r'@entry1\tAAGGATTCG\t+\t142584412\t'
                   r'@entry\tAGGTZCCCCG\t+\t1224355221\t',
                   r'@entry3\tGCCTAGC\t+\t6443284\t']
    delimiter_set = r'\t'
    try:
        entry_verifier(subject_set, regex_set, delimiter_set)
    except FormatError as error:
        assert error.template == r'^[ACGTURYKMSWBDHVNX]+$'
        assert error.subject == 'AGGTZCCCCG'
        assert error.part == 1

    # Case 3: literal '\r\n'-delimited entries where the second subject's
    # header ('4entry3') lacks the leading '@'; part 0 must be flagged.
    regex_set2 = r'^@.+\\r\\n[ACGTURYKMSWBDHVNX]+\\r\\n\+.*\\r\\n.+\\r\\n$'
    subject_set2 = [r'@entry1\r\nAAGGATTCG\r\n+\r\n142584412\r\n'
                    r'@entry\r\nAGGTGCCCCG\r\n+\r\n1224355221\r\n',
                    r'4entry3\r\nGCCTAGC\r\n+\r\n6443284\r\n']
    delimiter_set2 = r'\\r\\n'
    try:
        entry_verifier(subject_set2, regex_set2, delimiter_set2)
    except FormatError as error:
        assert error.template == r'^@.+$'
        assert error.subject == '4entry3'
        assert error.part == 0
Fully computerized digital wheel balancer that operates on one single, fully automatic cycle, distance, diameter and wheel width. Easy access to the different programs is available using the various function keys. Från 1 890kr per månad. WB 277N is a fully computerized digital wheel balancer that operates on one single, fully automatic cycle. Easy access to the different programs is available using the various function keys. The double display generates both weight and position readings. The balancer has the capacity to balance wheels up to 44" in diameter. WB 277N comes with an automatic width gauge. The Aludata program resolves all problems with balancing aluminium rims. The arm is positioned so that it indicates the exact position for application of adhesive weights. The hidden spoke program enables the operator to hide the weights behind the spokes of the wheel thus optimizing the quality/aesthetic ratio. 5gr Balance weights for alloy wheels. Package 100 pcs.
#!/usr/bin/env python3 # ---------------------------------------------------------------------------- # Copyright (c) 2013--, Qiyun Zhu and Katharina Dittmar. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import re from os import remove, makedirs, cpu_count from os.path import join, isdir, isfile from shutil import which, rmtree from tempfile import mkdtemp from time import time, sleep from math import log from urllib.parse import quote from urllib.request import urlopen, HTTPError, URLError from hgtector.util import ( timestamp, load_configs, get_config, arg2bool, list_from_param, file2id, id2file_map, read_taxdump, read_input_prots, read_prot2taxid, get_product, seqid2accver, write_fasta, run_command, contain_words, is_latin, is_capital, is_ancestral, taxid_at_rank) description = """batch sequence homology searching and filtering""" arguments = [ 'basic', ['-i|--input', 'input protein set file, or directory where one or more ' 'input files are located', {'required': True}], ['-o|--output', 'directory where search results are to be saved', {'required': True}], ['-m|--method', 'search method', {'choices': ['auto', 'diamond', 'blast', 'remote', 'precomp'], 'default': 'auto'}], ['-s|--precomp', 'file or directory of precomputed search results (when ' 'method = precomp)'], 'database', ['-d|--db', 'reference protein sequence database'], ['-t|--taxdump', 'directory of taxonomy database files (nodes.dmp and ' 'names.dmp)'], ['--taxmap', 'sequence Id to taxId mapping file (not necessary if ' 'protein database already contains taxonomy)'], 'search behaviors', ['-k|--maxhits', 'maximum number of hits to preserve per query (0 for ' 'unlimited)', {'type': int}], ['--minsize', 'minimum length of query sequence (aa)', {'type': int}], ['--queries', 'number of queries per run (0 for whole sample)', {'type': 
int}], ['--maxchars', 'maximum number of characters per run (0 for unlimited)', {'type': int}], 'search cutoffs', ['--maxseqs', 'maximum number of sequences to return', {'type': int}], ['--evalue', 'maximum E-value cutoff', {'type': float}], ['--identity', 'minimum percent identity cutoff', {'type': int}], ['--coverage', 'minimum percent query coverage cutoff', {'type': int}], ['--extrargs', 'extra arguments for choice of search method'], 'taxonomic filters', ['--tax-include', 'include taxa under those taxIds'], ['--tax-exclude', 'exclude taxa under those taxIds'], ['--tax-unique', 'ignore more than one hit with same taxID', {'choices': ['yes', 'no']}], ['--tax-unirank', 'ignore more than one hit under same taxon at this ' 'rank'], ['--tax-capital', 'ignore taxon names that are not capitalized', {'choices': ['yes', 'no']}], ['--tax-latin', 'ignore species names that are not Latinate', {'choices': ['yes', 'no']}], ['--tax-block', 'ignore taxon names containing those words'], 'local search behaviors', ['-p|--threads', 'number of threads (0 for all CPU cores)', {'type': int}], ['--tmpdir', 'temporary directory'], ['--diamond', 'diamond executable'], ['--blastp', 'blastp executable'], ['--blastdbcmd', 'blastdbcmd executable'], 'remote search behaviors', ['--algorithm', 'remote search algorithm'], ['--retries', 'maximum number of retries per search', {'type': int}], ['--delay', 'seconds between two search requests', {'type': int}], ['--timeout', 'seconds before program gives up waiting', {'type': int}], ['--entrez', 'entrez query text'], ['--server', 'remote search server URL'], 'self-alignment options', ['--aln-method', 'self-alignment method', {'choices': ['auto', 'native', 'fast', 'lookup', 'precomp']}], ['--aln-precomp', 'file or directory of precomputed sequence Id to score ' 'maps (when self-alignment method = precomp)'], ['--aln-server', 'remote align server URL'], 'remote fetch options', ['--fetch-enable', 'whether to enable remote fetch', {'choices': ['auto', 
'yes', 'no']}], ['--fetch-queries', 'maximum number of query entries per search'], ['--fetch-retries', 'maximum number of retries per search'], ['--fetch-delay', 'seconds between two fetch requests', {'type': int}], ['--fetch-timeout', 'seconds before program gives up waiting', {'type': int}], ['--fetch-server', 'remote fetch server URL'], ] class Search(object): def __init__(self): self.arguments = arguments self.description = description def __call__(self, args): print(f'Homology search started at {timestamp()}.') # load configurations self.cfg = load_configs() # read and validate arguments self.args_wf(args) # read and validate input data self.input_wf() # perform homology search for each sample for sid, sample in sorted(self.data.items()): if 'done' in sample: continue prots = sample['prots'] print(f'Batch homology search of {sid} started at {timestamp()}.') # collect sequences to search id2idx = {} seqs = [] for i, prot in enumerate(prots): if 'hits' in prot: continue id_ = prot['id'] seqs.append((id_, prot['seq'])) id2idx[id_] = i print(f'Number of queries: {len(seqs)}.') # divide sequences into batches batches = ([seqs] if self.method == 'precomp' else self.subset_seqs(seqs, self.queries, self.maxchars)) # run search for each batch n = 0 for batch in batches: # batch homology search res = self.search_wf( batch, self.pcmap[sid] if self.method == 'precomp' else None) # update taxIds of hits self.taxid_wf(res) # update taxonomic information of new taxIds self.taxinfo_wf(res) # perform taxonomy-based filtering self.taxfilt_wf(res) # update samples with search results indices = [id2idx[x[0]] for x in batch] self.update_search_results(prots, res, set(indices)) # perform self-alignment seqs2a = [x for x in batch if 'score' not in prots[ id2idx[x[0]]]] if seqs2a: for id_, score in self.selfaln_wf(seqs2a, res).items(): prots[id2idx[id_]]['score'] = score # write search results to file with open(join(self.output, f'{sid}.tsv'), 'a') as f: self.write_search_results(f, 
                        prots, indices)
                n += len(batch)
                print(f' {n} queries completed.')
            print(f'Batch homology search of {sid} ({len(prots)} proteins) '
                  f'ended at {timestamp()}.')

        # clean up temporary directory only if we created it ourselves
        if hasattr(self, 'mkdtemp'):
            rmtree(self.tmpdir)

        print(f'Batch homology search finished at {timestamp()}.')

    """master workflows"""

    def args_wf(self, args):
        """Workflow for validating and setting arguments.

        Parameters
        ----------
        args : dict
            command-line arguments

        Notes
        -----
        Workflow:
        1. Load command-line arguments.
        2. Update arguments from configurations.
        3. Validate input and output directories.
        4. Determine search method and parameters.
        5. Determine taxonomy database and map.
        6. Determine self-alignment method and parameters.
        7. Determine remote fetch method and parameters.
        8. Print major settings if desired.
        """
        # load arguments: every CLI option becomes an instance attribute
        for key, val in vars(args).items():
            setattr(self, key, val)

        # check input directory and data
        if isfile(self.input):
            self.input_map = {file2id(self.input): self.input}
        elif isdir(self.input):
            self.input_map = {k: join(self.input, v) for k, v in id2file_map(
                self.input).items()}
            if len(self.input_map) == 0:
                raise ValueError(
                    f'No input data are found under: {self.input}.')
        else:
            raise ValueError(
                f'Invalid input data file or directory: {self.input}.')

        # check / create output directory
        makedirs(self.output, exist_ok=True)
        # previously generated result tables (resume support)
        self.prev_map = id2file_map(self.output, 'tsv')

        """determine search strategy"""

        # load search parameters from configuration (CLI takes precedence)
        get_config(self, 'evalue', 'search.evalue', float)
        for key in ('method', 'minsize', 'maxseqs', 'identity', 'coverage'):
            get_config(self, key, f'search.{key}')
        for key in ('diamond', 'blastp', 'blastdbcmd'):
            get_config(self, key, f'program.{key}')

        if self.method not in {'auto', 'diamond', 'blast', 'remote',
                               'precomp'}:
            raise ValueError(f'Invalid search method: {self.method}.')

        # look for precomputed search results
        if self.method == 'precomp' and not self.precomp:
            raise ValueError('Must specify location of pre-computed search '
                             'results.')
        if self.precomp:
            if isfile(self.precomp):
                if len(self.input_map) > 1:
                    raise ValueError('A directory of multiple pre-computed '
                                     'search result files is needed.')
                self.pcmap = {file2id(self.precomp): self.precomp}
            elif isdir(self.precomp):
                self.pcmap = {k: join(self.precomp, v) for k, v in id2file_map(
                    self.precomp).items()}
                if len(self.pcmap) == 0:
                    raise ValueError('Cannot locate any pre-computed search '
                                     f'results at: {self.precomp}.')
            else:
                raise ValueError('Invalid pre-computed search result file or '
                                 f'directory: {self.precomp}.')
            # precomputed results provided: auto resolves to precomp
            if self.method == 'auto':
                self.method = 'precomp'

        # check local search executables and databases
        diamond_db = self.check_diamond()
        blast_db = self.check_blast()

        # choose a local search method if available, or do remote search
        if self.method == 'auto':
            if diamond_db:
                self.method = 'diamond'
                self.db = diamond_db
            elif blast_db:
                self.method = 'blast'
                self.db = blast_db
            else:
                self.method = 'remote'

        # load method-specific arguments
        for key in ('queries', 'maxchars', 'extrargs'):
            get_config(self, key, f'{self.method}.{key}')

        # load remote search settings
        if self.method == 'remote':
            for key in ('db', 'algorithm', 'delay', 'timeout', 'entrez'):
                get_config(self, key, f'remote.{key}')
            get_config(self, 'server', 'server.search')

        # determine number of threads
        if self.method in ('diamond', 'blast') and not self.threads:

            # use all available CPUs if threads is set to zero or left empty
            self.threads = cpu_count()

            # do single-threading if CPU count not working
            if self.threads is None:
                print('WARNING: Cannot determine number of CPUs. Will do '
                      'single-threading instead.')
                self.threads = 1

            # apply BLAST CPU number cap
            if self.method == 'blast' and self.threads > 8:
                print('WARNING: BLAST can only use a maximum of 8 CPUs.')
                self.threads = 8

        # check / create temporary directory
        if self.method in ('diamond', 'blast'):
            dir_ = self.tmpdir
            if not dir_:
                self.tmpdir = mkdtemp()
                setattr(self, 'mkdtemp', True)  # mark for cleanup
            elif not isdir(dir_):
                raise ValueError(f'Invalid temporary directory: {dir_}.')

        """determine taxonomy database and filters"""

        # initialize protein-to-taxId map
        self.prot2tid = {}

        # assign taxonomy database
        for key in ('taxdump', 'taxmap'):
            get_config(self, key, f'database.{key}')

        if self.method != 'remote':

            # check local taxonomy database
            if not self.taxdump:
                print('WARNING: Local taxonomy database is not specified. '
                      'Will try to retrieve taxonomic information from remote '
                      'server.')
            elif not isdir(self.taxdump):
                raise ValueError(
                    f'Invalid taxonomy database directory: {self.taxdump}.')
            else:
                for fname in ('names.dmp', 'nodes.dmp'):
                    if not isfile(join(self.taxdump, fname)):
                        raise ValueError(
                            f'Taxonomy database file {fname} is not found.')

            # check local taxonomy map
            if self.taxmap and not isfile(self.taxmap):
                raise ValueError(
                    f'Invalid protein-to-taxId map: {self.taxmap}.')

        # load taxonomic filters and convert to lists
        for key in ('include', 'exclude', 'block'):
            attr = f'tax_{key}'
            get_config(self, attr, f'taxonomy.{key}')
            setattr(self, attr, list_from_param(getattr(self, attr)))

        # load taxonomy switches
        for key in ('unique', 'unirank', 'capital', 'latin'):
            get_config(self, f'tax_{key}', f'taxonomy.{key}')

        """determine self-alignment strategy"""

        # load configurations
        get_config(self, 'aln_method', 'search.selfaln')
        get_config(self, 'aln_server', 'server.selfaln')

        met_ = self.aln_method
        if met_ not in ('auto', 'native', 'fast', 'lookup', 'precomp'):
            raise ValueError(f'Invalid self-alignment method: {met_}.')

        # look for precomputed self-alignment results
        if met_ == 'precomp' and not self.aln_precomp:
            raise ValueError('Must specify location of pre-computed self-'
                             'alignment scores.')
        pre_ = self.aln_precomp
        if pre_:
            if isfile(pre_):
                if len(self.input_map) > 1:
                    raise ValueError('A directory of multiple pre-computed '
                                     'self-alignment result files is needed.')
                self.aln_pcmap = {file2id(pre_): pre_}
            elif isdir(pre_):
                self.aln_pcmap = {k: join(pre_, v) for k, v in
                                  id2file_map(pre_).items()}
                if len(self.aln_pcmap) == 0:
                    raise ValueError('Cannot locate any pre-computed self-'
                                     f'alignment results at: {pre_}.')
            else:
                raise ValueError('Invalid pre-computed self-alignment result '
                                 f'file or directory: {pre_}.')
            if met_ == 'auto':
                self.aln_method = 'precomp'

        # use the same search method for self-alignment, otherwise use fast
        # NOTE(review): this tests the *original* value `met_`, so when
        # precomputed scores were provided above and met_ == 'auto', the
        # 'precomp' assignment appears to be overwritten here — confirm
        # whether this should test `self.aln_method` instead.
        if met_ in ('auto', 'native'):
            self.aln_method = 'fast' if self.method == 'precomp' else 'native'

        """determine fetch strategy"""

        # load configurations
        get_config(self, 'fetch_server', 'server.fetch')
        for key in ('enable', 'queries', 'retries', 'delay', 'timeout'):
            get_config(self, f'fetch_{key}', f'fetch.{key}')

        # determine remote or local fetching: auto enables remote fetch when
        # searching remotely or when no local taxonomy database is available
        if self.fetch_enable == 'auto':
            self.fetch_enable = 'yes' if (
                self.method == 'remote' or not self.taxdump) else 'no'

        """final steps"""

        # convert boolean values
        for key in ('tax_unique', 'tax_capital', 'tax_latin'):
            setattr(self, key, arg2bool(getattr(self, key, None)))

        # convert fractions to percentages
        for metric in ('identity', 'coverage'):
            val = getattr(self, metric)
            if val and val < 1:
                setattr(self, metric, val * 100)

        # print major settings
        print('Settings:')
        print(f'  Search method: {self.method}.')
        print(f'  Self-alignment method: {self.aln_method}.')
        print(f'  Remote fetch enabled: {self.fetch_enable}.')

    def check_diamond(self):
        """Check if DIAMOND is available.

        Returns
        -------
        str or None
            valid path to DIAMOND database, or None if not available

        Raises
        ------
        ValueError
            If settings conflict.
""" if self.method in ('diamond', 'auto'): if not self.diamond: self.diamond = 'diamond' if which(self.diamond): try: db = self.db or self.cfg['database']['diamond'] except KeyError: pass if db: if isfile(db) or isfile(f'{db}.dmnd'): return db elif self.method == 'diamond': raise ValueError(f'Invalid DIAMOND database: {db}.') elif self.method == 'diamond': raise ValueError( 'A protein database is required for DIAMOND search.') elif self.method == 'diamond': raise ValueError( f'Invalid diamond executable: {self.diamond}.') def check_blast(self): """Check if BLAST is available. Returns ------- str or None valid path to BLAST database, or None if not available Raises ------ ValueError If settings conflict. """ if self.method in ('blast', 'auto'): if not self.blastp: self.blastp = 'blastp' if which(self.blastp): try: db = self.db or self.cfg['database']['blast'] except KeyError: pass if db: if all(isfile(f'{db}.{x}') for x in ('phr', 'pin', 'psq')): return db elif self.method == 'blast': raise ValueError(f'Invalid BLAST database: {db}.') elif self.method == 'blast': raise ValueError( 'A protein database is required for BLAST search.') elif self.method == 'blast': raise ValueError(f'Invalid blastp executable: {self.blastp}.') def input_wf(self): """Master workflow for processing input data. Notes ----- Workflow: 1. Read proteins of each protein set. 2. Process search results from previous runs. 3. Import precomputed self-alignment scores. 4. Fetch sequences for proteins to be searched. 5. Drop sequences shorter than threshold. 6. Read or initiate taxonomy database. 7. Read protein-to-taxId map. """ # initiate data structure self.data = {} # read proteins of each sample print('Reading input proteins...') nprot = 0 for id_, fname in self.input_map.items(): prots = read_input_prots(fname) n = len(prots) if n == 0: raise ValueError(f'No protein entries are found for {id_}.') print(f' {id_}: {n} proteins.') self.data[id_] = {'prots': prots} nprot += n print(f'Done. 
Read {nprot} proteins from {len(self.data)} samples.') # process search results from previous runs ndone = 0 if self.prev_map: print('Processing search results from previous runs...') for id_ in self.data: if id_ not in self.prev_map: continue prots_ = self.data[id_]['prots'] n = len(prots_) m = len(self.parse_prev_results(join( self.output, self.prev_map[id_]), prots_)) if m == n: self.data[id_]['done'] = True ndone += m print(f'Done. Found results for {ndone} proteins, remaining ' f'{nprot - ndone} to search.') # check if search is already completed if (ndone == nprot): return # import precomputed self-alignment scores if self.aln_method == 'precomp' and hasattr(self, 'aln_pcmap'): n, m = 0, 0 print('Importing precomputed self-alignment scores...', end='') for sid, file in self.aln_pcmap.items(): # read scores scores = {} with open(file, 'r') as f: for line in f: line = line.rstrip('\r\n') if not line or line.startswith('#'): continue id_, score = line.split('\t') scores[id_] = float(score) # assign scores if not already for prot in self.data[sid]['prots']: if 'score' in prot: continue n += 1 id_ = prot['id'] try: prot['score'] = scores[id_] m += 1 except KeyError: pass print(' done.') print(f' Imported scores for {n} proteins.') dif = n - m if dif > 0: raise ValueError(f'Missing scores for {dif} proteins.') # fetch sequences for unsearched proteins seqs2q = self.check_missing_seqs(self.data) n = len(seqs2q) if n > 0: print(f'Sequences of {n} proteins are to be retrieved.') if self.method == 'blast': print('Fetching sequences from local BLAST database...', end='') seqs = self.blast_seqinfo(seqs2q) n = self.update_prot_seqs(seqs) print(' done.') print(f' Obtained sequences of {n} proteins.') seqs2q = self.check_missing_seqs(self.data) n = len(seqs2q) if n > 0: print(f' Remaining {n} proteins.') if n > 0 and self.fetch_enable == 'yes': print(f'Fetching {n} sequences from remote server...', flush=True) seqs = self.remote_seqinfo(seqs2q) n = 
self.update_prot_seqs(seqs) print(f'Done. Obtained sequences of {n} proteins.') seqs2q = self.check_missing_seqs(self.data) n = len(seqs2q) if n > 0: raise ValueError(f' Cannot obtain sequences of {n} proteins.') # drop short sequences if self.minsize: print(f'Dropping sequences shorter than {self.minsize} aa...', end='') for sid, sample in self.data.items(): for i in reversed(range(len(sample['prots']))): if len(sample['prots'][i]['seq']) < self.minsize: del sample['prots'][i] print(' done.') # read or initiate taxonomy database # read external taxdump if self.taxdump is not None: print('Reading local taxonomy database...', end='') self.taxdump = read_taxdump(self.taxdump) print(' done.') print(f' Read {len(self.taxdump)} taxa.') # read taxdump generated by previous runs elif (isfile(join(self.output, 'names.dmp')) and isfile(join(self.output, 'nodes.dmp'))): print('Reading custom taxonomy database...', end='') self.taxdump = read_taxdump(self.output) print(' done.') print(f' Read {len(self.taxdump)} taxa.') # build taxdump from scratch else: print('Initiating custom taxonomy database...', end='') self.taxdump = {'1': { 'name': 'root', 'parent': '1', 'rank': 'no rank'}} self.update_dmp_files(['1']) print(' done.') # record invalid taxIds to save compute self.badtaxids = set() def search_wf(self, seqs, file=None): """Master workflow for batch homology search. Parameters ---------- seqs : list of tuple query sequences (Id, sequence) file : str, optional file of precomputed results Returns ------- dict sequence Id to hit table Notes ----- Workflow: 1. Generate an Id-to-length map. 2. Import precomputed search results (if method = precomp). 3. Call choice of method (remote, blast or diamond) to search. 
""" # generate an Id-to-length map lenmap = {x[0]: len(x[1]) for x in seqs} # import pre-computed search results if self.method == 'precomp': print('Importing pre-computed search results...', end='') res = self.parse_hit_table(file, lenmap) print(' done.') print(f' Found hits for {len(res)} proteins.') # run de novo search elif self.method == 'remote': res = self.remote_search(seqs) sleep(self.delay) elif self.method == 'blast': res = self.blast_search(seqs) elif self.method == 'diamond': res = self.diamond_search(seqs) return res def taxid_wf(self, prots): """Master workflow for associating hits with taxIds. Parameters ---------- prots : dict of list proteins (search results) Notes ----- Workflow: 1. Update taxmap with taxIds directly available from hit tables. 2. Get taxIds for sequence Ids without them: 2.1. Query external taxon map, if available. 2.2. Do local fetch (from a BLAST database) if available. 2.3. Do remote fetch (from NCBI server) if available. 3. Delete hits without associated taxIds. 
""" added_taxids = set() # update taxon map with taxIds already in hit tables ids2q, added = self.update_hit_taxids(prots) added_taxids.update(added) # attempt to look up taxIds from external taxon map if ids2q and self.method != 'remote' and self.taxmap is not None: # load taxon map on first use (slow and memory-hungry) if isinstance(self.taxmap, str): print('Reading protein-to-TaxID map (WARNING: may be slow and ' 'memory-hungry)...', end='', flush=True) self.taxmap = read_prot2taxid(self.taxmap) print(' done.') print(f' Read {len(self.taxmap)} records.') ids2q, added = self.update_hit_taxids(prots, self.taxmap) added_taxids.update(added) # attempt to look up taxIds from local BLAST database if (ids2q and self.method in ('blast', 'precomp') and self.db and self.blastdbcmd): newmap = {x[0]: x[1] for x in self.blast_seqinfo(ids2q)} ids2q, added = self.update_hit_taxids(prots, newmap) added_taxids.update(added) # attempt to look up taxIds from remote server if ids2q and self.fetch_enable == 'yes': print(f'Fetching taxIds of {len(ids2q)} sequences from remote ' 'server...', flush=True) newmap = {x[0]: x[1] for x in self.remote_seqinfo(ids2q)} print(f'Done. Obtained taxIds of {len(newmap)} sequences.') ids2q, added = self.update_hit_taxids(prots, newmap) added_taxids.update(added) # drop hits whose taxIds cannot be identified n = len(ids2q) if n > 0: print(f'WARNING: Cannot obtain taxIds for {n} sequences. These ' 'hits will be dropped.') for hits in prots.values(): for i in reversed(range(len(hits))): if hits[i]['id'] in ids2q: del hits[i] def taxinfo_wf(self, prots): """Master workflow for associating hits with taxonomic information. Parameters ---------- prots : dict of list proteins (search results) Notes ----- Workflow: 1. Obtain a list of taxIds represented by hits. 2. List taxIds that are missing in current taxonomy database. 3. Get taxonomic information for taxIds by remote fetch, if available. 4. Append new taxonomic information to dump files, if available. 
5. Delete hits whose taxIds are not associated with information. """ # list taxIds without information tids2q = set() for prot, hits in prots.items(): for hit in hits: tid = hit['taxid'] if tid not in self.taxdump: tids2q.add(tid) tids2q = sorted(tids2q) # retrieve information for these taxIds if tids2q and self.fetch_enable == 'yes': print(f'Fetching {len(tids2q)} taxIds and ancestors from remote ' 'server...', flush=True) xml = self.remote_taxinfo(tids2q) added = self.parse_taxonomy_xml(xml) print(f'Done. Obtained taxonomy of {len(tids2q)} taxIds.') self.update_dmp_files(added) tids2q = [x for x in tids2q if x not in added] # drop taxIds whose information cannot be obtained if tids2q: print(f'WARNING: Cannot obtain information of {len(tids2q)} ' 'taxIds. These hits will be dropped.') tids2q = set(tids2q) for hits in prots.values(): for i in reversed(range(len(hits))): if hits[i]['taxid'] in tids2q: del hits[i] self.badtaxids.update(tid) def taxfilt_wf(self, prots): """Workflow for filtering hits by taxonomy. Parameters ---------- prots : dict of list of dict proteins (search results) Notes ----- Workflow: 1. Bottom-up filtering (delete in place) 1.1. Delete taxIds already marked as bad. 1.2. Delete taxIds whose ancestors are not in the "include" list. 1.3. Delete taxIds, any of whose ancestors is in the "exclude" list. 1.4. Delete empty taxon names. 1.5. Delete taxon names that are not capitalized. 1.6. Delete taxon names in which any word is in the "block" list. 2. Bottom-up filtering (mark and batch delete afterwards) 2.1. Mark taxIds that already appeared in hit table for deletion. 2.2. Mark taxIds whose ancestor at given rank already appeared in hit table for deletion. 
""" # filtering of taxIds and taxon names for id_, hits in prots.items(): # bottom-up filtering by independent criteria for i in reversed(range(len(hits))): todel = False tid = hits[i]['taxid'] taxon = self.taxdump[tid]['name'] if tid in self.badtaxids: todel = True elif (self.tax_include and not is_ancestral(tid, self.tax_include, self.taxdump)): todel = True elif (self.tax_exclude and is_ancestral(tid, self.tax_exclude, self.taxdump)): todel = True elif taxon == '': todel = True elif self.tax_capital and not is_capital(taxon): todel = True elif self.tax_block and contain_words(taxon, self.tax_block): todel = True elif self.tax_latin: tid_ = taxid_at_rank(tid, 'species', self.taxdump) if not tid_ or not is_latin(tid_): todel = True if todel: del hits[i] self.badtaxids.add(tid) # top-down filtering by sorting-based criteria todels = [] used = set() used_at_rank = set() for i in range(len(hits)): tid = hits[i]['taxid'] if self.tax_unique: if tid in used: todels.append(i) continue else: used.add(tid) if self.tax_unirank: tid_ = taxid_at_rank(tid, self.tax_unirank, self.taxdump) if tid_ and tid_ in used_at_rank: todels.append(i) else: used_at_rank.add(tid_) for i in reversed(todels): del hits[i] def selfaln_wf(self, seqs, prots=None): """Master workflow for protein sequence self-alignment. Parameters ---------- seqs : list of tuple query sequences (Id, sequence) prots : dict of list of dict, optional hit tables, only relevant when amet = lookup Returns ------- dict Id-to-score map Notes ----- Workflow: 1. If amet = lookup, just look up, and raise if any sequences don't have self-hits. 2. If amet = fast, run fast built-in algorithm on each sequence. 3. If amet = native, find the corresponding search method. 4. Run self-alignment in batches only when method = precomp, otherwise the query sequences are already subsetted into batches. 5. If some sequences don't have self hits in batch self-alignments, try to get them via single self-alignments. 6. 
        If some sequences still don't have self hits, do built-in
        algorithm, but note that the output may be slightly different from
        others.
        7. If some sequences still don't have self hits, raise.
        """
        res = []

        # just look up (will fail if some are not found)
        if self.aln_method == 'lookup':
            res = self.lookup_selfaln(seqs, prots)

        # use built-in algorithm
        elif self.aln_method == 'fast':
            for id_, seq in seqs:
                bitscore, evalue = self.fast_selfaln(seq)
                res.append((id_, bitscore, evalue))

        # use the same search method for self-alignment
        elif self.aln_method == 'native':

            # divide sequences into batches, when search results are
            # precomputed
            batches = ([seqs] if self.method != 'precomp' else
                       self.subset_seqs(seqs, self.queries, self.maxchars))

            # do self-alignments in batches to save compute
            for batch in batches:
                res_ = []

                # call search method
                if self.method == 'remote':
                    res_ = self.remote_selfaln(batch)
                    sleep(self.delay)
                elif self.method == 'blast':
                    res_ = self.blast_selfaln(batch)
                elif self.method == 'diamond':
                    res_ = self.diamond_selfaln(batch)

                # merge results
                res += res_

            # if some hits are not found, do single alignments
            left = set([x[0] for x in seqs]) - set([x[0] for x in res])
            if left:
                print('WARNING: The following sequences cannot be '
                      'self-aligned in a batch. Do individual alignments '
                      'instead.')
                print(' ' + ', '.join(left))
                for id_, seq in seqs:
                    if id_ not in left:
                        continue
                    res_ = None

                    # call search method
                    if self.method == 'remote':
                        res_ = self.remote_selfaln([(id_, seq)])
                        sleep(self.delay)
                    elif self.method == 'blast':
                        res_ = self.blast_selfaln([(id_, seq)])
                    elif self.method == 'diamond':
                        res_ = self.diamond_selfaln([(id_, seq)])

                    # if failed, do built-in alignment
                    if not res_:
                        print(f'WARNING: Sequence {id_} cannot be '
                              'self-aligned using the native method. '
                              'Do fast alignment instead.')
                        bitscore, evalue = self.fast_selfaln(seq)
                        res_ = [(id_, bitscore, evalue)]

                    # merge results
                    res += res_

        # check if all sequences have results
        left = set([x[0] for x in seqs]) - set([x[0] for x in res])
        if left:
            raise ValueError('Cannot calculate self-alignment metrics for '
                             'the following sequences:\n ' + ', '.join(
                                 sorted(left)))
        return {x[0]: x[1] for x in res}

    """input/output functions"""

    @staticmethod
    def subset_seqs(seqs, queries=None, maxchars=None):
        """Generate subsets of sequences based on cutoffs.

        Parameters
        ----------
        seqs : list of tuple
            sequences to subset (id, sequence)
        queries : int, optional
            number of query sequences per subset
        maxchars : int, optional
            maximum total length of query sequences per subset

        Returns
        -------
        list of list of tuple
            subsets

        Raises
        ------
        ValueError
            If any sequence exceeds maxchars.
        """
        if not maxchars:

            # no subsetting
            if not queries:
                return [seqs]

            # subsetting only by queries
            subsets = []
            for i in range(0, len(seqs), queries):
                subsets.append(seqs[i:i + queries])
            return subsets

        # subsetting by maxchars, and by queries if applicable
        subsets = [[]]
        cquery, cchars = 0, 0
        for id_, seq in seqs:
            chars = len(seq)
            if chars > maxchars:
                raise ValueError(f'Sequence {id_} exceeds maximum allowed '
                                 f'length {maxchars} for search.')
            # start a new subset once either cutoff would be exceeded
            if cchars + chars > maxchars or queries == cquery > 0:
                subsets.append([])
                cquery, cchars = 0, 0
            subsets[-1].append((id_, seq))
            cquery += 1
            cchars += chars
        return subsets

    # NOTE(review): `indices=set()` is a mutable default; it is only read,
    # never mutated, in the body, so this is benign — but a `frozenset()`
    # default would be safer.
    def update_search_results(self, prots, res, indices=set()):
        """Update proteins with new search results.
        Parameters
        ----------
        prots : list of dict
            proteins to update
        res : dict
            search results
        indices : set of int, optional
            indices of proteins to be updated
            if omitted, only proteins with hits will be updated
        """
        for i, prot in enumerate(prots):
            # skip proteins already carrying hits from a previous batch
            if 'hits' in prot:
                continue
            if indices and i not in indices:
                continue
            id_ = prot['id']
            if id_ in res:
                prot['hits'] = []
                n = 0
                for hit in res[id_]:
                    prot['hits'].append(hit)
                    n += 1
                    # cap the number of hits kept per query
                    if self.maxhits and n == self.maxhits:
                        break
            # searched in this batch but returned no hits: record empty list
            elif indices and i in indices:
                prot['hits'] = []

    @staticmethod
    def write_search_results(f, prots, indices=None):
        """Write search results to a file.

        Parameters
        ----------
        f : file handle
            file to write to (in append mode)
        prots : array of hash
            protein set
        indices : list of int, optional
            limit to these proteins
        """
        for i in indices if indices else range(len(prots)):
            prot = prots[i]
            f.write(f'# ID: {prot["id"]}\n')
            f.write(f'# Length: {len(prot["seq"])}\n')
            f.write(f'# Product: {prot["product"]}\n')
            f.write(f'# Score: {prot["score"]}\n')
            f.write(f'# Hits: {len(prot["hits"])}\n')
            for hit in prot['hits']:
                f.write('\t'.join([hit[x] for x in (
                    'id', 'identity', 'evalue', 'score', 'coverage',
                    'taxid')]) + '\n')

    @staticmethod
    def parse_prev_results(fp, prots):
        """Parse previous search results.

        Parameters
        ----------
        fp : str
            file containing search results
        prots : list
            protein records

        Returns
        -------
        list of str
            completed protein Ids
        """
        done = []
        with open(fp, 'r') as f:
            for line in f:
                if line.startswith('# ID: '):
                    done.append(line[6:].rstrip('\r\n'))
        doneset = set(done)
        # mark completed proteins with placeholder score/hits so they are
        # skipped by subsequent searches
        for prot in prots:
            if prot['id'] in doneset:
                prot['score'] = 0
                prot['hits'] = []
        return done

    @staticmethod
    def check_missing_seqs(data):
        """Get a list of proteins whose sequences remain to be retrieved.
        Parameters
        ----------
        data : dict
            protein sets

        Returns
        -------
        list of str
            Ids of proteins without sequences
        """
        res = set()
        for sid, sample in data.items():
            # samples already completed need no sequences
            if 'done' in sample:
                continue
            for prot in sample['prots']:
                if not prot['seq'] and 'hits' not in prot:
                    res.add(prot['id'])
        return sorted(res)

    def update_dmp_files(self, ids):
        """Write added taxonomic information to custom taxdump files.

        Parameters
        ----------
        ids : list of str
            added taxIds

        Notes
        -----
        Taxonomic information will be appended to nodes.dmp and names.dmp
        in the working directory.
        """
        fo = open(join(self.output, 'nodes.dmp'), 'a')
        fa = open(join(self.output, 'names.dmp'), 'a')
        # write in numeric taxId order, NCBI taxdump field format
        for id_ in sorted(ids, key=int):
            fo.write('\t|\t'.join((
                id_, self.taxdump[id_]['parent'],
                self.taxdump[id_]['rank'])) + '\t|\n')
            fa.write('\t|\t'.join((
                id_, self.taxdump[id_]['name'])) + '\t|\n')
        fo.close()
        fa.close()

    """sequence query functions"""

    def blast_seqinfo(self, ids):
        """Retrieve information of given sequence Ids from local BLAST
        database.

        Parameters
        ----------
        ids : list of str
            query sequence Ids

        Returns
        -------
        list of tuple
            (id, taxid, product, sequence)

        Notes
        -----
        When making database (using makeblastdb), one should do -parse_seqids
        to enable search by name (instead of sequence) and -taxid_map with a
        seqId-to-taxId map to enable taxId query.
""" # run blastdbcmd # fields: accession, taxid, sequence, title cmd = ' '.join(( self.blastdbcmd, '-db', self.db, '-entry', ','.join(ids), '-outfmt', '"%a %T %s %t"')) out = run_command(cmd)[1] # parse output res = [] header = True for line in out: # catch invalid database error if header: # letter case is dependent on BLAST version if line.lower().startswith('blast database error'): raise ValueError(f'Invalid BLAST database: {self.db}.') header = False # if one sequence Id is not found, program will print: # Error: [blastdbcmd] Entry not found: NP_123456.1 # if none of sequence Ids are found, program will print: # Error: [blastdbcmd] Entry or entries not found in BLAST # database if (line.startswith('Error') or 'not found' in line or line.startswith('Please refer to')): continue # limit to 4 partitions because title contains spaces x = line.split(None, 3) # if database was not compiled with -taxid_map, taxIds will be 0 if x[1] in ('0', 'N/A'): x[1] = '' # title will be empty if -parse_seqids was not triggered if len(x) == 3: x.append('') # parse title to get product else: x[3] = get_product(x[3]) res.append((x[0], x[1], x[3], x[2])) return res def remote_seqinfo(self, ids): """Retrieve information of given sequence Ids from remote server. Parameters ---------- ids : list of str query sequence Ids (e.g., accessions) Returns ------- list of tuple (id, taxid, product, sequence) Raises ------ ValueError All sequence Ids are invalid. Failed to retrieve info from server. """ return self.parse_fasta_xml(self.remote_fetches( ids, 'db=protein&rettype=fasta&retmode=xml&id={}')) def remote_fetches(self, ids, urlapi): """Fetch information from remote server in batch mode Parameters ---------- ids : list of str query entries (e.g., accessions) urlapi : str URL API, with placeholder for query entries Returns ------- str fetched information Notes ----- The function dynamically determines the batch size, starting from a large number and reducing by half on every other retry. 
This is because the NCBI server is typically busy and frequently runs into the "502 Bad Gateway" issue. To resolve, one may subset queries and retry. """ cq = self.fetch_queries # current number of queries cids = ids # current list of query Ids res = '' while True: batches = [cids[i:i + cq] for i in range(0, len(cids), cq)] failed = [] # batch fetch sequence information for batch in batches: try: res += self.remote_fetch(urlapi.format(','.join(batch))) print(f' Fetched information of {len(batch)} entries.', flush=True) sleep(self.fetch_delay) except ValueError: failed.extend(batch) # reduce batch size by half on each trial if failed and cq > 1: cids = failed cq = int(cq / 2) print('Retrying with smaller batch size...', flush=True) else: cids = [] break if cids: print(f'WARNING: Cannot retrieve information of {len(cids)} ' 'entries.') return res def remote_fetch(self, urlapi): """Fetch information from remote server. Parameters ---------- urlapi : str URL API Returns ------- str fetched information Raises ------ ValueError Fetch failed. """ url = f'{self.fetch_server}?{urlapi}' for i in range(self.fetch_retries): if i: print('Retrying...', end=' ', flush=True) sleep(self.fetch_delay) try: with urlopen(url, timeout=self.fetch_timeout) as response: return response.read().decode('utf-8') except (HTTPError, URLError) as e: print(f'{e.code} {e.reason}.', end=' ', flush=True) print('', flush=True) raise ValueError('Failed to fetch information from remote server.') def update_prot_seqs(self, seqs): """Update protein sets with retrieved sequences. Parameters ---------- seqs : list of tuple protein sequences (id, taxid, product, sequence) Returns ------- int number of proteins with sequence added Notes ------ Different protein sets may contain identical protein Ids. 
""" # hash proteins by id prots = {x[0]: (x[2], x[3]) for x in seqs} # if queries are accessions without version, NCBI will add version acc2ver = {} for id_, info in prots.items(): acc_ = re.sub(r'\.\d+$', '', id_) acc2ver[acc_] = id_ n = 0 for sid, sample in self.data.items(): for prot in sample['prots']: if prot['seq']: continue # try to match protein Id (considering de-versioned accession) id_ = prot['id'] if id_ not in prots: try: id_ = acc2ver[id_] except KeyError: continue if id_ not in prots: continue # update protein information for i, key in enumerate(['product', 'seq']): if not prot[key]: prot[key] = prots[id_][i] n += 1 return n @staticmethod def parse_fasta_xml(xml): """Parse sequence information in FASTA/XML format retrieved from NCBI server. Parameters ---------- xml : str sequence information in XML format Returns ------- list of str [id, taxid, product, sequence] Notes ----- NCBI EFectch record type = TinySeq XML .. _NCBI RESTful API: https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_ values_of__retmode_and/?report=objectonly .. _NCBI RESTful API example: https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=protein &rettype=fasta&retmode=xml&id=NP_454622.1,NP_230502.1,NP_384288.1 """ seqs = [] for m in re.finditer(r'<TSeq>(.+?)<\/TSeq>', xml, re.DOTALL): s_ = m.group(1) seq = [] for key in (('accver', 'taxid', 'defline', 'sequence')): m_ = re.search(r'<TSeq_%s>(.+)<\/TSeq_%s>' % (key, key), s_) seq.append(m_.group(1) if m_ else '') seq[2] = get_product(seq[2]) seqs.append(seq) return seqs """homology search functions""" def blast_search(self, seqs): """Run BLAST to search a sequence against a database. Parameters ---------- seqs : list of tuple query sequences (id, sequence) Returns ------- dict of list of dict hit table per query sequence Raises ------ ValueError If BLAST run fails. 
        Notes
        -----
        - In ncbi-blast+ 2.7.1, the standard tabular format (-outfmt 6) is:
          qaccver saccver pident length mismatch gapopen qstart qend sstart
          send evalue bitscore
        - In older versions, fields 1 and 2 are qseqid and sseqid. The
          difference is that an sseqid may read "ref|NP_123456.1|" instead of
          "NP_123456.1".
        - staxids are ;-delimited, will be "N/A" if not found or the database
          does not contain taxIds.
        - The BLAST database should ideally be prepared as:
          makeblastdb -in seqs.faa -dbtype prot -out db -parse_seqids \
            -taxid_map seq2taxid.txt
        - Unlike blastn, blastp does not have -perc_identity.
        """
        # write queries to a temporary FASTA file
        tmpin = join(self.tmpdir, 'tmp.in')
        with open(tmpin, 'w') as f:
            write_fasta(seqs, f)
        cmd = [self.blastp, '-query', tmpin, '-db', self.db]

        # optional parameters are only added when configured
        args = {x: getattr(self, x, None) for x in (
            'evalue', 'coverage', 'maxseqs', 'threads', 'extrargs')}
        if args['evalue']:
            cmd.extend(['-evalue', str(args['evalue'])])
        if args['coverage']:
            cmd.extend(['-qcov_hsp_perc', str(args['coverage'])])
        if args['maxseqs']:
            cmd.extend(['-max_target_seqs', str(args['maxseqs'])])
        if args['threads'] is not None:
            cmd.extend(['-num_threads', str(args['threads'])])
        if args['extrargs']:
            cmd.append(args['extrargs'])
        cmd.append('-outfmt "6 qaccver saccver pident evalue bitscore qcovhsp'
                   ' staxids"')
        ec, out = run_command(' '.join(cmd))
        remove(tmpin)
        if ec:
            raise ValueError(f'blastp failed with error code {ec}.')
        return self.parse_def_table(out)

    def diamond_search(self, seqs):
        """Run DIAMOND to search a sequence against a database.

        Parameters
        ----------
        seqs : list of tuple
            query sequences (id, sequence)

        Returns
        -------
        dict of list of dict
            hit table per query sequence

        Raises
        ------
        ValueError
            If DIAMOND run fails.
Notes ----- The DIAMOND database should ideally be prepared as: diamond makedb --in seqs.faa --db db \ --taxonmap prot.accession2taxid """ tmpin = join(self.tmpdir, 'tmp.in') with open(tmpin, 'w') as f: write_fasta(seqs, f) cmd = [self.diamond, 'blastp', '--query', tmpin, '--db', self.db, '--threads', str(self.threads), '--tmpdir', self.tmpdir] args = {x: getattr(self, x, None) for x in ( 'evalue', 'identity', 'coverage', 'maxseqs', 'extrargs')} if args['evalue']: cmd.extend(['--evalue', str(args['evalue'])]) if args['identity']: cmd.extend(['--id', str(args['identity'])]) if args['coverage']: cmd.extend([' --query-cover', str(args['coverage'])]) if args['maxseqs']: cmd.extend(['--max-target-seqs', str(args['maxseqs'])]) if args['extrargs']: cmd.append(args['extrargs']) cmd.extend(['--outfmt', '6 qseqid sseqid pident evalue bitscore qcovhsp staxids']) ec, out = run_command(' '.join(cmd), merge=False) remove(tmpin) if ec: raise ValueError(f'diamond failed with error code {ec}.') return self.parse_def_table(out) def remote_search(self, seqs): """Perform BLAST search through a remote server. Parameters ---------- seqs : list of tuple query sequences (id, sequence) Returns ------- dict of list of dict hit table per query sequence .. _NCBI's official reference of RESTful APIs: https://ncbi.github.io/blast-cloud/dev/using-url-api.html .. _NCBI's official sample Perl script: https://blast.ncbi.nlm.nih.gov/docs/web_blast.pl .. _NCBI has restrictions on the frequency and bandwidth of remote BLAST searches. See this page: https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE= BlastDocs&DOC_TYPE=DeveloperInfo .. _Instead, NCBI recommends setting up custom BLAST servers. 
See: https://ncbi.github.io/blast-cloud/ """ # generate query URL query = ''.join([f'>{id_}\n{seq}\n' for id_, seq in seqs]) url = f'{self.server}?CMD=Put&PROGRAM=blastp&DATABASE={self.db}' if self.algorithm: url += '&BLAST_PROGRAMS=' + self.algorithm if self.evalue: url += '&EXPECT=' + str(self.evalue) if self.maxseqs: url += '&MAX_NUM_SEQ=' + str(self.maxseqs) if self.entrez: url += '&EQ_TEXT=' + quote(self.entrez) if self.extrargs: url += '&' + self.extrargs.lstrip('&') url += '&QUERY=' + quote(query) print(f'Submitting {len(seqs)} queries for search.', end='', flush=True) trial = 0 while True: if trial: if trial == (self.retries or 0) + 1: raise ValueError( f'Remote search failed after {trial} trials.') print(f'Retry {trial} times.', end='', flush=True) sleep(self.delay) trial += 1 # get request Id with urlopen(url) as response: res = response.read().decode('utf-8') m = re.search(r'^ RID = (.*$)', res, re.MULTILINE) if not m: print('WARNING: Failed to obtain RID.') continue rid = m.group(1) print(f' RID: {rid}.', end='', flush=True) sleep(1) # check status url_ = f'{self.server}?CMD=Get&FORMAT_OBJECT=SearchInfo&RID={rid}' starttime = time() success = False while True: with urlopen(url_) as response: res = response.read().decode('utf-8') m = re.search(r'\s+Status=(.+)', res, re.MULTILINE) if not m: print('WARNING: Failed to retrieve remote search status.') break status = m.group(1) if status == 'WAITING': if time() - starttime > self.timeout: print('WARNING: Remote search timeout.') break print('.', end='', flush=True) sleep(self.delay) continue elif status in ('FAILED', 'UNKNOWN'): print('WARNING: Remote search failed.') break elif status == 'READY': if 'ThereAreHits=yes' not in res: print('WARNING: Remote search returned no result.') break success = True break else: print(f'WARNING: Unknown remote search status: {status}.') break if not success: continue sleep(1) # retrieve result url_ = (f'{self.server}?CMD=Get&ALIGNMENT_VIEW=Tabular' 
f'&FORMAT_TYPE=Text&RID={rid}') if self.maxseqs: url_ += (f'&MAX_NUM_SEQ={self.maxseqs}' f'&DESCRIPTIONS={self.maxseqs}') with urlopen(url_) as response: res = response.read().decode('utf-8') if '# blastp' not in res or '# Query: ' not in res: print('WARNING: Invalid format of remote search results.') continue print(' Results retrieved.') break # fields (as of 2018): query acc.ver, subject acc.ver, % identity, # alignment length, mismatches, gap opens, q. start, q. end, s. start, # s. end, evalue, bit score, % positives m = re.search(r'<PRE>(.+?)<\/PRE>', res, re.DOTALL) out = m.group(1).splitlines() lenmap = {id_: len(seq) for id_, seq in seqs} return self.parse_m8_table(out, lenmap) def parse_hit_table(self, file, lenmap=None): """Determine hit table type and call corresponding parser. Parameters ---------- file : str hit table file lenmap : dict, optional map of sequence Ids to lengths (only needed for m8) Returns ------- list of dict hit table """ ism8 = None lines = [] with open(file, 'r') as f: for line in f: line = line.rstrip('\r\n') if line and not line.startswith('#'): lines.append(line) if ism8 is None: x = line.split('\t') ism8 = len(x) > 8 return (self.parse_m8_table(lines, lenmap) if ism8 else self.parse_def_table(lines)) def parse_def_table(self, lines): """Parse search results in default tabular format. 
Parameters ---------- lines : list of str search result in default tabular format fields: qseqid sseqid pident evalue bitscore qcovhsp staxids Returns ------- dict of list of dict hits per query """ res = {} ths = {x: getattr(self, x, 0) for x in ( 'evalue', 'identity', 'coverage', 'maxhits')} for line in lines: line = line.rstrip('\r\n') if not line or line.startswith('#'): continue x = line.split('\t') # filter by thresholds if ths['evalue']: if x[3] != '*' and ths['evalue'] < float(x[3]): continue if ths['identity']: if x[2] != '*' and ths['identity'] > float(x[2]): continue if ths['coverage']: if x[5] != '*' and ths['coverage'] > float(x[5]): continue # pass if maximum targets reached if ths['maxhits']: if x[0] in res and ths['maxhits'] == len(res[x[0]]): continue # add hit to list res.setdefault(x[0], []).append({ 'id': seqid2accver(x[1]), 'identity': x[2], 'evalue': x[3], 'score': x[4], 'coverage': x[5], 'taxid': x[6] if x[6] not in {'', 'N/A', '0'} else ''}) return res def parse_m8_table(self, lines, lenmap): """Parse search results in BLAST's standard tabular format (m8). Parameters ---------- lines : list of str search result in BLAST m8 tabular format fields: qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore lenmap : dict map of sequence Ids to lengths (needed for calculating coverage) Returns ------- list of dict hit table Raises ------ ValueError Query Id not found in length map. 
""" res = {} ths = {x: getattr(self, x, 0) for x in ( 'evalue', 'identity', 'coverage', 'maxhits')} for line in lines: line = line.rstrip('\r\n') if not line or line.startswith('#'): continue x = line.split('\t') # calculate coverage if x[0] not in lenmap: raise ValueError(f'Invalid query sequence Id: {x[0]}.') try: cov = (int(x[7]) - int(x[6]) + 1) / lenmap[x[0]] * 100 except ValueError: cov = 0 # filter by thresholds if ths['evalue']: if x[10] != '*' and ths['evalue'] < float(x[10]): continue if ths['identity']: if x[2] != '*' and ths['identity'] > float(x[2]): continue if ths['coverage']: if cov and ths['coverage'] > cov: continue # pass if maximum targets reached if ths['maxhits']: if x[0] in res and ths['maxhits'] == len(res[x[0]]): continue # add hit to list res.setdefault(x[0], []).append({ 'id': seqid2accver(x[1]), 'identity': x[2], 'evalue': x[10], 'score': x[11], 'coverage': f'{cov:.2f}', 'taxid': ''}) return res """taxonomy query functions""" def update_hit_taxids(self, prots, taxmap={}): """Update hits with taxIds, and update master sequence Id to taxId map. 
        Parameters
        ----------
        prots : dict of list of dict
            proteins (e.g., search results)
        taxmap : dict, optional
            reference sequence Id to taxId map

        Returns
        -------
        list, list
            sequence Ids still without taxIds
            taxIds added to master map

        Notes
        -----
        NOTE(review): ``taxmap={}`` is a mutable default argument; it is only
        read here, never mutated, so it is safe, but ``taxmap=None`` would be
        the conventional spelling.
        """
        idsotid = set()  # proteins without taxIds
        newtids = set()  # newly added taxIds
        for prot, hits in prots.items():
            for hit in hits:
                id_, tid = hit['id'], hit['taxid']

                # taxId already in hit table: make sure the master map knows
                # about it, then move on
                if tid:
                    if id_ not in self.prot2tid:
                        self.prot2tid[id_] = tid
                        newtids.add(tid)
                    continue

                # taxId already in taxon map
                try:
                    hit['taxid'] = self.prot2tid[id_]
                    continue
                except KeyError:
                    pass

                # taxId in reference taxon map:
                try:
                    tid = taxmap[id_]
                    hit['taxid'] = tid
                    self.prot2tid[id_] = tid
                    newtids.add(tid)
                    continue
                except KeyError:
                    pass

                # not found
                idsotid.add(id_)
        return sorted(idsotid), sorted(newtids)

    def remote_taxinfo(self, ids):
        """Retrieve complete taxonomy information of given taxIds from remote
        server.

        Parameters
        ----------
        ids : list of str
            query taxIds

        Returns
        -------
        str
            taxonomy information in XML format

        Raises
        ------
        ValueError
            TaxID list is invalid.
        ValueError
            Failed to retrieve info from server.
        """
        res = self.remote_fetches(ids, 'db=taxonomy&id={}')

        # this error occurs when taxIds are not numeric
        if '<ERROR>ID list is empty' in res:
            raise ValueError('Invalid taxId list.')
        return res

    def parse_taxonomy_xml(self, xml):
        """Parse taxonomy information in XML format retrieved from NCBI
        server.

        Parameters
        ----------
        xml : str
            taxonomy information in XML format

        Returns
        -------
        list of str
            taxIds added to taxonomy database

        Notes
        -----
        The function will update taxonomy database.
        """
        added = []

        # get result for each query; group 5 captures the remainder of the
        # record (including the <LineageEx> block parsed below)
        p = re.compile(r'<Taxon>\n'
                       r'\s+<TaxId>(\d+)<\/TaxId>\n.+?'
                       r'\s+<ScientificName>([^<>]+)<\/ScientificName>.+?'
                       r'\s+<ParentTaxId>(\d+)<\/ParentTaxId>.+?'
                       r'\s+<Rank>([^<>]+?)<\/Rank>(.+?)\n'
                       r'<\/Taxon>', re.DOTALL | re.VERBOSE)
        for m in p.finditer(xml):
            tid = m.group(1)
            if tid in self.taxdump:
                continue

            # add query taxId to taxdump
            self.taxdump[tid] = {
                'name': m.group(2), 'parent': m.group(3), 'rank': m.group(4)}
            added.append(tid)

            # get lineage
            m1 = re.search(r'<LineageEx>(.+?)<\/LineageEx>', m.group(5),
                           re.DOTALL)
            if not m1:
                continue

            # move up through lineage (reversed: from the immediate parent
            # toward the root), linking each taxon to its parent and adding
            # any taxon not yet in the local taxdump
            p1 = re.compile(r'\s+<Taxon>\n'
                            r'\s+<TaxId>(\d+)<\/TaxId>\n'
                            r'\s+<ScientificName>([^<>]+)<\/ScientificName>\n'
                            r'\s+<Rank>([^<>]+)<\/Rank>\n'
                            r'\s+<\/Taxon>\n', re.DOTALL | re.VERBOSE)
            for m2 in reversed(list(p1.finditer(m1.group(1)))):
                tid_ = m2.group(1)
                pid = self.taxdump[tid]['parent']
                if pid == '':
                    self.taxdump[tid]['parent'] = tid_
                elif pid != tid_:
                    raise ValueError(
                        f'Broken lineage for {tid}: {pid} <=> {tid_}.')
                tid = tid_
                if tid in self.taxdump:
                    continue
                self.taxdump[tid] = {
                    'name': m2.group(2), 'parent': '', 'rank': m2.group(3)}
                added.append(tid)

            # stop at root (taxId 1 by NCBI convention)
            if self.taxdump[tid]['parent'] == '':
                self.taxdump[tid]['parent'] = '1'
        return added

    """self-alignment functions"""

    @staticmethod
    def lookup_selfaln(seqs, hits):
        """Look up self-alignment metrics of sequences from their hit tables.

        Parameters
        ----------
        seqs : list of tuple
            query sequences (id, sequence)
        hits : dict of list of dict
            hit tables

        Returns
        -------
        list of tuple
            (id, bitscore, evalue)
        """
        res = []
        for id_, seq in seqs:
            msg = (f'Cannot find a self-hit for sequence {id_}. Consider '
                   'setting self-alignment method to other than "lookup".')
            if id_ not in hits:
                raise ValueError(msg)
            found = False
            # a self-hit is one whose subject Id equals the query Id
            for hit in hits[id_]:
                if hit['id'] == id_:
                    res.append((id_, hit['score'], hit['evalue']))
                    found = True
                    break
            if not found:
                raise ValueError(msg)
        return res

    @staticmethod
    def fast_selfaln(seq):
        """Calculate self-alignment statistics using built-in algorithm.

        Parameters
        ----------
        seq : str
            query sequence

        Returns
        -------
        tuple of (str, str)
            bitscore and evalue

        Notes
        -----
        Statistics are calculated following:
        .. _Official BLAST documentation:
            https://www.ncbi.nlm.nih.gov/BLAST/tutorial/Altschul-1.html
        Default BLASTp parameters are assumed (matrix = BLOSUM62, gapopen =
        11, gapextend = 1), except for that the composition based statistics
        is switched off (comp-based-stats = 0).
        Result should be identical to that by DIAMOND, but will be slightly
        different from that by BLAST.
        """
        # BLOSUM62 is the default aa substitution matrix for BLAST / DIAMOND
        # (diagonal entries only, since a sequence aligned to itself always
        # matches)
        blosum62 = {'A': 4, 'R': 5, 'N': 6, 'D': 6, 'C': 9, 'Q': 5, 'E': 5,
                    'G': 6, 'H': 8, 'I': 4, 'L': 4, 'K': 5, 'M': 5, 'F': 6,
                    'P': 7, 'S': 4, 'T': 5, 'W': 11, 'Y': 7, 'V': 4}

        # calculate raw score (S)
        n, raw = 0, 0
        for c in seq.upper():
            try:
                n += 1
                raw += blosum62[c]

            # in case there are non-basic amino acids
            except KeyError:
                pass

        # BLAST's empirical values when gapopen = 11, gapextend = 1. See:
        # ncbi-blast-2.7.1+-src/c++/src/algo/blast/core/blast_stat.c, line
        # 268
        lambda_, K = 0.267, 0.041

        # calculate bit score (S')
        bit = (lambda_ * raw - log(K)) / log(2)

        # calculate e-value (E)
        e = n ** 2 * 2 ** -bit
        return f'{bit:.1f}', f'{e:.3g}'

    def blast_selfaln(self, seqs):
        """Run BLAST to align sequences to themselves.

        Parameters
        ----------
        seqs : list of tuple
            query sequences (id, sequence)

        Returns
        -------
        list of tuple
            (id, bitscore, evalue)
        """
        # the same file serves as both query and subject
        tmpin = join(self.tmpdir, 'tmp.in')
        with open(tmpin, 'w') as f:
            write_fasta(seqs, f)
        cmd = ' '.join((
            self.blastp, '-query', tmpin, '-subject', tmpin,
            '-num_threads', str(self.threads), '-outfmt', '6'))
        extrargs = getattr(self, 'extrargs', None)
        if extrargs:
            cmd += ' ' + extrargs
        ec, out = run_command(cmd)
        if ec:
            raise ValueError(f'blastp failed with error code {ec}.')
        remove(tmpin)
        return(self.parse_self_m8(out))

    def diamond_selfaln(self, seqs):
        """Run DIAMOND to align sequences to themselves.

        Parameters
        ----------
        seqs : list of tuple
            query sequences (id, sequence)

        Returns
        -------
        list of tuple
            (id, bitscore, evalue)
        """
        # generate temporary query file
        tmpin = join(self.tmpdir, 'tmp.in')
        with open(tmpin, 'w') as f:
            write_fasta(seqs, f)

        # generate temporary database (DIAMOND cannot search against a bare
        # FASTA file, unlike blastp's -subject)
        tmpdb = join(self.tmpdir, 'tmp.dmnd')
        cmd = ' '.join((
            self.diamond, 'makedb', '--in', tmpin, '--db', tmpdb,
            '--threads', str(self.threads), '--tmpdir', self.tmpdir))
        ec, out = run_command(cmd, merge=False)
        if ec:
            raise ValueError(f'diamond failed with error code {ec}.')

        # perform search
        cmd = ' '.join((
            self.diamond, 'blastp', '--query', tmpin, '--db', tmpdb,
            '--threads', str(self.threads), '--tmpdir', self.tmpdir))
        extrargs = getattr(self, 'extrargs', None)
        if extrargs:
            cmd += ' ' + extrargs
        ec, out = run_command(cmd, merge=False)
        if ec:
            raise ValueError(f'diamond failed with error code {ec}.')
        remove(tmpin)
        remove(tmpdb)
        return(self.parse_self_m8(out))

    def remote_selfaln(self, seqs):
        """Perform BLAST search through a remote server.

        Parameters
        ----------
        seqs : list of tuple
            query sequences (id, sequence)

        Returns
        -------
        list of tuple
            (id, bitscore, evalue)
        """
        # further split sequences into halves (to comply with URI length
        # limit; the query string appears twice in the URL, as QUERY and as
        # SUBJECTS)
        batches = self.subset_seqs(seqs, maxchars=int(
            (self.maxchars + 1) / 2)) if self.maxchars else [seqs]
        result = []
        for batch in batches:

            # generate query URL
            query = ''.join([f'>{id_}\n{seq}\n' for id_, seq in batch])
            query = quote(query)
            url = (f'{self.aln_server}?CMD=Put&PROGRAM=blastp&'
                   f'DATABASE={self.db}&QUERY={query}&SUBJECTS={query}')
            if self.extrargs:
                url += '&' + self.extrargs.lstrip('&')
            print(f'Submitting {len(batch)} queries for self-alignment.',
                  end='', flush=True)

            # submission / polling loop, mirroring remote_search
            trial = 0
            while True:
                if trial:
                    if trial == (self.retries or 0) + 1:
                        raise ValueError('Remote self-alignment failed after '
                                         f'{trial} trials.')
                    print(f'Retry {trial} times.', end='', flush=True)
                    sleep(self.delay)
                trial += 1

                # get request Id
                with urlopen(url) as response:
                    res = response.read().decode('utf-8')
                m = re.search(r'^ RID = (.*$)', res, re.MULTILINE)
                if not m:
                    print('WARNING: Failed to obtain RID.')
                    continue
                rid = m.group(1)
                print(f' RID: {rid}.', end='', flush=True)
                sleep(1)

                # check status
                url_ = (f'{self.aln_server}?CMD=Get&FORMAT_OBJECT=SearchInfo&'
                        f'RID={rid}')
                starttime = time()
                success = False
                while True:
                    with urlopen(url_) as response:
                        res = response.read().decode('utf-8')
                    m = re.search(r'\s+Status=(.+)', res, re.MULTILINE)
                    if not m:
                        print('WARNING: Failed to retrieve remote self-'
                              'alignment status.')
                        break
                    status = m.group(1)
                    if status == 'WAITING':
                        if time() - starttime > self.timeout:
                            print('WARNING: Remote self-alignment timeout.')
                            break
                        print('.', end='', flush=True)
                        sleep(self.delay)
                        continue
                    elif status in ('FAILED', 'UNKNOWN'):
                        print('WARNING: Remote self-alignment failed.')
                        break
                    elif status == 'READY':
                        if 'ThereAreHits=yes' not in res:
                            print('WARNING: Remote self-alignment returned no '
                                  'result.')
                            break
                        success = True
                        break
                    else:
                        print('WARNING: Unknown remote self-alignment status: '
                              f'{status}.')
                        break
                if not success:
                    continue
                sleep(1)

                # retrieve result
                url_ = (f'{self.aln_server}?CMD=Get&ALIGNMENT_VIEW=Tabular&'
                        f'FORMAT_TYPE=Text&RID={rid}')
                with urlopen(url_) as response:
                    res = response.read().decode('utf-8')
                if '# blastp' not in res or '# Query: ' not in res:
                    print('WARNING: Invalid format of remote self-alignment '
                          'results.')
                    continue
                print(' Results retrieved.')
                break
            m = re.search(r'<PRE>(.+?)<\/PRE>', res, re.DOTALL)
            out = m.group(1).splitlines()
            result += self.parse_self_m8(out)
        return result

    @staticmethod
    def parse_self_m8(lines):
        """Extract self-alignment results from m8 format table.

        Parameters
        ----------
        lines : list of str
            hit table in BLAST m8 format
            fields: qseqid sseqid pident length mismatch gapopen qstart qend
            sstart send evalue bitscore

        Returns
        -------
        list of tuple
            hit table (id, bitscore, evalue)
        """
        res = []
        used = set()
        for line in lines:
            x = line.rstrip('\r\n').split('\t')
            if x[0].startswith('#'):
                continue
            if len(x) < 12:
                continue
            # keep only self-hits (query Id equals subject Id), and only the
            # first such hit per query
            if x[0] != x[1]:
                continue
            if x[0] in used:
                continue
            res.append((x[1], x[11], x[10]))
            used.add(x[0])
        return res
Video: Watch Araw Gabi, one of the most famous and popular serials on Pinoy Tambayan. The episode of Araw Gabi for October 11 2018 has been released. Araw Gabi October 11 2018 on Pinoy TV. Watch the Pinoy TV show Araw Gabi October 11 2018 full episode in HD online. The official Pinoy channel streams Araw Gabi October 11 2018 online. For more information, subscribe to our Pinoy TV Channel website to watch the latest shows. Watch the Araw Gabi October 11 2018 full episode part by part in HD video quality. Here you can watch the daily Pinoy Tambayan as well as Pinoy Teleserye shows online. Watch and share our posts with your friends online. Our site will always be the first to have the episode, so please bookmark it.
## -*- mode: python -*-
## Lots of utility functions to abstract away platform differences.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import sys
import os
from shutil import copyfile, copytree, rmtree
from subprocess import CalledProcessError, call, STDOUT

try:
    from subprocess import check_output
except ImportError:
    import subprocess

    def check_output(*popenargs, **kwargs):
        r"""Run command with arguments and return its output as a byte string.

        Backported from Python 2.7 as it's implemented as pure python
        on stdlib.

        >>> check_output(['/usr/bin/python', '--version'])
        Python 2.6.2
        """
        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs,
                                   **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            error.output = output
            raise error
        return output


def hg(args):
    """Run a Mercurial command and return its stripped output.

    All errors are deemed fatal: a failing command re-raises
    CalledProcessError after printing a short notice.
    """
    full_command = ['hg']
    full_command.extend(args)
    try:
        output = check_output(full_command, env=os.environ,
                              universal_newlines=True)
        output = output.rstrip()
    except CalledProcessError:
        print('Failed to execute hg.')
        raise
    return output


def git(args):
    """Run a git command and return its stripped output (stderr merged in).

    All errors are deemed fatal: a failing command re-raises
    CalledProcessError.
    """
    full_command = ['git']
    full_command.extend(args)
    # stderr is folded into the returned output so callers see diagnostics
    output = check_output(full_command, env=os.environ, stderr=STDOUT,
                          universal_newlines=True)
    return output.rstrip()


def is_dirty():
    """Return True if the current working copy has uncommited changes.

    Not implemented yet; always raises NotImplementedError.
    (A Mercurial-based implementation would check whether `hg id -i`
    ends with '+'.)
    """
    raise NotImplementedError()


def git_version(pep440=True):
    """Return a somewhat readable version number from git.

    Looks like '0.1-6015-ga0a3769' if not pep440, else '0.1.6015'.
    Returns '' if git is unavailable or the call fails (best effort).
    """
    try:
        res = git(['describe', '--tags'])
        if pep440:
            return '.'.join(res.split('-')[:2])
        else:
            return res
    except Exception:
        # best effort: no git, no tags, or not a repository
        return ''


def git_revision():
    """Return the full git revision identifier of HEAD, e.g.
    a0a3769da32436c27df84d1b9b0915447aebf4d0.

    Returns '' if git is unavailable or the call fails (best effort).
    """
    try:
        return git(['rev-parse', 'HEAD'])
    except Exception:
        # best effort: no git or not a repository
        return ""


def run(directory, args):
    """Run an external command inside *directory*, restoring the previous
    working directory afterwards. Raises CalledProcessError on failure."""
    print("RUN\t%s in %s" % (" ".join(args), directory))
    oldwd = os.getcwd()
    try:
        os.chdir(directory)
        check_output(args, stderr=STDOUT, env=os.environ,
                     universal_newlines=True)
    except CalledProcessError as e:
        print("ERROR: return value=%i" % e.returncode)
        print(e.output)
        raise
    finally:
        os.chdir(oldwd)


def python(directory, args, env=None):
    """Run a Python script inside *directory*.

    Uses the interpreter named by the PYTHON environment variable when set,
    otherwise the interpreter executing this script (sys.executable).
    """
    print("PYTHON\t%s in %s" % (" ".join(args), directory))
    oldwd = os.getcwd()
    if os.environ.get('PYTHON') is not None:
        ## Use the Python interpreter specified in the PYTHON
        ## environment variable.
        full_command = [os.environ['PYTHON']]
    else:
        ## No interpreter specified. Use the Python interpreter that
        ## is used to execute this script.
        full_command = [sys.executable]
    full_command.extend(args)
    try:
        os.chdir(directory)
        check_output(full_command, stderr=STDOUT, env=os.environ,
                     universal_newlines=True)
    except CalledProcessError as e:
        print("ERROR: return value=%i" % e.returncode)
        print(e.output)
        raise
    finally:
        os.chdir(oldwd)


def rscript(directory, args, env=None):
    """Run an R script inside *directory*.

    Uses the interpreter named by the RSCRIPT environment variable when set,
    otherwise whatever 'Rscript' resolves to on PATH.
    """
    print("RSCRIPT\t%s in %s" % (" ".join(args), directory))
    oldwd = os.getcwd()
    if os.environ.get('RSCRIPT') is not None:
        ## Use the Rscript interpreter specified in the RSCRIPT
        ## environment variable.
        full_command = [os.environ['RSCRIPT']]
    else:
        ## No interpreter specified. Try to find an Rscript interpreter.
        full_command = ['Rscript']
    full_command.extend(args)
    try:
        os.chdir(directory)
        check_output(full_command, stderr=STDOUT, env=os.environ,
                     universal_newlines=True)
    except CalledProcessError as e:
        print("ERROR: return value=%i" % e.returncode)
        print(e.output)
        raise
    finally:
        os.chdir(oldwd)


def copy_file(source, destination):
    """Copy a single file, logging the operation."""
    print("COPY\t%s -> %s" % (source, destination))
    copyfile(source, destination)


def copy_tree(source_directory, destination_directory):
    """Recursively copy a directory tree.

    CAVEAT: this removes the destination tree if present!
    """
    if os.path.isdir(destination_directory):
        rmtree(destination_directory)
    print("COPY\t%s -> %s" % (source_directory, destination_directory))
    copytree(source_directory, destination_directory)


def write_file(string, destination):
    """Write *string* to the file at *destination*, logging the operation."""
    print("WRITE\t%s" % destination)
    with open(destination, 'w') as fd:
        fd.write(string)


def make(directory, target):
    """Run make to build a target inside *directory*.

    On Windows without Cygwin a dedicated makefile (Makefile_win_gcc.in) is
    installed as 'Makefile' first; elsewhere Makefile.in is used.
    """
    print("MAKE\t%s in %s" % (target, directory))
    oldwd = os.getcwd()
    try:
        os.chdir(directory)
        # prepare makefile(s)
        if ((('win32' in sys.platform) or ('win64' in sys.platform)) and
                ('cygwin' not in os.environ['PATH'])):
            # only if under Windows and without Cygwin, we need a specific
            # Windows makefile
            copy_file('Makefile_win_gcc.in', 'Makefile')
        else:
            copy_file('Makefile.in', 'Makefile')
        check_output(['make', target], stderr=STDOUT, env=os.environ,
                     universal_newlines=True)
    except CalledProcessError as e:
        print("ERROR: return value=%i" % e.returncode)
        print(e.output)
        raise
    finally:
        os.chdir(oldwd)


def expand_file(source, destination, dictionary):
    """Expand the $-placeholders in *source* using *dictionary* and write
    the result to *destination*. Unknown placeholders are left untouched
    (string.Template.safe_substitute)."""
    print("EXPAND\t%s to %s" % (source, destination))
    from string import Template
    with open(source, 'r') as fd:
        content = Template(fd.read())
    with open(destination, "w") as outfd:
        outfd.write(content.safe_substitute(dictionary))
Heat therapy is the soothing way to treat aches and pains. Soehnle have a range of inventive, cordless heat pads that will keep you warm and toasty on the move, whether you need them to treat pain or just to keep that winter chill at bay. The heat belt is perfect for lower back and shoulder pain, or just as an attractive and functional winter wrap. Its adjustable design is great for wearing under clothes to warm you up when you're outside, with up to 4 hours of cordless heat! Sitting on the heated pillow feels so cosy and amazing; it's rechargeable and cordless, so you can take it with you to picnics and sporting events and enjoy the outside while feeling like you're at home on your sofa!
"""Grid-editor column for editing raw byte values.

Cells hold ``bytes``; they are shown and edited as Python-style escaped
strings (via ``netlib.strutils``) so arbitrary binary data can be displayed
and typed in a text UI.
"""
from __future__ import absolute_import, print_function, division

import os

import urwid

from mitmproxy.console import signals
from mitmproxy.console.grideditor import base
from netlib import strutils


def read_file(filename, callback, escaped):
    # type: (str, Callable[...,None], bool) -> Optional[str]
    # Read `filename` and pass its content to `callback`.
    # If `escaped` is true the file is read as text and interpreted as a
    # Python-style escaped string; otherwise it is read as raw bytes.
    # Returns an error message string on failure, None otherwise.
    if not filename:
        return

    filename = os.path.expanduser(filename)
    try:
        with open(filename, "r" if escaped else "rb") as f:
            d = f.read()
    except IOError as v:
        return str(v)

    if escaped:
        try:
            d = strutils.escaped_str_to_bytes(d)
        except ValueError:
            return "Invalid Python-style string encoding."
    # TODO: Refactor the status_prompt_path signal so that we
    # can raise exceptions here and return the content instead.
    callback(d)


class Column(base.Column):
    # Grid-editor column whose cells contain bytes.

    def Display(self, data):
        return Display(data)

    def Edit(self, data):
        return Edit(data)

    def blank(self):
        # an empty cell is an empty bytes value
        return b""

    def keypress(self, key, editor):
        # Key bindings:
        #   r - load the cell value from a file (escaped-string content)
        #   R - load the cell value from a file (raw, unescaped content)
        #   e - open the value in an external editor
        #   enter - edit the cell inline
        # Unhandled keys are returned so urwid can propagate them.
        if key == "r":
            if editor.walker.get_current_value() is not None:
                signals.status_prompt_path.send(
                    self,
                    prompt="Read file",
                    callback=read_file,
                    args=(editor.walker.set_current_value, True)
                )
        elif key == "R":
            if editor.walker.get_current_value() is not None:
                signals.status_prompt_path.send(
                    self,
                    prompt="Read unescaped file",
                    callback=read_file,
                    args=(editor.walker.set_current_value, False)
                )
        elif key == "e":
            o = editor.walker.get_current_value()
            if o is not None:
                n = editor.master.spawn_editor(o)
                # drop a trailing newline the external editor may have added
                n = strutils.clean_hanging_newline(n)
                editor.walker.set_current_value(n)
        elif key in ["enter"]:
            editor.walker.start_edit()
        else:
            return key


class Display(base.Cell):
    # Read-only cell: renders the bytes as an escaped string.

    def __init__(self, data):
        # type: (bytes) -> Display
        self.data = data
        escaped = strutils.bytes_to_escaped_str(data)
        w = urwid.Text(escaped, wrap="any")
        super(Display, self).__init__(w)

    def get_data(self):
        return self.data


class Edit(base.Cell):
    # Editable cell: the user edits the escaped-string form of the bytes.

    def __init__(self, data):
        # type: (bytes) -> Edit
        data = strutils.bytes_to_escaped_str(data)
        w = urwid.Edit(edit_text=data, wrap="any", multiline=True)
        w = urwid.AttrWrap(w, "editfield")
        super(Edit, self).__init__(w)

    def get_data(self):
        # type: () -> bytes
        # Un-escape the edited text back into bytes; on a malformed escape
        # sequence, notify the user via the status bar and re-raise so the
        # edit is not committed.
        txt = self._w.get_text()[0].strip()
        try:
            return strutils.escaped_str_to_bytes(txt)
        except ValueError:
            signals.status_message.send(
                self,
                message="Invalid Python-style string encoding.",
                expire=1000
            )
            raise
Learn all there is to know about cloud ERP, including what it is and how it compares to on-premises ERP. Cloud ERP systems are now a very common business solution. In fact, many of the traditional on-premises vendors now also offer some kind of cloud-based deployment option, or are axing their on-premises solutions altogether.
from lib.common import helpers


class Module:
    """Empire module wrapper for PowerSploit's Invoke-NinjaCopy.

    Invoke-NinjaCopy copies a file off an NTFS volume by reading the raw
    volume and parsing the NTFS structures directly, which lets it bypass
    file locks, SACLs and DACLs (e.g. to grab ntds.dit or registry hives).
    """

    def __init__(self, mainMenu, params=None):
        """Initialize module metadata and options.

        mainMenu -- the Empire main menu object (gives access to external
                    functionality like listeners/agent handlers/etc.)
        params   -- optional list of [Name, Value] pairs used to pre-seed
                    option values (default: no overrides; a None default is
                    used instead of a mutable [] default argument)
        """
        # static module metadata consumed by the Empire framework
        self.info = {
            'Name': 'Invoke-NinjaCopy',

            'Author': ['@JosephBialek'],

            'Description': ('Copies a file from an NTFS partitioned volume '
                            'by reading the raw volume and parsing the NTFS '
                            'structures.'),

            'Background': True,

            'OutputExtension': None,

            # raw-volume reads require administrative privileges
            'NeedsAdmin': True,

            'OpsecSafe': False,

            'Language': 'powershell',

            'MinLanguageVersion': '2',

            'Comments': [
                'https://github.com/mattifestation/PowerSploit/blob/master/Exfiltration/Invoke-NinjaCopy.ps1',
                'https://clymb3r.wordpress.com/2013/06/13/using-powershell-to-copy-ntds-dit-registry-hives-bypass-sacls-dacls-file-locks/'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent': {
                'Description': 'Agent to run module on.',
                'Required': True,
                'Value': ''
            },
            'Path': {
                'Description': 'The full path of the file to copy (example: c:\\windows\\ntds\\ntds.dit)',
                'Required': True,
                'Value': ''
            },
            'LocalDestination': {
                'Description': 'A file path to copy the file to on the local computer.',
                'Required': False,
                'Value': ''
            },
            'RemoteDestination': {
                'Description': 'A file path to copy the file to on the remote computer. If this isn\'t used, LocalDestination must be specified.',
                'Required': False,
                'Value': ''
            },
            'ComputerName': {
                'Description': 'An array of computernames to run the script on.',
                'Required': False,
                'Value': ''
            }
        }

        # save off a copy of the mainMenu object to access external
        # functionality like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # parameter format is [Name, Value]; unknown option names are ignored
        for param in (params or []):
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build the PowerShell payload for this module.

        Reads the Invoke-NinjaCopy source, optionally obfuscates it, appends
        an Invoke-NinjaCopy invocation built from the current option values,
        and returns the full script. Returns '' if the module source cannot
        be read.
        """
        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/collection/Invoke-NinjaCopy.ps1"
        if obfuscate:
            helpers.obfuscate_module(moduleSource=moduleSource,
                                     obfuscationCommand=obfuscationCommand)
            moduleSource = moduleSource.replace("module_source",
                                                "obfuscated_module_source")
        try:
            # context manager guarantees the handle is closed even if
            # read() fails (the original left the file open on error)
            with open(moduleSource, 'r') as f:
                moduleCode = f.read()
        except IOError:
            # parenthesized print works under both Python 2 and 3
            print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
            return ""

        script = moduleCode
        scriptEnd = "$null = Invoke-NinjaCopy "

        # translate the option table into command-line switches/arguments
        for option, values in self.options.iteritems():
            if option.lower() == "agent":
                continue
            if not values['Value'] or values['Value'] == '':
                continue
            if values['Value'].lower() == "true":
                # if we're just adding a switch
                scriptEnd += " -" + str(option)
            else:
                scriptEnd += " -" + str(option) + " " + str(values['Value'])

        scriptEnd += "; Write-Output 'Invoke-NinjaCopy Completed'"
        if obfuscate:
            scriptEnd = helpers.obfuscate(self.mainMenu.installPath,
                                          psScript=scriptEnd,
                                          obfuscationCommand=obfuscationCommand)
        script += scriptEnd
        return script
Interviews are for both the employer and you, as an applicant. A great way to ensure "fit" is to ask questions during an interview. Never walk into an interview without specific questions for the interviewers. Even if you work in the unit where you're interviewing, it is important to demonstrate that you have thought about the position and are interested in the interviewers' needs and wants, not just your own. Typically, you'll have an opportunity to ask questions toward the end of the interview; you want to be prepared! You should not ask questions about how you will be trained, when you will get promoted, or whether you can work at home; instead, focus on demonstrating that you are interested in the employer's needs. You may want to prepare 5-6 questions; you may not get to ask them all (3 is probably plenty) and it is possible that your interviewers will have answered at least a couple during the process. Here are some questions you could ask; you should prioritize what is most important to you. • What are the next steps in the process? • What are your goals for the person who fills this job in their first 90 days? First year? • What did the person who held this job before do well that you would like to see continued? • What do you expect to be the biggest learning curve for the person who fills this job? • Since this position has been vacant, what topic or issue of this position has taken up the most time? • What is a typical day like? • How would you describe the organization's culture? • What do you most like about working here? • Is there anything else I can provide that would be helpful to you in making your decision?
import os from helper import SshClient, ScpClient, HadoopConfigGen class HBase: def __init__(self): pass @staticmethod def install(options): hbase = HadoopConfigGen() hbase.add_property('hbase.rootdir', 'hdfs://%s/hbase' % options.hadoop_cluster_name) hbase.add_property('hbase.cluster.distributed', 'true') zookeeper_names = [] for zookeeper_node in options.all_zookeeper_hosts: zookeeper_names.append(options.host_names[zookeeper_node]) hbase.add_property('hbase.zookeeper.quorum', ','.join(zookeeper_names)) hbase.add_property('hbase.zookeeper.property.clientPort', '2181') hbase.add_property('hbase.zookeeper.property.dataDir', options.zookeeper_data_dir) hbase.add_property('hbase.tmp.dir', options.hbase_tmp_dir) hbase.save('./tmp/hbase-site.xml') regionserver_path = './tmp/regionservers' if os.path.exists(regionserver_path): os.remove(regionserver_path) regionservers = open(regionserver_path, 'w') for hb in options.hbase_nodes: regionservers.write('%s\n' % options.host_names[hb]) regionservers.close() for host in options.all_hbase_hosts: ssh = SshClient(host, options.root_password) if 'cannot open' in ssh.execute("file " + options.hbase_path): ScpClient.local2remote(host, options.root_password, options.hbase_package_path, '/usr/local/hbase') ssh.execute('tar zxf /usr/local/hbase -C /usr/local') ssh.execute('rm -rf /usr/local/hbase') ssh.execute( 'echo export HBASE_HOME=%s >>/etc/profile' % options.hbase_path) ssh.execute('echo export PATH=\$HBASE_HOME/bin:\$PATH >>/etc/profile') ssh.execute('source /etc/profile') ssh.execute('rm -rf %s/lib/slf4j-log4j12*.jar' % options.hbase_path) ssh.execute( 'sed -i \'s:# export JAVA_HOME=/usr/java/jdk1.6.0/:export JAVA_HOME=%s:\' %s/conf/hbase-env.sh' % ( options.jdk_path, options.hbase_path)) ssh.execute( 'sed -i \'s:# export HBASE_MANAGES_ZK=true:export HBASE_MANAGES_ZK=false:\' %s/conf/hbase-env.sh' % options.hbase_path) ScpClient.local2remote(host, options.root_password, './tmp/hbase-site.xml', '%s/conf/' % options.hbase_path) 
ScpClient.local2remote(host, options.root_password, './tmp/regionservers', '%s/conf/' % options.hbase_path) ssh.close() @staticmethod def start(options): master = SshClient(options.all_hbase_hosts[0], options.root_password) print 'start hbase cluster' master.execute('$HBASE_HOME/bin/hbase-daemon.sh start master') master.close() for regionserver in options.all_hbase_hosts: region = SshClient(regionserver, options.root_password) region.execute('$HBASE_HOME/bin/hbase-daemon.sh start regionserver') region.close() @staticmethod def stop(options): for regionserver in options.all_hbase_hosts: region = SshClient(regionserver, options.root_password) region.execute('$HBASE_HOME/bin/hbase-daemon.sh stop regionserver') region.close() master = SshClient(options.all_hbase_hosts[0], options.root_password) print 'stop hbase cluster' master.execute('$HBASE_HOME/bin/hbase-daemon.sh stop master') master.close()
What Drives You to Study Poker? In honor of our 100th episode, we have an all-star lineup on this week's podcast. James "SplitSuit" Sweeney, Ed Miller, Mike Gano, Dr. Tricia Cardner and Doug Hull all drop by to share their answer to a single question: What first inspired you to study, and what inspires you to continue to study?" You'll hear five very different stories about falling in love not just with the game of poker, but the study thereof. It's a unique look into the lives of five very accomplished poker coaches, and there is something for poker players at every level to relate to here.
#-*- coding: utf-8 -*-
import sys
import time
import argparse
import logging
from decimal import Decimal

import requests
try:
    from requests.exceptions import ConnectTimeout
    MyTimeoutException = ConnectTimeout
except ImportError:
    # Older requests releases only provide the generic Timeout.
    from requests.exceptions import Timeout
    MyTimeoutException = Timeout

import matplotlib
matplotlib.use('Agg')  # headless backend so savefig works without a display
import matplotlib.pyplot as plt


class StaticGraph(object):
    """Renders the elapsed-time samples (and the timeout reference line)
    to elapsed.png."""

    def __init__(self, elapsed_times, timeout):
        # x axis is simply the request number (1..N).
        self.elapsed = {
            'x': [x for x in range(1, len(elapsed_times) + 1)],
            'y': elapsed_times
        }
        # Horizontal reference line at the configured timeout.
        self.timeout = {
            'x': [x for x in range(1, len(elapsed_times) + 1)],
            'y': [timeout] * len(elapsed_times)
        }

    def make(self):
        plt.title("Elapsed Times")
        plt.xlabel("time [sec]")
        plt.ylabel("elapsed time [sec]")
        plt.xlim([1, len(self.elapsed['x'])])
        plt.ylim([0, self.timeout['y'][0] + 1])
        plt.legend(loc='upper right')
        plt.grid()
        plt.plot(self.timeout['x'], self.timeout['y'], color='r')
        plt.plot(self.elapsed['x'], self.elapsed['y'])
        plt.savefig("elapsed.png")
        plt.show()


class Response(object):
    """Normalized view of one HTTP response, or of a timeout marker
    (url/elapsed/status_code all set to -1 by do_get on timeout)."""

    def __init__(self, url, elapsed, status_code):
        self.url = url
        if not isinstance(elapsed, int):  # Not Error Number
            # requests gives a timedelta; store plain seconds.
            self._elapsed = elapsed.total_seconds()
        else:
            # Error sentinel (-1) is kept as-is.
            self._elapsed = elapsed
        self.status_code = status_code
        # Bug fix: the sentinel passed from do_get is -1, not 1, so the old
        # "== 1" comparison never flagged a timeout.
        self.is_timeout = elapsed == -1 or status_code == -1

    @property
    def elapsed(self):
        return self._elapsed

    def __str__(self):
        if not self.is_timeout:
            msg = "[{status_code}] from {url}: Time= {elapsed}[sec]".format(
                status_code=self.status_code, url=self.url, elapsed=self._elapsed)
        else:
            # Bug fix: the template was never formatted, so the literal
            # "{url}" placeholder was printed.
            msg = "[!] from {url}: Request timeout".format(url=self.url)
        return msg


# Build a Response from a dict with url/elapsed/status_code keys
# (a requests.Response.__dict__ works directly).
make_response = lambda d: Response(d['url'], d['elapsed'], d['status_code'])


class HTTPTest(object):
    """ HTTP GET Tester """

    def __init__(self, url, count, timeout):
        self.url = url
        self.count = count
        self.timeout = timeout
        self.fail_count = 0
        self.elapsed_times = []
        self.INTERVAL = 1  # seconds between requests
        self.logger = logging.getLogger("HTTPTest")
        self.logger.setLevel(logging.DEBUG)
        # File
        handler = logging.FileHandler('http_get.log', mode='w')
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        # Stdout
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

    def do_get(self):
        """
        Do HTTP GET with requests module.
        """
        try:
            res = requests.get(self.url, timeout=self.timeout)
            response = make_response(res.__dict__)
            self.elapsed_times.append(response.elapsed)
            self.logger.info(str(response))
        except MyTimeoutException:
            # -1 is the error sentinel understood by Response.
            response = make_response({'url': self.url, 'elapsed': -1, 'status_code': -1})
            # Record the full timeout as the elapsed time for the graph.
            self.elapsed_times.append(self.timeout)
            self.logger.info(str(response))
            self.fail_count += 1

    def display_statics(self):
        """Log summary statistics and write the elapsed-time graph."""
        # Bug fix: divide as Decimals; the old int/int division truncated the
        # ratio to 0 under Python 2 before Decimal ever saw it.
        pktloss_ratio = Decimal(self.fail_count) / Decimal(self.count) * Decimal('100')
        self.logger.info("+++++ HTTP GET Tester Statics +++++")
        self.logger.info("Send: {}".format(self.count))
        self.logger.info("Recv: {}".format(self.count - self.fail_count))
        self.logger.info("Loss: {}".format(self.fail_count))
        self.logger.info("{}% Packet Loss".format(pktloss_ratio))
        self.logger.info("+++++++++++++++++++++++++++++++++++")
        # Make static graph images
        statgraph = StaticGraph(self.elapsed_times, self.timeout)
        statgraph.make()

    def start(self):
        """
        call do_get <self.count> times, one INTERVAL apart, then report.
        """
        self.logger.info("[+] Start {} times HTTP GET Test to {}!".format(self.count, self.url))
        for i in range(self.count):
            self.do_get()
            time.sleep(self.INTERVAL)
        self.display_statics()


def parse_argument():
    parser = argparse.ArgumentParser(description='ping like HTTP GET tester.')
    parser.add_argument("-u", "--url", type=str,
                        help='Request to this url.', default="http://www.yahoo.co.jp/")
    parser.add_argument("-c", "--count", type=int,
                        help='HTTP GET test count.', default=2000)
    parser.add_argument("-t", "--timeout", type=int,
                        help='Request timeout limit.', default=1)
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_argument()
    tester = HTTPTest(args.url, args.count, args.timeout)
    tester.start()
Apple Pay is the service that will enable you to use your iPhone* as a credit card, and Apple is expecting to launch it in the UK this year. When it happens it will mark the beginning of the end of the plastic credit card. We all love our mobile phone and sales have rocketed over the years as more and more features have been added. Your phone is also your camera, your diary, your source of email and messaging and it’s also your gateway to the whole Internet. Now it’s going to be your wallet too. Apple Pay is Apple’s own payment service. Link your credit card to the phone and use the special chip in the phone to communicate with credit card payment machines. A click on your phone is all you will need to make a payment. This is undoubtedly a mark of the future. Expect your bank to be linked your phone very soon, because where Apple leads the rest of the market will surely follow. *You need an iPhone 6 because earlier iPhones don’t have the necessary Near Field Communication (NFC) chip.
from __future__ import print_function import json import warnings import pickle import itertools from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline import scipy.optimize as opt import retrieve class Node: def __init__(self, n, pipeline): self.name = n self.children = {} self.pipeline = pipeline self.threshold = 0.00 def hasChildren(self): return self.children != dict() def getset(self, amount): return list(retrieve.entities(amount, self.name)) def train(self, entities): if not entities: return with warnings.catch_warnings(): warnings.simplefilter("ignore") corpus = features(entities) target = labels(entities) self.pipeline = self.pipeline.fit(corpus, target) assert hasattr(self.pipeline.steps[0][1], "vocabulary_") def test(self): if not self.testset: print(self.name + "'s test set not initialized!") return corpus = features(self.testset) target = labels(self.testset) return self.pipeline.score(corpus, target) def predict(self, x): ''' take an entity and classify it into either a child node (if confident about prediction) or self (if unconfident) ''' fs = features([x]) proba = max(self.pipeline.predict_proba(fs)[0]) if proba < self.threshold: label = self.name else: label = self.pipeline.predict(fs)[0] return label def isTrained(self): return hasattr(self.pipeline.steps[0][1], "vocabulary_") def distance(self, predicted, label): ''' error function for optimization of node thresholds. 
correct classification is a 0, withholded classification is a 1, and misclassification is a 2 ''' if predicted == label: return 0 elif predicted == self.name: return 1 else: return 2 def score(self, threshold): ''' gets entities from this node and its children, to score how well the node classifies the entities (using "distance") ''' self.threshold = threshold total = sum([self.distance(self.predict(e), e["class"]) for e in self.testset]) return total / len(self.testset) def learnthreshold(self): print("loading test set.") self.testset = list(itertools.chain(retrieve.direct(100, self.name), retrieve.entities(200, self.name))) print("optimizing.") result = opt.minimize_scalar(self.score, bounds=(0.0, 1.0), method='bounded') print(result) self.threshold = result.x print(self.name, "threshold set to", self.threshold) class TreeClassifier: def __init__(self, subclass=None): # make tree from nested ontology with open("../data/nestedontology.json", 'r') as f: ontology = json.load(f)[0] self.root = self._buildtree(ontology) if subclass: self.root = self.getsubnode(subclass) def getsubnode(self, classname): ''' returns the node in the tree that matches classname ''' for node in iter(self): if node.name == classname: return node raise ValueError(classname + " is not a valid class name!") def _buildtree(self, json_tree): ''' build tree from nested json ''' root = Node(json_tree["name"], pipeline()) for child in json_tree["children"]: root.children[child["name"]] = (self._buildtree(child)) return root def __iter__(self): ''' BFS traversal of tree ''' queue = [self.root] while queue != []: current = queue.pop(0) queue.extend(list(current.children.values())) yield current def train(self, entities): ''' train each node's classifier ''' for node in iter(self): print("Training", node.name) node.train(entities) def autotrain(self, amount): ''' train each node's classifier ''' for node in iter(self): entities = node.getset(amount) node.train(entities) def learnthresholds(self): for 
node in iter(self): if node.isTrained(): print("learning threshold for", node.name, end='. ') node.learnthreshold() def predict(self, entity): ''' returns predicted classes for entity. predicts downwards in tree from root node ''' node = self.root while node.hasChildren() and node.isTrained(): predicted_label = node.predict(entity) if predicted_label == node.name: break node = node.children[predicted_label] return node.name def predictions(self, entities): ''' runs predict function ovr set of entities ''' for entity in entities: self.predict(entity) def score(self, entities): total = 0 for entity in entities: realclass = entity["deepest"] predicted = self.predict(entity) if predicted == realclass: total += 1 return total / len(entities) def features(dataset): return [ ' '.join(x["properties"]) for x in dataset] def labels(dataset): return [ x["class"] for x in dataset ] def pipeline(): return Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB())]) def dump(trainnum, testnum, filename): tree = TreeClassifier("owl:Thing") print("Tree created with", tree.root.name, "as root.") tree.train(trainnum, testnum) with open(filename, 'wb') as f: pickle.dump(tree, f) def load(filename): with open(filename, 'rb') as f: tree = pickle.load(f) return tree if __name__ == "__main__": tree = TreeClassifier() tree.train(1000, 10) entities = [e for e in retrieve.entities(10, "owl:Thing")] for e in entities: print(e["name"], e["class"], tree.predict(e))
Hempfield recCenter is a nonprofit organization committed to providing positive, fun experiences and value through a variety of relevant wellness programs and events inclusive to all members of the community. We are governed by a volunteer board of directors—with whom our future vision and mission are entrusted. Adam Aloisi – Hempfield School Dist. Ian Daecher – Hempfield School Dist. Board of Directors meetings are held the fourth Wednesday of each month at 5:30 pm.
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story


class ToughCompositorPage(page_module.Page):
  """Base page for compositor benchmarks: mobile shared state plus a
  post-navigation settle wait."""

  def __init__(self, url, page_set):
    super(ToughCompositorPage, self).__init__(
        url=url, page_set=page_set, credentials_path='data/credentials.json',
        shared_page_state_class=shared_page_state.SharedMobilePageState)
    self.archive_data_file = 'data/tough_compositor_cases.json'

  def RunNavigateSteps(self, action_runner):
    super(ToughCompositorPage, self).RunNavigateSteps(action_runner)
    # TODO(epenner): Remove this wait (http://crbug.com/366933)
    action_runner.Wait(5)


class ToughCompositorScrollPage(ToughCompositorPage):
  """Page measured while scrolling down."""

  def __init__(self, url, page_set):
    super(ToughCompositorScrollPage, self).__init__(url=url, page_set=page_set)

  def RunPageInteractions(self, action_runner):
    # Make the scroll longer to reduce noise.
    with action_runner.CreateGestureInteraction('ScrollAction'):
      action_runner.ScrollPage(direction='down', speed_in_pixels_per_second=300)


class ToughCompositorWaitPage(ToughCompositorPage):
  """Page measured while its own animations run (no input)."""

  def __init__(self, url, page_set):
    super(ToughCompositorWaitPage, self).__init__(url=url, page_set=page_set)

  def RunPageInteractions(self, action_runner):
    # We scroll back and forth a few times to reduce noise in the tests.
    with action_runner.CreateInteraction('Animation'):
      action_runner.Wait(8)


class ToughCompositorCasesPageSet(story.StorySet):

  """ Touch compositor sites """

  def __init__(self):
    super(ToughCompositorCasesPageSet, self).__init__(
        archive_data_file='data/tough_compositor_cases.json',
        cloud_storage_bucket=story.PUBLIC_BUCKET)

    # Each entry is a plain URL string; the stray '"""' tokens that had
    # swallowed these URLs into dead string literals are removed.
    scroll_urls_list = [
      # Why: Baseline CC scrolling page. A long page with only text.
      'http://jsbin.com/pixavefe/1/quiet?CC_SCROLL_TEXT_ONLY',
      # Why: Baseline JS scrolling page. A long page with only text.
      'http://jsbin.com/wixadinu/2/quiet?JS_SCROLL_TEXT_ONLY',
      # Why: Scroll by a large number of CC layers
      'http://jsbin.com/yakagevo/1/quiet?CC_SCROLL_200_LAYER_GRID',
      # Why: Scroll by a large number of JS layers
      'http://jsbin.com/jevibahi/4/quiet?JS_SCROLL_200_LAYER_GRID',
    ]

    wait_urls_list = [
      # Why: CC Poster circle animates many layers
      'http://jsbin.com/falefice/1/quiet?CC_POSTER_CIRCLE',
      # Why: JS poster circle animates/commits many layers
      'http://jsbin.com/giqafofe/1/quiet?JS_POSTER_CIRCLE',
      # Why: JS invalidation does lots of uploads
      'http://jsbin.com/beqojupo/1/quiet?JS_FULL_SCREEN_INVALIDATION',
      # Why: Creates a large number of new tilings
      'http://jsbin.com/covoqi/1/quiet?NEW_TILINGS',
    ]

    for url in scroll_urls_list:
      self.AddStory(ToughCompositorScrollPage(url, self))

    for url in wait_urls_list:
      self.AddStory(ToughCompositorWaitPage(url, self))
This picture shows a tree branch. I took this picture because we are going to fix our playground. In this picture my class can pick up the branches that are on the ground and clean the ground.
# -*-python-*- # GemRB - Infinity Engine Emulator # Copyright (C) 2003 The GemRB Project # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # MessageWindow.py - scripts and GUI for main (walk) window ################################################### import GemRB import GUIClasses import GUICommon import GUICommonWindows import CommonWindow import GUIWORLD from GameCheck import MAX_PARTY_SIZE from GUIDefines import * MWindow = 0 ActionsWindow = 0 PortraitWindow = 0 OptionsWindow = 0 MessageTA = 0 def OnLoad(): global MWindow, ActionsWindow, PortraitWindow, OptionsWindow # TODO: we can uncomment the "HIDE_CUT" lines below to hide the windows for cutscenes # the original doesn't hide them and it looks like there is a map drawing bug at the bottom of the screen due to the bottom # row of tiles getting squished for not fitting perfectly on screen (tho I havent seen this in BG2, but maybe wasnt paying attention) ActionsWindow = GemRB.LoadWindow(0, GUICommon.GetWindowPack(), WINDOW_BOTTOM|WINDOW_LEFT) ActionsWindow.AddAlias("ACTWIN") #ActionsWindow.AddAlias("HIDE_CUT", 1) ActionsWindow.AddAlias("NOT_DLG", 0) ActionsWindow.SetFlags(WF_BORDERLESS|IE_GUI_VIEW_IGNORE_EVENTS, OP_OR) OptionsWindow = GemRB.LoadWindow(2, GUICommon.GetWindowPack(), WINDOW_BOTTOM|WINDOW_RIGHT) OptionsWindow.AddAlias("OPTWIN") 
#OptionsWindow.AddAlias("HIDE_CUT", 2) OptionsWindow.AddAlias("NOT_DLG", 1) OptionsWindow.SetFlags(WF_BORDERLESS|IE_GUI_VIEW_IGNORE_EVENTS, OP_OR) MWindow = GemRB.LoadWindow(7, GUICommon.GetWindowPack(), WINDOW_BOTTOM|WINDOW_HCENTER) MWindow.SetFlags(WF_DESTROY_ON_CLOSE, OP_NAND) MWindow.AddAlias("MSGWIN") MWindow.AddAlias("HIDE_CUT", 0) MWindow.SetFlags(WF_BORDERLESS|IE_GUI_VIEW_IGNORE_EVENTS, OP_OR) PortraitWindow = GUICommonWindows.OpenPortraitWindow (1, WINDOW_BOTTOM|WINDOW_HCENTER) #PortraitWindow.AddAlias("HIDE_CUT", 3) PortraitWindow.AddAlias("NOT_DLG", 2) PortraitWindow.SetFlags(WF_BORDERLESS|IE_GUI_VIEW_IGNORE_EVENTS, OP_OR) pframe = PortraitWindow.GetFrame() pframe['x'] -= 16 PortraitWindow.SetFrame(pframe) MessageTA = MWindow.GetControl (1) MessageTA.SetFlags (IE_GUI_TEXTAREA_AUTOSCROLL|IE_GUI_TEXTAREA_HISTORY) MessageTA.SetResizeFlags(IE_GUI_VIEW_RESIZE_ALL) MessageTA.AddAlias("MsgSys", 0) CloseButton= MWindow.GetControl (0) CloseButton.SetText(28082) CloseButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, lambda: MWindow.Close()) CloseButton.SetFlags (IE_GUI_BUTTON_MULTILINE, OP_OR) CloseButton.MakeDefault() OpenButton = OptionsWindow.GetControl (10) OpenButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, lambda: MWindow.Focus()) # Select all Button = ActionsWindow.GetControl (1) Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommon.SelectAllOnPress) # Select all Button = ActionsWindow.GetControl (3) Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommonWindows.ActionStopPressed) FormationButton = ActionsWindow.GetControl (4) FormationButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUIWORLD.OpenFormationWindow) GUICommonWindows.SetupClockWindowControls (ActionsWindow) GUICommonWindows.SetupMenuWindowControls (OptionsWindow) UpdateControlStatus () def UpdateControlStatus (): if GemRB.GetGUIFlags() & (GS_DIALOGMASK|GS_DIALOG): Label = MWindow.GetControl (0x10000003) Label.SetText (str (GemRB.GameGetPartyGold ())) MWindow.Focus() elif MWindow: MWindow.Close()
Thanks for the update deveauzt. I've spoken with the designer of the board and sent him a description and pictures of what happened so that the manufacturer is aware of it. When he saw my pictures he said..."It's pretty obvious from the second photo that battery power was shorted. That Pch FET with the crater in it is the 110A high curret switching FET that allows a normal, cheap power switch to switch the high currents the controller can handle. Looking at the first photo I would say that it's a good chance the Switchmode regulator also got fried. In that case the 5V regulator might be trying to step 11.1V down to 5V when normally it would be reducing 6.2V to 5V. (Just a guess). That might explain the wisp of smoke since the input capacitors on the 5V regulator are only rated for 10V. That might explain the wisp of smoke since the input capacitors on the 5V regulator are only rated for 10V." That may help you in your understanding of the board. In my case, I chucked the first one as a casualty of war, and got a new one. I put it on my own standoffs. It has worked perfectly ever since. I like the board. However, I do wish it had more available digital pins to add sensors to, etc. You can use the so-called servo pins as general I/O pins, but I still need more. Follow-up: I hooked the T'Rex up on a basic RC transmitter/receiver. It sort of worked for a little bit on the bench. The wheels turned in response to throttle movement, etc., but the slightest bit of movement caused little bits of smoke to come up. Then it got worse. Now when I give it throttle, the whole board resets itself. When I checked the back of the T'Rex board, the large black component labeled "110P04-05" looks like it has black marks around it and it's cracked. It clearly has suffered damage, probably overheating damage. I don't see any other visually obvious problems on the back of the board. Thank you, Toni. That's what I needed to know. I'm hoping to use this shield on an Arduino Mega 2660 R3. 
The shield's product page mentions "any digital pin on the Arduino (D2 and D3 default)". How does one accomplish changing the default pins? In particular, how do I change it to use D16 and D17 on the Arduino Mega? Perhaps that's not possible, since those pins are completely off the shield. Or perhaps I could run a jumper over to those pins. But I need to make sure D2 and D3 are not interfered with (they are being used by something else). I see the "3" and "2" solder pads by TX and RX, but I'm not sure how to utilize those or if those are related to my goal.