text
stringlengths 29
850k
|
|---|
import platform
'''
This file defines Token class and Lexer class
'''
class Token():
    """A lexical token: a type tag, its raw text, and its start/end position.

    Positions use 1-based lines and 0-based columns, matching the
    "line.col" index strings built elsewhere in this module.
    """

    def __init__(self):
        self.type = None     # token category, e.g. 'Identifier'; None until classified
        self.content = ''    # raw characters making up the token
        self.s_line = 1      # start line (1-based)
        self.s_col = 0       # start column (0-based)
        self.e_line = 1      # end line
        self.e_col = 0       # end column

    def __repr__(self):
        # Aid debugging: show category, content and span in one glance.
        return 'Token(%r, %r, %d.%d-%d.%d)' % (
            self.type, self.content,
            self.s_line, self.s_col, self.e_line, self.e_col)
# Single-character arithmetic operators recognized by the lexer.
operation_list = ['+', '-', '*', '/']


def isoperator(char):
    """Return True if *char* is one of the arithmetic operator characters."""
    # Membership test replaces the redundant "if ...: return True else False".
    return char in operation_list
class Lexer():
    '''
    Hand-rolled scanner that splits a string into Token objects.

    Positions are tracked as (line, column) with 1-based lines and
    0-based columns; start_pos/end_pos are "line.col" strings (the same
    index format Tkinter Text widgets use).

    state = 'PROCESS', 'DONE'
    '''

    def __init__(self):
        self.token_list = []              # tokens produced by the last update()
        self.token_num = 0                # token counter (not maintained yet)
        self.cur_line = 1                 # current scan line (1-based)
        self.cur_col = 0                  # current scan column (0-based)
        self.systype = platform.system()  # host OS name, e.g. 'Linux'
        self.NewLine = False              # set after a '\n' is consumed

    def update_pos(self):
        # Only the column advances here; the line counter is bumped lazily
        # in new_token() via the NewLine flag.
        self.cur_col += 1

    def get_next_char(self):
        # Advance to the next input character, or set cur_ch to None at
        # end of input (None doubles as the EOF sentinel everywhere).
        if self.cur_idx+1 <= len(self.string)-1:
            self.cur_idx += 1
            self.cur_ch = self.string[self.cur_idx]
            self.update_pos()
        else:
            self.cur_ch = None

    def update_token(self, Found_token):
        # Record the end position of self.token.  When the scanner has
        # already advanced one char past the token (Found_token and not at
        # EOF), step back a column so e_col points at the last character.
        if Found_token and self.cur_ch != None:
            (self.token.e_line, self.token.e_col) = self.step_back()
        else:
            (self.token.e_line, self.token.e_col) = (self.cur_line, self.cur_col)
        # "line.col" string form, usable directly as a Tkinter text index.
        self.token.end_pos = str(self.token.e_line)+'.'+str(self.token.e_col)

    def step_back(self):
        # Position one column to the left of the cursor, on the same line.
        return (self.cur_line, self.cur_col-1)

    def skip_whitespace(self):
        # Consume a run of spaces and emit it as one 'WhiteSpace' token.
        while self.cur_ch == ' ':
            self.token.content += self.cur_ch
            self.get_next_char()
        self.token.type = 'WhiteSpace'
        # move back the cur_pos
        self.update_token(True)
        self.token_list.append(self.token)
        self.new_token()

    def eatID(self):
        # Consume an alphanumeric run as an 'Identifier' token.
        self.token.type = 'Identifier'
        while self.cur_ch != None and (self.cur_ch.isalpha() or self.cur_ch.isdigit()):
            self.token.content += self.cur_ch
            self.get_next_char()
        self.update_token(True)
        self.token_list.append(self.token)
        self.new_token()

    def eatChar(self):
        # Emit any other single character as a 'Charactor' token (sic).
        self.token.type = 'Charactor'
        self.token.content += self.cur_ch
        if self.cur_ch == '\n':
            # Defer the line bump until the next token starts.
            self.NewLine = True
        self.get_next_char()
        self.update_token(True)
        self.token_list.append(self.token)
        self.new_token()

    def new_token(self):
        # Allocate a fresh token and stamp its start position.
        self.token = Token()
        self.token.type = None
        self.token.content = ''
        if self.NewLine:
            # The previous token ended with '\n': move to the next line.
            self.cur_line += 1
            self.cur_col = 0
            self.NewLine = False
        self.token.s_line = self.cur_line
        self.token.s_col = self.cur_col
        self.token.start_pos = str(self.token.s_line)+'.'+str(self.token.s_col)
        #print "New token start at: %s" %(self.token.start_pos)

    def update(self, string):
        # Tokenize the whole input string into self.token_list.
        # NOTE(review): self.string[0] raises IndexError on '' -- callers
        # (parse() below) guard with len(string) > 0; confirm all do.
        # prepare for the first token
        self.cur_line = 1
        self.cur_col = 0
        self.string = string
        self.token_list = []
        self.cur_idx = 0
        self.cur_ch = self.string[0]
        self.NewLine = False
        # alloc the first token
        self.new_token()
        while self.cur_ch != None:
            if self.cur_ch == ' ':
                self.skip_whitespace()
            elif self.cur_ch.isalpha():
                self.eatID()
            #elif cur_cur == '\n':
            else:
                #print "Unknown type"
                self.eatChar()
        print "Updated"
# Module-level lexer instance shared by parse().
lexer = Lexer()
from Tkinter import *


def parse(main, string):
    # Tokenize *string*; the commented-out loop below suggests the tokens
    # are meant to be tagged into main.text (a Tkinter Text widget).
    text = main.text
    #print string
    if len(string) > 0:
        # Guard: Lexer.update() indexes string[0] and would fail on ''.
        lexer.update(string)
    #for token in lexer.token_list:
        #text.tag_add(token.type, token.start_pos, token.end_pos)
        #print "Token: %s(%s-%s)" %(token.content, token.start_pos, token.end_pos)
|
This course has been accredited by the Continuing Professional Development Standards Office. This means that a person who takes this course is now able to log credits for the hours spent on these courses.
Register by 27 July (11:55 pm BST) to receive £45 off the full tuition cost. This discount only applies to new students.
If you cancel your booking within seven days of registering, with the exception of the week of the course itself, you may request a full refund (including deposit). After this time we retain a deposit of £75 and transfer your registration to the next available course.
|
# -*- Mode: Python; test-case-name: flumotion.test.test_bundleclient -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""bundle interface for fetching, caching and importing
"""
import os
import sys
from flumotion.common import bundle, errors, log, package
from flumotion.configure import configure
__all__ = ['BundleLoader']
__version__ = "$Rev$"
class BundleLoader(log.Loggable):
    """
    I am an object that can get and set up bundles from a PB server.

    @cvar remote: a remote reference to an avatar on the PB server.
    """
    remote = None
    _unbundler = None

    def __init__(self, callRemote):
        """
        @param callRemote: callable used to invoke remote methods on the
                           PB server ('getBundleSums', 'getBundleZips').
        @type callRemote: callable
        """
        self.callRemote = callRemote
        # Bundles are extracted under the configured cache directory.
        self._unbundler = bundle.Unbundler(configure.cachedir)

    def getBundles(self, **kwargs):
        # FIXME: later on, split out this method into getBundles which does
        # not call registerPackagePath, and setupBundles which calls getBundles
        # and register. Then change getBundles calls to setupBundles.
        """
        Get, extract and register all bundles needed.
        Either one of bundleName, fileName or moduleName should be specified
        in **kwargs, which should be strings or lists of strings.
        @returns: a deferred firing a a list of (bundleName, bundlePath)
                  tuples, with lowest dependency first.
                  bundlePath is the directory to register
                  for this package.
        """
        def annotated(d, *extraVals):
            # Append extraVals to the deferred's result so the next
            # callback receives one (result, extraVal1, ...) tuple.
            def annotatedReturn(ret):
                return (ret, ) + extraVals
            d.addCallback(annotatedReturn)
            return d

        def getZips(sums):
            # sums is a list of name, sum tuples, highest to lowest
            # figure out which bundles we're missing
            toFetch = []
            for name, md5 in sums:
                path = os.path.join(configure.cachedir, name, md5)
                if os.path.exists(path):
                    self.log('%s is up to date', name)
                else:
                    self.log('%s needs fetching', name)
                    # FIXME: We cannot be completelly sure the bundle has the
                    # correct content only by checking that the directory exists.
                    # The worker/manager could have died during a download leaving
                    # the package incomplete.
                    toFetch.append(name)
            if toFetch:
                # Fetch the missing zips remotely, carrying toFetch/sums along.
                return annotated(self.callRemote('getBundleZips', toFetch),
                                 toFetch, sums)
            else:
                # Nothing to fetch; mimic the (zips, toFetch, sums) shape.
                return {}, [], sums

        def unpackAndRegister((zips, toFetch, sums)):
            for name in toFetch:
                if name not in zips:
                    msg = "Missing bundle %s was not received"
                    self.warning(msg, name)
                    raise errors.NoBundleError(msg % name)
                b = bundle.Bundle(name)
                b.setZip(zips[name])
                path = self._unbundler.unbundle(b)
            # register all package paths; to do so we need to reverse sums
            sums.reverse()
            ret = []
            for name, md5 in sums:
                self.log('registerPackagePath for %s' % name)
                path = os.path.join(configure.cachedir, name, md5)
                if not os.path.exists(path):
                    self.warning("path %s for bundle %s does not exist",
                                 path, name)
                else:
                    package.getPackager().registerPackagePath(path, name)
                ret.append((name, path))
            return ret

        # get sums for all bundles we need
        d = self.callRemote('getBundleSums', **kwargs)
        d.addCallback(getZips)
        d.addCallback(unpackAndRegister)
        return d

    def loadModule(self, moduleName):
        """
        Load the module given by name.
        Sets up all necessary bundles to be able to load the module.

        @rtype: L{twisted.internet.defer.Deferred}
        @returns: a deferred that will fire when the given module is loaded,
                  giving the loaded module.
        """
        def gotBundles(bundles):
            self.debug('Got bundles %r', bundles)
            # load up the module and return it
            __import__(moduleName, globals(), locals(), [])
            self.log('loaded module %s', moduleName)
            return sys.modules[moduleName]

        self.debug('Loading module %s', moduleName)
        # get sums for all bundles we need
        d = self.getBundles(moduleName=moduleName)
        d.addCallback(gotBundles)
        return d

    def getBundleByName(self, bundleName):
        """
        Get the given bundle locally.

        @rtype: L{twisted.internet.defer.Deferred}
        @returns: a deferred returning the absolute path under which the
                  bundle is extracted.
        """
        def gotBundles(bundles):
            # The requested bundle is the last (least-depended-on) entry.
            name, path = bundles[-1]
            assert name == bundleName
            self.debug('Got bundle %s in %s', bundleName, path)
            return path

        self.debug('Getting bundle %s', bundleName)
        d = self.getBundles(bundleName=bundleName)
        d.addCallback(gotBundles)
        return d

    def getFile(self, fileName):
        """
        Do everything needed to get the given bundled file.

        @returns: a deferred returning the absolute path to a local copy
                  of the given file.
        """
        def gotBundles(bundles):
            name, bundlePath = bundles[-1]
            path = os.path.join(bundlePath, fileName)
            if not os.path.exists(path):
                self.warning("path %s for file %s does not exist",
                             path, fileName)
            return path

        self.debug('Getting file %s', fileName)
        d = self.getBundles(fileName=fileName)
        d.addCallback(gotBundles)
        return d
|
Hi! We're Katy & Albert, a husband-and-wife team and the brains behind Fresh Out of Ink. We fell in love via snail mail when we were 14 years old - hence our love for paper and heartfelt, handwritten messages. We started Fresh Out of Ink in 2016 to spread the joy we felt to others.
We work together to come up with the fun, unique ideas that make up our collection, and Katy hand-draws and illustrates every quirky, colorful product. When we're not creating, we live in Dallas, Texas and spend our time drinking coffee, laughing with friends, and snuggling with our cats: Sushi & Queso.
Broadway musicals, Starbucks, small fuzzy animals, weddings, the color teal, & pretty things.
When people give away the endings to books, movies, or tv shows. Seriously, people, stop it with the spoilers.
guitar, TOMS shoes, puns, fountain pens, & haikus.
When guys answer their phones in the bathroom. Do girls do this too? Please, please stop.
|
#
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from gevent import monkey
monkey.patch_all()
import os
import sys
import socket
import subprocess
import json
import time
import datetime
import platform
import select
import gevent
import ConfigParser
from nodemgr.common.event_manager import EventManager
from ConfigParser import NoOptionError
from supervisor import childutils
from pysandesh.sandesh_base import *
from pysandesh.sandesh_session import SandeshWriter
from pysandesh.gen_py.sandesh_trace.ttypes import SandeshTraceRequest
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, NodeTypeNames,\
Module2NodeType, INSTANCE_ID_DEFAULT, SERVICE_CONTRAIL_DATABASE, \
RepairNeededKeyspaces
from subprocess import Popen, PIPE
from StringIO import StringIO
from database.sandesh.database.ttypes import \
NodeStatusUVE, NodeStatus, DatabaseUsageStats,\
DatabaseUsageInfo, DatabaseUsage
from database.sandesh.database.process_info.ttypes import \
ProcessStatus, ProcessState, ProcessInfo, DiskPartitionUsageStats
from database.sandesh.database.process_info.constants import \
ProcessStateNames
class DatabaseEventManager(EventManager):
    """Event manager for the contrail-database node.

    Extends EventManager with database-specific monitoring: analytics-db
    disk usage, cassandra liveness probing (via cassandra-cli), periodic
    repair scheduling, and the supervisor event-listener loop.
    """

    def __init__(self, rule_file, discovery_server,
                 discovery_port, collector_addr,
                 hostip, minimum_diskgb, cassandra_repair_interval):
        EventManager.__init__(
            self, rule_file, discovery_server,
            discovery_port, collector_addr)
        self.node_type = "contrail-database"
        self.module = Module.DATABASE_NODE_MGR
        self.module_id = ModuleNames[self.module]
        self.hostip = hostip                  # IP used for cassandra-cli probe
        self.minimum_diskgb = minimum_diskgb  # minimum total GB before cassandra is stopped
        self.cassandra_repair_interval = cassandra_repair_interval  # in hours
        self.supervisor_serverurl = "unix:///tmp/supervisord_database.sock"
        self.add_current_process()
    # end __init__

    def process(self):
        """Load monitoring rules, initialize sandesh, and verify the
        analytics-db partition meets the minimum size requirement,
        stopping cassandra when it does not."""
        # Bug fix: was "self.rule_file is ''" -- an identity test that only
        # works by CPython string-interning accident; compare by value.
        if self.rule_file == '':
            self.rule_file = "/etc/contrail/" + \
                "supervisord_database_files/contrail-database.rules"
        # Fix: close the rules file instead of leaking the descriptor.
        with open(self.rule_file) as json_file:
            self.rules_data = json.load(json_file)
        node_type = Module2NodeType[self.module]
        node_type_name = NodeTypeNames[node_type]
        _disc = self.get_discovery_client()
        sandesh_global.init_generator(
            self.module_id, socket.gethostname(), node_type_name,
            self.instance_id, self.collector_addr, self.module_id, 8103,
            ['database.sandesh'], _disc)
        # sandesh_global.set_logging_params(enable_local_log=True)
        self.sandesh_global = sandesh_global
        try:
            (linux_dist, x, y) = platform.linux_distribution()
            # cassandra.yaml lives in a different place on Ubuntu vs RHEL.
            if (linux_dist == 'Ubuntu'):
                popen_cmd = "grep -A 1 'data_file_directories:'" + \
                    " /etc/cassandra/cassandra.yaml | grep '-' | cut -d'-' -f2"
            else:
                popen_cmd = "grep -A 1 'data_file_directories:'" + \
                    " /etc/cassandra/conf/cassandra.yaml | grep '-' | cut -d'-' -f2"
            (cassandra_data_dir, error_value) = \
                Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
            cassandra_data_dir = cassandra_data_dir.strip()
            analytics_dir = cassandra_data_dir + '/ContrailAnalytics'
            if os.path.exists(analytics_dir):
                self.stderr.write("analytics_dir is " + analytics_dir + "\n")
                # df -Pk $3 = used 1K-blocks, summed over matching rows.
                popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$3}END{print s}'` && echo $1"
                self.stderr.write("popen_cmd is " + popen_cmd + "\n")
                (disk_space_used, error_value) = \
                    Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
                # df -Pk $4 = available 1K-blocks.
                popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$4}END{print s}'` && echo $1"
                self.stderr.write("popen_cmd is " + popen_cmd + "\n")
                (disk_space_available, error_value) = \
                    Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
                # du -skL = analytics db footprint in 1K-blocks.
                popen_cmd = "set `du -skL " + analytics_dir + " | awk '{s+=$1}END{print s}'` && echo $1"
                self.stderr.write("popen_cmd is " + popen_cmd + "\n")
                (analytics_db_size, error_value) = \
                    Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
                disk_space_total = int(disk_space_used) + int(disk_space_available)
                if (disk_space_total / (1024 * 1024) < self.minimum_diskgb):
                    # Partition too small for a database node: stop cassandra
                    # and flag the disk-space failure.
                    cmd_str = "service " + SERVICE_CONTRAIL_DATABASE + " stop"
                    (ret_value, error_value) = Popen(
                        cmd_str, shell=True, stdout=PIPE).communicate()
                    self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE
                self.fail_status_bits &= ~self.FAIL_STATUS_DISK_SPACE_NA
            else:
                self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            sys.stderr.write("Failed to get database usage" + "\n")
            self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA

    def send_process_state_db(self, group_names):
        """Send process-state UVEs for the given supervisor groups."""
        self.send_process_state_db_base(
            group_names, ProcessInfo, NodeStatus, NodeStatusUVE)

    def send_nodemgr_process_status(self):
        """Publish this nodemgr's own process status."""
        self.send_nodemgr_process_status_base(
            ProcessStateNames, ProcessState, ProcessStatus,
            NodeStatus, NodeStatusUVE)

    def get_process_state(self, fail_status_bits):
        """Map fail-status bits to a process state."""
        return self.get_process_state_base(
            fail_status_bits, ProcessStateNames, ProcessState)

    def get_failbits_nodespecific_desc(self, fail_status_bits):
        """Build a human-readable description of database failure bits."""
        description = ""
        if fail_status_bits & self.FAIL_STATUS_DISK_SPACE:
            description += "Disk for analytics db is too low," + \
                " cassandra stopped."
        if fail_status_bits & self.FAIL_STATUS_SERVER_PORT:
            if description != "":
                description += " "
            description += "Cassandra state detected DOWN."
        if fail_status_bits & self.FAIL_STATUS_DISK_SPACE_NA:
            description += "Disk space for analytics db not retrievable."
        return description

    def database_periodic(self):
        """Per-TICK_60 check: publish analytics-db usage stats, probe
        cassandra liveness, and record cluster status."""
        try:
            (linux_dist, x, y) = platform.linux_distribution()
            if (linux_dist == 'Ubuntu'):
                popen_cmd = "grep -A 1 'data_file_directories:'" + \
                    " /etc/cassandra/cassandra.yaml | grep '-' | cut -d'-' -f2"
            else:
                popen_cmd = "grep -A 1 'data_file_directories:'" + \
                    " /etc/cassandra/conf/cassandra.yaml | grep '-' | cut -d'-' -f2"
            (cassandra_data_dir, error_value) = \
                Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
            cassandra_data_dir = cassandra_data_dir.strip()
            analytics_dir = cassandra_data_dir + '/ContrailAnalytics'
            if os.path.exists(analytics_dir):
                popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$3}END{print s}'` && echo $1"
                (disk_space_used, error_value) = \
                    Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
                popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$4}END{print s}'` && echo $1"
                (disk_space_available, error_value) = \
                    Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
                popen_cmd = "set `du -skL " + analytics_dir + " | awk '{s+=$1}END{print s}'` && echo $1"
                (analytics_db_size, error_value) = \
                    Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
                self.fail_status_bits &= ~self.FAIL_STATUS_DISK_SPACE_NA
                # Package the measurements into a usage UVE and send it.
                db_stat = DatabaseUsageStats()
                db_info = DatabaseUsageInfo()
                db_stat.disk_space_used_1k = int(disk_space_used)
                db_stat.disk_space_available_1k = int(disk_space_available)
                db_stat.analytics_db_size_1k = int(analytics_db_size)
                db_info.name = socket.gethostname()
                db_info.database_usage = [db_stat]
                usage_stat = DatabaseUsage(data=db_info)
                usage_stat.send()
            else:
                self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
        except Exception:
            # Narrowed from a bare "except:".
            sys.stderr.write("Failed to get database usage" + "\n")
            self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
        # Probe cassandra's client port; "Connected to:" appears only on a
        # successful connection, so a non-zero grep exit means DOWN.
        cassandra_cli_cmd = "cassandra-cli --host " + self.hostip + \
            " --batch < /dev/null | grep 'Connected to:'"
        proc = Popen(cassandra_cli_cmd, shell=True, stdout=PIPE, stderr=PIPE)
        (output, errout) = proc.communicate()
        if proc.returncode != 0:
            self.fail_status_bits |= self.FAIL_STATUS_SERVER_PORT
        else:
            self.fail_status_bits &= ~self.FAIL_STATUS_SERVER_PORT
        self.send_nodemgr_process_status()
        # Record cluster status and shut down cassandra if needed
        subprocess.Popen(["contrail-cassandra-status",
                          "--log-file", "/var/log/cassandra/status.log",
                          "--debug"])
    # end database_periodic

    def cassandra_repair(self):
        """Kick off a background cassandra repair run."""
        subprocess.Popen(["contrail-cassandra-repair",
                          "--log-file", "/var/log/cassandra/repair.log",
                          "--debug"])
    # end cassandra_repair

    def send_disk_usage_info(self):
        """Publish disk-partition usage for this node."""
        self.send_disk_usage_info_base(
            NodeStatusUVE, NodeStatus, DiskPartitionUsageStats)

    def runforever(self, test=False):
        """Main supervisor-listener loop: dispatch process-state,
        communication and TICK_60 events until the process is killed."""
        prev_current_time = int(time.time())
        while True:
            # we explicitly use self.stdin, self.stdout, and self.stderr
            # instead of sys.* so we can unit test this code
            headers, payload = self.listener_nodemgr.wait(
                self.stdin, self.stdout)
            # self.stderr.write("headers:\n" + str(headers) + '\n')
            # self.stderr.write("payload:\n" + str(payload) + '\n')
            pheaders, pdata = childutils.eventdata(payload + '\n')
            # self.stderr.write("pheaders:\n" + str(pheaders)+'\n')
            # self.stderr.write("pdata:\n" + str(pdata))
            # check for process state change events
            if headers['eventname'].startswith("PROCESS_STATE"):
                self.event_process_state(pheaders, headers)
            # check for flag value change events
            if headers['eventname'].startswith("PROCESS_COMMUNICATION"):
                self.event_process_communication(pdata)
            # do periodic events
            if headers['eventname'].startswith("TICK_60"):
                self.database_periodic()
                prev_current_time = self.event_tick_60(prev_current_time)
                # Perform nodetool repair every cassandra_repair_interval hours
                if self.tick_count % (60 * self.cassandra_repair_interval) == 0:
                    self.cassandra_repair()
            self.listener_nodemgr.ok(self.stdout)
|
It’s time to achieve the results you’ve always wanted!
Gastric Balloon, Lap-Band® Surgery, Sleeve Gastrectomy, Gastric Bypass, Reversible Sleeve, Revisional Surgery and Conversion Surgery. Each procedure is designed to provide those who’ve struggled with weight loss through diet and exercise an effective method for achieving results. Choosing to undergo bariatric weight loss surgery is one of the biggest decisions you can make. Coupled with a commitment to making permanent changes in your diet, it can generate life-changing improvements to your physical and psychological health.
If you’re 100 pounds over your ideal body weight – the weight at which you’re likely to live the longest – you may be a candidate. Surgery might also be a consideration if your Body Mass Index (BMI) is greater than 35.
Determining your ideal weight loss surgery option depends on a number of factors.
To get a true understanding of what’s most suitable for your personal needs, we encourage you to make an appointment today for a free in-office consultation.
"I have so much more energy and self-esteem. You begin noticing the smallest things, like being able to cross your legs while sitting, not getting winded trying to tie your shoes and being able to wear a belt again. I no longer have sleep apnea and have given up the CPAP machine. I am no longer in the pre-diabetes stage and have given up pricking my finger every day. My blood pressure and cholesterol levels are coming down and I look forward to giving up those medications soon. "
"It took me one year and three months to go from a size 26 to a size 8/10! Not only did I lose my “evil twin” as I put it (128 pounds), but I gained self-esteem and self-worth. I was finally the person I wanted to be."
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Use setuptools when available; fall back to plain distutils otherwise.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Long description = README plus changelog (".. :changelog:" marker stripped).
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

# Runtime dependencies.
requirements = [
    # TODO: put package requirements here
]

# Dependencies needed only to run the test suite.
test_requirements = [
]

setup(
    name='porter2stemmer',
    version='1.0',
    description="An implementation of the Porter2 English stemming algorithm.",
    long_description=readme + '\n\n' + history,
    author="Evan Dempsey",
    author_email='me@evandempsey.io',
    url='https://github.com/evandempsey/porter2-stemmer',
    packages=[
        'porter2stemmer',
    ],
    package_dir={'porter2stemmer':
                 'porter2stemmer'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='porter2stemmer',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
|
We report the generation of asymmetric Mathieu optical lattices: invariant intensity optical profiles that can be described by three parameters. The first describes the amount of ellipticity, the second takes into account the degree of asymmetry of the profile, and the third denotes the angular position where the respective asymmetry is localized. We propose a simple angular spectrum to generate these nondiffracting beams, and we report how their distribution of power and orbital angular momentum changes as a function of their ellipticity and degree of asymmetry. We confirm the existence of these invariant beams by propagation in an experimental setup.
Barcelo-Chong Arturo, Estrada-Potrillo Brian, Canales-Benavides Arturo, Lopez Aguayo Servando. Asymmetric Mathieu optical lattices[J]. Chinese Optics Letters, 2018, 16(12): 12.
|
from __future__ import unicode_literals
from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import LParen, RParen
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as symbols
from lib2to3.pytree import Leaf, Node
from textwrap import TextWrapper
from .utils import (tuplize_comments, get_quotes, wrap_leaves,
first_child_leaf, find_indentation, IS_26, add_leaves_method)
# PEP 8 line-length limit enforced by this fixer.
MAX_CHARS = 79
# Bracket tokens used to track whether a line break lands inside brackets.
OPENING_TOKENS = [token.LPAR, token.LSQB, token.LBRACE]
CLOSING_TOKENS = [token.RPAR, token.RSQB, token.RBRACE]
# Compound statements whose trailing COLON may absorb the NEWLINE token.
SYMBOLS_WITH_NEWLINES_IN_COLONS = [symbols.funcdef, symbols.classdef,
    symbols.if_stmt, symbols.for_stmt, symbols.while_stmt, symbols.lambdef,
    symbols.try_stmt, symbols.with_stmt]
class FixMaximumLineLength(BaseFix):
'''
Limit all lines to a maximum of 79 characters.
There are still many devices around that are limited to 80 character
lines; plus, limiting windows to 80 characters makes it possible to have
several windows side-by-side. The default wrapping on such devices looks
ugly. Therefore, please limit all lines to a maximum of 79 characters.
For flowing long blocks of text (docstrings or comments), limiting the
length to 72 characters is recommended.
'''
explicit = True # The user must ask for this fixer
def match(self, node):
    """Match NEWLINE nodes (and COLON nodes of compound statements, which
    can swallow the newline) whose line or prefix exceeds MAX_CHARS."""
    if (node.type in [token.NEWLINE] or node.type == token.COLON and
            node.parent.type in SYMBOLS_WITH_NEWLINES_IN_COLONS):
        # Sometimes the newline is wrapped into the next node, so we need
        # to check the colons also.
        if self.need_to_check_node(node):
            # For colon nodes, we need to add the len of the colon also
            return True
        if any(len(line) > MAX_CHARS for line in node.prefix.split('\n')):
            # There is a line in the prefix greater than MAX_CHARS
            return True
    return False
def transform(self, node, results):
    """Split the over-long previous sibling (statement or docstring) and
    re-wrap any over-long comment prefix carried by this node."""
    if self.node_needs_splitting(node):
        node_to_split = node.prev_sibling
        if node_to_split.type == token.STRING:
            self.fix_docstring(node_to_split)
        else:
            if isinstance(node_to_split, Leaf):
                # Splitting works on whole statements, not single leaves.
                node_to_split = node_to_split.parent
            combined_prefix = self.fix_leaves(node_to_split)
            if combined_prefix:
                # Re-attach inline comments collected during the split.
                node.prefix = "%s\n%s" % (node.prefix,
                                          combined_prefix.rstrip())
    if (any(len(line) > MAX_CHARS for line in node.prefix.split('\n')) or
            node.prefix.count("#") and node.column + len(node.prefix) >
            MAX_CHARS):
        # Need to fix the prefix
        self.fix_prefix(node)
@staticmethod
def need_to_check_node(node):
    # Returns True if the node or its docstring might need to be split;
    # falls through (returning None, falsy) otherwise.
    if IS_26:
        # Python 2.6's pytree lacks leaves(); patch it in.
        node = add_leaves_method(node)
    if node.column > MAX_CHARS:
        return True
    if (node.type == token.COLON
            and node.column + len(node.value) > MAX_CHARS):
        return True
    if node.prev_sibling and any(child.column + len(child.value)
            > MAX_CHARS for child in node.prev_sibling.leaves()):
        return True
@staticmethod
def node_needs_splitting(node):
    # Decide whether the node's previous sibling (the actual statement)
    # has any leaf extending past MAX_CHARS.
    if not node.prev_sibling:
        return False
    if IS_26:
        node = add_leaves_method(node)
    # The NEWLINE/COLON node's column includes its prefix; subtract the
    # prefix (and value, for colons) to get the true end of the line.
    if node.type == token.NEWLINE:
        node_length = len(node.prefix)
    elif node.type == token.COLON:
        node_length = len(node.prefix) - len(node.value)
    if node.type in [token.NEWLINE, token.COLON]:
        if node.column - node_length > MAX_CHARS:
            return True
    for child in node.prev_sibling.leaves():
        if child.type == token.STRING:
            # NOTE(review): splits node.value, not child.value -- looks
            # suspicious for multi-line strings; confirm intent.
            lines = node.value.split('\n')
            if child.column + len(lines.pop(0)) > MAX_CHARS:
                return True
            elif any(len(line) > MAX_CHARS for line in lines):
                return True
        elif child.column + len(child.value) > MAX_CHARS:
            return True
def fix_prefix(self, node):
    """Re-wrap over-long comment lines stored in the node's prefix."""
    before_comments, comments, after_comments = tuplize_comments(node.prefix)
    # Combine all comment lines together
    all_comments = ' '.join([line.replace('#', '', 1).lstrip() for line
                             in comments.split('\n')])
    # It's an inline comment if it has no newlines
    is_inline_comment = not node.prefix.count('\n')
    initial_indent_level = comments.find('#')
    if initial_indent_level == -1:
        # No comment present: nothing to wrap.
        split_lines = ['']
    else:
        if is_inline_comment and node.prev_sibling:
            # If inline comment, find where the prev sibling started to
            # know how to indent lines
            initial_indent_level = (first_child_leaf(node.prev_sibling).
                                    column)
        indent = '%s# ' % (' ' * initial_indent_level)
        wrapper = TextWrapper(width=MAX_CHARS, initial_indent=indent,
                              subsequent_indent=indent)
        split_lines = wrapper.wrap(all_comments)
        if is_inline_comment:
            # If inline comment is too long, we'll move it to the next line
            split_lines[0] = "\n%s" % split_lines[0]
        else:
            # We need to add back a newline that was lost above
            after_comments = "\n%s" % after_comments
    new_prefix = '%s%s%s' % (before_comments, '\n'.join(split_lines),
                             after_comments.lstrip(' '))
    # Append the trailing spaces back
    if node.prefix != new_prefix:
        node.prefix = new_prefix
        node.changed()
def fix_docstring(self, node_to_split):
    """Wrap an over-long string literal (docstring) across lines,
    parenthesizing and re-quoting when it is not triple-quoted."""
    # docstrings
    quote_start, quote_end = get_quotes(node_to_split.value)
    max_length = MAX_CHARS - node_to_split.column
    triple_quoted = quote_start.count('"""') or quote_start.count("'''")
    comment_indent = ' ' * (4 + node_to_split.column)
    if not triple_quoted:
        # If it's not tripled-quoted, we need to start and end each line
        # with quotes
        comment_indent = '%s%s' % (comment_indent, quote_start)
        # Since we will be appending the end_quote after each line after
        # the splitting
        max_length -= len(quote_end)
        # If it's not triple quoted, we need to paren it
        node_to_split.value = "(%s)" % node_to_split.value
    wrapper = TextWrapper(width=max_length,
                          subsequent_indent=comment_indent)
    split_lines = wrapper.wrap(node_to_split.value)
    if not triple_quoted:
        # If it's not triple quoted, we need to close each line except for
        # the last one
        new_split_lines = []
        for index, line in enumerate(split_lines):
            if index != len(split_lines) - 1:
                new_split_lines.append("%s%s" % (line, quote_end))
            else:
                new_split_lines.append(line)
        split_lines = new_split_lines
    # Rebuild the string as alternating STRING/NEWLINE leaves.
    new_nodes = [Leaf(token.STRING, split_lines.pop(0))]
    for line in split_lines:
        new_nodes.extend([Leaf(token.NEWLINE, '\n'), Leaf(token.STRING,
                          line)])
    node_to_split.replace(new_nodes)
    node_to_split.changed()
def fix_leaves(self, node_to_split):
    """Split an over-long statement's leaves across lines, adding parens
    when a break is not already inside brackets.  Returns any inline
    comments collected so the caller can re-attach them."""
    if IS_26:
        node_to_split = add_leaves_method(node_to_split)
    parent_depth = find_indentation(node_to_split)
    new_indent = "%s%s" % (' ' * 4, parent_depth)
    # For now, just indent additional lines by 4 more spaces
    child_leaves = []
    combined_prefix = ""
    prev_leaf = None
    for index, leaf in enumerate(node_to_split.leaves()):
        if index and leaf.prefix.count('#'):
            # Collect inline comments so they survive the re-wrap.
            if not combined_prefix:
                combined_prefix = "%s#" % new_indent
            combined_prefix += leaf.prefix.split('#')[-1]
        # We want to strip all newlines so we can properly insert newlines
        # where they should be
        if leaf.type != token.NEWLINE:
            if leaf.prefix.count('\n') and index:
                # If the line contains a newline, we need to strip all
                # whitespace since there were leading indent spaces
                if (prev_leaf and prev_leaf.type in [token.DOT, token.LPAR]
                        or leaf.type in [token.RPAR]):
                    leaf.prefix = ""
                else:
                    leaf.prefix = " "
            # Append any trailing inline comments to the combined
            # prefix
            child_leaves.append(leaf)
            prev_leaf = leaf
    # Like TextWrapper, but for nodes. We split on MAX_CHARS - 1 since we
    # may need to insert a leading parenth. It's not great, but it would be
    # hard to do properly.
    split_leaves = wrap_leaves(child_leaves, width=MAX_CHARS - 1,
                               subsequent_indent=new_indent)
    new_node = Node(node_to_split.type, [])
    # We want to keep track of if we are breaking inside a parenth
    open_count = 0
    need_parens = False
    for line_index, curr_line_nodes in enumerate(split_leaves):
        for node_index, curr_line_node in enumerate(curr_line_nodes):
            if line_index and not node_index:
                # If first node in non-first line, reset prefix since there
                # may have been spaces previously
                curr_line_node.prefix = new_indent
            new_node.append_child(curr_line_node)
            if curr_line_node.type in OPENING_TOKENS:
                open_count += 1
            if curr_line_node.type in CLOSING_TOKENS:
                open_count -= 1
        if line_index != len(split_leaves) - 1:
            # Don't add newline at the end since it is part of the next
            # sibling
            new_node.append_child(Leaf(token.NEWLINE, '\n'))
            # Checks if we ended a line without being surrounded by parens
            if open_count <= 0:
                need_parens = True
    if need_parens:
        # Parenthesize the parent if we're not inside parenths, braces,
        # brackets, since we inserted newlines between leaves
        parenth_before_equals = Leaf(token.EQUAL, "=") in split_leaves[0]
        self.parenthesize_parent(new_node, parenth_before_equals)
    node_to_split.replace(new_node)
    return combined_prefix
def parenthesize_parent(self, node_to_split, parenth_before_equals):
    """Dispatch to the parenthesizing strategy for the node's grammar type."""
    if node_to_split.type == symbols.print_stmt:
        self.parenthesize_print_stmt(node_to_split)
    elif node_to_split.type == symbols.return_stmt:
        self.parenthesize_after_arg(node_to_split, "return")
    elif node_to_split.type == symbols.expr_stmt:
        if parenth_before_equals:
            # Wrap only the right-hand side of the assignment.
            self.parenthesize_after_arg(node_to_split, "=")
        else:
            self.parenthesize_expr_stmt(node_to_split)
    elif node_to_split.type == symbols.import_from:
        self.parenthesize_after_arg(node_to_split, "import")
    elif node_to_split.type in [symbols.power, symbols.atom]:
        self.parenthesize_call_stmt(node_to_split)
    elif node_to_split.type in [symbols.or_test, symbols.and_test, symbols
            .not_test, symbols.test, symbols.arith_expr, symbols.comparison]:
        self.parenthesize_test(node_to_split)
    elif node_to_split.type == symbols.parameters:
        # Parameters are always parenthesized already
        pass
def parenthesize_test(self, node_to_split):
first_child = node_to_split.children[0]
if first_child != LParen():
# node_to_split.children[0] is the "print" literal strip the
# current 1st child, since we will be prepending an LParen
if first_child.prefix != first_child.prefix.strip():
first_child.prefix = first_child.prefix.strip()
first_child.changed()
left_paren = LParen()
left_paren.prefix = " "
node_to_split.insert_child(0, left_paren)
node_to_split.append_child(RParen())
node_to_split.changed()
def parenthesize_print_stmt(self, node_to_split):
# print "hello there"
# return a, b
second_child = node_to_split.children[1]
if second_child != LParen():
# node_to_split.children[0] is the "print" literal strip the
# current 1st child, since we will be prepending an LParen
if second_child.prefix != second_child.prefix.strip():
second_child.prefix = second_child.prefix.strip()
second_child.changed()
node_to_split.insert_child(1, LParen())
node_to_split.append_child(RParen())
node_to_split.changed()
def parenthesize_after_arg(self, node_to_split, value):
# parenthesize the leaves after the first node with the value
value_index = 0
for index, child in enumerate(node_to_split.children):
if child.value == value:
value_index = index + 1
break
value_child = node_to_split.children[value_index]
if value_child != LParen():
# strip the current 1st child, since we will be prepending an
# LParen
if value_child.prefix != value_child.prefix.strip():
value_child.prefix = value_child.prefix.strip()
value_child.changed()
# We set a space prefix since this is after the '='
left_paren = LParen()
left_paren.prefix = " "
node_to_split.insert_child(value_index, left_paren)
node_to_split.append_child(RParen())
node_to_split.changed()
def parenthesize_expr_stmt(self, node_to_split):
# x = "foo" + bar
if node_to_split.children[0] != LParen():
node_to_split.insert_child(0, LParen())
node_to_split.append_child(RParen())
node_to_split.changed()
def parenthesize_call_stmt(self, node_to_split):
# a.b().c()
first_child = node_to_split.children[0]
if first_child != LParen():
# Since this can be at the beginning of a line, we can't just
# strip the prefix, we need to keep leading whitespace
first_child.prefix = "%s(" % first_child.prefix
first_child.changed()
node_to_split.append_child(RParen())
node_to_split.changed()
|
the last geek bus home: Browncoats: Redemption, Dragon*Con, Knights of the Guild and writerly news - too much awesome!
Browncoats: Redemption, Dragon*Con, Knights of the Guild and writerly news - too much awesome!
Are you going to Dragon*Con?
Michael Dougherty, co-creator of the new fan-made film set in the Serenity/Firefly universe, gave me an interview for Whedonage which went up today. They have finished principal photography and are racing towards the deadline for the film to be ready for Dragon*Con 2010, but on Sunday they will be unveiling the trailer. The photos and videos from the set look stunning and I wish I could be there to see it! Full details are at the end of the article and you can also follow them on Facebook for all the current news.
Panelists Include: Sandeep Parikh (Creator / Co-Writer and Director, The Legend of Neil), Tony Janning (Star (Neil) and Co-Writer, The Legend of Neil)"
Why did it have to be the Unicorn Theatre? Haven't they suffered enough?!? If you don't know what I'm talking about then you need to head over to The Guild Forums and join the Axis of Unicorns and help the Unicorn Defense League protect these noble creatures from the jaws of unscrupulous Guildies!
"Quest for the Music Video" shows how hard that Kenny and Jenni work to bring us all the news and behind-the-scenes information we crave about The Guild. In this latest installment there is a great interview with Greg Aronowitz who created the wonderful weapons the cast sport in the "Do you wanna date my Avatar" video AND the reveal what the filming at the video was like. Kenny and Jenni don't just host the podcast, Kenny is the official behind-the-scenes photographer who has shot all the amazing BTS video and pictures used on The Guild DVDs and Jenni has a myriad of jobs - from Felicia's PA to Head Production Assistant to gold coin counter - and everything in between. There are also lots of feedback comments from listeners (both written and audio), which is really great to hear. It's very rewarding when people take the time to leave a comment.
And in all the excitement I had some pretty exciting news myself today. I've been podcasting "Turning Left at Albuquerque" for a few weeks now on turningleft.podbean.com. We're up to chapter 50 now, so if you haven't caught up yet please check it out. This week we posted a new promo instead of a new omnibus and we also had a request of listeners asking for some feedback. It doesn't take much to write a comment and you could hear it read out by Lord Poncenby! Or you could send an audio comment to turningleftpodcast at gmail dot com.
And then, today, I had an email from an agent who is interested in my new super-secret project asking me for a partial submission. This is a big deal you guys! I only queried a few agents for this new (non-fiction) project and I am very excited that this agency wants to hear more because I think they will really 'get' it. It doesn't mean it's in the bag, and it might all come to nothing, but it is a great first step. So YAY!
Yes, please support the KOTG so Jenni doesn't have to take off her top.
:) She said that that would be the obvious next step and she's a determined person!
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Dartlang module"""
from contextlib import suppress
from gettext import gettext as _
import logging
import os
import platform
import re
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.tools import add_env_to_user
from umake.ui import UI
logger = logging.getLogger(__name__)
_supported_archs = ['i386', 'amd64']
class DartCategory(umake.frameworks.BaseCategory):
    """Umake category grouping the Dart language framework installers."""
    def __init__(self):
        super().__init__(name="Dart", description=_("Dartlang Development Environment"), logo_path=None)
class DartLangEditorRemoval(umake.frameworks.baseinstaller.BaseInstaller):
    """Removal-only entry for the deprecated Dart Editor bundle.

    The Dart Editor is no longer supported upstream, so this installer is
    registered with only_for_removal=True: umake can clean up an existing
    installation but will never install it.
    """
    def __init__(self, category):
        # Fixed user-facing typo: "anyymore" -> "anymore".
        super().__init__(name="Dart Editor", description=_("Dart SDK with editor (not supported upstream anymore)"),
                         download_page=None, category=category, only_on_archs=_supported_archs, only_for_removal=True)
class DartLang(umake.frameworks.baseinstaller.BaseInstaller):
    """Installer for the plain Dart SDK tarball (the category default)."""

    def __init__(self, category):
        super().__init__(name="Dart SDK", description=_("Dart SDK (default)"), is_category_default=True,
                         category=category, only_on_archs=_supported_archs,
                         download_page="https://www.dartlang.org/downloads/linux.html",
                         dir_to_decompress_in_tarball="dart-sdk")

    def parse_download_link(self, line, in_download):
        """Parse Dart Lang download link, expect to find a url"""
        bits = '32' if platform.machine() == 'i686' else '64'
        pattern = r'<a data-bits="{}" data-os="linux" data-tool="sdk".*href="(.*)">'.format(bits)
        match = re.search(pattern, line)
        with suppress(AttributeError):
            # match is None when this line has no download link; .group()
            # then raises AttributeError and we fall through to the miss.
            return ((match.group(1), None), True)
        return ((None, None), False)

    def post_install(self):
        """Add the Dart SDK bin directory to the user PATH."""
        add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}})
        UI.delayed_display(DisplayMessage(_("You need to restart a shell session for your installation to work")))

    @property
    def is_installed(self):
        """True when the base checks pass and the dart binary is present."""
        if not super().is_installed:
            return False
        if not os.path.isfile(os.path.join(self.install_path, "bin", "dart")):
            logger.debug("{} binary isn't installed".format(self.name))
            return False
        return True
|
I've been in the studio today and I have a new card to share with you. This is one of the designs we created at the Tall Oaks stamp camp Monday night and the ladies loved it. It uses one of the most beautiful products from the New Occasions catalog, the "Bloomin' Heart" Thinlits Dies. One of the bonuses is that it can be bundled with the adorable, "Bloomin' Love" Stamp Set with a 15% discount.
Can you see those gorgeous, sparkly pink flowers?? Those are made with our newest Glimmer Paper that now comes in "Blushing Bride". It is positively yummy! It really makes this card just POP.
Okay....here are all of the products used to create this beauty. Just click on the item(s) you'd like to add to your shopping cart and you'll be taken directly to my Online Store for check out.
Have you signed up for the "Birthday Bash Marathon" yet? It's filling up so you'd better act fast!
Apple of My Eye...Orange You Sweet!
|
# Software licensed by the MIT License of Open Source (https://opensource.org/licenses/MIT)
import pyhdb
import urllib
from http.server import BaseHTTPRequestHandler, HTTPServer
# Change here the connection parameters for your HANA System
host = "0.0.0.0"
port = 30041
user = "USER"
pwsd = "PASSWORD"
# Recursive function to get all the dependencies from a view. It returns a JSON ready so the D3.js UI can render a hierarchy graph
def getDependent(view, parent, type, cursor, cache):
    """Recursively collect the dependencies of a HANA view.

    Returns a nested dict (name/parent/value/type/level/children) shaped so
    the D3.js UI can render it as a hierarchy graph.

    view -- name of the object whose dependencies are fetched
    parent -- name of the parent node ('null' for the root)
    type -- 'VIEW' or 'TABLE'; controls the node color
    cursor -- open pyhdb cursor used to query OBJECT_DEPENDENCIES
    cache -- dict memoizing query results per object name, so shared
        dependencies are only queried once
    """
    if view not in cache:
        # Escape embedded single quotes: the view name originates from an
        # HTTP query parameter and must not break out of the SQL literal.
        safe_view = view.replace("'", "''")
        sql = ('SELECT BASE_OBJECT_NAME, BASE_OBJECT_TYPE'
               ' FROM "PUBLIC"."OBJECT_DEPENDENCIES"'
               ' WHERE DEPENDENT_OBJECT_NAME = \'' + safe_view + '\''
               ' AND BASE_OBJECT_TYPE IN (\'VIEW\',\'TABLE\')'
               ' AND DEPENDENCY_TYPE = 1')
        cursor.execute(sql)
        cache[view] = cursor.fetchall()
    result = cache[view]
    node = {}
    node['name'] = view
    node['parent'] = parent
    node['value'] = 10  # standard node size for the UI
    node['type'] = 'black'  # standard color for the UI
    if type == 'VIEW':
        node['level'] = 'red'  # views are drawn red
    else:
        node['level'] = 'green'  # tables are drawn green
    if len(result) > 0:
        node['children'] = [
            getDependent(base_name, view, base_type, cursor, cache)
            for base_name, base_type in result
        ]
    print('Hierarchy processed: ', node['name'])
    return node
# Open the connection to HANA DB and saves the result in a file at the same folder
def viewHierarchy(view):
    """Query the dependency tree of *view* from HANA and dump it as JSON.

    The result is written to resultCalcViewHierarchy.json next to this
    script, where the D3.js UI fetches it.
    """
    import json  # local import: only needed for serializing the result
    connection = pyhdb.connect(host=host, port=port, user=user, password=pwsd)
    try:
        cursor = connection.cursor()
        tree = getDependent(view, 'null', 'VIEW', cursor, {})
    finally:
        # Always release the HANA connection, even if the query fails.
        connection.close()
    # json.dumps emits valid JSON even when names contain quotes, unlike
    # the previous str(dict).replace("'", '"') trick, which produced broken
    # output for any object name containing an apostrophe.
    with open('resultCalcViewHierarchy.json', 'w') as f:
        f.write(json.dumps(tree))
# If you want just wanna call the function withou the UI comment everything below and run this:
# viewHierarchy('<path>/<view>')
# Just a simple handler and HTTP Server set up
class MyHandler(BaseHTTPRequestHandler):
    """Tiny HTTP handler serving the D3.js UI and the hierarchy JSON.

    Routes:
      /calcViewHierarchy?object=<view>  -- (re)generate the JSON file
      /viewHierarchy                    -- serve the UI page
      /resultCalcViewHierarchy          -- serve the generated JSON
    """

    def do_GET(self):
        if '/calcViewHierarchy' in self.path:
            p = self.path.split("?")
            params = {}
            if len(p) > 1:
                params = urllib.parse.parse_qs(p[1], True, True)
            print('Starting Hierarchy JSON with ', params['object'][0])
            viewHierarchy(params['object'][0])
            print('Finished Hierarchy JSON')
            # Tell the client generation finished; the old code sent no
            # response at all, leaving the browser request hanging.
            self.send_response(200)
            self.end_headers()
        if '/viewHierarchy' in self.path:
            with open('viewHierarchy.html', 'rb') as f:
                self.send_response(200)
                # was 'text-html', which is not a valid MIME type
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(f.read())
        if self.path == '/resultCalcViewHierarchy':
            with open('resultCalcViewHierarchy.json', 'rb') as f:
                self.send_response(200)
                # headers must be terminated before the body is written
                self.end_headers()
                self.wfile.write(f.read())
def run():
    """Start the HTTP server on port 5000 and serve requests forever."""
    print('http server is starting...')
    server = HTTPServer(("", 5000), MyHandler)
    print('http server is running...')
    server.serve_forever()
if __name__ == '__main__':
run()
|
The Department of Housing and Urban Development (HUD) has published a final regulation that requires federally-funded emergency shelters to provide access to shelter accommodations consistent with a person’s gender identity.
This follows similar Department of Justice guidance for domestic violence shelters and other programs funded by the Violence Against Women Act (VAWA). Over 300 national, state, and local organizations advocating for victims of sexual and domestic violence have vocally supported ensuring access to facilities such as homeless shelters that is consistent with a person’s gender identity.
The new rule clarifies the 2012 HUD rule prohibiting anti-LGBT discrimination in all HUD-funded programs. It requires that shelters that segregate housing or programs by gender must respect each person’s self-identified gender, and cannot force a person into a shelter inconsistent with their gender identity. The rule permits any shelter-seeker to request alternative housing on a voluntary basis. Shelters will be required to post notices regarding the new rule.
|
# Copyright (c) 2012, Calxeda Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Calxeda Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from .. import Command
from pyipmi.info import *
from pyipmi.tools.responseparser import ResponseParserMixIn
from pyipmi import IpmiError
class InfoBasicCommand(Command, ResponseParserMixIn):
    """Describes the cxoem info basic IPMI command."""

    name = "Retrieve basic SoC info"
    ipmitool_args = ["cxoem", "info", "basic"]

    def parse_results(self, out, err):
        """Parse ipmitool output into an InfoBasicResult.

        Raises IpmiError when ipmitool reported an error or produced
        output we do not recognize.
        """
        def after_colon(text):
            # value portion of a "Key : value" line
            return text.partition(":")[2].strip()

        result = InfoBasicResult()
        if not out.startswith("Calxeda SoC"):
            if err.startswith("Error: "):
                # drop the leading "Error: " prefix from the first line
                raise IpmiError(err.splitlines()[0][7:])
            raise IpmiError("Unknown Error")
        for raw_line in out.splitlines():
            line = raw_line.lstrip()
            if line.startswith("Calxeda SoC"):
                result.iana = int(line.split()[2].strip("()"), 16)
            elif line.startswith("Firmware Version"):
                result.firmware_version = after_colon(line)
            elif line.startswith("SoC Version"):
                result.ecme_version = after_colon(line)
            elif line.startswith("Build Number"):
                result.ecme_build_number = after_colon(line)
            elif line.startswith("Timestamp"):
                result.ecme_timestamp = int(line.split()[1].strip(":()"))
            elif line.startswith("Node EEPROM Image Version"):
                result.node_eeprom_version = after_colon(line)
            elif line.startswith("Node EEPROM CFG id"):
                result.node_eeprom_config = after_colon(line)
            elif line.startswith("Slot EEPROM Image Version"):
                result.slot_eeprom_version = after_colon(line)
            elif line.startswith("Slot EEPROM CFG id"):
                result.slot_eeprom_config = after_colon(line)
        return result
class InfoCardCommand(Command, ResponseParserMixIn):
    """Describes the cxoem info card IPMI command."""

    name = "Retrieve card info"
    ipmitool_args = ["cxoem", "info", "card"]
    result_type = InfoCardResult

    response_fields = {
        'Board Type' : {'attr' : 'type'},
        'Board Revision' : {'attr' : 'revision'}
    }

    def parse_results(self, out, err):
        """Parse via the mixin, then verify both expected fields were found."""
        result = ResponseParserMixIn.parse_results(self, out, err)
        missing = [a for a in ('type', 'revision') if not hasattr(result, a)]
        if missing:
            # ipmitool output did not contain the expected fields
            raise IpmiError(out.strip())
        return result
# Mapping of operation names to their Command implementations, consumed by
# the pyipmi command dispatcher.
info_commands = {
    "info_basic" : InfoBasicCommand,
    "info_card" : InfoCardCommand
}
|
With MuckFest® MS San Francisco coming up next weekend, we had our mascot, Puck the Muck Duck, put together a list of his favorite places to play when he’s not rolling around in the mud in the Bay Area!
Let yourself be a tourist for the day and check out San Francisco’s most crooked landmark.
Lombard Street is known as one of the most crooked streets in the world and also one of the must-see streets in the area. This winding one-block road will take you down eight sharp turns as you make your way from top to bottom. If you’re up for some pre-muck exercise, you can take the trek from bottom to top, but don’t expect Puck to join you. He’ll fly from overhead and cheer you on.
After a long day in the mud enjoy a hot meal in Chinatown.
You can guarantee you won’t get bored discovering all that San Francisco’s Chinatown has to offer. We know Puck doesn’t! And with it being the oldest and largest Chinatown in North America you can turn a trip here into a day of sampling delicious food.
You might recognize this iconic spot from the opening credits of a popular 80’s TV show.
This long strip of beautiful Victorian houses is painted with bright colors and known as the Painted Ladies. Not only have they been featured in numerous different media, they are best known for being featured in the opening credits for the popular show Full House. So take a drive in Alamo Square and take in the scene. You might even find Puck trying to find an orange one!
Check out the most vibrant view in the Bay Area.
The first thing many Muckers think of when someone says San Francisco is the Golden Gate Bridge! And rightfully so, this 4,200-foot-long bridge is an iconic part of the Bay Area and one of Puck’s favorite aerial views. No matter which side of the bridge you’re on, the scenic view is breathtaking and might even be better than the view from the top of Crash Landing! If you have an hour, Puck recommends taking a stroll across the bridge for a MuckFest MS warm up!
Thought you saw some nutty stuff at MuckFest MS? Head over to Fisherman’s Wharf.
While the Wharf is a big tourist stop, there is no better way to keep your team laughing than by sitting out by Pier 39 to watch the sea lions. These playful and sometimes noisy sunbathers lay out on the pier in groups and provide endless entertainment for onlookers. For Puck, these guys are his favorite partners in crime for drying out his wings after a nice dip in the bay.
|
import math
import urllib.parse
import requests
from bs4 import BeautifulSoup as bSoup
from utils import utils
class MegaComputerScraper:
    """Scrapes megacomputer.pk search results and reports products whose
    price has dropped to (or below) the product's baseline price.
    """

    # Search endpoint; {} is replaced with the url-quoted product name.
    BASE_URL = 'http://www.megacomputer.pk/catalogsearch/result/index/?is_ajax=1&limit=36&q={}'
    # CSS class names used by the shop's markup.
    PRODUCT_PRICE_CLASS_NAME = "old-price"
    PRODUCT_PRICE_CLASS_NAME_SECONDARY = "price-box"
    PRODUCT_SPECIAL_PRICE_CLASS_NAME = "special-price"
    PRODUCT_TITLE_CLASS_NAME = "product-name"

    @staticmethod
    def search_item(product):
        """Search the shop for *product* and report offers at or below its
        baseline price.

        product -- object with .name, .baseline_price and .description
            attributes (project type); matching offers are written to CSV
            and surfaced as a Windows notification via utils.
        """
        # Read the page contents and get structured data using beautiful soup
        url = MegaComputerScraper.BASE_URL.format(urllib.parse.quote(product.name))
        data = bSoup(requests.get(url).text, "html.parser")
        # Attribute filters must be dicts ({"class": ...}); the previous
        # code passed set literals ({"class", ...}), which BeautifulSoup
        # does not treat as a valid attribute filter.
        main = data.find("div", {"class": "main"})
        if main is None:
            # No results container on the page: nothing to scan.
            return
        containers = main.findAll("li", {"class": "item"})
        for item in containers:
            title_div = item.find(
                "h2", {"class": MegaComputerScraper.PRODUCT_TITLE_CLASS_NAME}
            )
            if title_div is None or title_div.a is None:
                # Malformed entry without a title link: skip it.
                continue
            price_div = item.find(
                "p", {"class": MegaComputerScraper.PRODUCT_PRICE_CLASS_NAME}
            )
            special_price_div = item.find(
                "p", {"class": MegaComputerScraper.PRODUCT_SPECIAL_PRICE_CLASS_NAME}
            )
            secondary_price_div = item.find(
                "div", {"class": MegaComputerScraper.PRODUCT_PRICE_CLASS_NAME_SECONDARY}
            )
            has_price_div = price_div is not None
            has_special_price_div = special_price_div is not None
            has_secondary_price_div = secondary_price_div is not None
            # The displayed amount sits in the second <span> of each box.
            price_div = price_div.findAll("span", {})[1] \
                if has_price_div else None
            special_price_div = special_price_div.findAll("span", {})[1] \
                if has_special_price_div else None
            secondary_price_div = secondary_price_div.findAll("span", {})[1] \
                if has_secondary_price_div else None
            title = title_div.a["title"]
            brand = str(title).split(" ", 1)
            brand = brand[0] if len(brand) > 0 else "-"
            link = title_div.a["href"]
            # Prefer the special (discounted) price, then the regular one,
            # then the generic price box.
            if has_special_price_div:
                price = special_price_div.text
            elif has_price_div:
                price = price_div.text
            elif has_secondary_price_div:
                price = secondary_price_div.text
            else:
                price = 0
            price = MegaComputerScraper.extract_price(price)
            is_valid_price = price is not None and price > 0
            if is_valid_price and int(price) <= int(product.baseline_price):
                prompt = "\"" + title.replace(",", "|") + "\" is now available in: " + str(
                    price) + " at Mega Computer (Baseline: " + product.baseline_price + ")"
                details = utils.get_details(brand, price, title, link)
                if utils.is_similar(title, product.description):
                    utils.print_similarity(title, product.description)
                utils.display_windows_notification(brand, prompt)
                utils.write_to_csv(details)

    @staticmethod
    def extract_price(price):
        """Normalize a scraped price string (e.g. "PKR 1,234") to an int.

        Returns 0 for missing, empty, or unparseable input instead of
        raising (the old code crashed on empty/garbage price spans).
        """
        if price is None:
            return 0
        price = str(price).lower().replace(" ", "").replace("pkr", "").replace(",", "")
        if not price:
            # Empty after cleanup (e.g. blank span): treat as "no price".
            return 0
        value = [int(s) for s in price.split() if s.isdigit()]
        price = price if len(value) == 0 else value[0]
        try:
            return math.floor(float(price))
        except ValueError:
            # Non-numeric residue such as "n/a".
            return 0
|
Spending on Internet of Things (IoT) cybersecurity solutions is set to reach over $6 billion globally by 2023. A new report from Juniper Research highlighted rapid growth, with spending by product and service providers (in consumer markets) and end customers (in industrial and public services markets) to rise nearly 300% over the forecast period.
Juniper’s new research claimed that growing business risk and regulatory minimum standards would serve as key spending drivers.
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests the ECDF functionality """
import unittest
import random
from sparktkregtests.lib import sparktk_test
class ecdfTest(sparktk_test.SparkTKTestCase):
    """Tests for the frame.ecdf (empirical CDF) operation."""

    def setUp(self):
        """Build test frame"""
        super(ecdfTest, self).setUp()
        # generate a dataset to test ecdf on
        # it will just be a single column of ints
        column = [[random.randint(0, 5)] for index in xrange(0, 20)]
        schema = [("C0", int)]
        self.frame = self.context.frame.create(column,
                                               schema=schema)

    def test_ecdf(self):
        """ecdf result matches a hand-computed empirical CDF."""
        # The old name "validate_ecdf" lacked the test_ prefix, so the
        # unittest runner never executed this check; keep validate_ecdf
        # for backward compatibility and invoke it from here.
        self.validate_ecdf()

    def validate_ecdf(self):
        # call sparktk ecdf function on the data and get as pandas df
        ecdf_sparktk_result = self.frame.ecdf("C0")
        pd_ecdf = ecdf_sparktk_result.to_pandas(ecdf_sparktk_result.row_count)
        # get the original frame as pandas df so we can calculate our own result
        pd_original_frame = self.frame.to_pandas(self.frame.row_count)
        # the formula for calculating ecdf is
        # F(x) = 1/n * sum from 1 to n of I(x_i)
        # where I = { 1 if x_i <= x, 0 if x_i > x }
        # i.e., for each element in our data column count
        # the number of items in that row which are less than
        # or equal to that item, divide by the number
        # of total items in the column
        grouped = pd_original_frame.groupby("C0").size()
        our_result = grouped.sort_index().cumsum() * 1.0 / len(pd_original_frame)
        # finally we iterate through the sparktk result and compare it with ours
        for index, row in pd_ecdf.iterrows():
            self.assertAlmostEqual(row["C0" + '_ecdf'],
                                   our_result[int(row["C0"])])

    def test_ecdf_bad_name(self):
        """Test ecdf with an invalid column name."""
        with self.assertRaisesRegexp(Exception, "No column named bad_name"):
            self.frame.ecdf("bad_name")

    def test_ecdf_bad_type(self):
        """Test ecdf with an invalid column type."""
        with self.assertRaisesRegexp(Exception, "does not exist"):
            self.frame.ecdf(5)

    def test_ecdf_none(self):
        """Test ecdf with a None for the column name."""
        with self.assertRaisesRegexp(Exception, "column is required"):
            self.frame.ecdf(None)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
Hi. What does "not working" mean. Not loading, not syncing, throwing an error, crashing? And is this Mac / Windows / Desktop / Tablet / Web etc?
www.evernote.com's DNS does not have an AAAA record,
therefore the site cannot be accessed via IPv6.
An IPv6 address should be added to the server.
So when will ipv6 work? So many years you can not turn on ipv6? And yes, I also have the same problems and have been looking at this topic for a long time!
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
import sys
import requests
import tempfile
import gzip
from configlib import getConfig, OptionParser
from tempfile import mkstemp
from os import close, fsync, path, rename
from mozdef_util.geo_ip import GeoIP
from mozdef_util.utilities.logger import logger, initLogger
def fetch_db_data(db_file):
    """Download a maxmind database and return its decompressed contents.

    db_file -- target filename (e.g. 'GeoLite2-City.mmdb'); the maxmind
        edition id is derived by stripping the 5-char '.mmdb' suffix.

    Raises Exception on a non-2xx response from the maxmind update server.
    """
    db_download_location = 'https://updates.maxmind.com/geoip/databases/' + db_file[:-5] + '/update'
    logger.debug('Fetching db data from ' + db_download_location)
    auth_creds = (options.account_id, options.license_key)
    response = requests.get(db_download_location, auth=auth_creds)
    if not response.ok:
        raise Exception("Received bad response from maxmind server: {0}".format(response.text))
    db_raw_data = response.content
    with tempfile.NamedTemporaryFile(mode='wb', prefix=db_file + '.zip.', suffix='.tmp', dir=options.db_store_location) as temp:
        logger.debug('Writing compressed gzip to temp file: ' + temp.name)
        temp.write(db_raw_data)
        temp.flush()
        logger.debug('Extracting gzip data from ' + temp.name)
        # Use a context manager so the GzipFile handle is closed
        # deterministically (the old code leaked it).
        with gzip.GzipFile(temp.name, "rb") as gfile:
            data = gfile.read()
    return data
def save_db_data(db_file, db_data):
    """Atomically install *db_data* as *db_file* in the db store directory.

    The data is written to a temp file in the destination directory,
    validated by loading it with GeoIP and performing a test lookup, and
    only then renamed over the destination.
    """
    save_path = path.join(options.db_store_location, db_file)
    # mkstemp in the destination directory keeps the final rename on the
    # same filesystem.
    fd, temp_path = mkstemp(suffix='.tmp', prefix=db_file, dir=options.db_store_location)
    with open(temp_path, 'wb') as temp:
        logger.debug("Saving db data to " + temp_path)
        temp.write(db_data)
        fsync(temp.fileno())
        temp.flush()
    logger.debug("Testing temp geolite db file")
    geo_ip = GeoIP(temp_path)
    # Do a generic lookup to verify we don't get any errors (malformed data)
    geo_ip.lookup_ip('8.8.8.8')
    logger.debug("Moving temp file to " + save_path)
    # Close the descriptor returned by mkstemp before renaming into place.
    close(fd)
    rename(temp_path, save_path)
def main():
    """Fetch both maxmind databases, then install each one."""
    logger.debug('Starting')
    # Download both payloads first so a failed download aborts the run
    # before either existing database file gets replaced.
    city_db_data = fetch_db_data(options.db_file)
    asn_db_data = fetch_db_data(options.asn_db_file)
    save_db_data(options.db_file, city_db_data)
    save_db_data(options.asn_db_file, asn_db_data)
def initConfig():
    """Populate the module-level *options* from the config file."""
    # output our log to stdout or syslog; remaining options locate the
    # database files and hold the maxmind credentials.
    defaults = (
        ('output', 'stdout'),
        ('sysloghostname', 'localhost'),
        ('syslogport', 514),
        ('db_store_location', ''),
        ('db_file', ''),
        ('asn_db_file', ''),
        ('account_id', ''),
        ('license_key', ''),
    )
    for name, default in defaults:
        setattr(options, name, getConfig(name, default, options.configfile))
if __name__ == '__main__':
    # Parse -c/--configfile (defaults to <scriptname>.conf), load settings,
    # set up logging, then run the fetch/update cycle.
    parser = OptionParser()
    parser.add_option(
        "-c",
        dest='configfile',
        default=sys.argv[0].replace('.py', '.conf'),
        help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig()
    initLogger(options)
    main()
|
What is it important for me to be teaching now (focusing inquiry)?
What strategies are most likely to help my students learn this (teaching inquiry)?
What worked, and for whom, and what are the implications for my teaching (learning inquiry)?
Teaching as inquiry diagram from NZC.
The first question requires us to know our students; the second, to have (or acquire) the appropriate content knowledge and pedagogical content knowledge; the third, to assess what learning has gone on and in whose mind. The answer to this third question will have implications for our teaching – and possibly, our own learning. We may find we need to look for different strategies or even a completely different approach.
The curriculum offers a brief summary of pedagogical approaches that are applicable to all teaching. For a more comprehensive guide, with a specific focus on the social sciences, see Effective Pedagogy in Social Sciences/Tikanga ā Iwi Best Evidence Synthesis Iteration [BES]. This synthesis identifies four ‘mechanisms’ that are at the heart of effective social sciences teaching and learning.
Education for sustainability places strong emphasis on supporting students to develop action competence. This emphasis is strongly embedded in the learning objectives and requires teachers to adopt a holistic approach to teaching and learning.
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glue_crawler import AwsGlueCrawlerHook
from airflow.sensors.base import BaseSensorOperator
class AwsGlueCrawlerSensor(BaseSensorOperator):
    """
    Waits for an AWS Glue crawler to reach any of the statuses below
    'FAILED', 'CANCELLED', 'SUCCEEDED'

    :param crawler_name: The AWS Glue crawler unique name
    :type crawler_name: str
    :param aws_conn_id: aws connection to use, defaults to 'aws_default'
    :type aws_conn_id: str
    """

    def __init__(self, *, crawler_name: str, aws_conn_id: str = 'aws_default', **kwargs) -> None:
        super().__init__(**kwargs)
        self.crawler_name = crawler_name
        self.aws_conn_id = aws_conn_id
        self.success_statuses = 'SUCCEEDED'
        self.errored_statuses = ('FAILED', 'CANCELLED')
        self.hook: Optional[AwsGlueCrawlerHook] = None

    def poke(self, context):
        """Return True once the crawler finished its last crawl successfully.

        Returns False while the crawler is still running; raises
        AirflowException when the last crawl ended in a non-success status.
        """
        hook = self.get_hook()
        self.log.info("Poking for AWS Glue crawler: %s", self.crawler_name)
        # Fetch the crawler description once; the previous implementation
        # called the API twice, doubling request volume and risking reading
        # 'State' and 'LastCrawl' from two different crawl runs.
        crawler = hook.get_crawler(self.crawler_name)
        crawler_state = crawler['State']
        if crawler_state != 'READY':
            return False
        self.log.info("State: %s", crawler_state)
        crawler_status = crawler['LastCrawl']['Status']
        if crawler_status == self.success_statuses:
            self.log.info("Status: %s", crawler_status)
            return True
        raise AirflowException(f"Status: {crawler_status}")

    def get_hook(self) -> AwsGlueCrawlerHook:
        """Returns a new or pre-existing AwsGlueCrawlerHook"""
        if self.hook:
            return self.hook
        self.hook = AwsGlueCrawlerHook(aws_conn_id=self.aws_conn_id)
        return self.hook
|
November acted more like a typical October, with volatility taking over the globe. We had everything: trade wars, elections, more Brexit drama, and tons of earnings reports. As for Games Workshop, investors had fully digested the cryptic CEO message from October, which still left many scratching their heads.
With all that, lets see how Games Workshop performed over the month of November.
For most of November Games Workshop was losing ground, and strangely enough the slide can be timed to all the major Brexit negotiation announcements throughout November.
Here is how the stock is doing year to date.
The stock is still down from September highs, but with typically good holiday results there is little chance the stock will end below where it was at the start of the year. Games Workshop won’t, though, see the doubling results it has experienced over the last couple of years, which all goes to say Games Workshop might be reaching the upper limits for a company of its size and scope.
As for investor news, Games Workshop had a quiet month with some stock changing hands, with one little oddity happening as CEO Kevin Roundtree sold some stock to buy a new house!
|
import re
import logging
from collections import defaultdict
template_re = re.compile(r"\{\{[^\}]*\}\}", re.UNICODE)
default_translation_re = re.compile(
ur"\{\{(t[\u00d8|\-\+])\|([^}]+)\}\}", re.UNICODE)
global_features = ["sourcewc", "article", "has_article"]
# Debug helper: print a unicode string to stdout encoded as UTF-8 (Python 2).
def uprint(str_):
    print str_.encode('utf8')
class ArticleParser(object):
""" Base class for all article parsers.
This class should not be instantiated.
"""
def __init__(self, wikt_cfg, parser_cfg, filter_langs=None):
self.cfg = parser_cfg
self.wikt_cfg = wikt_cfg
self.pairs = list()
self.titles = set()
self.stats = defaultdict(list)
self.build_skip_re()
self.build_trim_re()
if self.cfg['lower'] and self.cfg['lower'] == 1:
self.lower_all = True
else:
self.lower_all = False
def build_trim_re(self):
if self.cfg['trim_re']:
self.trim_re = re.compile(ur'' + self.cfg['trim_re'].decode('utf8'),
re.UNICODE)
def build_skip_re(self):
if not self.cfg['skip_translation']:
self.skip_translation_re = None
else:
self.skip_translation_re = re.compile(ur'' + self.cfg['skip_translation'].decode('utf8'), re.UNICODE)
if not self.cfg['skip_translation_line']:
self.skip_translation_line_re = None
else:
self.skip_translation_line_re = re.compile(self.cfg['skip_translation_line'], re.UNICODE)
def skip_translation_line(self, line):
if 'PAGENAME' in line:
return True
if self.skip_translation_line_re and self.skip_translation_line_re.search(line):
return True
return False
def parse_article(self, article, source_wc=None):
if self.skip_article(article) == True:
self.stats["skip_article"].append(article[0])
return None
title, text = article
if self.lower_all:
text = text.lower()
self.titles.add(title)
self.stats["ok"].append(title)
t = self.get_pairs(text)
if t:
self.store_translations(title, t, source_wc)
def get_pairs(self, text):
return dict()
def skip_article(self, article):
if not article[0] or not article[1]:
return True
if not article[1].strip() or not article[0].strip():
return True
# ASSUMPTION: articles with a namespace contain no useful data
if ':' in article[0]:
return True
return False
def store_translations(self, this_word, translations, source_wc=None):
for wc in translations.keys():
if len(translations[wc]) > 0:
self.pairs.extend(
[[source_wc, this_word, wc, i, "sourcewc=" + self.wc, \
"article=" + this_word]
for i in translations[wc]])
def write_word_pairs_to_file(self, append=True):
""" Write output to file
One pair and its features are written to tab separated file
"""
fn = self.cfg['dumpdir'] + '/' + self.cfg['fullname'] + '/' + self.cfg[\
'word_pairs_outfile']
if append:
outf = open(fn, 'a+')
else:
outf = open(fn, 'w')
for p in self.pairs:
out_str = self.generate_out_str(self.add_features_to_word_pair(p))
if out_str:
outf.write(out_str.encode('utf8'))
outf.close()
def generate_out_str(self, pair):
if not pair:
return None
if len(pair) < 4:
return None
# alphabetic order
if pair[0] < pair[2]:
outstr = "\t".join(pair[0:4])
else:
outstr = "\t".join(pair[2:4] + pair[0:2])
feat_d = dict()
for feat in pair[4:]:
fields = feat.split('=')
if not fields[0] in global_features:
self.log_handler.error('Feature not found {0}'.format(feat))
continue
if len(fields) > 1:
feat_d[fields[0]] = fields[1]
else:
feat_d[fields[0]] = '1'
for feat in global_features:
if feat in feat_d:
outstr += "\t" + feat_d[feat]
else:
outstr += "\t0"
outstr += "\n"
return outstr
def add_features_to_word_pair(self, pair):
""" Adding features to translation pairs
"""
# article of the word exists
if pair[3] in self.titles:
pair.append("has_article")
return pair
def trim_translation(self, text):
if self.cfg['trim_re']:
text = self.trim_re.sub(r'\1\2', text)
text = text.replace('[', '')
text = text.replace(']', '')
text = text.replace('{', '')
text = text.replace('}', '')
return text.strip()
|
Following its sudden rejection by the Irish government, the techno-border is back. To an extent, it turns out to have been here all along, writes Newton Emerson. Proposals published last week by the British government are heavily dependent on technology to move goods across a “streamlined” post-Brexit Irish border.
Dublin had seemed open to this idea. Former taoiseach Enda Kenny indulged the concept and the Revenue Commissioners had revealed preliminary work on the project, for both soft and hard Brexits. In a worst-case scenario, Ireland’s top customs official said documents would need to be checked for between 6%-8% of cross-border freight but only a “tiny number” of lorries would be physically inspected, as everything else could be done electronically or via glorified toll booths.
|
from __future__ import print_function
import sys
from functools import partial
from django.contrib.auth.models import User
from django.db import models
from django.core.exceptions import FieldError
'''
error function for adding errors to stdrr
'''
error = partial(print, sys.stderr)
'''
Model Manager for doing table actions on devices. Extends base model manager class to include method
for registering (creating) devices
'''
class DeviceManager(models.Manager):
    """Model manager for Device: adds registration with duplicate checks."""

    def register_device(self, device_id, device_name, current_user):
        """Create and save a Device owned by current_user.

        Raises FieldError when the device is already registered to this
        user (active or not) or actively registered to another user.
        """
        # Same user already holds this device id (regardless of is_active).
        if Device.objects.all().filter(user=current_user, device_id=device_id):
            raise FieldError('This device has already been registered to this user.', device_id, current_user)
        # Device id is actively registered to some other user.
        elif Device.objects.all().filter(device_id=device_id, is_active=True):
            raise FieldError('This device has already been registered to a different user.', device_id)
        try:
            newDevice = Device(device_id=device_id, name=device_name, user=current_user)
            newDevice.save()
        except FieldError as fieldError:
            # NOTE(review): save errors are logged to stderr and swallowed,
            # so the caller cannot tell registration failed -- confirm this
            # best-effort behavior is intended. (.message is Python-2-era.)
            errString = str(type(fieldError)) + ": " + str(fieldError.message)
            error(errString)
        except (ValueError, TypeError):
            error("Invalid Device ID")
'''
model for SEADS devices like the SEADS plug, eGuage etc
# Required fields
# - device_id (primary_key) corresponds to a device id in the data_raw table
# - name (string) name of devices, defaults to 'Seads Device'
# Foreign keys
# - user_id (ForeignKey) corresponds to the user who 'owns' this device, allows null (device has not been registered)
'''
class Device(models.Model):
    """A SEADS device (e.g. SEADS plug, eGauge).

    Fields:
    - device_id: primary key; corresponds to a device id in the data_raw table
    - name: display name, defaults to 'Seads Device'
    - connection / is_active: status flags
    - user: owner; null means the device has not been registered
    """
    device_id = models.IntegerField(primary_key=True, unique=True)
    name = models.CharField(max_length=200, default='Seads Device')
    connection = models.BooleanField(default=True)
    is_active = models.BooleanField(default=True)
    user = models.ForeignKey(User, null=True)
    objects = DeviceManager()

    def deactivate_device(self):
        """Soft-delete: unlink the owner and mark inactive.

        A full delete would break foreign keys elsewhere, so the row is kept.
        Raises FieldError when the device is already deactivated.
        """
        if Device.objects.filter(device_id=self.device_id, is_active=False):
            # BUGFIX: error message previously read "disactivated".
            raise FieldError('This device has already been deactivated.', self)
        self.user = None
        self.is_active = False
        self.save()

    def reactivate_device(self, user):
        """Re-register a previously deactivated device to ``user``.

        Raises FieldError when the device is already active.
        """
        if Device.objects.filter(device_id=self.device_id, is_active=True):
            raise FieldError('This device is currently active.', self)
        self.user = user
        self.is_active = True
        self.save()
'''
model for Images displayed on the site
'''
# Required Fields
# - docfile (file) corrensponds to a file upload path
class Document(models.Model):
    """An uploaded file (image) displayed on the site."""

    # Upload path pattern expands to the upload date, e.g. documents/2014/05/01
    docfile = models.FileField(upload_to='documents/%Y/%m/%d')
|
American football evolved in the United States, from merging the sports of association football and rugby football together. The first match of American football was played on November 6, 1869, between two college teams, Rutgers and Princeton, under rules based on the association football rules of the time. During the latter half of the 1870s, colleges playing association football switched to the Rugby Union code, which allowed carrying the ball. Rutgers won the game 6 goals to 4. To this day, college football remains popular and is enjoyed as much as the professional game.
Football is a full-contact sport. It makes it fun to watch, yet it also makes it the most dangerous sport. Injuries are relatively common and most of them occur during training sessions. That's why, unlike other sports, players must wear a set of special protection. A football helmet and a set of shoulder pads are mandatory. Additional padding such as thigh pads and guards, knee pads, chest protectors, and mouthguards may be used as well. The most common types of injuries include strains, sprains, bruises, fractures, dislocations, and concussions.
The grand finale is the Super Bowl game between the champions of the NFC and AFC (usually held in the first Sunday in February).
Those NFL games have the highest average attendance (67,591) of any professional sports league in the world and the Super Bowl games account for many of the most watched television programs in American history. People literally go crazy for it. Those who are not so much in to sports watch it for the halftime performance by celebrities.
Fans of the game love themselves some good quality merchandise to support the team during the game or any outing. There are tons of online stores offering so-called "official" outfits of your favorite teams, but you can't trust all of them. However, there is the portal you can rely on for sure - NFL Shop. NFL has the online store created specifically for the demanding fans of this great game.
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.gauges.granville_phillips.micro_ion_controller import MicroIonController
class PychronMicroIonController(MicroIonController):
    """Micro-Ion gauge controller accessed through a remote pychron instance."""

    def get_pressure(self, name, **kw):
        """Ask the remote for the pressure of gauge ``name`` on this controller."""
        return self.ask('Get Pressure {} {}'.format(self.name, name), **kw)

    def get_ion_pressure(self, **kw):
        """Pressure reported by the ion gauge (IG)."""
        # BUGFIX: keyword options were accepted but silently dropped here
        # and in the convectron getters; forward them like get_pressure().
        # NOTE(review): command spelling differs from the convectron commands
        # ('Get Pressure' vs 'GetPressure') -- confirm against the protocol.
        return self.ask('Get Pressure {} IG'.format(self.name), **kw)

    def get_convectron_a_pressure(self, **kw):
        """Pressure reported by convectron gauge A (CG1)."""
        return self.ask('GetPressure {} CG1'.format(self.name), **kw)

    def get_convectron_b_pressure(self, **kw):
        """Pressure reported by convectron gauge B (CG2)."""
        return self.ask('GetPressure {} CG2'.format(self.name), **kw)
class QtegraMicroIonController(MicroIonController):
    """Micro-Ion gauge controller that talks to a Qtegra instance."""

    def get_pressures(self, verbose=False):
        """Query every configured gauge and record its pressure reading."""
        for gauge in self.gauges:
            reading = self.ask('GetParameter {}'.format(gauge.name),
                               verbose=verbose, force=True)
            self._set_gauge_pressure(gauge.name, reading)
# def get_pressure(self, name, **kw):
# k=''
# return self.ask('GetParameter {}'.format(k))
#
# def get_ion_pressure(self, **kw):
# k=''
# return self.ask('GetParameter {}'.format(k))
#
# def get_convectron_a_pressure(self, **kw):
# k=''
# return self.ask('GetParameter {}'.format(k))
#
# def get_convectron_b_pressure(self, **kw):
# k=''
# return self.ask('GetParameter {}'.format(k))
# ============= EOF =============================================
|
Here at Home Builder Guys, we'll be available to satisfy all your goals when it comes to Builders in Ashley, OH. We've got a crew of professional experts and the most resourceful technologies available to provide exactly what you want. We make sure that you get the most excellent service, the best price tag, and the finest quality supplies. Contact us today at 800-359-7760 to get started.
Saving money is an integral part of your mission. But, being economical shouldn't indicate that you lose excellent quality for Builders in Ashley, OH. We be sure our money conserving goals don't translate to a decreased level of quality work. Any time you work with us, you will receive the advantage of our expertise and top standard supplies to be sure that your project lasts even while saving your time and money. We are able to do this by giving you the very best savings in the market and eliminating costly mistakes. Save your time and cash through contacting Home Builder Guys right now. We are ready to take your phone call at 800-359-7760.
To put together the most suitable choices regarding Builders in Ashley, OH, you've got to be well informed. We will not encourage you to put together poor choices, because we know what we're doing, and we ensure you know exactly what to be expecting from the venture. That's the reason why we try to make every effort to make sure that you comprehend the strategy and are not confronted by any kind of surprises. Step one will be to contact us by dialing 800-359-7760 to set up your project. We're going to resolve your concerns and questions and arrange your initial appointment. Our company is going to show up at the arranged time with the necessary equipment, and will work with you through the entire process.
Lots of reasons exist to consider Home Builder Guys regarding Builders in Ashley, OH. Our company has the best client satisfaction scores, the best resources, and the most helpful and powerful money saving practices. Our company is ready to help you with the most practical knowledge and expertise in the field. Dial 800-359-7760 whenever you need Builders in Ashley, and we will work with you to systematically finish the project.
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from sklearn.preprocessing import FunctionTransformer
from copy import copy
# NOTE: Make sure that the class is labeled 'class' in the data file
# Load the dataset; the 'class' column holds the target labels.
tpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)
# Drop the target column to obtain the feature matrix.
features = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data['class'], random_state=42)

# TPOT-exported pipeline: the union concatenates the raw features (via the
# identity FunctionTransformer) with the predictions of a first logistic
# regression, and a second logistic regression makes the final prediction.
exported_pipeline = make_pipeline(
    make_union(
        FunctionTransformer(copy),
        StackingEstimator(estimator=LogisticRegression(C=10.0))
    ),
    LogisticRegression(C=10.0)
)

exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
|
A bearer both of Gnawa tradition and of its fusion with Western instruments, Grooz is a group that delivers the secrets of a renewed trance. Led brilliantly by its singer and guitarist Abdelhak Benmedjebari, whose solid stage experience is due to his many performances with his father, the “Maalem Mejbar” and his uncles of the “Gaada Diwan Béchar,” Grooz offers a dynamic show with reggae, blues, rock and jazz beats grafted to the traditional repertory. An ancestral culture celebrated with a forceful modernity! In 2016, Grooz launched its first single, Jambirika.
|
#!/usr/bin/env python
# Read in a fasta file and split it into multiple files
# Author: Yu Fu (yfu at yfu dot me)
import argparse
parser = argparse.ArgumentParser(description='Split a fasta file into multiple files')
parser.add_argument('-p', '--parts', help='The number of (almost) equal parts', required=True)
parser.add_argument('-f', '--file', help='The fasta file', required=True)
parser.add_argument('-v', '--verbose', help='Print lots of useless information', action="store_true")
args = parser.parse_args()
n = int(args.parts)
fn = args.file
verbose = args.verbose
total = 0
fh = open(fn, 'r')
for line in fh.readlines():
line = line.strip()
if line[0] == '>':
total += 1
fh.close()
# Do notice that total might not be a multiple of parts, say 151 total line and 3 parts.
each = int(total / float(n))
fh = open(fn, 'r')
output = []
# Notice that inside the program, the index of files starts from 0
# and the filenames start from 1
for i in range(n):
output.append( open(fn + '.' + str(i+1), "w") )
counter = -1;
for line in fh.readlines():
if(line[0] == '>'):
counter += 1
line = line.strip()
file_number = int(counter / each)
# In order to put the last bit of the file into the last file...
if( counter / each > n-1 ):
file_number = n-1
# print file_number, line
if(verbose==True):
print str(file_number) +"\t" + line
print >>output[file_number], line
for i in range(n):
output[i].close()
|
The Bermuda Stock Exchange (BSX) and the Channel Islands Securities Exchange (CISE) have signed a Memorandum of Understanding (MoU) to explore opportunities to work together.
The CISE is best known for listing investment vehicles, including open and closed ended fund structures, and international debt securities.
BSX provides a fully electronic stock exchange platform to Bermuda’s domestic capital market. It is known for supporting the global reinsurance and capital markets through the listing of a variety of investment vehicles such as fund structures and insurance linked securities.
The MoU means the two exchanges will start to look into ways of working together, including the cross-fertilisation of regulatory best practice, information exchange in relation to market developments, potential resource sharing and joint promotional activities.
|
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import glob
from twitter.common import app
from twitter.common.java.perfdata import PerfData
# Command-line flags: the hsperfdata source is chosen with -f (file) or
# -p (pid); -l just lists the pids found under --hsperfdata_root.
app.add_option(
    '-f',
    dest='filename',
    default=None,
    help='Filename to load hsperfdata from.')
app.add_option(
    '--hsperfdata_root',
    dest='hsperfdata_root',
    default='/tmp',
    help='Root directory to search for hsperfdata files.')
app.add_option(
    '-l',
    dest='list',
    default=False,
    action='store_true',
    help='List pids.')
app.add_option(
    '-p',
    dest='pid',
    default=None,
    type=int,
    help='PID to load hsperfdata from.')
def file_provider():
    """Return a zero-arg callable that reads the --filename hsperfdata file."""
    options = app.get_options()

    def read_file():
        with open(options.filename, 'rb') as handle:
            return handle.read()

    return read_file
def list_pids():
    """Yield (path, role, pid) for each hsperfdata file under the root dir."""
    root_dir = app.get_options().hsperfdata_root
    for path in glob.glob(os.path.join(root_dir, 'hsperfdata_*', '*')):
        parent, pid_str = os.path.split(path)
        # The directory is named 'hsperfdata_<role>'; strip the prefix.
        role = os.path.basename(parent)[len('hsperfdata_'):]
        yield path, role, int(pid_str)
def print_pids():
    """Print one 'role <role> pid <pid> path <path>' line per JVM found."""
    for data_path, data_role, data_pid in list_pids():
        print('role %s pid %d path %s' % (data_role, data_pid, data_path))
def pid_provider():
    """Return a zero-arg callable reading the hsperfdata file for --pid.

    Exits via app.error() when no matching pid is found.
    """
    options = app.get_options()
    for path, _, pid in list_pids():
        if pid == options.pid:
            break
    else:
        # for/else: the scan completed without finding the requested pid.
        app.error('Could not find pid %s' % options.pid)
    def loader():
        # 'path' is the loop variable left bound by the break above.
        with open(path, 'rb') as fp:
            return fp.read()
    return loader
def main(args, options):
    """Entry point: list pids, or dump all hsperfdata counters for one JVM."""
    # Positional arguments are not accepted; the source is chosen by flags.
    if len(args) > 0:
        app.error('Must provide hsperfdata via -f/-p')
    if options.list:
        print_pids()
        return
    perfdata = None
    if options.filename:
        perfdata = PerfData.get(file_provider())
    elif options.pid:
        perfdata = PerfData.get(pid_provider())
    if perfdata is None:
        app.error('No hsperfdata provider specified!')
    perfdata.sample()
    # Print every sampled counter, sorted by counter name.
    for key in sorted(perfdata):
        print('%s: %s' % (key, perfdata[key]))
# Parse command-line flags and dispatch to main().
app.main()
|
The Marauders seek out a mysterious woman who is a former mech warrior who served under the Lord Commander, call sign Coryphee. The team, and new recruit, then face off against General Kam and discover who was behind the hiring of The Lost Third.
Valravn rejoins what remains of Mason’s Marauders after her time away and decides that if Beta was willing to follow the Lord Commander, she is too. Talon and the Lord Commander relate to her what has gone on since she has been away, including the search for new mech warriors, when they receive word that a free lance mech warrior has been looking for them. A mysterious woman has been asking around for Mason’s Marauders and is currently staying with Lord Mulligan on his ranch. Despite the early morning hour, the Marauders load into the nearest skimmer and head off to the home of Lord Mulligan to see who this mech warrior is and why she is searching for them. Upon arriving at the ranch house they are met by a slightly disheveled Peter, Lord Mulligan’s senate liaison, who leads them up the stairs to Lord Mulligan who has just emerged from his room. He tells the team that the woman is a former mech warrior from The Lost Third who has defected from General Kam. Her name is Lea, call sign Coryphee, and she has been looking for the Lord Commander and his lance. She is also currently training in the gym which is where everyone heads off to.
Lea aka Coryphee is quite familiar with the Lord Commander as she was trained by him. In fact the two have a very obvious history but not one that the other Marauders understand. When questioned as to why she is looking for them and what she is doing on New Abilene, Coryphee will only say that she was previously serving under House Marik before joining The Lost Third and General Kam. Unfortunately, General Kam turned out to be an amoral, insane military commander so she left. Now she wants to join up with the Marauders, something that Talon is a bit hesitant to allow. The Lord Commander relates that she was a very talented mech warrior when she fought with him and he is willing to let her use the Orion with the lance. Talon agrees to give Coryphee a trial run in the simulator on the Yojimbo before signing off and the team, including Lord Mulligan, start to head back to the ship to do just that. As they start off, air raid sirens sound as it appears that General Kam is launching an attack. Lord Mulligan presses the Marauders to make a stand against General Kam and take him down with deadly force. The trial will have to wait and the Marauders, and Lord Mulligan, prep their mechs for battle.
The Marauders take the field with an already malfunctioning Atlas piloted by Lord Mulligan and an as yet untested lance member in Coryphee. Straight away it is clear that there is no love lost between Coryphee and General Kam. Both sides are fielding infantry, tanks, and mechs, and while General Kam is definitely not all there he is still a very competent tactician on the field. Both sides take damage but the Marauders eventually gain the upper hand. At this point General Kam offers to surrender in exchange for information regarding who has been paying him to cause instability and chaos on New Abilene. The Marauders, especially Coryphee, are less than inclined to take him up on this offer. Before they can strike General Kam down, the Senate makes their wishes clear and instructs the team and Lord Mulligan to capture him alive and bring him in for interrogation. Onboard the Yojimbo, Kam relates that he was hired by the intelligence service of House Marik, which has considerable interest in the area of New Abilene. General Kam, along with the Lord Commander and Talon, will have to appear to testify before the Senate. But that doesn't mean that he will escape the battle totally unharmed as Lord Mulligan shocks him unconscious and Coryphee cuts off his fingers, hopefully preventing him from ever piloting a mech again.
|
'''
Created on Dec 16, 2014
@author: delosari
'''
from os import remove
from os.path import isfile
import CodeTools.PlottingManager as plotMan
from PipeLineMethods.ManageFlow import DataToTreat
Pv = plotMan.myPickle()

# File-name patterns of the generated artifacts this script deletes.
LogFiles_Extension = ".plot"
CombinedFits = "WHT.fits"
NebularFits = "_Neb.fits"
StellarRemovedFits = "_WHT_Neb_Star.fits"
StellarContinuum = "_StellarContinuum.fits"
MaskFile = "_mask.txt"
TexFiles = ['.tex']
OldFiles = ['_LinesLog_v2.txt', '_LinesLog_v3.txt', '.LinesLog_v3']

RootFolder = DataToTreat()
Pattern = [LogFiles_Extension, NebularFits, StellarContinuum, StellarRemovedFits, CombinedFits] + OldFiles + TexFiles
LogImages_Extension = ".png"

# When True the matched files are actually removed (destructive!).
ForceDelete = True

#Find and organize files from terminal command or .py file
FilesList = Pv.FindAndOrganize(Pattern, RootFolder, CheckComputer=True)

#Loop through files
for m in range(len(FilesList)):
    for j in range(len(FilesList[m])):
        CodeName, FileName, FileFolder = Pv.FileAnalyzer(FilesList[m][j], Texting=False)
        #Case of logs: remove the .plot log and its rendered .png image
        if LogFiles_Extension in FileName:
            LogName = FileName
            ImageName = LogName.replace(LogFiles_Extension, LogImages_Extension)
            #Deleting log
            if isfile(FileFolder + LogName):
                print '--', LogName
                if ForceDelete == True:
                    remove(FileFolder + LogName)
            #Deleting images
            if isfile(FileFolder + ImageName):
                print '--', ImageName
                if ForceDelete == True:
                    remove(FileFolder + ImageName)
        #Case of fits file
        # NOTE(review): matches any name containing 'fit', not only '.fits' --
        # confirm the pattern list guarantees no false positives.
        if 'fit' in FileName:
            print '\n-Fits Found'
            FitsFile = FileName
            print '--',FitsFile,'\n'
            #Deleting Fits file
            if ForceDelete == True:
                remove(FileFolder + FitsFile)
        #Case of line logs file ('.LinesLog_v3' is a subset of 'LinesLog')
        if ('LinesLog' in FileName) or ('.LinesLog_v3' in FileName):
            print '\n-Lines Log Found2'
            LinesLog = FileName
            print '--',LinesLog,'\n'
            #Deleting lines log file
            if ForceDelete == True:
                remove(FileFolder + LinesLog)
        #Case of many tex files:
#         for tex_extension in TexFiles:
#             if tex_extension in FileName:
#                 print 'Tex file found3:'
#                 Tex_File = FileName
#                 print '--', Tex_File, '\n'
#
#                 #Deleting Fits file
#                 if ForceDelete == True:
#                     remove(FileFolder + Tex_File)
|
Christians have been polluted by so many ideas on how to get healed. If you believe you're healed because of something you feel, you may manifest that healing because of the grace and mercy of God - but you're subject to problems because you put your faith in something physical. Feelings aren't of the devil; they just need to be submitted unto Jesus.
There is no bad way to receive healing. I'll lay hands on you until I rub all the hair off your head if that's what it takes! But there are better ways to get healed than others. In the Old Testament, God was doing miracles and healing people on credit. Healing was harder to come by, and it had to come by a person. However this was appropriate in the Old Testament. Now everything has changed because of Jesus. Sad to say, the vast majority of the body of Christ is still operating under an Old Testament model. They are waiting for a super duper to lay hands on them to get them healed, but many Christians don't realize that God has already given them everything for "life and godliness" (2 Pet. 1:3).
The best thing I have to offer is what the Bible has to say. Satan cannot change the Word of God. It's important that you get this. In John 20:29, Jesus said, "Thomas, because thou hast seen me, thou hast believed: blessed are they that have not seen, and yet have believed." There is a greater blessing when you get to where you believe the Word and not just because you saw or felt something.
You need to take the word that's already been given to you and stand on it. First Peter 2:24 says, "Who his own self bare our sins in his own body on the tree, that we, being dead to sins, should live unto righteousness: by whose stripes ye were healed." You're healed. Now act healed.
I don't care if I ever see or feel the power of God again; I'm going to take the Word of God and believe I am healed. I want the greatest manifestation of power operating in my life. I don't want anything less. You have a covenant with God that says He's already healed you. It's already done! It's not a question of if God will heal you; He's already healed you. It's a matter of when you are going to believe with all of your heart. When you believe it's already done and you're not trying to get healed, you'll see that healing manifest - without anyone laying hands on you.
This month, I'm offering a Healing Is Here package for £45. It contains Greg Mohr's book Your Healing Door, Carlie Terradez's new book Miracles Made Easy, a CD or DVD album of the 2015 Healing Is Here conference and my book God Wants You Well. Discover the truths that will change the way you receive healing.
You can order this teaching today by going online: www.awme.net, calling our Helpline: +44 (0)1922 473 300, or completing the enclosed form and mailing it back to us.
Download full PDF of the February newsletter.
|
#!/usr/bin/env python2.7
#coding=utf-8
import logging
import sys
import time
import argparse
import pandas as pd
from pandas import DataFrame,Series
import tushare as ts
import lib.mylog
import lib.utils as utils
import conf.conf as conf
class DailyDataDownloader(object):
    """Downloads realtime quotes plus fq (adjustment) factors for every
    stock in the basics table and stores both into the database.
    """
    def __init__(self,date, interval=10, retryTimes=5):
        # date: passed through to log messages; empty string means "latest".
        self.date = date
        # Base seconds to wait between retries (shortened in DEBUG mode).
        self.interval = interval if not conf.DEBUG else 1
        self.retryTimes = retryTimes
        self.stockBasics = utils.downloadStockBasics()
    def download(self):
        """Fetch quote + fq factor per code with exponential-backoff retries,
        then persist the accumulated frames.
        """
        codes = self.stockBasics.index.values
        fqFactorDF = DataFrame()
        codeDF = DataFrame()
        for code in codes:
            descStr = " (%s, %s) "%(code, self.date)
            _intervalFactor = 2
            _interval = self.interval
            _retryCount = 0
            while _retryCount < self.retryTimes:
                _retryCount += 1
                logging.info("Downloading daily %s trying %d times."%(descStr, _retryCount))
                # Exponential backoff: double the wait on each attempt.
                _interval *= _intervalFactor
                try:
                    # a brand new code into market may cause '_parase_fq_factor' raise exceptions
                    _df = ts.get_realtime_quotes(code)
                    if _df is None: # if the code is off the market, this could happen
                        break
                    _df = _df[['code','open','high','pre_close','price','low','volume','amount','date']].set_index('date')
                    _df.rename(columns={'price':'close'},inplace=True)
                    # a brand new code into market, could also like this, the get_realtime_quotes may return something
                    if ((float(_df['high']) == 0) & (float(_df['low'])==0)):
                        break # no need to store
                    _fqDF = ts.stock.trading._parase_fq_factor(code,'','')
                    _fqDF.insert(0,"code",code,True)
                    _fqDF = _fqDF.drop_duplicates('date').set_index('date').sort_index(ascending=False)
                    #_fqDF = _fqDF.ix[self.date]
                    # Keep only the most recent fq factor row.
                    _fqDF = _fqDF.head(1)
                    # stock may exit the market or just pause
                    # NOTE(review): duplicate of the zero check above, so this
                    # branch can never trigger here -- confirm before removing.
                    if ((float(_df['high']) == 0) & (float(_df['low'])==0)):
                        break # no need to store
                        #_rate = float(_fqDF['factor'])/float(_df['pre_close'])
                    else:
                        _rate = float(_fqDF['factor'])/float(_df['close'])
                    _df = _df.drop('pre_close',axis=1)
                    # Convert raw prices to (hfq) adjusted prices.
                    for label in ['open', 'high', 'close', 'low']:
                        _df[label] = float(_df[label]) * _rate
                        #_df[label] = _df[label].map(lambda x:'%.2f'%x)
                        _df[label] = _df[label].astype(float)
                except Exception, e:
                    # NOTE(review): with retryTimes=5 this re-raises on the
                    # 4th attempt (_retryCount + 1 == retryTimes), so the 5th
                    # retry never happens; likely meant
                    # _retryCount == self.retryTimes -- confirm.
                    if _retryCount + 1 == self.retryTimes or conf.DEBUG:
                        raise e
                    logging.info("Download error, waiting for %d secs."%_interval)
                    time.sleep(_interval)
                    continue
                # Success: accumulate both frames and stop retrying this code.
                fqFactorDF = pd.concat([fqFactorDF,_fqDF])
                codeDF = pd.concat([codeDF, _df])
                break
            if conf.DEBUG:
                # DEBUG mode only processes the first code.
                break
        self._save(fqFactorDF, codeDF)
    def _save(self, fqFactorDF, codeDF):
        """Append both accumulated frames to their database tables."""
        logging.info("Saving daily fq factor.")
        fqFactorDF.to_sql(name='t_daily_fqFactor', con=utils.getEngine(), if_exists='append', chunksize=20000)
        logging.info("Saved daily fq factor.")
        logging.info("Saving daily hfq data.")
        codeDF.to_sql(name='t_daily_hfq_stock', con=utils.getEngine(), if_exists='append')
        logging.info("Saved daily hfq data.")
if '__main__' == __name__:
    # Command-line entry point: run in debug mode by default; pass
    # -p/--production to disable conf.DEBUG and process every code.
    parser = argparse.ArgumentParser()
    # Fixed typo in the user-facing help text ("defalt" -> "default").
    parser.add_argument("-p", "--production", action="store_true",
                        help='''default is in debug mode, which only plays a little''')
    args = parser.parse_args()
    if args.production:
        conf.DEBUG = False
    # An empty date string: the downloader fetches the latest realtime data.
    downloader = DailyDataDownloader('')
    downloader.download()
|
Trim Size, in inches: 10.5" x 9"
Wong Chung is thrilled when he has the chance to join his father's caravan and embark on a journey along the Silk Road. But with the harsh terrain, brutal sandstorms, and marauding bandits, the journey is not an easy one. With so many obstacles, will they ever be able to reach the magnificent markets in Constantinople?
This book in the new Trade Winds series presents historical information about the most well-known trade route in a fun, accessible way.
"A story well suited to bringing this specific time in history to life that will enhance school collections."
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import fs_uae_launcher.fsui as fsui
class Book(fsui.Panel):
    """A container that stacks pages and displays exactly one at a time.

    Pages are registered via add_page() either as ready-made widgets or as
    factory callables; a factory is invoked lazily the first time its page
    is shown and the resulting widget replaces it in the page list.
    """

    def __init__(self, parent):
        fsui.Panel.__init__(self, parent)
        self.layout = fsui.VerticalLayout()
        self.page_titles = []     # titles, parallel to self.pages
        self.pages = []           # widgets or factory callables
        self.current_page = None  # the page currently shown, if any

    def add_page(self, function, title=""):
        """Register a page (widget or factory callable) with a title."""
        self.page_titles.append(title)
        self.pages.append(function)

    def set_page(self, page):
        """Show a page, selected by integer index or by equality with a
        registered page/factory."""
        try:
            # An int-like argument selects by position.
            index = page + 0
        except TypeError:
            # Otherwise search the registered pages for a match.
            index = None
            for position, candidate in enumerate(self.pages):
                if page == candidate:
                    index = position
                    break
            if index is None:
                raise Exception("page not found")
        if self.current_page:
            self.current_page.hide()
            self.layout.remove(self.current_page)
        entry = self.pages[index]
        if callable(entry):
            # Lazily construct the page and cache the widget in place of
            # the factory so it is only built once.
            entry = entry(self)
            self.pages[index] = entry
        self.layout.add(entry, fill=True, expand=True)
        self.current_page = entry
        entry.show()
        if hasattr(entry, "on_show"):
            entry.on_show()
        self.layout.update()
|
The Kovarik family started like all other Czechoslovakian immigrants who passed through Ellis Island – with the clothes on their backs, a dream and a strong work ethic. They settled in Binghamton’s 1st Ward to be close to other Slovak people as they assimilated into this new life. Frank Kovarik and his brother Fred enlisted in the Army and served during WWI, Frank having suffered from mustard gas poisoning. In 1922, the brothers decided to open a store they felt would benefit the area and opened Kovarik’s hardware store in the 3500sf building on Clinton St that is now occupied by the Mad Hatter Antiques. It was at this location that Kovarik’s went through various changes that helped solidify their presence in this area. Frank’s sons Frank Jr and Joseph took over the store, they became affiliated with Tru Value and their store in 1966 relocated to 276 Clinton St as they needed the 10,500sf facility to better meet the needs of their customers.
The leadership and the family members involved with the store changed but the same guiding principles from which the Kovariks had built the business have always remained constant “be a part of the community”.
For the third generation of Kovariks, being a part of the community means more than ensuring they have the lowest prices or a wide selection of products or even a staff complete with master electricians, plumbers and mechanics. Being a part of the community means living in the community, working in the community and supporting the community. Today the store is run by 95-year-old Joe and his beautiful wife Virginia, their daughter Debbie, their son Joe Jr who was handed the Presidency, and Joe Jr's wife Cindy. Everyone walking through their door knows their name and a story or two to share. They know them as their neighbors and their friends. And they have supported religious and equality organizations, the veteran groups, scouts, libraries, retirement homes, golf tournaments and a wide array of not-for-profit charities.
They have always been there for the people of this community. In fact, during the floods when all others couldn't get around, it was the Kovarik family that risked it all to get to a warehouse full of generators and transported them back to help those in need.
As Joe Jr said “We are community and we are dedicated to take care of the people in this community”. After nearly 100 years in business Joe’s grandfather Frank has to be proud of his family and the principles he instilled in them.
|
# test_synchan.py ---
#
# Filename: test_synchan.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Apr 23 12:00:01 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import sys
sys.path.append('../../python')
import moose
def make_synapse(path):
    """Create a synapse with two time constants. Connect a spikegen to the
    synapse. Create a pulsegen to drive the spikegen.

    Returns:
        (syn, spikegen): the created SynChan and SpikeGen elements.
    """
    syn = moose.SynChan(path)
    syn.tau1 = 5.0 # ms
    syn.tau2 = 1.0 # ms
    syn.Gk = 1.0 # mS
    syn.Ek = 0.0
    ## NOTE: This is the old implementation, kept for reference.
    #syn.synapse.num = 1
    ## syn.bufferTime = 1.0 # ms
    #syn.synapse.delay = 1.0
    #syn.synapse.weight = 1.0
    #print 'Synapses:', len(syn.synapse), 'w=', syn.synapse[0].weight
    # In the new implementation the SimpleSynHandler class takes care
    # of multiple synapses; class SynChan does not have any .synapse field.
    synH = moose.SimpleSynHandler( '%s/SynHandler' % path)
    synH.synapse.num = 1
    ## syn.bufferTime = 1.0 # ms
    synH.synapse.delay = 1.0
    synH.synapse.weight = 1.0
    synH.connect('activationOut', syn, 'activation')
    print(('Synapses:', len(synH.synapse), 'w=', synH.synapse[0].weight ))
    spikegen = moose.SpikeGen('%s/spike' % (syn.parent.path))
    spikegen.edgeTriggered = False # Make it fire continuously when input is high
    spikegen.refractT = 10.0 # With this setting it will fire at 1 s / 10 ms = 100 Hz
    spikegen.threshold = 0.5
    # This will send alternating -1 and +1 to SpikeGen to make it fire
    spike_stim = moose.PulseGen('%s/spike_stim' % (syn.parent.path))
    spike_stim.delay[0] = 1.0
    spike_stim.level[0] = 1.0
    spike_stim.width[0] = 100.0
    moose.connect(spike_stim, 'output', spikegen, 'Vm')
    m = moose.connect(spikegen, 'spikeOut', synH.synapse.vec, 'addSpike', 'Sparse')
    m.setRandomConnectivity(1.0, 1)
    # NOTE(review): the author marked this call as segfaulting; it is kept
    # as-is here -- confirm whether it should be removed.
    m = moose.connect(spikegen, 'spikeOut', synH.synapse[0], 'addSpike') # this causes segfault
    return syn, spikegen
if __name__ == '__main__':
    # Build the model, wire every element under /model to clock 0 with a
    # 0.01 time-step, then run the simulation for 100 time units.
    model = moose.Neutral('/model')
    syn, spikegen = make_synapse('/model/synchan')
    moose.setClock(0, 0.01)
    moose.useClock(0, '/model/##', 'process')
    moose.reinit()
    moose.start(100)
#
# test_synchan.py ends here
|
What are you all hearing from our new recruits about our off season workout programs? I just wonder what they think they are up against in coming into an elite program like Alabama. RTR!
They went from being 4 and 5 star pampered athletes to plebes.
They went from being 4 and 5 star pampered athletes to plebes future first round draft picks.
As Lipless, from John Boy and Billy would say, "I like your answer better".
|
# -*- encoding: utf-8 -*-
from supriya.tools.systemtools.Enumeration import Enumeration
class DoneAction(Enumeration):
    r'''An enumeration of scsynth UGen "done" actions.

    ::

        >>> from supriya.tools import synthdeftools
        >>> synthdeftools.DoneAction(2)
        DoneAction.FREE_SYNTH

    ::

        >>> synthdeftools.DoneAction.from_expr('pause synth')
        DoneAction.PAUSE_SYNTH

    '''

    ### CLASS VARIABLES ###

    # Integer values are presumably the raw doneAction codes sent to the
    # scsynth server (see the class docstring) -- names describe what
    # happens to the synth and its neighboring nodes when a UGen is done.
    NOTHING = 0
    PAUSE_SYNTH = 1
    FREE_SYNTH = 2
    FREE_SYNTH_AND_PRECEDING_NODE = 3
    FREE_SYNTH_AND_FOLLOWING_NODE = 4
    FREE_SYNTH_AND_FREEALL_PRECEDING_NODE = 5
    FREE_SYNTH_AND_FREEALL_FOLLOWING_NODE = 6
    FREE_SYNTH_AND_ALL_PRECEDING_NODES_IN_GROUP = 7
    FREE_SYNTH_AND_ALL_FOLLOWING_NODES_IN_GROUP = 8
    FREE_SYNTH_AND_PAUSE_PRECEDING_NODE = 9
    FREE_SYNTH_AND_PAUSE_FOLLOWING_NODE = 10
    FREE_SYNTH_AND_DEEPFREE_PRECEDING_NODE = 11
    FREE_SYNTH_AND_DEEPFREE_FOLLOWING_NODE = 12
    FREE_SYNTH_AND_ALL_SIBLING_NODES = 13
    FREE_SYNTH_AND_ENCLOSING_GROUP = 14
|
Young nurses are still being hazed to death despite a government pledge to stamp out the practice, said the Dong-a Ilbo. Last February, South Koreans learned of the extent of the practice of taeum—in which senior nurses harass and berate rookies—after the suicide of one victim. Other nurses spoke out, telling of being belittled, mocked, yelled at, and ordered to go home early. So the Health Ministry took action, saying it would revoke the license of any doctor or nurse who bullied an incoming nurse, and would establish a “nurse human rights center” where victims could report harassment and receive counseling. But nearly a year later, “little has improved at nurses’ workplaces.” The legislation required to legalize license revocation is stalled in a parliamentary committee, and no new nurses’ rights center has been set up. Meanwhile, nursing departments are chronically understaffed, and Korean nurses are each expected to take care of more than four patients—twice as many as their American and Japanese counterparts. “Lack of nursing staff leads to poor working conditions, which in turn spawns the culture of bullying.” But who would want this job? Just last week, another nurse, only 20 years old, took her own life—and authorities again suspect hazing.
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Base class for a QtWebKit/QtWebEngine web inspector."""
import base64
import binascii
import enum
from typing import cast, Optional
from PyQt5.QtWidgets import QWidget
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QEvent
from PyQt5.QtGui import QCloseEvent
from qutebrowser.browser import eventfilter
from qutebrowser.config import configfiles
from qutebrowser.utils import log, usertypes, utils
from qutebrowser.keyinput import modeman
from qutebrowser.misc import miscwidgets, objects
def create(*, splitter: 'miscwidgets.InspectorSplitter',
           win_id: int,
           parent: QWidget = None) -> 'AbstractWebInspector':
    """Construct the inspector implementation for the active backend.

    Args:
        splitter: InspectorSplitter where the inspector can be placed.
        win_id: The window ID this inspector is associated with.
        parent: The Qt parent to set.
    """
    # The backend-specific modules are imported lazily so the QtWebEngine
    # dependency is only pulled in when needed, and to avoid circular
    # imports.
    backend = objects.backend
    if backend == usertypes.Backend.QtWebEngine:
        from qutebrowser.browser.webengine import webengineinspector
        return webengineinspector.WebEngineInspector(splitter, win_id, parent)
    if backend == usertypes.Backend.QtWebKit:
        from qutebrowser.browser.webkit import webkitinspector
        return webkitinspector.WebKitInspector(splitter, win_id, parent)
    raise utils.Unreachable(backend)
class Position(enum.Enum):

    """Where the inspector is shown."""

    # Docked inside the main window, on the given edge.
    right = enum.auto()
    left = enum.auto()
    top = enum.auto()
    bottom = enum.auto()
    # Detached into a separate top-level window.
    window = enum.auto()
class Error(Exception):

    """Raised when the inspector could not be initialized."""
class _EventFilter(QObject):

    """Event filter which reports clicks on the inspector.

    We need to use this with a ChildEventFilter (rather than just overriding
    mousePressEvent) for two reasons:

    - For QtWebEngine, we need to listen for mouse events on its focusProxy(),
      which can change when another page loads (which might be possible with an
      inspector as well?)

    - For QtWebKit, we need to listen for mouse events on the QWebView used by
      the QWebInspector.
    """

    clicked = pyqtSignal()

    def eventFilter(self, _obj: QObject, event: QEvent) -> bool:
        """Emit `clicked` for mouse presses, never consuming any event."""
        if event.type() == QEvent.MouseButtonPress:
            self.clicked.emit()
        return False
class AbstractWebInspector(QWidget):

    """Base class for QtWebKit/QtWebEngine inspectors.

    Attributes:
        _position: position of the inspector (right/left/top/bottom/window)
        _splitter: InspectorSplitter where the inspector can be placed.

    Signals:
        recreate: Emitted when the inspector should be recreated.
    """

    recreate = pyqtSignal()

    def __init__(self, splitter: 'miscwidgets.InspectorSplitter',
                 win_id: int,
                 parent: QWidget = None) -> None:
        super().__init__(parent)
        # The backend-specific inspector widget is supplied later via
        # _set_widget(); None until then.
        self._widget = cast(QWidget, None)
        self._layout = miscwidgets.WrapperLayout(self)
        self._splitter = splitter
        self._position: Optional[Position] = None
        self._win_id = win_id
        # Translate mouse presses anywhere inside the wrapped widget tree
        # into _on_clicked() calls.
        self._event_filter = _EventFilter(parent=self)
        self._event_filter.clicked.connect(self._on_clicked)
        self._child_event_filter = eventfilter.ChildEventFilter(
            eventfilter=self._event_filter,
            parent=self)

    def _set_widget(self, widget: QWidget) -> None:
        """Wrap the backend-specific inspector widget into this container."""
        self._widget = widget
        self._widget.setWindowTitle("Web Inspector")
        self._widget.installEventFilter(self._child_event_filter)
        self._layout.wrap(self, self._widget)

    def _load_position(self) -> Position:
        """Get the last position the inspector was in."""
        # Default to a right-docked inspector on first use.
        pos = configfiles.state['inspector'].get('position', 'right')
        return Position[pos]

    def _save_position(self, position: Position) -> None:
        """Save the last position the inspector was in."""
        configfiles.state['inspector']['position'] = position.name

    def _needs_recreate(self) -> bool:
        """Whether the inspector needs recreation when detaching to a window.

        This is done due to an unknown QtWebEngine bug which sometimes prevents
        inspector windows from showing up.

        Needs to be overridden by subclasses.
        """
        return False

    @pyqtSlot()
    def _on_clicked(self) -> None:
        """Enter insert mode if a docked inspector was clicked."""
        if self._position != Position.window:
            modeman.enter(self._win_id, usertypes.KeyMode.insert,
                          reason='Inspector clicked', only_if_normal=True)

    def set_position(self, position: Optional[Position]) -> None:
        """Set the position of the inspector.

        If the position is None, the last known position is used.
        """
        if position is None:
            position = self._load_position()
        else:
            self._save_position(position)
        # Selecting the current position again just toggles visibility.
        if position == self._position:
            self.toggle()
            return
        if (position == Position.window and
                self._position is not None and
                self._needs_recreate()):
            # Detaching to window
            self.recreate.emit()
            self.shutdown()
            return
        elif position == Position.window:
            # Reparent to make this widget a top-level window and restore
            # its last saved geometry.
            self.setParent(None)  # type: ignore[call-overload]
            self._load_state_geometry()
        else:
            self._splitter.set_inspector(self, position)
        self._position = position
        self._widget.show()
        self.show()

    def toggle(self) -> None:
        """Toggle visibility of the inspector."""
        if self.isVisible():
            self.hide()
        else:
            self.show()

    def _load_state_geometry(self) -> None:
        """Load the geometry from the state file."""
        try:
            data = configfiles.state['inspector']['window']
            geom = base64.b64decode(data, validate=True)
        except KeyError:
            # First start
            pass
        except binascii.Error:
            # Corrupt base64 in the state file: keep the default geometry.
            log.misc.exception("Error while reading geometry")
        else:
            log.init.debug("Loading geometry from {!r}".format(geom))
            ok = self._widget.restoreGeometry(geom)
            if not ok:
                log.init.warning("Error while loading geometry.")

    def closeEvent(self, _e: QCloseEvent) -> None:
        """Save the geometry when closed."""
        data = self._widget.saveGeometry().data()
        geom = base64.b64encode(data).decode('ASCII')
        configfiles.state['inspector']['window'] = geom

    def inspect(self, page: QWidget) -> None:
        """Inspect the given QWeb(Engine)Page."""
        raise NotImplementedError

    @pyqtSlot()
    def shutdown(self) -> None:
        """Clean up the inspector."""
        self.close()
        self.deleteLater()
|
Over the years we have frequently used Amazon.com to do our online shopping, because of its accessibility and ease – especially when we lived and worked near their headquarters in Seattle. Now that we're away from our home in the Northwest and having a baby, we've found that Amazon Prime has become even more essential to our growing family.
Since we became Prime members and parents, we have been saving so much through their discounts and recently learned how to claim an Amazon Baby Registry Box – at no cost – after registering with Amazon.com for all of Baby Burm's needs this year.
We wanted to share our experience with you and any other expecting parents today, because who wants to miss out on quality baby brand products that are free?
Become an Amazon Prime member if you aren't already.
Complete an Amazon Baby Registry and have over $10 purchased from your baby registry.
Click the "Claim Now" button on your baby registry, which will allow you to redeem the free offer.
Wait 3-5 days to receive your free Amazon Baby Registry box ($35 value) and products while supplies last.
We also received a teething toy in our box that was left out in the nursery (oops!) and soon stolen by our baby girl's big sister, Olive the Aussie, who unfortunately left it unrecognizable.
We cannot review all of these products until our baby girl is here, but we look forward to testing them out when she arrives!
We hope that you found our tutorial helpful, and that you can claim your Amazon Baby Registry Box while supplies last.
We also want to note that all opinions here are our own and 100% truthful – this not a sponsored post – we just wanted to share the deal with any of our expecting family, friends or followers!
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from distutils.command.build import build
from setuptools import setup, find_packages
base_dir = os.path.dirname(__file__)

# Read package metadata (title, version, author, ...) from
# cryptography/__about__.py without importing the package itself,
# which would require its dependencies to already be installed.
about = {}
with open(os.path.join(base_dir, "cryptography", "__about__.py")) as f:
    exec(f.read(), about)

CFFI_DEPENDENCY = "cffi>=0.6"
SIX_DEPENDENCY = "six>=1.4.1"

# Shared by install_requires and setup_requires below.
requirements = [
    CFFI_DEPENDENCY,
    SIX_DEPENDENCY
]
class cffi_build(build):
    """
    This class exists, instead of just providing ``ext_modules=[...]`` directly
    in ``setup()`` because importing cryptography requires we have several
    packages installed first.

    By doing the imports here we ensure that packages listed in
    ``setup_requires`` are already installed.
    """

    def finalize_options(self):
        # Imported lazily (see class docstring): cryptography can only be
        # imported once cffi/six from setup_requires are available.
        from cryptography.hazmat.bindings.openssl.binding import Binding
        from cryptography.hazmat.primitives import constant_time, padding

        # Each entry compiles one cffi extension module.
        self.distribution.ext_modules = [
            Binding().ffi.verifier.get_extension(),
            constant_time._ffi.verifier.get_extension(),
            padding._ffi.verifier.get_extension()
        ]
        build.finalize_options(self)
with open(os.path.join(base_dir, "README.rst")) as f:
    long_description = f.read()


setup(
    # All human-readable metadata comes from cryptography/__about__.py,
    # loaded into ``about`` above, so it is defined in exactly one place.
    name=about["__title__"],
    version=about["__version__"],

    description=about["__summary__"],
    long_description=long_description,
    license=about["__license__"],
    url=about["__uri__"],

    author=about["__author__"],
    author_email=about["__email__"],

    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: BSD",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Security :: Cryptography",
    ],

    packages=find_packages(exclude=["tests", "tests.*"]),

    install_requires=requirements,
    # Needed at build time too so cffi_build can import cryptography.
    setup_requires=requirements,

    # for cffi
    zip_safe=False,
    ext_package="cryptography",
    cmdclass={
        # Defer building the cffi extensions until build time, once the
        # setup_requires dependencies are installed.
        "build": cffi_build,
    }
)
|
Amazing in recipes and as is, these milk chocolate bars are petite perfection.
You can never have enough GODIVA! This pack of 48 individually wrapped, solid milk chocolate bars is great to have on hand for the holidays. For baking, self-treating, and gifting, of course, our smooth and luscious chocolate makes the season extra sweet.
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor."""
  shards = _get_sharded_variable(name, shape, dtype, num_shards)
  if len(shards) == 1:
    # Single shard: nothing to concatenate.
    return shards[0]

  concat_name = name + "/concat"
  concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  # Reuse a previously registered concatenation of these shards if one
  # already exists in the graph collection.
  for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if value.name == concat_full_name:
      return value

  concatenated = array_ops.concat(shards, 0, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concatenated)
  return concatenated
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  # Rows are split as evenly as possible; the first `extra_rows` shards
  # take one additional row each.
  base_rows, extra_rows = divmod(shape[0], num_shards)
  shards = []
  for shard_index in range(num_shards):
    rows = base_rows + (1 if shard_index < extra_rows else 0)
    shards.append(vs.get_variable(name + "_%d" % shard_index,
                                  [rows] + shape[1:],
                                  dtype=dtype))
  return shards
class CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.

  The default non-peephole implementation is based on:
    http://www.bioinf.jku.at/publications/older/2604.pdf
  S. Hochreiter and J. Schmidhuber.
  "Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.

  The peephole implementation is based on:
    https://research.google.com/pubs/archive/43905.pdf
  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
  large scale acoustic modeling." INTERSPEECH, 2014.

  The coupling of input and forget gate is based on:
    http://arxiv.org/pdf/1503.04069.pdf
  Greff et al. "LSTM: A Search Space Odyssey"

  The class uses optional peep-hole connections, and an optional projection
  layer.
  """

  def __init__(self, num_units, use_peepholes=False,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=True,
               activation=math_ops.tanh, reuse=None):
    """Initialize the parameters for an LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix. If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`. By default (False), they are concatenated
        along the column axis. This default behavior will soon be deprecated.
      activation: Activation function of the inner states.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse)
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated. Use state_is_tuple=True.", self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation
    self._reuse = reuse

    # With a projection layer, the emitted output (m) has num_proj columns;
    # without one it has num_units columns.
    if num_proj:
      self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
                          if state_is_tuple else num_units + num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units)
                          if state_is_tuple else 2 * num_units)
      self._output_size = num_units

  @property
  def state_size(self):
    # LSTMStateTuple when state_is_tuple, otherwise the width of the
    # concatenated [c, m] state tensor.
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def call(self, inputs, state):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: if `state_is_tuple` is False, this must be a state Tensor,
        `2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
        tuple of state Tensors, both `2-D`, with column sizes `c_state` and
        `m_state`.

    Returns:
      A tuple containing:
      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`. Same type and shape(s) as `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    sigmoid = math_ops.sigmoid

    num_proj = self._num_units if self._num_proj is None else self._num_proj

    # Unpack the previous cell state (c_prev) and previous output (m_prev).
    if self._state_is_tuple:
      (c_prev, m_prev) = state
    else:
      c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
      m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

    dtype = inputs.dtype
    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    # A single fused weight matrix for three gates only -- there is no
    # separate input gate; it is coupled to the forget gate below.
    concat_w = _get_concat_variable(
        "W", [input_size.value + num_proj, 3 * self._num_units],
        dtype, self._num_unit_shards)

    b = vs.get_variable(
        "B",
        shape=[3 * self._num_units],
        initializer=init_ops.zeros_initializer(),
        dtype=dtype)

    # j = new_input, f = forget_gate, o = output_gate
    cell_inputs = array_ops.concat([inputs, m_prev], 1)
    lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
    j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)

    # Diagonal connections
    if self._use_peepholes:
      w_f_diag = vs.get_variable(
          "W_F_diag", shape=[self._num_units], dtype=dtype)
      w_o_diag = vs.get_variable(
          "W_O_diag", shape=[self._num_units], dtype=dtype)

    # NOTE(review): this guard duplicates the one immediately above and
    # could be merged with it.
    if self._use_peepholes:
      f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)
    else:
      f_act = sigmoid(f + self._forget_bias)
    # Coupled gates: the input gate is defined as (1 - f_act).
    c = (f_act * c_prev + (1 - f_act) * self._activation(j))

    if self._use_peepholes:
      m = sigmoid(o + w_o_diag * c) * self._activation(c)
    else:
      m = sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
      # Project the output down to num_proj dimensions (optionally clipped).
      concat_w_proj = _get_concat_variable(
          "W_P", [self._num_units, self._num_proj],
          dtype, self._num_proj_shards)

      m = math_ops.matmul(m, concat_w_proj)
      if self._proj_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
        # pylint: enable=invalid-unary-operand-type

    new_state = (rnn_cell_impl.LSTMStateTuple(c, m)
                 if self._state_is_tuple else array_ops.concat([c, m], 1))
    return m, new_state
class TimeFreqLSTMCell(rnn_cell_impl.RNNCell):
  """Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.
  This implementation is based on:
    Tara N. Sainath and Bo Li
    "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
    for LVCSR Tasks." submitted to INTERSPEECH, 2016.
  It uses peep-hole connections and optional cell clipping.
  """
  def __init__(self, num_units, use_peepholes=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None,
               reuse=None):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_unit_shards: int, How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      forget_bias: float, Biases of the forget gate are initialized by default
        to 1 in order to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: int, The size of the input feature the LSTM spans over.
      frequency_skip: int, The amount the LSTM filter is shifted by in
        frequency.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(TimeFreqLSTMCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    # NOTE(review): this reports the state size of a single frequency step
    # (one c and one m); `call` actually reads/writes 2 * num_units per
    # frequency window produced by _make_tf_features — confirm callers size
    # the state accordingly.
    self._state_size = 2 * num_units
    self._output_size = num_units
    self._reuse = reuse
  @property
  def output_size(self):
    # Per-frequency-step output width; `call` concatenates one such slice
    # per frequency window.
    return self._output_size
  @property
  def state_size(self):
    # See the NOTE in __init__: this is the per-frequency-step state width.
    return self._state_size
  def call(self, inputs, state):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    sigmoid = math_ops.sigmoid
    tanh = math_ops.tanh
    # Split the flat input into overlapping frequency windows (see
    # _make_tf_features); each window is processed by the same LSTM weights.
    freq_inputs = self._make_tf_features(inputs)
    dtype = inputs.dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    # One weight matrix shared across all frequency steps. Each step's input
    # is [frequency feature, m_prev (time recurrence), m_prev_freq (frequency
    # recurrence)], hence the `+ 2 * num_units` input width; the output packs
    # the four gates i, j, f, o.
    concat_w = _get_concat_variable(
        "W", [actual_input_size + 2*self._num_units, 4 * self._num_units],
        dtype, self._num_unit_shards)
    b = vs.get_variable(
        "B",
        shape=[4 * self._num_units],
        initializer=init_ops.zeros_initializer(),
        dtype=dtype)
    # Diagonal connections
    if self._use_peepholes:
      w_f_diag = vs.get_variable(
          "W_F_diag", shape=[self._num_units], dtype=dtype)
      w_i_diag = vs.get_variable(
          "W_I_diag", shape=[self._num_units], dtype=dtype)
      w_o_diag = vs.get_variable(
          "W_O_diag", shape=[self._num_units], dtype=dtype)
    # initialize the first freq state to be zero
    m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                   self._num_units], dtype)
    for fq in range(len(freq_inputs)):
      # Recover this frequency step's (c, m) pair from the flat state:
      # the state is laid out as [c_0, m_0, c_1, m_1, ...].
      c_prev = array_ops.slice(state, [0, 2*fq*self._num_units],
                               [-1, self._num_units])
      m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
                               [-1, self._num_units])
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq],
                                     1)
      lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
      i, j, f, o = array_ops.split(
          value=lstm_matrix, num_or_size_splits=4, axis=1)
      if self._use_peepholes:
        # Peepholes let the input/forget gates see the previous cell state.
        c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
             sigmoid(i + w_i_diag * c_prev) * tanh(j))
      else:
        c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
      if self._cell_clip is not None:
        # Clip the cell state before it feeds the output activation.
        # pylint: disable=invalid-unary-operand-type
        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
        # pylint: enable=invalid-unary-operand-type
      if self._use_peepholes:
        m = sigmoid(o + w_o_diag * c) * tanh(c)
      else:
        m = sigmoid(o) * tanh(c)
      # This step's output is the frequency recurrence for the next step.
      m_prev_freq = m
      if fq == 0:
        state_out = array_ops.concat([c, m], 1)
        m_out = m
      else:
        # Concatenate per-frequency results along the feature axis.
        state_out = array_ops.concat([state_out, c, m], 1)
        m_out = array_ops.concat([m_out, m], 1)
    return m_out, state_out
  def _make_tf_features(self, input_feat):
    """Make the frequency features.
    Args:
      input_feat: input Tensor, 2D, batch x num_units.
    Returns:
      A list of frequency features, with each element containing:
      - A 2D, batch x output_dim, Tensor representing the time-frequency feature
        for that frequency index. Here output_dim is feature_size.
    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    # Number of sliding windows of width feature_size, advanced by
    # frequency_skip, that fit in the input.
    num_feats = int((input_size - self._feature_size) / (
        self._frequency_skip)) + 1
    freq_inputs = []
    for f in range(num_feats):
      cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
                                  [-1, self._feature_size])
      freq_inputs.append(cur_input)
    return freq_inputs
class GridLSTMCell(rnn_cell_impl.RNNCell):
  """Grid Long short-term memory unit (LSTM) recurrent network cell.
  The default is based on:
    Nal Kalchbrenner, Ivo Danihelka and Alex Graves
    "Grid Long Short-Term Memory," Proc. ICLR 2016.
    http://arxiv.org/abs/1507.01526
  When peephole connections are used, the implementation is based on:
    Tara N. Sainath and Bo Li
    "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
    for LVCSR Tasks." submitted to INTERSPEECH, 2016.
  The code uses optional peephole connections, shared_weights and cell clipping.
  """
  def __init__(self, num_units, use_peepholes=False,
               share_time_frequency_weights=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None,
               num_frequency_blocks=None,
               start_freqindex_list=None,
               end_freqindex_list=None,
               couple_input_forget_gates=False,
               state_is_tuple=True,
               reuse=None):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: (optional) bool, default False. Set True to enable
        diagonal/peephole connections.
      share_time_frequency_weights: (optional) bool, default False. Set True to
        enable shared cell weights between time and frequency LSTMs.
      cell_clip: (optional) A float value, default None, if provided the cell
        state is clipped by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices, default None.
      num_unit_shards: (optional) int, default 1, How to split the weight
        matrix. If > 1,the weight matrix is stored across num_unit_shards.
      forget_bias: (optional) float, default 1.0, The initial bias of the
        forget gates, used to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: (optional) int, default None, The size of the input feature
        the LSTM spans over.
      frequency_skip: (optional) int, default None, The amount the LSTM filter
        is shifted by in frequency.
      num_frequency_blocks: [required] A list of frequency blocks needed to
        cover the whole input feature splitting defined by start_freqindex_list
        and end_freqindex_list.
      start_freqindex_list: [optional], list of ints, default None, The
        starting frequency index for each frequency block.
      end_freqindex_list: [optional], list of ints, default None. The ending
        frequency index for each frequency block.
      couple_input_forget_gates: (optional) bool, default False, Whether to
        couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
        model parameters and computation cost.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`. By default (False), they are concatenated
        along the column axis. This default behavior will soon be deprecated.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
    Raises:
      ValueError: if the num_frequency_blocks list is not specified
    """
    super(GridLSTMCell, self).__init__(_reuse=reuse)
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated. Use state_is_tuple=True.", self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._share_time_frequency_weights = share_time_frequency_weights
    self._couple_input_forget_gates = couple_input_forget_gates
    self._state_is_tuple = state_is_tuple
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    self._start_freqindex_list = start_freqindex_list
    self._end_freqindex_list = end_freqindex_list
    self._num_frequency_blocks = num_frequency_blocks
    self._total_blocks = 0
    self._reuse = reuse
    if self._num_frequency_blocks is None:
      raise ValueError("Must specify num_frequency_blocks")
    for block_index in range(len(self._num_frequency_blocks)):
      self._total_blocks += int(self._num_frequency_blocks[block_index])
    if state_is_tuple:
      # Build a namedtuple state type with one (c, m) pair per frequency
      # step per block, named e.g. state_f00_b00_c, state_f00_b00_m, ...
      state_names = ""
      for block_index in range(len(self._num_frequency_blocks)):
        for freq_index in range(self._num_frequency_blocks[block_index]):
          name_prefix = "state_f%02d_b%02d" % (freq_index, block_index)
          state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
      self._state_tuple_type = collections.namedtuple(
          "GridLSTMStateTuple", state_names.strip(","))
      self._state_size = self._state_tuple_type(
          *([num_units, num_units] * self._total_blocks))
    else:
      self._state_tuple_type = None
      self._state_size = num_units * self._total_blocks * 2
    # Both m_time and m_freq are emitted per frequency step, hence the 2x.
    self._output_size = num_units * self._total_blocks * 2
  @property
  def output_size(self):
    return self._output_size
  @property
  def state_size(self):
    return self._state_size
  @property
  def state_tuple_type(self):
    # The namedtuple class used for tuple states (None when
    # state_is_tuple=False).
    return self._state_tuple_type
  def call(self, inputs, state):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, [batch, feature_size].
      state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the
        flag self._state_is_tuple.
    Returns:
      A tuple containing:
      - A 2D, [batch, output_dim], Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, [batch, state_size], Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    # Prefer the static batch size; fall back to the dynamic one.
    batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
    freq_inputs = self._make_tf_features(inputs)
    m_out_lst = []
    state_out_lst = []
    # Process each frequency block independently and concatenate the results.
    for block in range(len(freq_inputs)):
      m_out_lst_current, state_out_lst_current = self._compute(
          freq_inputs[block], block, state, batch_size,
          state_is_tuple=self._state_is_tuple)
      m_out_lst.extend(m_out_lst_current)
      state_out_lst.extend(state_out_lst_current)
    if self._state_is_tuple:
      state_out = self._state_tuple_type(*state_out_lst)
    else:
      state_out = array_ops.concat(state_out_lst, 1)
    m_out = array_ops.concat(m_out_lst, 1)
    return m_out, state_out
  def _compute(self, freq_inputs, block, state, batch_size,
               state_prefix="state",
               state_is_tuple=True):
    """Run the actual computation of one step LSTM.
    Args:
      freq_inputs: list of Tensors, 2D, [batch, feature_size].
      block: int, current frequency block index to process.
      state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on
        the flag state_is_tuple.
      batch_size: int32, batch size.
      state_prefix: (optional) string, name prefix for states, defaults to
        "state".
      state_is_tuple: boolean, indicates whether the state is a tuple or Tensor.
    Returns:
      A tuple, containing:
      - A list of [batch, output_dim] Tensors, representing the output of the
        LSTM given the inputs and state.
      - A list of [batch, state_size] Tensors, representing the LSTM state
        values given the inputs and previous state.
    """
    sigmoid = math_ops.sigmoid
    tanh = math_ops.tanh
    # With coupled input/forget gates only i, j, o are produced (f = 1 - i).
    num_gates = 3 if self._couple_input_forget_gates else 4
    dtype = freq_inputs[0].dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    # F-LSTM (frequency direction) weights; the per-step input is
    # [feature, m_prev_time, m_prev_freq], hence `+ 2 * num_units`.
    concat_w_f = _get_concat_variable(
        "W_f_%d" % block, [actual_input_size + 2 * self._num_units,
                           num_gates * self._num_units],
        dtype, self._num_unit_shards)
    b_f = vs.get_variable(
        "B_f_%d" % block,
        shape=[num_gates * self._num_units],
        initializer=init_ops.zeros_initializer(),
        dtype=dtype)
    # T-LSTM (time direction) weights, unless shared with the F-LSTM.
    if not self._share_time_frequency_weights:
      concat_w_t = _get_concat_variable(
          "W_t_%d" % block, [actual_input_size + 2 * self._num_units,
                             num_gates * self._num_units],
          dtype, self._num_unit_shards)
      b_t = vs.get_variable(
          "B_t_%d" % block,
          shape=[num_gates * self._num_units],
          initializer=init_ops.zeros_initializer(),
          dtype=dtype)
    if self._use_peepholes:
      # Diagonal connections
      # Naming: *_freqf/timef peek at the frequency cell state, *_freqt/timet
      # at the time cell state; the prefix W_F/W_I/W_O is the gate.
      if not self._couple_input_forget_gates:
        w_f_diag_freqf = vs.get_variable(
            "W_F_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
        w_f_diag_freqt = vs.get_variable(
            "W_F_diag_freqt_%d"% block, shape=[self._num_units], dtype=dtype)
      w_i_diag_freqf = vs.get_variable(
          "W_I_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
      w_i_diag_freqt = vs.get_variable(
          "W_I_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
      w_o_diag_freqf = vs.get_variable(
          "W_O_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
      w_o_diag_freqt = vs.get_variable(
          "W_O_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
      if not self._share_time_frequency_weights:
        if not self._couple_input_forget_gates:
          w_f_diag_timef = vs.get_variable(
              "W_F_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
          w_f_diag_timet = vs.get_variable(
              "W_F_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
        w_i_diag_timef = vs.get_variable(
            "W_I_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
        w_i_diag_timet = vs.get_variable(
            "W_I_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
        w_o_diag_timef = vs.get_variable(
            "W_O_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
        w_o_diag_timet = vs.get_variable(
            "W_O_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
    # initialize the first freq state to be zero
    m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
    c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
    for freq_index in range(len(freq_inputs)):
      if state_is_tuple:
        # Tuple states are addressed by field name (see __init__ naming).
        name_prefix = "%s_f%02d_b%02d" % (state_prefix, freq_index, block)
        c_prev_time = getattr(state, name_prefix + "_c")
        m_prev_time = getattr(state, name_prefix + "_m")
      else:
        # Flat states are laid out as [c_0, m_0, c_1, m_1, ...].
        c_prev_time = array_ops.slice(
            state, [0, 2 * freq_index * self._num_units],
            [-1, self._num_units])
        m_prev_time = array_ops.slice(
            state, [0, (2 * freq_index + 1) * self._num_units],
            [-1, self._num_units])
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat(
          [freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
      # F-LSTM
      lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
                                                         concat_w_f), b_f)
      if self._couple_input_forget_gates:
        i_freq, j_freq, o_freq = array_ops.split(
            value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
        f_freq = None
      else:
        i_freq, j_freq, f_freq, o_freq = array_ops.split(
            value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
      # T-LSTM
      if self._share_time_frequency_weights:
        # Reuse the F-LSTM pre-activations for the time direction.
        i_time = i_freq
        j_time = j_freq
        f_time = f_freq
        o_time = o_freq
      else:
        lstm_matrix_time = nn_ops.bias_add(math_ops.matmul(cell_inputs,
                                                           concat_w_t), b_t)
        if self._couple_input_forget_gates:
          i_time, j_time, o_time = array_ops.split(
              value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
          f_time = None
        else:
          i_time, j_time, f_time, o_time = array_ops.split(
              value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
      # F-LSTM c_freq
      # input gate activations
      if self._use_peepholes:
        i_freq_g = sigmoid(i_freq +
                           w_i_diag_freqf * c_prev_freq +
                           w_i_diag_freqt * c_prev_time)
      else:
        i_freq_g = sigmoid(i_freq)
      # forget gate activations
      if self._couple_input_forget_gates:
        f_freq_g = 1.0 - i_freq_g
      else:
        if self._use_peepholes:
          f_freq_g = sigmoid(f_freq + self._forget_bias +
                             w_f_diag_freqf * c_prev_freq +
                             w_f_diag_freqt * c_prev_time)
        else:
          f_freq_g = sigmoid(f_freq + self._forget_bias)
      # cell state
      c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)
      if self._cell_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
                                        self._cell_clip)
        # pylint: enable=invalid-unary-operand-type
      # T-LSTM c_freq
      # input gate activations
      if self._use_peepholes:
        if self._share_time_frequency_weights:
          # With shared weights, the frequency peephole weights are reused.
          i_time_g = sigmoid(i_time +
                             w_i_diag_freqf * c_prev_freq +
                             w_i_diag_freqt * c_prev_time)
        else:
          i_time_g = sigmoid(i_time +
                             w_i_diag_timef * c_prev_freq +
                             w_i_diag_timet * c_prev_time)
      else:
        i_time_g = sigmoid(i_time)
      # forget gate activations
      if self._couple_input_forget_gates:
        f_time_g = 1.0 - i_time_g
      else:
        if self._use_peepholes:
          if self._share_time_frequency_weights:
            f_time_g = sigmoid(f_time + self._forget_bias +
                               w_f_diag_freqf * c_prev_freq +
                               w_f_diag_freqt * c_prev_time)
          else:
            f_time_g = sigmoid(f_time + self._forget_bias +
                               w_f_diag_timef * c_prev_freq +
                               w_f_diag_timet * c_prev_time)
        else:
          f_time_g = sigmoid(f_time + self._forget_bias)
      # cell state
      c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)
      if self._cell_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
                                        self._cell_clip)
        # pylint: enable=invalid-unary-operand-type
      # F-LSTM m_freq
      if self._use_peepholes:
        m_freq = sigmoid(o_freq +
                         w_o_diag_freqf * c_freq +
                         w_o_diag_freqt * c_time) * tanh(c_freq)
      else:
        m_freq = sigmoid(o_freq) * tanh(c_freq)
      # T-LSTM m_time
      if self._use_peepholes:
        if self._share_time_frequency_weights:
          m_time = sigmoid(o_time +
                           w_o_diag_freqf * c_freq +
                           w_o_diag_freqt * c_time) * tanh(c_time)
        else:
          m_time = sigmoid(o_time +
                           w_o_diag_timef * c_freq +
                           w_o_diag_timet * c_time) * tanh(c_time)
      else:
        m_time = sigmoid(o_time) * tanh(c_time)
      # Feed this step's frequency-direction outputs to the next step.
      m_prev_freq = m_freq
      c_prev_freq = c_freq
      # Concatenate the outputs for T-LSTM and F-LSTM for each shift
      if freq_index == 0:
        state_out_lst = [c_time, m_time]
        m_out_lst = [m_time, m_freq]
      else:
        state_out_lst.extend([c_time, m_time])
        m_out_lst.extend([m_time, m_freq])
    return m_out_lst, state_out_lst
  def _make_tf_features(self, input_feat, slice_offset=0):
    """Make the frequency features.
    Args:
      input_feat: input Tensor, 2D, [batch, num_units].
      slice_offset: (optional) Python int, default 0, the slicing offset is only
        used for the backward processing in the BidirectionalGridLSTMCell. It
        specifies a different starting point instead of always 0 to enable the
        forward and backward processing look at different frequency blocks.
    Returns:
      A list of frequency features, with each element containing:
      - A 2D, [batch, output_dim], Tensor representing the time-frequency
        feature for that frequency index. Here output_dim is feature_size.
    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    if slice_offset > 0:
      # Padding to the end
      # The [2, 2]-shaped paddings tensor is [[0, 0], [0, slice_offset]]:
      # pad only the tail of the feature axis.
      inputs = array_ops.pad(
          input_feat, array_ops.constant([0, 0, 0, slice_offset], shape=[2, 2],
                                         dtype=dtypes.int32),
          "CONSTANT")
    elif slice_offset < 0:
      # Padding to the front
      inputs = array_ops.pad(
          input_feat, array_ops.constant([0, 0, -slice_offset, 0], shape=[2, 2],
                                         dtype=dtypes.int32),
          "CONSTANT")
      slice_offset = 0
    else:
      inputs = input_feat
    freq_inputs = []
    if not self._start_freqindex_list:
      # Single block covering the whole feature via a sliding window.
      if len(self._num_frequency_blocks) != 1:
        raise ValueError("Length of num_frequency_blocks"
                         " is not 1, but instead is %d",
                         len(self._num_frequency_blocks))
      num_feats = int((input_size - self._feature_size) / (
          self._frequency_skip)) + 1
      if num_feats != self._num_frequency_blocks[0]:
        raise ValueError(
            "Invalid num_frequency_blocks, requires %d but gets %d, please"
            " check the input size and filter config are correct." % (
                self._num_frequency_blocks[0], num_feats))
      block_inputs = []
      for f in range(num_feats):
        cur_input = array_ops.slice(
            inputs, [0, slice_offset + f * self._frequency_skip],
            [-1, self._feature_size])
        block_inputs.append(cur_input)
      freq_inputs.append(block_inputs)
    else:
      # Explicit per-block [start, end) frequency ranges.
      if len(self._start_freqindex_list) != len(self._end_freqindex_list):
        raise ValueError("Length of start and end freqindex_list"
                         " does not match %d %d",
                         len(self._start_freqindex_list),
                         len(self._end_freqindex_list))
      if len(self._num_frequency_blocks) != len(self._start_freqindex_list):
        raise ValueError("Length of num_frequency_blocks"
                         " is not equal to start_freqindex_list %d %d",
                         len(self._num_frequency_blocks),
                         len(self._start_freqindex_list))
      for b in range(len(self._start_freqindex_list)):
        start_index = self._start_freqindex_list[b]
        end_index = self._end_freqindex_list[b]
        cur_size = end_index - start_index
        block_feats = int((cur_size - self._feature_size) / (
            self._frequency_skip)) + 1
        if block_feats != self._num_frequency_blocks[b]:
          raise ValueError(
              "Invalid num_frequency_blocks, requires %d but gets %d, please"
              " check the input size and filter config are correct." % (
                  self._num_frequency_blocks[b], block_feats))
        block_inputs = []
        for f in range(block_feats):
          cur_input = array_ops.slice(
              inputs, [0, start_index + slice_offset + f *
                       self._frequency_skip],
              [-1, self._feature_size])
          block_inputs.append(cur_input)
        freq_inputs.append(block_inputs)
    return freq_inputs
class BidirectionalGridLSTMCell(GridLSTMCell):
  """Bidirectional GridLstm cell.
  The bidirection connection is only used in the frequency direction, which
  hence doesn't affect the time direction's real-time processing that is
  required for online recognition systems.
  The current implementation uses different weights for the two directions.
  """
  def __init__(self, num_units, use_peepholes=False,
               share_time_frequency_weights=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None,
               num_frequency_blocks=None,
               start_freqindex_list=None,
               end_freqindex_list=None,
               couple_input_forget_gates=False,
               backward_slice_offset=0,
               reuse=None):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: (optional) bool, default False. Set True to enable
        diagonal/peephole connections.
      share_time_frequency_weights: (optional) bool, default False. Set True to
        enable shared cell weights between time and frequency LSTMs.
      cell_clip: (optional) A float value, default None, if provided the cell
        state is clipped by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices, default None.
      num_unit_shards: (optional) int, default 1, How to split the weight
        matrix. If > 1,the weight matrix is stored across num_unit_shards.
      forget_bias: (optional) float, default 1.0, The initial bias of the
        forget gates, used to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: (optional) int, default None, The size of the input feature
        the LSTM spans over.
      frequency_skip: (optional) int, default None, The amount the LSTM filter
        is shifted by in frequency.
      num_frequency_blocks: [required] A list of frequency blocks needed to
        cover the whole input feature splitting defined by start_freqindex_list
        and end_freqindex_list.
      start_freqindex_list: [optional], list of ints, default None, The
        starting frequency index for each frequency block.
      end_freqindex_list: [optional], list of ints, default None. The ending
        frequency index for each frequency block.
      couple_input_forget_gates: (optional) bool, default False, Whether to
        couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
        model parameters and computation cost.
      backward_slice_offset: (optional) int32, default 0, the starting offset to
        slice the feature for backward processing.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    # The parent is always constructed with state_is_tuple=True; the state
    # tuple type is then rebuilt below with fwd_/bwd_ prefixed field names.
    super(BidirectionalGridLSTMCell, self).__init__(
        num_units, use_peepholes, share_time_frequency_weights, cell_clip,
        initializer, num_unit_shards, forget_bias, feature_size, frequency_skip,
        num_frequency_blocks, start_freqindex_list, end_freqindex_list,
        couple_input_forget_gates, True, reuse)
    self._backward_slice_offset = int(backward_slice_offset)
    state_names = ""
    for direction in ["fwd", "bwd"]:
      for block_index in range(len(self._num_frequency_blocks)):
        for freq_index in range(self._num_frequency_blocks[block_index]):
          name_prefix = "%s_state_f%02d_b%02d" % (direction, freq_index,
                                                  block_index)
          state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
    self._state_tuple_type = collections.namedtuple(
        "BidirectionalGridLSTMStateTuple", state_names.strip(","))
    self._state_size = self._state_tuple_type(
        *([num_units, num_units] * self._total_blocks * 2))
    # Twice the unidirectional output: fwd and bwd results are concatenated.
    self._output_size = 2 * num_units * self._total_blocks * 2
  def call(self, inputs, state):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, [batch, num_units].
      state: tuple of Tensors, 2D, [batch, state_size].
    Returns:
      A tuple containing:
      - A 2D, [batch, output_dim], Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, [batch, state_size], Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
    fwd_inputs = self._make_tf_features(inputs)
    if self._backward_slice_offset:
      # Shift the backward windows so the two directions see different blocks.
      bwd_inputs = self._make_tf_features(inputs, self._backward_slice_offset)
    else:
      bwd_inputs = fwd_inputs
    # Forward processing
    with vs.variable_scope("fwd"):
      fwd_m_out_lst = []
      fwd_state_out_lst = []
      for block in range(len(fwd_inputs)):
        fwd_m_out_lst_current, fwd_state_out_lst_current = self._compute(
            fwd_inputs[block], block, state, batch_size,
            state_prefix="fwd_state", state_is_tuple=True)
        fwd_m_out_lst.extend(fwd_m_out_lst_current)
        fwd_state_out_lst.extend(fwd_state_out_lst_current)
    # Backward processing
    bwd_m_out_lst = []
    bwd_state_out_lst = []
    # Separate variable scope => separate weights for the backward direction.
    with vs.variable_scope("bwd"):
      for block in range(len(bwd_inputs)):
        # Reverse the blocks
        bwd_inputs_reverse = bwd_inputs[block][::-1]
        bwd_m_out_lst_current, bwd_state_out_lst_current = self._compute(
            bwd_inputs_reverse, block, state, batch_size,
            state_prefix="bwd_state", state_is_tuple=True)
        bwd_m_out_lst.extend(bwd_m_out_lst_current)
        bwd_state_out_lst.extend(bwd_state_out_lst_current)
    state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
    # Outputs are always concated as it is never used separately.
    m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1)
    return m_out, state_out
# Module-level alias for TensorFlow's private fully-connected helper, used by
# the wrapper cells below; accessing the protected member is intentional here.
# pylint: disable=protected-access
_linear = rnn_cell_impl._linear
# pylint: enable=protected-access
class AttentionCellWrapper(rnn_cell_impl.RNNCell):
  """Basic attention cell wrapper.
  Implementation based on https://arxiv.org/abs/1409.0473.
  """
  def __init__(self, cell, attn_length, attn_size=None, attn_vec_size=None,
               input_size=None, state_is_tuple=True, reuse=None):
    """Create a cell with attention.
    Args:
      cell: an RNNCell, an attention is added to it.
      attn_length: integer, the size of an attention window.
      attn_size: integer, the size of an attention vector. Equal to
        cell.output_size by default.
      attn_vec_size: integer, the number of convolutional features calculated
        on attention state and a size of the hidden layer built from
        base cell state. Equal attn_size to by default.
      input_size: integer, the size of a hidden linear layer,
        built from inputs and attention. Derived from the input tensor
        by default.
      state_is_tuple: If True, accepted and returned states are n-tuples, where
        `n = len(cells)`. By default (False), the states are all
        concatenated along the column axis.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if cell returns a state tuple but the flag
        `state_is_tuple` is `False` or if attn_length is zero or less.
    """
    super(AttentionCellWrapper, self).__init__(_reuse=reuse)
    if not rnn_cell_impl._like_rnncell(cell):  # pylint: disable=protected-access
      raise TypeError("The parameter cell is not RNNCell.")
    if nest.is_sequence(cell.state_size) and not state_is_tuple:
      raise ValueError("Cell returns tuple of states, but the flag "
                       "state_is_tuple is not set. State size is: %s"
                       % str(cell.state_size))
    if attn_length <= 0:
      raise ValueError("attn_length should be greater than zero, got %s"
                       % str(attn_length))
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated. Use state_is_tuple=True.", self)
    if attn_size is None:
      attn_size = cell.output_size
    if attn_vec_size is None:
      attn_vec_size = attn_size
    self._state_is_tuple = state_is_tuple
    self._cell = cell
    self._attn_vec_size = attn_vec_size
    self._input_size = input_size
    self._attn_size = attn_size
    self._attn_length = attn_length
    self._reuse = reuse
  @property
  def state_size(self):
    # Wrapped cell state, the current attention vector, and a rolling window
    # of the last attn_length outputs (flattened).
    size = (self._cell.state_size, self._attn_size,
            self._attn_size * self._attn_length)
    if self._state_is_tuple:
      return size
    else:
      return sum(list(size))
  @property
  def output_size(self):
    return self._attn_size
  def call(self, inputs, state):
    """Long short-term memory cell with attention (LSTMA)."""
    if self._state_is_tuple:
      state, attns, attn_states = state
    else:
      # Unpack the flat state: [cell state | attns | attention window].
      states = state
      state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
      attns = array_ops.slice(
          states, [0, self._cell.state_size], [-1, self._attn_size])
      attn_states = array_ops.slice(
          states, [0, self._cell.state_size + self._attn_size],
          [-1, self._attn_size * self._attn_length])
    attn_states = array_ops.reshape(attn_states,
                                    [-1, self._attn_length, self._attn_size])
    input_size = self._input_size
    if input_size is None:
      input_size = inputs.get_shape().as_list()[1]
    # Mix the raw input with the previous attention vector before the cell.
    inputs = _linear([inputs, attns], input_size, True)
    cell_output, new_state = self._cell(inputs, state)
    if self._state_is_tuple:
      new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
    else:
      new_state_cat = new_state
    new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
    with vs.variable_scope("attn_output_projection"):
      output = _linear([cell_output, new_attns], self._attn_size, True)
    # Slide the window: append the new output (the oldest entry was dropped
    # inside _attention).
    new_attn_states = array_ops.concat(
        [new_attn_states, array_ops.expand_dims(output, 1)], 1)
    new_attn_states = array_ops.reshape(
        new_attn_states, [-1, self._attn_length * self._attn_size])
    new_state = (new_state, new_attns, new_attn_states)
    if not self._state_is_tuple:
      new_state = array_ops.concat(list(new_state), 1)
    return output, new_state
  def _attention(self, query, attn_states):
    """Compute the attention vector over `attn_states` for `query`."""
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh
    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      # Treat the window as a 1-high image so a 1x1 conv projects each entry.
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      # Additive (Bahdanau-style) scoring: v . tanh(W*h + U*q).
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      # Weighted sum of window entries by attention weights.
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      # Drop the oldest window entry; caller appends the newest output.
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states
class HighwayWrapper(rnn_cell_impl.RNNCell):
  """RNNCell wrapper that adds highway connection on cell input and output.

  Based on:
    R. K. Srivastava, K. Greff, and J. Schmidhuber, "Highway networks",
    arXiv preprint arXiv:1505.00387, 2015.
    https://arxiv.org/abs/1505.00387
  """

  def __init__(self, cell,
               couple_carry_transform_gates=True,
               carry_bias_init=1.0):
    """Constructs a `HighwayWrapper` for `cell`.

    Args:
      cell: An instance of `RNNCell`.
      couple_carry_transform_gates: boolean, should the Carry and Transform gate
        be coupled.
      carry_bias_init: float, carry gates bias initialization.
    """
    self._cell = cell
    self._couple_carry_transform_gates = couple_carry_transform_gates
    self._carry_bias_init = carry_bias_init

  @property
  def state_size(self):
    """State size of the wrapped cell (unchanged by the wrapper)."""
    return self._cell.state_size

  @property
  def output_size(self):
    """Output size of the wrapped cell (unchanged by the wrapper)."""
    return self._cell.output_size

  def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype)

  def _highway(self, inp, out):
    """Mixes input and cell output: carry * inp + transform * out."""
    input_size = inp.get_shape().with_rank(2)[1].value
    carry_weight = vs.get_variable("carry_w", [input_size, input_size])
    # Positive carry bias biases the gate toward carrying the input
    # through early in training.
    carry_bias = vs.get_variable(
        "carry_b", [input_size],
        initializer=init_ops.constant_initializer(
            self._carry_bias_init))
    carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
    if self._couple_carry_transform_gates:
      # Coupled gates: transform is the complement of carry, so only one
      # gate's parameters are learned.
      transform = 1 - carry
    else:
      transform_weight = vs.get_variable("transform_w",
                                         [input_size, input_size])
      # Transform bias is initialized to the negative of the carry bias.
      transform_bias = vs.get_variable(
          "transform_b", [input_size],
          initializer=init_ops.constant_initializer(
              -self._carry_bias_init))
      transform = math_ops.sigmoid(nn_ops.xw_plus_b(inp,
                                                    transform_weight,
                                                    transform_bias))
    return inp * carry + out * transform

  def __call__(self, inputs, state, scope=None):
    """Run the cell and add its inputs to its outputs.

    Args:
      inputs: cell inputs.
      state: cell state.
      scope: optional cell scope.

    Returns:
      Tuple of cell outputs and new state.

    Raises:
      TypeError: If cell inputs and outputs have different structure (type).
      ValueError: If cell inputs and outputs have different structure (value).
    """
    outputs, new_state = self._cell(inputs, state, scope=scope)
    nest.assert_same_structure(inputs, outputs)
    # Ensure shapes match: the highway mix requires input size == output size.
    def assert_shape_match(inp, out):
      inp.get_shape().assert_is_compatible_with(out.get_shape())
    nest.map_structure(assert_shape_match, inputs, outputs)
    res_outputs = nest.map_structure(self._highway, inputs, outputs)
    return (res_outputs, new_state)
class LayerNormBasicLSTMCell(rnn_cell_impl.RNNCell):
  """LSTM unit with layer normalization and recurrent dropout.

  This class adds layer normalization and recurrent dropout to a
  basic LSTM unit. Layer normalization implementation is based on:
    https://arxiv.org/abs/1607.06450.
  "Layer Normalization"
  Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
  and is applied before the internal nonlinearities.
  Recurrent dropout is based on:
    https://arxiv.org/abs/1603.05118
  "Recurrent Dropout without Memory Loss"
  Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.
  """

  def __init__(self, num_units, forget_bias=1.0,
               input_size=None, activation=math_ops.tanh,
               layer_norm=True, norm_gain=1.0, norm_shift=0.0,
               dropout_keep_prob=1.0, dropout_prob_seed=None,
               reuse=None):
    """Initializes the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      activation: Activation function of the inner states.
      layer_norm: If `True`, layer normalization will be applied.
      norm_gain: float, The layer normalization gain initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      norm_shift: float, The layer normalization shift initial value. If
        `layer_norm` has been set to `False`, this argument will be ignored.
      dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
        recurrent dropout probability value. If float and 1.0, no dropout will
        be applied.
      dropout_prob_seed: (optional) integer, the randomness seed.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation
    self._forget_bias = forget_bias
    self._keep_prob = dropout_keep_prob
    self._seed = dropout_prob_seed
    self._layer_norm = layer_norm
    self._g = norm_gain    # layer-norm gain (gamma) initial value
    self._b = norm_shift   # layer-norm shift (beta) initial value
    self._reuse = reuse

  @property
  def state_size(self):
    return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)

  @property
  def output_size(self):
    return self._num_units

  def _norm(self, inp, scope):
    """Applies layer normalization to `inp` under variable scope `scope`."""
    shape = inp.get_shape()[-1:]
    gamma_init = init_ops.constant_initializer(self._g)
    beta_init = init_ops.constant_initializer(self._b)
    with vs.variable_scope(scope):
      # Initialize beta and gamma for use by layer_norm.
      vs.get_variable("gamma", shape=shape, initializer=gamma_init)
      vs.get_variable("beta", shape=shape, initializer=beta_init)
    # layer_norm reuses the gamma/beta created above.
    normalized = layers.layer_norm(inp, reuse=True, scope=scope)
    return normalized

  def _linear(self, args):
    """Projects `args` to the concatenated 4-gate pre-activations."""
    out_size = 4 * self._num_units
    proj_size = args.get_shape()[-1]
    weights = vs.get_variable("kernel", [proj_size, out_size])
    out = math_ops.matmul(args, weights)
    if not self._layer_norm:
      # Bias is only needed without layer norm; the norm's beta shift
      # otherwise plays that role (see _norm).
      bias = vs.get_variable("bias", [out_size])
      out = nn_ops.bias_add(out, bias)
    return out

  def call(self, inputs, state):
    """LSTM cell with layer normalization and recurrent dropout."""
    c, h = state
    args = array_ops.concat([inputs, h], 1)
    concat = self._linear(args)

    # i = input gate, j = new input (candidate), f = forget gate,
    # o = output gate.
    i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
    if self._layer_norm:
      # Normalize each gate's pre-activation independently.
      i = self._norm(i, "input")
      j = self._norm(j, "transform")
      f = self._norm(f, "forget")
      o = self._norm(o, "output")

    g = self._activation(j)
    # Recurrent dropout is applied to the candidate values only
    # ("Recurrent Dropout without Memory Loss").
    if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
      g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

    new_c = (c * math_ops.sigmoid(f + self._forget_bias)
             + math_ops.sigmoid(i) * g)
    if self._layer_norm:
      new_c = self._norm(new_c, "state")
    new_h = self._activation(new_c) * math_ops.sigmoid(o)

    new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
    return new_h, new_state
class NASCell(rnn_cell_impl.RNNCell):
  """Neural Architecture Search (NAS) recurrent network cell.

  This implements the recurrent cell from the paper:
    https://arxiv.org/abs/1611.01578
  Barret Zoph and Quoc V. Le.
  "Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.
  The class uses an optional projection layer.
  """

  def __init__(self, num_units, num_proj=None,
               use_biases=False, reuse=None):
    """Initialize the parameters for a NAS cell.

    Args:
      num_units: int, The number of units in the NAS cell
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      use_biases: (optional) bool, If True then use biases within the cell. This
        is False by default.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(NASCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._num_proj = num_proj
    self._use_biases = use_biases
    self._reuse = reuse

    # With projection, the emitted state m has num_proj columns.
    if num_proj is not None:
      self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
      self._output_size = num_proj
    else:
      self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
      self._output_size = num_units

  @property
  def state_size(self):
    return self._state_size

  @property
  def output_size(self):
    return self._output_size

  def call(self, inputs, state):
    """Run one step of NAS Cell.

    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: This must be a tuple of state Tensors, both `2-D`, with column
        sizes `c_state` and `m_state`.

    Returns:
      A tuple containing:
      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        NAS Cell after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of NAS Cell after reading `inputs`
        when the previous state was `state`.  Same type and shape(s) as `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    sigmoid = math_ops.sigmoid
    tanh = math_ops.tanh
    relu = nn_ops.relu

    num_proj = self._num_units if self._num_proj is None else self._num_proj

    (c_prev, m_prev) = state

    dtype = inputs.dtype
    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    # Variables for the NAS cell. W_m is all matrices multiplying the
    # hiddenstate and W_inputs is all matrices multiplying the inputs.
    concat_w_m = vs.get_variable(
        "recurrent_kernel", [num_proj, 8 * self._num_units],
        dtype)
    concat_w_inputs = vs.get_variable(
        "kernel", [input_size.value, 8 * self._num_units],
        dtype)

    m_matrix = math_ops.matmul(m_prev, concat_w_m)
    inputs_matrix = math_ops.matmul(inputs, concat_w_inputs)

    if self._use_biases:
      b = vs.get_variable(
          "bias",
          shape=[8 * self._num_units],
          initializer=init_ops.zeros_initializer(),
          dtype=dtype)
      m_matrix = nn_ops.bias_add(m_matrix, b)

    # The NAS cell branches into 8 different splits for both the hiddenstate
    # and the input
    m_matrix_splits = array_ops.split(axis=1, num_or_size_splits=8,
                                      value=m_matrix)
    inputs_matrix_splits = array_ops.split(axis=1, num_or_size_splits=8,
                                           value=inputs_matrix)

    # First layer: the paper's searched mix of nonlinearities; note branch 3
    # combines multiplicatively, all others additively.
    layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
    layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])
    layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
    layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])
    layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
    layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
    layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
    layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])

    # Second layer: pairwise combinations of the first-layer branches.
    l2_0 = tanh(layer1_0 * layer1_1)
    l2_1 = tanh(layer1_2 + layer1_3)
    l2_2 = tanh(layer1_4 * layer1_5)
    l2_3 = sigmoid(layer1_6 + layer1_7)

    # Inject the cell
    l2_0 = tanh(l2_0 + c_prev)

    # Third layer
    l3_0_pre = l2_0 * l2_1
    new_c = l3_0_pre  # create new cell
    l3_0 = l3_0_pre
    l3_1 = tanh(l2_2 + l2_3)

    # Final layer
    new_m = tanh(l3_0 * l3_1)

    # Projection layer if specified
    if self._num_proj is not None:
      concat_w_proj = vs.get_variable(
          "projection_weights", [self._num_units, self._num_proj],
          dtype)
      new_m = math_ops.matmul(new_m, concat_w_proj)

    new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_m)
    return new_m, new_state
class UGRNNCell(rnn_cell_impl.RNNCell):
  """Update Gate Recurrent Neural Network (UGRNN) cell.

  Compromise between a LSTM/GRU and a vanilla RNN.  There is only one
  gate, and that is to determine whether the unit should be
  integrating or computing instantaneously.  This is the recurrent
  idea of the feedforward Highway Network.

  This implements the recurrent cell from the paper:
    https://arxiv.org/abs/1611.09913
  Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
  "Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
  """

  def __init__(self, num_units, initializer=None, forget_bias=1.0,
               activation=math_ops.tanh, reuse=None):
    """Initialize the parameters for an UGRNN cell.

    Args:
      num_units: int, The number of units in the UGRNN cell
      initializer: (optional) The initializer to use for the weight matrices.
      forget_bias: (optional) float, default 1.0, The initial bias of the
        forget gate, used to reduce the scale of forgetting at the beginning
        of the training.
      activation: (optional) Activation function of the inner states.
        Default is `tf.tanh`.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(UGRNNCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._initializer = initializer
    self._forget_bias = forget_bias
    self._activation = activation
    self._reuse = reuse

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def call(self, inputs, state):
    """Run one step of UGRNN.

    Args:
      inputs: input Tensor, 2D, batch x input size.
      state: state Tensor, 2D, batch x num units.

    Returns:
      new_output: batch x num units, Tensor representing the output of the UGRNN
        after reading `inputs` when previous state was `state`. Identical to
        `new_state`.
      new_state: batch x num units, Tensor representing the state of the UGRNN
        after reading `inputs` when previous state was `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    sigmoid = math_ops.sigmoid

    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    with vs.variable_scope(vs.get_variable_scope(),
                           initializer=self._initializer):
      cell_inputs = array_ops.concat([inputs, state], 1)
      # Single projection produces both the gate and candidate
      # pre-activations.
      rnn_matrix = _linear(cell_inputs, 2 * self._num_units, True)

      [g_act, c_act] = array_ops.split(
          axis=1, num_or_size_splits=2, value=rnn_matrix)

      c = self._activation(c_act)            # candidate (instantaneous) state
      g = sigmoid(g_act + self._forget_bias)  # update gate, biased to remember

      # Highway-style mix of previous state and candidate.
      new_state = g * state + (1.0 - g) * c
      new_output = new_state

    return new_output, new_state
class IntersectionRNNCell(rnn_cell_impl.RNNCell):
  """Intersection Recurrent Neural Network (+RNN) cell.

  Architecture with coupled recurrent gate as well as coupled depth
  gate, designed to improve information flow through stacked RNNs. As the
  architecture uses depth gating, the dimensionality of the depth
  output (y) also should not change through depth (input size == output size).
  To achieve this, the first layer of a stacked Intersection RNN projects
  the inputs to N (num units) dimensions. Therefore when initializing an
  IntersectionRNNCell, one should set `num_in_proj = N` for the first layer
  and use default settings for subsequent layers.

  This implements the recurrent cell from the paper:
    https://arxiv.org/abs/1611.09913
  Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
  "Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.

  The Intersection RNN is built for use in deeply stacked
  RNNs so it may not achieve best performance with depth 1.
  """

  def __init__(self, num_units, num_in_proj=None,
               initializer=None, forget_bias=1.0,
               y_activation=nn_ops.relu, reuse=None):
    """Initialize the parameters for an +RNN cell.

    Args:
      num_units: int, The number of units in the +RNN cell
      num_in_proj: (optional) int, The input dimensionality for the RNN.
        If creating the first layer of an +RNN, this should be set to
        `num_units`. Otherwise, this should be set to `None` (default).
        If `None`, dimensionality of `inputs` should be equal to `num_units`,
        otherwise ValueError is thrown.
      initializer: (optional) The initializer to use for the weight matrices.
      forget_bias: (optional) float, default 1.0, The initial bias of the
        forget gates, used to reduce the scale of forgetting at the beginning
        of the training.
      y_activation: (optional) Activation function of the states passed
        through depth. Default is 'tf.nn.relu`.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(IntersectionRNNCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._initializer = initializer
    self._forget_bias = forget_bias
    self._num_input_proj = num_in_proj
    self._y_activation = y_activation
    self._reuse = reuse

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def call(self, inputs, state):
    """Run one step of the Intersection RNN.

    Args:
      inputs: input Tensor, 2D, batch x input size.
      state: state Tensor, 2D, batch x num units.

    Returns:
      new_y: batch x num units, Tensor representing the output of the +RNN
        after reading `inputs` when previous state was `state`.
      new_state: batch x num units, Tensor representing the state of the +RNN
        after reading `inputs` when previous state was `state`.

    Raises:
      ValueError: If input size cannot be inferred from `inputs` via
        static shape inference.
      ValueError: If input size != output size (these must be equal when
        using the Intersection RNN).
    """
    sigmoid = math_ops.sigmoid
    tanh = math_ops.tanh

    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    with vs.variable_scope(vs.get_variable_scope(),
                           initializer=self._initializer):
      # read-in projections (should be used for first layer in deep +RNN
      # to transform size of inputs from I --> N)
      if input_size.value != self._num_units:
        if self._num_input_proj:
          with vs.variable_scope("in_projection"):
            inputs = _linear(inputs, self._num_units, True)
        else:
          raise ValueError("Must have input size == output size for "
                           "Intersection RNN. To fix, num_in_proj should "
                           "be set to num_units at cell init.")

      n_dim = i_dim = self._num_units
      cell_inputs = array_ops.concat([inputs, state], 1)
      # Single projection yields both gates and both candidates.
      rnn_matrix = _linear(cell_inputs, 2*n_dim + 2*i_dim, True)

      gh_act = rnn_matrix[:, :n_dim]  # b x n; recurrent gate pre-activation
      h_act = rnn_matrix[:, n_dim:2*n_dim]  # b x n; recurrent candidate
      gy_act = rnn_matrix[:, 2*n_dim:2*n_dim+i_dim]  # b x i; depth gate
      y_act = rnn_matrix[:, 2*n_dim+i_dim:2*n_dim+2*i_dim]  # b x i; depth cand.

      h = tanh(h_act)
      y = self._y_activation(y_act)
      gh = sigmoid(gh_act + self._forget_bias)
      gy = sigmoid(gy_act + self._forget_bias)

      new_state = gh * state + (1.0 - gh) * h  # passed thru time
      new_y = gy * inputs + (1.0 - gy) * y  # passed thru depth

    return new_y, new_state
# Module-level cache of the registered op defs; populated lazily by
# CompiledWrapper.__call__ so the op registry is only queried when needed.
_REGISTERED_OPS = None
class CompiledWrapper(rnn_cell_impl.RNNCell):
  """Wraps step execution in an XLA JIT scope."""

  def __init__(self, cell, compile_stateful=False):
    """Create CompiledWrapper cell.

    Args:
      cell: Instance of `RNNCell`.
      compile_stateful: Whether to compile stateful ops like initializers
        and random number generators (default: False).
    """
    self._cell = cell
    self._compile_stateful = compile_stateful

  @property
  def state_size(self):
    """State size of the wrapped cell (unchanged by the wrapper)."""
    return self._cell.state_size

  @property
  def output_size(self):
    """Output size of the wrapped cell (unchanged by the wrapper)."""
    return self._cell.output_size

  def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype)

  def __call__(self, inputs, state, scope=None):
    if self._compile_stateful:
      # Compile everything, stateful ops included.
      compile_ops = True
    else:
      # Predicate form: compile only ops whose registered def is stateless.
      def compile_ops(node_def):
        global _REGISTERED_OPS
        if _REGISTERED_OPS is None:
          # Fetch the op registry once and cache it module-wide.
          _REGISTERED_OPS = op_def_registry.get_registered_ops()
        return not _REGISTERED_OPS[node_def.op].is_stateful

    with jit.experimental_jit_scope(compile_ops=compile_ops):
      return self._cell(inputs, state, scope)
def _random_exp_initializer(minval,
                            maxval,
                            seed=None,
                            dtype=dtypes.float32):
  """Returns an exponential distribution initializer.

  Args:
    minval: float or a scalar float Tensor. With value > 0. Lower bound of the
      range of random values to generate.
    maxval: float or a scalar float Tensor. With value > minval. Upper bound of
      the range of random values to generate.
    seed: An integer. Used to create random seeds.
    dtype: The data type.

  Returns:
    An initializer that generates tensors with an exponential distribution.
  """

  def _initializer(shape, dtype=dtype, partition_info=None):
    del partition_info  # Unused.
    # Sample uniformly in log-space, then exponentiate, giving values
    # distributed as e^U(log(minval), log(maxval)).
    log_lo = math_ops.log(minval)
    log_hi = math_ops.log(maxval)
    uniform_sample = random_ops.random_uniform(
        shape, log_lo, log_hi, dtype, seed=seed)
    return math_ops.exp(uniform_sample)

  return _initializer
class PhasedLSTMCell(rnn_cell_impl.RNNCell):
  """Phased LSTM recurrent network cell.

  https://arxiv.org/pdf/1610.09513v1.pdf
  """

  def __init__(self,
               num_units,
               use_peepholes=False,
               leak=0.001,
               ratio_on=0.1,
               trainable_ratio_on=True,
               period_init_min=1.0,
               period_init_max=1000.0,
               reuse=None):
    """Initialize the Phased LSTM cell.

    Args:
      num_units: int, The number of units in the Phased LSTM cell.
      use_peepholes: bool, set True to enable peephole connections.
      leak: float or scalar float Tensor with value in [0, 1]. Leak applied
        during training.
      ratio_on: float or scalar float Tensor with value in [0, 1]. Ratio of the
        period during which the gates are open.
      trainable_ratio_on: bool, weather ratio_on is trainable.
      period_init_min: float or scalar float Tensor. With value > 0.
        Minimum value of the initialized period.
        The period values are initialized by drawing from the distribution:
        e^U(log(period_init_min), log(period_init_max))
        Where U(.,.) is the uniform distribution.
      period_init_max: float or scalar float Tensor.
        With value > period_init_min. Maximum value of the initialized period.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(PhasedLSTMCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._leak = leak
    self._ratio_on = ratio_on
    self._trainable_ratio_on = trainable_ratio_on
    self._period_init_min = period_init_min
    self._period_init_max = period_init_max
    self._reuse = reuse

  @property
  def state_size(self):
    return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)

  @property
  def output_size(self):
    return self._num_units

  def _mod(self, x, y):
    """Modulo function that propagates x gradients."""
    # stop_gradient makes the backward pass treat mod(x, y) - x as a
    # constant, so d(result)/dx == 1.
    return array_ops.stop_gradient(math_ops.mod(x, y) - x) + x

  def _get_cycle_ratio(self, time, phase, period):
    """Compute the cycle ratio in the dtype of the time."""
    phase_casted = math_ops.cast(phase, dtype=time.dtype)
    period_casted = math_ops.cast(period, dtype=time.dtype)
    shifted_time = time - phase_casted
    # Fraction of the current period elapsed, in [0, 1).
    cycle_ratio = self._mod(shifted_time, period_casted) / period_casted
    return math_ops.cast(cycle_ratio, dtype=dtypes.float32)

  def call(self, inputs, state):
    """Phased LSTM Cell.

    Args:
      inputs: A tuple of 2 Tensor.
         The first Tensor has shape [batch, 1], and type float32 or float64.
         It stores the time.
         The second Tensor has shape [batch, features_size], and type float32.
         It stores the features.
      state: rnn_cell_impl.LSTMStateTuple, state from previous timestep.

    Returns:
      A tuple containing:
      - A Tensor of float32, and shape [batch_size, num_units], representing the
        output of the cell.
      - A rnn_cell_impl.LSTMStateTuple, containing 2 Tensors of float32, shape
        [batch_size, num_units], representing the new state and the output.
    """
    (c_prev, h_prev) = state
    (time, x) = inputs

    in_mask_gates = [x, h_prev]
    if self._use_peepholes:
      # Peepholes: input/forget gates also see the previous cell state.
      in_mask_gates.append(c_prev)

    with vs.variable_scope("mask_gates"):
      mask_gates = math_ops.sigmoid(
          _linear(in_mask_gates, 2 * self._num_units, True))
      [input_gate, forget_gate] = array_ops.split(
          axis=1, num_or_size_splits=2, value=mask_gates)

    with vs.variable_scope("new_input"):
      new_input = math_ops.tanh(
          _linear([x, h_prev], self._num_units, True))

    # Standard (un-gated-by-time) LSTM cell update.
    new_c = (c_prev * forget_gate + input_gate * new_input)

    in_out_gate = [x, h_prev]
    if self._use_peepholes:
      # Peephole on the output gate sees the freshly-computed cell state.
      in_out_gate.append(new_c)

    with vs.variable_scope("output_gate"):
      output_gate = math_ops.sigmoid(
          _linear(in_out_gate, self._num_units, True))

    new_h = math_ops.tanh(new_c) * output_gate

    # Per-unit oscillation parameters; periods drawn from
    # e^U(log(period_init_min), log(period_init_max)).
    period = vs.get_variable(
        "period", [self._num_units],
        initializer=_random_exp_initializer(
            self._period_init_min, self._period_init_max))
    phase = vs.get_variable(
        "phase", [self._num_units],
        initializer=init_ops.random_uniform_initializer(
            0., period.initial_value))
    ratio_on = vs.get_variable(
        "ratio_on", [self._num_units],
        initializer=init_ops.constant_initializer(self._ratio_on),
        trainable=self._trainable_ratio_on)

    cycle_ratio = self._get_cycle_ratio(time, phase, period)

    # Piecewise time gate k: rises (k_up) in the first half of the open
    # phase, falls (k_down) in the second half, and leaks (k_closed)
    # while the gate is closed. The second `where` overrides the first
    # for the rising half.
    k_up = 2 * cycle_ratio / ratio_on
    k_down = 2 - k_up
    k_closed = self._leak * cycle_ratio

    k = array_ops.where(cycle_ratio < ratio_on, k_down, k_closed)
    k = array_ops.where(cycle_ratio < 0.5 * ratio_on, k_up, k)

    # Blend new and previous values according to the time gate.
    new_c = k * new_c + (1 - k) * c_prev
    new_h = k * new_h + (1 - k) * h_prev

    new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)

    return new_h, new_state
class ConvLSTMCell(rnn_cell_impl.RNNCell):
  """Convolutional LSTM recurrent network cell.

  https://arxiv.org/pdf/1506.04214v1.pdf
  """

  def __init__(self,
               conv_ndims,
               input_shape,
               output_channels,
               kernel_shape,
               use_bias=True,
               skip_connection=False,
               forget_bias=1.0,
               initializers=None,
               name="conv_lstm_cell"):
    """Construct ConvLSTMCell.

    Args:
      conv_ndims: Convolution dimensionality (1, 2 or 3).
      input_shape: Shape of the input as int tuple, excluding the batch size.
      output_channels: int, number of output channels of the conv LSTM.
      kernel_shape: Shape of kernel as in tuple (of size 1,2 or 3).
      use_bias: Use bias in convolutions.
      skip_connection: If set to `True`, concatenate the input to the
        output of the conv LSTM. Default: `False`.
      forget_bias: Forget bias.
      initializers: Accepted but currently unused by this implementation.
      name: Name of the module.

    Raises:
      ValueError: If `skip_connection` is `True` and stride is different from 1
        or if `input_shape` is incompatible with `conv_ndims`.
    """
    super(ConvLSTMCell, self).__init__(name=name)

    # input_shape's last dimension is channels, so a rank-(conv_ndims+1)
    # shape is required (spatial dims + channels).
    if conv_ndims != len(input_shape)-1:
      raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
          input_shape, conv_ndims))

    self._conv_ndims = conv_ndims
    self._input_shape = input_shape
    self._output_channels = output_channels
    self._kernel_shape = kernel_shape
    self._use_bias = use_bias
    self._forget_bias = forget_bias
    self._skip_connection = skip_connection

    self._total_output_channels = output_channels
    if self._skip_connection:
      # Skip connection concatenates the raw input onto the output channels.
      self._total_output_channels += self._input_shape[-1]

    state_size = tensor_shape.TensorShape(self._input_shape[:-1]
                                          + [self._output_channels])
    self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
    self._output_size = tensor_shape.TensorShape(self._input_shape[:-1]
                                                 + [self._total_output_channels])

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def call(self, inputs, state, scope=None):
    cell, hidden = state
    # One convolution produces all four gate pre-activations.
    new_hidden = _conv([inputs, hidden],
                       self._kernel_shape,
                       4*self._output_channels,
                       self._use_bias)
    # Split along the channel axis (spatial dims come first).
    gates = array_ops.split(value=new_hidden,
                            num_or_size_splits=4,
                            axis=self._conv_ndims+1)

    input_gate, new_input, forget_gate, output_gate = gates
    new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
    new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
    output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)

    if self._skip_connection:
      output = array_ops.concat([output, inputs], axis=-1)
    new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
    return output, new_state
class Conv1DLSTMCell(ConvLSTMCell):
  """1D Convolutional LSTM recurrent network cell.

  https://arxiv.org/pdf/1506.04214v1.pdf
  """

  def __init__(self, name="conv_1d_lstm_cell", **kwargs):
    """Construct Conv1DLSTM. See `ConvLSTMCell` for more details.

    Args:
      name: Name of the module, forwarded to `ConvLSTMCell`.
      **kwargs: Remaining `ConvLSTMCell` keyword arguments
        (`input_shape`, `output_channels`, `kernel_shape`, ...).
    """
    # Forward `name` explicitly: previously it was accepted but dropped,
    # so a caller-supplied name never reached the parent cell.
    super(Conv1DLSTMCell, self).__init__(conv_ndims=1, name=name, **kwargs)
class Conv2DLSTMCell(ConvLSTMCell):
  """2D Convolutional LSTM recurrent network cell.

  https://arxiv.org/pdf/1506.04214v1.pdf
  """

  def __init__(self, name="conv_2d_lstm_cell", **kwargs):
    """Construct Conv2DLSTM. See `ConvLSTMCell` for more details.

    Args:
      name: Name of the module, forwarded to `ConvLSTMCell`.
      **kwargs: Remaining `ConvLSTMCell` keyword arguments
        (`input_shape`, `output_channels`, `kernel_shape`, ...).
    """
    # Forward `name` explicitly: previously it was accepted but dropped,
    # so a caller-supplied name never reached the parent cell.
    super(Conv2DLSTMCell, self).__init__(conv_ndims=2, name=name, **kwargs)
class Conv3DLSTMCell(ConvLSTMCell):
  """3D Convolutional LSTM recurrent network cell.

  https://arxiv.org/pdf/1506.04214v1.pdf
  """

  def __init__(self, name="conv_3d_lstm_cell", **kwargs):
    """Construct Conv3DLSTM. See `ConvLSTMCell` for more details.

    Args:
      name: Name of the module, forwarded to `ConvLSTMCell`.
      **kwargs: Remaining `ConvLSTMCell` keyword arguments
        (`input_shape`, `output_channels`, `kernel_shape`, ...).
    """
    # Forward `name` explicitly: previously it was accepted but dropped,
    # so a caller-supplied name never reached the parent cell.
    super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs)
def _conv(args,
          filter_size,
          num_features,
          bias,
          bias_start=0.0):
  """Convolution over a (possibly concatenated) list of inputs.

  Args:
    args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D,
      batch x n, Tensors. All args must have the same rank.
    filter_size: int tuple of filter height and width.
    num_features: int, number of features (output channels).
    bias: bool, whether to add a bias term to the convolution output.
    bias_start: starting value to initialize the bias; 0 by default.

  Returns:
    A 3D, 4D, or 5D Tensor with shape [batch ... num_features]

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  # Calculate the total size of arguments on the channel dimension, and
  # validate ranks up front so conv_op selection below cannot fall through.
  total_arg_size_depth = 0
  shapes = [a.get_shape().as_list() for a in args]
  shape_length = len(shapes[0])
  for shape in shapes:
    if len(shape) not in [3, 4, 5]:
      raise ValueError("Conv Linear expects 3D, 4D or 5D arguments: %s"
                       % str(shapes))
    if len(shape) != shape_length:
      raise ValueError("Conv Linear expects all args to be of same "
                       "Dimension: %s" % str(shapes))
    total_arg_size_depth += shape[-1]
  dtype = [a.dtype for a in args][0]

  # Determine the conv op matching the input rank (validated above).
  if shape_length == 3:
    conv_op = nn_ops.conv1d
    strides = 1
  elif shape_length == 4:
    conv_op = nn_ops.conv2d
    strides = shape_length * [1]
  else:  # shape_length == 5
    conv_op = nn_ops.conv3d
    strides = shape_length * [1]

  # Now the computation.
  kernel = vs.get_variable(
      "kernel",
      filter_size + [total_arg_size_depth, num_features],
      dtype=dtype)
  if len(args) == 1:
    inputs = args[0]
  else:
    # Concatenate along the channel axis so one conv covers all args.
    inputs = array_ops.concat(axis=shape_length - 1, values=args)
  res = conv_op(inputs, kernel, strides, padding='SAME')
  if not bias:
    return res
  bias_term = vs.get_variable(
      "biases", [num_features],
      dtype=dtype,
      initializer=init_ops.constant_initializer(
          bias_start, dtype=dtype))
  return res + bias_term
class GLSTMCell(rnn_cell_impl.RNNCell):
"""Group LSTM cell (G-LSTM).
The implementation is based on:
https://arxiv.org/abs/1703.10722
O. Kuchaiev and B. Ginsburg
"Factorization Tricks for LSTM Networks", ICLR 2017 workshop.
"""
  def __init__(self, num_units, initializer=None, num_proj=None,
               number_of_groups=1, forget_bias=1.0, activation=math_ops.tanh,
               reuse=None):
    """Initialize the parameters of G-LSTM cell.

    Args:
      num_units: int, The number of units in the G-LSTM cell
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      number_of_groups: (optional) int, number of groups to use.
        If `number_of_groups` is 1, then it should be equivalent to LSTM cell
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      activation: Activation function of the inner states.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already
        has the given variables, an error is raised.

    Raises:
      ValueError: If `num_units` or `num_proj` is not divisible by
        `number_of_groups`.
    """
    super(GLSTMCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._initializer = initializer
    self._num_proj = num_proj
    self._forget_bias = forget_bias
    self._activation = activation
    self._number_of_groups = number_of_groups

    if self._num_units % self._number_of_groups != 0:
      raise ValueError("num_units must be divisible by number_of_groups")
    if self._num_proj:
      if self._num_proj % self._number_of_groups != 0:
        raise ValueError("num_proj must be divisible by number_of_groups")
      # _group_shape = [per-group input width, per-group output width];
      # with projection, the recurrent input m has num_proj columns.
      self._group_shape = [int(self._num_proj / self._number_of_groups),
                           int(self._num_units / self._number_of_groups)]
    else:
      self._group_shape = [int(self._num_units / self._number_of_groups),
                           int(self._num_units / self._number_of_groups)]

    if num_proj:
      self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
      self._output_size = num_proj
    else:
      self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
      self._output_size = num_units
  @property
  def state_size(self):
    """LSTMStateTuple of (c, m) sizes; m is `num_proj` when projecting."""
    return self._state_size
  @property
  def output_size(self):
    """Output width: `num_proj` if set at construction, else `num_units`."""
    return self._output_size
def _get_input_for_group(self, inputs, group_id, group_size):
"""Slices inputs into groups to prepare for processing by cell's groups
Args:
inputs: cell input or it's previous state,
a Tensor, 2D, [batch x num_units]
group_id: group id, a Scalar, for which to prepare input
group_size: size of the group
Returns:
subset of inputs corresponding to group "group_id",
a Tensor, 2D, [batch x num_units/number_of_groups]
"""
return array_ops.slice(input_=inputs,
begin=[0, group_id * group_size],
size=[self._batch_size, group_size],
name=("GLSTM_group%d_input_generation" % group_id))
def call(self, inputs, state):
    """Run one step of G-LSTM.

    Args:
      inputs: input Tensor, 2D, [batch x num_units].
      state: this must be a tuple of state Tensors, both `2-D`,
        with column sizes `c_state` and `m_state`.

    Returns:
      A tuple containing:
      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        G-LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
          num_proj if num_proj was set,
          num_units otherwise.
      - LSTMStateTuple representing the new state of G-LSTM cell
        after reading `inputs` when the previous state was `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    (c_prev, m_prev) = state
    # Prefer the static batch size; fall back to the dynamic shape op when
    # it is unknown at graph-construction time.
    self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
    dtype = inputs.dtype
    scope = vs.get_variable_scope()
    with vs.variable_scope(scope, initializer=self._initializer):
        # Per-gate lists of per-group pre-activations; concatenated below.
        i_parts = []
        j_parts = []
        f_parts = []
        o_parts = []
        for group_id in range(self._number_of_groups):
            # Each group owns its variables under a dedicated sub-scope and
            # sees only its slice of the input and of the previous output.
            with vs.variable_scope("group%d" % group_id):
                x_g_id = array_ops.concat(
                    [self._get_input_for_group(inputs, group_id,
                                               self._group_shape[0]),
                     self._get_input_for_group(m_prev, group_id,
                                               self._group_shape[0])], axis=1)
                # One bias-free linear map produces all four gate
                # pre-activations for this group at once; split gives
                # input, cell-candidate, forget and output parts.
                R_k = _linear(x_g_id, 4 * self._group_shape[1], bias=False)
                i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)
            i_parts.append(i_k)
            j_parts.append(j_k)
            f_parts.append(f_k)
            o_parts.append(o_k)
        # Gate biases are full-width (num_units) vectors shared across the
        # groups, added after the group outputs are concatenated.
        bi = vs.get_variable(name="bias_i",
                             shape=[self._num_units],
                             dtype=dtype,
                             initializer=
                             init_ops.constant_initializer(0.0, dtype=dtype))
        bj = vs.get_variable(name="bias_j",
                             shape=[self._num_units],
                             dtype=dtype,
                             initializer=
                             init_ops.constant_initializer(0.0, dtype=dtype))
        bf = vs.get_variable(name="bias_f",
                             shape=[self._num_units],
                             dtype=dtype,
                             initializer=
                             init_ops.constant_initializer(0.0, dtype=dtype))
        bo = vs.get_variable(name="bias_o",
                             shape=[self._num_units],
                             dtype=dtype,
                             initializer=
                             init_ops.constant_initializer(0.0, dtype=dtype))
        i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)
        j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj)
        f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf)
        o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo)
    # Standard LSTM cell update with the configured forget bias and
    # activation function.
    c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
         math_ops.sigmoid(i) * math_ops.tanh(j))
    m = math_ops.sigmoid(o) * self._activation(c)
    # Optional output projection down to num_proj dimensions.
    if self._num_proj is not None:
        with vs.variable_scope("projection"):
            m = _linear(m, self._num_proj, bias=False)
    new_state = rnn_cell_impl.LSTMStateTuple(c, m)
    return m, new_state
|
Chelsea Carey has become a big shot on the Alberta curling landscape.
Her claim of a second provincial title Sunday is proof of that.
So, too, was her shot to secure her rink’s trip to the 2019 Scotties Tournament of Hearts.
“We were struggling with the ice a little bit, and (we were given) a chance,” Carey told Sportsnet post-game of a ninth-end triple-takeout to top Edmonton’s Kelsey Rocque 8-3 in the championship finale at the Stettler Curling Club.
Until then, it had been a tight finale, with Rocque and her Edmonton Saville Sports Centre squad looking for revenge after losing the Page Playoff 1-2 game 10-2 to Carey and The Glencoe Club team one night earlier.
Rocque was the only rink to count more than one — with two in the fifth end — until the deciding ninth of the championship draw, with Carey also defending against a multi-point end for Rocque in the eighth.
“It feels great,” Carey told Sportsnet. “These finals are always so full of pressure because you want so badly to get back (to the Scotties). It’s almost like the more you go to the Scotties, the more you want to go.
Carey’s win means she’ll make her fourth appearance at the nationals after representing Manitoba in 2014 and Alberta in 2016 and then returning as Team Canada in 2017 following her win of the Scotties crown the year before in Grande Prairie.
Meanwhile, Sarah Wilkes captured her second Alberta title as Carey’s third, while the rink’s second and lead, Dana Ferguson and Rachelle Brown respectively, are each primed for a third Scotties trip.
Together, however, this is their first trip to the nationals, since they formed this past off-season.
“We felt we were close all season, and we couldn’t quite make it across the finish line,” Carey said. “And we made the switch to bring Sarah back in the house and Rachel was back (from maternity leave) and feeling settled in.
“And it just started to click last event at the Canadian Open. And then here all week, we felt really good.
Earlier Sunday, Rocque trounced Jodi Marthaller, of Calgary’s Garrison Curling Club, in the 12-6 semifinal, as the Edmonton side counted three in each of the first, second and fourth ends to take an early stranglehold.
But Rocque couldn’t find the offence against a defensively solid Carey crew in the evening finale.
Nobody could solve Carey all week, as she rolled to five straight wins, including a key A-final edge of defending champion Casey Scheidegger, who was eventually eliminated by Marthaller in C-side action Saturday.
The last Alberta team to win the provincial women’s title in consecutive years was the Valerie Sweeting squad in 2014 and ’15.
But now Carey has two in her pocket as she guns to repeat her Scotties golden performance of three years ago.
And given her resume, she’s more than capable of carrying into Sydney the newfound confidence of a perfect provincials.
“This is a big one — last year really hurt,” said Carey of losing the Alberta semifinal to Shannon Kleibrink and then losing the Scotties wildcard game to Manitoba’s Kerri Einarson. “We had such a great season, and then we couldn’t quite win provincials and we lost the wildcard game (to miss the last-chance Scotties invite). We were dancing all around it. It was the first time in a while watching the Scotties on TV, and I really didn’t like it too much.
|
"""
Python wrapper that connects CPython interpreter to the Numba typed-list.
This is the code that is used when creating typed lists outside of a `@jit`
context and when returning a typed-list from a `@jit` decorated function. It
is basically a Python class that has a Numba allocated typed-list under the hood
and uses `@jit` functions to access it. Since it inherits from MutableSequence
it should really quack like the CPython `list`.
"""
from numba.six import MutableSequence
from numba.types import ListType, TypeRef
from numba.targets.imputils import numba_typeref_ctor
from numba import listobject
from numba import njit, types, cgutils, errors, typeof
from numba.extending import (
overload_method,
overload,
box,
unbox,
NativeValue,
type_callable,
)
# Thin @njit shims: each one compiles a single typed-list operation so that
# the pure-Python `List` proxy below can delegate into jitted code.
@njit
def _make_list(itemty):
    # Allocate a fresh native list and return its MemInfo handle.
    return listobject._as_meminfo(listobject.new_list(itemty))
@njit
def _length(l):
    return len(l)
@njit
def _append(l, item):
    l.append(item)
@njit
def _setitem(l, i, item):
    l[i] = item
@njit
def _getitem(l, i):
    return l[i]
@njit
def _contains(l, item):
    return item in l
@njit
def _count(l, item):
    return l.count(item)
@njit
def _pop(l, i):
    return l.pop(i)
@njit
def _delitem(l, i):
    del l[i]
@njit
def _extend(l, iterable):
    return l.extend(iterable)
@njit
def _insert(l, i, item):
    l.insert(i, item)
@njit
def _remove(l, item):
    l.remove(item)
@njit
def _clear(l):
    l.clear()
@njit
def _reverse(l):
    l.reverse()
@njit
def _copy(l):
    return l.copy()
# Rich-comparison shims used by the dunder methods of `List`.
@njit
def _eq(t, o):
    return t == o
@njit
def _ne(t, o):
    return t != o
@njit
def _lt(t, o):
    return t < o
@njit
def _le(t, o):
    return t <= o
@njit
def _gt(t, o):
    return t > o
@njit
def _ge(t, o):
    return t >= o
@njit
def _index(l, item, start, end):
    return l.index(item, start, end)
def _from_meminfo_ptr(ptr, listtype):
    """Recreate a `List` proxy around an existing MemInfo (boxing path)."""
    return List(meminfo=ptr, lsttype=listtype)
class List(MutableSequence):
    """A typed-list usable in Numba compiled functions.

    Implements the MutableSequence interface.

    The list is lazily typed: a freshly constructed ``List()`` is untyped
    and adopts the type of the first item inserted into it.  Use
    :meth:`empty_list` to fix the item type up front instead.
    """

    @classmethod
    def empty_list(cls, item_type):
        """Create a new empty List with *item_type* as the type for the
        items of the list.
        """
        return cls(lsttype=ListType(item_type))

    def __init__(self, **kwargs):
        """
        For users, the constructor does not take any parameters.
        The keyword arguments are for internal use only.

        Parameters
        ----------
        lsttype : numba.types.ListType; keyword-only
            Used internally for the list type.
        meminfo : MemInfo; keyword-only
            Used internally to pass the MemInfo object when boxing.
        """
        if kwargs:
            self._list_type, self._opaque = self._parse_arg(**kwargs)
        else:
            # Untyped until the first item arrives; the native list is
            # allocated lazily by _initialise_list.
            self._list_type = None
            self._opaque = None

    def _parse_arg(self, lsttype, meminfo=None):
        if not isinstance(lsttype, ListType):
            raise TypeError('*lsttype* must be a ListType')
        if meminfo is not None:
            # Boxing path: adopt an already-allocated native list.
            opaque = meminfo
        else:
            opaque = _make_list(lsttype.item_type)
        return lsttype, opaque

    @property
    def _numba_type_(self):
        # Consulted by numba.typeof to determine this object's Numba type.
        if self._list_type is None:
            raise TypeError("invalid operation on untyped list")
        return self._list_type

    @property
    def _typed(self):
        """Returns True if the list is typed.
        """
        return self._list_type is not None

    def _initialise_list(self, item):
        # Fix the item type of this (so far untyped) list from *item* and
        # allocate the backing native list.
        lsttype = types.ListType(typeof(item))
        self._list_type, self._opaque = self._parse_arg(lsttype)

    def __len__(self):
        # An untyped list is necessarily empty.
        if not self._typed:
            return 0
        else:
            return _length(self)

    def __eq__(self, other):
        return _eq(self, other)

    def __ne__(self, other):
        return _ne(self, other)

    def __lt__(self, other):
        return _lt(self, other)

    def __le__(self, other):
        return _le(self, other)

    def __gt__(self, other):
        return _gt(self, other)

    def __ge__(self, other):
        return _ge(self, other)

    def append(self, item):
        if not self._typed:
            self._initialise_list(item)
        _append(self, item)

    def __setitem__(self, i, item):
        if not self._typed:
            self._initialise_list(item)
        _setitem(self, i, item)

    def __getitem__(self, i):
        if not self._typed:
            raise IndexError
        else:
            return _getitem(self, i)

    def __iter__(self):
        for i in range(len(self)):
            yield self[i]

    def __contains__(self, item):
        return _contains(self, item)

    def __delitem__(self, i):
        _delitem(self, i)

    def insert(self, i, item):
        if not self._typed:
            self._initialise_list(item)
        _insert(self, i, item)

    def count(self, item):
        return _count(self, item)

    def pop(self, i=-1):
        return _pop(self, i)

    def extend(self, iterable):
        if not self._typed:
            # Materialise the iterable so that one-shot or non-sliceable
            # iterables (e.g. generators) are supported: the first element
            # is needed up front to fix the item type.
            items = list(iterable)
            if not items:
                # Nothing to add; leave the list untyped.
                return
            self._initialise_list(items[0])
            return _extend(self, items)
        return _extend(self, iterable)

    def remove(self, item):
        return _remove(self, item)

    def clear(self):
        return _clear(self)

    def reverse(self):
        return _reverse(self)

    def copy(self):
        return _copy(self)

    def index(self, item, start=None, stop=None):
        return _index(self, item, start, stop)

    def __str__(self):
        buf = []
        for x in self:
            buf.append("{}".format(x))
        return '[{0}]'.format(', '.join(buf))

    def __repr__(self):
        body = str(self)
        prefix = str(self._list_type)
        return "{prefix}({body})".format(prefix=prefix, body=body)
# XXX: should we have a better way to classmethod
@overload_method(TypeRef, 'empty_list')
def typedlist_empty(cls, item_type):
    """Typing/lowering of ``List.empty_list`` inside jitted code."""
    if cls.instance_type is not ListType:
        # Only handle a TypeRef of the ListType class itself; reject others.
        return
    def impl(cls, item_type):
        return listobject.new_list(item_type)
    return impl
@box(types.ListType)
def box_lsttype(typ, val, c):
    """Convert a native typed-list value into a Python `List` proxy object."""
    context = c.context
    builder = c.builder
    # XXX deduplicate
    ctor = cgutils.create_struct_proxy(typ)
    lstruct = ctor(context, builder, value=val)
    # Returns the plain MemInfo
    boxed_meminfo = c.box(
        types.MemInfoPointer(types.voidptr),
        lstruct.meminfo,
    )
    # Look up _from_meminfo_ptr in this module and call it with the boxed
    # MemInfo plus the (serialized) list type to build the Python proxy.
    modname = c.context.insert_const_string(
        c.builder.module, 'numba.typed.typedlist',
    )
    typedlist_mod = c.pyapi.import_module_noblock(modname)
    fmp_fn = c.pyapi.object_getattr_string(typedlist_mod, '_from_meminfo_ptr')
    lsttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
    res = c.pyapi.call_function_objargs(fmp_fn, (boxed_meminfo, lsttype_obj))
    # Release the temporaries now owned (or no longer needed) here.
    c.pyapi.decref(fmp_fn)
    c.pyapi.decref(typedlist_mod)
    c.pyapi.decref(boxed_meminfo)
    return res
@unbox(types.ListType)
def unbox_listtype(typ, val, c):
    """Convert a Python `List` proxy back into a native typed-list value."""
    context = c.context
    builder = c.builder
    # The proxy stores the MemInfo handle in its `_opaque` attribute.
    miptr = c.pyapi.object_getattr_string(val, '_opaque')
    native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)
    mi = native.value
    ctor = cgutils.create_struct_proxy(typ)
    lstruct = ctor(context, builder)
    # Recover the native list payload from the MemInfo's data pointer.
    data_pointer = context.nrt.meminfo_data(builder, mi)
    data_pointer = builder.bitcast(
        data_pointer,
        listobject.ll_list_type.as_pointer(),
    )
    lstruct.data = builder.load(data_pointer)
    lstruct.meminfo = mi
    lstobj = lstruct._getvalue()
    c.pyapi.decref(miptr)
    return NativeValue(lstobj)
#
# The following contains the logic for the type-inferred constructor
#
@type_callable(ListType)
def typedlist_call(context):
    """
    Defines typing logic for ``List()``.
    Produces List[undefined]; the item type is refined later when the
    first item is inserted.
    """
    def typer():
        return types.ListType(types.undefined)
    return typer
@overload(numba_typeref_ctor)
def impl_numba_typeref_ctor(cls):
    """
    Defines ``List()``, the type-inferred version of the list ctor.

    Parameters
    ----------
    cls : TypeRef
        Expecting a TypeRef of a precise ListType.

    See also: `redirect_type_ctor` in numba/target/bulitins.py
    """
    list_ty = cls.instance_type
    if not isinstance(list_ty, types.ListType):
        # Not a ListType: reject so other overloads may apply.
        return
    # Ensure the list is precisely typed.
    if not list_ty.is_precise():
        msg = "expecting a precise ListType but got {}".format(list_ty)
        raise errors.LoweringError(msg)
    item_type = types.TypeRef(list_ty.item_type)

    def impl(cls):
        # Simply call .empty_list with the item types from *cls*
        return List.empty_list(item_type)

    return impl
|
Square Enix's PlayStation 3 action RPG debuts atop the charts as hardware sales continue to slump.
We have no news or videos for Tokyo Mono Harashi: Karasu no Mori Gakuen Kitan. Sorry!
No forum topics for Tokyo Mono Harashi: Karasu no Mori Gakuen Kitan yet. Want to start us off? Create a new topic.
|
################################################################################
## ##
## This file is a part of TADEK. ##
## ##
## TADEK - Test Automation in a Distributed Environment ##
## (http://tadek.comarch.com) ##
## ##
## Copyright (C) 2011 Comarch S.A. ##
## All rights reserved. ##
## ##
## TADEK is free software for non-commercial purposes. For commercial ones ##
## we offer a commercial license. Please check http://tadek.comarch.com for ##
## details or write to tadek-licenses@comarch.com ##
## ##
## You can redistribute it and/or modify it under the terms of the ##
## GNU General Public License as published by the Free Software Foundation, ##
## either version 3 of the License, or (at your option) any later version. ##
## ##
## TADEK is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with TADEK bundled with this file in the file LICENSE. ##
## If not, see http://www.gnu.org/licenses/. ##
## ##
## Please notice that Contributor Agreement applies to any contribution ##
## you make to TADEK. The Agreement must be completed, signed and sent ##
## to Comarch before any contribution is made. You should have received ##
## a copy of Contribution Agreement along with TADEK bundled with this file ##
## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ##
## or write to tadek-licenses@comarch.com ##
## ##
################################################################################
__all__ = ["ActionSet", "ButtonSet", "KeySet", "RelationSet",
"RoleSet", "StateSet", "keyset"]
from tadek.core.constants import *
class ConstantSet(object):
    '''
    Defines a read-only set of related constants, which can be initialized
    only once - by the current implementation of the accessibility interface.
    '''
    __slots__ = ("_name", "_items")

    def __init__(self, name, *items):
        self._name = name
        self._items = {}
        # Initializes items of the constant set with None, meaning
        # "not yet provided by the accessibility implementation".
        for i in items:
            self._items[i] = None

    def __getattr__(self, name):
        '''
        Gets an item value of the constant set given by the name.

        Raises AttributeError for unknown or not-yet-initialized items.
        '''
        if name in self.__slots__:
            return object.__getattribute__(self, name)
        elif name in self._items and self._items[name] is not None:
            return self._items[name]
        else:
            raise AttributeError("'%s' set has no item '%s'"
                                  % (self._name, name))

    def __setattr__(self, name, value):
        '''
        Sets a new item value of the constant set given by the name.

        Each item may be assigned only once; a second assignment raises
        ValueError.
        '''
        if name in self.__slots__:
            object.__setattr__(self, name, value)
        elif name not in self._items:
            raise AttributeError("'%s' set has no item '%s'"
                                  % (self._name, name))
        elif self._items[name] is not None:
            raise ValueError("'%s' item of '%s' set already initialized"
                              % (name, self._name))
        else:
            self._items[name] = value

    def __iter__(self):
        '''
        Iterator that yields one initialized item *value* of the constant
        set per iteration (uninitialized items are skipped).
        '''
        for i in self._items:
            if self._items[i] is not None:
                yield self._items[i]

    def name(self, value):
        '''
        Returns an item name of the constant set given by its value,
        or None if no item has that value.
        '''
        if value is not None:
            # NOTE: iteritems - this module targets Python 2.
            for n, v in self._items.iteritems():
                if v == value:
                    return n
        return None
# Concrete constant sets, each seeded with the corresponding constant
# collection imported from tadek.core.constants.
class ActionSet(ConstantSet):
    '''
    An action set.
    '''
    def __init__(self):
        ConstantSet.__init__(self, "Action", *ACTIONS)

class RelationSet(ConstantSet):
    '''
    A relation set.
    '''
    def __init__(self):
        ConstantSet.__init__(self, "Relation", *RELATIONS)

class RoleSet(ConstantSet):
    '''
    A role set.
    '''
    def __init__(self):
        ConstantSet.__init__(self, "Role", *ROLES)

class StateSet(ConstantSet):
    '''
    A state set.
    '''
    def __init__(self):
        ConstantSet.__init__(self, "State", *STATES)

class ButtonSet(ConstantSet):
    '''
    A button set.
    '''
    def __init__(self):
        ConstantSet.__init__(self, "Button", *BUTTONS)

class KeySet(ConstantSet):
    '''
    A key set.
    '''
    def __init__(self):
        ConstantSet.__init__(self, "Key", *KEY_SYMS.keys())

# Module-level singleton key set, pre-initialized from KEY_SYMS.
keyset = KeySet()
# Set default values (key symbol name -> key code):
for key, code in KEY_SYMS.iteritems():
    setattr(keyset, key, code)
|
Giving is like yawning; both require a big stretch of oneself, and they are super contagious. It’s impressive for a place like the Treasure Coast, which is known for its seasonal residents, transplants and vacationers, to also be hugely dedicated to the local community. Martin and St. Lucie counties are widely known and revered for the generosity of their residents. Whether through volunteer hours or fiscal resources, one of the greatest gifts of the area is the spirit of its people. And though there are thousands of stories of giving to share, the following three reflect the creativity and capacity for caring and giving that makes the area legendary.
Frustrated by working in a clinical setting and finding it nearly impossible to reach the teenage boys he’d been tasked with serving, psychotherapist and substance-abuse expert Bob Zaccheo used his imagination to find a way to break through and gain trust. Realizing motors and engines intrigue most teenage boys, Zaccheo arranged therapy sessions under the hoods of cars. Within seconds of popping a hood, formerly silent and sullen youths were not only talking but also sharing. Zaccheo knew it was the start of something big. Something he would call Project LIFT.
Founded in 2010, the program helps youth—95 percent of whom are court-ordered—leave the program with solid counseling, a marketable skill and an education.
Project LIFT helps at-risk youths build vocational skills by learning auto-mechanics, carpentry and more.
Skills learned at Project LIFT help members create self esteem and learn about potential career paths.
In 2009, Cheryl Jarvis penned “The Necklace,” a book about 13 women in California who shared ownership of one expensive and special necklace that changed their perspectives, their values and their lives. Just a few short years later, their story and another necklace are inspiring an entire community and helping save lives in St. Lucie County.
When Wendy Dwyer had the idea to apply this concept locally, she first told Sydney Liebman, who was at the time the development director for HANDS of St. Lucie County, a Volunteers in Medicine clinic serving those who cannot access health insurance or Medicaid. Once she had an organization to support, she set out to find a jeweler. This was easier than she imagined as Greg Childress, the owner of G. Alan’s Fine Jewelry & Coin, loved the idea and created and donated a spectacular diamond necklace valued at $12,500. He also insured the pricey piece that would be shared by a dozen women and offered his shop as a meeting space for the group to make the monthly necklace exchange and share the progress made by each of its wearers.
A dozen women agreed to raise $1,000 each during their month-long experience with the necklace, as well as awareness about the plight of more than 65,000 individuals in St. Lucie County who are uninsured.
Thousands of community members who were friends, family and acquaintances to each of the women became aware of the life-changing health services available to residents who work hard, but simply do not have access to affordable health care. The phones at the HANDS clinic began to ring, nutrition education classes got funding, the dental and vision portions of the clinic began to increase their services, and referrals were made to mental and obstetric health services for needy individuals.
The first year alone, the 12 women raised about $300,000 between fundraisers and grants. “At one point we were told that the first $217,000 raised helped provide $21 million in services, which would mean that every dollar raised was worth $100 in provided services,” Dwyer says of the added value of their volunteer work and the awareness they spread.
Along the way, G. Alan’s Fine Jewelry & Coin added a pair of earrings to the necklace that had become a community celebrity. As Jewelia begins her fourth year in circulation with a new group of caring women, the shared necklace has raised more than $650,000 in funds to benefit St. Lucie County residents.
The people behind these three organizations have reached beyond themselves to become volunteers, donors or supporters of community non-profits. With thousands of non-profit organizations on our stretch of paradise, there is no limit of good that can be done to help change and improve the lives of children, seniors, animals, students, the environment, and indeed, the entire Treasure Coast community.
|
from __future__ import division
import golly as g
from PIL import Image
from math import floor, ceil, log
import os
import json
#---------------------------------------------
# settings
# Destination root for the exported PNG frame sequences.
exportDir = "/Volumes/MugiRAID1/Works/2015/13_0xff/ca/golly-exported/otca"
# common
# duration = 147 #int( g.getgen() )
# Total generations spanned by the export; split into `duration` frames
# in main().
otcaDur = 35328
worldWidth = 30730  # NOTE(review): not referenced below
# 1/16x (1920)
duration = 64  # number of frames to export
ratio = 8  # cells per output pixel (downsampling factor)
subdiv = 1  # sub-samples per pixel edge
skipBound = 1
# skipFrame = int(genDur / duration)
# ad
# name = "ad_x%02d" % ratio
# bound = [4, 3, 1, 1]
# da
# name = "da_x%02d" % ratio
# bound = [5, 2, 1, 1]
# dd
# name = "dd_x%02d" % ratio
# bound = [0, 0, 1, 1]
# aa
name = "aa_x%02d" % ratio
# Crop rectangle [x, y, w, h] in OTCA metacell units (scaled to raw cells
# inside main()).
bound = [8, 3, 1, 1]
bound[0] -= 8
bound[1] -= 8
# 1/8x
# ratio = 8
# subdiv = 2
# skipBound = 2
# skipFrame = 1000
# bound = [2, 2, 11, 11]
# 1/4x
# ratio = 4
# subdiv = 2
# skipBound = 2
# skipFrame = 1000
# bound = [2, 2, 9, 8]
# 1/2x
# ratio = 2
# subdiv = 2
# skipBound = 2
# skipFrame = 1000
# bound = [3, 3, 6, 6]
# 1/1x
# ratio = 1
# subdiv = 1
# skipBound = 1
# skipFrame = 1
# dead or alive
# bound = [0, 0, 1, 1]
# mode = "dead"
# bound = [2, 5, 1, 1]
# mode = "aliv"
#---------------------------------------------
# settings
def log( data ):
    # Debug helper: pop up a Golly note showing *data* serialized as JSON.
    # NOTE(review): this shadows `log` imported from math above — rename
    # this helper if math.log is ever needed in this script.
    g.note( json.dumps(data) )
def getPalette():
    """Return a mapping of Golly cell state -> (r, g, b) colour tuple."""
    # g.getcolors() returns a flat list: [state, r, g, b, state, r, g, b, ...]
    colors = g.getcolors()
    return dict(
        (colors[i], (colors[i + 1], colors[i + 2], colors[i + 3]))
        for i in range(0, len(colors), 4))
def main( step = 1, start = 0 ):
    """Export the OTCA pattern as a sequence of downsampled PNG frames.

    Advances the pattern by ``otcaDur`` generations, then exports
    ``duration`` frames.  ``step`` is recomputed below from
    ``otcaDur / duration``, so the *step*/*start* parameters are
    effectively unused and kept only for call-site compatibility.

    Each image pixel covers a ``ratio`` x ``ratio`` cell area of the crop
    rectangle ``bound``; a pixel is painted white when any live cell is
    found in at least one of its sampled sub-regions.
    """
    global bound
    g.reset()
    g.run(otcaDur)
    # `bound` is expressed in OTCA metacell units; scale to raw cells.
    cellWidth = 2048
    bound[0] = bound[0] * cellWidth
    bound[1] = bound[1] * cellWidth
    bound[2] = bound[2] * cellWidth
    bound[3] = bound[3] * cellWidth
    left = bound[0]
    top = bound[1]
    width = bound[2]
    height = bound[3]
    # Create the destination directory on demand.
    dstDir = "%s/%s" % (exportDir, name)
    if not os.path.exists(dstDir):
        os.makedirs(dstDir)
    # else:
    #     g.note( "destination folder already exists" )
    #     g.exit()
    imgWidth = int(width / ratio)
    imgHeight = int(height / ratio)
    # Edge length of one sampled sub-region.  True division is active
    # (see the __future__ import), so this may be a float — Golly accepts
    # the resulting coordinates as in the original code.
    boundWidth = ratio / subdiv
    pb = [0, 0, boundWidth, boundWidth]  # reusable pixel-bound rectangle
    step = int(otcaDur / duration)
    for i in xrange(0, duration):
        g.show("Processing... %d / %d" % (i + 1, duration))
        g.run(step)
        g.update()
        img = Image.new("RGB", (imgWidth, imgHeight))
        for y in xrange(imgHeight):
            for x in xrange(imgWidth):
                # Scan sub-regions; stop at the first live cell found.
                for by in xrange(0, subdiv, skipBound):
                    for bx in xrange(0, subdiv, skipBound):
                        pb[0] = left + x * ratio + bx * boundWidth
                        pb[1] = top + y * ratio + by * boundWidth
                        if len(g.getcells(pb)) > 0:
                            img.putpixel((x, y), (255, 255, 255))
                            break
                    else:
                        continue
                    break
        # save
        # img.save( "%s/%s_%02dx_%s_%08d.png" % (dstDir, name, ratio, mode, i) )
        # img.save( "%s/%s_%02dx_%08d.png" % (dstDir, name, ratio, i) )
        img.save("%s/%s_%04d.png" % (dstDir, name, i))
    g.show("Done.")
#---------------------------------------------
# script entry point: run the export
main()
|
From the HopPress: Mmmm… Chocolate!
My latest featured article for RateBeer’s The HopPress… posted on Saturday, February 12th.
Some of you might be shocked to hear that not all beer that has “chocolate” in its name actually has real chocolate as an ingredient, but there are certainly some very excellent exceptions to that, as you will see. Many of these beers, in fact, get that name from the flavor imparted by the darkly roasted malt actually called chocolate malt. Not as dark (starts at about 300ºL) as the darkest malts, like black patent, chocolate malt retains some of the nutty and/or mild coffee flavors that most beer drinkers associate with porters or stouts. In the proper proportions with other ingredients and with the right care and handling during the brewing process, this malt can definitely contribute to a quite distinct chocolate flavor in the beer that I really enjoy.
My latest featured article for RateBeer’s The HopPress… posted on Saturday, December 25th.
A very happy holiday to everyone. Hopefully you were on the “good list” and got or will get a wonderful present from someone today. It’s a cold one here in Vermont this morning; only about 6F at my house, but I am sure that there are colder places around. I am listening to the rest of my family waking up on this Christmas morning and coming down stairs to the smell of my wife, Candy, beginning to cook breakfast in the kitchen… the heavenly smell of bacon being predominant, of course, but the coffee is running a close second at the moment.
|
# -*- coding: utf-8 -*-
# Copyright Yassine Lamgarchal <lamgarchal.yassine@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from chillaxd import commands
from chillaxd.raft import message
LOG = logging.getLogger(__name__)
class ServerState(object):
# The different Raft states.
_LEADER = "LEADER"
_CANDIDATE = "CANDIDATE"
_FOLLOWER = "FOLLOWER"
def __init__(self, server, log, queued_commands, private_endpoint):
self._server = server
self._log = log
self._queued_commands = queued_commands
self._private_endpoint = private_endpoint
# The server state, initially a follower.
self._current_state = ServerState._FOLLOWER
# The current known leader.
self._leader = None
# Set of peers that voted for this server in current term.
self._voters = set()
# TODO(yassine): must be persisted
# The candidate the server has voted for in current term.
self._voted_for = None
# Index of highest log entry known to be committed
# (initialized to 0, increases monotonically)
self._commit_index = 0
# Index of highest log entry applied to state machine
# (initialized to 0, increases monotonically)
self._last_applied = 0
# For each remote peer, index of the next log entry to send
# (initialized to leader last log index + 1)
self._next_index = {}
# For each remote peer, index of the highest log entry known to be
# replicated on that peer (initialized to 0, increases monotonically)
self._match_index = {}
# Latest term the server has seen
# (initialized to 0 on first boot, increases monotonically).
self._current_term = 0
def is_leader(self):
return self._current_state == ServerState._LEADER
def is_candidate(self):
return self._current_state == ServerState._CANDIDATE
def is_follower(self):
return self._current_state == ServerState._FOLLOWER
def switch_to_leader(self):
"""Switch to leader state.
Enable the heartbeat periodic call and
stop to check if a leader is still alive.
"""
if (not self._server.is_standalone() and
self._current_state != ServerState._CANDIDATE):
raise InvalidState(
"Invalid state '%s' while transiting to leader state." %
self._current_state)
self._current_state = ServerState._LEADER
self._voters.clear()
self._voted_for = None
LOG.info("switched to leader, term='%d'" % self._current_term)
for remote_peer in self._server.remote_peers():
self._next_index[remote_peer] = self._log.last_index() + 1
self._match_index[remote_peer] = 0
if not self._server.is_standalone():
self._server.broadcast_append_entries()
self._server.heartbeating.start()
self._server.checking_leader_timeout.stop()
if not self._server.is_standalone():
command_id, noop_message = commands.build_no_operation()
self._log.append_entry(self._current_term, noop_message)
self._queued_commands[command_id] = (None, -1)
def switch_to_follower(self, m_term, m_leader):
"""Switch to follower state.
Disable the heartbeat periodic call and
start to check if the leader is still alive.
:param m_term: The last recent known term.
:type m_term: int
:param m_leader: The leader if a valid append entry has
been received, None otherwise.
:type: str
"""
if self._current_state == ServerState._LEADER:
self._server.checking_leader_timeout.start()
self._server.heartbeating.stop()
self._current_state = ServerState._FOLLOWER
self._leader = m_leader
self._current_term = max(m_term, self._current_term)
self._voters.clear()
self._voted_for = None
LOG.info("switched to follower, term='%d'" % self._current_term)
def switch_to_candidate(self):
"""Switch to candidate state.
Increment the current term, vote for self, and broadcast a
request vote. The election timeout is randomly reinitialized.
"""
if self._current_state == ServerState._LEADER:
raise InvalidState(
"Invalid state '%s' while transiting to candidate state." %
self._current_state)
self._current_term += 1
self._current_state = ServerState._CANDIDATE
LOG.debug("switched to candidate, term='%d'" % self._current_term)
self._voters.clear()
self._voters.add(self._private_endpoint)
self._voted_for = self._private_endpoint
l_l_i, l_l_t = self._log.index_and_term_of_last_entry()
rv_message = message.build_request_vote(self._current_term, l_l_i,
l_l_t)
# Broadcast request vote and reset election timeout.
self._server.broadcast_message(rv_message)
self._server.reset_election_timeout()
def init_indexes(self, remote_peer_id):
"""Initialize next_index and match_index of a remote peer.
:param remote_peer_id: The id of the remote peer.
:type remote_peer_id: six.binary_type
"""
self._next_index[remote_peer_id] = self._log.last_index() + 1
self._match_index[remote_peer_id] = 0
def next_index(self, peer_id):
return self._next_index[peer_id]
def update_next_index(self, peer_id, new_next_index):
self._next_index[peer_id] = new_next_index
    def match_index_values(self):
        """Return the recorded match_index values of all remote peers."""
        return self._match_index.values()
    def update_match_index(self, peer_id, new_match_index):
        """Record a new match_index for `peer_id`.

        The value is monotonic: a smaller (older) value never
        overwrites a larger (more recent) one.
        """
        self._match_index[peer_id] = max(self._match_index[peer_id],
                                         new_match_index)
    def commit_index(self):
        """Return the current commit index."""
        return self._commit_index
    def update_commit_index(self, new_commit_index):
        """Replace the current commit index with `new_commit_index`."""
        self._commit_index = new_commit_index
    def no_commands_to_apply(self):
        """Return True when last_applied has caught up with the commit index."""
        return self._last_applied == self._commit_index
    def last_applied(self):
        """Return the last-applied index."""
        return self._last_applied
    def update_last_applied(self):
        """Advance last_applied up to the current commit index."""
        self._last_applied = self._commit_index
    def clear(self):
        """Forget all per-peer replication state (next_index and match_index)."""
        self._next_index.clear()
        self._match_index.clear()
    def term(self):
        """Return the current term."""
        return self._current_term
    def add_voter(self, peer_id):
        """Record `peer_id` in the set of peers that voted for this server."""
        self._voters.add(peer_id)
def grant_vote(self, peer_id):
if not self._voted_for or self._voted_for == peer_id:
self._voted_for = peer_id
return True
return False
    def number_of_voters(self):
        """Return how many peers have voted for this server so far."""
        return len(self._voters)
    def update_leader(self, leader):
        """Record `leader` as the currently known leader."""
        self._leader = leader
    def is_leader_alive(self):
        """Return True if a leader is currently known, False otherwise."""
        return self._leader is not None
class InvalidState(Exception):
    """Exception raised when the server tries to perform an action which
    is not allowed in its current state.
    """
|
There are two people in elightbo’s collective.
Subscribe to a podcast of everything huffduffed by elightbo.
"A good man will not waste himself upon mean and discreditable work or be busy merely for the sake of being busy. Neither will he, as you imagine, become so involved in ambitious schemes that he will have continually to endure their ebb and flow.
Grab your black cat and portable radio, climb on your mom’s broomstick, and join us for a king-sized discussion of Hayao Miyazaki’s classic animated film “Kiki’s Delivery Service.” This film features a climactic scene featuring an out-of-control dirigible, so you know we love it.
"Tim Kreider’s writing is heartbreaking, brutal and hilarious—usually at the same time. He can do in a few pages what I need several hours of screen time and tens of millions to accomplish. And he does it better. Come to think of it, I’d rather not do a blurb.
Matt Mullenweg has been named one of PC World’s Top 50 People on the Web, Inc.com’s 30 under 30, and Business Week’s 25 Most Influential People on the Web. In this episode, I attempt to get him drunk on tequila and make him curse.
In this episode, I interview the one and only Arnold Schwarzenegger… at his kitchen table. We dig into lessons learned, routines, favorite books, and much more, including many stories I’ve never heard anywhere else.
"Our revenues are now over $5 billion annually. Without access to Tony and his teachings, Salesforce.com wouldn’t exist today." - Marc Benioff, Founder of Salesforce.
Recorded live at Sunshine PHP 2014.
Seat Geek – Where Zack works.
Palentier – Where Larry works.
FoxyCart – Cause I like babysitting Luke’s kids.
PHP the Right Way – Because you NEED to read this.
This episode carries the EXPLICIT tag for language. It is not family friendly. I do apologize if this offends our regular listeners. Feel free to skip this episode and we will return to our normal format soon.
|
import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
ENV_NAME = 'CartPole-v0'
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
# Fix the random seeds so runs are reproducible.
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
# Next, we build a very simple model regardless of the dueling architecture.
# If you enable the dueling network in DQN, DQN will build a dueling network
# based on your model automatically.
# Also, you can build a dueling network by yourself and turn off the dueling
# network in DQN.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
# Linear output layer: one Q-value estimate per discrete action.
model.add(Dense(nb_actions, activation='linear'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in
# Keras optimizer and even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# Enable the dueling network.
# You can specify the dueling_type to one of {'avg','max','naive'}.
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
               enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
# Okay, now it's time to learn something! Visualization is disabled here
# (visualize=False) because rendering slows down training quite a lot. You can
# always safely abort the training prematurely using Ctrl + C.
dqn.fit(env, nb_steps=50000, visualize=False, verbose=2)
# After training is done, we save the final weights.
dqn.save_weights('duel_dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=5, visualize=False)
|
View cart “$20 Gift Certificate” has been added to your cart.
This is a gift certificate that gives you $30 coupon to use in LBBS shop. It works with all the discounts and sales.
Coupon is valid thru 365 days starting from the purchasing day.
After making payment you’ll receive JPG file with coupon code on it to your email. You can gift it to third person.
|
# SIM-CITY client
#
# Copyright 2015 Netherlands eScience Center, Jan Bot
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Ensemble metadata
"""
def ensemble_view(task_db, name, version, url=None, ensemble=None):
    """
    Create a view for an ensemble.
    This checks if the view already exists. If not, a new view is created with
    a new design document name. The view is then called all_docs under that
    design document.
    @param task_db: task database
    @param name: simulator name
    @param version: simulator version
    @param url: base url of the database. If none, use the task database url.
    @param ensemble: ensemble name. If None, no selection on ensemble is made.
    @return design document
    """
    # The design document name encodes the selection criteria, so each
    # (name, version, ensemble) combination gets its own view.
    if ensemble is None:
        design_doc = '{0}_{1}'.format(name, version)
        ensemble_condition = ''
    else:
        design_doc = '{0}_{1}_{2}'.format(name, version, ensemble)
        ensemble_condition = ' && doc.ensemble === "{0}"'.format(ensemble)
    doc_id = '_design/{0}'.format(design_doc)
    try:
        # If the design document already exists there is nothing to do.
        task_db.get(doc_id)
    except ValueError:
        # The design document is missing: build the view.
        if url is None:
            url = task_db.url
        if not url.endswith('/'):
            url += '/'
        # Fix: the emitted object used to declare the `input` key twice (a
        # nested {ensemble, simulation} subset followed by doc.input); in
        # JavaScript the later key wins, so the redundant first occurrence
        # was dead text and has been removed.
        map_fun = '''
function(doc) {{
    if (doc.type === "task" && doc.name === "{name}" &&
            doc.version === "{version}" && !doc.archive
            {ensemble_condition}) {{
        emit(doc._id, {{
            _id: doc._id,
            _rev: doc._rev,
            url: "{url}" + doc._id,
            error: doc.error,
            lock: doc.lock,
            done: doc.done,
            input: doc.input,
            files: doc.files,
            _attachments: doc._attachments,
            typeUrl: doc.typeUrl,
            defaultFeatureType: doc.defaultFeatureType
        }});
    }}
}}'''.format(name=name, version=version,
             ensemble_condition=ensemble_condition, url=url)
        task_db.add_view('all_docs', map_fun, design_doc=design_doc)
    return design_doc
|
The Reformers taught that we are saved by grace alone, through faith alone, on account of Christ alone, as revealed in Scripture alone. I believe this also. I also believe that we are declared righteous, not because of ourselves, but due to the righteousness of Christ imputed to us.
Roman Catholicism tells us that this teaching is foreign to the history of the church, that the Church never taught it and the early church fathers never taught it. Rome claims that we believe in a legal fiction.
Granted, the church could have been wrong all along, but is that likely? Since some doctrines took time to develop, can we say that the church never spoke about the imputed righteousness of Christ? I would like to investigate this. Can you help?
Justification is an act of God's free grace, wherein he pardoneth all our sins, and accepteth us as righteous in his sight, only for the righteousness of Christ imputed to us, and received by faith alone.
This statement sets forth the doctrine of justification held by the Reformers, and accurately represents the Bible's presentation of the constituent elements of a believer's judicial standing before an all holy God.
The magazine Modern Reformation has made Lecture 3 of Buchanan's excellent book available online here. In that lecture Buchanan supplies numerous and lengthy quotations from the early church fathers to support his assertion.
Buchanan wisely notes, "This doctrine was always held in substance by true believers; but it seems to have been reserved, for its fuller development, and more precise definition, till the great controversy which arose between the Romish and the Reformed Churches in the sixteenth century." (page 87) Just as controversies led to the carefully crafted Trinitarian formulations of Nicaea in the fourth century, and the Christological formulations of Chalcedon in the fifth century, so the controversies of the Reformation period led to the magnificent statements of our Reformed confessions on justification.
Is the Protestant doctrine of justification legal fiction? When I was a boy, I remember being taught by my Presbyterian Sunday School teachers a definition of justification that went like this: "God treats me as if I had never sinned." Such a definition is simply wrong, and opens the way to the charge of legal fiction.
God does not treat the believer as if he were righteous. He is in fact righteous by virtue of his union with Jesus Christ. God has made Christ "our righteousness" (1 Cor. 1:30), and in Christ we are "the righteousness of God" (2 Cor. 5:21).
No fiction here. In Christ the believer is truly righteous.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
AUTHOR = "SRI International"
SITENAME = "Scalable Informatics for Biomedical Imaging Studies"
SITESUBTITLE = ""
SITEURL = ""
TIMEZONE = "America/Vancouver"
DEFAULT_LANG = "en"
# Theme and plugin paths are resolved from the active conda environment when
# present, otherwise from a local development checkout.
conda_env = os.environ.get('CONDA_PREFIX', "")
# Theme
if conda_env == "":
    THEME = "/Users/nicholsn/Repos/pelican-themes/pelican-bootstrap3"
else:
    THEME = conda_env + "/lib/python2.7/site-packages/pelican-themes/pelican-bootstrap3"
# Theme specific config
MENUITEMS = [['Scalable Informatics for Biomedical Imaging Studies', '/index.html'],
             ['About', '/pages/about.html'],
             ['Team', '/pages/team.html'],
             ['Contact', '/pages/contact.html']]
BOOTSTRAP_THEME = "spacelab"
PYGMENTS_STYLE = 'solarizedlight'
SITELOGO = "images/logo/SIBIS-logo-header.png"
SITELOGO_SIZE = "60%"
HIDE_SITENAME = True
#DISPLAY_BREADCRUMBS = True
#DISPLAY_CATEGORY_IN_BREADCRUMBS = True
BOOTSTRAP_NAVBAR_INVERSE = False
FAVICON = "images/logo/favicon.png"
DISPLAY_ARTICLE_INFO_ON_INDEX = True
ABOUT_ME = ""
AVATAR = ""
CC_LICENSE = "CC-BY"
SHOW_ARTICLE_AUTHOR = True
SHOW_ARTICLE_CATEGORY = True
USE_PAGER = True
BOOTSTRAP_FLUID = True
RELATED_POSTS_MAX = 10
USE_OPEN_GRAPH = True
# Notebook Rendering
NOTEBOOK_DIR = 'notebooks'
# Fix: read the notebook header with a context manager so the file handle is
# closed promptly instead of leaked (the previous bare open() was never closed).
with open('_nb_header.html') as _nb_header_file:
    EXTRA_HEADER = _nb_header_file.read().decode('utf-8')
CUSTOM_CSS = 'static/custom.css'
# Template settings
DISPLAY_PAGES_ON_MENU = False
DISPLAY_CATEGORIES_ON_MENU = False
DIRECT_TEMPLATES = ('index', 'categories', 'authors', 'archives', 'search')
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),)
TAG_CLOUD_MAX_ITEMS = 20
DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_TAGS_INLINE = True
DISPLAY_CATEGORIES_ON_SIDEBAR = False
HIDE_SIDEBAR = True
# Articles per page
DEFAULT_PAGINATION = 10
RECENT_POST_COUNT = 5
# Plugins
if conda_env == "":
    PLUGIN_PATHS = ["/Users/nicholsn/Repos/pelican-plugins"]
else:
    PLUGIN_PATHS = [conda_env + "/lib/python2.7/site-packages/pelican-plugins"]
PLUGINS = ['related_posts', 'tipue_search', 'liquid_tags.img',
           'liquid_tags.video', 'liquid_tags.youtube',
           'liquid_tags.vimeo', 'liquid_tags.include_code',
           'liquid_tags.notebook']
# Static paths and cname mapping
PATH = "content"
STATIC_PATHS = ['images', 'extra/custom.css', 'form']
EXTRA_PATH_METADATA = {
    'extra/custom.css': {'path': 'static/custom.css'}
}
PAGE_PATHS = ['pages']
ARTICLE_PATHS = ['articles']
ARTICLE_EXCLUDES = ['.']
# Social widget
SOCIAL = (('Github', 'https://github.com/sibis-platform'),)
# Disqus config
DISQUS_SITENAME = ""
# Addthis
ADDTHIS_PROFILE = ""
ADDTHIS_DATA_TRACK_ADDRESSBAR = False
# Github
GITHUB_USER = "sibis-platform"
GITHUB_REPO_COUNT = 3
GITHUB_SKIP_FORK = True
GITHUB_SHOW_USER_LINK = True
ARTICLE_EDIT_LINK = 'https://github.com/sibis-platform/sibis-platform.github.io/blob/gh-pages/content/%(slug)s.md'
# Google registration
GOOGLE_SEARCH = ""
GOOGLE_ANALYTICS_UNIVERSAL = ""
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = False
CATEGORY_FEED_ATOM = False
TRANSLATION_FEED_ATOM = None
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
|
This innovation book offers a complete understanding of what innovation is and how to achieve it.
Scott presents us with a list of the biggest innovators in the world, summarizing their works and studies. There is also a second list of the five innovators who most influenced him.
A complete 28-day Innovation Program is laid out for us, with examples and tasks to increase innovation in our projects by discovering opportunities and by creating, analyzing, and testing ideas.
|
"""
Django settings for foodbank project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'h4ker#*e3a^ke)-rc4#$h4^j27ct^l8uktv&o0!tid+p%@e_+0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'share_it',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'foodbank.urls'
WSGI_APPLICATION = 'foodbank.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Outgoing mail: Gmail over implicit SSL (port 465 + EMAIL_USE_SSL).
# NOTE(review): live credentials are hard-coded and committed here; move them
# to environment variables or a secrets store.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 465
EMAIL_HOST_USER = 'codeepy@gmail.com'
EMAIL_HOST_PASSWORD = 'fr33l0v3'
EMAIL_USE_SSL = True
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.contrib.auth.context_processors.auth',
)
|
Tech billionaire and philanthropist Elon Musk has taken steps to help clean the water in the Flint School District, donating over $480,000 worth of ultraviolet water filtration systems to be installed in all 12 schools and administrative buildings, says Flint Superintendent Derrick Lopez. “The new water filtration systems will be instrumental in helping our students return to the normalcy of what should be a fundamental right: having access to safe, clean water from water fountains in their school.” The donation comes after the PayPal co-founder and CEO of Tesla pledged on Twitter back in July to personally pay for fixing homes with contaminated water. Now that is putting your money where your mouth is!
|
# -*- coding: utf-8 -*-
"""
Register the available formats and their capabilities.
"""
import magic
from django.conf import settings
from transifex.txcommon import import_to_python
from transifex.txcommon.log import logger
class _FormatsRegistry(object):
    """Registry class for the formats."""
    def __init__(self, methods=None, handlers=None):
        """It initializes the variables of the registry.
        The variables are:
        methods: A dictionary of the available methods.
        handlers: A dictionary of the available handlers.
        """
        self.methods = methods or settings.I18N_METHODS
        self.handlers = {}
        handlers = handlers or settings.I18N_HANDLER_CLASS_NAMES
        # Resolve the dotted class paths into actual handler classes once,
        # at construction time.
        for method, klass in handlers.iteritems():
            self.handlers[method] = import_to_python(klass)
    def _string_to_list(self, string):
        """
        Convert a string of multiple items separated by commas and spaces
        to a list.
        """
        return string.split(', ')
    def add_handler(self, m, klass, priority=False):
        """Register a new handler for the type m.
        Args:
            m: A i18n_method.
            klass: A handler class for the specified method.
            priority: if this is a priority request, then register the
                handler for the method anyway. Else, ignore the request.
        Returns:
            True, if the handler was added successfully, False otherwise.
        """
        # Without priority, an existing registration is never overwritten.
        if m in self.handlers and not priority:
            return False
        self.handlers[m] = klass
        return True
    @property
    def available_methods(self):
        """Get the available methods.

        POT is internal bookkeeping and is excluded from the result.
        """
        methods = self.methods.copy()
        del methods['POT']
        return methods
    def descriptions(self):
        """Get the available descriptions along with the
        method they correspond to.
        """
        return [
            (m, '%s (%s)' % (v['description'], v['file-extensions']))
            for m, v in self.methods.items()
            if m != 'POT'
        ]
    def extensions_for(self, m):
        """Get the extensions for the specified method.
        Returns:
            A list of file extensions or an empty list,
            in case no such method exists.
        """
        if m not in self.methods:
            return []
        return self._string_to_list(self.methods[m]['file-extensions'])
    def guess_method(self, filename=None, mimetype=None):
        """
        Return an appropriate Handler class for given file.
        The handler is selected based on libmagic and the file extension
        or the mime type.
        Args:
            filename: The path to the file.
            mimetype: The mime type of the file.
        Returns:
            An appropriate handler class for the file.
        """
        i18n_type = None
        if filename is not None:
            try:
                m = magic.Magic(mime=True)
                # guess mimetype and remove charset
                mime_type = m.from_file(filename)
            except AttributeError, e:
                # Older python-magic API: fall back to the
                # open()/load()/file() call sequence.
                m = magic.open(magic.MAGIC_NONE)
                m.load()
                mime_type = m.file(filename)
                m.close()
            except IOError, e:
                # file does not exist in the storage
                mime_type = None
            except Exception, e:
                logger.error("Uncaught exception: %s" % e.message, exc_info=True)
                # We don't have the actual file. Depend on the filename only
                mime_type = None
            # First method whose extensions match the filename, or whose
            # mimetype list contains the sniffed mime type, wins.
            for method, info in self.methods.items():
                if filter(filename.endswith, info['file-extensions'].split(', ')) or\
                        mime_type in info['mimetype'].split(', '):
                    i18n_type = method
                    break
        elif mimetype is not None:
            for method in self.handlers:
                if mimetype in self.mimetypes_for(method):
                    i18n_type = method
                    break
        return i18n_type
    def is_supported(self, m):
        """Check whether the method is supported.
        Args:
            m: The method to check.
        Returns:
            True, if it is supported. Else, False.
        """
        return m in self.methods
    def mimetypes_for(self, m):
        """Get the mimetypes for the specified method.
        Args:
            m: The method which we want the mimetypes for.
        Returns:
            The mimetypes for the method or an empty list.
        """
        if m not in self.methods:
            return []
        return self._string_to_list(self.methods[m]['mimetype'])
    def handler_for(self, m):
        """Return a handler for the i18n type specified.
        Args:
            m: A i18n_method.
        Returns:
            A new handler instance for the method or None, in case the
            method has not been registered.
        """
        if m not in self.handlers:
            return None
        # Note: a fresh instance of the handler class is returned each call.
        return self.handlers[m]()
    def appropriate_handler(self, resource, language, **kwargs):
        """Return the appropriate handler based on the arguments.
        The arguments may be the filename of the resource or whether
        a pot file has been requested.
        Args:
            resource: The resource the handler is for.
            language: The language the handler is asked for.
        Returns:
            A subclass of formats.core.Handler or None.
        """
        method = resource.i18n_type
        handler = registry.handler_for
        # Only PO/POT files need special treatment
        if method != 'PO':
            return handler(method)
        # Override the behavior manually
        wants_pot = kwargs.get('wants_pot')
        if wants_pot:
            return handler('POT')
        # Check file extension
        filename = kwargs.get('filename')
        if filename is not None:
            if filename.endswith('po'):
                return handler('PO')
            else:
                return handler('POT')
        # Return POT, when no language has been asked
        if language is None:
            return handler('POT')
        return handler('PO')
        # # Always return PO for non-source language files
        # slang = resource.source_language
        # if language != slang:
        #     return handler('PO')
        # Should never be here
        # NOTE(review): unreachable -- every path above already returned.
        return None
    def file_extension_for(self, resource, language):
        """Return the filename extension that should be used
        for the specific resource-language pair.

        For PO resources the POT extension is used when no language is
        given; any other method uses its first registered extension.
        """
        resource_method = resource.i18n_method
        try:
            if resource_method != 'PO':
                return self.extensions_for(resource_method)[0]
            if language is None:
                return self.extensions_for('POT')[0]
            return self.extensions_for(resource_method)[0]
        except IndexError, e:
            msg = "No extensions for resource %s: %s"
            logger.error(msg % (resource, e), exc_info=True)
            raise
# Module-level registry instance (also referenced by appropriate_handler above).
registry = _FormatsRegistry()
|
Crawling out from the dregs of Kansas City, I Love You offers up a wild amalgamation of dub, noise, punk and African pop with its latest album, Bell Ord Forrest (Joyful Noise). Opening single “The Colloquialism Is Simply ‘Gas'” hypnotizes listeners with an infectious bass line and pulsating drums only to disintegrate into mountains of fuzz, dissonant skronks and manic shouts in the end. I Love You’s toe-tapping grooves, combined with untamed noise, will guarantee to make any listener get out on the dance floor or just ingest a large pile of drugs.
|
#!/usr/bin/env python
"""
More complex demonstration of what's possible with the progress bar.
"""
import threading
import time
from prompt_toolkit import HTML
from prompt_toolkit.shortcuts import ProgressBar
def main():
    """Drive eight progress-bar counters concurrently, one daemon thread each."""
    with ProgressBar(
        title=HTML("<b>Example of many parallel tasks.</b>"),
        bottom_toolbar=HTML("<b>[Control-L]</b> clear <b>[Control-C]</b> abort"),
    ) as pb:
        def run_task(label, total, sleep_time):
            # Each call to pb(...) adds one labelled counter to the bar.
            for i in pb(range(total), label=label):
                time.sleep(sleep_time)
        threads = [
            threading.Thread(target=run_task, args=("First task", 50, 0.1)),
            threading.Thread(target=run_task, args=("Second task", 100, 0.1)),
            threading.Thread(target=run_task, args=("Third task", 8, 3)),
            threading.Thread(target=run_task, args=("Fourth task", 200, 0.1)),
            threading.Thread(target=run_task, args=("Fifth task", 40, 0.2)),
            threading.Thread(target=run_task, args=("Sixth task", 220, 0.1)),
            threading.Thread(target=run_task, args=("Seventh task", 85, 0.05)),
            threading.Thread(target=run_task, args=("Eight task", 200, 0.05)),
        ]
        # Daemon threads so a Control-C exit does not hang on them.
        for t in threads:
            t.daemon = True
            t.start()
        # Wait for the threads to finish. We use a timeout for the join() call,
        # because on Windows, join cannot be interrupted by Control-C or any other
        # signal.
        for t in threads:
            while t.is_alive():
                t.join(timeout=0.5)
if __name__ == "__main__":
    main()
|
It is not what is said, but how it is said that so often becomes the theme for malpractice trial tactics. Defendant and plaintiff attorneys will use the words of the nurse to accentuate the tone of the medical record. Enlarging medical records to billboard size may catapult a nurse’s simple note into an embarrassing display of observations gone awry.
To prevent charting from receiving such scrutiny and giving an inaccurate perception of the care that was actually delivered, the charting guidelines in the Sidebar should be followed.
When attorneys review medical records, they seek a possible breach in standard care, an injury to the patient, and a causal link between the breach and the injury. One’s memory of an event where it differs from the medical record is unlikely to be persuasive. The poorly written but voluminous record often increases the liability risk by providing the plaintiff’s attorney with ample material from which to choose the example to build the plaintiff’s case. A well-written medical record may influence the attorney to have the plaintiff drop the matter without further action. Investing more time in record documentation can be beneficial and can help to avoid the stressful, expensive, and demoralizing effects of malpractice litigation.
All appropriate blanks must be filled in or boxes checked. Empty spaces give the impression that care was not delivered, side rails were not up as ordered, or the patient was not turned in bed or out of bed as required. The patient’s attorney will carefully scrutinize all of the forms in the medical record. A case may be decided on the failure to fill in a blank or check the patient care plan for instructions on whether, for example, a patient needs assistance with feeding or ambulation.
Accurately describe all unusual occurrences. Masking their existence could send a red flag to a plaintiff’s attorney that the hospital is trying to hide something.
Any threats and complaints must be documented in a non-judgmental, neutral manner. If health care personnel document their irritation and blame the patient, the tone is set for a hostile atmosphere that will give the wrong impression of care delivered.
Patient and family concerns must be documented, and follow-up related to those concerns is paramount. Lawsuits often can be avoided if the nurses indicate they have followed up on a family’s concerns.
Avoid using defensive, argumentative, blaming, and vague language.
If another person’s entry requires action or follow-up, do it and document the response.
The health care professional must use legible penmanship. Errors generated by illegible writing can be avoided. Good penmanship will eliminate the need for guesswork.
Avoid any comments implying the patient’s complaints are groundless. Avoid statements in the record that reveal frustration with the patient.
Record alteration must include the date and time of the change with careful attention not to obliterate the record itself.
Determine if the new note would be more appropriate than a change in the prior record.
The record should reflect acknowledgement and discussion with the patient of possible outcomes and complications.
Avoid any reference in the record that could be interpreted as uncaring, insensitive, or implying negligence.
Statements that may have legal significance, but which have no direct bearing on the care of the patient should not be written in the record.
Risk-prevention activity, such as completion of an incident report, notification of insurance claims personnel, risk management, or contact with an attorney, should not be within the record. This may inadvertently disclose information that should have been privileged, but because of disclosure, could be used by the defendant in a lawsuit.
Medical mishaps should be documented concisely. The incidents should not be overstated or misrepresented, but the mishaps should not be concealed or understated.
Legal threats and complaints about the quality of care may be briefly documented in the patient’s record in a non-judgmental, neutral manner. Do not use terms such as “vicious, nasty, malicious,” in the medical record. A detailed report of the threat or complaint should be documented precisely as stated in the incident report.
Do not understate the patient’s condition. Clearly document their mentation and any other observations objectively.
Always document the worries or concerns expressed by the patient or family. Then document the nurse’s actions to calm their fears.
Document in the record sources of information if other than the patient, such as wife or child.
Document important warnings given to the patient at the time of discharge. Failure to provide pertinent information at time of discharge can trigger a readmission and complications that can be traced to the inadequate discharge instructions.
Always document evidence of patient noncompliance.
Informed consent documentation is mandatory. It is highly recommended to use a separate form for the informed consent.
Any statement in the record by a member of the health care team may also be used as expert testimony. Thus, a recommendation or an implied need for action written in the record may be recognized as a standard of care and used to prove negligence if the response is inadequate. Avoid documenting the need for an action that is not going to be taken.
Avoid direct disagreement with any other health professional in the record.
If an injury occurs to a patient, do not make statements in the record about being careful prior to the injury.
Do not blame others in the record.
If another professional does not respond, document that the person was notified, the information relayed, and the time of such notification.
Any failure to respond can be deduced from the records, but a full detail should be outlined in the incident report.
If an instrument breaks, describe the break, but omit any theories on why it broke. If a bottle of IV fluid, for example, contains a precipitate, describe its appearance, but omit any opinion of the possible cause of the precipitate.
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add router service insertion tables
Revision ID: 4105f6d52b82
Revises: 4cedd30aadf6
Create Date: 2014-12-24 19:25:38.042068
"""
# revision identifiers, used by Alembic.
revision = '4105f6d52b82'
down_revision = '4cedd30aadf6'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Generic binding of a resource (identified by id + type) to a router.
    op.create_table(
        'midonet_servicerouterbindings',
        sa.Column('resource_id', sa.String(length=36), nullable=False),
        sa.Column('resource_type', sa.String(length=36), nullable=False),
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(['router_id'], [u'routers.id'],
                                name='midonet_servicerouterbindings_ibfk_1'),
        sa.PrimaryKeyConstraint('resource_id', 'resource_type'))
    # At most one service type per router: router_id alone is the primary key.
    op.create_table(
        'midonet_routerservicetypebindings',
        sa.Column('router_id', sa.String(length=36), nullable=False),
        sa.Column('service_type_id', sa.String(length=36), nullable=False),
        sa.ForeignKeyConstraint(
            ['router_id'], ['routers.id'],
            name='midonet_routerservicetypebindings_ibfk_1'),
        sa.PrimaryKeyConstraint(u'router_id'))
def downgrade():
    """Drop the tables created by upgrade()."""
    for table_name in ('midonet_routerservicetypebindings',
                       'midonet_servicerouterbindings'):
        op.drop_table(table_name)
|
Side Effects is pleased to bring you a Houdini Masterclass video titled "Python Tips and Tricks" which includes information on new Python features in Houdini 11. Presented by Senior Software Developer Luke Moore, this video introduces you to a wide variety of important concepts.
Topics include an introduction to Houdini's Python API, scripting digital assets, implementing nodes in Python, using Python in parameter expressions, and creating user interfaces from Python.
|
__author__ = 'mt'
# -*- coding: utf-8 -*-
import math
from counts import *
import jieba
import jieba.analyse
from bs4 import BeautifulSoup
def to_string(content, strip=True):
    """Collapse an HTML document to its visible body text, newline-joined."""
    soup = BeautifulSoup(content)
    body = soup.html.body
    return body.get_text('\n', strip=strip)
def _cos(x, y):
ans = 0.
len_x = 0
len_y = 0
for i in range(len(x)):
ans += x[i] * y[i]
len_x += x[i] ** 2
len_y += y[i] ** 2
return math.sqrt(math.fabs(ans)) / math.sqrt(len_x) / math.sqrt(len_y)
def cos(x, y):
    """Similarity of *x* and *y* via _cos() when their lengths match.

    Prints a warning and (implicitly) returns None on a length mismatch,
    matching the original behavior.
    """
    if len(x) == len(y):
        return _cos(x, y)
    # Parenthesized single-argument print behaves identically under
    # Python 2 and keeps the module parseable under Python 3 (the bare
    # print statement was Python-2-only syntax).
    print("Vectors' lengths are different")
def parse_doc_list(docs, vocab):
    """Tokenize each document with jieba and count in-vocabulary tokens.

    @param docs: a list of documents; each document must be a string.
    @param vocab: mapping of allowed words to token ids; any word absent
        from it is ignored. Output ids are shifted down by one, so the
        stored ids are presumably 1-based -- confirm against the vocab
        builder.
    @return: a pair (wordids, wordcts) of parallel lists of lists.
        wordids[i][j] is the jth unique (shifted) token id present in
        document i, in no particular order; wordcts[i][j] is how many
        times that token appears in document i.
    """
    #jieba.initialize()
    wordids = list()
    wordcts = list()
    for doc in docs:
        token_counts = dict()
        for token in jieba.cut(doc):
            if token not in vocab:
                continue
            token_id = vocab[token]
            token_counts[token_id] = token_counts.get(token_id, 0) + 1
        # Shift each id down by one so the returned ids are zero-based.
        wordids.append([token_id - 1 for token_id in token_counts.keys()])
        wordcts.append(token_counts.values())
    return wordids, wordcts
def doc_to_vector(doc, vocab):
    """Convert one document to a dense count vector of length VECTOR_LEN.

    Slot t holds the count of token id t reported by parse_doc_list, or
    0. when the token does not occur. VECTOR_LEN is presumably supplied by
    the ``counts`` star-import -- confirm.
    """
    wordids, wordcts = parse_doc_list([doc], vocab)
    wordids, wordcts = wordids[0], wordcts[0]
    counts_by_id = {}
    for pos in range(len(wordids)):
        counts_by_id.setdefault(str(wordids[pos]), wordcts[pos])
    dense = []
    for token_id in range(VECTOR_LEN):
        dense.append(counts_by_id.get(str(token_id), 0.))
    return dense
def get_base_vectors(db=None, num_topics=20):
    """Fetch the stored base (topic) vectors from MongoDB.

    @param db: optional Mongo database handle; when falsy, the project
        default from dbs.mongodb is imported and used.
    @param num_topics: how many topic vectors to fetch; defaults to the
        previously hard-coded 20 for backward compatibility.
    @return: list of the 'v' field of each vector document, keyed by
        stringified index.
    """
    if not db:
        from dbs.mongodb import db
    return [db.vector.find_one({'_id': str(i)})['v'] for i in range(num_topics)]
def vector_to_topic_vector(vector, base_vector):
    """Project *vector* onto every base/topic vector using cos().

    Generalized to iterate over however many base vectors are supplied
    instead of the previously hard-coded 20 (identical behavior for the
    usual 20-vector input from get_base_vectors()).
    """
    return [cos(vector, base) for base in base_vector]
if __name__ == '__main__':
    # Quick manual smoke test. The parenthesized single-argument print is
    # valid in both Python 2 and Python 3 (the bare print statement was
    # Python-2-only syntax).
    print(cos([0.1, -0.1], [1.1, -0.9]))
|
Somos especializados 3306 Assys Do Motor fabricantes e fornecedores / fábrica da China. 3306 Assys Do Motor atacado com alta qualidade como preço baixo / barato, uma das 3306 Assys Do Motor marcas líderes da China, Jining Far East Machinery Equipment Co.,Ltd.
Wholesale 3306 Assys Do Motor from China. Need to find cheap 3306 Assys Do Motor at a low price from leading manufacturers? Just find high-quality brands from the 3306 Assys Do Motor factory. You can also send feedback about what you want; start saving and explore our 3306 Assys Do Motor. We'll reply to you as fast as possible.
|
###
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from django.conf import settings
#############################################
# this is file is an abstraction for all visualizations to access for gathering data.
#############################################
# static endpoints
# URI templates for the App Engine Endpoints services backing the
# visualizations; the {placeholders} are filled in by callers before the
# request is issued.
MAF_ENDPOINT_URI_TEMPLATE = settings.BASE_API_URL + '/_ah/api/maf_api/v1/maf_search?gene={gene}&{tumor_parameters}'
BQ_ENDPOINT_URL = settings.BASE_API_URL + '/_ah/api/bq_api/v1'
INTERPRO_BQ_ENDPOINT_URI_TEMPLATE = settings.BASE_API_URL + '/_ah/api/bq_api/v1/bq_interpro?uniprot_id={uniprot_id}'
# Static definitions
SEQPEEK_VIEW_DEBUG_MODE = False  # extra debug behavior for the SeqPeek view
SAMPLE_ID_FIELD_NAME = 'tumor_sample_barcode'  # field carrying the sample identifier
TUMOR_TYPE_FIELD = "tumor"  # field carrying the tumor type
COORDINATE_FIELD_NAME = 'amino_acid_position'  # protein-coordinate field
PROTEIN_DOMAIN_DB = 'PFAM'  # protein domain database used for annotations
# Static definitions
# Maps internal attribute/feature identifiers to the human-readable labels
# shown to users; the EGFR/miRNA entries are specific per-gene features
# (expression, copy number, methylation probes).
friendly_name_map = {
    'disease_code':'Disease Code',
    'gender':'Gender',
    'mirnPlatform':'microRNA expression platform',
    'gexpPlatform':'gene (mRNA) expression platform',
    'methPlatform':'DNA methylation platform',
    'rppaPlatform':'protein quantification platform',
    'cnvrPlatform':'copy-number platform',
    'age_at_initial_pathologic_diagnosis':'age at diagnosis',
    'hsa_miR_146a_5p':'hsa-miR-146a-5p expression (log2[normalized_counts+1])',
    'hsa_miR_7_7p':'hsa-miR-7-5p expression (log2[normalized_counts+1])',
    'CNVR_EGFR':'EGFR copy-number (log2[CN/2])',
    'EGFR_chr7_55086714_55324313':'EGFR expression (log2[normalized_counts+1])',
    'EGFR_chr7_55086714_55324313_EGFR':'EGFR protein quantification',
    'EGFR_chr7_55086288_cg03860890_TSS1500_Island':'EGFR methylation (TSS1500, CpG island)',
    'EGFR_chr7_55086890_cg14094960_5pUTR_Island':"EGFR methylation (5' UTR, CpG island)",
    'EGFR_chr7_55089770_cg10002850_Body_SShore':'EGFR methylation (first intron, cg10002850)',
    'EGFR_chr7_55177623_cg18809076_Body':'EGFR methylation (first intron, cg18809076)'
}
# Attributes treated as continuous/numeric values.
numerical_attributes = [
    'age_at_initial_pathologic_diagnosis',
    'hsa_miR_146a_5p',
    'hsa_miR_7_7p',
    'CNVR_EGFR',
    'EGFR_chr7_55086714_55324313',
    'EGFR_chr7_55086714_55324313_EGFR',
    'EGFR_chr7_55086288_cg03860890_TSS1500_Island',
    'EGFR_chr7_55086890_cg14094960_5pUTR_Island',
    'EGFR_chr7_55089770_cg10002850_Body_SShore',
    'EGFR_chr7_55177623_cg18809076_Body'
]
# Attributes treated as discrete/categorical values.
categorical_attributes = [
    'disease_code',
    'gender',
    'mirnPlatform',
    'gexpPlatform',
    'methPlatform',
    'rppaPlatform',
    'cnvrPlatform'
]
# The fm_* variants cover the broader clinical/sample attribute set
# ("fm" presumably = feature matrix -- confirm against callers), mirroring
# the structure of the maps/lists above.
fm_friendly_name_map = {
    'percent_lymphocyte_infiltration':'Percent Lymphocyte Infiltration',
    'percent_monocyte_infiltration':'Percent Monocyte Infiltration',
    'percent_necrosis':'Percent Necrosis',
    'percent_neutrophil_infiltration':'Percent Neutrophil Infiltration',
    'percent_normal_cells':'Percent Normal Cells',
    'percent_stromal_cells':'Percent Stromal Cells',
    'percent_tumor_cells':'Percent Tumor Cells',
    'percent_tumor_nuclei':'Percent Tumor Nuclei',
    'age_at_initial_pathologic_diagnosis':'Age at Diagnosis',
    'days_to_birth':'Days to Birth',
    'days_to_initial_pathologic_diagnosis':'Days to Diagnosis',
    'year_of_initial_pathologic_diagnosis':'Year of Diagnosis',
    'days_to_last_known_alive':'Days to Last Known Alive',
    'tumor_necrosis_percent':'Tumor Necrosis Percent',
    'tumor_nuclei_percent':'Tumor Nuclei Percent',
    'tumor_weight':'Tumor Weight',
    'days_to_last_followup':'Days to Last Followup',
    'gender':'Gender',
    'history_of_neoadjuvant_treatment':'History of Neoadjuvant Treatment',
    'icd_o_3_histology':'ICD-O-3 Code',
    'other_dx':'Prior Diagnosis',
    'vital_status':'Vital Status',
    'country':'Country',
    'disease_code':'Disease Code',
    'histological_type':'Histological Type',
    'icd_10':'ICD-10 Category',
    'icd_o_3_site':'ICD-O-3 Site',
    'tumor_tissue_site':'Tumor Tissue Site',
    'tumor_type':'Tumor Type',
    'person_neoplasm_cancer_status':'Neoplasm Cancer Status',
    'pathologic_N':'Pathologic N Stage',
    'radiation_therapy':'Radiation Therapy',
    'pathologic_T':'Pathologic T Stage',
    'race':'Race',
    'ethnicity':'Ethnicity',
    'sampleType':'Sample Type',
    'DNAseq_data':'DNA Sequencing Data',
    'mirnPlatform':'microRNA expression platform',
    'gexpPlatform':'gene (mRNA) expression platform',
    'methPlatform':'DNA methylation platform',
    'rppaPlatform':'protein quantification platform',
    'cnvrPlatform':'copy-number platform',
}
# Clinical/sample attributes treated as continuous/numeric values.
fm_numerical_attributes = [
    'percent_lymphocyte_infiltration',
    'percent_monocyte_infiltration',
    'percent_necrosis',
    'percent_neutrophil_infiltration',
    'percent_normal_cells',
    'percent_stromal_cells',
    'percent_tumor_cells',
    'percent_tumor_nuclei',
    'age_at_initial_pathologic_diagnosis',
    'days_to_birth',
    'days_to_initial_pathologic_diagnosis',
    'year_of_initial_pathologic_diagnosis',
    'days_to_last_known_alive',
    'tumor_necrosis_percent',
    'tumor_nuclei_percent',
    'tumor_weight',
    'days_to_last_followup'
]
# Clinical/sample attributes treated as discrete/categorical values.
fm_categorical_attributes = [
    'gender',
    'history_of_neoadjuvant_treatment',
    'icd_o_3_histology',
    'other_dx',
    'vital_status',
    'country',
    'disease_code',
    'histological_type',
    'icd_10',
    'icd_o_3_site',
    'tumor_tissue_site',
    'tumor_type',
    'person_neoplasm_cancer_status',
    'pathologic_N',
    'radiation_therapy',
    'pathologic_T',
    'race',
    'ethnicity',
    'sampleType',
    'DNAseq_data',
    'mirnPlatform',
    'cnvrPlatform',
    'methPlatform',
    'gexpPlatform',
    'rppaPlatform'
]
|
it is okay orl i am a big snider and can handle rejection. i really can. she is right and i have to work harder. I CAN make her love me once more. I can be that man again.
|
from uber.models import Attendee
from uber.config import c
def check_range(badge_num, badge_type):
    """Validate a badge number against the configured range for its type.

    Returns an error string describing the problem, or None when the number
    is valid, zero/unassigned, or not supplied at all.
    """
    if badge_num is None:
        return None
    try:
        badge_num = int(badge_num)
    except Exception:
        return '"{}" is not a valid badge number (should be an integer)'.format(badge_num)
    if not badge_num:
        # Zero means "no badge number assigned"; nothing to range-check.
        return None
    min_num, max_num = c.BADGE_RANGES[int(badge_type)]
    if min_num <= badge_num <= max_num:
        return None
    return '{} badge numbers must fall within the range {} - {}'.format(
        dict(c.BADGE_OPTS)[badge_type], min_num, max_num)
def is_badge_unchanged(attendee, old_badge_type, old_badge_num):
    """True when the attendee's badge still matches the old type and number.

    An empty/zero old number normalizes to None; an attendee without a
    badge number counts as unchanged as long as the type matches.
    """
    normalized_old_num = int(old_badge_num or 0) or None
    if old_badge_type != attendee.badge_type:
        return False
    return not attendee.badge_num or normalized_old_num == attendee.badge_num
def reset_badge_if_unchanged(attendee, old_badge_type, old_badge_num):
    """
    The "change badge" page can pass an empty string for the badge number,
    but if nothing actually changed about the attendee's badge, we need the
    old number back!
    """
    if not is_badge_unchanged(attendee, old_badge_type, old_badge_num):
        return None
    attendee.badge_num = old_badge_num
    return 'Attendee is already {} with badge {}'.format(c.BADGES[old_badge_type], old_badge_num)
# TODO: returning (result, error) is not a convention we're using anywhere else,
# so maybe change this to be more idiomatic if convenient, but not a big deal
def get_badge_type(badge_num):
    """Resolve a badge number to its badge type via c.BADGE_RANGES.

    Returns a (badge_type, error_message) pair; exactly one of the two is
    meaningful -- the error string is empty on success.
    """
    if not c.NUMBERED_BADGES:
        return c.ATTENDEE_BADGE, ''
    try:
        num = int(badge_num)
        for badge_type, (lowest, highest) in c.BADGE_RANGES.items():
            if lowest <= num <= highest:
                return badge_type, ''
        return None, "{0!r} isn't a valid badge number; it's not in the range of any badge type".format(badge_num)
    except Exception:
        return None, '{0!r} is not a valid integer'.format(badge_num)
def get_real_badge_type(badge_type):
    """Map the pseudo dealer/group badge types to ATTENDEE_BADGE; pass others through."""
    if badge_type in (c.PSEUDO_DEALER_BADGE, c.PSEUDO_GROUP_BADGE):
        return c.ATTENDEE_BADGE
    return badge_type
# TODO: perhaps a check_leaderless() for checking for leaderless groups, since those don't get emails
# run through all badges and check 2 things:
# 1) there are no gaps in badge numbers
# 2) all badge numbers are in the ranges set by c.BADGE_RANGES
# note: check 1 below also reports duplicate badge numbers (in addition to
# the separate pre-existing duplicates check)
def badge_consistency_check(session):
    """Audit all assigned badges and return human-readable problem reports.

    Check 1: every assigned badge number falls inside the range configured
    for its badge type, and no two attendees share a number.
    Check 2: within each badge type, the assigned numbers form a gapless
    ascending sequence.

    :param session: database session used to query Attendee rows
    :return: list of error strings (empty when everything is consistent)
    """
    errors = []
    # check 1, see if anything is out of range, or has a duplicate badge number
    badge_nums_seen = []
    attendees = session.query(Attendee).filter(Attendee.first_name != '', Attendee.badge_num != 0) \
        .order_by('badge_num').all()
    for attendee in attendees:
        out_of_range_error = check_range(attendee.badge_num, attendee.badge_type)
        if out_of_range_error:
            msg = '{a.full_name}: badge #{a.badge_num}: {err}'.format(a=attendee, err=out_of_range_error)
            errors.append(msg)
        if attendee.badge_num in badge_nums_seen:
            msg = '{a.full_name}: badge #{a.badge_num}: Has been assigned the same badge number ' \
                'of another badge, which is not supposed to happen'.format(a=attendee)
            errors.append(msg)
        badge_nums_seen.append(attendee.badge_num)
    # check 2: see if there are any gaps in each of the badge ranges
    for badge_type_val, badge_type_desc in c.BADGE_OPTS:
        # prev_* track the previous row so we can compare consecutive numbers
        prev_badge_num = -1
        prev_attendee_name = ""
        attendees = session.query(Attendee) \
            .filter(Attendee.badge_type == badge_type_val, Attendee.first_name != '', Attendee.badge_num != 0) \
            .order_by('badge_num').all()
        for attendee in attendees:
            if prev_badge_num == -1:
                # First row of this badge type: nothing to compare yet.
                prev_badge_num = attendee.badge_num
                prev_attendee_name = attendee.full_name
                continue
            if attendee.badge_num - 1 != prev_badge_num:
                msg = "gap in badge sequence between " + badge_type_desc + " " + \
                    "badge# " + str(prev_badge_num) + "(" + prev_attendee_name + ")" + " and " + \
                    "badge# " + str(attendee.badge_num) + "(" + attendee.full_name + ")"
                errors.append(msg)
            prev_badge_num = attendee.badge_num
            prev_attendee_name = attendee.full_name
    return errors
def needs_badge_num(attendee=None, badge_type=None):
    """
    Takes either an Attendee object, a badge_type, or both and returns whether or not the attendee should be
    assigned a badge number. If neither parameter is given, always returns False.

    :param attendee: Passing an existing attendee allows us to check for a new badge num whenever the attendee
        is updated, particularly for when they are checked in.
    :param badge_type: Must be an integer. Allows checking for a new badge number before adding/updating the
        Attendee() object.
    :return: True if a badge number should be assigned, False otherwise.
    """
    if not badge_type and not attendee:
        # BUG FIX: previously this path (and the NUMBERED_BADGES-disabled
        # path) fell through and returned None, even though the docstring
        # promises False. All paths now return a bool.
        return False
    if not badge_type:
        badge_type = attendee.badge_type
    if not c.NUMBERED_BADGES:
        return False
    if attendee:
        return bool(
            (badge_type in c.PREASSIGNED_BADGE_TYPES or attendee.checked_in)
            and attendee.paid != c.NOT_PAID
            and attendee.badge_status != c.INVALID_STATUS)
    return badge_type in c.PREASSIGNED_BADGE_TYPES
|
Bring energy to your landscape by planting red shrubs and flowers.
Anyone who has ever bought a lipstick knows that red comes in many shades. The same is true in the plant world, but, whether burgundy or scarlet, a touch of red lights a spark in any garden. Shrubs, perennials and annual flowers help you keep a red thread in your landscape for the long Mediterranean-climate growing season. Add a little red to bring a lot of life to your plantings.
Several different strategies let you create both excitement and continuity by maintaining a particular color in your garden. One strategy resembles that of decorating a room: select a main color for your scheme, along with one or more accent colors. Choosing red as a main color might mean massing tulips in early spring, with summer-blooming shrub fuchsia and plumleaf azalea, and a front-walk border of annual red salvia. To accent with white, plant white shrub roses under the fuchsia and edge the salvia border with dusty miller. Alternatively, red zinnias and dahlias accent an all-white or white-pink garden scheme. Another strategy is to create color corners in your garden: a series of red-yellow-orange hues around the patio; a blue shade garden; and a grass garden in tones of green, beige, brown and dark red. Within the area, plants change with the seasons, but the color spectrum remains constant. However you decide to use red in your garden, you have lots of plant choices. Red annuals and roses are readily available, but shrubs and perennials offer many less-familiar selections. Expect wildlife attention when you add red to your garden: hummingbirds, butterflies and beneficial pollinators love red, too.
A mid-sized, rounded shrub with a lingering spicy fragrance, Carolina allspice (Calycanthus floridus), also known as sweetshrub or strawberry bush, produces an abundance of scented dark red flowers, usually in May/June. Expect a mature height of 6 to 9 feet, in well-drained organically-rich soil with full to partial sun. Shrub fuchsia (Fuchsia magellinica) is a reliable summer-long bloomer in similar conditions but can grow 10 feet high and wide. Bicolor fuchsia flowers combine red, pink, purple and white, depending on variety. Flowering from summer through fall, plumleaf azalea (Rhododendron prunifolium) is a heat-tolerant large shrub with bright red flowers. Plumleaf azalea grows in full or partial sun. Allow for a mature height of 15 to 30 feet.
Red-flowered shrubs for dry, hot sites originate in Australia and the American Southwest. Favoring pH neutral to slightly-alkaline conditions, Southwest-native Ocotillo (Fouquieria splendens) is a 15- to 30-foot, multi-stemmed shrub whose cylindrical red flowers appear after rain and attract hummingbirds. A good choice for xeric landscaping, this desert plant blooms intermittently in spring, summer and fall. Pincushion hakea (Hakea laurina) is a member of the ancient Protea plant family and favors low-moisture, low-phosphorus soil. Like some other Australian shrubs, hakea roots draw moisture well from a thick layer of mulch. Abundant red blooms appear from early fall through winter, an unusual but welcome source of cut flowers.
Another way to bring red into the landscape is through shrub foliage and bark. Fothergilla x intermedia "Mt. Airy" is a mid-sized shrub with red-orange fall foliage. Red-twig dogwood (Cornus sanguinea) displays bark ranging from crimson to burgundy, fall to spring; "Midwinter Fire" is a bright-hued variety. Weigelas (Weigela spp.), ninebarks (Physocarpus spp.) and barberry (Berberis) are among shrubs well known for burgundy-to-purple leaves, and Chinese fringe-flower (Loropetalum chinense) "Crimson Fire" and "EverRed" pair dark burgundy foliage with starburst-shaped red flowers.
As a small-yard focal point or large-planter specimen, a shrub with red flowers makes an arresting statement. A summer favorite, Hibiscus (H. spp.) is hardy to U.S. Department of Agriculture plant hardiness zones 9 and 10 in both tender and hardy perennial forms. Tropical, tender hibiscus grows several feet tall; red, pink, salmon or yellow flowers appear all summer. Container-grown, tropical hibiscus can be brought in when temperatures go into the 40s and below. Hardy hibiscus reaches heights of 3 to 6 feet; the name of one species, Hibiscus coccineus, is based on its red flowers. Dwarf crepe myrtle "Cherry Dazzle" (Lagearstroemia Gamed I) and dwarf oleander "Little Red" (Nerium oleander) are popular small versions of traditional warm-summer favorite shrubs.
Perennial choices for flowerbeds include several American native plants. Bee balm (Monarda didyma), cardinal flower (Lobelia cardinalis) and blanket flower (Gaillardia spp.) bring intense deep and bright reds in USDA plant hardiness zones 4 to 9. Blanket flower grows in summer-blooming clumps reaching 1 to 2 feet high, while mid-to-late-summer blooming bee balm and cardinal flower spread in clusters 3 to 4 feet tall. All three are excellent components of a hummingbird or butterfly garden and do best in humus soil, although blanket flower tolerates dry soil. Drier gardens benefit from the durability of scarlet or Texas sage (Salvia coccinea), which sends forth 10-inch flower spikes, and red-hot-poker plant (Kniphofia spp.), sometimes called "torch lily," which favors sandy soil and sends forth distinctive flower spears in a red/orange/yellow/white spectrum. Add red accents to a dry grass garden with native scarlet flax (Linum rubrum), a hardy, re-seeding annual that grows 1 to 2 feet tall and tolerates both heat and drought.
Beal, Janet. "Types of Red Bushes & Red Plants and Flowers." Home Guides | SF Gate, http://homeguides.sfgate.com/types-red-bushes-red-plants-flowers-38746.html. Accessed 25 April 2019.
|
import socket
import logging
import AES_Encrypt
import shell
'''
class Hander():
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.fd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def login():
pass
'''
class Hander():
    """Login/key-exchange handshake handler for the encrypted UDP tunnel.

    One class serves both roles: clients (IS_SERVER falsy) read the server
    address from the config; servers only need the shared password.
    NOTE: the (misspelled) class name "Hander" is kept so existing
    importers keep working.
    """

    def __init__(self, IS_SERVER, config_dictionary):
        """Store role-specific config and open the UDP socket.

        @param IS_SERVER: truthy for the server role, falsy for the client.
        @param config_dictionary: must contain "password"; clients must
            additionally supply "addr" and "port" of the server.
        """
        self.IS_SERVER = bool(IS_SERVER)
        self.password = config_dictionary["password"]
        if not IS_SERVER:
            # Only the client needs a fixed peer address to send to.
            self.ip = config_dictionary["addr"]
            self.port = config_dictionary["port"]
        self.udpfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def login(self):
        """Client side: send an encrypted LOGIN request to the server."""
        data = "LOGIN:" + self.password + ":" + "tihs is random message"
        en_data = AES_Encrypt.encrypt(data)
        self.udpfd.sendto(en_data, (self.ip, self.port))

    def check_loggin(self, data, log_times):
        """Client side: parse the server's encrypted reply to login().

        @return: the assigned tunnel IP (str) on success, ``1`` on login
            failure, or ``None`` when the payload cannot be decoded.
        """
        de_data = AES_Encrypt.decrypt(data)
        try:
            de_data = de_data.decode()
        except Exception:
            # Narrowed from a bare ``except``: only decode failures are
            # expected here.
            logging.debug("check login decode error")
            return None
        # success de_data : LOGIN:SUCCESS:10.10.0.2
        # error   de_data : LOGIN:PASSWORD
        if de_data.startswith("LOGIN:SUCCESS"):
            return de_data.split(":")[2]
        return 1  # login error

    def check_passwd(self, data):
        """Server side: validate a client's encrypted LOGIN datagram.

        @return: 0 when the password matches, 1 when the payload cannot be
            decoded, 2 when the password is wrong.
        """
        de_data = AES_Encrypt.decrypt(data)
        try:
            de_data = de_data.decode()
        except UnicodeDecodeError:
            logging.debug("de_data decode error")
            return 1  # passwd decode error
        # BUG FIX: the original tested the still-encrypted ``data`` and
        # mis-parenthesized the comparison as
        # ``(data.startswith("LOGIN:") and data.split(":")[1]) == self.password``,
        # so a correct password would not be recognized. Compare the
        # decrypted text's password field instead.
        if de_data.startswith("LOGIN:") and de_data.split(":")[1] == self.password:
            return 0  # password right
        return 2  # password error

    def send_public_key(self, ip, port):
        """Encrypt public.pem and send it to the peer.

        Servers send to the (ip, port) passed in; clients ignore the
        arguments and use their configured server address.
        """
        if not self.IS_SERVER:
            ip = self.ip
            port = self.port
        with open("public.pem", 'r') as public_pem:
            public_key = public_pem.read()
        en_public_key = AES_Encrypt.encrypt(public_key)
        self.udpfd.sendto(en_public_key, (ip, port))
|
Last month, Home Solutions of America (NASDAQ:HSOA) rocked the world with rapid-fire news that it had entered into two $100 million dollar construction contracts — the largest in its history – one in Tampa and one in New York.The bloom quickly came off the rose as HSOA scrambled to “correct” the New York story – on June 11, it had to re-write history – excising counterparty “SD Consulting” out of the picture because of the revelation that its principal was David Goldwasser, a recently released felon convicted of financial fraud. HSOA inserted “Blue Diamond Group” as the counterparty in the story.Last week, Citron Research suggested that the Tampa project was likely a related party transaction, arranged with a Jeff Craft, an employee of Fireline Restoration, a subsidiary of HSOA. Meanwhile, the status of the New York project, contracted from Blue Diamond, has remained unclear, especially as to its ability to finance or fund a “$100 million dollar contract”.
In the interest of bringing further clarity to this, Citron Research reports that state business filings raise significant questions as to whether Blue Diamond itself is a related party to Fireline, and thus to HSOA.
At www.sunbiz.org/search.html , look up “Blue Diamond Ventures LLC”. On June 6, just days after the New York contract announcement, and before the June 11 “correction” naming Blue Diamond as the counterparty, Richard Holowchak filed as a Florida Limited Liability entity. The listed manager is Tom Davis of Tampa Fl.
The problem is, Tom Davis is the Vice President of Fireline Restoration. http://www.firelinerestoration.com/meet_the_team.htm .
So the question remains – is Blue Diamond a related party to HSOA? The June 11 PR stated that someone hired them and agreed to pay them $100 million – presumably an outside party committing to buy and pay for $100 million in construction services. If the parties are in fact related, the press release is false.
While Fireline still does not appear to be a licensed contractor in New York, Blue Diamond Ventures LLC filed as a Florida Foreign Limited Liability Corp – on July 2, 2007.
Is history still being rewritten?
Is this project an arms-length contract or a joint venture propped up by HSOA’s own employees?
Just because there’s a jobsite, it does not mean that HSOA has earned a contracted right to receive two $100 million payments from outside parties with the ability to pay them. If these bona fide contracts do not exist, the PRs announcing them are false.
How long will Tom Davis remain on the Fireline website? Will he be the third name to disappear?
Citron Research continues to believe HSOA is a house of cards propped up on cashless earnings and mounting receivables.
It is now close to a month and a half after Home Solutions announced the $100 million contract in Tampa and we have finally made some progress in finding out what we believe to be the mystery project. After 6 weeks of searching up and down all over Hillsborough County for what might be this 600,000 square foot project, Citron believes we have finally found it. It seems to fit much of the criteria of what has been described to the public.
It is the belief of Citron Research that the Hillsborough project is attempting to be built (as they don’t have permits) on a 9 acre parcel of land in Gibsonton, Florida. The developer is currently in negotiation with the county in an attempt to get the land rezoned. We believe that the land is being developed by the local real estate development firm of Craftmar Construction and Development. Here is some local reading on Craftmar’s attempt to get zoning for the project.
Yet, there is one problem. As Brian Marshall is telling the investing community, “We are pleased to have been selected for another major construction management project”, he fails to mention…that he owns Craftmar Construction along with Fireline employee Jeff Craft.
Furthermore, in what appears to be a last minute attempt to rewrite history, Jeff Craft has recently been taken off the Fireline website in the past month. It has been so recent, that Google still has the cached version of him on the site.
This is the second time Fireline has had to resort to rewriting history to remove an important person from the Fireline team – Brian Marshall’s brother Roy Marshall had to be expunged from the Fireline site, too. We are left to guess if it was because of his troubling past regarding consumer fraud.
The company never filed an 8-K with the SEC regarding the project.
If the contract does not exist, all investors also must question the mystery receivables.
Citron believes this is all a major SEC 10b-5 violation. The rule prohibits any act or omission resulting in fraud or deceit in connection with the purchase or sale of any security. Omission is key here as the company continues to omit all relevant information to shareholders.
Citron Research believes the problems at Home Solutions extend way beyond this Tampa Project. We still do not know who is paying them $100 million in New York and for what reason they are paying them. We still do not know who owes the receivables and are left staring at the auditors’ comment about not being able to match receivables to invoices. Most importantly there is no definitive vision for the future of the company as they seem to lack cash and a steady workflow of projects that they can actually collect money from.
Most of all, we do not understand how investors are not appalled at the outrageous SG&A expenses of Home Solutions that topped $9 million last quarter while the company continues to deliver cashless earnings. According to management and Sanders Morris we are supposed to believe the company on their word and because we have faith……faith is for religion, not for investing.
|
#!/usr/bin/env python
# Copyright 2012-2013 inBloom, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lridb,json,schema,datetime,copy,prop,timer,time,traceback
import py2neo.neo4j,py2neo.cypher
class neorest(lridb.lridb):
    def open(self):
        """Connect to the Neo4j REST endpoint at ``self.server_url``.

        Also issues a couple of cheap calls (index listing, node/link
        counts) so connectivity problems surface immediately.
        NOTE(review): self.log.debug is called with multiple positional
        arguments -- assumes the project logger accepts varargs; confirm.
        """
        self.log.debug(self.server_url)
        self.ndb = py2neo.neo4j.GraphDatabaseService(self.server_url)
        self.ndb.get_indexes(py2neo.neo4j.Node)
        self.log.debug("RAW NODE COUNT =",self.ndb.get_node_count())
        self.log.debug("RAW LINK COUNT =",self.ndb.get_relationship_count())
    def close(self):
        """No-op; the REST client apparently holds no persistent connection to tear down."""
        pass
    def boot(self,create_indices=False):
        """Load the schema and prepare the database for use.

        With create_indices=True this is a first-time bootstrap (load the
        bootstrap schema files); otherwise the schema is read back from the
        Neo4j store. In test mode only the schema object is constructed.
        """
        self.schema = schema.schema(bootstrap_filenames = self.bootstrap_filenames,in_test_mode=self.in_test_mode)
        if self.in_test_mode:
            return
        # Create a right hand side for literal properties stored in link
        # (node 0 serves as the shared dummy target for such links).
        self.null_node=self.ndb.get_node(0)
        # Make sure our indices are set up
        #self.init_indices()
        self.init_indices(create_indices=create_indices)
        # Get our bootstrap schema
        if create_indices:
            self.schema.load_bootstrap() # First time load
            self.log.debug(self.schema.errors)
        else:
            self.schema.load_from_neodb(self)
    def init_indices(self,create_indices=True):
        """Obtain (creating when absent) the node and link indexes.

        NOTE(review): ``create_indices`` is accepted but unused here --
        get_or_create_index is always idempotent. Confirm no caller expects
        create_indices=False to skip creation.
        """
        self.node_index = self.ndb.get_or_create_index(py2neo.neo4j.Node,"node_index")
        self.link_index = self.ndb.get_or_create_index(py2neo.neo4j.Relationship,"link_index")
def create_entity(self,creator=None,max_tries=3):
self.log.debug("CREATE ENTITY")
if not creator:
creator=self.creator_guid
rec={}
rec["guid"]=lridb.random_guid()
rec["timestamp"] = datetime.datetime.utcnow().isoformat()
rec["creator"] = creator
self.log.debug("CREATE ENTITY VIA REST",rec)
success = False
tries = 0
while not success or tries == max_tries:
try:
n = self.ndb.create(rec)[0]
success = True
except Exception, e:
neoerrs = traceback.format_exc().split("\n")
time.sleep(0.25)
tries +=1
if not success:
self.errors.append("Too many Neo4J BadRequest errors in node creation!")
self.errors.extend(neoerrs)
return None
self.node_index.add("guid",rec["guid"],n)
return n
    def internal_properties(self,x):
        """Return the raw property dict py2neo stores on node/relationship *x*."""
        return x.get_properties()
def links_of_node_gen(self,n):
for link in n.get_relationships():
yield link
    def write_and_index_property_old(self,node,rec,primary_proptype,target_node,max_tries=3):
        """Create a relationship holding property *rec* and index it entry-by-entry.

        Superseded by write_and_index_property(), which does the same
        indexing through a single WriteBatch; apparently kept for
        reference. NOTE(review): uses Python-2-only ``except Exception, e``
        syntax, like the original; left untouched in this legacy variant.

        @return: the created py2neo relationship, or None after max_tries
            failed attempts (errors are appended to self.errors).
        """
        # Now create link to hold actual property and add the literal properties
        success = False
        tries = 0
        while not success and tries < max_tries:
            try:
                link = node.create_relationship_to(target_node,primary_proptype,rec)
                success = True
            except Exception, e:
                tries += 1
                neoerrs = traceback.format_exc().split("\n")
                time.sleep(0.25)
        if not success:
            self.errors.append("Too many Neo4J errors in relationship creation!")
            self.errors.extend(neoerrs)
            return None
        # Keep a JSON copy of the whole record on the link as well.
        link["rec"]=json.dumps(rec)
        self.log.debug("CREATING LINK INDEX ENTRIES")
        # Index links by from, guid, type, and value, and maybe to
        for f in ["from","to","guid","proptype","value","timestamp","creator","alive","replaced_by","complete"]:
            if f in rec:
                # Index values are normalized to lower-cased strings.
                if isinstance(rec[f],basestring):
                    self.link_index.add(f,rec[f].lower(),link)
                else:
                    self.link_index.add(f,str(rec[f]).lower(),link)
                self.log.debug("CREATED LINK INDEX ENTRY",f,rec[f])
        return link
def write_and_index_property(self,node,rec,primary_proptype,target_node,max_tries=3):
# Now create link to hold actual property and add the literal properties
success = False
tries = 0
while not success and tries < max_tries:
try:
link = node.create_relationship_to(target_node,primary_proptype,rec)
success = True
except Exception, e:
tries += 1
neoerrs = traceback.format_exc().split("\n")
time.sleep(0.25)
if not success:
self.errors.append("Too many Neo4J errors in relationship creation!")
self.errors.extend(neoerrs)
return None
link["rec"]=json.dumps(rec)
self.log.debug("CREATING LINK INDEX ENTRIES")
# Index links by from, guid, type, and value, and maybe to
batch = py2neo.neo4j.WriteBatch(self.ndb)
for f in ["from","to","guid","proptype","value","timestamp","creator","alive","replaced_by","complete"]:
if f in rec:
if isinstance(rec[f],basestring):
batch.add_indexed_relationship(self.link_index,f,rec[f].lower(),link)
else:
batch.add_indexed_relationship(self.link_index,f,str(rec[f]).lower(),link)
self.log.debug("CREATED LINK INDEX ENTRY",f,rec[f])
batch.submit()
return link
def get_entity(self,guid):
hits = self.node_index.get("guid",guid)
if hits:
return hits[0]
return None
def get_prop(self, guid):
    """Return the first link (property relationship) indexed under guid, or None."""
    self.log.debug("GET PROP BY GUID:", guid)
    matches = self.link_index.get("guid", guid)
    self.log.debug(matches)
    return matches[0] if matches else None
def form_cypher_query(self,constraints,limit,skip):
    """Build a Cypher query string that searches the link index for
    relationships matching `constraints`, with optional SKIP/LIMIT for
    pagination. Returns the query as a utf-8 encoded byte string.
    """
    # Normalize for non-strings and escape quotes for strings
    clean_constraints = copy.deepcopy(constraints)
    for k,v in clean_constraints.items():
        if isinstance(v,bool) or isinstance(v,int) or isinstance(v,float):
            # Index entries were stored as lowercased strings, so match that.
            clean_constraints[k]=str(clean_constraints[k]).lower()
        elif isinstance(v,basestring):
            clean_constraints[k] = v.replace('\\','\\\\').replace('"','\\"').lower()
    # A '*' in the value triggers a regex WHERE clause instead of an index
    # term, but only for property types that allow wildcards.
    wildcard_search=False
    if 'proptype' in clean_constraints and clean_constraints['proptype'] in lridb.wildcard_allowed_properties and 'value' in clean_constraints and "*" in clean_constraints['value']:
        value = clean_constraints['value']
        value = value.replace('\\','\\\\').replace('"','\\"')
        del clean_constraints['value']
        wildcard_search=True
    # First make a lucene query
    lq = ' AND '.join([k+':"'+v+'"' for k,v in clean_constraints.items() if isinstance(v,basestring)])
    self.log.debug("PROPERTY SEARCH LUCENE QUERY:",repr(lq))
    # And then a cypher query from that
    # (escape again because the lucene query is embedded inside a
    # double-quoted Cypher string literal)
    lq = lq.replace('\\','\\\\').replace('"','\\"')
    # If we are searching by name and we have no spaces in the
    # name, then let's do a wildcard search
    if wildcard_search:
        # (?i) makes the regex match case-insensitive.
        where = ' WHERE r.value =~ "(?i)%s"' % (value)
    else:
        where = ""
    #where = ''
    q = 'START r=relationship:link_index("%s") %s RETURN r' % (lq,where)
    #q = 'START r=relationship:link_index("%s") RETURN r' % (lq)
    if skip:
        q += " SKIP %d" % (skip)
    if limit:
        q += " LIMIT %d" % (limit)
    return q.encode('utf-8')
def link_search(self,constraints,limit=None,start=None,max_tries=3):
    """Search the link index via Cypher, returning a list of matching
    relationships (first column of each result row), or None on failure.

    `limit` and `start` provide pagination; the query is retried up to
    `max_tries` times to work around transient py2neo timeouts.
    """
    # We use cypher simply to support pagination
    q = self.form_cypher_query(constraints,limit,start)
    self.log.debug("LINK SEARCH CYPHER QUERY:",q)
    # We try a few times due to py2neo bug that causes timeouts
    success = False
    tries = 0
    neoerrs = []  # robustness: ensure bound even if max_tries <= 0
    while not success and tries < max_tries:
        try:
            hits, metadata = py2neo.cypher.execute(self.ndb,q)
            success = True
        except Exception:
            tries += 1
            neoerrs = traceback.format_exc().split("\n")
            # BUG FIX: the remaining-attempt count was hard-coded as (3-tries);
            # it now respects the max_tries parameter.
            self.log.debug("FAILING CYPHER QUERY =",repr(q),"-- TRYING %d more times." % (max_tries-tries))
            time.sleep(0.1)
    if not success:
        self.errors.append("Too many Neo4J errors in cypher query execution!")
        self.errors.extend(neoerrs)
        return None
    return [h[0] for h in hits] # Need only to return the first column
def update_property(self,oldrec=None,newrec={}):
    """Replace the existing property `oldrec` with a new property built by
    overlaying `newrec` on it. On success returns the new prop object and
    points the old property's 'replaced_by' at it; on failure returns None.

    NOTE(review): `newrec` has a mutable default argument; it is only read
    here, so this is harmless, but a None default would be safer.
    """
    self.log.debug("UPDATE PROP REC",oldrec)
    # The property type is immutable; refuse updates that try to change it.
    if "proptype" in newrec and oldrec["proptype"] != newrec["proptype"]:
        self.errors.append("UPDATE: Changing proptype is not allowed.")
        oldrec["internal"].append("UPDATE: Changing proptype is not allowed.")
        return None
    # Create the record for our replacement property
    finalrec={"proptype":oldrec["proptype"],
              "creator":oldrec["creator"]}
    # Carry forward the updatable fields, preferring values from newrec.
    for k in ["from","to","value","complete"]:
        if k in newrec:
            finalrec[k]=newrec[k]
        elif k in oldrec:
            finalrec[k]=oldrec[k]
    if "to" in finalrec and "from" in finalrec and "value" in finalrec:
        del finalrec["value"] # Can't be both link and literal
    if newrec.get("alive") == False:
        # This update is a property deletion
        finalrec["alive"] = False
    # Make the new property
    self.log.debug("MAKING REPLACEMENT PROP REC",oldrec)
    newp = prop.prop(rec=finalrec,db=self)
    if newp.is_valid:
        self.log.debug("CREATE UPDATED PROP:",finalrec)
        newp.create(is_update=True)
        if newp.errors:
            self.errors.append("PROPERTY UPDATE: "+(" ".join(newp.errors)))
            return None
        self.log.debug("CREATE UPDATED PROP FINAL:",newp.link["rec"])
        # Point old property to its replacement
        oldrec["internal"]["replaced_by"] = newp.link["guid"]
        #oldrec["internal"]["alive"] = False
        oldrec["replaced_by"] = newp.link["guid"]
        #oldrec["alive"] = False -- Don't make old property dead
        # Re-serialize the old record (minus bookkeeping keys) onto its link.
        oldrec["internal"]["rec"] = json.dumps(dict([(k,v) for k,v in oldrec.items() if k not in ["internal","rec"]]))
        # Update our index
        self.log.debug(oldrec)
        self.link_index.remove(key="replaced_by",entity=oldrec["internal"])
        self.link_index.add(key="replaced_by",value=newp.rec["guid"],entity=oldrec["internal"])
        #self.link_index.remove(key="alive",value="true",entity=oldrec["internal"])
        #self.link_index.add(key="alive",value="false",entity=oldrec["internal"])
        return newp
    else:
        self.errors.append("PROPERTY UPDATE: "+(" ".join(newp.errors)))
        oldrec["internal"].errors.append("PROPERTY UPDATE: "+(" ".join(newp.errors)))
        return None
def destroy_node(self,n):
    """Permanently delete node `n` from the graph database."""
    n.delete()
def destroy_link(self,n):
    """Permanently delete relationship (link) `n` from the graph database."""
    # BUG FIX: previously this called `l.delete()` on an undefined name `l`,
    # which raised NameError on every call; delete the argument instead.
    n.delete()
|
Since 1978, our goal has been to provide patients, physicians, and other health care professionals with superior service from referral to post-discharge follow-up.
HCR is committed to providing the best quality patient care. The Centers for Medicare and Medicaid Services (CMS) tracks patient outcomes for all certified home health agencies in the country.
For more information, please visit Medicare's Home Health Compare website.
Enjoying the main benefits of HCR is as easy as telling your health care provider that you want HCR for home care services. Or simply call us to start services.
|
"""
Support for Freebox devices (Freebox v6 and Freebox mini 4K).
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/freebox/
"""
import logging
import socket
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_FREEBOX
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.discovery import async_load_platform
# Library dependency installed by Home Assistant for this component.
REQUIREMENTS = ['aiofreepybox==0.0.6']

_LOGGER = logging.getLogger(__name__)

# Component identifiers: hass.data key and the on-disk auth token file name.
DOMAIN = "freebox"
DATA_FREEBOX = DOMAIN
FREEBOX_CONFIG_FILE = 'freebox.conf'

# Optional YAML configuration (host/port of the Freebox). When omitted,
# the component relies on zeroconf discovery instead (see async_setup).
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_PORT): cv.port
    })
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the Freebox component."""
    conf = config.get(DOMAIN)

    async def discovery_dispatch(service, discovery_info):
        # Static YAML configuration takes precedence over discovery.
        if conf is not None:
            return
        props = discovery_info.get('properties', {})
        host = props.get('api_domain')
        port = props.get('https_port')
        _LOGGER.info("Discovered Freebox server: %s:%s", host, port)
        await async_setup_freebox(hass, config, host, port)

    discovery.async_listen(hass, SERVICE_FREEBOX, discovery_dispatch)

    if conf is not None:
        await async_setup_freebox(
            hass, config, conf.get(CONF_HOST), conf.get(CONF_PORT))

    return True
async def async_setup_freebox(hass, config, host, port):
    """Start up the Freebox component platforms."""
    from aiofreepybox import Freepybox
    from aiofreepybox.exceptions import HttpRequestError

    # Application descriptor presented to the Freebox when pairing/authorizing.
    app_desc = {
        'app_id': 'hass',
        'app_name': 'Home Assistant',
        'app_version': '0.65',
        'device_name': socket.gethostname()
    }

    # Auth token is persisted in the HA config directory between restarts.
    token_file = hass.config.path(FREEBOX_CONFIG_FILE)
    api_version = 'v1'

    fbx = Freepybox(
        app_desc=app_desc,
        token_file=token_file,
        api_version=api_version)

    try:
        await fbx.open(host, port)
    except HttpRequestError:
        _LOGGER.exception('Failed to connect to Freebox')
    else:
        # Connection succeeded: share the client and load the platforms.
        hass.data[DATA_FREEBOX] = fbx

        hass.async_create_task(async_load_platform(
            hass, 'sensor', DOMAIN, {}, config))
        hass.async_create_task(async_load_platform(
            hass, 'device_tracker', DOMAIN, {}, config))

        async def close_fbx(event):
            """Close Freebox connection on HA Stop."""
            await fbx.close()

        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_fbx)
|
Information: Dora and Diego watched the FIFA World Cup on TV and are quite taken with this sport, and so they have decided to give football a try. Unfortunately, they cannot go to a football field, so they have to make use of the space in the backyard. But it is quite messy, and they will not be able to enjoy their football there. Can you clean up the place for Dora and Diego so that they can kick the ball around and have a great time playing this great game? Have fun playing this cleaning game!
<a href="http://www.toongamesonline.com/play/Dora_And_Diego_Playing_Football.html" title="Dora And Diego Playing Football"><b>Dora And Diego Playing Football</b></a><br /><object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" codebase="http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=7,0,19,0" width=" 800" height=" 600"><param name="movie" value="
|
import logging
logger = logging.getLogger('MangaLoader.data')
# -------------------------------------------------------------------------------------------------
# Manga class
# -------------------------------------------------------------------------------------------------
class Manga(object):
    """A manga series: a named, ordered collection of chapters."""

    def __init__(self, name):
        self.name = name
        self.chapter_list = []   # Chapter objects in insertion order
        self.url = ''
        self.internalName = ''   # loader-internal identifier
        self.cover_url = ''
        self.is_open = None      # None = unknown; True/False once determined

    def __str__(self):
        return str(self.name)

    def add_chapter(self, chapter):
        """Append `chapter` and set its back-reference to this manga."""
        chapter.manga = self
        self.chapter_list.append(chapter)

    def get_chapter(self, number):
        """Return the chapter with chapterNo == number, or None."""
        for chapter in self.chapter_list:
            if chapter.chapterNo == number:
                return chapter
        return None

    def get_chapters(self, numbers):
        """Return all chapters whose chapterNo is in `numbers`.

        `numbers` may be a single chapter number or an iterable of numbers.
        """
        # BUG FIX: the old test `chapterNo == numbers or chapterNo in numbers`
        # raised TypeError when `numbers` was a non-matching scalar (the `in`
        # operator on an int). Normalize scalars to a one-element set instead.
        try:
            wanted = set(numbers)
        except TypeError:
            wanted = {numbers}
        return [c for c in self.chapter_list if c.chapterNo in wanted]
# -------------------------------------------------------------------------------------------------
# Chapter class
# -------------------------------------------------------------------------------------------------
class Chapter(object):
    """A single chapter of a manga, holding its page images."""

    def __init__(self, manga, chapter_no):
        self.manga = manga
        self.chapterNo = chapter_no
        self.chapterTitle = ''
        self.url = ''
        self.image_list = []   # Image objects in insertion order
        self.text = ''
        self.title = ''

    def __str__(self):
        if self.manga is not None:
            return str(self.manga) + ' ' + str(self.chapterNo)
        else:
            return str(self.chapterNo)

    def add_image(self, image):
        """Append `image` and set its back-reference to this chapter."""
        image.chapter = self
        self.image_list.append(image)

    def get_image(self, number):
        """Return the image with imageNo == number, or None."""
        for image in self.image_list:
            if image.imageNo == number:
                return image
        return None

    def get_images(self, numbers):
        """Return all images whose imageNo is in `numbers` (scalar or iterable)."""
        # BUG FIX: same defect as Manga.get_chapters -- a non-matching scalar
        # argument raised TypeError on the `in` test. Normalize scalars first.
        try:
            wanted = set(numbers)
        except TypeError:
            wanted = {numbers}
        return [img for img in self.image_list if img.imageNo in wanted]
# -------------------------------------------------------------------------------------------------
# Image class
# -------------------------------------------------------------------------------------------------
class Image(object):
    """A single page image belonging to a chapter."""

    def __init__(self, chapter, image_no):
        self.chapter = chapter
        self.imageNo = image_no
        self.url = None

    def __str__(self):
        prefix = '' if self.chapter is None else str(self.chapter) + ' - '
        return prefix + str(self.imageNo)
|
You've got more restaurants than days of the week here – that includes a whopping seven à la cartes and two buffets. Bar-wise, it's the same story, with 14 to pick from. There's loads of choice on the pool front, too. Plus, there's an aquapark with a healthy dose of slides. And you can burn off the calories even more with activities like tennis, badminton, bowling and minigolf.
Sports like tennis, beach volleyball and badminton crop up on the activities menu. You can also join in with aerobics classes, have a go at minigolf, or take the family to the onsite bowling alley.
There are clubs for kids and teens, which put on stuff like pizza-making and themed days. They also come equipped with a soft play area, a trampoline, a sandpit and a playground. Elsewhere, there's a funfair with rides.
Windsurfing takes place during the summer.
During the summer, canoeing is available.
Running as part of the entertainment programme.
With floodlighting available at an extra charge.
As well as a duo of buffet places, there are seven swanky à la cartes specialising in the likes of Japanese*, Turkish and Italian* cuisine. A handful of them are al fresco, too.
Little ones will love the kids' section in the buffet restaurant – dishes are laid out on a mini-train. In between meal times, a patisserie, a Turkish pancake hut and a 24-hour international place keep tummies from rumbling.
For breakfast, lunch and dinner. Also offering a kids' menu.
Adults and children are seated separately.
Available to everybody over 16 years of age.
The disco is available to everybody over 16 years of age.
Please make a reservation. One visit to each à la carte restaurant is included.
Please make a reservation. One visit to each à la carte restaurant is included. Open during the summer.
A la carte dining is available at the Chinese, Greek, Italian, Japanese, Mexican, seafood, steak and Turkish restaurants.
A traditional Turkish bath is slotted in with a sauna and steam room. There’s a fitness centre, too, plus an indoor pool, a vitamin bar, and a spa* with a menu of massages.
The fitness centre is available to everybody over the age of 16.
You’ve got a fortnight’s worth of watering holes here. You can chill out in the coffee bar's garden in the summer, sink fresh juices from the vitamin bar or go for a boogie at the disco bar.
There's a top-notch roster of live shows and DJ sessions, as well.
A variety of shows are performed as part of the entertainment programme. Bingo and karaoke also take place in the evenings.
Suitable for kids aged 13 to 15 years running from July to August.
There are tonnes of pools to pick from, including a gigantic 1,800 square-metre main one, an adults-only number and an aquapark with chutes and a giant cobra-shaped slide.
If you splash out on a swim-up villa, you get even more pools thrown in. Plus, the hotel’s right by a beach, where the jetties are lined with teak daybeds.
With sunbeds, parasols and towels provided.
With slides, tubes and splash zones. The aqua park usually closes in the winter months.
With parasols, sunbeds and towels provided.
With towels, sunbeds and parasols provided. 1 outdoor pool remains open in winter.
These lagoon-style pools are exclusively for those staying in a 1-bedroom apartment with direct access to the pool.
There's an internet café in the hotel.
Open in the summer months, with one for ages 16 years and over.
|
""" types.py
"""
from __future__ import division
import abc
import base64
import binascii
import bisect
import codecs
import collections
import operator
import os
import re
import warnings
import weakref
from enum import EnumMeta
from enum import IntEnum
import six
from ._author import __version__
from .decorators import sdproperty
from .errors import PGPError
# Public API of this module.
__all__ = ['Armorable',
           'ParentRef',
           'PGPObject',
           'Field',
           'Header',
           'MetaDispatchable',
           'Dispatchable',
           'SignatureVerification',
           'FlagEnumMeta',
           'FlagEnum',
           'Fingerprint',
           'SorteDeque']

if six.PY2:
    # Python 2 compatibility shims: FileNotFoundError does not exist there
    # (IOError is the closest equivalent), and re.ASCII is made a no-op flag.
    FileNotFoundError = IOError
    re.ASCII = 0
class Armorable(six.with_metaclass(abc.ABCMeta)):
    """Mixin providing ASCII-armor (RFC 4880 Radix-64) support:
    parsing armored blocks, emitting them via __str__, and the CRC-24
    checksum used by the armor format.
    """
    # CRC-24 initializer and generator polynomial (RFC 4880 section 6.1).
    __crc24_init = 0x0B704CE
    __crc24_poly = 0x1864CFB

    # Template used by __str__ to emit an armored block.
    __armor_fmt = '-----BEGIN PGP {block_type}-----\n' \
                  '{headers}\n' \
                  '{packet}\n' \
                  '={crc}\n' \
                  '-----END PGP {block_type}-----\n'

    # the re.VERBOSE flag allows for:
    # - whitespace is ignored except when in a character class or escaped
    # - anything after a '#' that is not escaped or in a character class is ignored, allowing for comments
    __armor_regex = re.compile(r"""# This capture group is optional because it will only be present in signed cleartext messages
                         (^-{5}BEGIN\ PGP\ SIGNED\ MESSAGE-{5}(?:\r?\n)
                          (Hash:\ (?P<hashes>[A-Za-z0-9\-,]+)(?:\r?\n){2})?
                          (?P<cleartext>(.*\r?\n)*(.*(?=\r?\n-{5})))(?:\r?\n)
                         )?
                         # armor header line; capture the variable part of the magic text
                         ^-{5}BEGIN\ PGP\ (?P<magic>[A-Z0-9 ,]+)-{5}(?:\r?\n)
                         # try to capture all the headers into one capture group
                         # if this doesn't match, m['headers'] will be None
                         (?P<headers>(^.+:\ .+(?:\r?\n))+)?(?:\r?\n)?
                         # capture all lines of the body, up to 76 characters long,
                         # including the newline, and the pad character(s)
                         (?P<body>([A-Za-z0-9+/]{1,75}={,2}(?:\r?\n))+)
                         # capture the armored CRC24 value
                         ^=(?P<crc>[A-Za-z0-9+/]{4})(?:\r?\n)
                         # finally, capture the armor tail line, which must match the armor header line
                         ^-{5}END\ PGP\ (?P=magic)-{5}(?:\r?\n)?
                         """, flags=re.MULTILINE | re.VERBOSE)

    @property
    def charset(self):
        # Text-payload encoding, taken from the armor headers; defaults to utf-8.
        return self.ascii_headers.get('Charset', 'utf-8')

    @charset.setter
    def charset(self, encoding):
        # Normalize to the codec's canonical name; raises LookupError if unknown.
        self.ascii_headers['Charset'] = codecs.lookup(encoding).name

    @staticmethod
    def is_ascii(text):
        # True if `text` contains only printable ASCII plus CR/LF.
        if isinstance(text, six.string_types):
            return bool(re.match(r'^[ -~\r\n]*$', text, flags=re.ASCII))

        if isinstance(text, (bytes, bytearray)):
            return bool(re.match(br'^[ -~\r\n]*$', text, flags=re.ASCII))

        raise TypeError("Expected: ASCII input of type str, bytes, or bytearray")  # pragma: no cover

    @staticmethod
    def is_armor(text):
        """
        Whether the ``text`` provided is an ASCII-armored PGP block.
        :param text: A possible ASCII-armored PGP block.
        :raises: :py:exc:`TypeError` if ``text`` is not a ``str``, ``bytes``, or ``bytearray``
        :returns: Whether the text is ASCII-armored.
        """
        if isinstance(text, (bytes, bytearray)):  # pragma: no cover
            text = text.decode('latin-1')

        return Armorable.__armor_regex.search(text) is not None

    @staticmethod
    def ascii_unarmor(text):
        """
        Takes an ASCII-armored PGP block and returns the decoded byte value.
        :param text: An ASCII-armored PGP block, to un-armor.
        :raises: :py:exc:`ValueError` if ``text`` did not contain an ASCII-armored PGP block.
        :raises: :py:exc:`TypeError` if ``text`` is not a ``str``, ``bytes``, or ``bytearray``
        :returns: A ``dict`` containing information from ``text``, including the de-armored data.
        It can contain the following keys: ``magic``, ``headers``, ``hashes``, ``cleartext``, ``body``, ``crc``.
        """
        m = {'magic': None, 'headers': None, 'body': bytearray(), 'crc': None}
        # Non-ASCII input is assumed to already be binary (un-armored) data.
        if not Armorable.is_ascii(text):
            m['body'] = bytearray(text)
            return m

        if isinstance(text, (bytes, bytearray)):  # pragma: no cover
            text = text.decode('latin-1')

        m = Armorable.__armor_regex.search(text)

        if m is None:  # pragma: no cover
            raise ValueError("Expected: ASCII-armored PGP data")

        m = m.groupdict()

        if m['hashes'] is not None:
            m['hashes'] = m['hashes'].split(',')

        if m['headers'] is not None:
            m['headers'] = collections.OrderedDict(re.findall('^(?P<key>.+): (?P<value>.+)$\n?', m['headers'], flags=re.MULTILINE))

        if m['body'] is not None:
            try:
                m['body'] = bytearray(base64.b64decode(m['body'].encode()))

            except (binascii.Error, TypeError) as ex:
                six.raise_from(PGPError, ex)

        if m['crc'] is not None:
            m['crc'] = Header.bytes_to_int(base64.b64decode(m['crc'].encode()))
            # A CRC mismatch is reported as a warning, not an error.
            if Armorable.crc24(m['body']) != m['crc']:
                warnings.warn('Incorrect crc24', stacklevel=3)

        return m

    @staticmethod
    def crc24(data):
        # CRC24 computation, as described in the RFC 4880 section on Radix-64 Conversions
        #
        # The checksum is a 24-bit Cyclic Redundancy Check (CRC) converted to
        # four characters of radix-64 encoding by the same MIME base64
        # transformation, preceded by an equal sign (=). The CRC is computed
        # by using the generator 0x864CFB and an initialization of 0xB704CE.
        # The accumulation is done on the data before it is converted to
        # radix-64, rather than on the converted data.
        crc = Armorable.__crc24_init

        if not isinstance(data, bytearray):
            # ensure we iterate over integer byte values on both py2 and py3
            data = six.iterbytes(data)

        for b in data:
            crc ^= b << 16

            for i in range(8):
                crc <<= 1
                if crc & 0x1000000:
                    crc ^= Armorable.__crc24_poly

        return crc & 0xFFFFFF

    @abc.abstractproperty
    def magic(self):
        """The magic string identifier for the current PGP type"""

    @classmethod
    def from_file(cls, filename):
        """Construct an instance by parsing the contents of `filename`."""
        with open(filename, 'rb') as file:
            obj = cls()
            data = bytearray(os.path.getsize(filename))
            file.readinto(data)

        po = obj.parse(data)

        # parse() may return leftover data; if so, hand it back to the caller.
        if po is not None:
            return (obj, po)

        return obj  # pragma: no cover

    @classmethod
    def from_blob(cls, blob):
        """Construct an instance by parsing an in-memory blob (str or bytes)."""
        obj = cls()

        if (not isinstance(blob, six.binary_type)) and (not isinstance(blob, bytearray)):
            po = obj.parse(bytearray(blob, 'latin-1'))

        else:
            po = obj.parse(bytearray(blob))

        if po is not None:
            return (obj, po)

        return obj  # pragma: no cover

    def __init__(self):
        super(Armorable, self).__init__()
        self.ascii_headers = collections.OrderedDict()
        self.ascii_headers['Version'] = 'PGPy v' + __version__  # Default value

    def __str__(self):
        # Base64-encode the packet bytes and wrap at 64 characters per line.
        payload = base64.b64encode(self.__bytes__()).decode('latin-1')
        payload = '\n'.join(payload[i:(i + 64)] for i in range(0, len(payload), 64))

        return self.__armor_fmt.format(
            block_type=self.magic,
            headers=''.join('{key}: {val}\n'.format(key=key, val=val) for key, val in self.ascii_headers.items()),
            packet=payload,
            crc=base64.b64encode(PGPObject.int_to_bytes(self.crc24(self.__bytes__()), 3)).decode('latin-1')
        )

    def __copy__(self):
        obj = self.__class__()
        obj.ascii_headers = self.ascii_headers.copy()

        return obj
class ParentRef(object):
    """Mixin that stores a weak reference to a parent object when possible.

    Values that cannot be weakly referenced (e.g. None) are stored directly.
    """

    @property
    def _parent(self):
        ref = self.__parent
        # Dereference lazily; a weakref whose referent died yields None.
        return ref() if isinstance(ref, weakref.ref) else ref

    @_parent.setter
    def _parent(self, parent):
        # Prefer a weak reference to avoid reference cycles; fall back to a
        # strong reference for non-weakrefable values.
        try:
            self.__parent = weakref.ref(parent)
        except TypeError:
            self.__parent = parent

    @property
    def parent(self):
        """Read-only public view of the parent object."""
        return self._parent

    def __init__(self):
        super(ParentRef, self).__init__()
        self._parent = None
class PGPObject(six.with_metaclass(abc.ABCMeta, object)):
    """Abstract base for all parseable PGP objects; provides the shared
    int/bytes/text conversion helpers used throughout the package.
    """
    __metaclass__ = abc.ABCMeta

    @staticmethod
    def int_byte_len(i):
        # Number of bytes needed to represent the non-negative integer i.
        return (i.bit_length() + 7) // 8

    @staticmethod
    def bytes_to_int(b, order='big'):  # pragma: no cover
        """convert bytes to integer"""
        if six.PY2:
            # save the original type of b without having to copy any data
            _b = b.__class__()
            if order != 'little':
                b = reversed(b)
            if not isinstance(_b, bytearray):
                b = six.iterbytes(b)
            # accumulate little-endian: byte i contributes c << (i * 8)
            return sum(c << (i * 8) for i, c in enumerate(b))

        return int.from_bytes(b, order)

    @staticmethod
    def int_to_bytes(i, minlen=1, order='big'):  # pragma: no cover
        """convert integer to bytes"""
        # pad to at least minlen bytes, and always emit at least one byte
        blen = max(minlen, PGPObject.int_byte_len(i), 1)

        if six.PY2:
            # bit-shift amounts in output byte order
            r = iter(_ * 8 for _ in (range(blen) if order == 'little' else range(blen - 1, -1, -1)))
            return bytes(bytearray((i >> c) & 0xff for c in r))

        return i.to_bytes(blen, order)

    @staticmethod
    def text_to_bytes(text):
        # None passes through so optional fields stay optional.
        if text is None:
            return text

        # if we got bytes, just return it
        if isinstance(text, (bytearray, six.binary_type)):
            return text

        # if we were given a unicode string, or if we translated the string into utf-8,
        # we know that Python already has it in utf-8 encoding, so we can now just encode it to bytes
        return text.encode('utf-8')

    @staticmethod
    def bytes_to_text(text):
        # None and already-decoded text pass through unchanged.
        if text is None or isinstance(text, six.text_type):
            return text

        return text.decode('utf-8')

    @abc.abstractmethod
    def parse(self, packet):
        """this method is too abstract to understand"""

    @abc.abstractmethod
    def __bytearray__(self):
        """
        Returns the contents of concrete subclasses in a binary format that can be understood by other OpenPGP
        implementations
        """

    def __bytes__(self):
        """
        Return the contents of concrete subclasses in a binary format that can be understood by other OpenPGP
        implementations
        """
        # this is what all subclasses will do anyway, so doing this here we can reduce code duplication significantly
        return bytes(self.__bytearray__())
class Field(PGPObject):
    """A PGPObject that additionally knows its own serialized length."""

    @abc.abstractmethod
    def __len__(self):
        """Return the length of the output of __bytes__"""
class Header(Field):
    """Packet/subpacket header handling both new-format (RFC 4880 4.2.2)
    and old-format (4.2.1) length encodings.
    """

    @staticmethod
    def encode_length(l, nhf=True, llen=1):
        # Serialize a body length; `nhf` selects new- vs old-format encoding,
        # `llen` is the old-format length-field size in bytes.
        def _new_length(l):
            if 192 > l:
                # one-octet length
                return Header.int_to_bytes(l)

            elif 8384 > l:
                # two-octet length: ((b0 - 192) << 8) + b1 + 192 == l
                elen = ((l & 0xFF00) + (192 << 8)) + ((l & 0xFF) - 192)
                return Header.int_to_bytes(elen, 2)

            # five-octet length: 0xFF marker followed by a 4-byte big-endian value
            return b'\xFF' + Header.int_to_bytes(l, 4)

        def _old_length(l, llen):
            return Header.int_to_bytes(l, llen) if llen > 0 else b''

        return _new_length(l) if nhf else _old_length(l, llen)

    @sdproperty
    def length(self):
        return self._len

    @length.register(int)
    def length_int(self, val):
        self._len = val

    @length.register(six.binary_type)
    @length.register(bytearray)
    def length_bin(self, val):
        # Parse the length out of a buffer, consuming the length octets in place.
        def _new_len(b):
            def _parse_len(a, offset=0):
                # returns (the parsed length, size of length field, whether the length was of partial type)
                # NOTE(review): `a` and the closure variable `b` refer to the
                # same buffer at every call site, so the mixed usage below is
                # harmless, but confirm before refactoring the names.
                fo = a[offset]
                if 192 > fo:
                    return (self.bytes_to_int(a[offset:offset + 1]), 1, False)

                elif 224 > fo:  # >= 192 is implied
                    dlen = self.bytes_to_int(b[offset:offset + 2])
                    return (((dlen - (192 << 8)) & 0xFF00) + ((dlen & 0xFF) + 192), 2, False)

                elif 255 > fo:  # >= 224 is implied
                    # this is a partial-length header
                    return (1 << (fo & 0x1f), 1, True)

                elif 255 == fo:
                    return (self.bytes_to_int(b[offset + 1:offset + 5]), 5, False)

                else:  # pragma: no cover
                    raise ValueError("Malformed length: 0x{:02x}".format(fo))

            part_len, size, partial = _parse_len(b)
            del b[:size]

            if partial:
                # accumulate successive partial-length segments until a
                # final (non-partial) length is reached
                total = part_len
                while partial:
                    part_len, size, partial = _parse_len(b, total)
                    del b[total:total + size]
                    total += part_len
                self._len = total

            else:
                self._len = part_len

        def _old_len(b):
            if self.llen > 0:
                self._len = self.bytes_to_int(b[:self.llen])
                del b[:self.llen]

            else:  # pragma: no cover
                # indeterminate length (old-format length type 3)
                self._len = 0

        _new_len(val) if self._lenfmt == 1 else _old_len(val)

    @sdproperty
    def llen(self):
        # Size in bytes of the encoded length field for the current length.
        l = self.length
        lf = self._lenfmt
        if lf == 1:
            # new-format length
            if 192 > l:
                return 1

            elif 8384 > self.length:  # >= 192 is implied
                return 2

            else:
                return 5

        else:
            # old-format length
            ##TODO: what if _llen needs to be (re)computed?
            return self._llen

    @llen.register(int)
    def llen_int(self, val):
        # Old-format length-type bits (0-3) map to length-field sizes.
        if self._lenfmt == 0:
            self._llen = {0: 1, 1: 2, 2: 4, 3: 0}[val]

    def __init__(self):
        super(Header, self).__init__()
        self._len = 1
        self._llen = 1
        self._lenfmt = 1      # 1 = new-format, 0 = old-format
        self._partial = False
class MetaDispatchable(abc.ABCMeta):
    """
    MetaDispatchable is a metaclass for objects that subclass Dispatchable
    """

    _roots = set()
    """
    _roots is a set of all currently registered RootClass class objects
    A RootClass is successfully registered if the following things are true:
    - it inherits (directly or indirectly) from Dispatchable
    - __typeid__ == -1
    """

    _registry = {}
    """
    _registry is the Dispatchable class registry. It uses the following format:
    { (RootClass, None): OpaqueClass }:
    denotes the default ("opaque") for a given RootClass.
    An OpaqueClass is successfully registered as such provided the following conditions are met:
    - it inherits directly from a RootClass
    - __typeid__ is None
    { (RootClass, TypeID): SubClass }:
    denotes the class that handles the type given in TypeID
    a SubClass is successfully registered as such provided the following conditions are met:
    - it inherits (directly or indirectly) from a RootClass
    - __typeid__ is a positive int
    - the given typeid is not already registered
    { (RootClass, TypeID): VerSubClass }:
    denotes that a given TypeID has multiple versions, and that this is class' subclasses handle those.
    A VerSubClass is registered identically to a normal SubClass.
    { (RootClass, TypeID, Ver): VerSubClass }:
    denotes the class that handles the type given in TypeID and the version of that type given in Ver
    a Versioned SubClass is successfully registered as such provided the following conditions are met:
    - it inherits from a VerSubClass
    - __ver__ > 0
    - the given typeid/ver combination is not already registered
    """

    def __new__(mcs, name, bases, attrs):  # NOQA
        # Create the class normally, then register it per the rules above.
        ncls = super(MetaDispatchable, mcs).__new__(mcs, name, bases, attrs)

        # Skip registration while __typeid__ is still the abstract method.
        if not hasattr(ncls.__typeid__, '__isabstractmethod__'):
            if ncls.__typeid__ == -1 and not issubclass(ncls, tuple(MetaDispatchable._roots)):
                # this is a root class
                MetaDispatchable._roots.add(ncls)

            elif issubclass(ncls, tuple(MetaDispatchable._roots)) and ncls.__typeid__ != -1:
                for rcls in [ root for root in MetaDispatchable._roots if issubclass(ncls, root) ]:
                    # first registration wins for a given (root, typeid) pair
                    if (rcls, ncls.__typeid__) not in MetaDispatchable._registry:
                        MetaDispatchable._registry[(rcls, ncls.__typeid__)] = ncls

                    if (ncls.__ver__ is not None and ncls.__ver__ > 0 and
                            (rcls, ncls.__typeid__, ncls.__ver__) not in MetaDispatchable._registry):
                        MetaDispatchable._registry[(rcls, ncls.__typeid__, ncls.__ver__)] = ncls

        # finally, return the new class object
        return ncls

    def __call__(cls, packet=None):  # NOQA
        def _makeobj(cls):
            # bypass metaclass dispatch: construct exactly `cls`
            obj = object.__new__(cls)
            obj.__init__()
            return obj

        if packet is not None:
            # Resolve the root class this call should dispatch under.
            if cls in MetaDispatchable._roots:
                rcls = cls

            elif issubclass(cls, tuple(MetaDispatchable._roots)):  # pragma: no cover
                rcls = next(root for root in MetaDispatchable._roots if issubclass(cls, root))

            ##TODO: else raise an exception of some kind, but this should never happen

            # Parse the header so we can look up the concrete handler class.
            header = rcls.__headercls__()
            header.parse(packet)

            ncls = None
            if (rcls, header.typeid) in MetaDispatchable._registry:
                ncls = MetaDispatchable._registry[(rcls, header.typeid)]

                if ncls.__ver__ == 0:
                    # versioned type: may need to re-parse with the more
                    # specific header class before the versioned lookup
                    if header.__class__ != ncls.__headercls__:
                        nh = ncls.__headercls__()
                        nh.__dict__.update(header.__dict__)
                        try:
                            nh.parse(packet)
                        except Exception as ex:
                            six.raise_from(PGPError, ex)
                        header = nh

                    if (rcls, header.typeid, header.version) in MetaDispatchable._registry:
                        ncls = MetaDispatchable._registry[(rcls, header.typeid, header.version)]

                    else:  # pragma: no cover
                        ncls = None

            if ncls is None:
                # fall back to the root's registered opaque class
                ncls = MetaDispatchable._registry[(rcls, None)]

            obj = _makeobj(ncls)
            obj.header = header
            try:
                obj.parse(packet)
            except Exception as ex:
                six.raise_from(PGPError, ex)

        else:
            obj = _makeobj(cls)

        return obj
class Dispatchable(six.with_metaclass(MetaDispatchable, PGPObject)):
    """Base for parseable objects whose concrete class is selected by
    MetaDispatchable from the parsed header's type id (and version).
    """
    __metaclass__ = MetaDispatchable

    @abc.abstractproperty
    def __headercls__(self):  # pragma: no cover
        return False

    @abc.abstractproperty
    def __typeid__(self):  # pragma: no cover
        return False

    # Versioned subclasses override this with an int > 0.
    __ver__ = None
class SignatureVerification(object):
    """Returned by :py:meth:`PGPKey.verify`.

    Can be compared directly as a boolean to determine whether or not the
    specified signature verified.
    """
    _sigsubj = collections.namedtuple('sigsubj', ['verified', 'by', 'signature', 'subject'])

    @property
    def good_signatures(self):
        """
        A generator yielding namedtuples of all signatures that were successfully verified
        in the operation that returned this instance. The namedtuple has the following attributes:
        ``sigsubj.verified`` - ``bool`` of whether the signature verified successfully or not.
        ``sigsubj.by`` - the :py:obj:`~pgpy.PGPKey` that was used in this verify operation.
        ``sigsubj.signature`` - the :py:obj:`~pgpy.PGPSignature` that was verified.
        ``sigsubj.subject`` - the subject that was verified using the signature.
        """
        # Idiom fix: yield lazily instead of materializing a throwaway list.
        for s in self._subjects:
            if s.verified:
                yield s

    @property
    def bad_signatures(self):  # pragma: no cover
        """
        A generator yielding namedtuples of all signatures that were not verified
        in the operation that returned this instance. The namedtuple has the following attributes:
        ``sigsubj.verified`` - ``bool`` of whether the signature verified successfully or not.
        ``sigsubj.by`` - the :py:obj:`~pgpy.PGPKey` that was used in this verify operation.
        ``sigsubj.signature`` - the :py:obj:`~pgpy.PGPSignature` that was verified.
        ``sigsubj.subject`` - the subject that was verified using the signature.
        """
        for s in self._subjects:
            if not s.verified:
                yield s

    def __init__(self):
        super(SignatureVerification, self).__init__()
        self._subjects = []

    def __contains__(self, item):
        # True if `item` is any recorded signature or subject.
        return item in {ii for i in self._subjects for ii in [i.signature, i.subject]}

    def __len__(self):
        return len(self._subjects)

    def __bool__(self):
        # Vacuously True when no subjects have been recorded (all() of empty).
        return all(s.verified for s in self._subjects)

    def __nonzero__(self):
        # Python 2 truthiness hook; delegates to __bool__.
        return self.__bool__()

    def __and__(self, other):
        """Absorb another SignatureVerification's results into this one.

        NOTE: mutates and returns self rather than producing a new instance.
        """
        if not isinstance(other, SignatureVerification):
            raise TypeError(type(other))

        self._subjects += other._subjects
        return self

    def __repr__(self):
        return "<SignatureVerification({verified})>".format(verified=str(bool(self)))

    def add_sigsubj(self, signature, by, subject=None, verified=False):
        """Record one (signature, key, subject, verified) result."""
        self._subjects.append(self._sigsubj(verified, by, signature, subject))
class FlagEnumMeta(EnumMeta):
    """EnumMeta subclass that lets a flag-style IntEnum class be ANDed
    against an int bitmask, yielding the set of members whose values
    intersect the mask."""

    def __and__(self, other):
        matching = set()
        for member in self:
            if member.value & other:
                matching.add(member)
        return matching

    def __rand__(self, other):  # pragma: no cover
        return self & other
if six.PY2:
    # Python 2: attach the metaclass the py2 way.
    class FlagEnum(IntEnum):
        __metaclass__ = FlagEnumMeta

else:
    # Python 3: build the class by calling the metaclass directly, because
    # the py3 `metaclass=` keyword syntax is a SyntaxError under py2.
    namespace = FlagEnumMeta.__prepare__('FlagEnum', (IntEnum,))
    FlagEnum = FlagEnumMeta('FlagEnum', (IntEnum,), namespace)
class Fingerprint(str):
    """
    A subclass of ``str``. Can be compared using == and != to ``str``, ``unicode``, and other :py:obj:`Fingerprint` instances.
    Primarily used as a key for internal dictionaries, so it ignores spaces when comparing and hashing
    """

    @property
    def keyid(self):
        """The last 16 hex digits of the fingerprint, spaces removed."""
        return str(self).replace(' ', '')[-16:]

    @property
    def shortid(self):
        """The last 8 hex digits of the fingerprint, spaces removed."""
        return str(self).replace(' ', '')[-8:]

    def __new__(cls, content):
        # Fingerprints are immutable; reuse an existing instance as-is.
        if isinstance(content, Fingerprint):
            return content

        # validate input before continuing: this should be a string of 40 hex digits
        content = content.upper().replace(' ', '')
        if not bool(re.match(r'^[A-F0-9]{40}$', content)):
            raise ValueError("Expected: String of 40 hex digits")

        # canonical display form: ten 4-digit groups separated by single
        # spaces, with a double space between the two halves, e.g.
        # "AAAA BBBB CCCC DDDD EEEE  FFFF 0000 1111 2222 3333"
        separators = [' ' if i != 4 else '  ' for i in range(10)]
        groups = [''.join(g) for g in six.moves.zip_longest(*[iter(content)] * 4)]
        content = ''.join(j for i in six.moves.zip_longest(groups, separators, fillvalue='') for j in i).strip()

        return str.__new__(cls, content)

    def __eq__(self, other):
        if isinstance(other, Fingerprint):
            return str(self) == str(other)

        if isinstance(other, (six.text_type, bytes, bytearray)):
            if isinstance(other, (bytes, bytearray)):  # pragma: no cover
                other = other.decode('latin-1')

            them = str(other).replace(' ', '')
            # Accept the full fingerprint, the key id, or the short id.
            return self.replace(' ', '') == them or self.keyid == them or self.shortid == them

        return False  # pragma: no cover

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Hash ignores spacing, matching __eq__'s space-insensitive behavior.
        return hash(str(self.replace(' ', '')))

    def __bytes__(self):
        return binascii.unhexlify(six.b(self.replace(' ', '')))
class SorteDeque(collections.deque):
    """A deque subclass that tries to maintain sorted ordering using bisect"""

    def insort(self, item):
        """Insert *item* at its sorted position (assumes self is already sorted)."""
        i = bisect.bisect_left(self, item)
        # deque has no O(1) random-access insert: rotate the insertion point
        # to the left end, append there, then rotate back.
        self.rotate(- i)
        self.appendleft(item)
        self.rotate(i)

    def resort(self, item):  # pragma: no cover
        """Move *item* into sorted position, inserting it if absent.

        NOTE(review): bisect assumes sorted input, so on a partially
        unsorted deque the position probe below is best-effort only.
        """
        if item in self:
            # if item is already in self, see if it is still in sorted order.
            # if not, re-sort it by removing it and then inserting it into
            # its sorted order
            i = bisect.bisect_left(self, item)
            if i == len(self) or self[i] is not item:
                self.remove(item)
                self.insort(item)
        else:
            # if item is not in self, just insert it in sorted order
            self.insort(item)

    def check(self):  # pragma: no cover
        """re-sort any items in self that are not sorted"""
        # Fix: the scan previously used range(len(self) - 2), which skipped the
        # final adjacent pair, so an out-of-order last element was silently left
        # in place.  Scan all len-1 pairs, and snapshot the offenders into a
        # list first, since resort() mutates the deque while we iterate.
        unsorted_items = [self[i] for i in range(len(self) - 1)
                          if not operator.le(self[i], self[i + 1])]
        for item in unsorted_items:
            self.resort(item)
|
Well said Armin,must agree, what happens like you say if there is a slump in Whisky consumption in decades to come.
The Population on the Island is less than 4000 so it’s mostly uninhabited. This is a good thing. Huge demand globally for these spirits so I say great and its a good for the jobs and local economy. The metrics would probably show the island economy is mostly spirits. If they are looking for workers I’d love to work at a distillery and learn all about it.
That’s exactly my point: The demand is huge now. But after every boom comes a bust. And then? Where are the worker going to go in a monoculture?
Armin Grewe I think this is an example of a recession “Proof” industry. If there’s a black Tuesday the first thing people will do is reach for the bottle of the liquid Gold ($$$). Anyhow the stuff will sit in casks for at least 12 years anyway.
Armin Grewe the monoculture of the island is probably sheep. It’s mostly grassland so goat cheese, lamb and wool seem like logical produce (salmon?). Goods like produce will fare better in tough economic times than tourism will. Tourism is a discretionary expenditure, whereas food is a necessity.
I read about some small islands off the north shore where there are ancient settlements. I also read about one of the oldest Celtic crosses in Europe. The Island should be in the process of developing and cataloging all of those archeological sites to promote ethnic (gaelic and celtic) tourism so people can go and see the ruins and see displays etc. Then go for some of the local fish and brew at a nice restaurant or camp on a beach.
Tim Helmer they already do this. Although camping on beaches is not the wonderful idea it sounds and can lead to all sorts of pollution unfortunately.
The produce you mention is and can only ever be relatively small scale.
Hundreds, perhaps thousands, of ignorant punters traipsing all over Islay only because of its whisky is a bad idea for the island.
I agree. There’s far more to Islay than whisky. At least the weather stops it becoming a Magaluf!
Irena Krasinska-Lobban I have my grandmothers History of Islay book, family photos from the 1850’s and Laggan Farm and that’s about it. I plan to visit the island in the next 5-10 years. Never been there. Diversification is great but obviously with only 3500 residents the public coffers are limited as to what they can do. It’s a sleepy place I gather.
Michael Small whisky tourism, huh. Well, what else do you think can be done?
I think it’s the Ileachs who should decide what can be done. Outside interference in a small(ish) community can be very detrimental. The highlands and island have suffered greatly at the hands of people and organisations who have changed things ‘for their own good’.
Tim Helmer Is there any reason to drastically alter the existing culture of the island? Not likely.
In recent years the island has lost nearly all of its dairy herd and its cheese making. Sure there remains sheep and beef. Farmers are turning to growing barley for the ethos of Islay whisky completely from Islay.
Yes, Islay whisky has a global market for single malt and for blending and the current distilleries can sell all they make.
Two things worry me, all eggs in one basket – if something happens to global whisky consumption, Islay is in serious trouble, the other point is that the island infrastructure, specifically road, ferry, waste disposal and water supply are under considerable strain at present. With existing distilleries looking to massively increase production and 4 new distilleries in the pipeline, in addition to rum and gin production, can the island cope??
Some good points made in your post, not least regards infrastructure. I’ve said elsewhere that I think the drinks industry needs to feather its own (island) nest a little bit more and provide some financial support. /continued….
That seems odd when you take into account the island population was double what it is now during the late 1800’s. Just goes to show how wasteful of resources like water the modern lifestyle is. The water these distilleries use is metered no? Hopefully if they’re not using native peat filter water they pay their fair share for the upkeep and development of the local water supplies. I just spent the last hour going through the Wikipedia page on Islay. Seems to me the Island has always been in the ownership of one individual or another; Dominus Insularum—”Lord of the Isles”. Perhaps some day in the future the Island can buy back its own land so that the locally elected government and island inhabitants can take a more self directed approach rather than letting external market economics dictate the course of business. That would be a first in the Islands long and complex history.
Michael Small Drastically altering the culture is not likely unless people start moving back onto the island and it reaches its former population of over 7000. Back in the day most folks were farming but to see economic growth on the island perhaps some other industry could be started. There is the potential for mining perhaps. The Island has a complex geology. I’m just brainstorming here because some of the people on this post have expressed worry for the Islands economic future. Not like Pitcairn Island though, now there is an island in trouble. They actively encouraging people to move to the island.
Jim Loudon Yes I see that and then by 1891 it was down to just over 7500. My great grandfather John Dunlop rented Lagan Farm but eventually he became annoyed at the increasing rent so they sold everything and moved to Canada in about 1900.
Jim Loudon oh, well that is just great, it’s like socialism but on a small scale. They set a good example. A person could just take out a 99 year lease from the community. All profit stays with the island and is not bled off to external landlords living elsewhere, where they no doubt spend the profits to suit themselves and not the community that generated it.
Islay is an interesting economic example. If I wanted to buy land from a wealthy landowner I need to waive a big bag of cash in his face. If the Island has 15000 people on it the cost goes up because the occupants are busy making money and can afford rent. If the Island is barren guess how much it’s worth? Not much. So the loss of population on Islay in the last century and a half is the direct result of the landlords putting an economic pinch on the Islanders and mismanaging the Islands financial well being. Kind of like slum lords who buy a hotel, bleed it for money yet not spending a penny on upkeep then turning it over for a profit. They destroy the hotel and make a big bag of cash. It’s sick. I think the concern of some of the posts on this thread for the future of the island are well founded. Perhaps the time has come for Islay to take back the Island and set up a trust fund like Eigg did.
You are being very unkind to the landowners on Islay,Tim Helmer, The land on Islay is, in the main, well managed and the owners do not put an ‘economic pinch’ on the inhabitants. In fact they are quite generous benefactors to the community in one way or another.
Whilst the owners of the larger estates may have business interests on the mainland they maintain homes on Islay and they and their families spend time there. They reinvest in their estates. Why would they not? The downtrodden tenants of the cruel landlords are, thankfully, confined to history as far as Islay is concerned – this is 2018 not 1818. They certainly are NOT like slum landlords.
If you wanted to buy land you would have to pay the market value and only if that land was put up for sale.
I would suggest that Wikipedia is not the best source of information if it is presenting such a feudal view.
The various land owners – and the rest of the population – all pay taxes of one sort or another. Some of these – e.g whisky duties, going straight into the UK Treasury others – i.e. Council Tax go to Argyll and Bute Council, under whose jurisdiction Islay lies.
There is not and never will be a ‘locally elected government’ on Islay and not possible for all monies generated to remain on the island.
Regarding buy outs – Eigg and Gigha are completely different – both these islands were private land in the ownership of one person with, in the case of Eigg, no publicly owned infrastructure such as roads. Islay is in the ownership of many, with a road network owned and maintained (albeit not very well) by Argyll & Bute Council, and I would doubt whether Ileachs would ever want a ‘buy out’.
If even a small part of the whisky revenue, i.e. taxes & duty, from these many distilleries was allowed to stay on, and be invested in, Islay the infrastructure could be upgraded to cope. As things stand this will never happen.
As for the water, distilleries on Islay and the mainland draw their water from natural sources – mostly springs – and I do not know of any who draw from public water supplies.
Additionally – do not mistake uninhabited for unuseable. The land may not have dwellings but every acre of Islay is used for something, whether it is arable -used for crops; grassland – grazing for cattle and sheep; moorland – grazed by sheep or shot over for grouse, deer etc or for forestry; or water – let for fishing.
Yes, the population has decreased, as have populations all over the highlands and islands. Small crofts are not economically viable and the larger farms, on the mainland as well as Islay and other islands, became mechanised and no longer require a large workforce. People’s expectations have changed and sons and daughters are no longer expected, nor do they necessarily wish, to stay at home following the family occupation whatever that may be.
Tim Helmer there’s plenty of space in England as well. Plenty. The problem are the NIMBYs and the planning laws.
NIMBY’s, refuseniks, contrarians I get it. There are a lot of provincial attitudes.
Jim Loudon I read that it was also accomplished on another Hebridean Island Eriskay.
|
"""Feature extraction test"""
import numpy as np;
import sys
import theano;
import theano.tensor as T;
sys.path.append("..")
import scae_destin.datasets as ds;
from scae_destin.convnet import ReLUConvLayer;
from scae_destin.convnet import LCNLayer
# NOTE: this script is Python 2 (print statements, integer division below).

# Training configuration.
n_epochs=1;
batch_size=100;

# Load CIFAR-10 and collapse the RGB channels to grayscale by averaging
# over axis 3 (the colour axis).
Xtr, Ytr, Xte, Yte=ds.load_CIFAR10("/home/icog/convAE+destin/cifar-10-batches-py");
Xtr=np.mean(Xtr, 3);
Xte=np.mean(Xte, 3);

# Flatten each 32x32 image into a single row vector.
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])

# Wrap the arrays as Theano shared variables.
train_set_x, train_set_y=ds.shared_dataset((Xtrain, Ytr));
test_set_x, test_set_y=ds.shared_dataset((Xtest, Yte));

# Python 2 integer division: whole batches only.
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;

print "[MESSAGE] The data is loaded"

# Symbolic inputs: a minibatch of flattened images, labels, and a batch index.
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();

# Reshape back to (batch, channels, height, width) for the conv layer.
images=X.reshape((batch_size, 1, 32, 32))

# Single LCN layer with 50 7x7 filters over the 32x32 grayscale input.
layer_0=LCNLayer(filter_size=(7,7),
                 num_filters=50,
                 num_channels=1,
                 fm_size=(32,32),
                 batch_size=batch_size,
                 border_mode="full");

# Compile a function that extracts layer-0 feature maps for minibatch *idx*.
extract=theano.function(inputs=[idx],
                        outputs=layer_0.apply(images),
                        givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]});

print extract(1).shape
|
Should I reorder my invitations?
Did you catch the mistake and what would you do?
I personally would reorder. The switch from first to third person would drive me nuts. I also hope the capitalization isn't like that on the invites, because it's not correct. Nothing should be capitalized except proper nouns and Because.
You can also drop off "in the afternoon" (no one will think you're getting married at 5am) and the 2015.
Saturday, the 20th of June sounds much better/more formal than Saturday, June 20th to me. Also, 4:45 is an atypical start time. You aren't planning on telling your guests to arrive 15 minutes before people actually start rolling down the aisle, are you?
Sentimental poetry and words like "happily request the presence" are not appropriate on the wedding invitation. They serve no purpose. Many people will find this cringeworthy. The invitation is a simple note from the hosts to the guests telling them who, what, when and where - not WHY!
Of course, you can do what you want, but many people will side eye your invitations if you keep them like you have posted.
|
#!/usr/bin/env python
import unittest
import NmeaSentences
import NmeaParser
import datetime
from Utc import Utc
class TestNmeaSentence(unittest.TestCase):
    """Tests for the generic NmeaSentences.NmeaSentence base class."""

    def setUp(self):
        # A sentence constructed with an arbitrary type tag.
        self.__sentence = NmeaSentences.NmeaSentence("XYZ")

    def test_getType(self):
        self.assertEqual("XYZ", self.__sentence.getType())
class TestGpggaSentence(unittest.TestCase):
    """Tests for NmeaSentences.GpggaSentence: field accessors on a valid
    sentence and constructor validation on malformed input."""

    def setUp(self):
        # Well-formed GPGGA payload: 12:35:19 UTC, 48 deg 07.038' N,
        # 011 deg 31.000' E, fix quality 1, 8 satellites, HDOP 0.9,
        # altitude 545.4 m, geoid height 46.9 m, empty DGPS fields.
        sentence1 = "GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,"
        self.__gpgga1 = NmeaSentences.GpggaSentence( sentence1.split(',') )
        # Malformed inputs: wrong sentence type (GPXXX), and one field short.
        # NOTE(review): unlike sentence1, these are passed to GpggaSentence as
        # raw strings rather than split(',') lists in the tests below --
        # confirm the constructor is meant to reject both forms.
        self.__sentence2 = "GPXXX,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,"
        self.__sentence3 = "GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,"

    def test_getType(self):
        self.assertEqual(self.__gpgga1.getType(), "GPGGA")

    def test_getTime(self):
        # Expected: today's date (UTC) at 12:35:19 -- GPGGA carries no date.
        dt = datetime.datetime.now( tz = Utc() )
        dt = dt.replace(hour = 12, minute = 35, second = 19, microsecond = 0)
        self.assertEqual(self.__gpgga1.getTime(), dt)

    def test_getLatitude(self):
        # 4807.038 -> 48 degrees + 7.038 minutes.
        self.assertAlmostEqual(self.__gpgga1.getLatitude(), 48.0 + 7.038 / 60.0)

    def test_getLongitude(self):
        # 01131.000 -> 11 degrees + 31.000 minutes.
        self.assertAlmostEqual(self.__gpgga1.getLongitude(), 11.0 + 31.000 / 60.0)

    def test_getQuality(self):
        self.assertEqual(self.__gpgga1.getQuality(), 1)

    def test_getNumSatellites(self):
        self.assertEqual(self.__gpgga1.getNumSatellites(), 8)

    def test_getHorizontalDop(self):
        self.assertAlmostEqual(self.__gpgga1.getHorizontalDop(), 0.9)

    def test_getAltitude(self):
        self.assertAlmostEqual(self.__gpgga1.getAltitude(), 545.4)

    def test_getAltitudeUnits(self):
        self.assertEqual(self.__gpgga1.getAltitudeUnits(), "M")

    def test_getGeoidHeight(self):
        self.assertAlmostEqual(self.__gpgga1.getGeoidHeight(), 46.9)

    def test_getGeoidHeightUnits(self):
        self.assertEqual(self.__gpgga1.getGeoidHeightUnits(), "M")

    def test_getSecondsSinceLastDgpsUpdate(self):
        # The empty DGPS-age field is expected to be reported as 0.
        self.assertEqual(self.__gpgga1.getSecondsSinceLastDgpsUpdate(), 0)

    def test_getDgpsStationId(self):
        # The empty station-ID field is expected to be reported as "".
        self.assertEqual(self.__gpgga1.getDgpsStationId(), "")

    def test_GpggaSentence1(self):
        # A non-GPGGA sentence type must be rejected.
        self.assertRaises(NmeaSentences.InvalidGpggaSentence, NmeaSentences.GpggaSentence, self.__sentence2)

    def test_GpggaSentence2(self):
        # A sentence with a missing trailing field must be rejected.
        self.assertRaises(NmeaSentences.InvalidGpggaSentence, NmeaSentences.GpggaSentence, self.__sentence3)
class TestNmeaParser(unittest.TestCase):
    """Tests for NmeaParser.NmeaParser.Parse: one valid GPGGA frame plus
    several malformed framings that must raise InvalidNmeaSentence."""

    def setUp(self):
        # Valid frame: "$" lead-in, payload, "*47" checksum, CRLF terminator.
        self.__gpggaRaw1 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\n"
        self.__gpgga1 = NmeaParser.NmeaParser.Parse(self.__gpggaRaw1)
        # Missing the leading "$".
        self.__gpggaRaw2 = "GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\n"
        # Terminated "\n\n" instead of "\r\n".
        self.__gpggaRaw3 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\n\n"
        # Terminated "\r\r" instead of "\r\n".
        self.__gpggaRaw4 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47\r\r"
        # "447" in place of "*47": no "*" checksum delimiter.
        self.__gpggaRaw5 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,447\r\n"
        # Altered checksum ("*46" instead of the valid "*47").
        self.__gpggaRaw6 = "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*46\r\n"

    def test_Parse1(self):
        # The valid frame parses into a GPGGA sentence with all fields intact
        # (same expectations as TestGpggaSentence, via the parser entry point).
        self.assertEqual(self.__gpgga1.getType(), "GPGGA")
        dt = datetime.datetime.now( tz = Utc() )
        dt = dt.replace(hour = 12, minute = 35, second = 19, microsecond = 0)
        self.assertEqual(self.__gpgga1.getTime(), dt)
        self.assertAlmostEqual(self.__gpgga1.getLatitude(), 48.0 + 7.038 / 60.0)
        self.assertAlmostEqual(self.__gpgga1.getLongitude(), 11.0 + 31.000 / 60.0)
        self.assertEqual(self.__gpgga1.getQuality(), 1)
        self.assertEqual(self.__gpgga1.getNumSatellites(), 8)
        self.assertAlmostEqual(self.__gpgga1.getHorizontalDop(), 0.9)
        self.assertAlmostEqual(self.__gpgga1.getAltitude(), 545.4)
        self.assertEqual(self.__gpgga1.getAltitudeUnits(), "M")
        self.assertAlmostEqual(self.__gpgga1.getGeoidHeight(), 46.9)
        self.assertEqual(self.__gpgga1.getGeoidHeightUnits(), "M")
        self.assertEqual(self.__gpgga1.getSecondsSinceLastDgpsUpdate(), 0)
        self.assertEqual(self.__gpgga1.getDgpsStationId(), "")

    def test_Parse2(self):
        self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw2)

    def test_Parse3(self):
        self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw3)

    def test_Parse4(self):
        self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw4)

    def test_Parse5(self):
        self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw5)

    def test_Parse6(self):
        self.assertRaises(NmeaParser.InvalidNmeaSentence, NmeaParser.NmeaParser.Parse, self.__gpggaRaw6)
# Run the whole suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
A Co Armagh man faces a potential jail sentence after he admitted “very serious charges” arising from an incident on a plane.
Standing in the dock of Antrim Magistrates Court, 27-year-old Kevin O’Hare elected to have his case dealt with in the lower court instead of the crown court and then entered guilty pleas to each of the five charges against him.
O’Hare, from Keggall View in Camlough, admitted being drunk on a plane, failing to obey the lawful command of a pilot, behaving in a “threatening, abusive, insulting or disorderly manner towards a member of the crew of an aircraft”, threatening to kill a woman and assaulting the same female on August 5.
None of the facts surrounding the offences were opened in court and O’Hare’s solicitor asked for sentencing to be adjourned to allow time for a pre-sentence report.
Freeing O’Hare on continuing bail until October 9, Deputy District Judge Brian Archer warned the defendant he had confessed to “very serious charges”.
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
from synthtool.languages import python
common = gcp.CommonTemplates()

# API version to synthesize when a staging dir does not pin one explicitly.
default_version = "v3"

for library in s.get_staging_dirs(default_version):
    if library.name.startswith("v3"):
        # TODO(danoscarmike): remove once upstream protos have been fixed
        # Escape underscores in gs:\\ URLs
        s.replace(
            library / "google/cloud/translate_v3*/types/translation_service.py",
            "a_b_c_",
            # Fix: use a raw string for the literal backslash.  The previous
            # non-raw "a_b_c\_" relied on the invalid escape sequence "\_",
            # which raises a SyntaxWarning on modern Python and is slated to
            # become a SyntaxError.
            r"a_b_c\_"
        )

    # Generated files that must not overwrite the handwritten versions.
    excludes = [
        "setup.py",
        "nox*.py",
        "README.rst",
        "docs/conf.py",
        "docs/index.rst",
        "translation.py",
    ]
    s.move(library, excludes=excludes)

s.remove_staging_dirs()

# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(
    samples=True,
    microgenerator=True,
    cov_level=99,
)
s.move(templated_files, excludes=[".coveragerc"])  # microgenerator has a good .coveragerc file

# Correct namespace in noxfile
s.replace("noxfile.py", "google.cloud.translation", "google.cloud.translate")

# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
python.py_samples()

# Re-format everything the synthesis touched.
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
|
The biennial competition and festival in Eindhoven opens this year with a special percussion spectacle on Friday 9 November. The Austrian percussionist Martin Grubinger is one of the prominent guests of this edition, both on the professional jury of the competition and on the festival stage in a unique collaboration with TROMP 2014 winner Dominique Vleeshouwers and Harmonie St. Michaël van Thorn.
The Austrian percussionist Martin Grubinger is a true phenomenon. He turns his hand just as easily to a Gregorian repertoire as a grand show in the Eurovision Song Contest or a marathon concert in which he spends over four hours moving between 200 instruments. In the Opening Night of the TROMP festival he shares the stage with a rising star from The Netherlands: as winner of the jury, audience and press prizes of the TROMP competition in 2014, Dominique Vleeshouwers’ international career has taken off. Together with Harmonie St. Michaël van Thorn, a wind orchestra of the international top class, the opening of TROMP 2018 promises to be a unique and festive percussion spectacle.
|
import sys
"""
A class to print BIND records.
"""
class Printer(object):
    """
    A class to print BIND records.

    Writes zone-file resource records to *fd*, padding the owner-name column
    to AJUST1 characters and leaving AJUST2 spaces after the record type.
    """

    def __init__( self, fd=sys.stdout, AJUST1=30, AJUST2=10 ):
        self.fd = fd
        self.AJUST1 = AJUST1  # width of the left (owner name) column
        self.AJUST2 = AJUST2  # gap between the record type and its data

    def print_raw( self, message ):
        """Write *message* to the output stream verbatim."""
        self.fd.write( message )

    def print_rr( self, name, data , rr_type, terminal="" ):
        """Write one generic resource record line.

        *terminal* is appended directly after *data*; callers pass "." to
        make a name absolute.
        """
        name = name.ljust(self.AJUST1," ")
        self.fd.write("%s %s%s%s%s\n" % (name,rr_type," "*self.AJUST2,data,terminal))

    def print_PTR( self, ip, dname ):
        """Write a PTR record mapping the reversed *ip* to *dname* (absolute)."""
        ip = '.'.join(list(reversed(ip.split('.')))) # Reverse the ip along '.' boundaries
        self.print_rr( ip, dname, "PTR", "." )

    def print_A( self, dname, ip , terminal=""):
        """Write an A record mapping *dname* to *ip*."""
        self.print_rr( dname, ip, "A", terminal )

    def print_CNAME( self, dname, cname ):
        """Write a CNAME record aliasing *dname* to *cname* (made absolute)."""
        self.print_rr( dname, cname, "CNAME", "." )

    def print_MX( self, name, domain, ttl, prio, server ):
        """Write an MX record with priority *prio* pointing at *server*.

        NOTE(review): *domain* and *ttl* are accepted but unused; they are
        kept so existing call sites keep working -- confirm before removing.
        """
        # str(prio) followed by three spaces (" ".rjust(3, " ") == "   ").
        self.print_rr( name, str(prio)+" ".rjust(3," ") , 'MX', server )

    def print_NS( self, dname, nameservers ):
        """Write one NS record per entry in *nameservers* under the "@" owner."""
        if not nameservers:
            return
        # Fix: removed a dead re-assignment of *dname* here -- the records are
        # always emitted with "@" as the owner name, so it was never used.
        padding = "@".ljust(self.AJUST1," ")
        for ns in nameservers:
            self.fd.write("%s NS%s%s.\n" % (padding," "*self.AJUST2,ns))

    def print_SOA( self, ttl, dname, primary_master, contact,serial, refresh, retry, expire, minimum ):
        """Write the zone's $TTL directive followed by its SOA record."""
        dname = dname+"."
        dname = dname.ljust(self.AJUST1," ")
        off = 9  # extra right-justification applied to the numeric SOA fields
        soa = ""
        soa += "$TTL %s\n" % (ttl)
        soa += "%s IN SOA %s. %s. (\n" % (dname, primary_master, contact)
        soa +="\n"
        soa += str(serial).rjust(self.AJUST1+off," ")
        soa +=" ;Serial"
        soa +="\n"
        soa += str(refresh).rjust(self.AJUST1+off," ")
        soa +=" ;Refresh"
        soa +="\n"
        soa += str(retry).rjust(self.AJUST1+off," ")
        soa +=" ;Retry"
        soa +="\n"
        soa += str(expire).rjust(self.AJUST1+off," ")
        soa +=" ;Expire"
        soa +="\n"
        soa += str(minimum).rjust(self.AJUST1+off," ")+" )"
        soa +=" ;Minimum"
        soa +="\n"
        self.fd.write(soa)
|
The Individual Development Plan (IDP) is a critical part of graduate student professional development, and is a tool to assist your career planning. In addition to the knowledge and expertise you gain in your specific program or postdoctoral fellowship, the University of Alberta hopes that you develop general skills throughout your program that will help you with your life post-university. While work on your IDP cannot be counted as part of the eight hour professional development activities for graduate students, it is designed to complement other professional development activities.
There are two types of IDP workbooks, one for students who are continuing their education from an undergraduate or graduate level and one for graduate students who are returning to school after years of professional experience.
The UA Calendar states that all graduate students at the University of Alberta are required to submit an individualized professional development plan to the department for their program of studies (graduate coordinator or graduate chair) within 12 months of the program's commencement for master's students and within 18 months of the program's commencement for doctoral students.
If you would like guidance please attend our Professional Development Requirement sessions and/or complete our IDP Online course (also eligible for PD credit).
Take time to research possible career paths that are of interest to you.
Look at possibilities in business, industry, government, nonprofit sectors, and academia. Identify at least three possible careers that you would want to work in. Leave yourself open to careers that you might otherwise not have thought to pursue.
• "What are my long and short term goals?"
• "What would a sustainable career path look like?"
• "What required skills do I need to develop or cultivate?"
In this section, you will gauge your skills and competencies by completing a self-assessment. This activity is intended to help you monitor competencies you already possess, and identify areas that need further development. What skills do you possess and how can you best articulate them? What skills are emphasized in your careers of interest? Note the activities and actions you can build into your timeline to develop your skills.
Develop a time line needed for completing your academic program, including academic milestones and career goals.
Include plans for how you can tailor your program or fellowship to develop your skills and competencies. How much time will you allocate to professional development? How will you obtain the skills that need further development? A blank template for a program timeline is provided in the FGSR handout.
Meet with a mentor(s) who can speak to your academic program goals, timelines and how to maximize your graduate program or fellowship. Discuss skills you wish to develop. Your mentor(s) can also alert you to professional development opportunities applicable to your interests and help assess whether you have a realistic timeline and if you've allocated sufficient time for professional development.
Revisit your IDP annually. Evaluate whether you’re on track with your academic and career goals. What is working well and what needs to change? What skills do you still need or want to pursue? As you answer these questions, determine any adjustments required, and confer with your mentor(s) as needed.
The Individual Development Plan must be completed within the first 12-18 months of your program (Master's and PhD). Professional development hours must be completed by defense (Master's) and by year three (PhD). At the end of your program, ensure that you have completed the Individual Development Plan & Professional Development Completion Form and submitted it to your department/faculty in order to convocate. Please confirm with your department/faculty if they have a different department/faculty specific form.
|
import sublime, sublime_plugin
from sidebar.SideBarGit import SideBarGit
from sidebar.SideBarSelection import SideBarSelection
import threading
class Object():
    # Bare attribute bag; instances are populated ad hoc to build the command
    # objects handed to SideBarGit().run().
    pass

# Plugin settings; values are read lazily at event time via s.get(...).
s = sublime.load_settings('SideBarGit.sublime-settings')
class StatusBarBranch(sublime_plugin.EventListener):
    """Event listener that refreshes the git-branch status bar entry whenever
    a file-backed view is loaded or activated, gated on the
    'statusbar_branch' setting."""

    def on_load(self, v):
        # Only views backed by a file on disk can belong to a repository.
        if s.get('statusbar_branch') and v.file_name():
            StatusBarBranchGet(v.file_name(), v).start()

    def on_activated(self, v):
        if s.get('statusbar_branch') and v.file_name():
            StatusBarBranchGet(v.file_name(), v).start()
class StatusBarBranchGet(threading.Thread):
    """Background thread that runs ``git branch`` for the repository
    containing *file_name* and publishes the active branch to the status bar
    of view *v*."""

    def __init__(self, file_name, v):
        threading.Thread.__init__(self)
        self.file_name = file_name
        self.v = v

    def run(self):
        for repo in SideBarGit().getSelectedRepos(SideBarSelection([self.file_name]).getSelectedItems()):
            object = Object()
            object.item = repo.repository
            object.command = ['git', 'branch']
            object.silent = True
            SideBarGit().run(object)
            # NOTE(review): the unconditional return below means only the first
            # matching repository is ever inspected -- presumably intentional,
            # but worth confirming.
            # NOTE(review): SideBarGit.last_stdout is read later, inside the
            # deferred lambda on the main thread; if another command runs in
            # between it could observe unrelated output (potential race).
            sublime.set_timeout(lambda:self.on_done(SideBarGit.last_stdout.decode('utf-8')), 0)
            return

    def on_done(self, branches):
        # ``git branch`` marks the checked-out branch with a leading "*";
        # display that line (prefix included) in the status bar.
        branches = branches.split('\n')
        for branch in branches:
            if branch.startswith("*"):
                self.v.set_status('statusbar_sidebargit_branch', branch)
                return
|
Let your dream car drive you - book a Lamborghini on Rentalcars24h.com! This supercar is more than real now so don't miss your chance to drive the most legendary vehicle ever! Lamborghini car rental Caprivi - Katima is ready to give you the best prices for these spectacular Italian cars - use our car rental form and check it yourself!
RentalCars24h.com provides you with high-quality service and reliable modern vehicles all over the world with more than 30,000 locations in more than 176 countries. We are ready for your bookings 24/7. No delays, no hidden payments - just the cheapest prices for all kinds of car rental vehicles. Be sure that Lamborghini car rental Caprivi - Katima will provide you with exclusive car and not less exclusive mindful and friendly service.
World-famous Lamborghinis combines elegance and outstanding performance. The car's design emphasize that the vehicle was made for high speed. Superb handling and the 4-wheel drive transmission make you confident of your safety on the road. This almighty vehicle guarantees you the general attention - wherever you will drive you'll be in the spotlight! Lamborghini car rental Caprivi - Katima offers you to reserve Lamborghini Aventador, Lamborghini Gallardo Superleggera, Lamborghini Gallardo LP560-4 Spyder or Lamborghini Gallardo Spyder.
On this amazing car you can gain the top speed of 309-350 km/h! You'll have to pinch yourself to believe it's not a dream, with Lamborghini car rental Caprivi - Katima it's real! This outstanding vehicle can get you from 0 to 100 km/h in 2.9 - 4 seconds (depending on a model). You can also rent the cabrio version of this supercar to let the wind play with your hair and feel the speed.
Lamborghini car rental Caprivi - Katima is waiting for your reservations! Let your dreams come true - experience Lamborghini breathtaking ride!
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.