text
stringlengths 29
850k
|
|---|
#!/usr/bin/env python
# Copyright (C) 2004 Red Hat Inc. <http://www.redhat.com/>
# Copyright (C) 2005-2007 Collabora Ltd. <http://www.collabora.co.uk/>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import os
import unittest
import time
import logging
# Locations of the build tree and source tree; used below to verify that the
# dbus modules under test were imported from this checkout, not a system copy.
builddir = os.path.normpath(os.environ["DBUS_TOP_BUILDDIR"])
pydir = os.path.normpath(os.environ["DBUS_TOP_SRCDIR"])

import dbus
import _dbus_bindings
import dbus.glib
import dbus.service

try:
    from gi.repository import GObject as gobject
except ImportError:
    # Exit status 77 is the automake convention for "test skipped".
    raise SystemExit(77)

logging.basicConfig()
logging.getLogger().setLevel(1)
logger = logging.getLogger('test-signals')

# Fail fast if the imported modules do not come from the tree under test.
pkg = dbus.__file__
if not pkg.startswith(pydir):
    raise Exception("DBus modules (%s) are not being picked up from the package" % pkg)
if not _dbus_bindings.__file__.startswith(builddir):
    raise Exception("DBus modules (%s) are not being picked up from the package"
                    % _dbus_bindings.__file__)

# Well-known name, interface and object path exported by the test service.
NAME = "org.freedesktop.DBus.TestSuitePythonService"
IFACE = "org.freedesktop.DBus.TestSuiteInterface"
OBJECT = "/org/freedesktop/DBus/TestSuitePythonObject"
class TestSignals(unittest.TestCase):
    """Tests D-Bus signal delivery: direct objects, fallback objects,
    name-owner-following proxies, and removal of signal matches."""

    def setUp(self):
        logger.info('setUp()')
        self.bus = dbus.SessionBus()
        # Proxies for the exported test object and its fallback children.
        self.remote_object = self.bus.get_object(NAME, OBJECT)
        self.remote_object_fallback_trivial = self.bus.get_object(
            NAME, OBJECT + '/Fallback')
        self.remote_object_fallback = self.bus.get_object(
            NAME, OBJECT + '/Fallback/Badger')
        # Proxy that follows NameOwnerChanged, surviving service restarts.
        self.remote_object_follow = self.bus.get_object(
            NAME, OBJECT, follow_name_owner_changes=True)
        self.iface = dbus.Interface(self.remote_object, IFACE)
        self.iface_follow = dbus.Interface(self.remote_object_follow, IFACE)
        self.fallback_iface = dbus.Interface(self.remote_object_fallback, IFACE)
        self.fallback_trivial_iface = dbus.Interface(
            self.remote_object_fallback_trivial, IFACE)
        # Name of the test currently expecting callbacks; handlers left over
        # from earlier tests compare against this and ignore themselves.
        self.in_test = None

    def signal_test_impl(self, iface, name, test_removal=False):
        """Emit SignalOneString and assert it arrives within 1s; when
        *test_removal* is true, also assert it stops arriving after the
        signal match has been removed."""
        self.in_test = name
        # using append rather than assignment here to avoid scoping issues
        result = []

        def _timeout_handler():
            logger.debug('_timeout_handler for %s: current state %s',
                         name, self.in_test)
            if self.in_test == name:
                main_loop.quit()

        def _signal_handler(s, sender, path):
            logger.debug('_signal_handler for %s: current state %s',
                         name, self.in_test)
            if self.in_test not in (name, name + '+removed'):
                return
            logger.info('Received signal from %s:%s, argument is %r',
                        sender, path, s)
            result.append('received')
            main_loop.quit()

        def _rm_timeout_handler():
            # Label fixed: this used to log itself as '_timeout_handler'.
            logger.debug('_rm_timeout_handler for %s: current state %s',
                         name, self.in_test)
            if self.in_test == name + '+removed':
                main_loop.quit()

        logger.info('Testing %s', name)
        match = iface.connect_to_signal('SignalOneString', _signal_handler,
                                        sender_keyword='sender',
                                        path_keyword='path')
        logger.info('Waiting for signal...')
        iface.EmitSignal('SignalOneString', 0)
        source_id = gobject.timeout_add(1000, _timeout_handler)
        main_loop.run()
        if not result:
            raise AssertionError('Signal did not arrive within 1 second')
        logger.debug('Removing match')
        match.remove()
        gobject.source_remove(source_id)
        if test_removal:
            self.in_test = name + '+removed'
            logger.info('Testing %s', name)
            result = []
            iface.EmitSignal('SignalOneString', 0)
            source_id = gobject.timeout_add(1000, _rm_timeout_handler)
            main_loop.run()
            if result:
                raise AssertionError('Signal should not have arrived, but did')
            gobject.source_remove(source_id)

    def testFallback(self):
        self.signal_test_impl(self.fallback_iface, 'Fallback')

    def testFallbackTrivial(self):
        self.signal_test_impl(self.fallback_trivial_iface, 'FallbackTrivial')

    def testSignal(self):
        self.signal_test_impl(self.iface, 'Signal')

    def testRemoval(self):
        self.signal_test_impl(self.iface, 'Removal', True)

    def testSignalAgain(self):
        self.signal_test_impl(self.iface, 'SignalAgain')

    def testRemovalAgain(self):
        self.signal_test_impl(self.iface, 'RemovalAgain', True)

    def testSignalF(self):
        self.signal_test_impl(self.iface_follow, 'Signal')

    def testRemovalF(self):
        self.signal_test_impl(self.iface_follow, 'Removal', True)

    def testSignalAgainF(self):
        self.signal_test_impl(self.iface_follow, 'SignalAgain')

    def testRemovalAgainF(self):
        self.signal_test_impl(self.iface_follow, 'RemovalAgain', True)
if __name__ == '__main__':
    # The main loop is a module-level global used by the test handlers above.
    main_loop = gobject.MainLoop()
    gobject.threads_init()
    dbus.glib.init_threads()
    logger.info('Starting unit test')
    unittest.main()
|
1 Let`s Tell A Fortune!
27 Pop! Goes the Urd!
42 Ah! My Average College Student!
|
'''
Created on Apr 5, 2015
@author: sergio
'''
import numpy as np
import ctypes
import numpy.ctypeslib as npct
import matplotlib.pyplot as plt
import psycopg2
import time
import neurodb.neodb.core
from math import e, pow
from scipy.optimize import leastsq
import neurodb
import random
from sklearn.cluster import KMeans, AgglomerativeClustering, MiniBatchKMeans
from neurodb.cfsfdp import libcd
# ctypes/numpy array type descriptors for passing contiguous buffers into
# the native clustering library (libcd).  Declared here for reuse in call
# signatures; they require C-contiguous memory layout.
array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array_1d_int = npct.ndpointer(dtype=np.int64, ndim=1, flags='CONTIGUOUS')
array_2d_double = npct.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
def get_points(id_block, channel):
    """Return an (n, 3) numpy array with the (p1, p2, p3) feature values of
    every spike recorded for *channel* inside block *id_block*.

    Connection parameters are hard-coded for the demo database.
    """
    username = 'postgres'
    password = 'postgres'
    host = '172.16.162.128'
    dbname = 'demo'
    dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'
                              % (dbname, username, password, host))
    # Parameterized query: the previous version interpolated id_block and
    # channel directly into the SQL string (injection-prone).
    query = """SELECT spike.p1, spike.p2, spike.p3 FROM spike
               JOIN segment ON id_segment = segment.id
               JOIN recordingchannel ON id_recordingchannel = recordingchannel.id
               WHERE segment.id_block = %s
               AND recordingchannel.index = %s"""
    try:
        cursor = dbconn.cursor()
        cursor.execute(query, (id_block, channel))
        results = cursor.fetchall()
        cursor.close()
    finally:
        # The connection used to be leaked; always release it.
        dbconn.close()
    return np.array([[p1, p2, p3] for (p1, p2, p3) in results])
def ajuste(local_density, coeficientes):
    """Evaluate the polynomial with coefficients *coeficientes* (lowest
    degree first) at each value of *local_density*.

    Returns a float ndarray of the same length.  Vectorized: the previous
    version called polyval once per element in a Python loop.
    """
    x = np.asarray(local_density, dtype=np.float64)
    return np.polynomial.polynomial.polyval(x, coeficientes)
def select_nodes(id_project, id_session, channel, n_nodos):
    """Split the channel's spikes into *n_nodos* disjoint chunks after a
    random shuffle.  Returns a list of *n_nodos* lists of spike ids."""
    project = neurodb.project.get_from_db(id_project)
    session = project.get_session(int(id_session))
    rc = None
    for ch in session.get_channels():
        if ch['channel'] == int(channel):
            rc = session.get_channel(ch['id'])
    if rc is None:
        # Previously this fell through to a NameError; fail explicitly.
        raise ValueError("channel %s not found in session %s"
                         % (channel, id_session))
    spikes = rc.get_spikes()
    random.shuffle(spikes)
    # Chunk size rounded up so all spikes are distributed.
    len_nodo = np.ceil(float(len(spikes)) / float(n_nodos))
    nodos = []
    for _ in range(n_nodos):
        nodo = []
        j = 0
        while spikes and j < len_nodo:
            nodo.append(spikes.pop())
            j += 1
        nodos.append(nodo)
    return nodos
def select_nodes_r(id_project, id_session, channel, n_nodos):
    """Draw *n_nodos* random samples of the channel's spikes.

    Each sample is drawn without replacement from the full spike list, so
    different samples may overlap (unlike select_nodes, which partitions).
    """
    project = neurodb.project.get_from_db(id_project)
    session = project.get_session(int(id_session))
    rc = None
    for ch in session.get_channels():
        if ch['channel'] == int(channel):
            rc = session.get_channel(ch['id'])
    if rc is None:
        # Previously this fell through to a NameError; fail explicitly.
        raise ValueError("channel %s not found in session %s"
                         % (channel, id_session))
    spikes = rc.get_spikes()
    len_nodo = int(np.ceil(float(len(spikes)) / float(n_nodos)))
    return [random.sample(spikes, len_nodo) for _ in range(n_nodos)]
def get_centers(nodos, nnodos, points):
centersT = []
rho = np.array([], np.float64)
delta = np.array([], np.float64)
ncenters = 0
spikes = np.array([], np.float64)
cl = np.array([], np.float64)
for i in range(nnodos):
spikes_id = nodos[i]
spikes_id = np.array(spikes_id, np.float64)
nspikes = len(spikes_id)
local_density = np.empty(nspikes)
distance_to_higher_density = np.empty(nspikes)
cluster_index = np.empty(nspikes)
nneigh = np.empty(nspikes)
centers = np.empty(nspikes)
dc = libcd.get_dc(connect, spikes_id, nspikes, np.float(1.8), points)
libcd.cluster_dp(connect, local_density, distance_to_higher_density, spikes_id,
cluster_index, nneigh, centers, dc, points, nspikes, "gaussian")
print "nodo %s procesado. ncenters:%s"%(i,int(centers[0]))
ncenters = centers[0] + ncenters
for j in range(int(centers[0])):
centersT.append([local_density[int(centers[j+1])], distance_to_higher_density[int(centers[j+1])]])
rho = np.concatenate((rho,local_density))
delta = np.concatenate((delta, distance_to_higher_density))
spikes = np.concatenate((spikes, spikes_id))
cl = np.concatenate((cl, cluster_index))
# plt.plot(local_density, distance_to_higher_density, 'o')
# plt.show()
ncenters = np.ceil(ncenters/nnodos)
plt.plot(rho, delta, 'ro')
plt.show()
centersT = np.array(centersT)
return centersT, ncenters, spikes, cl
if __name__ == '__main__':
username = 'postgres'
password = 'postgres'
host = '172.16.162.128'
dbname = 'demo'
url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
connect = "dbname=demo host=172.16.162.128 user=postgres password=postgres"
project = 19
id_block = "76"
#id_block = "76"
channel = "1"
points = 3
nnodos = 1
nodos = select_nodes_r(project, id_block, channel, nnodos)
color = ['bo', 'ro', 'go', 'co', 'ko', 'mo', 'b^', 'r^', 'g^', 'c^', 'k^', 'm^', 'bx', 'rx', 'gx', 'cx', 'kx', 'mx']
centers, nclusters, spikes, cl= get_centers(nodos, nnodos, points)
print "clusters: %s"%nclusters
km = KMeans(n_clusters = int(nclusters))
#km = MiniBatchKMeans(n_clusters = int(ncenters))
aw = AgglomerativeClustering(linkage='ward', n_clusters=int(nclusters))
km.fit(centers)
aw.fit(centers)
# plt.plot(km.cluster_centers_[0][0], km.cluster_centers_[0][1], 'kx')
# plt.plot(km.cluster_centers_[1][0], km.cluster_centers_[1][1], 'kx')
# plt.plot(km.cluster_centers_[2][0], km.cluster_centers_[2][1], 'kx')
# c = np.array(centers, np.float64)
#
# centersC = np.empty(len(c[:,1]))
# labels = np.empty(len(c[:,1]))
# x = np.array(c[:,0], np.float64)
# y = np.array(c[:,1], np.float64)
# libcd.dp(x, y, len(c[:,1]), labels, centersC, "gaussian")
for i in range(len(centers)):
plt.plot(centers[i][0], centers[i][1], color[int(aw.labels_[i])])
plt.show()
pass
#
# local_density = np.empty(nspikes)
# distance_to_higher_density = np.empty(nspikes)
# cluster_index = np.empty(nspikes)
# nneigh = np.empty(nspikes)
# centers = np.empty(nspikes)
#
# dc = libcd.get_dc(connect, spikes_id, nspikes, np.float(1.8), points)
# libcd.cluster_dp(connect, local_density, distance_to_higher_density, spikes_id,
# cluster_index, nneigh, centers, dc, points, nspikes, "gaussian")
#
# plt.plot(local_density, distance_to_higher_density, 'bo')
# plt.show()
#
# for i in range(int(cluster_index.max())+1):
# plt.subplot(int(cluster_index.max())+1,1,i+1)
# k = 0
# for j in range(nspikes):
# if cluster_index[j] == i:
# spikes = neurodb.neodb.core.spikedb.get_from_db(dbconn, id_block = id_block, channel = channel, id = int(spikes_id[j]))
# signal = spikes[0].waveform
# plt.plot(signal)
# k = 1 + k
#
# title = str(i) +": "+ str(k)
# plt.title(title)
# plt.show()
#
# pass
|
This webpage for welovetwnnis.com contains all the potential single-character typos that can be made while typing the domain name welovetwnnis.com. All potential typos that can be generated with a QWERTY keyboard or numpad are included in the typo list below. Only typos within the domain name of welovetwnnis.com are included, so there are no typos for the domain extension of welovetwnnis.com.
welovetwnnis.com contains 12 characters in its domain name and was added on 2019-04-24. The welovetwnnis.com page has had 1 visitor since it was added on 2019-04-24.
Besides typos, this page will also show you other domain extensions for welovetwnnis.com and other domain names that you might be interested in that are not related to this specific domain name. Only the most popular domain extensions for welovetwnnis.com are included in the list below.
The domain extensions for welovetwnnis.com that are listed above are the most popular domain extensions that are globally used. If you are searching for other domain extensions, make sure to check out our domain extension generator for welovetwnnis.com.
Our domain name typo generator found 75 typos for the domain name welovetwnnis.com based on 12 characters inside the domain name. The character length does not include the domain extension of the domain.
|
from bs4 import BeautifulSoup
import pandas as pd
from scripts.kapp import CACHABLE
import urllib
C = CACHABLE()
reactions = C.map_model_reaction_to_genes().set_index(0)
genes = {row[0:5]:row[56:63] for row in open('data/all_ecoli_genes.txt', 'r')
if row[0:5] in reactions.values}
new_dict = {}
for j,(b, EG) in enumerate(genes.iteritems()):
sock = urllib.urlopen("http://ecocyc.org/ECOLI/NEW-IMAGE?type=GENE&object=%s" %EG)
html = sock.read()
doc = BeautifulSoup(html)
classes = doc.findAll('p')
subunits = 1
for item in classes:
title = item.contents[0].strip()
if title == 'Subunit composition of':
for s in item.findAll('sub'):
try:
subunits = int(s.contents[0].strip())
except ValueError:
continue
break
print j, b, "->", subunits, " subunits"
new_dict[b] = subunits
subunits = pd.DataFrame(new_dict.items())
subunits.to_csv("cache/subunits.csv")
#
#
#
#
#
#
#
#
#
#
#
#
#
# m = 0
# try:
# a = doc.findAll('p')[4]
# except:
# continue
# if 'Subunit composition of' in str(a):
# try:
# a = doc.findAll('sub')
# except:
# continue
#
# print a
# if j >2:
# break
# if 'Subunit composition of' in str(a):
# m = int(str(a.sub).split('<sub>')[1][0])
# break
# if m == 0:
# m =1
# new_dict[b] = m
# print j, EG, "->", m, " subunits"
subunits = pd.DataFrame(new_dict.items())
subunits.to_csv("cache/subunits.csv")
|
Modern man: Is Irish masculinity really in crisis?
Men are in crisis. That, at least, is the message that men receive from the media all day every day. We have lost our role, we struggle with change, we don't talk. Men are confused, lost, and unable to cope in a new and unfamiliar world. We are doomed.
And yet, most men are not even vaguely in crisis. The great majority of men continue to work, play and live their lives without difficulty. Most men welcomed #MeToo with open arms and were utterly horrified at the behaviours it exposed. And the vast majority of us are deeply appalled by what is now termed "toxic masculinity", the use of power and violence by men to subordinate others, mainly women.
Even so, talk of a crisis persists. The figure of the hapless, incapable man rescued by the savvy, capable woman, is now an inevitable feature of advertising, television and movies. Not only are we men socially inept, biologically redundant and soon to be replaced by robots, but now we cannot even find our socks. Is it any wonder there is a sense of crisis?
It is worth taking some time to figure out whether this crisis is real or imagined, rooted in fact or simply fuelled by rhetoric. Facts can help. First, suicide. We know that suicide in Ireland is highly gendered, almost four times more common among men than women. But, globally, suicide has declined by 38pc since 1994. In Ireland, it has fallen by 29pc since 2011 (based on provisional figures for 2017). And the decline among men (32pc) is greater than that among women (17pc), albeit from a higher starting point.
This is not to say that suicide is no longer a problem; it clearly is. Every suicide is one too many and no one is born wanting to die by suicide. But recent figures indicate that despite the infinite tragedy and urgency of suicide, positive change is possible, and figures are moving in the right direction - especially for men.
But what about men more generally, are we becoming more troubled and depressed, or happier and more fulfilled? Every two years, the European Social Survey examines well-being across Europe. In 2008, over 25,000 men in 29 European countries rated their happiness as 7.0 out of 10, where 0 means "extremely unhappy" and 10 means "extremely happy". The average rating among more than 30,000 women was essentially the same, at 6.9. By 2016, as the economic recovery deepened, happiness in both men and women across Europe had increased significantly to 7.4 out of 10.
This is the picture across Europe as a whole, however, and trends in Ireland appear different and generally more stable. In 2008, when male happiness averaged 7.0 out of 10 across Europe, Irish men rated their happiness significantly higher, at 7.4. And in 2016, when men's happiness had risen to 7.4 across Europe, Irish men's happiness remained steady, at 7.3. This steadiness is also apparent among Irish women, who rated their happiness at 7.7 in 2008 and have remained above the European average since then, still at 7.7 in 2016.
Statistics, of course, have their limitations and describing happiness as a number is at best reductive. But trends over time are still interesting and these figures suggest, at the very least, that talk of a general crisis among men is not supported by systematic evidence. This is consistent with my clinical practice as a psychiatrist, where the gender mix appears stable.
And yet many people point to male behaviours that they feel suggest either an uncertainty about male identity or, conversely, a doubling down of certain male characteristics, as if insecure men need to actively reassure themselves about their masculinity. Some weeks ago, on an afternoon walk, I spontaneously popped into a gym in suburban Dublin and witnessed a fine example of this. The gym was packed with tattooed men feverishly practising weights as if their lives depended on it. The combination of deafening music, stale sweat, foul air and oddly energetic male despair sent me gasping for the exit. There was nothing quiet about this desperation.
Disturbing as I found this experience, however, I know that the determination of certain men to express their masculinity through bodily perfection, which is sometimes seen as a modern phenomenon, is nothing new. In 1858, American poet Walt Whitman, writing under the pseudonym Mose Velsor, published a 13-part series in the New York Atlas newspaper, titled "Manly Health and Training". "Manly health!", Whitman wrote, "is there not a kind of charm, a fascinating magic, in the words?"
Whitman took his rhetoric about perfection of the male physique to a level that would make present-day fitness trainers blush, suggesting "that there was a wonderful medicinal effect in the mere personal presence of a man who was perfectly well!"
For Whitman, "The only true and profitable way of reaching the morals of the young is through making them first healthy, clean-blooded and vigorous specimens of men." Plenty of outdoor exercise should, he felt, be underscored by, "an almost exclusive meat diet", "steady reason should assume the helm", and every man should grow a beard, "a great sanitary protection to the throat."
Whitman's own beard was a feral creation of thunderous magnificence, spilling from his face like a mighty waterfall, in stark contrast to the over-manicured facial hair that blights our hipster coffee shops today. If men's beards tell the story of manhood over the eons, then it is a story of insecurity and decline.
Despite the complexities and contradictions in this picture, however, talk of a crisis in masculinity remains a constant feature of human discourse, even if it is poorly supported by systematic evidence at the present time. Perhaps the more interesting question today is why we continually speak of a crisis in masculinity, even when male well-being is steadily improving, and all humans - men and women - now inhabit a world in which many bad things are in decline: poverty, homicide, war death, bullying, racism, homophobia and working hours. Many good things are increasing: democracy, literacy, income and lifespans. While these benefits are by no means equally distributed, and enormous social challenges remain, it is still misleading to speak of a particular crisis among men.
The last great iteration of this archetypal "male crisis" was around 2000, when Irish psychiatrist Anthony Clare wrote his book On Men. Clare argued that men were facing great changes in their roles that they were finding difficult to navigate, but that there was a way forward through tolerance, dialogue and learning. Clare's conclusions remain true today, albeit that the male crisis of which we speak now appears more rhetorical than real.
But while most men are not in crisis at the moment, perhaps this talk of crisis can still be put to good use. If public discussion of male well-being encourages individual men who are truly in crisis to seek the help they need, then the over-blown rhetoric will have served a useful purpose. If the talk of crisis helps focus attention on particular communities of men who are struggling with change within themselves or in society, then that, too, will be a genuine positive to emerge from the melee.
Most of all, it is my hope that this renewed talk of a crisis in masculinity will highlight the fact that men not only struggle with issues like identity, relationships and roles, but can come through these struggles intact or even stronger - just like women can. This, perhaps, could be the greatest lesson from this most recent iteration of the "crisis" in men: if we can stop analysing everything through the flawed prism of gender, we will see that the common humanity binding men and women together is far greater than anything that divides us. Failing to understand this would, perhaps, be the greatest crisis of all.
|
#!/usr/bin/env python
######################################################################
#
# Email Status of SolusVM VPS's
# Designed as a Cron script
#
######################################################################
#
# Example
#
#Node0
# bw:
# 15.5GB/1.0TB
# [#---------------------------------------] 2%
#
#Node1
# bw:
# 2.6GB/1000.0GB
# [----------------------------------------] 0%
#
#Node2
# hdd:
# 4.9GB/30.0GB
# [######----------------------------------] 16%
# bw:
# 8.3GB/1.0TB
# [----------------------------------------] 1%
#
#Node3
# hdd:
# 23.7GB/50.0GB
# [###################---------------------] 47%
# bw:
# 372.8GB/500.0GB
# [##############################----------] 75%
#
#
######################################################################
###### Settings start ################################################
######################################################################
# Hosts to check the status of (in order)
# Put as many as your want
# Each entry describes one SolusVM control panel account to poll.
HOSTS = [
    {
        'key': "XXXXX-00000-XXXXX",  # API Key
        'hash': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",  # API hash
        'url': "https://usadmin.inceptionhosting.com",  # API host
        'name': "Node0"  # Name
    },
    {
        'key': "XXXXX-00000-XXXXX",  # API Key
        'hash': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",  # API hash
        'url': "https://solus.fliphost.net",  # API host
        'name': "Node1"  # Name
    },
    {
        'key': "XXXXX-00000-XXXXX",  # API Key
        'hash': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",  # API hash
        'url': "http://master.weloveservers.net",  # API host
        'name': "Node2"  # Name
    },
    {
        'key': "XXXXX-00000-XXXXX",  # API Key
        'hash': "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",  # API hash
        'url': "https://vpscp.ramnode.com",  # API host
        'name': "Node3"  # Name
    }
]
# Email Settings
# Uses the local email SMTP server, so watch out
EMAIL_FROM = "root@example.com"
EMAIL_TO = "admin@example.com"
EMAIL_SUBJECT = "Server Status Report"
# Resources to report per host. Possible values: "bw", "hdd", "mem" (in order)
CHECK = ["hdd", "bw", "mem"]
# Do not show blank values (Usually due to the server being a KVM/XEN)
REMOVE_BLANKS = True
# Steps (the size of the status bars, in characters)
STEPS = 40
######################################################################
###### Settings end ##################################################
######################################################################
import subprocess
import re
import os
import smtplib
from email.mime.text import MIMEText
######################################################################
###### Functions start ###############################################
######################################################################
def run(args):
    """Run *args* through the shell and return its captured stdout (bytes)."""
    proc = subprocess.Popen([args], stdout=subprocess.PIPE, shell=True)
    # stderr is not captured; only stdout is needed for the report.
    (out, _err) = proc.communicate()
    return out
def parseStatus(response):
    """Parse the flat XML-ish SolusVM status response into a dict.

    The API returns tags like ``<status>online</status><statusmsg>...``;
    values are extracted positionally, so the tag order must match the API's.
    (The parameter used to shadow the builtin ``str``, and indexing the lazy
    ``filter`` object broke on Python 3 -- both fixed.)
    """
    parser = re.compile(r'</*\w+>')
    fields = [part for part in parser.split(response) if part]
    return {
        'status': fields[0],
        'statusmsg': fields[1],
        'vmstat': fields[2],
        'hostname': fields[3],
        'ipaddress': fields[4],
        'hdd': parseType(fields[5]),
        'bw': parseType(fields[6]),
        'mem': parseType(fields[7]),
    }
def parseType(quad):
    """Parse a ``max,used,left,percent`` resource quadruple into a dict
    with human-readable sizes.

    The 'precent' key spelling is kept: callers index it by that name.
    (Compiling a regex just to split on ',' was replaced by str.split, and
    the Python-3-breaking indexed ``filter`` object by a comprehension.)
    """
    fields = [part for part in quad.split(',') if part]
    return {
        'max': sizeOf(fields[0]),
        'used': sizeOf(fields[1]),
        'left': sizeOf(fields[2]),
        'precent': fields[3],
    }
def pullStatus(host):
    """Query the SolusVM client API for *host* (a HOSTS entry) and return
    the parsed status dict."""
    result = run(
        "curl -s \"" + host['url'] +
        "/api/client/command.php?key=" + host['key'] +
        "&hash=" + host['hash'] +
        "&action=status&bw=true&mem=true&hdd=true\""
    )
    return parseStatus(result)
def sizeOf(value):
    """Convert a byte count given as a string into a human-readable size.

    Based on http://stackoverflow.com/a/1094933/2001966
    (The parameter used to shadow the builtin ``str``.)
    """
    num = float(value)
    for unit in ['bytes', 'KB', 'MB', 'GB']:
        if num < 1024.0:
            return "%3.1f%s" % (num, unit)
        num /= 1024.0
    return "%3.1f%s" % (num, 'TB')
def saveHost(host):
    """Build the report section for one host: a used/max line and a status
    bar for each resource listed in CHECK.

    (Local names used to shadow the builtins ``str`` and ``type``.)
    """
    status = pullStatus(host)
    report = ""
    for resource in CHECK:
        # Skip resources reported as zero (typically hdd/mem on KVM/Xen).
        if not REMOVE_BLANKS or status[resource]['used'] != "0.0bytes":
            report += " " + resource + ":" + "\n"
            report += " " + status[resource]['used'] + "/" + status[resource]['max'] + "\n"
            report += " " + statusBar(status[resource]['precent']) + " " + status[resource]['precent'] + "%" + "\n"
    return report
def statusBar(precent):
    """Render *precent* (a 0-100 numeric string) as a fixed-width text bar,
    e.g. ``[####----]`` with STEPS total characters between the brackets."""
    filled = int(round(STEPS * (float(precent) / 100)))
    # String repetition replaces the old char-by-char concatenation loops.
    return "[" + "#" * filled + "-" * (STEPS - filled) + "]"
######################################################################
###### Functions end #################################################
######################################################################
# Build the full report (one section per host) and email it via the local
# SMTP server.  (The accumulator used to shadow the builtin ``str``.)
body = ""
for host in HOSTS:
    body += host['name'] + "\n"
    body += saveHost(host) + "\n"

msg = MIMEText(body)
msg['Subject'] = EMAIL_SUBJECT
msg['From'] = EMAIL_FROM
msg['To'] = EMAIL_TO

server = smtplib.SMTP("localhost", 25)
server.sendmail(EMAIL_FROM, EMAIL_TO, msg.as_string())
server.quit()
|
Andy is Director of Business Development at East West. He leads all marketing and business development efforts including strategy, branding, messaging, communications, public relations and the early stages of new customer development. Andy also works with the Company’s independent sales reps and the inside sales team to generate strategic opportunities. Andy is a graduate of the University of Connecticut, with 20 years of marketing and global supply chain experience. Previously, Andy created and executed business development strategies for GE Capital, Georgia-Pacific and Spend Management Experts.
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
from oslo_config import cfg
from oslo_log import log as logging
import routes
import six
import stevedore
import webob.dec
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
from nova.i18n import _LC
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova.i18n import translate
from nova import notifications
from nova import utils
from nova import wsgi as base_wsgi
# Options controlling the (experimental) v3 API; registered below under
# the [osapi_v3] group.
api_opts = [
    cfg.BoolOpt('enabled',
                default=False,
                help='Whether the V3 API is enabled or not'),
    cfg.ListOpt('extensions_blacklist',
                default=[],
                help='A list of v3 API extensions to never load. '
                     'Specify the extension aliases here.'),
    cfg.ListOpt('extensions_whitelist',
                default=[],
                help='If the list is not empty then a v3 API extension '
                     'will only be loaded if it exists in this list. Specify '
                     'the extension aliases here.')
]
api_opts_group = cfg.OptGroup(name='osapi_v3', title='API v3 Options')

# Module-level logger and the global config object with the v3 options
# registered in their group.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_group(api_opts_group)
CONF.register_opts(api_opts, api_opts_group)

# List of v3 API extensions which are considered to form
# the core API and so must be present
# TODO(cyeoh): Expand this list as the core APIs are ported to V3
API_V3_CORE_EXTENSIONS = set(['os-consoles',
                              'extensions',
                              'os-flavor-extra-specs',
                              'os-flavor-manage',
                              'flavors',
                              'ips',
                              'os-keypairs',
                              'os-flavor-access',
                              'server-metadata',
                              'servers',
                              'versions'])
class FaultWrapper(base_wsgi.Middleware):
    """Calls down the middleware stack, making exceptions into faults."""

    # Lazily-built map of HTTP status code -> webob HTTPError subclass.
    _status_to_type = {}

    @staticmethod
    def status_to_type(status):
        """Return an instance of the HTTPError subclass for *status*,
        defaulting to 500 when the code is unknown."""
        if not FaultWrapper._status_to_type:
            for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
                FaultWrapper._status_to_type[clazz.code] = clazz
        return FaultWrapper._status_to_type.get(
            status, webob.exc.HTTPInternalServerError)()

    def _error(self, inner, req):
        """Log *inner* and wrap it in a wsgi.Fault response for *req*."""
        LOG.exception(_LE("Caught error: %s"), six.text_type(inner))
        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500
        msg_dict = dict(url=req.url, status=status)
        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            user_locale = req.best_match_language()
            inner_msg = translate(inner.message, user_locale)
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            inner_msg)
        notifications.send_api_fault(req.url, status, inner)
        return wsgi.Fault(outer)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            return self._error(ex, req)
class APIMapper(routes.Mapper):
    """Mapper that tolerates the empty URL and restricts format suffixes."""

    def routematch(self, url=None, environ=None):
        # routes.Mapper does not handle '' directly; match it explicitly.
        if url == "":
            result = self._match("", environ)
            return result[0], result[1]
        return routes.Mapper.routematch(self, url, environ)

    def connect(self, *args, **kargs):
        # NOTE(vish): Default the format part of a route to only accept json
        #             and xml so it doesn't eat all characters after a '.'
        #             in the url.
        kargs.setdefault('requirements', {})
        if not kargs['requirements'].get('format'):
            kargs['requirements']['format'] = 'json|xml'
        return routes.Mapper.connect(self, *args, **kargs)
class ProjectMapper(APIMapper):
    """Mapper that prefixes every resource path with '{project_id}/'."""

    def resource(self, member_name, collection_name, **kwargs):
        """Register a resource under the project id, nesting under any parent."""
        if 'parent_resource' not in kwargs:
            prefix = '{project_id}/'
        else:
            parent = kwargs['parent_resource']
            prefix = '{project_id}/%s/:%s_id' % (parent['collection_name'],
                                                 parent['member_name'])
        kwargs['path_prefix'] = prefix
        routes.Mapper.resource(self, member_name,
                               collection_name,
                               **kwargs)
class PlainMapper(APIMapper):
    """Mapper without the project-id prefix; nests only under a parent."""

    def resource(self, member_name, collection_name, **kwargs):
        """Register a resource, prefixing the path only for child resources."""
        if 'parent_resource' in kwargs:
            parent = kwargs['parent_resource']
            kwargs['path_prefix'] = '%s/:%s_id' % (parent['collection_name'],
                                                   parent['member_name'])
        routes.Mapper.resource(self, member_name,
                               collection_name,
                               **kwargs)
class APIRouter(base_wsgi.Router):
    """Routes requests on the OpenStack API to the appropriate controller
    and method.
    """

    ExtensionManager = None  # override in subclasses

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
        return cls()

    def __init__(self, ext_mgr=None, init_only=None):
        """Build the route mapper from core routes plus extensions.

        :param ext_mgr: extension manager; defaults to ``self.ExtensionManager()``
        :param init_only: optional whitelist of resource collections to set up
        """
        if ext_mgr is None:
            if self.ExtensionManager:
                ext_mgr = self.ExtensionManager()
            else:
                raise Exception(_("Must specify an ExtensionManager class"))

        mapper = ProjectMapper()
        self.resources = {}
        # Core routes are registered first so extension resources can look
        # up (and inherit from) already-registered resources.
        self._setup_routes(mapper, ext_mgr, init_only)
        self._setup_ext_routes(mapper, ext_mgr, init_only)
        self._setup_extensions(ext_mgr)
        super(APIRouter, self).__init__(mapper)

    def _setup_ext_routes(self, mapper, ext_mgr, init_only):
        """Register routes for resources added by API extensions."""
        for resource in ext_mgr.get_resources():
            LOG.debug('Extending resource: %s',
                      resource.collection)

            if init_only is not None and resource.collection not in init_only:
                continue

            inherits = None
            if resource.inherits:
                # Reuse the already-registered resource this extension
                # inherits from; share its controller when none is given.
                inherits = self.resources.get(resource.inherits)
                if not resource.controller:
                    resource.controller = inherits.controller
            wsgi_resource = wsgi.Resource(resource.controller,
                                          inherits=inherits)
            self.resources[resource.collection] = wsgi_resource

            kargs = dict(
                controller=wsgi_resource,
                collection=resource.collection_actions,
                member=resource.member_actions)

            if resource.parent:
                kargs['parent_resource'] = resource.parent

            mapper.resource(resource.collection, resource.collection, **kargs)

            if resource.custom_routes_fn:
                resource.custom_routes_fn(mapper, wsgi_resource)

    def _setup_extensions(self, ext_mgr):
        """Attach controller extensions (extra actions) to existing resources."""
        for extension in ext_mgr.get_controller_extensions():
            collection = extension.collection
            controller = extension.controller

            msg_format_dict = {'collection': collection,
                               'ext_name': extension.extension.name}

            if collection not in self.resources:
                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
                                'resource %(collection)s: No such resource'),
                            msg_format_dict)
                continue

            LOG.debug('Extension %(ext_name)s extended resource: '
                      '%(collection)s',
                      msg_format_dict)

            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)

    def _setup_routes(self, mapper, ext_mgr, init_only):
        """Register the core (non-extension) routes; subclasses implement."""
        raise NotImplementedError()
class APIRouterV21(base_wsgi.Router):
    """Routes requests on the OpenStack v2.1 API to the appropriate controller
    and method.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one."""
        return cls()

    @staticmethod
    def api_extension_namespace():
        """Return the stevedore namespace the v2.1 extensions load from."""
        # TODO(oomichi): This namespaces will be changed after moving all v3
        # APIs to v2.1.
        return 'nova.api.v3.extensions'

    def __init__(self, init_only=None, v3mode=False):
        """Load API extensions and build the route mapper.

        :param init_only: optional whitelist of extension aliases to load
        :param v3mode: when True, use plain (non project-id-prefixed) routes
        """
        # TODO(cyeoh): bp v3-api-extension-framework. Currently load
        # all extensions but eventually should be able to exclude
        # based on a config file
        # TODO(oomichi): We can remove v3mode argument after moving all v3 APIs
        # to v2.1.
        def _check_load_extension(ext):
            # stevedore check_func: an extension is loaded only when it
            # passes the init_only, whitelist and blacklist filters; the
            # final decision is delegated to self._register_extension.
            if (self.init_only is None or ext.obj.alias in
                self.init_only) and isinstance(ext.obj,
                                               extensions.V3APIExtensionBase):

                # Check whitelist is either empty or if not then the extension
                # is in the whitelist
                if (not CONF.osapi_v3.extensions_whitelist or
                        ext.obj.alias in CONF.osapi_v3.extensions_whitelist):

                    # Check the extension is not in the blacklist
                    if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
                        return self._register_extension(ext)
            return False

        if not CONF.osapi_v3.enabled:
            LOG.info(_LI("V3 API has been disabled by configuration"))
            # NOTE(review): returning here skips the base-class __init__,
            # leaving a partially constructed router — presumably callers
            # never route to a disabled API; confirm.
            return

        self.init_only = init_only
        LOG.debug("v3 API Extension Blacklist: %s",
                  CONF.osapi_v3.extensions_blacklist)
        LOG.debug("v3 API Extension Whitelist: %s",
                  CONF.osapi_v3.extensions_whitelist)

        # Warn about contradictory configuration; as coded above, the
        # blacklist wins over the whitelist.
        in_blacklist_and_whitelist = set(
            CONF.osapi_v3.extensions_whitelist).intersection(
                CONF.osapi_v3.extensions_blacklist)
        if len(in_blacklist_and_whitelist) != 0:
            LOG.warning(_LW("Extensions in both blacklist and whitelist: %s"),
                        list(in_blacklist_and_whitelist))

        self.api_extension_manager = stevedore.enabled.EnabledExtensionManager(
            namespace=self.api_extension_namespace(),
            check_func=_check_load_extension,
            invoke_on_load=True,
            invoke_kwds={"extension_info": self.loaded_extension_info})

        if v3mode:
            mapper = PlainMapper()
        else:
            mapper = ProjectMapper()

        self.resources = {}

        # NOTE(cyeoh) Core API support is rewritten as extensions
        # but conceptually still have core
        if list(self.api_extension_manager):
            # NOTE(cyeoh): Stevedore raises an exception if there are
            # no plugins detected. I wonder if this is a bug.
            self._register_resources_check_inherits(mapper)
            self.api_extension_manager.map(self._register_controllers)

        missing_core_extensions = self.get_missing_core_extensions(
            self.loaded_extension_info.get_extensions().keys())
        if not self.init_only and missing_core_extensions:
            LOG.critical(_LC("Missing core API extensions: %s"),
                         missing_core_extensions)
            raise exception.CoreAPIMissing(
                missing_apis=missing_core_extensions)

        LOG.info(_LI("Loaded extensions: %s"),
                 sorted(self.loaded_extension_info.get_extensions().keys()))
        super(APIRouterV21, self).__init__(mapper)

    def _register_resources_list(self, ext_list, mapper):
        """Register the resources of every extension in *ext_list*."""
        for ext in ext_list:
            self._register_resources(ext, mapper)

    def _register_resources_check_inherits(self, mapper):
        """Register resources in two passes: extensions without 'inherits'
        first, so inheriting resources can find their parents registered."""
        ext_has_inherits = []
        ext_no_inherits = []

        for ext in self.api_extension_manager:
            for resource in ext.obj.get_resources():
                if resource.inherits:
                    ext_has_inherits.append(ext)
                    break
            else:
                ext_no_inherits.append(ext)

        self._register_resources_list(ext_no_inherits, mapper)
        self._register_resources_list(ext_has_inherits, mapper)

    @staticmethod
    def get_missing_core_extensions(extensions_loaded):
        """Return the core extension aliases absent from *extensions_loaded*."""
        extensions_loaded = set(extensions_loaded)
        missing_extensions = API_V3_CORE_EXTENSIONS - extensions_loaded
        return list(missing_extensions)

    @property
    def loaded_extension_info(self):
        """Subclasses return the object tracking loaded extensions."""
        raise NotImplementedError()

    def _register_extension(self, ext):
        """Subclasses register *ext*; the return value feeds stevedore's
        check_func (see _check_load_extension above)."""
        raise NotImplementedError()

    def _register_resources(self, ext, mapper):
        """Register resources defined by the extensions

        Extensions define what resources they want to add through a
        get_resources function
        """
        handler = ext.obj
        LOG.debug("Running _register_resources on %s", ext.obj)

        for resource in handler.get_resources():
            LOG.debug('Extended resource: %s', resource.collection)

            inherits = None
            if resource.inherits:
                inherits = self.resources.get(resource.inherits)
                if not resource.controller:
                    resource.controller = inherits.controller
            wsgi_resource = wsgi.ResourceV21(resource.controller,
                                             inherits=inherits)
            self.resources[resource.collection] = wsgi_resource

            kargs = dict(
                controller=wsgi_resource,
                collection=resource.collection_actions,
                member=resource.member_actions)

            if resource.parent:
                kargs['parent_resource'] = resource.parent

            # non core-API plugins use the collection name as the
            # member name, but the core-API plugins use the
            # singular/plural convention for member/collection names
            if resource.member_name:
                member_name = resource.member_name
            else:
                member_name = resource.collection
            mapper.resource(member_name, resource.collection,
                            **kargs)

            if resource.custom_routes_fn:
                resource.custom_routes_fn(mapper, wsgi_resource)

    def _register_controllers(self, ext):
        """Register controllers defined by the extensions

        Extensions define what resources they want to add through
        a get_controller_extensions function
        """
        handler = ext.obj
        LOG.debug("Running _register_controllers on %s", ext.obj)

        for extension in handler.get_controller_extensions():
            ext_name = extension.extension.name
            collection = extension.collection
            controller = extension.controller

            if collection not in self.resources:
                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
                                'resource %(collection)s: No such resource'),
                            {'ext_name': ext_name, 'collection': collection})
                continue

            LOG.debug('Extension %(ext_name)s extending resource: '
                      '%(collection)s',
                      {'ext_name': ext_name, 'collection': collection})

            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)
|
Whether your goals involve career development through the acquisition of new skills and knowledge or personal enrichment through a hobby or passion, continuing education is essential to all aspects of life. We provide the tools necessary for these pursuits — and for proclaiming yourself a lifelong learner!
Current students: Please direct questions about your lessons, quizzes, assignments, and other course materials to your instructor or course administrator. You can use the course discussion areas for this purpose.
If you have questions about accessing the online classroom, retrieving your lessons, changing your password, correcting your e-mail address, obtaining progress reports or completion letters, receiving an extension, or any other administrative tasks, visit our Help page.
New or potential students: If you have questions about course content, prerequisites, requirements, follow-ups, or instructors, you'll find the information you need by locating the courses that interest you. Click the course title to review the course details.
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
a simple multilayer perceptron
"""
from __future__ import absolute_import
from tvm import relay
from .init import create_workload
def get_net(batch_size,
            num_classes=10,
            image_shape=(1, 28, 28),
            dtype="float32"):
    """Construct a simple multilayer perceptron.

    Parameters
    ----------
    batch_size : int
        The batch size used in the model
    num_classes : int, optional
        Number of classes
    image_shape : tuple, optional
        The input image shape
    dtype : str, optional
        The data type

    Returns
    -------
    net : relay.Function
        The dataflow.
    """
    inputs = relay.var("data",
                       shape=(batch_size,) + image_shape,
                       dtype=dtype)
    feat = relay.nn.batch_flatten(inputs)
    # (name, units, apply_relu) for each fully-connected layer; the final
    # layer feeds softmax directly, so no activation.
    layer_specs = [("fc1", 128, True),
                   ("fc2", 64, True),
                   ("fc3", num_classes, False)]
    for name, units, apply_relu in layer_specs:
        feat = relay.nn.dense(feat, relay.var("%s_weight" % name), units=units)
        feat = relay.nn.bias_add(feat, relay.var("%s_bias" % name), axis=-1)
        if apply_relu:
            feat = relay.nn.relu(feat)
    out = relay.nn.softmax(data=feat)
    return relay.Function(relay.analysis.free_vars(out), out)
def get_workload(batch_size,
                 num_classes=10,
                 image_shape=(1, 28, 28),
                 dtype="float32"):
    """Get benchmark workload for a simple multilayer perceptron.

    Parameters
    ----------
    batch_size : int
        The batch size used in the model
    num_classes : int, optional
        Number of classes
    image_shape : tuple, optional
        The input image shape
    dtype : str, optional
        The data type

    Returns
    -------
    mod : tvm.relay.Module
        The relay module that contains a mlp network.
    params : dict of str to NDArray
        The parameters.
    """
    return create_workload(
        get_net(batch_size, num_classes, image_shape, dtype))
|
1. What is a Genuine Brand?
A Genuine Brand is the internalized sum of all impressions received by customers and consumers resulting in a distinctive position in their MIND’S EYE based on perceived emotional and functional benefits.
2. What is brand equity?
We define brand equity as a brand’s valuation based on the totality of its perceptions including the relative equity of its products and services, financial performance, customer loyalty, satisfaction and esteem, etc.
3. What is the difference between a BrandPromise and a mission statement?
The basic difference is one of perspective. A mission statement generally articulates an organization’s internal perspective regarding direction and objectives; simply put — “What It Does.” On the other hand, the BrandPromise is written primarily with the customers’ perspective as a focus, articulating the essence of the brand’s benefits (functional and emotional) that current and potential customers should expect to receive when experiencing a brand’s products and services.
4. How does a Genuine Brand create paradigm shifts?
A Genuine Brand creates paradigm shifts by leveraging the brand, following a disciplined brand process, and by utilizing the entire brand playing field. It does this by creating brand value for the customer, using the BrandStrategy Doctrine to drive the business and the budgets, and consistently focusing on the distinctive benefits to the customer. The goal is to enhance perceived value and preference in the mind’s eye of the target audience.
5. We need to revitalize our brand. How will BrandStrategy understand our business and how should we proceed?
Our diverse and varied experience around the globe has acquainted us with most industries. BrandStrategy partners with its clients to fully understand the business and works side by side with you to outline a comprehensive plan of action for success.
6. We are getting ready to kick off our annual strategic planning retreat. How should the strategy for our brand be integrated?
The desired perception for the organization’s brand in the future should be the focus of all strategic thinking. The brand drives all decision making and guides budget considerations, not the other way around. We can facilitate your retreat to accomplish the desired goals and develop a consensus for your future brand direction.
7. We have never used an expert. How is BrandStrategy, Inc. different from other marketing and brand consultants?
Our advisory approach and proprietary brand science provides your executive team with the “best demonstrated” brand practices in order to create your own destiny and enhance your success. Our practical approach produces real results.
8. We are a small company; can we develop brand expertise internally?
Of course you can. As advisors, we work with you to guide, develop and build brand expertise within your organization, consistent with your budget. As a result of our methodology, you own the framework with which to build and enhance your brand’s equity.
9. We need a speaker who will really energize our annual convention and raise the level of enthusiasm and excitement of our management team. Can you do that?
Duane Knapp is frequently called upon as a “keynote” speaker on the subject of Genuine Brands. His willingness to share his passion and thoughts on building Genuine Brands with others has been welcomed by hundreds of executive teams, associations, and organizations. Duane engages his audience in a highly effective manner, combining “real-world” expertise with an entertaining delivery, leaving attendees inspired. A DVD sampling of his presentation is available upon request.
|
# Paperwork - Using OCR to grep dead trees the easy way
# Copyright (C) 2014 Jerome Flesch
#
# Paperwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Paperwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paperwork. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject
from paperwork.frontend.util.canvas import Canvas
class Animator(GObject.GObject):
    """Drives one drawer attribute through a sequence of values, one value
    per canvas tick.

    Emits 'animator-start' just before the first value is applied and
    'animator-end' once the value list is exhausted.
    """

    __gsignals__ = {
        'animator-start': (GObject.SignalFlags.RUN_LAST, None, ()),
        'animator-end': (GObject.SignalFlags.RUN_LAST, None, ()),
    }

    def __init__(self,
                 drawer,
                 attr_name, attr_values,  # one value per canvas tick
                 canvas=None):
        GObject.GObject.__init__(self)
        self.drawer = drawer
        self.attr_name = attr_name
        self.attr_values = attr_values
        self.canvas = canvas
        # Guard flags so each signal is emitted exactly once.
        self.started = False
        self.stopped = False
        # Remember the drawer's current area so it can be redrawn (erased)
        # after the attribute changes.
        self.previous_pos = self.drawer.relative_position
        self.previous_size = self.drawer.relative_size

    def set_canvas(self, canvas):
        # Late binding: the animator may be created before the drawer is
        # attached to a canvas.
        self.canvas = canvas

    def on_tick(self):
        """Apply the next value to the drawer and request redraws."""
        if len(self.attr_values) <= 0:
            if not self.stopped:
                self.stopped = True
                self.emit('animator-end')
            return
        if not self.started:
            self.started = True
            self.emit('animator-start')
        setattr(self.drawer, self.attr_name, self.attr_values[0])
        self.attr_values = self.attr_values[1:]
        # Redraw the area the drawer previously occupied, then the area it
        # occupies now, so no stale pixels are left behind.
        self.canvas.redraw((self.previous_pos, self.previous_size))
        self.previous_pos = self.drawer.relative_position
        self.previous_size = self.drawer.relative_size
        self.canvas.redraw((self.previous_pos, self.previous_size))
class LinearSimpleAnimator(Animator):
    """Animator interpolating a scalar drawer attribute linearly over time."""

    def __init__(self, drawer,
                 target_value,
                 time_length,  # ms
                 attr_name='angle',
                 canvas=None):
        nb_steps = int(time_length / Canvas.TICK_INTERVAL)
        assert(nb_steps)
        start = getattr(drawer, attr_name)
        step = (target_value - start) / nb_steps
        steps = [start + (idx * step) for idx in range(0, nb_steps + 1)]
        # Rounding may leave the last computed value short of the target;
        # append the exact target so the animation always lands on it.
        if steps[-1] != target_value:
            steps.append(target_value)
        Animator.__init__(self, drawer, attr_name, steps, canvas)


GObject.type_register(LinearSimpleAnimator)
class LinearCoordAnimator(Animator):
    """Animator interpolating a 2D coordinate attribute linearly over time."""

    def __init__(self, drawer,
                 target_coord,
                 time_length,  # ms
                 attr_name='position',
                 canvas=None):
        nb_ticks = int(time_length / Canvas.TICK_INTERVAL)
        assert(nb_ticks)
        origin = getattr(drawer, attr_name)
        step_x = (target_coord[0] - origin[0]) / nb_ticks
        step_y = (target_coord[1] - origin[1]) / nb_ticks
        coords = [(origin[0] + (idx * step_x),
                   origin[1] + (idx * step_y))
                  for idx in range(0, nb_ticks + 1)]
        Animator.__init__(self, drawer, attr_name, coords, canvas)


GObject.type_register(LinearCoordAnimator)
|
A video capturing a bikepacking adventure into Namadgi National Park and over the Brindabella ranges (Australia) to explore the back country, soak up some mountain air, and enjoy a pedal with friends. Plus, a look at Hunt Bikes 29+ bikepacking frame.
Always looking for an excuse to get out into the bush, and having recently built up the Hunt Bikes prototypes, a test bikepacking trip was in order. Myself and two mates mapped out a route and set off for a three day ride that turned out to be the wettest, muddiest and toughest ride any of us had ever been on, good prototype testing though.
During our wet and foggy ride through the mountains we passed the historic Oldfield’s Hut and promised to return with some dry socks and time to enjoy a campfire under the stars.
When we heard about the Swift Campout event less than a month later, it seemed like the perfect excuse to head back to the mountains. We put the word out, gathered a crew and the ride was on.
With the intent of having more fun and less pain cave, this time we mapped a shorter route to the hut coming in from Namadgi National Park. Although shorter, the ride still involved climbing over the Brindabella Ranges and consequently, a bit of hike a bike was involved.
Despite a few steep climbs the ride was short enough to make the hut before dark with time to collect plenty of fire wood. The fire was lit, an assortment of dehydrated and sandwich bagged meals were cooked up, then we all sat back to enjoy the stars while passing around a bottle of local red.
The night’s sleep was broken by a few visits from the resident possum, who decided he couldn’t wait for breakfast to eat Danny’s cereal. After finally getting some sleep we arose to the frost-covered landscape, got the fire going, brewed a morning cuppa, and packed up the bikes to enjoy a perfect day’s ride home.
Hunt Bikes is a new bikepacking focused company founded by myself Daniel Hunt. Home base is Canberra Australia.
The company is a culmination of a lifelong passion for bikes and a love of the outdoors combined with my background in Mechanical Engineering and Sustainability.
Currently in the works is the first Hunt Bikes frame and fork, a 29+ double-butted 4130 chromoly ride that’s designed for bikepacking and singletrack. The frame and forks are now in their final revision and the design is essentially ready for production, pending a little more testing.
The first limited batch of frames will be available for pre-sale next week on our website. An earlybird price will be available for anyone willing to wait a little while for the first batch to come in. Also, people ordering a frame early will be helping to “kickstart” Hunt Bikes into becoming a legitimate bikepacking bike company and the first orders will get a numbered frame that corresponds to their order number. The first 10 orders will get an extra bonus just to say thanks!
In addition to the frame & forks, I’ve been working closely with an experienced bike bag maker and hope to have a full range of bikepacking bags available on the website by the end of September.
The plan is to slowly grow the range of bikepacking gear so stay tuned for more to come.
Follow Hunt Bikes on Instagram.
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
defines few algorithms, that deals with different properties of std containers
"""
import types
import string
import calldef
import cpptypes
import namespace
import templates
import type_traits
import class_declaration
std_namespaces = ( 'std', 'stdext', '__gnu_cxx' )
class defaults_eraser:
    """Rewrites std container names, removing default template arguments.

    E.g. 'std::vector<int, std::allocator<int> >' becomes 'std::vector<int>'.
    All methods are static; the class is used purely as a namespace. The
    erase_* helpers return None when the name does not match their pattern.
    """

    @staticmethod
    def normalize( type_str ):
        """Return *type_str* with all spaces removed, for layout-insensitive
        comparisons."""
        return type_str.replace( ' ', '' )

    @staticmethod
    def replace_basic_string( cls_name ):
        """Replace verbose std::basic_string instantiations with the
        'std::string' / 'std::wstring' shorthands."""
        strings = {
            'std::string' : ( 'std::basic_string<char,std::char_traits<char>,std::allocator<char> >'
                              , 'std::basic_string<char, std::char_traits<char>, std::allocator<char> >' )
            , 'std::wstring' : ( 'std::basic_string<wchar_t,std::char_traits<wchar_t>,std::allocator<wchar_t> >'
                                 , 'std::basic_string<wchar_t, std::char_traits<wchar_t>, std::allocator<wchar_t> >' ) }
        new_name = cls_name
        # .items() instead of .iteritems(): identical iteration on Python 2,
        # and keeps this helper usable on Python 3 as well.
        for short_name, long_names in strings.items():
            for lname in long_names:
                new_name = new_name.replace( lname, short_name )
        return new_name

    class recursive_impl:
        """Helpers that strip known prefixes/suffixes, recurse on the bare
        name and re-attach the stripped text afterwards."""

        @staticmethod
        def decorated_call_prefix( cls_name, text, doit ):
            """If *cls_name* starts with *text*, remove it, apply *doit* to
            the remainder and put *text* back in front of the result."""
            has_text = cls_name.startswith( text )
            if has_text:
                cls_name = cls_name[ len( text ): ]
            answer = doit( cls_name )
            if has_text:
                answer = text + answer
            return answer

        @staticmethod
        def decorated_call_suffix( cls_name, text, doit ):
            """If *cls_name* ends with *text*, remove it, apply *doit* to
            the remainder and append *text* back to the result."""
            has_text = cls_name.endswith( text )
            if has_text:
                # Bug fix: drop the suffix from the END of the string. The
                # previous slice cls_name[: len( text )] kept only the first
                # len(text) characters instead of removing the trailing ones.
                cls_name = cls_name[: -len( text )]
            answer = doit( cls_name )
            if has_text:
                answer = answer + text
            return answer

        @staticmethod
        def erase_call( cls_name ):
            """Dispatch to the matching container traits' remove_defaults."""
            # find_container_traits is defined later in this module.
            global find_container_traits
            c_traits = find_container_traits( cls_name )
            if not c_traits:
                return cls_name
            return c_traits.remove_defaults( cls_name )

        @staticmethod
        def erase_recursive( cls_name ):
            """Peel off std/stdext/__gnu_cxx namespace prefixes and const
            qualifiers, run erase_call on the bare name, then re-attach
            everything on the way out."""
            ri = defaults_eraser.recursive_impl
            no_std = lambda cls_name: ri.decorated_call_prefix( cls_name, 'std::', ri.erase_call )
            no_stdext = lambda cls_name: ri.decorated_call_prefix( cls_name, 'stdext::', no_std )
            no_gnustd = lambda cls_name: ri.decorated_call_prefix( cls_name, '__gnu_cxx::', no_stdext )
            no_const = lambda cls_name: ri.decorated_call_prefix( cls_name, 'const ', no_gnustd )
            no_end_const = lambda cls_name: ri.decorated_call_suffix( cls_name, ' const', no_const )
            return no_end_const( cls_name )

    @staticmethod
    def erase_recursive( cls_name ):
        """Public entry point; delegates to recursive_impl.erase_recursive."""
        return defaults_eraser.recursive_impl.erase_recursive( cls_name )

    @staticmethod
    def erase_allocator( cls_name, default_allocator='std::allocator' ):
        """Remove a defaulted allocator: 'C<T, allocator<T> >' -> 'C<T>'."""
        cls_name = defaults_eraser.replace_basic_string( cls_name )
        c_name, c_args = templates.split( cls_name )
        if 2 != len( c_args ):
            return
        value_type = c_args[0]
        tmpl = string.Template( "$container< $value_type, $allocator<$value_type> >" )
        tmpl = tmpl.substitute( container=c_name, value_type=value_type, allocator=default_allocator )
        if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( tmpl ):
            return templates.join( c_name, [defaults_eraser.erase_recursive( value_type )] )

    @staticmethod
    def erase_container( cls_name, default_container_name='std::deque' ):
        """Remove a defaulted underlying container (queue/stack adaptors)."""
        cls_name = defaults_eraser.replace_basic_string( cls_name )
        c_name, c_args = templates.split( cls_name )
        if 2 != len( c_args ):
            return
        value_type = c_args[0]
        dc_no_defaults = defaults_eraser.erase_recursive( c_args[1] )
        if defaults_eraser.normalize( dc_no_defaults ) \
           != defaults_eraser.normalize( templates.join( default_container_name, [value_type] ) ):
            return
        return templates.join( c_name, [defaults_eraser.erase_recursive( value_type )] )

    @staticmethod
    def erase_container_compare( cls_name, default_container_name='std::vector', default_compare='std::less' ):
        """Remove defaulted container and comparator (priority_queue)."""
        cls_name = defaults_eraser.replace_basic_string( cls_name )
        c_name, c_args = templates.split( cls_name )
        if 3 != len( c_args ):
            return
        dc_no_defaults = defaults_eraser.erase_recursive( c_args[1] )
        if defaults_eraser.normalize( dc_no_defaults ) \
           != defaults_eraser.normalize( templates.join( default_container_name, [c_args[0]] ) ):
            return
        dcomp_no_defaults = defaults_eraser.erase_recursive( c_args[2] )
        if defaults_eraser.normalize( dcomp_no_defaults ) \
           != defaults_eraser.normalize( templates.join( default_compare, [c_args[0]] ) ):
            return
        value_type = defaults_eraser.erase_recursive( c_args[0] )
        return templates.join( c_name, [value_type] )

    @staticmethod
    def erase_compare_allocator( cls_name, default_compare='std::less', default_allocator='std::allocator' ):
        """Remove defaulted comparator and allocator (set/multiset)."""
        cls_name = defaults_eraser.replace_basic_string( cls_name )
        c_name, c_args = templates.split( cls_name )
        if 3 != len( c_args ):
            return
        value_type = c_args[0]
        tmpl = string.Template( "$container< $value_type, $compare<$value_type>, $allocator<$value_type> >" )
        tmpl = tmpl.substitute( container=c_name
                                , value_type=value_type
                                , compare=default_compare
                                , allocator=default_allocator )
        if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( tmpl ):
            return templates.join( c_name, [defaults_eraser.erase_recursive( value_type )] )

    @staticmethod
    def erase_map_compare_allocator( cls_name, default_compare='std::less', default_allocator='std::allocator' ):
        """Remove defaulted comparator and pair allocator (map/multimap)."""
        cls_name = defaults_eraser.replace_basic_string( cls_name )
        c_name, c_args = templates.split( cls_name )
        if 4 != len( c_args ):
            return
        key_type = c_args[0]
        mapped_type = c_args[1]
        # Compilers spell the pair's const key differently; try all layouts.
        tmpls = [
            string.Template( "$container< $key_type, $mapped_type, $compare<$key_type>, $allocator< std::pair< const $key_type, $mapped_type> > >" )
            , string.Template( "$container< $key_type, $mapped_type, $compare<$key_type>, $allocator< std::pair< $key_type const, $mapped_type> > >" )
            , string.Template( "$container< $key_type, $mapped_type, $compare<$key_type>, $allocator< std::pair< $key_type, $mapped_type> > >" )]
        for tmpl in tmpls:
            tmpl = tmpl.substitute( container=c_name
                                    , key_type=key_type
                                    , mapped_type=mapped_type
                                    , compare=default_compare
                                    , allocator=default_allocator )
            if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( tmpl ):
                return templates.join( c_name
                                       , [ defaults_eraser.erase_recursive( key_type )
                                           , defaults_eraser.erase_recursive( mapped_type )] )

    @staticmethod
    def erase_hash_allocator( cls_name ):
        """Remove defaulted hash/compare/allocator (hash_set variants).

        3 template args matches the MSVC 'hash_compare' flavor, 4 the
        GCC 'hash' flavor.
        """
        cls_name = defaults_eraser.replace_basic_string( cls_name )
        c_name, c_args = templates.split( cls_name )
        if len( c_args ) < 3:
            return

        default_hash = None
        default_less = 'std::less'
        default_equal_to = 'std::equal_to'
        default_allocator = 'std::allocator'

        tmpl = None
        if 3 == len( c_args ):
            default_hash = 'hash_compare'
            tmpl = "$container< $value_type, $hash<$value_type, $less<$value_type> >, $allocator<$value_type> >"
        elif 4 == len( c_args ):
            default_hash = 'hash'
            tmpl = "$container< $value_type, $hash<$value_type >, $equal_to<$value_type >, $allocator<$value_type> >"
        else:
            return

        value_type = c_args[0]
        tmpl = string.Template( tmpl )
        # The hash functor may live in any of the std-like namespaces.
        for ns in std_namespaces:
            inst = tmpl.substitute( container=c_name
                                    , value_type=value_type
                                    , hash= ns + '::' + default_hash
                                    , less=default_less
                                    , equal_to=default_equal_to
                                    , allocator=default_allocator )
            if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( inst ):
                return templates.join( c_name, [defaults_eraser.erase_recursive( value_type )] )

    @staticmethod
    def erase_hashmap_compare_allocator( cls_name ):
        """Remove defaulted hash/compare/allocator (hash_map variants)."""
        cls_name = defaults_eraser.replace_basic_string( cls_name )
        c_name, c_args = templates.split( cls_name )

        default_hash = None
        default_less = 'std::less'
        default_allocator = 'std::allocator'
        default_equal_to = 'std::equal_to'

        tmpl = None
        key_type = None
        mapped_type = None
        if 2 < len( c_args ):
            key_type = c_args[0]
            mapped_type = c_args[1]
        else:
            return

        if 4 == len( c_args ):
            default_hash = 'hash_compare'
            tmpl = string.Template( "$container< $key_type, $mapped_type, $hash<$key_type, $less<$key_type> >, $allocator< std::pair< const $key_type, $mapped_type> > >" )
            if key_type.startswith( 'const ' ) or key_type.endswith( ' const' ):
                tmpl = string.Template( "$container< $key_type, $mapped_type, $hash<$key_type, $less<$key_type> >, $allocator< std::pair< $key_type, $mapped_type> > >" )
        elif 5 == len( c_args ):
            default_hash = 'hash'
            tmpl = string.Template( "$container< $key_type, $mapped_type, $hash<$key_type >, $equal_to<$key_type>, $allocator< $mapped_type> >" )
            if key_type.startswith( 'const ' ) or key_type.endswith( ' const' ):
                tmpl = string.Template( "$container< $key_type, $mapped_type, $hash<$key_type >, $equal_to<$key_type>, $allocator< $mapped_type > >" )
        else:
            return

        for ns in std_namespaces:
            inst = tmpl.substitute( container=c_name
                                    , key_type=key_type
                                    , mapped_type=mapped_type
                                    , hash=ns + '::' + default_hash
                                    , less=default_less
                                    , equal_to = default_equal_to
                                    , allocator=default_allocator )
            if defaults_eraser.normalize( cls_name ) == defaults_eraser.normalize( inst ):
                return templates.join( c_name
                                       , [ defaults_eraser.erase_recursive( key_type )
                                           , defaults_eraser.erase_recursive( mapped_type )] )
class container_traits_impl_t:
"""this class implements the functionality needed for convinient work with
STD container classes.
Implemented functionality:
- find out whether a declaration is STD container or not
- find out container value( mapped ) type
This class tries to be useful as much, as possible. For example, for class
declaration( and not definition ) it parsers the class name in order to
extract all the information.
"""
def __init__( self
              , container_name
              , element_type_index
              , element_type_typedef
              , defaults_remover
              , key_type_index=None
              , key_type_typedef=None ):
    """
    container_name - std container name
    element_type_index - position of value\\mapped type within template
    arguments list
    element_type_typedef - class typedef to the value\\mapped type
    key_type_index - position of key type within template arguments list
    key_type_typedef - class typedef to the key type
    """
    self._name = container_name
    # Callable that strips default template arguments from a class name
    # (one of the defaults_eraser.erase_* helpers).
    self.remove_defaults_impl = defaults_remover
    self.element_type_index = element_type_index
    self.element_type_typedef = element_type_typedef
    # key_type_* stay None for sequence containers; only mappings set them.
    self.key_type_index = key_type_index
    self.key_type_typedef = key_type_typedef
def name(self):
return self._name
def get_container_or_none( self, type ):
"""returns reference to the class declaration or None"""
type = type_traits.remove_alias( type )
type = type_traits.remove_cv( type )
cls = None
if isinstance( type, cpptypes.declarated_t ):
cls = type_traits.remove_alias( type.declaration )
elif isinstance( type, class_declaration.class_t ):
cls = type
elif isinstance( type, class_declaration.class_declaration_t ):
cls = type
else:
return
if not cls.name.startswith( self.name() + '<' ):
return
for ns in std_namespaces:
if type_traits.impl_details.is_defined_in_xxx( ns, cls ):
return cls
def is_my_case( self, type ):
"""checks, whether type is STD container or not"""
return bool( self.get_container_or_none( type ) )
def class_declaration( self, type ):
"""returns reference to the class declaration"""
cls = self.get_container_or_none( type )
if not cls:
raise TypeError( 'Type "%s" is not instantiation of std::%s' % ( type.decl_string, self.name() ) )
return cls
def is_sequence( self, type ):
#raise exception if type is not container
unused = self.class_declaration( type )
return self.key_type_index is None
def is_mapping( self, type ):
return not self.is_sequence( type )
def __find_xxx_type( self, type, xxx_index, xxx_typedef, cache_property_name ):
cls = self.class_declaration( type )
result = getattr( cls.cache, cache_property_name )
if not result:
if isinstance( cls, class_declaration.class_t ):
xxx_type = cls.typedef( xxx_typedef, recursive=False ).type
result = type_traits.remove_declarated( xxx_type )
else:
xxx_type_str = templates.args( cls.name )[xxx_index]
result = type_traits.impl_details.find_value_type( cls.top_parent, xxx_type_str )
if None is result:
raise RuntimeError( "Unable to find out %s '%s' key\\value type."
% ( self.name(), cls.decl_string ) )
setattr( cls.cache, cache_property_name, result )
return result
def element_type( self, type ):
"""returns reference to the class value\\mapped type declaration"""
return self.__find_xxx_type( type
, self.element_type_index
, self.element_type_typedef
, 'container_element_type')
def key_type( self, type ):
"""returns reference to the class key type declaration"""
if not self.is_mapping( type ):
raise TypeError( 'Type "%s" is not "mapping" container' % str( type ) )
return self.__find_xxx_type( type
, self.key_type_index
, self.key_type_typedef
, 'container_key_type' )
def remove_defaults( self, type_or_string ):
"""remove template defaults from a template class instantiation
For example:
std::vector< int, std::allocator< int > >
will become
std::vector< int >
"""
name = type_or_string
if not isinstance( type_or_string, types.StringTypes ):
name = self.class_declaration( type_or_string ).name
if not self.remove_defaults_impl:
return name
no_defaults = self.remove_defaults_impl( name )
if not no_defaults:
return name
else:
return no_defaults
# factory alias: each traits object below is one container_traits_impl_t
# configured for a particular STD container family
create_traits = container_traits_impl_t

# sequence containers: element type is template argument 0 ("value_type")
list_traits = create_traits( 'list'
                             , 0
                             , 'value_type'
                             , defaults_eraser.erase_allocator )

deque_traits = create_traits( 'deque'
                              , 0
                              , 'value_type'
                              , defaults_eraser.erase_allocator )

# container adaptors: the default to strip is the underlying container
queue_traits = create_traits( 'queue'
                              , 0
                              , 'value_type'
                              , defaults_eraser.erase_container )

priority_queue_traits = create_traits( 'priority_queue'
                                       , 0
                                       , 'value_type'
                                       , defaults_eraser.erase_container_compare )

vector_traits = create_traits( 'vector'
                               , 0
                               , 'value_type'
                               , defaults_eraser.erase_allocator )

stack_traits = create_traits( 'stack'
                              , 0
                              , 'value_type'
                              , defaults_eraser.erase_container )

# mapping containers: mapped type is argument 1, key type is argument 0
map_traits = create_traits( 'map'
                            , 1
                            , 'mapped_type'
                            , defaults_eraser.erase_map_compare_allocator
                            , key_type_index=0
                            , key_type_typedef='key_type')

multimap_traits = create_traits( 'multimap'
                                 , 1
                                 , 'mapped_type'
                                 , defaults_eraser.erase_map_compare_allocator
                                 , key_type_index=0
                                 , key_type_typedef='key_type')

hash_map_traits = create_traits( 'hash_map'
                                 , 1
                                 , 'mapped_type'
                                 , defaults_eraser.erase_hashmap_compare_allocator
                                 , key_type_index=0
                                 , key_type_typedef='key_type')

hash_multimap_traits = create_traits( 'hash_multimap'
                                      , 1
                                      , 'mapped_type'
                                      , defaults_eraser.erase_hashmap_compare_allocator
                                      , key_type_index=0
                                      , key_type_typedef='key_type')

# set-like containers: sequence-configured (no separate key type)
set_traits = create_traits( 'set'
                            , 0
                            , 'value_type'
                            , defaults_eraser.erase_compare_allocator)

multiset_traits = create_traits( 'multiset'
                                 , 0
                                 , 'value_type'
                                 , defaults_eraser.erase_compare_allocator )

hash_set_traits = create_traits( 'hash_set'
                                 , 0
                                 , 'value_type'
                                 , defaults_eraser.erase_hash_allocator )

hash_multiset_traits = create_traits( 'hash_multiset'
                                      , 0
                                      , 'value_type'
                                      , defaults_eraser.erase_hash_allocator )

container_traits = (
    list_traits
    , deque_traits
    , queue_traits
    , priority_queue_traits
    , vector_traits
    , stack_traits
    , map_traits
    , multimap_traits
    , hash_map_traits
    , hash_multimap_traits
    , set_traits
    , hash_set_traits
    , multiset_traits
    , hash_multiset_traits )
"""tuple of all STD container traits classes"""
def find_container_traits( cls_or_string ):
    """Return the traits object of the matching STD container, or None.

    *cls_or_string* may be either a declaration object or a decl string
    such as "std::vector< int >".
    """
    if isinstance( cls_or_string, types.StringTypes ):
        if not templates.is_instantiation( cls_or_string ):
            return None
        name = templates.name( cls_or_string )
        # tolerate both qualified ("std::vector") and plain ("vector") names
        if name.startswith( 'std::' ):
            name = name[ len( 'std::' ): ]
        matches = [ traits for traits in container_traits if traits.name() == name ]
        if matches:
            return matches[0]
    else:
        for traits in container_traits:
            if traits.is_my_case( cls_or_string ):
                return traits
|
The 2019 Dodge Challenger SRT Hellcat Redeye is capable of sub-11-second quarter-mile runs.
This year, 30 entries were pared down to the top 12 — with four in each category of safety, innovation and green technologies. These 12 were presented to the panel members, with the winner in each category announced at the Canadian International Auto Show in Toronto.
Here is one of the entries presented to the panel: FCA (Fiat Chrysler Automobiles) presented its 2019 Dodge Challenger SRT Hellcat Redeye powertrain in the Innovation Category.
The Challenger SRT Hellcat Redeye must be known by every drag-racing fan in North America. With 797 horsepower in the Redeye version — 717 hp in the regular SRT Hellcat — the Hellcat Redeye is capable of sub-11-second quarter-mile runs. That is quick for a race car, but this is a car that has a factory warranty and can be easily operated on the street.
There are several features of the Hellcat Redeye to help produce that power and maintain reliability. To increase maximum engine r.p.m., the valve keepers were designed with a single groove rather than the standard three-groove keeper. This aids in stability of the valve train. The valve springs were also redesigned, with a damping coil on the spring moved to the top of the spring. This enhances durability and damping of vibrations in the valve train. The camshaft was changed from a ductile iron shaft to a gun-drilled 5150 steel shaft with induction-hardened cams to handle the higher loads.
Power requires fuel, and to ensure adequate fuel delivery, the Hellcat Redeye uses a dual fuel-pump system so that at full throttle, and with the fuel injectors open 90 per cent of the time, both fuel pumps can maintain full fuel pressure. At idle and low-throttle openings, fuel pressure is reduced, with only one fuel pump operating.
The Challenger hood was redesigned with twin hood scoops to aid in vehicle stability and allow cooler air to the engine. The open airbox system with cold-air induction pushes more dense air into the supercharger and helps produce more power. Simply going from a rainy day to a sunny day can equal a 50-hp loss because of higher air temperatures, less dense air and less humidity. That is why the top drag racers constantly monitor drag-strip air temperature, barometric pressure and humidity, so they can tune their cars for the most power.
The heat produced by the Hellcat Redeye is amazing. At full power, the engine gives off enough heat to warm around nine homes. To control this heat and let the coldest air into the engine, the Hellcat Redeye uses three heat exchangers for heat rejection. The radiator and cooling system are the primary means of getting rid of engine heat. But the intercooler on the supercharger is also used to cool the air as it enters the engine.
The third method of controlling the heat utilizes the air-conditioning system. When the vehicle is in track mode, the air conditioning system is switched from cooling the vehicle interior to cooling the coolant circuit located in the intercooler for the supercharger, which is mounted in front near the radiator. This cools the engine’s intake air by about 10 C, which increases the air density to produce more power. To accommodate this two-phase air-conditioning system operation, the AC compressor has been increased about 10 per cent in capacity.
|
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import datetime
from autobahn.twisted.wamp import ApplicationSession
class TimeService(ApplicationSession):
    """WAMP application component that exposes the current UTC time as an RPC."""

    def __init__(self, realm = "realm1"):
        ApplicationSession.__init__(self)
        self._realm = realm

    def onConnect(self):
        # join the configured realm as soon as the transport is up
        self.join(self._realm)

    def onJoin(self, details):
        def current_utc():
            # ISO-8601 style timestamp with an explicit Zulu suffix
            return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        # expose the procedure under a stable WAMP URI
        self.register(current_utc, 'com.timeservice.now')
from twisted.python import log
from autobahn.twisted.websocket import WampWebSocketServerProtocol, WampWebSocketServerFactory
from twisted.internet.defer import Deferred
import json
import urllib
import Cookie
from autobahn.util import newid, utcnow
from autobahn.websocket import http
class ServerProtocol(WampWebSocketServerProtocol):
    """WAMP-over-WebSocket server protocol with cookie-based session tracking.

    Each browser gets a ``cbtid`` tracking cookie. All WebSocket connections
    presenting the same cookie share one entry in ``factory._cookies`` (set up
    by the main script) and therefore one authentication state.
    """

    ## authid -> cookie -> set(connection)

    def onConnect(self, request):
        protocol, headers = WampWebSocketServerProtocol.onConnect(self, request)

        ## our cookie tracking ID
        self._cbtid = None

        ## see if there already is a cookie set ..
        ## (use "in" instead of the deprecated dict.has_key())
        if 'cookie' in request.headers:
            try:
                cookie = Cookie.SimpleCookie()
                cookie.load(str(request.headers['cookie']))
            except Cookie.CookieError:
                pass
            else:
                if 'cbtid' in cookie:
                    cbtid = cookie['cbtid'].value
                    if cbtid in self.factory._cookies:
                        self._cbtid = cbtid
                        log.msg("Cookie already set: %s" % self._cbtid)

        ## if no cookie is set, create a new one ..
        if self._cbtid is None:
            self._cbtid = newid()
            maxAge = 86400

            cbtData = {'created': utcnow(),
                       'authenticated': None,
                       'maxAge': maxAge,
                       'connections': set()}

            self.factory._cookies[self._cbtid] = cbtData

            ## do NOT add the "secure" cookie attribute! "secure" refers to the
            ## scheme of the Web page that triggered the WS, not WS itself!!
            ##
            headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
            log.msg("Setting new cookie: %s" % self._cbtid)

        ## add this WebSocket connection to the set of connections
        ## associated with the same cookie
        self.factory._cookies[self._cbtid]['connections'].add(self)

        self._authenticated = self.factory._cookies[self._cbtid]['authenticated']

        ## accept the WebSocket connection, speaking subprotocol `protocol`
        ## and setting HTTP headers `headers`
        return (protocol, headers)
from autobahn.twisted.wamp import RouterSession
from autobahn.wamp import types
class MyRouterSession(RouterSession):
def onOpen(self, transport):
RouterSession.onOpen(self, transport)
print "transport authenticated: {}".format(self._transport._authenticated)
def onHello(self, realm, details):
print "onHello: {} {}".format(realm, details)
if self._transport._authenticated is not None:
return types.Accept(authid = self._transport._authenticated)
else:
return types.Challenge("mozilla-persona")
return accept
def onLeave(self, details):
if details.reason == "wamp.close.logout":
cookie = self._transport.factory._cookies[self._transport._cbtid]
cookie['authenticated'] = None
for proto in cookie['connections']:
proto.sendClose()
def onAuthenticate(self, signature, extra):
print "onAuthenticate: {} {}".format(signature, extra)
dres = Deferred()
## The client did it's Mozilla Persona authentication thing
## and now wants to verify the authentication and login.
assertion = signature
audience = 'http://127.0.0.1:8080/'
## To verify the authentication, we need to send a HTTP/POST
## to Mozilla Persona. When successful, Persona will send us
## back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "tobias.oberstein@gmail.com",
# "status": "okay"
# }
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.urlencode({'audience': audience, 'assertion': assertion})
from twisted.web.client import getPage
d = getPage(url = "https://verifier.login.persona.org/verify",
method = 'POST',
postdata = body,
headers = headers)
log.msg("Authentication request sent.")
def done(res):
res = json.loads(res)
try:
if res['status'] == 'okay':
## Mozilla Persona successfully authenticated the user
## remember the user's email address. this marks the cookie as
## authenticated
self._transport.factory._cookies[self._transport._cbtid]['authenticated'] = res['email']
log.msg("Authenticated user {}".format(res['email']))
dres.callback(types.Accept(authid = res['email']))
else:
log.msg("Authentication failed!")
dres.callback(types.Deny())
except Exception as e:
print "ERRR", e
def error(err):
log.msg("Authentication request failed: {}".format(err.value))
dres.callback(types.Deny())
d.addCallbacks(done, error)
return dres
# entry point: wire up the WAMP router, the embedded TimeService component,
# the cookie-tracking WebSocket transport, and a static-file web site
if __name__ == '__main__':

    import sys, argparse

    from twisted.python import log
    from twisted.internet.endpoints import serverFromString

    ## parse command line arguments
    ##
    parser = argparse.ArgumentParser()

    parser.add_argument("-d", "--debug", action = "store_true",
                        help = "Enable debug output.")

    parser.add_argument("-c", "--component", type = str, default = None,
                        help = "Start WAMP-WebSocket server with this application component, e.g. 'timeservice.TimeServiceBackend', or None.")

    parser.add_argument("--websocket", type = str, default = "tcp:8080",
                        help = 'WebSocket server Twisted endpoint descriptor, e.g. "tcp:9000" or "unix:/tmp/mywebsocket".')

    parser.add_argument("--wsurl", type = str, default = "ws://localhost:8080",
                        help = 'WebSocket URL (must suit the endpoint), e.g. "ws://localhost:9000".')

    args = parser.parse_args()

    ## start Twisted logging to stdout
    ##
    ## NOTE(review): "True or args.debug" forces logging on unconditionally;
    ## looks like a debugging leftover -- confirm before removing
    if True or args.debug:
        log.startLogging(sys.stdout)

    ## we use an Autobahn utility to install the "best" available Twisted reactor
    ##
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor()
    if args.debug:
        print("Running on reactor {}".format(reactor))

    ## create a WAMP router factory
    ##
    from autobahn.wamp.router import RouterFactory
    router_factory = RouterFactory()

    ## create a WAMP router session factory
    ##
    from autobahn.twisted.wamp import RouterSessionFactory
    session_factory = RouterSessionFactory(router_factory)
    # every session uses our Persona-authenticating subclass
    session_factory.session = MyRouterSession

    ## start an embedded application component ..
    ##
    session_factory.add(TimeService())

    ## create a WAMP-over-WebSocket transport server factory
    ##
    from autobahn.twisted.websocket import WampWebSocketServerFactory
    transport_factory = WampWebSocketServerFactory(session_factory, args.wsurl, debug_wamp = args.debug)
    transport_factory.protocol = ServerProtocol
    # shared cookie store consulted by ServerProtocol.onConnect
    transport_factory._cookies = {}
    transport_factory.setProtocolOptions(failByDrop = False)

    from twisted.web.server import Site
    from twisted.web.static import File
    from autobahn.twisted.resource import WebSocketResource

    ## we serve static files under "/" ..
    root = File(".")

    ## .. and our WebSocket server under "/ws"
    resource = WebSocketResource(transport_factory)
    root.putChild("ws", resource)

    ## run both under one Twisted Web Site
    site = Site(root)

    ## start the WebSocket server from an endpoint
    ##
    server = serverFromString(reactor, args.websocket)
    server.listen(site)

    ## now enter the Twisted reactor loop
    ##
    reactor.run()
|
Position Description: Serves as the store expert in millwork by providing detailed product information to both customers and peers, promoting and recommending products, plans, or installation services that match customer needs, informing customers on pricing, options or status on pending orders. Includes generating leads, conducting sales activities, building relationships with customers, keeping shelves stocked and correctly displayed, and coordinating successful completion of projects and orders. This includes performing order management duties such as entering new orders for customers, reaching out to vendors on special orders, tracking and fulfilling orders, and resolving issues. Job Requirements: Requires morning, afternoon, and evening availability any day of the week. Physical ability to move large, bulky and/or heavy merchandise.
Physical ability to perform tasks that may require prolonged standing, sitting, and other activities necessary to perform job duties. Minimum Qualifications: High school diploma or equivalent. 1 year external experience in customer-facing sales OR 6 months Lowe's retail experience. Preferred Qualifications: 1 year experience entering and submitting customer sales orders, including Special Order.
2 years experience identifying and selling products based upon customer needs or plans. 2 years experience in providing customer service including identifying and resolving customer complaints, greeting customers, answering phones, building relationships with customers, and thanking customers for their business. 2 years experience following up on outstanding or incomplete customer orders, sales, or installs. 1 year experience promoting product related services and plans such as installation, delivery, credit financing, or extended protection plans.
2 years experience in a sales environment with required sales goals or metrics. 1 year employment with Lowe's as a Sales Specialist. 1 year experience in a trade directly related to this department (Millwork).
|
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# Python wrappers on LAMMPS library via ctypes
# for python3 compatibility
from __future__ import print_function
# imports for simple LAMMPS python wrapper module "lammps"
import sys,traceback,types
from ctypes import *
from os.path import dirname,abspath,join
from inspect import getsourcefile
# imports for advanced LAMMPS python wrapper modules "PyLammps" and "IPyLammps"
from collections import namedtuple
import os
import select
import re
import sys
def get_ctypes_int(size):
    """Return the ctypes integer type for a byte *size* (4 or 8); c_int otherwise."""
    return {4: c_int32, 8: c_int64}.get(size, c_int)
class MPIAbortException(Exception):
    """Raised when a LAMMPS error was severe enough to abort MPI."""

    def __init__(self, message):
        # keep the raw message available to callers
        self.message = message

    def __str__(self):
        return "{!r}".format(self.message)
class lammps(object):
    """ctypes-based wrapper around the LAMMPS C library interface."""

    # detect if Python is using version of mpi4py that can pass a communicator
    # (only mpi4py v2/v3 expose the ABI needed to hand a communicator to C)

    has_mpi4py = False
    try:
        from mpi4py import MPI
        from mpi4py import __version__ as mpi4py_version
        if mpi4py_version.split('.')[0] in ['2','3']: has_mpi4py = True
    except:
        # mpi4py not installed or unusable; communicator passing disabled
        pass
# create instance of LAMMPS
def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
    """Create a new LAMMPS instance, or wrap an existing one.

    name    - shared library suffix: "" loads liblammps, "g++" loads
              liblammps_g++, etc.
    cmdargs - list of LAMMPS command-line arguments
    ptr     - existing LAMMPS object pointer (when Python is embedded
              in a LAMMPS input script); if set, no new instance is created
    comm    - mpi4py communicator to run on (requires mpi4py v2/v3)
    """
    self.comm = comm
    self.opened = 0

    # determine module location
    modpath = dirname(abspath(getsourcefile(lambda:0)))
    self.lib = None

    # if a pointer to a LAMMPS object is handed in,
    # all symbols should already be available
    try:
        if ptr: self.lib = CDLL("",RTLD_GLOBAL)
    except:
        self.lib = None

    # load liblammps.so unless name is given
    #   if name = "g++", load liblammps_g++.so
    # try loading the LAMMPS shared object from the location
    #   of lammps.py with an absolute path,
    #   so that LD_LIBRARY_PATH does not need to be set for regular install
    # fall back to loading with a relative path,
    #   typically requires LD_LIBRARY_PATH to be set appropriately

    # pick the macOS extension if any .dylib build is present next to us
    if any([f.startswith('liblammps') and f.endswith('.dylib') for f in os.listdir(modpath)]):
        lib_ext = ".dylib"
    else:
        lib_ext = ".so"

    if not self.lib:
        try:
            if not name: self.lib = CDLL(join(modpath,"liblammps" + lib_ext),RTLD_GLOBAL)
            else: self.lib = CDLL(join(modpath,"liblammps_%s" % name + lib_ext),
                                  RTLD_GLOBAL)
        except:
            if not name: self.lib = CDLL("liblammps" + lib_ext,RTLD_GLOBAL)
            else: self.lib = CDLL("liblammps_%s" % name + lib_ext,RTLD_GLOBAL)

    # define ctypes API for each library method
    # NOTE: should add one of these for each lib function

    self.lib.lammps_extract_box.argtypes = \
        [c_void_p,POINTER(c_double),POINTER(c_double),
         POINTER(c_double),POINTER(c_double),POINTER(c_double),
         POINTER(c_int),POINTER(c_int)]
    self.lib.lammps_extract_box.restype = None

    self.lib.lammps_reset_box.argtypes = \
        [c_void_p,POINTER(c_double),POINTER(c_double),c_double,c_double,c_double]
    self.lib.lammps_reset_box.restype = None

    self.lib.lammps_gather_atoms.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_gather_atoms.restype = None

    self.lib.lammps_gather_atoms_concat.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_gather_atoms_concat.restype = None

    self.lib.lammps_gather_atoms_subset.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
    self.lib.lammps_gather_atoms_subset.restype = None

    self.lib.lammps_scatter_atoms.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_void_p]
    self.lib.lammps_scatter_atoms.restype = None

    self.lib.lammps_scatter_atoms_subset.argtypes = \
        [c_void_p,c_char_p,c_int,c_int,c_int,POINTER(c_int),c_void_p]
    self.lib.lammps_scatter_atoms_subset.restype = None

    # if no ptr provided, create an instance of LAMMPS
    #   don't know how to pass an MPI communicator from PyPar
    #   but we can pass an MPI communicator from mpi4py v2.0.0 and later
    #   no_mpi call lets LAMMPS use MPI_COMM_WORLD
    #   cargs = array of C strings from args
    # if ptr, then are embedding Python in LAMMPS input script
    #   ptr is the desired instance of LAMMPS
    #   just convert it to ctypes ptr and store in self.lmp

    if not ptr:

        # with mpi4py v2, can pass MPI communicator to LAMMPS
        # need to adjust for type of MPI communicator object
        # allow for int (like MPICH) or void* (like OpenMPI)

        if comm:
            if not lammps.has_mpi4py:
                raise Exception('Python mpi4py version is not 2 or 3')
            if lammps.MPI._sizeof(lammps.MPI.Comm) == sizeof(c_int):
                MPI_Comm = c_int
            else:
                MPI_Comm = c_void_p

            narg = 0
            cargs = 0
            if cmdargs:
                cmdargs.insert(0,"lammps.py")
                narg = len(cmdargs)
                for i in range(narg):
                    if type(cmdargs[i]) is str:
                        cmdargs[i] = cmdargs[i].encode()
                cargs = (c_char_p*narg)(*cmdargs)
                # NOTE(review): "c_void_p()" below is an instance, not the
                # type c_void_p -- looks like a typo; verify against a
                # working mpi4py run before changing
                self.lib.lammps_open.argtypes = [c_int, c_char_p*narg, \
                                                 MPI_Comm, c_void_p()]
            else:
                self.lib.lammps_open.argtypes = [c_int, c_int, \
                                                 MPI_Comm, c_void_p()]

            self.lib.lammps_open.restype = None
            self.opened = 1
            self.lmp = c_void_p()
            comm_ptr = lammps.MPI._addressof(comm)
            comm_val = MPI_Comm.from_address(comm_ptr)
            self.lib.lammps_open(narg,cargs,comm_val,byref(self.lmp))

        else:
            # no communicator given: fall back to MPI_COMM_WORLD
            if lammps.has_mpi4py:
                from mpi4py import MPI
                self.comm = MPI.COMM_WORLD
            self.opened = 1
            if cmdargs:
                cmdargs.insert(0,"lammps.py")
                narg = len(cmdargs)
                for i in range(narg):
                    if type(cmdargs[i]) is str:
                        cmdargs[i] = cmdargs[i].encode()
                cargs = (c_char_p*narg)(*cmdargs)
                self.lmp = c_void_p()
                self.lib.lammps_open_no_mpi(narg,cargs,byref(self.lmp))
            else:
                self.lmp = c_void_p()
                self.lib.lammps_open_no_mpi(0,None,byref(self.lmp))
                # could use just this if LAMMPS lib interface supported it
                # self.lmp = self.lib.lammps_open_no_mpi(0,None)

    else:
        # magic to convert ptr to ctypes ptr
        if sys.version_info >= (3, 0):
            # Python 3 (uses PyCapsule API)
            pythonapi.PyCapsule_GetPointer.restype = c_void_p
            pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]
            self.lmp = c_void_p(pythonapi.PyCapsule_GetPointer(ptr, None))
        else:
            # Python 2 (uses PyCObject API)
            pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
            pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
            self.lmp = c_void_p(pythonapi.PyCObject_AsVoidPtr(ptr))

    # optional numpy support (lazy loading)
    self._numpy = None

    # set default types
    # widths are queried from the library so they match the compiled binary
    self.c_bigint = get_ctypes_int(self.extract_setting("bigint"))
    self.c_tagint = get_ctypes_int(self.extract_setting("tagint"))
    self.c_imageint = get_ctypes_int(self.extract_setting("imageint"))
    self._installed_packages = None

    # add way to insert Python callback for fix external
    self.callback = {}
    self.FIX_EXTERNAL_CALLBACK_FUNC = CFUNCTYPE(None, c_void_p, self.c_bigint, c_int, POINTER(self.c_tagint), POINTER(POINTER(c_double)), POINTER(POINTER(c_double)))
    self.lib.lammps_set_fix_external_callback.argtypes = [c_void_p, c_char_p, self.FIX_EXTERNAL_CALLBACK_FUNC, c_void_p]
    self.lib.lammps_set_fix_external_callback.restype = None
# shut-down LAMMPS instance
def __del__(self):
if self.lmp and self.opened:
self.lib.lammps_close(self.lmp)
self.opened = 0
def close(self):
if self.opened: self.lib.lammps_close(self.lmp)
self.lmp = None
self.opened = 0
def version(self):
    """Return the version of the linked LAMMPS library (via lammps_version)."""
    return self.lib.lammps_version(self.lmp)
def file(self,file):
if file: file = file.encode()
self.lib.lammps_file(self.lmp,file)
# send a single command
def command(self,cmd):
if cmd: cmd = cmd.encode()
self.lib.lammps_command(self.lmp,cmd)
if self.has_exceptions and self.lib.lammps_has_error(self.lmp):
sb = create_string_buffer(100)
error_type = self.lib.lammps_get_last_error_message(self.lmp, sb, 100)
error_msg = sb.value.decode().strip()
if error_type == 2:
raise MPIAbortException(error_msg)
raise Exception(error_msg)
# send a list of commands
def commands_list(self,cmdlist):
cmds = [x.encode() for x in cmdlist if type(x) is str]
args = (c_char_p * len(cmdlist))(*cmds)
self.lib.lammps_commands_list(self.lmp,len(cmdlist),args)
# send a string of commands
def commands_string(self,multicmd):
if type(multicmd) is str: multicmd = multicmd.encode()
self.lib.lammps_commands_string(self.lmp,c_char_p(multicmd))
# extract lammps type byte sizes
def extract_setting(self, name):
if name: name = name.encode()
self.lib.lammps_extract_setting.restype = c_int
return int(self.lib.lammps_extract_setting(self.lmp,name))
# extract global info
def extract_global(self,name,type):
if name: name = name.encode()
if type == 0:
self.lib.lammps_extract_global.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_double)
else: return None
ptr = self.lib.lammps_extract_global(self.lmp,name)
return ptr[0]
# extract simulation box info (previous comment was a copy-paste of
# "extract global info")
def extract_box(self):
    """Return the simulation box geometry.

    Returns (boxlo, boxhi, xy, yz, xz, periodicity, box_change) where
    boxlo/boxhi are 3-element lists of floats, xy/yz/xz are floats,
    periodicity is a 3-element list of ints and box_change is an int.
    """
    # ctypes out-parameters filled in by the C library
    boxlo = (3*c_double)()
    boxhi = (3*c_double)()
    xy = c_double()
    yz = c_double()
    xz = c_double()
    periodicity = (3*c_int)()
    box_change = c_int()

    self.lib.lammps_extract_box(self.lmp,boxlo,boxhi,
                                byref(xy),byref(yz),byref(xz),
                                periodicity,byref(box_change))

    # convert ctypes objects to plain Python values
    boxlo = boxlo[:3]
    boxhi = boxhi[:3]
    xy = xy.value
    yz = yz.value
    xz = xz.value
    periodicity = periodicity[:3]
    box_change = box_change.value

    return boxlo,boxhi,xy,yz,xz,periodicity,box_change
# extract per-atom info
# NOTE: need to insure are converting to/from correct Python type
# e.g. for Python list or NumPy or ctypes
def extract_atom(self,name,type):
if name: name = name.encode()
if type == 0:
self.lib.lammps_extract_atom.restype = POINTER(c_int)
elif type == 1:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_int))
elif type == 2:
self.lib.lammps_extract_atom.restype = POINTER(c_double)
elif type == 3:
self.lib.lammps_extract_atom.restype = POINTER(POINTER(c_double))
else: return None
ptr = self.lib.lammps_extract_atom(self.lmp,name)
return ptr
@property
def numpy(self):
    """Lazily-created helper that views per-atom data as NumPy arrays.

    numpy is imported on first access only, so the wrapper stays usable
    without numpy installed as long as this property is never touched.
    """
    if not self._numpy:
        import numpy as np
        class LammpsNumpyWrapper:
            # wraps the per-atom ctypes pointers in numpy views (no copy)
            def __init__(self, lmp):
                self.lmp = lmp

            def _ctype_to_numpy_int(self, ctype_int):
                # map a ctypes int type to the matching numpy dtype
                if ctype_int == c_int32:
                    return np.int32
                elif ctype_int == c_int64:
                    return np.int64
                return np.intc

            def extract_atom_iarray(self, name, nelem, dim=1):
                # ids/molecules/images use library-configured int widths
                if name in ['id', 'molecule']:
                    c_int_type = self.lmp.c_tagint
                elif name in ['image']:
                    c_int_type = self.lmp.c_imageint
                else:
                    c_int_type = c_int

                np_int_type = self._ctype_to_numpy_int(c_int_type)

                if dim == 1:
                    tmp = self.lmp.extract_atom(name, 0)
                    ptr = cast(tmp, POINTER(c_int_type * nelem))
                else:
                    tmp = self.lmp.extract_atom(name, 1)
                    ptr = cast(tmp[0], POINTER(c_int_type * nelem * dim))

                # frombuffer shares memory with the LAMMPS-owned array
                a = np.frombuffer(ptr.contents, dtype=np_int_type)
                a.shape = (nelem, dim)
                return a

            def extract_atom_darray(self, name, nelem, dim=1):
                if dim == 1:
                    tmp = self.lmp.extract_atom(name, 2)
                    ptr = cast(tmp, POINTER(c_double * nelem))
                else:
                    tmp = self.lmp.extract_atom(name, 3)
                    ptr = cast(tmp[0], POINTER(c_double * nelem * dim))

                a = np.frombuffer(ptr.contents)
                a.shape = (nelem, dim)
                return a

        self._numpy = LammpsNumpyWrapper(self)
    return self._numpy
# extract compute info
def extract_compute(self,id,style,type):
if id: id = id.encode()
if type == 0:
if style > 0: return None
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr[0]
if type == 1:
self.lib.lammps_extract_compute.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
if type == 2:
if style == 0:
self.lib.lammps_extract_compute.restype = POINTER(c_int)
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr[0]
else:
self.lib.lammps_extract_compute.restype = POINTER(POINTER(c_double))
ptr = self.lib.lammps_extract_compute(self.lmp,id,style,type)
return ptr
return None
# extract fix info
# in case of global datum, free memory for 1 double via lammps_free()
# double was allocated by library interface function
def extract_fix(self,id,style,type,i=0,j=0):
if id: id = id.encode()
if style == 0:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
elif (style == 1) or (style == 2):
if type == 1:
self.lib.lammps_extract_fix.restype = POINTER(c_double)
elif type == 2:
self.lib.lammps_extract_fix.restype = POINTER(POINTER(c_double))
else:
return None
ptr = self.lib.lammps_extract_fix(self.lmp,id,style,type,i,j)
return ptr
else:
return None
# extract variable info
# free memory for 1 double or 1 vector of doubles via lammps_free()
# for vector, must copy nlocal returned values to local c_double vector
# memory was allocated by library interface function
def extract_variable(self,name,group,type):
if name: name = name.encode()
if group: group = group.encode()
if type == 0:
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
result = ptr[0]
self.lib.lammps_free(ptr)
return result
if type == 1:
self.lib.lammps_extract_global.restype = POINTER(c_int)
nlocalptr = self.lib.lammps_extract_global(self.lmp,"nlocal".encode())
nlocal = nlocalptr[0]
result = (c_double*nlocal)()
self.lib.lammps_extract_variable.restype = POINTER(c_double)
ptr = self.lib.lammps_extract_variable(self.lmp,name,group)
for i in range(nlocal): result[i] = ptr[i]
self.lib.lammps_free(ptr)
return result
return None
# return current value of thermo keyword
def get_thermo(self,name):
if name: name = name.encode()
self.lib.lammps_get_thermo.restype = c_double
return self.lib.lammps_get_thermo(self.lmp,name)
# return total number of atoms in system
def get_natoms(self):
    """Return the total atom count reported by lammps_get_natoms()."""
    return self.lib.lammps_get_natoms(self.lmp)
# set variable value
# value is converted to string
# returns 0 for success, -1 if failed
def set_variable(self,name,value):
if name: name = name.encode()
if value: value = str(value).encode()
return self.lib.lammps_set_variable(self.lmp,name,value)
# reset simulation box size
def reset_box(self,boxlo,boxhi,xy,yz,xz):
cboxlo = (3*c_double)(*boxlo)
cboxhi = (3*c_double)(*boxhi)
self.lib.lammps_reset_box(self.lmp,cboxlo,cboxhi,xy,yz,xz)
# return vector of atom properties gathered across procs
# 3 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
#   count = number of per-atom values, 1 for type or charge, 3 for x or f
# returned data is a 1d vector - doc how it is ordered?
# NOTE: need to insure are converting to/from correct Python type
# e.g. for Python list or NumPy or ctypes
def gather_atoms(self,name,type,count):
if name: name = name.encode()
natoms = self.lib.lammps_get_natoms(self.lmp)
if type == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
elif type == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms(self.lmp,name,type,count,data)
else: return None
return data
def gather_atoms_concat(self,name,type,count):
if name: name = name.encode()
natoms = self.lib.lammps_get_natoms(self.lmp)
if type == 0:
data = ((count*natoms)*c_int)()
self.lib.lammps_gather_atoms_concat(self.lmp,name,type,count,data)
elif type == 1:
data = ((count*natoms)*c_double)()
self.lib.lammps_gather_atoms_concat(self.lmp,name,type,count,data)
else: return None
return data
def gather_atoms_subset(self,name,type,count,ndata,ids):
if name: name = name.encode()
if type == 0:
data = ((count*ndata)*c_int)()
self.lib.lammps_gather_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
elif type == 1:
data = ((count*ndata)*c_double)()
self.lib.lammps_gather_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
else: return None
return data
# scatter vector of atom properties across procs
# 2 variants to match src/library.cpp
# name = atom property recognized by LAMMPS in atom->extract()
# type = 0 for integer values, 1 for double values
#   count = number of per-atom values, 1 for type or charge, 3 for x or f
# assume data is of correct type and length, as created by gather_atoms()
# NOTE: need to insure are converting to/from correct Python type
# e.g. for Python list or NumPy or ctypes
  def scatter_atoms(self,name,type,count,data):
    """Scatter per-atom property *name* across procs; *data* must have the
    layout produced by gather_atoms() (type 0 = int, 1 = double)."""
    if name: name = name.encode()
    self.lib.lammps_scatter_atoms(self.lmp,name,type,count,data)
  def scatter_atoms_subset(self,name,type,count,ndata,ids,data):
    """Scatter property *name* for only the ndata atoms listed in *ids*;
    *data* must match the layout produced by gather_atoms_subset()."""
    if name: name = name.encode()
    self.lib.lammps_scatter_atoms_subset(self.lmp,name,type,count,ndata,ids,data)
# create N atoms on all procs
# N = global number of atoms
# id = ID of each atom (optional, can be None)
# type = type of each atom (1 to Ntypes) (required)
# x = coords of each atom as (N,3) array (required)
# v = velocity of each atom as (N,3) array (optional, can be None)
# NOTE: how could we insure are passing correct type to LAMMPS
# e.g. for Python list or NumPy, etc
# ditto for gather_atoms() above
def create_atoms(self,n,id,type,x,v,image=None,shrinkexceed=False):
if id:
id_lmp = (c_int * n)()
id_lmp[:] = id
else:
id_lmp = id
if image:
image_lmp = (c_int * n)()
image_lmp[:] = image
else:
image_lmp = image
type_lmp = (c_int * n)()
type_lmp[:] = type
self.lib.lammps_create_atoms(self.lmp,n,id_lmp,type_lmp,x,v,image_lmp,
shrinkexceed)
@property
def has_exceptions(self):
""" Return whether the LAMMPS shared library was compiled with C++ exceptions handling enabled """
return self.lib.lammps_config_has_exceptions() != 0
@property
def has_gzip_support(self):
return self.lib.lammps_config_has_gzip_support() != 0
@property
def has_png_support(self):
return self.lib.lammps_config_has_png_support() != 0
@property
def has_jpeg_support(self):
return self.lib.lammps_config_has_jpeg_support() != 0
@property
def has_ffmpeg_support(self):
return self.lib.lammps_config_has_ffmpeg_support() != 0
@property
def installed_packages(self):
if self._installed_packages is None:
self._installed_packages = []
npackages = self.lib.lammps_config_package_count()
sb = create_string_buffer(100)
for idx in range(npackages):
self.lib.lammps_config_package_name(idx, sb, 100)
self._installed_packages.append(sb.value.decode())
return self._installed_packages
  def set_fix_external_callback(self, fix_name, callback, caller=None):
    """Register a Python *callback* for fix external *fix_name*.

    The callback is invoked as callback(caller, ntimestep, nlocal, tag, x, f)
    with NumPy views onto the library's tag/position/force buffers.
    NOTE: the views wrap library-owned memory; they are only valid for the
    duration of the callback.
    """
    import numpy as np
    def _ctype_to_numpy_int(ctype_int):
      # map the library's tagint ctype to the matching NumPy integer dtype
      if ctype_int == c_int32:
        return np.int32
      elif ctype_int == c_int64:
        return np.int64
      return np.intc
    def callback_wrapper(caller_ptr, ntimestep, nlocal, tag_ptr, x_ptr, fext_ptr):
      # recover the Python caller object smuggled through the void* argument
      if cast(caller_ptr,POINTER(py_object)).contents:
        pyCallerObj = cast(caller_ptr,POINTER(py_object)).contents.value
      else:
        pyCallerObj = None
      # zero-copy views onto the C arrays (nlocal tags, (nlocal,3) x and f)
      tptr = cast(tag_ptr, POINTER(self.c_tagint * nlocal))
      tag = np.frombuffer(tptr.contents, dtype=_ctype_to_numpy_int(self.c_tagint))
      tag.shape = (nlocal)
      xptr = cast(x_ptr[0], POINTER(c_double * nlocal * 3))
      x = np.frombuffer(xptr.contents)
      x.shape = (nlocal, 3)
      fptr = cast(fext_ptr[0], POINTER(c_double * nlocal * 3))
      f = np.frombuffer(fptr.contents)
      f.shape = (nlocal, 3)
      callback(pyCallerObj, ntimestep, nlocal, tag, x, f)
    cFunc = self.FIX_EXTERNAL_CALLBACK_FUNC(callback_wrapper)
    cCaller = cast(pointer(py_object(caller)), c_void_p)
    # keep references alive: ctypes does not hold the callback or caller
    self.callback[fix_name] = { 'function': cFunc, 'caller': caller }
    self.lib.lammps_set_fix_external_callback(self.lmp, fix_name.encode(), cFunc, cCaller)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
################################################################################
# Alternative Python Wrapper
# Written by Richard Berger <richard.berger@temple.edu>
################################################################################
class OutputCapture(object):
  """ Utility class to capture LAMMPS library output

  While used as a context manager, the process-level stdout file
  descriptor (fd 1) is redirected into a pipe whose contents can be
  drained through read_pipe()/output.
  """
  def __init__(self):
    self.stdout_pipe_read, self.stdout_pipe_write = os.pipe()
    self.stdout_fd = 1
  def __enter__(self):
    # remember the real stdout fd, then point fd 1 at our pipe
    self.stdout = os.dup(self.stdout_fd)
    os.dup2(self.stdout_pipe_write, self.stdout_fd)
    return self
  def __exit__(self, exc_type, exc_value, exc_traceback):
    # renamed from (type, value, tracebac): avoids shadowing the builtin
    # 'type' and fixes the 'tracebac' typo; __exit__ is invoked
    # positionally, so callers are unaffected
    os.dup2(self.stdout, self.stdout_fd)
    os.close(self.stdout)
    os.close(self.stdout_pipe_read)
    os.close(self.stdout_pipe_write)
  # check if we have more to read from the pipe
  def more_data(self, pipe):
    r, _, _ = select.select([pipe], [], [], 0)
    return bool(r)
  # read the whole pipe
  def read_pipe(self, pipe):
    # NOTE(review): decoding each 1024-byte chunk separately can split a
    # multi-byte UTF-8 sequence across a chunk boundary -- confirm the
    # captured output is ASCII-only
    out = ""
    while self.more_data(pipe):
      out += os.read(pipe, 1024).decode()
    return out
  @property
  def output(self):
    return self.read_pipe(self.stdout_pipe_read)
class Variable(object):
  """Accessor for one LAMMPS variable created through the PyLammps wrapper."""
  def __init__(self, lammps_wrapper_instance, name, style, definition):
    self.wrapper = lammps_wrapper_instance
    self.name = name
    self.style = style
    self.definition = definition.split()
  @property
  def value(self):
    # atom-style variables yield one value per atom; anything else is
    # evaluated by printing "${name}" and parsing the captured output
    if self.style == 'atom':
      return list(self.wrapper.lmp.extract_variable(self.name, "all", 1))
    else:
      value = self.wrapper.lmp_print('"${%s}"' % self.name).strip()
      try:
        return float(value)
      except ValueError:
        return value
class AtomList(object):
  """List-like access to atoms; __getitem__ maps a 0-based index to the
  1-based LAMMPS atom index and returns an Atom (or Atom2D in 2D)."""
  def __init__(self, lammps_wrapper_instance):
    self.lmp = lammps_wrapper_instance
    self.natoms = self.lmp.system.natoms
    self.dimensions = self.lmp.system.dimensions
  def __getitem__(self, index):
    atom_class = Atom2D if self.dimensions == 2 else Atom
    return atom_class(self.lmp, index + 1)
class Atom(object):
  """Accessor for a single atom, addressed by its 1-based LAMMPS index.

  Reads go through LAMMPS expression evaluation (lmp.eval); writes go
  through the LAMMPS "set" command.
  """
  def __init__(self, lammps_wrapper_instance, index):
    self.lmp = lammps_wrapper_instance
    self.index = index
  @property
  def id(self):
    # atom ID (integer tag)
    return int(self.lmp.eval("id[%d]" % self.index))
  @property
  def type(self):
    return int(self.lmp.eval("type[%d]" % self.index))
  @property
  def mol(self):
    return self.lmp.eval("mol[%d]" % self.index)
  @property
  def mass(self):
    return self.lmp.eval("mass[%d]" % self.index)
  @property
  def position(self):
    # (x, y, z) tuple of coordinates
    return (self.lmp.eval("x[%d]" % self.index),
            self.lmp.eval("y[%d]" % self.index),
            self.lmp.eval("z[%d]" % self.index))
  @position.setter
  def position(self, value):
    self.lmp.set("atom", self.index, "x", value[0])
    self.lmp.set("atom", self.index, "y", value[1])
    self.lmp.set("atom", self.index, "z", value[2])
  @property
  def velocity(self):
    return (self.lmp.eval("vx[%d]" % self.index),
            self.lmp.eval("vy[%d]" % self.index),
            self.lmp.eval("vz[%d]" % self.index))
  @velocity.setter
  def velocity(self, value):
    self.lmp.set("atom", self.index, "vx", value[0])
    self.lmp.set("atom", self.index, "vy", value[1])
    self.lmp.set("atom", self.index, "vz", value[2])
  @property
  def force(self):
    # force is read-only; there is no setter
    return (self.lmp.eval("fx[%d]" % self.index),
            self.lmp.eval("fy[%d]" % self.index),
            self.lmp.eval("fz[%d]" % self.index))
  @property
  def charge(self):
    return self.lmp.eval("q[%d]" % self.index)
class Atom2D(Atom):
  """2D variant of Atom: position/velocity/force drop the z component."""
  def __init__(self, lammps_wrapper_instance, index):
    super(Atom2D, self).__init__(lammps_wrapper_instance, index)
  @property
  def position(self):
    # (x, y) tuple of coordinates
    return (self.lmp.eval("x[%d]" % self.index),
            self.lmp.eval("y[%d]" % self.index))
  @position.setter
  def position(self, value):
    self.lmp.set("atom", self.index, "x", value[0])
    self.lmp.set("atom", self.index, "y", value[1])
  @property
  def velocity(self):
    return (self.lmp.eval("vx[%d]" % self.index),
            self.lmp.eval("vy[%d]" % self.index))
  @velocity.setter
  def velocity(self, value):
    self.lmp.set("atom", self.index, "vx", value[0])
    self.lmp.set("atom", self.index, "vy", value[1])
  @property
  def force(self):
    # force is read-only
    return (self.lmp.eval("fx[%d]" % self.index),
            self.lmp.eval("fy[%d]" % self.index))
class variable_set:
    """Expose a dict of values as attributes.

    Keys of the form "name[3]" become a dict-valued attribute 'name'
    mapping the bracketed integer to the value; plain keys become plain
    attributes.  str() lists all public attributes.
    """
    def __init__(self, name, variable_dict):
        self._name = name
        array_pattern = re.compile(r"(?P<arr>.+)\[(?P<index>[0-9]+)\]")
        for key, value in variable_dict.items():
            match = array_pattern.match(key)
            if match is None:
                setattr(self, key, value)
            else:
                varname = match.group('arr')
                idx = int(match.group('index'))
                self.__dict__.setdefault(varname, {})[idx] = value
    def __str__(self):
        public = ["{}={}".format(k, v) for k, v in self.__dict__.items()
                  if not k.startswith('_')]
        return "{}({})".format(self._name, ','.join(public))
    def __repr__(self):
        return self.__str__()
def get_thermo_data(output):
    """ traverse output of runs and extract thermo data columns

    Returns a list of Run namedtuples, one per "Per MPI rank memory
    allocation ... Loop time of" section, each holding a 'thermo'
    variable_set whose attributes are lists of column values.
    """
    if isinstance(output, str):
        lines = output.splitlines()
    else:
        lines = output
    runs = []
    columns = []
    in_run = False
    current_run = {}
    for line in lines:
        if line.startswith("Per MPI rank memory allocation"):
            # this marker is printed immediately before the thermo header
            in_run = True
        elif in_run and len(columns) == 0:
            # first line after memory usage are column names
            columns = line.split()
            current_run = {}
            for col in columns:
                current_run[col] = []
        elif line.startswith("Loop time of "):
            # a run ended: package the collected columns
            in_run = False
            # bug fix: reset to an empty list instead of None so a second
            # run section in the same output does not crash on len(columns)
            columns = []
            thermo_data = variable_set('ThermoData', current_run)
            r = {'thermo' : thermo_data }
            runs.append(namedtuple('Run', list(r.keys()))(*list(r.values())))
        elif in_run and len(columns) > 0:
            values = [float(x) for x in line.split()]
            for i, col in enumerate(columns):
                current_run[col].append(values[i])
    return runs
class PyLammps(object):
  """
  More Python-like wrapper for LAMMPS (e.g., for iPython)
  See examples/ipython for usage
  """
  def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
    # wrap an existing instance when one is passed; otherwise create a
    # fresh lammps instance whose output will be captured by this wrapper
    if ptr:
      if isinstance(ptr,PyLammps):
        self.lmp = ptr.lmp
      elif isinstance(ptr,lammps):
        self.lmp = ptr
      else:
        self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm)
    else:
      self.lmp = lammps(name=name,cmdargs=cmdargs,ptr=None,comm=comm)
      print("LAMMPS output is captured by PyLammps wrapper")
    # every executed command is recorded here (see write_script)
    self._cmd_history = []
    # thermo data of completed runs (see run/last_run)
    self.runs = []
  def __del__(self):
    # release the underlying LAMMPS instance on garbage collection
    if self.lmp: self.lmp.close()
    self.lmp = None
  def close(self):
    """Shut down the wrapped LAMMPS instance (safe to call repeatedly)."""
    if self.lmp: self.lmp.close()
    self.lmp = None
  def version(self):
    """Return the numeric version of the wrapped LAMMPS library."""
    return self.lmp.version()
  def file(self,file):
    """Execute all commands from the given LAMMPS input script file."""
    self.lmp.file(file)
def write_script(self,filename):
""" Write LAMMPS script file containing all commands executed up until now """
with open(filename, "w") as f:
for cmd in self._cmd_history:
f.write("%s\n" % cmd)
  def command(self,cmd):
    """Execute one LAMMPS command and record it in the command history."""
    self.lmp.command(cmd)
    self._cmd_history.append(cmd)
  def run(self, *args, **kwargs):
    """Run the simulation; thermo output is parsed and appended to self.runs."""
    output = self.__getattr__('run')(*args, **kwargs)
    # broadcast so every MPI rank sees the same captured output text
    if(lammps.has_mpi4py):
      output = self.lmp.comm.bcast(output, root=0)
    self.runs += get_thermo_data(output)
    return output
@property
def last_run(self):
if len(self.runs) > 0:
return self.runs[-1]
return None
  @property
  def atoms(self):
    """List-like accessor for all atoms in the system."""
    return AtomList(self)
  @property
  def system(self):
    """Named tuple of system information parsed from 'info system'."""
    output = self.info("system")
    d = self._parse_info_system(output)
    return namedtuple('System', d.keys())(*d.values())
  @property
  def communication(self):
    """Named tuple of communication settings parsed from 'info communication'."""
    output = self.info("communication")
    d = self._parse_info_communication(output)
    return namedtuple('Communication', d.keys())(*d.values())
  @property
  def computes(self):
    """List of dicts describing the currently defined computes."""
    output = self.info("computes")
    return self._parse_element_list(output)
  @property
  def dumps(self):
    """List of dicts describing the currently defined dumps."""
    output = self.info("dumps")
    return self._parse_element_list(output)
  @property
  def fixes(self):
    """List of dicts describing the currently defined fixes."""
    output = self.info("fixes")
    return self._parse_element_list(output)
  @property
  def groups(self):
    """List of dicts with name/type of the currently defined groups."""
    output = self.info("groups")
    return self._parse_groups(output)
  @property
  def variables(self):
    """Dict mapping variable names to Variable accessor objects."""
    output = self.info("variables")
    vars = {}
    for v in self._parse_element_list(output):
      vars[v['name']] = Variable(self, v['name'], v['style'], v['def'])
    return vars
  def eval(self, expr):
    """Evaluate a LAMMPS expression; returns a float when possible,
    otherwise the raw output string."""
    value = self.lmp_print('"$(%s)"' % expr).strip()
    try:
      return float(value)
    except ValueError:
      return value
def _split_values(self, line):
return [x.strip() for x in line.split(',')]
def _get_pair(self, value):
return [x.strip() for x in value.split('=')]
  def _parse_info_system(self, output):
    """Parse 'info system' output lines into a dict of system properties.

    The first 6 and last 2 lines are banner/footer and are skipped; each
    remaining line is matched by its leading keyword.
    """
    lines = output[6:-2]
    system = {}
    for line in lines:
      if line.startswith("Units"):
        system['units'] = self._get_pair(line)[1]
      elif line.startswith("Atom style"):
        system['atom_style'] = self._get_pair(line)[1]
      elif line.startswith("Atom map"):
        system['atom_map'] = self._get_pair(line)[1]
      elif line.startswith("Atoms"):
        # "Atoms = N, types = M, style = S" style summary line
        parts = self._split_values(line)
        system['natoms'] = int(self._get_pair(parts[0])[1])
        system['ntypes'] = int(self._get_pair(parts[1])[1])
        system['style'] = self._get_pair(parts[2])[1]
      elif line.startswith("Kspace style"):
        system['kspace_style'] = self._get_pair(line)[1]
      elif line.startswith("Dimensions"):
        system['dimensions'] = int(self._get_pair(line)[1])
      elif line.startswith("Orthogonal box"):
        system['orthogonal_box'] = [float(x) for x in self._get_pair(line)[1].split('x')]
      elif line.startswith("Boundaries"):
        system['boundaries'] = self._get_pair(line)[1]
      elif line.startswith("xlo"):
        # "xlo, xhi = a, b" -> system['xlo']=a, system['xhi']=b
        keys, values = [self._split_values(x) for x in self._get_pair(line)]
        for key, value in zip(keys, values):
          system[key] = float(value)
      elif line.startswith("ylo"):
        keys, values = [self._split_values(x) for x in self._get_pair(line)]
        for key, value in zip(keys, values):
          system[key] = float(value)
      elif line.startswith("zlo"):
        keys, values = [self._split_values(x) for x in self._get_pair(line)]
        for key, value in zip(keys, values):
          system[key] = float(value)
      elif line.startswith("Molecule type"):
        system['molecule_type'] = self._get_pair(line)[1]
      elif line.startswith("Bonds"):
        parts = self._split_values(line)
        system['nbonds'] = int(self._get_pair(parts[0])[1])
        system['nbondtypes'] = int(self._get_pair(parts[1])[1])
        system['bond_style'] = self._get_pair(parts[2])[1]
      elif line.startswith("Angles"):
        parts = self._split_values(line)
        system['nangles'] = int(self._get_pair(parts[0])[1])
        system['nangletypes'] = int(self._get_pair(parts[1])[1])
        system['angle_style'] = self._get_pair(parts[2])[1]
      elif line.startswith("Dihedrals"):
        parts = self._split_values(line)
        system['ndihedrals'] = int(self._get_pair(parts[0])[1])
        system['ndihedraltypes'] = int(self._get_pair(parts[1])[1])
        system['dihedral_style'] = self._get_pair(parts[2])[1]
      elif line.startswith("Impropers"):
        parts = self._split_values(line)
        system['nimpropers'] = int(self._get_pair(parts[0])[1])
        system['nimpropertypes'] = int(self._get_pair(parts[1])[1])
        system['improper_style'] = self._get_pair(parts[2])[1]
    return system
  def _parse_info_communication(self, output):
    """Parse 'info communication' output into a dict of comm settings.

    The first 6 and last 3 lines are banner/footer and are skipped.
    """
    lines = output[6:-3]
    comm = {}
    for line in lines:
      if line.startswith("MPI library"):
        comm['mpi_version'] = line.split(':')[1].strip()
      elif line.startswith("Comm style"):
        parts = self._split_values(line)
        comm['comm_style'] = self._get_pair(parts[0])[1]
        comm['comm_layout'] = self._get_pair(parts[1])[1]
      elif line.startswith("Processor grid"):
        comm['proc_grid'] = [int(x) for x in self._get_pair(line)[1].split('x')]
      elif line.startswith("Communicate velocities for ghost atoms"):
        comm['ghost_velocity'] = (self._get_pair(line)[1] == "yes")
      elif line.startswith("Nprocs"):
        parts = self._split_values(line)
        comm['nprocs'] = int(self._get_pair(parts[0])[1])
        comm['nthreads'] = int(self._get_pair(parts[1])[1])
    return comm
def _parse_element_list(self, output):
lines = output[6:-3]
elements = []
for line in lines:
element_info = self._split_values(line.split(':')[1].strip())
element = {'name': element_info[0]}
for key, value in [self._get_pair(x) for x in element_info[1:]]:
element[key] = value
elements.append(element)
return elements
def _parse_groups(self, output):
lines = output[6:-3]
groups = []
group_pattern = re.compile(r"(?P<name>.+) \((?P<type>.+)\)")
for line in lines:
m = group_pattern.match(line.split(':')[1].strip())
group = {'name': m.group('name'), 'type': m.group('type')}
groups.append(group)
return groups
  def lmp_print(self, s):
    """ needed for Python2 compatibility, since print is a reserved keyword """
    # dispatches through __getattr__ to execute the LAMMPS "print" command
    return self.__getattr__("print")(s)
def __dir__(self):
return ['angle_coeff', 'angle_style', 'atom_modify', 'atom_style', 'atom_style',
'bond_coeff', 'bond_style', 'boundary', 'change_box', 'communicate', 'compute',
'create_atoms', 'create_box', 'delete_atoms', 'delete_bonds', 'dielectric',
'dihedral_coeff', 'dihedral_style', 'dimension', 'dump', 'fix', 'fix_modify',
'group', 'improper_coeff', 'improper_style', 'include', 'kspace_modify',
'kspace_style', 'lattice', 'mass', 'minimize', 'min_style', 'neighbor',
'neigh_modify', 'newton', 'nthreads', 'pair_coeff', 'pair_modify',
'pair_style', 'processors', 'read', 'read_data', 'read_restart', 'region',
'replicate', 'reset_timestep', 'restart', 'run', 'run_style', 'thermo',
'thermo_modify', 'thermo_style', 'timestep', 'undump', 'unfix', 'units',
'variable', 'velocity', 'write_restart']
  def __getattr__(self, name):
    """Map unknown attribute access to a LAMMPS command of the same name.

    The returned handler joins *name* and its stringified arguments into
    one command line, executes it with stdout captured, and returns the
    output: a list of lines, a single line, or None if there was none.
    Pass verbose=True to also echo the captured output.
    """
    def handler(*args, **kwargs):
      cmd_args = [name] + [str(x) for x in args]
      with OutputCapture() as capture:
        self.command(' '.join(cmd_args))
        output = capture.output
      if 'verbose' in kwargs and kwargs['verbose']:
        print(output)
      lines = output.splitlines()
      if len(lines) > 1:
        return lines
      elif len(lines) == 1:
        return lines[0]
      return None
    return handler
class IPyLammps(PyLammps):
  """
  iPython wrapper for LAMMPS which adds embedded graphics capabilities
  """
  def __init__(self,name="",cmdargs=None,ptr=None,comm=None):
    super(IPyLammps, self).__init__(name=name,cmdargs=cmdargs,ptr=ptr,comm=comm)
  def image(self, filename="snapshot.png", group="all", color="type", diameter="type",
            size=None, view=None, center=None, up=None, zoom=1.0):
    """Render a snapshot via LAMMPS "write_dump ... image" and return it
    as an inline IPython Image.

    size/view/center/up are optional sequences forwarded to the dump
    image command; zoom is forwarded when truthy.
    """
    cmd_args = [group, "image", filename, color, diameter]
    if size:
      width = size[0]
      height = size[1]
      cmd_args += ["size", width, height]
    if view:
      theta = view[0]
      phi = view[1]
      cmd_args += ["view", theta, phi]
    if center:
      flag = center[0]
      Cx = center[1]
      Cy = center[2]
      Cz = center[3]
      cmd_args += ["center", flag, Cx, Cy, Cz]
    if up:
      Ux = up[0]
      Uy = up[1]
      Uz = up[2]
      cmd_args += ["up", Ux, Uy, Uz]
    if zoom:
      cmd_args += ["zoom", zoom]
    cmd_args.append("modify backcolor white")
    self.write_dump(*cmd_args)
    from IPython.core.display import Image
    # bug fix: display the file that was actually written instead of the
    # hard-coded default 'snapshot.png'
    return Image(filename)
  def video(self, filename):
    """Return an HTML5 <video> element embedding *filename*."""
    from IPython.display import HTML
    return HTML("<video controls><source src=\"" + filename + "\"></video>")
|
Classic cotton balls are soft and gentle to all skin types. They are perfect for the removal of make-up and nail polish. Use to apply oils, powders and lotions to baby's delicate skin. Soft, safe care for cleaning baby's eyes, ears and nose.
Cotton rounds are useful for everyday clean up, polishing, and for use with hobbies and crafts.
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that we print a useful message (and exit non-zero) if an external
error occurs while deciding if a Node is current or not.
"""
import sys
import TestSCons
test = TestSCons.TestSCons()
# sandbox paths: the Install() target and the source file location
install = test.workpath('install')
install_file = test.workpath('install', 'file')
work_file = test.workpath('work', 'file')
test.subdir('install', 'work')
# the SConstruct makes 'file' a directory so opening it for signature
# calculation fails with IOError/OSError
test.write(['work', 'SConstruct'], """\
Alias("install", Install(r"%(install)s", File('file')))
# Make a directory where we expect the File() to be. This causes an
# IOError or OSError when we try to open it to read its signature.
import os
os.mkdir(r'%(work_file)s')
""" % locals())
# the OS error text differs between Windows and POSIX
if sys.platform == 'win32':
    error_message = "Permission denied"
else:
    error_message = "Is a directory"
expect = """\
scons: *** [%(install_file)s] %(work_file)s: %(error_message)s
""" % locals()
# expect exit status 2 and the error on stderr
test.run(chdir = 'work',
         arguments = 'install',
         status = 2,
         stderr = expect)
test.pass_test()
|
Susan Sawasy works closely with her Clients to understand their expectations as they relate to their project's design, budget, and deadlines. She works hard to merge her style with the Client's. She brings a wealth of sources and talent to all of her work. Her ability to edit the sometimes overwhelming options allows her Clients to make decisions with confidence. In the end, the space should reflect the owner's lifestyle, but should relate well to itself as a whole. Rooms should speak to each other; the colors, textures and furnishings should be well integrated.
|
# This file is part of stupid_python_tricks written by Duncan Townsend.
#
# stupid_python_tricks is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# stupid_python_tricks is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with stupid_python_tricks. If not, see <http://www.gnu.org/licenses/>.
from itertools import *
from fractions import gcd
from operator import itemgetter
def simple():
    """A simple prime generator using the Sieve of Eratosthenes.

    This is not intended to be fast, but is instead intended to be so
    simple that its correctness is obvious.  (Python 2: uses ifilter.)
    """
    stream = count(2)
    while True:
        prime = next(stream)
        # the outer lambda binds the current prime by value, avoiding the
        # late-binding-closure pitfall
        sieve = (lambda n: lambda i: i % n)(prime)
        stream = ifilter(sieve, stream)
        yield prime
def take(n, stream):
    """Yield only the first n items of stream."""
    return islice(stream, n)
def drop(n, stream):
    """Skip the first n items of stream, yielding the rest."""
    return islice(stream, n, None)
def nth(n, stream):
    """Return element n (0-based) of stream, consuming it; raises
    IndexError if the stream ends first."""
    for element in islice(stream, n, None):
        return element
    raise IndexError("Can't get element off the end of generator")
class Wheel(object):
    """Wheel for wheel-factorized prime sieving (Python 2 code).

    A wheel of modulus m (a primorial) knows which residues mod m are
    coprime to m -- its "spokes"; prime candidates are generated only on
    those spokes.  Iterating over the Wheel *class* yields successively
    larger wheels (see __metaclass__ below).
    """
    class Spokes(object):
        """Lazily-cached, periodically-repeating sequence of spoke values."""
        def __init__(self, iterator, length, last):
            self.iterator = take(length, iterator)
            self.length = length
            self.last = last
            self.cache = []
        def __len__(self):
            return self.length
        def __getitem__(self, key):
            # fill the cache up to `key` on demand; indices past one period
            # wrap via key % length once the cache is complete
            cache = self.cache
            if key >= len(cache):
                try:
                    it_next = self.iterator.next
                    append = cache.append
                    while key >= len(cache):
                        append(it_next())
                except StopIteration:
                    raise IndexError("%s index out of range or iterator ended early" % type(self).__name__)
            return cache[key % self.length]
        def index(self, needle):
            # interpolation search over the (sorted) spoke values; returns
            # the index of needle, or of the next-largest element
            left = 0
            left_value = self[left]
            right = self.length-1
            right_value = self.last
            while True:
                guess = ((right - left) * max(needle - left_value, 0) \
                         // max(right_value - left_value, 1)) + left
                guess_value = self[guess]
                if guess_value == needle:
                    # base case; needle is found
                    return guess
                elif guess_value < needle:
                    left = guess + 1
                    left_value = self[left]
                elif guess-1 < 0 or self[guess-1] < needle:
                    # base case; needle isn't present; return the
                    # index of the next-largest element
                    return guess
                else:
                    right = guess - 1
                    right_value = self[right]
    def __init__(self, smaller, prime):
        # Wheel(None, None) is the trivial wheel of modulus 1; otherwise
        # extend `smaller` by the next prime
        if smaller is None and prime is None:
            self.modulus = 1
            self.spokes = self.Spokes((1,), 1, 1)
        else:
            self.modulus = smaller.modulus * prime
            self.spokes = self.Spokes(ifilter(lambda x: x % prime,
                                              smaller),
                                      len(smaller.spokes)*(prime-1),
                                      self.modulus)
    def _index_unsafe(self, elem):
        # (cycle, spoke) coordinates of elem, without verifying membership
        cycle, raw_spoke = divmod(elem, self.modulus)
        spoke = self.spokes.index(raw_spoke)
        return (cycle, spoke)
    def index(self, elem):
        ret = self._index_unsafe(elem)
        if self[ret] != elem:
            raise IndexError("%d is not in %s" % (elem, type(self).__name__))
        return ret
    def __getitem__(self, (cycle, spoke)):
        return cycle*self.modulus + self.spokes[spoke]
    def __contains__(self, elem):
        return gcd(elem, self.modulus) == 1
    def __iter__(self):
        # all candidates coprime to the modulus, in increasing order
        spokes = self.spokes
        modulus = self.modulus
        for i in count():
            for j in spokes:
                yield i*modulus + j
    def roll(self, cycles, sieve=None):
        # Generate primes on this wheel's spokes; if `cycles` is not None,
        # stop after that many turns of the wheel.  `sieve` maps each
        # known composite ("hazard") to (prime, cycle, spoke) marking the
        # prime's current position on the wheel.
        modulus = self.modulus
        spokes = self.spokes
        # populate the sieve if it's not supplied
        if sieve is None:
            sieve = {}
            for p in takewhile(lambda p: p < modulus, simple()):
                if p in self:
                    for q in dropwhile(lambda q: q < p,
                                       takewhile(lambda q: q < modulus,
                                                 simple())):
                        hazard = p*q
                        if hazard > modulus and hazard in self:
                            sieve[hazard] = (p, None, None)
                            break
        # update the sieve for our wheel size
        to_delete = set()
        to_insert = set()
        for hazard, (prime, _, __) in sieve.iteritems():
            if hazard in self:
                cycle, spoke = self._index_unsafe(hazard // prime)
                sieve[hazard] = (prime, cycle, spoke)
            else:
                to_delete.add(hazard)
                if prime in self:
                    cycle, spoke = self._index_unsafe(hazard // prime)
                    to_insert.add((prime, cycle, spoke))
        for hazard in to_delete:
            del sieve[hazard]
        for prime, cycle, spoke in sorted(to_insert):
            # advance each re-inserted prime to an unoccupied hazard slot
            hazard = prime * self[(cycle, spoke)]
            while hazard in sieve:
                spoke += 1
                cycle_incr, spoke = divmod(spoke, len(spokes))
                cycle += cycle_incr
                hazard = prime * self[(cycle, spoke)]
            sieve[hazard] = (prime, cycle, spoke)
        del to_insert
        del to_delete
        # assert len(frozenset(imap(itemgetter(0), sieve.itervalues()))) == len(sieve)
        # assert all(imap(lambda hazard: hazard in self, sieve.iterkeys()))
        # perform the wheel factorization
        candidate_stream = drop(len(spokes), self)
        if cycles is not None:
            candidate_stream = take(len(spokes)*cycles, candidate_stream)
        # sieve the result
        for candidate in candidate_stream:
            if candidate in sieve:
                # composite: advance its prime's marker to the next
                # multiple that lies on this wheel
                hazard = candidate
                prime, cycle, spoke = sieve[hazard]
                # assert hazard == prime * self[(cycle, spoke)]
                while hazard in sieve:
                    spoke += 1
                    cycle_incr, spoke = divmod(spoke, len(spokes))
                    cycle += cycle_incr
                    hazard = prime * self[(cycle, spoke)]
                # assert hazard in self
                del sieve[candidate]
                sieve[hazard] = (prime, cycle, spoke)
            else:
                # prime: start sieving its multiples at candidate**2
                cycle, spoke = self._index_unsafe(candidate)
                sieve[candidate**2] = (candidate, cycle, spoke)
                yield candidate
                # assert all(imap(lambda h: h > candidate, sieve.iterkeys()))
    class __metaclass__(type):
        # iterating over the Wheel class yields successively larger wheels
        def __iter__(cls):
            last = cls(None, None)
            yield last
            for prime in simple():
                last = cls(last, prime)
                yield last
    def __repr__(self):
        return "<%s.%s with modulus %d>" % \
            (__name__, type(self).__name__, self.modulus)
def fixed_wheel(index):
    """Prime generator using wheel factorization with the index-th wheel.

    Primes below the wheel's modulus cannot appear on its spokes, so they
    are taken from the simple sieve first.
    """
    w = nth(index, Wheel)
    return chain(takewhile(lambda p: p < w.modulus, simple()),
                 w.roll(None))
def variable_wheel():
    """Prime generator that grows the wheel as primes are produced.

    Each wheel rolls for (prime-1) cycles before handing off to the next
    larger wheel; all wheels share the one sieve dict.
    """
    sieve = {}
    return chain.from_iterable( ( wheel.roll(prime-1, sieve)
                                  for wheel, prime in izip(Wheel, simple()) ) )
def _check_fixed(index, up_to):
    # Compare fixed_wheel(index) against a trusted prime source; returns
    # the position of the first mismatch, or None if the first `up_to`
    # primes agree.  Falls back to the simple sieve if pyprimes is absent.
    try:
        import pyprimes.sieves
        good_stream = pyprimes.sieves.best_sieve()
    except ImportError:
        good_stream = simple()
    for i, (a, b) in enumerate(take(up_to,
                                    izip(fixed_wheel(index),
                                         good_stream))):
        if a != b:
            return i
def _check_variable(up_to):
    # Same check as _check_fixed, but for the adaptive variable_wheel()
    # generator.
    try:
        import pyprimes.sieves
        good_stream = pyprimes.sieves.best_sieve()
    except ImportError:
        good_stream = simple()
    for i, (a, b) in enumerate(take(up_to,
                                    izip(variable_wheel(),
                                         good_stream))):
        if a != b:
            return i
if __name__ == '__main__':
    import sys
    # Python 2 script entry point: print the nth prime (n from argv[1])
    print nth(int(sys.argv[1]), variable_wheel())
|
Affiliations - Advanced Gastroenterology Associates, LLC.
Advanced Gastroenterology Associates, LLC has a rich support infrastructure to care for your needs throughout Pinellas, Pasco and Hernando Counties. These include Hospitals, Outpatient, Surgery and Specialty Centers. Our G.I. Physicians belong to an extended network of Medical Organizations which provide experience and knowledge to ensure quality in care.
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
import webnotes
import unittest
from accounts.doctype.shipping_rule.shipping_rule import FromGreaterThanToError, ManyBlankToValuesError, OverlappingConditionError
class TestShippingRule(unittest.TestCase):
    """Validate the condition checks Shipping Rule performs on insert."""
    def test_from_greater_than_to(self):
        # a condition whose from_value exceeds its to_value must be rejected
        rule = webnotes.bean(copy=test_records[0])
        rule.doclist[1].from_value = 101
        self.assertRaises(FromGreaterThanToError, rule.insert)
    def test_many_zero_to_values(self):
        # at most one condition may leave to_value blank/zero
        rule = webnotes.bean(copy=test_records[0])
        rule.doclist[1].to_value = 0
        self.assertRaises(ManyBlankToValuesError, rule.insert)
    def test_overlapping_conditions(self):
        # any overlap between two [from, to] ranges must be rejected
        overlapping_pairs = [
            ((50, 150), (0, 100)),
            ((50, 150), (100, 200)),
            ((50, 150), (75, 125)),
            ((50, 150), (25, 175)),
            ((50, 150), (50, 150)),
        ]
        for range_a, range_b in overlapping_pairs:
            rule = webnotes.bean(copy=test_records[0])
            rule.doclist[1].from_value = range_a[0]
            rule.doclist[1].to_value = range_a[1]
            rule.doclist[2].from_value = range_b[0]
            rule.doclist[2].to_value = range_b[1]
            self.assertRaises(OverlappingConditionError, rule.insert)
# Fixture: one Shipping Rule with three net-total slabs
# (0-100 -> 50.0, 101-200 -> 100.0, 201+ -> free) valid for one territory.
test_records = [
    [
        {
            "doctype": "Shipping Rule",
            "label": "_Test Shipping Rule",
            "calculate_based_on": "Net Total",
            "company": "_Test Company",
            "account": "_Test Account Shipping Charges - _TC",
            "cost_center": "_Test Cost Center - _TC"
        },
        {
            "doctype": "Shipping Rule Condition",
            "parentfield": "shipping_rule_conditions",
            "from_value": 0,
            "to_value": 100,
            "shipping_amount": 50.0
        },
        {
            "doctype": "Shipping Rule Condition",
            "parentfield": "shipping_rule_conditions",
            "from_value": 101,
            "to_value": 200,
            "shipping_amount": 100.0
        },
        {
            "doctype": "Shipping Rule Condition",
            "parentfield": "shipping_rule_conditions",
            "from_value": 201,
            "shipping_amount": 0.0
        },
        {
            "doctype": "Applicable Territory",
            "parentfield": "valid_for_territories",
            "territory": "_Test Territory"
        }
    ]
]
|
Wind Mobile started selling “lightly loved” iPhones on June 25th. This addition to its lineup brought Wind the promise to dramatically increase its subscriber base by offering customers low-cost, refurbished iPhones.
The wireless carrier does not list the source of its iPhone inventory, but the Globe and Mail reveals a battle has started between Apple Canada and Ingram Micro. According to the report, Apple states that Ingram has breached its contract and has requested the supplier to stop providing Wind with refurbished iPhones.
“Ingram’s interpretation of the contract was that the distribution of used iPhones was not a breach and it expressed concern about cutting off Wind’s supply of the device, but ultimately said it wanted to co-operate with Apple,” said the report.
Apparently the request for Ingram to stop providing used iPhones might have been started by Bell, who was “upset about Wind being able to access the Apple product.” According to sources, Ingram agreed to provide Wind Mobile with 50,000 used iPhones.
Wind Mobile is currently selling the 16GB iPhone 5s for $499 and the 16GB iPhone 5c $399. The wireless carrier has over 800,000 subscribers and revealed during a recent interview with MobileSyrup that it had over 25,000 iPhone customers.
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005-2009 University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
####################################################
#
# This script must be run as root in a UNIX system.
# Any call to http://(this-host):PORT/?sessionid=foo
# Will cause the user USERNAME to have "foo" as
# password. This is useful for sharing the session
# with the user through SSH or other systems based
# on the systems password.
#
PORT = 18080  # TCP port the HTTP server listens on
USERNAME = 'weblab'  # UNIX account whose password gets rewritten
PASSWD_PATH = "/usr/bin/passwd"  # passwd(1) binary driven through pexpect
####################################################
import pexpect
import time
import urllib
import traceback
import BaseHTTPServer
def change_password(new_passwd):
    """Set USERNAME's system password to *new_passwd* by driving passwd(1)."""
    child = pexpect.spawn("%s %s" % (PASSWD_PATH, USERNAME))
    # passwd asks for the new password twice (entry + confirmation),
    # so answer the same prompt two times.
    attempts = 0
    while attempts < 2:
        child.expect("password: ")
        child.sendline(new_passwd)
        # give passwd a moment to consume the line before the next prompt
        time.sleep(0.1)
        attempts += 1
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
_, query_args = urllib.splitquery(self.path)
arguments = dict([ urllib.splitvalue(query_arg) for query_arg in query_args.split('&') ])
session_id = arguments.get('sessionid')
if session_id is None:
self.send_error(400)
self.end_headers()
self.wfile.write("fail: sessionid argument is required")
else:
try:
change_password(session_id)
except Exception, e:
traceback.print_exc()
self.send_error(500)
self.end_headers()
self.wfile.write("Internal error: %s" % str(e))
else:
self.send_response(200)
self.end_headers()
self.wfile.write("ok")
self.wfile.close()
# Bind on all interfaces and serve until killed.  NOTE(review): anybody who
# can reach PORT can reset USERNAME's password -- this must only run on a
# trusted/firewalled network (as the header comment implies).
server = BaseHTTPServer.HTTPServer(('',PORT), RequestHandlerClass = Handler)
server.serve_forever()
|
NEW YORK, June 1, 2017 - Freixenet Cava announces today, the brand’s first-ever signature holiday, National Bubbly Day. The annual holiday will be held on Saturday, June 3, 2017 and Freixenet is inviting people around the country to pop open a bottle of Cava and celebrate life, love and bubbly fun.
Freixenet set out to create a special bubbly holiday dedicated exclusively to the most festive celebratory libation. Freixenet chose the name “National Bubbly Day,” not just to encompass sparkling wine, but to also celebrate the beginning of summer and activities that make people feel effervescent. Whether you spend the day by the pool, host a movie night at home, or go for a night out with friends, Freixenet hopes people will gather together and celebrate.
Freixenet Cava, part of the Freixenet USA portfolio, has become the nation’s leading Cava brand and with the launch of National Bubbly Day, the brand is poised to continue its growth and bubbly magic with the creation of their hallmark holiday.
Freixenet Cava is available at leading grocery stores nationwide as well as specialty wine and spirits locations. To find a retailer near you and to learn more, please visit freixenetusa.com and follow us on Facebook, Instagram and Twitter @FreixenetUSA.
Freixenet Cava encourages everyone to drink responsibly and cheers with care on National Bubbly Day.
Freixenet (pronounced fresh-eh-net) Cava, best known for its iconic black bottle, was introduced in 1914. This méthode traditionelle sparkling wine is made in the family owned winery based just outside of Barcelona, Spain. Freixenet’s matte black bottle is just like a blackboard and can be personalized with a gold marker!
|
from configparser import ConfigParser
from utils.cli import CLI
from api.queue import Queue
from api.nerds import NerdsApi
from scanner.host import HostScanner
from scanner.exceptions import ScannerExeption
from utils.url import url_concat
import logging
FORMAT = '%(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('ni_scanner')
def process_host(queue, nerds_api):
    """Drain every queued "Host" item: scan it and post the result to NI.

    Successfully scanned items are sent to *nerds_api* and marked done;
    items whose scan yields nothing or raises are marked failed.
    """
    item = queue.next("Host")
    while item:
        try:
            queue.processing(item)
            scanner = HostScanner(item)
            nerds = scanner.process()
            if not nerds:
                # Scan produced no data -- route through the failed() helper
                # (like the except branches below) so an NI connectivity
                # error here is logged instead of bubbling into the outer
                # handler and marking the item failed twice.
                logger.error("Unable to scan item %s", str(item))
                failed(queue, item)
            else:
                logger.debug("Posting nerds data")
                nerds_api.send(nerds)
                queue.done(item)
        except ScannerExeption as e:
            logger.error("%s", e)
            failed(queue, item)
        except Exception as e:
            logger.error("Unable to process host %s got error: %s", item, str(e))
            failed(queue, item)
        item = queue.next("Host")
def failed(queue, item):
    """Mark *item* as failed, logging (not raising) NI connectivity errors."""
    try:
        queue.failed(item)
    except Exception as error:
        logger.error("Problem with reaching NI, got error: %s", error)
def main():
    """Entry point: load the NI configuration, build API clients, drain queue.

    Returns None (after logging) when the config file cannot be opened.
    """
    args = CLI().options()
    try:
        config = ConfigParser()
        # read_file replaces the deprecated readfp (removed in Python 3.12);
        # the with-block also guarantees the handle is closed.
        with open(args.config) as config_file:
            config.read_file(config_file)
    except IOError:
        logger.error("Config file '%s' is missing", args.config)
        return None
    # ready :)
    api_user = config.get("NI", "api_user")
    api_key = config.get("NI", "api_key")
    queue_url = url_concat(config.get("NI", "url"), "scan_queue/")
    queue = Queue(queue_url, api_user, api_key)
    nerds_url = url_concat(config.get("NI", "url"), "nerds/")
    nerds_api = NerdsApi(nerds_url, api_user, api_key)
    process_host(queue, nerds_api)
if __name__ == "__main__":
    main()
|
Broderick Court is a thoroughly attractive urban property with a huge amount to offer. Its central location immediately makes it an ideal choice for any professional who is based in the heart of Leeds. But the apartments in Broderick Court provide much more than mere practicality; they also offer bags of room and a fantastic view of Millennium Square.
The interior of Broderick Court has been fitted out with traditional-styled wooden kitchens and bathrooms giving each apartment a thoroughly homely and wholesome feel. Yet the exterior of the property is actually much more contemporary looking with its wide steel-fronted balconies. This property is a mere stone's throw from many of the city's main shops, pubs and clubs, making it an extremely desirable place to live. Yet you will not have to sacrifice your peace simply for the sake of convenience as this is a surprisingly quiet area to reside in.
|
# coding=utf-8
"""Author: Konrad Zemek
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Functions wrapping capabilities of docker binary.
"""
import json
import os
import subprocess
import sys
def run(image, docker_host=None, detach=False, dns_list=None, add_host=None,
        envs=None, hostname=None, interactive=False, link=None, tty=False, rm=False,
        reflect=None, volumes=None, name=None, workdir=None, user=None, group=None,
        group_add=None, cpuset_cpus=None, privileged=False, run_params=None,
        command=None, output=False, stdin=None, stdout=None, stderr=None):
    """Assemble and execute a ``docker run`` command line.

    :param image: image to run
    :param docker_host: optional host for docker's ``-H`` option
    :param command: shell command (string, run via ``sh -c``) or argv list
        executed inside the container; ``None`` keeps the image default
    :param output: when True (or when *detach* is set) return the stripped
        stdout of the docker client, otherwise return its exit code
    The remaining keywords map directly onto ``docker run`` flags.
    """
    # The previous signature used mutable defaults ([], {}), which Python
    # shares between calls; normalise None sentinels here instead.
    dns_list = dns_list if dns_list is not None else []
    add_host = add_host if add_host is not None else {}
    envs = envs if envs is not None else {}
    link = link if link is not None else {}
    reflect = reflect if reflect is not None else []
    volumes = volumes if volumes is not None else []
    group_add = group_add if group_add is not None else []
    run_params = run_params if run_params is not None else []
    cmd = ['docker']
    if docker_host:
        cmd.extend(['-H', docker_host])
    cmd.append('run')
    if detach:
        cmd.append('-d')
    for addr in dns_list:
        cmd.extend(['--dns', addr])
    for key, value in add_host.iteritems():
        cmd.extend(['--add-host', '{0}:{1}'.format(key, value)])
    for key in envs:
        cmd.extend(['-e', '{0}={1}'.format(key, envs[key])])
    if hostname:
        cmd.extend(['-h', hostname])
    # -i/-t are only passed when attached to a real terminal (or detached)
    if detach or sys.__stdin__.isatty():
        if interactive:
            cmd.append('-i')
        if tty:
            cmd.append('-t')
    for container, alias in link.items():
        cmd.extend(['--link', '{0}:{1}'.format(container, alias)])
    if name:
        cmd.extend(['--name', name])
    if rm:
        cmd.append('--rm')
    for path, read in reflect:
        vol = '{0}:{0}:{1}'.format(os.path.abspath(path), read)
        cmd.extend(['-v', vol])
    # Volume can be in one of three forms
    # 1. 'path_on_docker'
    # 2. ('path_on_host', 'path_on_docker', 'ro'/'rw')
    # 3. {'volumes_from': 'volume name'}
    for entry in volumes:
        if isinstance(entry, tuple):
            path, bind, readable = entry
            vol = '{0}:{1}:{2}'.format(os.path.abspath(path), bind, readable)
            cmd.extend(['-v', vol])
        elif isinstance(entry, dict):
            volume_name = entry['volumes_from']
            cmd.extend(['--volumes-from', volume_name])
        else:
            cmd.extend(['-v', entry])
    if workdir:
        cmd.extend(['-w', os.path.abspath(workdir)])
    if user:
        user_group = '{0}:{1}'.format(user, group) if group else user
        cmd.extend(['-u', user_group])
    for g in group_add:
        cmd.extend(['--group-add', g])
    if privileged:
        cmd.append('--privileged')
    if cpuset_cpus:
        cmd.extend(['--cpuset-cpus', cpuset_cpus])
    cmd.extend(run_params)
    cmd.append(image)
    if isinstance(command, basestring):
        cmd.extend(['sh', '-c', command])
    elif isinstance(command, list):
        cmd.extend(command)
    elif command is not None:
        raise ValueError('{0} is not a string nor list'.format(command))
    if detach or output:
        return subprocess.check_output(cmd, stdin=stdin, stderr=stderr).decode(
            'utf-8').strip()
    return subprocess.call(cmd, stdin=stdin, stderr=stderr, stdout=stdout)
def exec_(container, command, docker_host=None, user=None, group=None,
          detach=False, interactive=False, tty=False, privileged=False,
          output=False, stdin=None, stdout=None, stderr=None):
    """Assemble and execute a ``docker exec`` command line.

    *command* may be a string (run via ``sh -c``) or an argv list; anything
    else raises ValueError.  With *detach* or *output* the docker client's
    stripped stdout is returned, otherwise its exit code.
    """
    cmd = ['docker']
    if docker_host:
        cmd += ['-H', docker_host]
    cmd.append('exec')
    if user:
        cmd += ['-u', '{0}:{1}'.format(user, group) if group else user]
    if detach:
        cmd.append('-d')
    # -i/-t only make sense when we actually have a terminal (or detach)
    if detach or sys.__stdin__.isatty():
        if interactive:
            cmd.append('-i')
        if tty:
            cmd.append('-t')
    if privileged:
        cmd.append('--privileged')
    cmd.append(container)
    if isinstance(command, basestring):
        cmd += ['sh', '-c', command]
    elif isinstance(command, list):
        cmd += command
    else:
        raise ValueError('{0} is not a string nor list'.format(command))
    if detach or output:
        captured = subprocess.check_output(cmd, stdin=stdin, stderr=stderr)
        return captured.decode('utf-8').strip()
    return subprocess.call(cmd, stdin=stdin, stderr=stderr, stdout=stdout)
def inspect(container, docker_host=None):
    """Return the parsed ``docker inspect`` record for *container*."""
    cmd = ['docker'] + (['-H', docker_host] if docker_host else [])
    cmd += ['inspect', container]
    raw = subprocess.check_output(cmd, universal_newlines=True)
    # docker inspect prints a JSON array; we asked about one container
    return json.loads(raw)[0]
def logs(container, docker_host=None):
    """Return the container's log output with stderr folded into stdout."""
    cmd = ['docker'] + (['-H', docker_host] if docker_host else [])
    cmd += ['logs', container]
    return subprocess.check_output(cmd, universal_newlines=True,
                                   stderr=subprocess.STDOUT)
def remove(containers, docker_host=None, force=False,
           link=False, volumes=False):
    """Remove the given containers via ``docker rm``."""
    cmd = ['docker'] + (['-H', docker_host] if docker_host else [])
    cmd.append('rm')
    # translate the boolean switches into their CLI flags
    for enabled, flag in ((force, '-f'), (link, '-l'), (volumes, '-v')):
        if enabled:
            cmd.append(flag)
    cmd.extend(containers)
    subprocess.check_call(cmd)
def cp(container, src_path, dest_path, to_container=False):
    """Copy a file between a docker container and the host.

    :param container: str, docker id or name
    :param src_path: str
    :param dest_path: str
    :param to_container: bool; when True copy host -> container, otherwise
        copy container -> host
    """
    if to_container:
        source, target = src_path, "{0}:{1}".format(container, dest_path)
    else:
        source, target = "{0}:{1}".format(container, src_path), dest_path
    subprocess.check_call(["docker", "cp", source, target])
def login(user, password, repository='hub.docker.com'):
    """Log into the given docker repository."""
    cmd = ['docker', 'login', '-u', user, '-p', password, repository]
    subprocess.check_call(cmd)
def build_image(image, build_args):
    """Build and tag a docker image (clean, uncached build)."""
    cmd = ['docker', 'build', '--no-cache', '--force-rm', '-t', image]
    subprocess.check_call(cmd + build_args)
def tag_image(image, tag):
    """Attach *tag* to an existing docker image."""
    cmd = ['docker', 'tag', image, tag]
    subprocess.check_call(cmd)
def push_image(image):
    """Push a docker image to its repository."""
    cmd = ['docker', 'push', image]
    subprocess.check_call(cmd)
def pull_image(image):
    """Pull a docker image from its repository."""
    cmd = ['docker', 'pull', image]
    subprocess.check_call(cmd)
def remove_image(image):
    """Force-remove a docker image."""
    cmd = ['docker', 'rmi', '-f', image]
    subprocess.check_call(cmd)
def create_volume(path, name, image, command):
    """Create a named data-volume container; return the docker client output."""
    cmd = ['docker', 'create',
           '-v', path,
           '--name', name,
           image,
           command]
    return subprocess.check_output(cmd, universal_newlines=True,
                                   stderr=subprocess.STDOUT)
|
Commemorate the Clemson Tigers becoming the 2016 College Football Playoff National Champions with this framed collectible. Each piece comes designed with one game-day photograph, descriptive plate with season schedule, and a team logo. It comes matted in team colors and framed in black wood. It measures 23.5" x 27.5" x 1" and is ready to hang in any home or office. The product is officially licensed by the NCAA.
|
# -*- coding: utf-8 -*-
import os
import sys
import struct
# Usage text printed when the script is invoked without arguments.
USAGE = """
python gen_dir_list.py src_path [dst_path]
"""
# Print the usage text (Python 2 print statement).
def usage(): print USAGE
def format_path(path):
    """Normalise *path*: forward slashes only, with a guaranteed trailing '/'.

    An empty string becomes "./".
    """
    if not path:
        return "./"
    normalised = path.replace('\\', '/')
    if not normalised.endswith('/'):
        normalised += '/'
    return normalised
def list_all_files(path):
    """Recursively list *path* (which must end with '/').

    Returns a sorted list of (name, children) tuples where children is
    None for plain files and a nested listing for directories.
    """
    entries = []
    for name in sorted(os.listdir(path)):
        full = path + name
        if os.path.isdir(full):
            entries.append((name, list_all_files(full + '/')))
        else:
            entries.append((name, None))
    return entries
def output_dirs(path, handle):
    """Serialise one (name, children) tree node into *handle*.

    Layout per node: uint16 name length, the raw name, then a uint16
    child count (0xffff marks a plain file), followed by each child
    serialised recursively.
    """
    name, children = path
    handle.write(struct.pack("H", len(name)))
    handle.write(name)
    if children is None:
        # sentinel: this entry is a file, not a directory
        handle.write(struct.pack("H", 0xffff))
        return
    handle.write(struct.pack("H", len(children)))
    for child in children:
        output_dirs(child, handle)
def gen_dir_list(src_path, dst_path):
src_path = format_path(src_path)
dst_path = format_path(dst_path)
print "collect files: ", src_path
paths = list_all_files(src_path)
filename = dst_path + "ora.dir"
print "write fo file: ", filename
handle = open(filename, "wb")
output_dirs((".", paths), handle)
handle.close()
def main():
    """CLI entry: src_path is required; dst_path defaults to src_path."""
    argv = sys.argv
    if len(argv) < 2:
        return usage()
    src = argv[1]
    dst = argv[2] if len(argv) > 2 else src
    gen_dir_list(src, dst)
if __name__ == "__main__":
    main()
|
Goodwill is an energetic, positive word. It’s a proactively muscular word. It contains enough self-definition that further commentary is unnecessary in casual conversation.
Let’s start describing ourselves as the goodwill Baptists.
Let’s drop the milquetoast modifier “moderate” to describe Southern Baptists who distance themselves from fundamentalism.
The phrase “moderate” was used during the last few decades of the 20th century to contrast one group of Baptists against another group of Baptists–those who were theological fundamentalists with their claim of an inerrant Bible, assertion of biblical literalness, declaration of theological purity and pronouncement of a truncated moral agenda.
If you disagreed with the veracity, method and secular political loyalty of the fundamentalists in their takeover of the Southern Baptist Convention and conquest of the public square, then you called yourself a moderate.
Of course, a moderate was not a liberal. Moderates stiff-armed zealots at both ends of the spectrum, clinging to the yellow stripe in the middle of the theological and political road.
The moderate, mainstream and centrist modifiers always required a lot more words that explained why we were not them (Southern Baptist fundamentalists). In other words, we defined ourselves negatively, because to describe the SBC was to use negative terms.
Now granted, some moderates have denied for a long time that they even care about the fundamentalists. They claim disinterest, while watching the SBC with eagle eyes and funding it through their churches and state conventions. Such is the situation of the Baptist deniers.
As non-fundamentalist Baptists go forward in the 21st century, we need a simple, positive, proactive, powerful, descriptive term.
Renaming is certainly not a unique issue for Baptists of the South. Some Southern Baptists have tried to drop the “southern” modifier, an effort that failed several years ago when the majority decided that keeping southern was a good thing, never mind that the term tied them to slavery, segregation and southern culture.
The Baptist Center for Ethics has used the modifier “goodwill” to describe the kind of Baptists we hope our readers are or should be.
When BCE released its global poverty DVD in the summer of 2006, we highlighted what goodwill Baptists were doing around the world to address hunger and poverty specifically through Baptist World Aid and its partners.
We think that the modifier “goodwill” works a lot more accurately and constructively than some of the old terms.
Let’s start describing ourselves as “goodwill Baptists.” Let’s see if that modifier works better in defining who we are to ourselves and to others.
|
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import datetime
import csv
from numpy import *
from scipy.stats.kde import gaussian_kde
import os
##############################
# Petar Ojdrovic
##############################
# parses sentiment data
# compares with underlying prices
import bisect # for bisection search
def find_le(a, x):
    """Return the INDEX of the rightmost element of sorted *a* that is <= x.

    Raises ValueError when every element is greater than x.
    """
    pos = bisect.bisect_right(a, x)
    if not pos:
        raise ValueError
    return pos - 1
def find_ge(a, x):
    """Return the INDEX of the leftmost element of sorted *a* that is >= x.

    Raises ValueError when every element is smaller than x.
    """
    pos = bisect.bisect_left(a, x)
    if pos == len(a):
        raise ValueError
    return pos
#### load prices ####
# NOTE(review): dtype='string' is the Python 2 numpy spelling; on Python 3
# this would need dtype=str -- confirm the intended interpreter.
data = loadtxt('Prices_NYSE100_2011-2013_5.csv', delimiter=',', dtype='string')
tickers = data[0,1:]  # header row (minus the corner cell) holds the tickers
times = data[1:,0]  # first column holds the timestamps
times = [datetime.datetime.strptime(t, '%Y%m%d %H:%M') for t in times] # datetime object
ords = [t.toordinal() for t in times] # ordinals (integer for each day)
P = data[1:,1:].astype(float)  # price matrix: rows = timestamps, cols = tickers
#### compare with news data ####
# load news data
# Concatenate all three yearly XML dumps into one flat list of lines.
# NOTE(review): `input` shadows the builtin of the same name; harmless here
# but worth renaming if this script grows.
with open('Alex_Top100_2011to2013/Alex_Top100_2011.xml') as input:
    lines = input.readlines()
with open('Alex_Top100_2011to2013/Alex_Top100_2012.xml') as input:
    for l in input:
        lines.append(l)
with open('Alex_Top100_2011to2013/Alex_Top100_2013.xml') as input:
    for l in input:
        lines.append(l)
# loop through tickers...
# For each ticker: extract its news events from the XML lines, keep only the
# high-confidence ones, relate each event to intraday/overnight price moves,
# then plot leading/following return distributions as KDEs saved to PDF.
for tick in tickers[:10]:
    n = where(array(tickers)==tick)[0][0]  # column index of this ticker in P
    newsdat = []
    newstime = []
    # The XML rows have a fixed line layout relative to the '<Row>' marker:
    # +3 ticker, +5 day, +6 minute, +7 sentiment, +8 confidence, +9 novelty,
    # +11 relevance.
    for i in range(len(lines)):
        if '<Row>' in lines[i] and '>'+tick+'<' in lines[i+3]:
            day = lines[i+5].split('Type="String">')[1].split('</Data>')[0]
            minute = lines[i+6].split('Type="String">')[1].split('</Data>')[0][:8]
            sentiment = float(lines[i+7].split('Type="Number">')[1].split('</Data>')[0])
            confidence = float(lines[i+8].split('Type="Number">')[1].split('</Data>')[0])
            novelty = float(lines[i+9].split('Type="Number">')[1].split('</Data>')[0])
            relevance = float(lines[i+11].split('Type="Number">')[1].split('</Data>')[0])
            newsdat.append([sentiment, confidence, novelty, relevance])
            newstime.append([day, minute])
    newsdat = array(newsdat)
    if len(newsdat)==0: # no events for this ticker
        continue
    X = [] # high quality events
    # keep only non-neutral, high-confidence, novel, fully relevant events
    for i in range(len(newsdat)):
        if newsdat[i,0]!=0.0 and newsdat[i,1]>0.95 and newsdat[i,2]==1.0 and newsdat[i,3]==1.0:
            event_time = datetime.datetime.strptime(newstime[i][0]+' '+newstime[i][1],'%Y-%m-%d %H:%M:%S')
            X.append([event_time, newsdat[i,0]])
    L = [] # check to see if news anticipates (intraday)
    F = [] # check to see if news follows (intraday)
    L_o = [] # overnight
    F_o = [] # overnight
    for x in X:
        if x[0].toordinal() in ords:
            # intraday
            if (x[0].time() >= datetime.time(9,30)) and (x[0].time() <= datetime.time(16,00)):
                close_p = P[find_le(ords, x[0].toordinal()),n] # close price that day
                open_p = P[find_ge(ords, x[0].toordinal()),n]
                recent_p = P[find_le(times, x[0]),n] # most recent price before news
                L.append([x[1], (close_p-recent_p)/recent_p])
                F.append([x[1], (recent_p-open_p)/open_p])
            # overnight
            else:
                close_p = P[find_le(ords, x[0].toordinal()),n] # close price that day
                open_p = P[find_ge(ords, x[0].toordinal()),n]
                recent_p = P[find_le(times, x[0]),n] # most recent price before news
                next_close_p = P[find_le(ords, x[0].toordinal()+1),n] # should revise to handle Fridays...
                L_o.append([x[1], (next_close_p - recent_p)/recent_p])
                F_o.append([x[1], (close_p - open_p)/open_p])
    L = array(L)
    F = array(F)
    print(tick+': '+str(sum(L[:,0]==1))+' positive, '+str(sum(L[:,0]==-1))+' negative')
    # make KDE plots
    # symmetric x-range with 50% headroom over the most extreme return
    b = 1.5*max(abs(array([min(L[:,1]), max(L[:,1]), min(F[:,1]), max(F[:,1])])))
    xs = arange(-b, b, 2*b/1000.0)
    kde_L_p = gaussian_kde([L[i,1] for i in range(len(L)) if L[i,0]>0]) # leading, positive
    y_L_p = kde_L_p.evaluate(xs)
    kde_L_n = gaussian_kde([L[i,1] for i in range(len(L)) if L[i,0]<0]) # leading, negative
    y_L_n = kde_L_n.evaluate(xs)
    kde_F_p = gaussian_kde([F[i,1] for i in range(len(F)) if F[i,0]>0]) # following, positive
    y_F_p = kde_F_p.evaluate(xs)
    kde_F_n = gaussian_kde([F[i,1] for i in range(len(F)) if F[i,0]<0]) # following, negative
    y_F_n = kde_F_n.evaluate(xs)
    fig = plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
    ax = fig.add_subplot(111)
    ax.plot(xs, y_L_p, linewidth=2, color='r')
    ax.plot(xs, y_L_n, linewidth=2, color='b')
    ax.fill_between(xs, y_L_p, color='r', alpha=0.2)
    ax.fill_between(xs, y_L_n, color='b', alpha=0.2)
    ax.legend(('Positive', 'Negative'), loc='upper left')
    # NOTE(review): the "/10)*10" rounding assumes Python 2 integer division;
    # under Python 3 `/` produces floats -- confirm intended interpreter.
    top = (int(max([max(y_L_p), max(y_L_n)]))/10)*10+10
    ax.plot([0, 0], [0, top], color='k', linewidth=2)
    ax.grid()
    plt.title(tick,size=20)
    pdf = PdfPages(tick+'_leading_intraday.pdf')
    pdf.savefig()
    pdf.close()
    plt.close()
    fig = plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
    ax = fig.add_subplot(111)
    ax.plot(xs, y_F_p, linewidth=2, color='r')
    ax.plot(xs, y_F_n, linewidth=2, color='b')
    ax.fill_between(xs, y_F_p, color='r', alpha=0.2)
    ax.fill_between(xs, y_F_n, color='b', alpha=0.2)
    ax.legend(('Positive', 'Negative'), loc='upper left')
    top = (int(max([max(y_F_p), max(y_F_n)]))/10)*10+10
    ax.plot([0, 0], [0, top], color='k', linewidth=2)
    ax.grid()
    plt.title(tick,size=20)
    pdf = PdfPages(tick+'_following_intraday.pdf')
    pdf.savefig()
    pdf.close()
    plt.close()
|
Taxzer.com - This great name is for sale on Brandroot!
A clever six-letter name built around the word "tax" designed for businesses that excel in a highly dreaded obligation. Its suffix makes it fun and relaxing - important qualities in this industry.
|
"""
Tests for conformers.py.
"""
import numpy as np
import unittest
from rdkit import Chem
from vs_utils.utils.rdkit_utils import conformers
class TestConformerGenerator(unittest.TestCase):
    """
    Exercises for the ConformerGenerator helper class.
    """
    def setUp(self):
        """
        Build an aspirin molecule (zero conformers) and a default generator.
        """
        smiles_record = 'CC(=O)OC1=CC=CC=C1C(=O)O aspirin'
        self.molecule = Chem.MolFromSmiles(smiles_record.split()[0])
        self.molecule.SetProp('_Name', 'aspirin')
        assert self.molecule.GetNumConformers() == 0
        self.generator = conformers.ConformerGenerator()
    def test_generate_conformers(self):
        """
        Default-parameter generation yields conformers and keeps the name.
        """
        result = self.generator.generate_conformers(self.molecule)
        assert result.GetNumConformers() > 0
        # molecule names must survive the generation step
        assert result.GetProp('_Name') == self.molecule.GetProp('_Name')
    def test_mmff94_minimization(self):
        """
        Conformers can be minimized with the MMFF94 force field.
        """
        ff_generator = conformers.ConformerGenerator(force_field='mmff94')
        result = ff_generator.generate_conformers(self.molecule)
        assert result.GetNumConformers() > 0
    def test_mmff94s_minimization(self):
        """
        Conformers can be minimized with the MMFF94s force field.
        """
        ff_generator = conformers.ConformerGenerator(force_field='mmff94s')
        result = ff_generator.generate_conformers(self.molecule)
        assert result.GetNumConformers() > 0
    def test_embed_molecule(self):
        """
        Embedding alone already produces at least one conformer.
        """
        embedded = self.generator.embed_molecule(self.molecule)
        assert embedded.GetNumConformers() > 0
    def test_minimize_conformers(self):
        """
        Minimization strictly lowers every conformer energy.
        """
        embedded = self.generator.embed_molecule(self.molecule)
        assert embedded.GetNumConformers() > 0
        before = self.generator.get_conformer_energies(embedded)
        self.generator.minimize_conformers(embedded)
        after = self.generator.get_conformer_energies(embedded)
        # every minimized energy must be lower than its starting energy
        assert np.all(before > after), (before, after)
    def test_get_conformer_energies(self):
        """
        One energy is reported per conformer.
        """
        embedded = self.generator.embed_molecule(self.molecule)
        assert embedded.GetNumConformers() > 0
        energies = self.generator.get_conformer_energies(embedded)
        assert len(energies) == embedded.GetNumConformers()
    def test_prune_conformers(self):
        """
        Pruning respects the cap, keeps the lowest-energy conformer, reuses
        original energies, and sorts by increasing energy.
        """
        capped = conformers.ConformerGenerator(max_conformers=10)
        embedded = capped.embed_molecule(self.molecule)
        # pruning is only meaningful with more than one conformer
        assert embedded.GetNumConformers() > 1
        capped.minimize_conformers(embedded)
        energies = capped.get_conformer_energies(embedded)
        pruned = capped.prune_conformers(embedded)
        pruned_energies = capped.get_conformer_energies(pruned)
        # never more conformers than the configured maximum
        assert pruned.GetNumConformers() <= capped.max_conformers
        # pruning cannot increase the conformer count
        assert pruned.GetNumConformers() <= embedded.GetNumConformers()
        # the lowest-energy conformer survives
        assert np.allclose(min(energies), min(pruned_energies))
        # every pruned energy comes from the original set
        for energy in pruned_energies:
            assert np.allclose(min(np.fabs(energies - energy)), 0)
        # conformers are ordered by increasing energy
        order = np.argsort(pruned_energies)
        assert np.array_equal(order, np.arange(len(pruned_energies))), order
    def test_get_conformer_rmsd(self):
        """
        The conformer RMSD matrix is a valid symmetric distance matrix.
        """
        capped = conformers.ConformerGenerator(max_conformers=10)
        embedded = capped.embed_molecule(self.molecule)
        assert embedded.GetNumConformers() > 1
        rmsd = capped.get_conformer_rmsd(embedded)
        # square, one row/col per conformer
        assert rmsd.shape[0] == rmsd.shape[1] == embedded.GetNumConformers()
        # zero diagonal, symmetric
        assert np.allclose(np.diag(rmsd), 0)
        assert np.array_equal(rmsd, rmsd.T)
        # distinct conformers have strictly positive distances
        assert np.all(rmsd[np.triu_indices_from(rmsd, k=1)] > 0), rmsd
|
Want to impact humanity and protect its health? Improve quality of life? Being a Bio-medical Engineer is a great opportunity to reach this goal. According to the US department of Labor, Bureau of Labor Statistics, employment of biomedical engineers is expected to increase faster than the average for all occupations through 2012. Hence, if you are considering your career in bioengineering field, you definitely are in the right career path.
Being a biomedical engineer, you may be called upon to design medical instruments and devices such as magnetic resonance imaging (MRI), the heart pacemaker, kidney dialysis and the heart-lung machine. In addition, you may need to carry out research to acquire new knowledge needed to solve new medical problems. The bioengineering field covers a wide range of specialty areas; among the well-known specialty areas are bioinstrumentation, biomedical engineering, biomechanics, clinical engineering, and rehabilitation engineering. You can set your career path toward any of these specialties in bioengineering.
Median annual earnings of biomedical engineers with a bachelor's degree were $48,503 in 2004; those with a master's degree earned $59,667. The middle 50 percent earned around $70,500. The lowest 10 percent earned less than $41,260 and the highest 10 percent earned more than $107,530.
Employment of biomedical engineers is expected to boom in the next 5 years in line with the demand for more sophisticated medical equipment. The increasing concern for cost efficiency and effectiveness will also boost demand for biomedical engineers. Hence, bioengineering is a great career path for you if you are interested in this field.
This entry was posted in Reference Education and tagged bioengineering, biomedical, engineer, engineering, engineers, field, medical. Bookmark the permalink.
|
# -*- coding: utf-8 -*-
import time
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import netsvc
class mrp_generate_previsions(osv.osv_memory):
    # Transient wizard model that generates MRP previsions up to a
    # user-supplied maximum date.
    _name = "mrp.previsions.generate"
    _description = "Generate previsions"
    _columns = {
        'max_date': fields.date('Date Max', required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }
    _defaults = {
        # default company: the current user's default company
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'mrp.previsions.generate', context=c),
    }
    def _check_date_max(self, cr, uid, ids, context=None):
        # Constraint helper: max_date must be strictly later than today.
        obj = self.browse(cr, uid, ids[0], context=context)
        if obj.max_date <= time.strftime('%Y-%m-%d'):
            return False
        return True
    _constraints = [
        # message (French): "the max date must be later than today"
        (_check_date_max, u'La date max doit être supérieure à la date de jour', ['max_date']),
    ]
#Calculer les dates entre deux dates différentes
def list_dates_availables(self, cr, uid, date_max, context=None):
list_dates = []
date = time.strftime('%Y-%m-%d')
from_dt = datetime.datetime.strptime(date, '%Y-%m-%d')
to_dt = datetime.datetime.strptime(date_max, '%Y-%m-%d')
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
j=0
while(j <= diff_day):
d = datetime.datetime.strptime(date, '%Y-%m-%d') + datetime.timedelta(days=j)
j +=1
list_dates.append(d.strftime('%Y-%m-%d'))
list_dates.sort()
return list_dates
#Déterminer la liste des produits utilisés dans les commandes, les OF et les prévisions suggérées
def list_products_availables(self, cr, uid, niveau, context=None):
lst_products = []
if niveau == 0:
#Chercher dans les commandes
order_obj = self.pool.get('sale.order.line')
sale_ids = order_obj.search(cr, uid, [], context=context)
if sale_ids:
for sale in order_obj.browse(cr, uid, sale_ids, context=context):
if sale.product_id not in lst_products:
lst_products.append(sale.product_id)
else:
continue
#Chercher dans les ordres de fabrication
mrp_obj = self.pool.get('mrp.production')
mrp_ids = mrp_obj.search(cr, uid, [], context=context)
if mrp_ids:
for mrp in mrp_obj.browse(cr, uid, mrp_ids, context=context):
if mrp.product_id not in lst_products:
lst_products.append(mrp.product_id)
else:
continue
else: #Chercher dans les prévision besoin suggéré de niveau adéquat
prevision_obj = self.pool.get('mrp.prevision')
prevision_ids = prevision_obj.search(cr, uid, [('type','=','ft'),('niveau','=',niveau),], context=context)
if prevision_ids:
for prev in prevision_obj.browse(cr, uid, prevision_ids, context=context):
if prev.product_id not in lst_products:
lst_products.append(prev.product_id)
else:
continue
return lst_products
    # Sum the quantities ordered by sale orders for a product on a date
    def sum_qty_cmd(self, cr, uid, date, product, context=None):
        """Quantity of *product* requested by sale orders shipping on *date*.

        Draft lines contribute their full ordered quantity; confirmed lines
        contribute their quantity minus what the linked stock moves have
        already delivered.  When *date* is today, every order shipping up
        to today is included (i.e. the backlog).
        """
        sale_obj = self.pool.get('sale.order')
        sale_line_obj = self.pool.get('sale.order.line')
        procurement_obj = self.pool.get('procurement.order')
        stock_move_obj = self.pool.get('stock.move')
        if date == time.strftime('%Y-%m-%d'):
            # today: include the backlog of orders due up to today
            sale_ids = sale_obj.search(cr, uid, [('date_expedition','<=', date)], context=context)
            line_ids = sale_line_obj.search(cr, uid, [('state','not in',('done', 'cancel')), ('order_id','in', sale_ids), ('product_id','=',product)])
        else:
            sale_ids = sale_obj.search(cr, uid, [('date_expedition','=', date)], context=context)
            line_ids = sale_line_obj.search(cr, uid, [('state','not in',('done', 'cancel')), ('order_id','in', sale_ids), ('product_id','=',product)])
        qty = 0
        if line_ids:
            # draft lines: count the full ordered quantity
            draft_line_ids = sale_line_obj.search(cr, uid, [('id','in', line_ids), ('state','=', 'draft')], context=context)
            if draft_line_ids:
                for line in sale_line_obj.read(cr, uid, draft_line_ids, ['product_uom_qty'], context=context):
                    qty += line['product_uom_qty']
            # confirmed lines: ordered quantity minus already-delivered moves
            confirm_line_ids = sale_line_obj.search(cr, uid, [('id','in', line_ids), ('state','!=', 'draft')], context=context)
            proc_line_ids = procurement_obj.search(cr, uid, [('sale_line_id','in',confirm_line_ids)], context=context)
            if proc_line_ids:
                for line_id in proc_line_ids:
                    proc_line = procurement_obj.browse(cr, uid, line_id, context=context)
                    deliv_line_ids = stock_move_obj.search(cr, uid, [('procurement_id','=',line_id)], context=context)
                    product_qty = proc_line.sale_line_id.product_uom_qty
                    if deliv_line_ids:
                        # subtract the quantity of every completed delivery move
                        for deliv_line in stock_move_obj.read(cr, uid, deliv_line_ids, ['product_uom_qty', 'state'], context=context):
                            if deliv_line['state'] == 'done':
                                product_qty -= deliv_line['product_uom_qty']
                            else:
                                continue
                    qty += product_qty
        return qty
#Retourner la quantité des produits non fabriqués
def sum_qty_of(self, cr, uid, lst_mrp, context=None):
qty_of = 0
if lst_mrp:
for prod in self.pool.get('mrp.production').browse(cr, uid, lst_mrp, context=context):
done = 0.0
for move in prod.move_created_ids2:
if move.product_id == prod.product_id:
if not move.scrapped:
done += move.product_qty
qty_prod = (prod.product_qty - done)
qty_of += qty_prod
return qty_of
    # Sum the quantities of the launched manufacturing orders
    def list_mrp_prod(self, cr, uid, date, product, context=None):
        """Remaining-to-produce quantity of *product* over the open
        manufacturing orders planned on *date* (when *date* is today, every
        late order up to the end of today is included)."""
        date1 = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(date + ' 00:00:00', '%Y-%m-%d %H:%M:%S'))
        date2 = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(date + ' 23:59:59', '%Y-%m-%d %H:%M:%S'))
        if date == time.strftime('%Y-%m-%d'):
            # today: no lower bound, so overdue orders are caught (date1 unused here)
            cr.execute("SELECT DISTINCT(id) FROM mrp_production WHERE product_id = %s AND date_planned <= %s AND state not in ('cancel', 'done')", (product, date2,))
        else:
            cr.execute("SELECT DISTINCT(id) FROM mrp_production WHERE product_id = %s AND date_planned <= %s AND date_planned >= %s AND state not in ('cancel', 'done')", (product, date2, date1,))
        lst_mrp = [t[0] for t in cr.fetchall()]
        if lst_mrp:
            return self.sum_qty_of(cr, uid, lst_mrp, context=context)
        else:
            return 0
#Calculer la somme des quantités des prévisions de suggestion
def sum_qty_prevision_sug(self, cr, uid, prevision, context=None):
if prevision:
return prevision.quantity
else:
return 0
#Calculer la somme des quantités des prévision de type besoin suggérés
def sum_qty_besoin_sugg(self, cr, uid, date, product, niveau, context=None):
if date == time.strftime('%Y-%m-%d'):
cr.execute("SELECT SUM(quantity) FROM mrp_prevision " \
"WHERE type = 'ft' AND start_date <= %s AND product_id = %s AND niveau = %s", (date, product, niveau,))
else:
cr.execute("SELECT SUM(quantity) FROM mrp_prevision " \
"WHERE type = 'ft' AND start_date = %s AND product_id = %s AND niveau = %s", (date, product, niveau))
qty_ft = cr.fetchone()
if qty_ft[0] is None:
return 0
else:
return qty_ft[0]
#déterminer la date max des besoins suggérés
def date_max_suggestion(self, cr, uid, product, niveau, context=None):
cr.execute("SELECT max(start_date) FROM mrp_prevision " \
"WHERE product_id = %s and niveau = %s ", (product, niveau,))
max_date = cr.fetchone()
if max_date[0] is None:
return ''
else:
return max_date[0]
    # Compute the real stock quantity
    def calcul_qty_stock_reel(self, cr, uid, product, context=None):
        """Return the usable on-hand stock of *product* (a browse record):
        the quantities counted by the most recent inventory, minus the
        product's safety stock (is_stock_secu)."""
        inventory_obj = self.pool.get('stock.inventory.line')
        # id of the latest inventory that counted this product
        cr.execute('SELECT MAX(inv.id) FROM stock_inventory inv ' \
                   'JOIN stock_inventory_line inv_line ON inv.id = inv_line.inventory_id ' \
                   'WHERE inv_line.product_id = %s ', (product.id, ))
        last_inventory = cr.fetchone()
        inventory_ids = inventory_obj.search(cr, uid, [('product_id','=',product.id), ('inventory_id','=',last_inventory[0])], context=context)
        # start below zero so the safety stock is always kept aside
        qty_stock = - product.is_stock_secu
        if inventory_ids:
            for inv in inventory_obj.browse(cr, uid, inventory_ids, context=context):
                qty_stock += inv.product_qty
        return qty_stock
#Calculer le stock theorique
def calcul_stock_theorique(self, cr, uid, qty_stock, qty, qty_mrp, qty_prev, qty_four, context=None):
qty_th = qty_stock - qty + qty_mrp + qty_prev + qty_four
return qty_th
#Calculer la quantité de la prévision en fonction de lot mini et multiple de
def calcul_prevision_qty(self, cr, uid, stock_th, product, context=None):
if -(stock_th) <= product.lot_mini:
prev_qty = product.lot_mini + (product.lot_mini * product.is_perte / 100)
return prev_qty
else: # la valeur absolu de stock_th est superieure au lot_mini
qty1 = -(stock_th) - product.lot_mini
qty2 = qty1 / product.multiple
if int(qty2) < qty2:
qty2 = int(qty2) + 1
qty = product.lot_mini + (qty2 * product.multiple)
prev_qty = qty + (qty * product.is_perte / 100)
return prev_qty
    # Sum the quantities of the supplier purchase orders
    def sum_qty_cmd_four(self, cr, uid, date, product, context=None):
        """Quantity of *product* still expected from purchase order lines
        planned for *date* (for today, every late line is included): draft
        lines count fully, confirmed lines count minus receptions done."""
        purchase_line_obj = self.pool.get('purchase.order.line')
        stock_move_obj = self.pool.get('stock.move')
        if date == time.strftime('%Y-%m-%d'):
            line_ids = purchase_line_obj.search(cr, uid, [('state','not in',('done', 'cancel')), ('date_planned','<=', date), ('product_id','=',product)])
        else:
            line_ids = purchase_line_obj.search(cr, uid, [('state','not in',('done', 'cancel')), ('date_planned','=', date), ('product_id','=',product)])
        qty = 0
        if line_ids:
            # draft lines: full ordered quantity
            draft_line_ids = purchase_line_obj.search(cr, uid, [('id','in', line_ids), ('state','=', 'draft')], context=context)
            if draft_line_ids:
                for line in purchase_line_obj.read(cr, uid, draft_line_ids, ['product_qty'], context=context):
                    qty += line['product_qty']
            # confirmed lines: ordered quantity minus stock moves already done
            confirm_line_ids = purchase_line_obj.search(cr, uid, [('id','in', line_ids), ('state','!=', 'draft')], context=context)
            if confirm_line_ids:
                for line_id in confirm_line_ids:
                    recept_line_ids = stock_move_obj.search(cr, uid, [('purchase_line_id','=',line_id)], context=context)
                    line = purchase_line_obj.read(cr, uid, line_id, ['product_qty'], context=context)
                    product_qty = line['product_qty']
                    if recept_line_ids:
                        for recept_line in stock_move_obj.read(cr, uid, recept_line_ids, ['product_uom_qty', 'state'], context=context):
                            if recept_line['state'] == 'done':
                                product_qty -= recept_line['product_uom_qty']
                            else:
                                continue
                    qty += product_qty
        return qty
def prevision_fournisseur(self, cr, uid, product, context=None):
cr.execute("SELECT MAX(id) FROM mrp_prevision " \
"WHERE type = 'sa' AND product_id = %s ", (product,))
prevision_id = cr.fetchone()
if prevision_id[0] is None:
return False
else:
return True
    # Return True when a supplier forecast must be created
    def create_prevision_sug_cmd_four(self, cr, uid, product, date, stock_four, context=None):
        """Decide whether a new supplier ('sa') forecast is needed for
        *product*: yes when none exists, or when the latest one no longer
        covers the required quantity."""
        cr.execute("SELECT MAX(id) FROM mrp_prevision " \
                   "WHERE type = 'sa' AND product_id = %s ", (product,))
        prevision_id = cr.fetchone()
        if prevision_id[0] is None:
            return True
        else:
            prevision_obj = self.pool.get('mrp.prevision')
            prevision = prevision_obj.browse(cr, uid, prevision_id[0], context=context)
            # no new forecast when the existing one already satisfies the need
            if self.calcul_prevision_qty(cr, uid, (prevision.stock_th - stock_four), prevision.product_id, context=context) <= prevision.quantity:
                return False
            else:
                return True
    # Compute the start date of the forecast
    def calcul_date_prevision(self, cr, uid, date, quantity, product, type, company, context=None):
        """Back-schedule the forecast start from the due *date*: subtract
        the production time for *quantity* plus the produce/QC delays, then
        snap the result to a working day of the relevant partner."""
        # production time in seconds for the requested quantity
        time_production = quantity * product.temps_realisation
        delai = product.produce_delay + product.delai_cq
        start_date = datetime.datetime.strptime(date, '%Y-%m-%d') - datetime.timedelta(days=delai) - datetime.timedelta(seconds=time_production)
        start_time = start_date.strftime('%H:%M:%S')
        # NOTE(review): when the back-scheduled time lands after 01:00, a full
        # extra day is subtracted -- presumably to start the previous day; confirm
        if start_time > '01:00:00':
            start_date = datetime.datetime.strptime(date, '%Y-%m-%d') - datetime.timedelta(days=(delai + 1)) - datetime.timedelta(seconds=time_production)
        start_date = start_date.strftime('%Y-%m-%d')
        # the partner whose calendar constrains the date: the company for
        # fabrication suggestions, the first supplier for purchase suggestions
        partner = False
        if type == 'fs':
            partner = company
        if type == 'sa':
            if product.seller_ids:
                partner = product.seller_ids[0].name
        start_date = self.format_start_date(cr, uid, start_date, partner, context)
        return start_date
    # Determine the start date taking plant and supplier closing days into account
    def format_start_date(self, cr, uid, date, partner, context=None):
        """Shift *date* ('%Y-%m-%d') to a working day for *partner*,
        skipping the partner's weekly closing days and leave periods.
        When *partner* is falsy the date is returned unchanged."""
        is_api = self.pool.get('is.api')
        if partner:
            # weekly closing days of the company/supplier
            jours_fermes = is_api.num_closing_days(cr, uid, partner, context=context)
            # leave (holiday) periods of the company/supplier
            leave_dates = is_api.get_leave_dates(cr, uid, partner, context=context)
            num_day = time.strftime('%w', time.strptime(date, '%Y-%m-%d'))
            date = is_api.get_working_day(cr, uid, date, num_day, jours_fermes, leave_dates, context=context)
        return date
def chiffre_texte(self, cr, uid, num_od, context=None):
if len(num_od) == 1:
return '0000'
elif len(num_od) == 2:
return '000'
elif len(num_od) == 3:
return '00'
elif len(num_od) == 4:
return '0'
else:
return ''
# structurer le nom de la prévision
def formater_nom_prevision(self, cr, uid, type, num_od, context=None):
part = self.chiffre_texte(cr, uid, str(num_od), context) + str(num_od)
if type == 'fs':
return 'FS-' + part
elif type == 'ft':
return 'FT-' + part
else:
return 'SA-' + part
#Créer une prévision
def create_prevision(self, cr, uid, product, quantity, start_date, end_date, type, niveau, stock_th, num_od_fs, num_od_sa, note, context=None):
prevision_obj = self.pool.get('mrp.prevision')
if type in ('fs', 'ft'):
num_od = num_od_fs
if type == 'sa':
num_od = num_od_sa
prevision_values = {
'num_od': num_od,
'name': self.formater_nom_prevision(cr, uid, type, num_od, context),
'type': type,
'product_id': product,
'quantity': quantity,
'start_date': start_date,
'end_date': end_date,
'niveau': niveau,
'stock_th': stock_th,
'note': note,
}
prevision = prevision_obj.create(cr, uid, prevision_values, context=context)
return prevision
#Déterminer le premier niveau de la nomenclature d'un produit
def get_product_boms(self, cr, uid, product, context=None):
boms = []
bom_obj = self.pool.get('mrp.bom')
template_id = product.product_tmpl_id and product.product_tmpl_id.id or False
if template_id:
bom_ids = bom_obj.search(cr, uid, [('product_tmpl_id','=',template_id),], context=context)
if bom_ids:
for line in bom_obj.browse(cr, uid, bom_ids[0], context=context).bom_line_ids:
boms.append(line.id)
return boms
    # Check whether a product has a bill of materials
    def product_nomenclature(self, cr, uid, product, context=None):
        """Return True when *product* (an id) has a BoM with at least one line."""
        bom_obj = self.pool.get('mrp.bom')
        product = self.pool.get('product.product').read(cr, uid, product, ['product_tmpl_id'], context=context)
        # read() returns the many2one as (id, name); keep the id
        template_id = product['product_tmpl_id'] and product['product_tmpl_id'][0] or False
        if template_id:
            bom_ids = bom_obj.search(cr, uid, [('product_tmpl_id','=',template_id),], context=context)
            if bom_ids:
                if bom_obj.browse(cr, uid, bom_ids[0], context=context).bom_line_ids :
                    return True
        return False
    def generate_previsions(self, cr, uid, ids, context=None):
        """MRP-style forecast generation wizard action.

        Walks the BoM levels (niveau 0..9): at each level, for every product
        with demand, simulates the theoretical stock day by day and creates
        'fs'/'sa' forecasts when it goes negative, then explodes the created
        forecasts into 'ft' (suggested need) forecasts for the next level.
        Returns the window action showing the created forecasts.
        """
        prevision_obj = self.pool.get('mrp.prevision')
        bom_line_obj = self.pool.get('mrp.bom.line')
        company_obj = self.pool.get('res.company')
        result = []
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        company = company_obj.browse(cr, uid, data['company_id'][0], context=context)
        if data:
            # dates between today and the requested max date
            dates = self.list_dates_availables(cr, uid, data['max_date'], context=context)
            print 'dates *******', dates
            # drop all existing active forecasts before regenerating
            prevision_ids = prevision_obj.search(cr, uid, [('active','=',True),], context=context)
            prevision_obj.unlink(cr, uid, prevision_ids, context=context)
            niveau = 0
            # per-product running state: real stock, supplier qty, totals
            lst_items = []
            num_od_fs = 0
            num_od_sa = 0
            while (niveau < 10):
                # level 0: products with sale orders / manufacturing orders;
                # level > 0: products with 'ft' forecasts from the level above
                lst_products = self.list_products_availables(cr, uid, niveau, context=context)
                print 'lst_products ******', lst_products
                if lst_products:
                    res_fs = []
                    for product in lst_products:
                        # reset the simulation for this product
                        prevision = None
                        stock_theor = 0
                        exist_item = False
                        if lst_items:
                            for item in lst_items:
                                if item['product_id'] == product.id:
                                    exist_item = True
                                else:
                                    continue
                        if not lst_items or not exist_item:
                            lst_items.append({'product_id':product.id, 'stock_reel':0, 'date_max_ft': '', 'qty_four':0, 'niv_four':10, 'sum_stock_th':0, 'sum_qty_prev':0 })
                        print 'lst_items******', lst_items
                        for date in dates:
                            # demand: sale orders at level 0, 'ft' forecasts above
                            qty = 0
                            if niveau == 0:
                                qty = self.sum_qty_cmd(cr, uid, date, product.id, context=context)
                            else:
                                qty = self.sum_qty_besoin_sugg(cr, uid, date, product.id, niveau, context=context)
                            # quantity already covered by manufacturing orders
                            qty_mrp = self.list_mrp_prod(cr, uid, date, product.id, context=None)
                            # quantity covered by the forecast created on the previous day
                            qty_prev = self.sum_qty_prevision_sug(cr, uid, prevision, context=context)
                            # quantity covered by supplier purchase orders
                            qty_four = 0
                            for item in lst_items:
                                if item['product_id'] == product.id:
                                    if niveau < item['niv_four']:
                                        # first time this product is seen at this level
                                        item['niv_four'] = niveau
                                        date_max = self.date_max_suggestion(cr, uid, product.id, niveau, context=context)
                                        item['date_max_ft'] = date_max
                                        qty_four = self.sum_qty_cmd_four(cr, uid, date, product.id, context=context)
                                    if niveau == item['niv_four'] and date <= item['date_max_ft']:
                                        item['qty_four'] += qty_four
                                    else:
                                        if date == time.strftime('%Y-%m-%d'):
                                            qty_four = item['qty_four']
                                        else:
                                            qty_four = 0
                                else:
                                    continue
                            # theoretical stock
                            if date == time.strftime('%Y-%m-%d'): # first iteration
                                qty_stock = self.calcul_qty_stock_reel(cr, uid, product, context=context)
                                for item in lst_items:
                                    if item['product_id'] == product.id:
                                        if niveau == item['niv_four']:
                                            item['stock_reel'] = qty_stock
                                        else:
                                            qty_stock = item['stock_reel']
                                    else:
                                        continue
                                stock_th = self.calcul_stock_theorique(cr, uid, qty_stock, qty, qty_mrp, qty_prev, qty_four, context=context)
                            else: # remaining iterations: carry yesterday's stock
                                stock_th = self.calcul_stock_theorique(cr, uid, stock_theor, qty, qty_mrp, qty_prev, qty_four, context=context)
                            # update the carried real stock / supplier quantity
                            # NOTE(review): 'qty' (the demand) is clobbered below
                            for item in lst_items:
                                if item['product_id'] == product.id:
                                    if stock_th <= 0:
                                        item['stock_reel'] = 0
                                        item['qty_four'] = 0
                                    else:
                                        sum_items = item['stock_reel'] + item['qty_four']
                                        if sum_items > stock_th:
                                            diff = sum_items - stock_th
                                            if item['stock_reel'] >= diff:
                                                item['stock_reel'] -= diff
                                            else:
                                                qty = diff - item['stock_reel']
                                                item['qty_four'] -= qty
                                                item['stock_reel'] = 0
                                else:
                                    pass
                            # negative theoretical stock: create a suggestion forecast
                            if stock_th < 0:
                                # quantity from the minimum lot and the multiple
                                quantity = self.calcul_prevision_qty(cr, uid, stock_th, product, context=context)
                                # skip creation when existing forecasts already cover the need
                                create_prev = True
                                if not self.product_nomenclature(cr, uid, product.id, context=context):
                                    for item in lst_items:
                                        if item['product_id'] == product.id:
                                            sum_qty = self.calcul_prevision_qty(cr, uid, item['sum_stock_th'] + stock_th, product, context=context)
                                            if sum_qty <= item['sum_qty_prev']:
                                                item['sum_stock_th'] += stock_th
                                                stock_th = 0
                                                create_prev = False
                                            else:
                                                create_prev = True
                                        else:
                                            continue
                                    type_prev = 'sa'
                                    num_od_sa += 1
                                else:
                                    type_prev = 'fs'
                                    num_od_fs += 1
                                # forecast start date (back-scheduled)
                                start_date = self.calcul_date_prevision(cr, uid, date, quantity, product, type_prev, company.partner_id, context=context)
                                if create_prev:
                                    prevision_id = self.create_prevision(cr, uid, product.id, quantity, start_date, date, type_prev, niveau, stock_th, num_od_fs, num_od_sa, '', context=context)
                                    result.append(prevision_id)
                                    res_fs.append(prevision_id)
                                    prevision_init = prevision_obj.browse(cr, uid, prevision_id, context=context)
                                    prevision = prevision_init
                                    stock_theor = stock_th
                                    for elem in lst_items:
                                        if elem['product_id'] == product.id and type_prev == 'sa':
                                            elem['sum_stock_th'] += stock_th
                                            elem['sum_qty_prev'] += quantity
                                        else:
                                            continue
                                else:
                                    prevision = None
                                    stock_theor = stock_th
                            else:
                                prevision = None
                                stock_theor = stock_th
                    # explode the created forecasts into next-level 'ft' needs
                    if res_fs:
                        niveau += 1
                        res_ft = []
                        for prevision in prevision_obj.browse(cr, uid, res_fs, context=context):
                            bom_ids = self.get_product_boms(cr, uid, prevision.product_id, context=context)
                            if bom_ids:
                                for bom in bom_line_obj.browse(cr, uid, bom_ids, context=context):
                                    qty = prevision.quantity * bom.product_qty
                                    note = 'Prevision: ' + str(prevision.name) + '\n' + 'Produit: ' + str(prevision.product_id.default_code)
                                    prev_bes_sug_id = self.create_prevision(cr, uid, bom.product_id.id, qty, prevision.start_date, prevision.end_date, 'ft', niveau, 0, prevision.num_od, num_od_sa, note, context=context)
                                    res_ft.append(prev_bes_sug_id)
                                    result.append(prev_bes_sug_id)
                        if not res_ft:
                            # nothing exploded: no deeper level to process
                            niveau = 10
                else:
                    # no products at this level: stop the loop
                    niveau = 10
        # open the forecast list view restricted to what was created
        action_model = False
        data_pool = self.pool.get('ir.model.data')
        action = {}
        action_model,action_id = data_pool.get_object_reference(cr, uid, 'is_mrp', "action_mrp_prevision_form")
        if action_model:
            action_pool = self.pool.get(action_model)
            action = action_pool.read(cr, uid, action_id, context=context)
            action['domain'] = "[('id','in', ["+','.join(map(str,result))+"])]"
        return action
mrp_generate_previsions()
|
Four or five times later, it’s ‘okay you can go but I’m driving you’. Is a remarkable sight hundreds of cars, many of the drivers parents just like him. He says police are commonly on scene but overwhelmed.. And Ring, Susan M. And Strachan, David P. And Pembrey, Marcus and Breen, Gerome and St Clair, David and Caesar, Sian and Gordon Smith, Katherine and Jones, Lisa and Fraser, Christine and Green, Elaine K.
RCC Board Chairman Marty Wortendyke said Baston is a visionary and compassionate educational administrator. Baston has held executive leadership positions at LaGuardia Community College in the City University of New York. Most recently, he served as acting provost and vice president for academic and student affairs.
One example is that of a woman I worked with some years ago who was terribly addicted to Valium, Paxil, and Prozac. She wanted to get off of everything. Of course, I could not counsel her on the medical aspects of her situation, but I did refer her to a medical acquaintance who could assist her.
Since he sharpened his pencil for the first game back in 1948, that has been his seat ever since. Along the way, he has just about seen it all. By day he started out as an accountant at Crosley Manufacturing in Cincinnati before becoming a farmer. The biggest name no doubt being Lamar Odom, a former NBA Champion and Sixth Man of the Year with the Lakers, he was supposed to be the reliable one coming off the bench. Unfortunately so far he’s been all name, and no game. The additions of Vince Carter and Delonte West have in no way hurt the team either, and with Brandan Wright you have an energetic high flying big man.
I’ve never found the numbers for how many of those taken prisoners on Corregidor died in captivity or how many lived through the war. Many of them certainly died. But I’ve never forgotten those words on the wall of Malinta Tunnel. Think you can imagine, they not doing well, Morris said. Law enforcement officer likes to do that. The family not knowing and feeling empty about where their loved ones are, it a tough situation.
Fear is built into the equation for some climbers, particularly new ones. But it all comes down to trust and experience, in many instances. First timers on rock perhaps testing their newly found skills after trying indoor climbing have to get used to finding which way to go by searching for new holes in the wall of rock.
Most Strivectin reviews admit that, while the skin cream does not remove deep wrinkles associated with the natural aging process, it does rewind the skin’s clock back almost 10 to 15 years. The secret is found in a solution containing oglio peptide (called Pal KTTKS), an ingredient that has proven to smooth skin roughness and improve wrinkle volume and depth. These peptides do not inflame the skin (a result often seen after skin peels and retinol treatments) and are easy to simply mix in a face cream for easy application..
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import json
import os
import shutil
import time
from datetime import datetime
from itertools import groupby
from math import isnan
from apptools.preferences.preference_binding import bind_preference
from git import Repo
from traits.api import Instance, Str, Set, List, provides
from uncertainties import nominal_value, std_dev
from pychron.core.helpers.filetools import remove_extension, list_subdirectories
from pychron.core.i_datastore import IDatastore
from pychron.core.progress import progress_loader, progress_iterator
from pychron.database.interpreted_age import InterpretedAge
from pychron.dvc import dvc_dump, dvc_load, analysis_path, repository_path, AnalysisNotAnvailableError
from pychron.dvc.defaults import TRIGA, HOLDER_24_SPOKES, LASER221, LASER65
from pychron.dvc.dvc_analysis import DVCAnalysis, PATH_MODIFIERS
from pychron.dvc.dvc_database import DVCDatabase
from pychron.dvc.func import find_interpreted_age_path, GitSessionCTX, push_repositories
from pychron.dvc.meta_repo import MetaRepo, Production
from pychron.envisage.browser.record_views import InterpretedAgeRecordView
from pychron.git.hosts import IGitHost, CredentialException
from pychron.git_archive.repo_manager import GitRepoManager, format_date, get_repository_branch
from pychron.globals import globalv
from pychron.loggable import Loggable
from pychron.paths import paths, r_mkdir
from pychron.pychron_constants import RATIO_KEYS, INTERFERENCE_KEYS
TESTSTR = {'blanks': 'auto update blanks', 'iso_evo': 'auto update iso_evo'}
class DVCException(Exception):
    """Raised when an attribute can be resolved on neither DVCDatabase nor MetaRepo.

    Derives from Exception (not BaseException, as before) so that generic
    ``except Exception`` boundaries can handle it; user-defined exceptions
    should not subclass BaseException.
    """

    def __init__(self, attr):
        # attribute name that failed to resolve
        self._attr = attr

    def __repr__(self):
        return 'DVCException: neither DVCDatabase or MetaRepo have {}'.format(self._attr)

    def __str__(self):
        return self.__repr__()
class Tag(object):
    """Lightweight holder for an analysis tag and the json file it lives in."""
    # tag name, e.g. 'ok', 'invalid'
    name = None
    # path of the tag json file
    path = None

    @classmethod
    def from_analysis(cls, an):
        """Build a Tag from an analysis object, deriving the tag file path."""
        tag = cls()
        tag.name = an.tag
        tag.record_id = an.record_id
        tag.repository_identifier = an.repository_identifier
        tag.path = analysis_path(an.record_id, an.repository_identifier, modifier='tags')

        return tag

    def dump(self):
        """Write {'name': ...} to the tag file.

        When *path* is unset it is derived from record_id/repository_identifier,
        which are only set by from_analysis.
        """
        obj = {'name': self.name}
        if not self.path:
            self.path = analysis_path(self.record_id, self.repository_identifier, modifier='tags', mode='w')

        dvc_dump(obj, self.path)
class DVCInterpretedAge(InterpretedAge):
    def from_json(self, obj):
        """Populate this interpreted age from a deserialized json mapping.

        Raises KeyError if any expected key is missing; setattr is used so
        trait notifications (if any) still fire on assignment.
        """
        for a in ('age', 'age_err', 'kca', 'kca_err', 'age_kind', 'kca_kind', 'mswd',
                  'sample', 'material', 'identifier', 'nanalyses', 'irradiation'):
            setattr(self, a, obj[a])
@provides(IDatastore)
class DVC(Loggable):
    """
    main interface to DVC backend. Delegates responsibility to DVCDatabase and MetaRepo
    """
    # database front end
    db = Instance('pychron.dvc.dvc_database.DVCDatabase')
    # meta repository front end
    meta_repo = Instance('pychron.dvc.meta_repo.MetaRepo')

    # name of the meta repository (bound from preferences)
    meta_repo_name = Str
    # git host organization / team (bound from preferences)
    organization = Str
    default_team = Str

    # repository currently in use
    current_repository = Instance(GitRepoManager)
    auto_add = True
    # repositories already pulled this session
    pulled_repositories = Set
    selected_repositories = List
    def __init__(self, bind=True, *args, **kw):
        """Create the DVC facade; when *bind* is true, wire up preferences."""
        super(DVC, self).__init__(*args, **kw)

        if bind:
            self._bind_preferences()
            # self.synchronize()
            # self._defaults()
    def initialize(self, inform=False):
        """Open and pull the meta repository and connect the database.

        Returns True on success; returns None (falsy) when the meta repo
        name is not configured or the database fails to connect.
        """
        self.debug('Initialize DVC')

        if not self.meta_repo_name:
            self.warning_dialog('Need to specify Meta Repository name in Preferences')
            return

        self.open_meta_repo()

        # update meta repo.
        self.meta_pull()

        if self.db.connect():
            # self._defaults()
            return True
def open_meta_repo(self):
mrepo = self.meta_repo
root = os.path.join(paths.dvc_dir, self.meta_repo_name)
self.debug('open meta repo {}'.format(root))
if os.path.isdir(os.path.join(root, '.git')):
self.debug('Opening Meta Repo')
mrepo.open_repo(root)
else:
url = self.make_url(self.meta_repo_name)
self.debug('cloning meta repo url={}'.format(url))
path = os.path.join(paths.dvc_dir, self.meta_repo_name)
self.meta_repo.clone(url, path)
def synchronize(self, pull=True):
"""
pull meta_repo changes
:return:
"""
if pull:
self.meta_repo.pull()
else:
self.meta_repo.push()
    def load_analysis_backend(self, ln, isotope_group):
        """Load irradiation metadata for identifier *ln* into *isotope_group*:
        flux, production ratios and chronology from the meta repository,
        keyed by the irradiation/level/position found in the database.
        Returns True."""
        db = self.db
        with db.session_ctx():
            ip = db.get_identifier(ln)
            dblevel = ip.level
            irrad = dblevel.irradiation.name
            level = dblevel.name
            pos = ip.position

            fd = self.meta_repo.get_flux(irrad, level, pos)
            prodname, prod = self.meta_repo.get_production(irrad, level)
            cs = self.meta_repo.get_chronology(irrad)

            x = datetime.now()
            now = time.mktime(x.timetuple())
            if fd['lambda_k']:
                # flux file may override the default K decay constant
                isotope_group.arar_constants.lambda_k = fd['lambda_k']
            isotope_group.trait_set(j=fd['j'],
                                    # lambda_k=lambda_k,
                                    production_ratios=prod.to_dict(RATIO_KEYS),
                                    interference_corrections=prod.to_dict(INTERFERENCE_KEYS),
                                    chron_segments=cs.get_chron_segments(x),
                                    irradiation_time=cs.irradiation_time,
                                    timestamp=now)
        return True
    def repository_db_sync(self, reponame):
        """Rewrite the per-analysis json files of *reponame* so their
        sample/project/material keys match the database, then pull, commit
        and push the repository.

        NOTE(review): sample/project/material are hard-coded to None and
        never looked up from the db record, so this currently nulls those
        keys for every analysis -- looks unfinished; confirm before use.
        """
        repo = self._get_repository(reponame, as_current=False)
        ps = []
        ans = self.db.repository_analyses(reponame)
        for ai in ans:
            p = analysis_path(ai.record_id, reponame)
            obj = dvc_load(p)

            sample = None
            project = None
            material = None
            changed = False
            for attr, v in (('sample', sample),
                            ('project', project),
                            ('material', material)):
                if obj.get(attr) != v:
                    obj[attr] = v
                    changed = True

            if changed:
                ps.append(p)
                dvc_dump(obj, p)

        if ps:
            repo.pull()
            repo.add_paths(ps)
            repo.commit('Synced repository with database {}'.format(self.db.datasource_url))
            repo.push()
def repository_transfer(self, ans, dest):
def key(x):
return x.repository_identifier
destrepo = self._get_repository(dest, as_current=False)
for src, ais in groupby(sorted(ans, key=key), key=key):
repo = self._get_repository(src, as_current=False)
for ai in ais:
ops, nps = self._transfer_analysis_to(dest, src, ai.runid)
repo.add_paths(ops)
destrepo.add_paths(nps)
# update database
dbai = self.db.get_analysis_uuid(ai.uuid)
for ri in dbai.repository_associations:
if ri.repository == src:
ri.repository = dest
# commit src changes
repo.commit('Transferred analyses to {}'.format(dest))
dest.commit('Transferred analyses from {}'.format(src))
    def _transfer_analysis_to(self, dest, src, rid):
        """Move every file of analysis *rid* from repository *src* to *dest*.

        Returns (old_paths, new_paths) for staging in the two repositories.
        """
        p = analysis_path(rid, src)
        np = analysis_path(rid, dest)

        # retag the analysis with its new repository before moving it
        obj = dvc_load(p)
        obj['repository_identifier'] = dest
        dvc_dump(obj, p)

        ops = [p]
        nps = [np]
        shutil.move(p, np)
        for modifier in ('baselines', 'blanks', 'extraction',
                         'intercepts', 'icfactors', 'peakcenter', '.data'):
            p = analysis_path(rid, src, modifier=modifier)
            np = analysis_path(rid, dest, modifier=modifier)
            # NOTE(review): assumes every modifier file exists; shutil.move
            # raises when one is missing -- confirm
            shutil.move(p, np)
            ops.append(p)
            nps.append(np)
        return ops, nps
def get_flux(self, irrad, level, pos):
fd = self.meta_repo.get_flux(irrad, level, pos)
return fd['j']
def freeze_flux(self, ans):
self.info('freeze flux')
def ai_gen():
key = lambda x: x.irradiation
lkey = lambda x: x.level
rkey = lambda x: x.repository_identifier
for irrad, ais in groupby(sorted(ans, key=key), key=key):
for level, ais in groupby(sorted(ais, key=lkey), key=lkey):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj['positions']
for repo, ais in groupby(sorted(ais, key=rkey), key=rkey):
yield repo, irrad, level, {ai.irradiation_position: positions[ai.irradiation_position] for ai in
ais}
added = []
def func(x, prog, i, n):
repo, irrad, level, d = x
if prog:
prog.change_message('Freezing Flux {}{} Repository={}'.format(irrad, level, repo))
root = os.path.join(paths.repository_dataset_dir, repo, 'flux', irrad)
r_mkdir(root)
p = os.path.join(root, level)
if os.path.isfile(p):
dd = dvc_load(p)
dd.update(d)
dvc_dump(d, p)
added.append((repo, p))
progress_loader(ai_gen(), func, threshold=1)
self._commit_freeze(added, '<FLUX_FREEZE>')
    def freeze_production_ratios(self, ans):
        """Write a per-analysis copy of the production ratios used by *ans*
        and commit them with a <PR_FREEZE> message."""
        self.info('freeze production ratios')

        def ai_gen():
            # group by irradiation, then level, so production is fetched once
            key = lambda x: x.irradiation
            lkey = lambda x: x.level
            for irrad, ais in groupby(sorted(ans, key=key), key=key):
                for level, ais in groupby(sorted(ais, key=lkey), key=lkey):
                    # NOTE(review): load_analysis_backend unpacks
                    # get_production() into (name, prod); here the raw return
                    # is used directly -- confirm pr has a .dump method
                    pr = self.meta_repo.get_production(irrad, level)
                    for ai in ais:
                        yield pr, ai

        added = []

        def func(x, prog, i, n):
            pr, ai = x
            if prog:
                prog.change_message('Freezing Production {}'.format(ai.runid))
            p = analysis_path(ai.runid, ai.repository_identifier, 'productions', mode='w')
            pr.dump(path=p)
            added.append((ai.repository_identifier, p))

        progress_loader(ai_gen(), func, threshold=1)
        self._commit_freeze(added, '<PR_FREEZE>')
def _commit_freeze(self, added, msg):
key = lambda x: x[0]
rr = sorted(added, key=key)
for repo, ps in groupby(rr, key=key):
rm = GitRepoManager()
rm.open_repo(repo, paths.repository_dataset_dir)
rm.add_paths(ps)
rm.smart_pull()
rm.commit(msg)
# database
# analysis manual edit
# def manual_intercepts(self, runid, experiment_identifier, values, errors):
# return self._manual_edit(runid, experiment_identifier, values, errors, 'intercepts')
#
# def manual_blanks(self, runid, experiment_identifier, values, errors):
# return self._manual_edit(runid, experiment_identifier, values, errors, 'blanks')
#
# def manual_baselines(self, runid, experiment_identifier, values, errors):
# return self._manual_edit(runid, experiment_identifier, values, errors, 'baselines')
#
# def manual_baselines(self, runid, experiment_identifier, values, errors):
# return self._manual_edit(runid, experiment_identifier, values, errors, 'baselines')
    def manual_edit(self, runid, repository_identifier, values, errors, modifier):
        """Apply manually entered *values*/*errors* to the *modifier* json
        file (e.g. 'intercepts') of analysis *runid*; flags them so they
        override the fitted values. Returns the edited file path."""
        self.debug('manual edit {} {} {}'.format(runid, repository_identifier, modifier))
        self.debug('values {}'.format(values))
        self.debug('errors {}'.format(errors))
        path = analysis_path(runid, repository_identifier, modifier=modifier)
        with open(path, 'r') as rfile:
            obj = json.load(rfile)
            # py2 dict iteration (iteritems); keys must already exist in obj
            for k, v in values.iteritems():
                o = obj[k]
                o['manual_value'] = v
                o['use_manual_value'] = True
            for k, v in errors.iteritems():
                o = obj[k]
                o['manual_error'] = v
                o['use_manual_error'] = True

        dvc_dump(obj, path)
        return path
    def revert_manual_edits(self, runid, repository_identifier):
        """Clear the use_manual_value/use_manual_error flags on every fit
        file of *runid* and commit the reversal."""
        ps = []
        for mod in ('intercepts', 'blanks', 'baselines', 'icfactors'):
            path = analysis_path(runid, repository_identifier, modifier=mod)
            with open(path, 'r') as rfile:
                obj = json.load(rfile)
                # py2 dict iteration (itervalues)
                for item in obj.itervalues():
                    if isinstance(item, dict):
                        item['use_manual_value'] = False
                        item['use_manual_error'] = False
            ps.append(path)
            dvc_dump(obj, path)

        msg = '<MANUAL> reverted to non manually edited'
        self.commit_manual_edits(repository_identifier, ps, msg)
def commit_manual_edits(self, repository_identifier, ps, msg):
if self.repository_add_paths(repository_identifier, ps):
self.repository_commit(repository_identifier, msg)
    # analysis processing
    def analysis_has_review(self, ai, attr):
        """Return whether *attr* of analysis *ai* has been reviewed.

        Currently stubbed to always return True; the commit-log based
        implementation below is disabled.
        """
        return True
        # test_str = TESTSTR[attr]
        # repo = self._get_experiment_repo(ai.experiment_id)
        # for l in repo.get_log():
        #     if l.message.startswith(test_str):
        #         self.debug('{} {} reviewed'.format(ai, attr))
        #         return True
        # else:
        #     self.debug('{} {} not reviewed'.format(ai, attr))
    def update_analyses(self, ans, modifier, msg):
        """Stage and commit the *modifier* file of each analysis in *ans*,
        grouped per repository. Returns the list of repositories that were
        actually modified."""
        key = lambda x: x.repository_identifier
        ans = sorted(ans, key=key)
        mod_repositories = []
        for expid, ais in groupby(ans, key=key):
            paths = map(lambda x: analysis_path(x.record_id, x.repository_identifier, modifier=modifier), ais)
            # print expid, modifier, paths
            if self.repository_add_paths(expid, paths):
                self.repository_commit(expid, msg)
                mod_repositories.append(expid)

        # ais = map(analysis_path, ais)
        # if self.experiment_add_analyses(exp, ais):
        #     self.experiment_commit(exp, msg)
        #     mod_experiments.append(exp)
        return mod_repositories
    def update_tag(self, an):
        """Write the tag file for analysis *an* and stage it in its repository."""
        tag = Tag.from_analysis(an)
        tag.dump()

        expid = an.repository_identifier
        # NOTE(review): a single path (not a list) is passed here -- confirm
        # repository_add_paths accepts both forms
        return self.repository_add_paths(expid, tag.path)
def save_icfactors(self, ai, dets, fits, refs):
if fits and dets:
self.info('Saving icfactors for {}'.format(ai))
ai.dump_icfactors(dets, fits, refs, reviewed=True)
def save_blanks(self, ai, keys, refs):
if keys:
self.info('Saving blanks for {}'.format(ai))
ai.dump_blanks(keys, refs, reviewed=True)
def save_fits(self, ai, keys):
if keys:
self.info('Saving fits for {}'.format(ai))
ai.dump_fits(keys, reviewed=True)
    def save_flux(self, identifier, j, e):
        """Manually set flux j +/- e for *identifier*: pulls the meta repo,
        updates the position via save_j, commits and pushes."""
        self.meta_pull()

        with self.session_ctx(use_parent_session=False):
            irp = self.get_identifier(identifier)
            if irp:
                level = irp.level
                irradiation = level.irradiation
                self.save_j(irradiation.name, level.name, irp.position, identifier, j, e, 0, 0, None, None)
                self.meta_commit('User manual edited flux')

        self.meta_push()
    def save_j(self, irradiation, level, pos, identifier, j, e, mj, me, decay, analyses, add=True):
        """Save flux j +/- e for an irradiation position.

        Updates the flux file in the meta repo (including mean j/err, decay
        constants and the contributing analyses) and mirrors j/j_err onto the
        database IrradiationPosition record.
        """
        self.info('Saving j for {}{}:{} {}, j={} +/-{}'.format(irradiation, level,
                                                               pos, identifier, j, e))
        self.meta_repo.update_flux(irradiation, level, pos, identifier, j, e, mj, me, decay, analyses, add)
        with self.session_ctx(use_parent_session=False):
            ip = self.get_identifier(identifier)
            ip.j = j
            ip.j_err = e
def remove_irradiation_position(self, irradiation, level, hole):
db = self.db
dbpos = db.get_irradiation_position(irradiation, level, hole)
if dbpos:
db.delete(dbpos)
self.meta_repo.remove_irradiation_position(irradiation, level, hole)
def find_interpreted_ages(self, identifiers, repositories):
ias = []
for idn in identifiers:
path = find_interpreted_age_path(idn, repositories)
if path:
obj = dvc_load(path)
name = obj.get('name')
ias.append(InterpretedAgeRecordView(idn, path, name))
return ias
def find_references(self, ans, atypes, hours, exclude=None, make_records=True, **kw):
records = self.db.find_references(ans, atypes, hours, exclude=exclude, **kw)
if records:
if make_records:
records = self.make_analyses(records)
return records
    def make_interpreted_ages(self, ias):
        """Load DVCInterpretedAge objects from interpreted-age record views, with progress UI."""
        def func(x, prog, i, n):
            # loader callback invoked by progress_loader for each record view
            if prog:
                prog.change_message('Making Interpreted age {}'.format(x.name))
            obj = dvc_load(x.path)
            ia = DVCInterpretedAge()
            ia.from_json(obj)
            return ia
        return progress_loader(ias, func, step=25)
def get_analysis(self, uuid):
an = self.db.get_analysis_uuid(uuid)
if an:
return self.make_analyses(an.record_views)
def make_analysis(self, record, *args, **kw):
a = self.make_analyses((record,), *args, **kw)
if a:
return a[0]
def make_analyses(self, records, calculate_f_only=False):
if not records:
return
globalv.active_analyses = records
# load repositories
st = time.time()
def func(xi, prog, i, n):
if prog:
prog.change_message('Syncing repository= {}'.format(xi))
self.sync_repo(xi, use_progress=False)
exps = {r.repository_identifier for r in records}
progress_iterator(exps, func, threshold=1)
# for ei in exps:
make_record = self._make_record
def func(*args):
# t = time.time()
try:
r = make_record(calculate_f_only=calculate_f_only, *args)
# print 'make time {}'.format(time.time()-t)
return r
except BaseException:
pass
ret = progress_loader(records, func, threshold=1, step=25)
et = time.time() - st
n = len(records)
self.debug('Make analysis time, total: {}, n: {}, average: {}'.format(et, n, et / float(n)))
return ret
    # repositories
    def repository_add_paths(self, repository_identifier, paths):
        """Stage *paths* in the named repository; return True if anything was added."""
        repo = self._get_repository(repository_identifier)
        return repo.add_paths(paths)
def repository_commit(self, repository, msg):
self.debug('Experiment commit: {} msg: {}'.format(repository, msg))
repo = self._get_repository(repository)
repo.commit(msg)
def remote_repositories(self):
rs = []
gs = self.application.get_services(IGitHost)
if gs:
for gi in gs:
ri = gi.get_repos(self.organization)
rs.extend(ri)
else:
self.warning_dialog('GitLab or GitHub plugin is required')
return rs
def remote_repository_names(self):
rs = []
gs = self.application.get_services(IGitHost)
if gs:
for gi in gs:
ri = gi.get_repository_names(self.organization)
rs.extend(ri)
else:
self.warning_dialog('GitLab or GitHub plugin is required')
return rs
    def check_githost_connection(self):
        """Test connectivity to the (first registered) git host for this organization."""
        git_service = self.application.get_service(IGitHost)
        return git_service.test_connection(self.organization)
    def make_url(self, name):
        """Build the remote URL for repository *name* under this organization."""
        git_service = self.application.get_service(IGitHost)
        return git_service.make_url(name, self.organization)
    def git_session_ctx(self, repository_identifier, message):
        """Return a context manager that commits *message* to the repository on exit."""
        return GitSessionCTX(self, repository_identifier, message)
def sync_repo(self, name, use_progress=True):
"""
pull or clone an repo
"""
root = os.path.join(paths.repository_dataset_dir, name)
exists = os.path.isdir(os.path.join(root, '.git'))
self.debug('sync repository {}. exists={}'.format(name, exists))
if exists:
repo = self._get_repository(name)
repo.pull(use_progress=use_progress)
return True
else:
self.debug('getting repository from remote')
names = self.remote_repository_names()
service = self.application.get_service(IGitHost)
if name in names:
service.clone_from(name, root, self.organization)
return True
else:
self.debug('name={} not in available repos from service={}, organization={}'.format(name,
service.remote_url,
self.organization))
for ni in names:
self.debug('available repo== {}'.format(ni))
    def rollback_repository(self, expid):
        """Discard local (uncommitted) changes in the repository and inform the user."""
        repo = self._get_repository(expid)
        # NOTE(review): cpaths is collected but unused -- presumably intended to
        # drive a per-path selection UI; currently everything is reverted.
        cpaths = repo.get_local_changes()
        # cover changed paths to a list of analyses
        # select paths to revert
        rpaths = ('.',)
        # `git checkout -- .` reverts the entire working tree to HEAD
        repo.cmd('checkout', '--', ' '.join(rpaths))
        for p in rpaths:
            self.debug('revert changes for {}'.format(p))
        head = repo.get_head(hexsha=False)
        msg = 'Changes to {} reverted to Commit: {}\n' \
              'Date: {}\n' \
              'Message: {}'.format(expid, head.hexsha[:10],
                                   format_date(head.committed_date),
                                   head.message)
        self.information_dialog(msg)
def push_repository(self, repo):
self.debug('push repository {}'.format(repo))
for gi in self.application.get_services(IGitHost):
self.debug('pushing to remote={}, url={}'.format(gi.default_remote_name, gi.remote_url))
repo.push(remote=gi.default_remote_name)
    def push_repositories(self, changes):
        """Push the changed repositories to every configured git host's default remote."""
        for gi in self.application.get_services(IGitHost):
            push_repositories(changes, gi.default_remote_name, quiet=False)
    # IDatastore
    def get_greatest_aliquot(self, identifier):
        """Return the highest aliquot recorded for *identifier* (delegates to the DB)."""
        return self.db.get_greatest_aliquot(identifier)
    def get_greatest_step(self, identifier, aliquot):
        """Return the highest step for identifier/aliquot (delegates to the DB)."""
        return self.db.get_greatest_step(identifier, aliquot)
    def is_connected(self):
        """True when the database connection is up."""
        return self.db.connected
    def connect(self, *args, **kw):
        """Connect the underlying database; arguments are passed straight through."""
        return self.db.connect(*args, **kw)
    # meta repo
    def update_flux(self, *args, **kw):
        """Delegate a flux update to the meta repository."""
        self.meta_repo.update_flux(*args, **kw)
    def set_identifier(self, *args):
        """Delegate identifier assignment to the meta repository."""
        self.meta_repo.set_identifier(*args)
    def update_chronology(self, name, doses):
        """Update an irradiation's dose chronology in the meta repo and commit the change."""
        self.meta_repo.update_chronology(name, doses)
        self.meta_commit('updated chronology for {}'.format(name))
    def meta_pull(self, **kw):
        """Smart-pull the meta repository; returns the pull result."""
        return self.meta_repo.smart_pull(**kw)
    def meta_push(self):
        """Push the meta repository to its remote."""
        self.meta_repo.push()
    def meta_add_all(self):
        """Stage every unstaged change under the meta repository root."""
        self.meta_repo.add_unstaged(paths.meta_root, add_all=True)
def meta_commit(self, msg):
changes = self.meta_repo.has_staged()
if changes:
self.debug('meta repo has changes: {}'.format(changes))
self.meta_repo.report_status()
self.meta_repo.commit(msg)
self.meta_repo.clear_cache = True
else:
self.debug('no changes to meta repo')
    def add_production(self, irrad, name, prod):
        """Add a production ratio set *name* to irradiation *irrad* in the meta repo."""
        self.meta_repo.add_production_to_irradiation(irrad, name, prod)
    def get_production(self, irrad, name):
        """Return the named production ratio set for irradiation *irrad* from the meta repo."""
        return self.meta_repo.get_production(irrad, name)
    # get
    def get_local_repositories(self):
        """Return the names of repositories cloned locally (subdirectories of the dataset dir)."""
        return list_subdirectories(paths.repository_dataset_dir)
    def get_repository(self, exp):
        """Public accessor for a repository manager by identifier."""
        return self._get_repository(exp)
    def get_meta_head(self):
        """Return the HEAD commit of the meta repository."""
        return self.meta_repo.get_head()
    def get_irradiation_geometry(self, irrad, level):
        """Return the hole geometry for a level's holder (holder name comes from the DB)."""
        dblevel = self.db.get_irradiation_level(irrad, level)
        return self.meta_repo.get_irradiation_holder_holes(dblevel.holder)
def get_irradiation_names(self):
irrads = self.db.get_irradiations()
return [i.name for i in irrads]
    # add
    def add_interpreted_age(self, ia):
        """Serialize an interpreted age to its repository 'ia' file.

        Collects the preferred age/kca values, MSWD (NaN coerced to 0), the
        J-error inclusion flags, sample metadata and the contributing analyses
        into a plain dict and writes it via _add_interpreted_age.
        """
        a = ia.get_ma_scaled_age()
        mswd = ia.preferred_mswd
        # NaN is not JSON-representable; store 0 instead
        if isnan(mswd):
            mswd = 0
        d = dict(age=float(nominal_value(a)),
                 age_err=float(std_dev(a)),
                 display_age_units=ia.age_units,
                 age_kind=ia.preferred_age_kind,
                 kca_kind=ia.preferred_kca_kind,
                 kca=float(ia.preferred_kca_value),
                 kca_err=float(ia.preferred_kca_error),
                 mswd=float(mswd),
                 include_j_error_in_mean=ia.include_j_error_in_mean,
                 include_j_error_in_plateau=ia.include_j_error_in_plateau,
                 include_j_error_in_individual_analyses=ia.include_j_error_in_individual_analyses,
                 sample=ia.sample,
                 material=ia.material,
                 identifier=ia.identifier,
                 nanalyses=ia.nanalyses,
                 irradiation=ia.irradiation)
        d['analyses'] = [dict(uuid=ai.uuid, tag=ai.tag, plateau_step=ia.get_is_plateau_step(ai))
                         for ai in ia.all_analyses]
        self._add_interpreted_age(ia, d)
    def add_repository_association(self, expid, runspec):
        """Associate the analysis described by *runspec* with repository *expid*.

        Adds the DB association if not already present, and if the analysis
        lives in a different repository, copies its files into *expid* and
        commits them there.
        """
        db = self.db
        dban = db.get_analysis_uuid(runspec.uuid)
        if dban:
            # for/else: only add the association if no existing one matches
            for e in dban.repository_associations:
                if e.repository == expid:
                    break
            else:
                db.add_repository_association(expid, dban)

            src_expid = runspec.repository_identifier
            if src_expid != expid:
                repo = self._get_repository(expid)
                # copy every path variant (tags, ic factors, ...) of the analysis
                for m in PATH_MODIFIERS:
                    src = analysis_path(runspec.record_id, src_expid, modifier=m)
                    dest = analysis_path(runspec.record_id, expid, modifier=m, mode='w')
                    shutil.copyfile(src, dest)
                    repo.add(dest, commit=False)
                repo.commit('added repository association')
        else:
            self.warning('{} not in the database {}'.format(runspec.runid, self.db.name))
def add_material(self, name, grainsize=None):
db = self.db
added = False
if not db.get_material(name, grainsize):
added = True
db.add_material(name, grainsize)
return added
def add_project(self, name, pi=None, **kw):
added = False
db = self.db
if not db.get_project(name, pi):
added = True
db.add_project(name, pi, **kw)
return added
def add_sample(self, name, project, material, grainsize=None, note=None):
added = False
db = self.db
if not db.get_sample(name, project, material, grainsize):
added = True
db.add_sample(name, project, material, grainsize, note=note)
return added
def add_principal_investigator(self, name):
added = False
db = self.db
if not db.get_principal_investigator(name):
db.add_principal_investigator(name)
added = True
return added
def add_irradiation_position(self, irrad, level, pos, identifier=None, **kw):
db = self.db
added = False
if not db.get_irradiation_position(irrad, level, pos):
db.add_irradiation_position(irrad, level, pos, identifier, **kw)
self.meta_repo.add_position(irrad, level, pos)
added = True
return added
def add_irradiation_level(self, name, irradiation, holder, production_name, **kw):
added = False
dblevel = self.get_irradiation_level(irradiation, name)
if dblevel is None:
added = True
self.db.add_irradiation_level(name, irradiation, holder, production_name, **kw)
self.meta_repo.add_level(irradiation, name)
self.meta_repo.update_level_production(irradiation, name, production_name)
return added
def clone_repository(self, identifier):
root = os.path.join(paths.repository_dataset_dir, identifier)
if not os.path.isdir(root):
self.debug('cloning {}'.format(root))
url = self.make_url(identifier)
Repo.clone_from(url, root)
else:
self.debug('{} already exists'.format(identifier))
def add_repository(self, identifier, principal_investigator, inform=True):
self.debug('trying to add repository identifier={}, pi={}'.format(identifier, principal_investigator))
root = os.path.join(paths.repository_dataset_dir, identifier)
if os.path.isdir(root):
self.debug('already a directory {}'.format(identifier))
return True
names = self.remote_repository_names()
if identifier in names:
# make sure also in the database
self.db.add_repository(identifier, principal_investigator)
if inform:
self.warning_dialog('Repository "{}" already exists'.format(identifier))
return True
else:
if os.path.isdir(root):
self.db.add_repository(identifier, principal_investigator)
if inform:
self.warning_dialog('{} already exists.'.format(root))
else:
gs = self.application.get_services(IGitHost)
ret = False
for i, gi in enumerate(gs):
self.info('Creating repository at {}. {}'.format(gi.name, identifier))
if gi.create_repo(identifier, organization=self.organization, auto_init=True):
ret = True
if self.default_team:
gi.set_team(self.default_team, self.organization, identifier,
permission='push')
url = gi.make_url(identifier, self.organization)
if i == 0:
try:
repo = Repo.clone_from(url, root)
except BaseException, e:
self.debug('failed cloning repo. {}'.format(e))
ret = False
self.db.add_repository(identifier, principal_investigator)
else:
repo.create_remote(gi.default_remote_name or 'origin', url)
return ret
def add_irradiation(self, name, doses=None, add_repo=False, principal_investigator=None):
if self.db.get_irradiation(name):
self.warning('irradiation {} already exists'.format(name))
return
self.db.add_irradiation(name)
self.meta_repo.add_irradiation(name)
self.meta_repo.add_chronology(name, doses)
root = os.path.join(paths.meta_root, name)
p = os.path.join(root, 'productions')
if not os.path.isdir(p):
os.mkdir(p)
with open(os.path.join(root, 'productions.json'), 'w') as wfile:
json.dump({}, wfile)
if add_repo and principal_investigator:
self.add_repository('Irradiation-{}'.format(name), principal_investigator)
return True
    def add_load_holder(self, name, path_or_txt):
        """Register a load holder in the database and store its geometry in the meta repo."""
        self.db.add_load_holder(name)
        self.meta_repo.add_load_holder(name, path_or_txt)
def copy_production(self, pr):
"""
@param pr: irrad_ProductionTable object
@return:
"""
pname = pr.name.replace(' ', '_')
path = os.path.join(paths.meta_root, 'productions', '{}.json'.format(pname))
if not os.path.isfile(path):
obj = {}
for attr in INTERFERENCE_KEYS + RATIO_KEYS:
obj[attr] = [getattr(pr, attr), getattr(pr, '{}_err'.format(attr))]
dvc_dump(obj, path)
    # private
    def _add_interpreted_age(self, ia, d):
        """Write interpreted-age dict *d* to the analysis' 'ia' file in its repository."""
        p = analysis_path(ia.identifier, ia.repository_identifier, modifier='ia', mode='w')
        dvc_dump(d, p)
    def _load_repository(self, expid, prog, i, n):
        """Progress callback: sync repository *expid* ({i}/{n}) while updating the message."""
        if prog:
            prog.change_message('Loading repository {}. {}/{}'.format(expid, i, n))
        self.sync_repo(expid)
def _make_record(self, record, prog, i, n, calculate_f_only=False):
meta_repo = self.meta_repo
if prog:
# this accounts for ~85% of the time!!!
prog.change_message('Loading analysis {}. {}/{}'.format(record.record_id, i, n))
expid = record.repository_identifier
if not expid:
exps = record.repository_ids
self.debug('Analysis {} is associated multiple repositories '
'{}'.format(record.record_id, ','.join(exps)))
expid = None
if self.selected_repositories:
rr = [si for si in self.selected_repositories if si in exps]
if rr:
if len(rr) > 1:
expid = self._get_requested_experiment_id(rr)
else:
expid = rr[0]
if expid is None:
expid = self._get_requested_experiment_id(exps)
if isinstance(record, DVCAnalysis):
a = record
else:
# self.debug('use_repo_suffix={} record_id={}'.format(record.use_repository_suffix, record.record_id))
try:
rid = record.record_id
if record.use_repository_suffix:
rid = '-'.join(rid.split('-')[:-1])
a = DVCAnalysis(rid, expid)
a.group_id = record.group_id
except AnalysisNotAnvailableError:
self.info('Analysis {} not available. Trying to clone repository "{}"'.format(rid, expid))
try:
self.sync_repo(expid)
except CredentialException:
self.warning_dialog('Invalid credentials for GitHub/GitLab')
return
try:
a = DVCAnalysis(rid, expid)
except AnalysisNotAnvailableError:
self.warning_dialog('Analysis {} not in repository {}'.format(rid, expid))
return
# get repository branch
a.branch = get_repository_branch(os.path.join(paths.repository_dataset_dir, expid))
# a.set_tag(record.tag)
# load irradiation
if a.irradiation and a.irradiation not in ('NoIrradiation',):
# self.debug('Irradiation {}'.format(a.irradiation))
chronology = meta_repo.get_chronology(a.irradiation)
a.set_chronology(chronology)
frozen_production = self._get_frozen_production(rid, a.repository_identifier)
if frozen_production:
pname, prod = frozen_production.name, frozen_production
else:
pname, prod = meta_repo.get_production(a.irradiation, a.irradiation_level)
a.set_production(pname, prod)
fd = meta_repo.get_flux(record.irradiation,
record.irradiation_level,
record.irradiation_position_position)
a.j = fd['j']
if fd['lambda_k']:
a.arar_constants.lambda_k = fd['lambda_k']
a.standard_age = fd['standard_age']
a.standard_name = fd['standard_name']
a.standard_material = fd['standard_material']
if calculate_f_only:
a.calculate_F()
else:
a.calculate_age()
return a
    def _get_frozen_production(self, rid, repo):
        """Return a Production frozen next to analysis *rid* in *repo*, or implicitly None."""
        path = analysis_path(rid, repo, 'productions')
        if path:
            return Production(path)
def _get_repository(self, repository_identifier, as_current=True):
repo = None
if as_current:
repo = self.current_repository
path = repository_path(repository_identifier)
if repo is None or repo.path != path:
self.debug('make new repomanager for {}'.format(path))
repo = GitRepoManager()
repo.path = path
repo.open_repo(path)
if as_current:
self.current_repository = repo
return repo
    def _bind_preferences(self):
        """Bind DVC and database attributes to the application's preference system."""
        prefid = 'pychron.dvc'
        for attr in ('meta_repo_name', 'organization', 'default_team'):
            bind_preference(self, attr, '{}.{}'.format(prefid, attr))
        prefid = 'pychron.dvc.db'
        for attr in ('username', 'password', 'name', 'host', 'kind', 'path'):
            bind_preference(self.db, attr, '{}.{}'.format(prefid, attr))
        # recompute meta_root now that meta_repo_name may have been loaded
        self._meta_repo_name_changed()
    def _meta_repo_name_changed(self):
        """Trait-change handler: keep the global meta_root path in sync with the repo name."""
        paths.meta_root = os.path.join(paths.dvc_dir, self.meta_repo_name)
    def _defaults(self):
        """Populate the meta repository with default holders/productions on first run.

        For each category, the directory is created if missing and defaults are
        written either automatically (auto_add) or after user confirmation.
        """
        self.debug('writing defaults')
        # self.db.create_all(Base.metadata)
        self.db.add_save_user()
        for tag, func in (('irradiation holders', self._add_default_irradiation_holders),
                          ('productions', self._add_default_irradiation_productions),
                          ('load holders', self._add_default_load_holders)):
            d = os.path.join(self.meta_repo.path, tag.replace(' ', '_'))
            if not os.path.isdir(d):
                os.mkdir(d)
            if self.auto_add:
                func()
            elif self.confirmation_dialog('You have no {}. Would you like to add some defaults?'.format(tag)):
                func()
    def _add_default_irradiation_productions(self):
        """Write the bundled TRIGA production file into the meta repo."""
        ds = (('TRIGA.txt', TRIGA),)
        self._add_defaults(ds, 'productions')
    def _add_default_irradiation_holders(self):
        """Write the bundled 24-spoke irradiation holder file into the meta repo."""
        ds = (('24Spokes.txt', HOLDER_24_SPOKES),)
        self._add_defaults(ds, 'irradiation_holders', )
    def _add_default_load_holders(self):
        """Write the bundled laser tray files into the meta repo and register them in the DB."""
        ds = (('221.txt', LASER221),
              ('65.txt', LASER65))
        self._add_defaults(ds, 'load_holders', self.db.add_load_holder)
def _add_defaults(self, defaults, root, dbfunc=None):
commit = False
repo = self.meta_repo
for name, txt in defaults:
p = os.path.join(repo.path, root, name)
if not os.path.isfile(p):
with open(p, 'w') as wfile:
wfile.write(txt)
repo.add(p, commit=False)
commit = True
if dbfunc:
name = remove_extension(name)
dbfunc(name)
if commit:
repo.commit('added default {}'.format(root.replace('_', ' ')))
def __getattr__(self, item):
try:
return getattr(self.db, item)
except AttributeError:
try:
return getattr(self.meta_repo, item)
except AttributeError, e:
print e, item
# raise DVCException(item)
    # defaults
    def _db_default(self):
        """Trait default: database connection parameters.

        NOTE(review): hardcoded credentials ('root'/'Argon' on localhost) are a
        development fallback; real values are bound from preferences in
        _bind_preferences.
        """
        return DVCDatabase(kind='mysql',
                           username='root',
                           password='Argon',
                           host='localhost',
                           name='pychronmeta')
    def _meta_repo_default(self):
        """Trait default: a fresh MetaRepo instance."""
        return MetaRepo()
if __name__ == '__main__':
    # ad-hoc smoke test: locate an interpreted-age file in a dev environment
    paths.build('_dev')

    idn = '24138'
    exps = ['Irradiation-NM-272']
    # print as a function call for py2/py3 compatibility (was a py2 print statement)
    print(find_interpreted_age_path(idn, exps))
    # d = DVC(bind=False)
    # with open('/Users/ross/Programming/githubauth.txt') as rfile:
    #     usr = rfile.readline().strip()
    #     pwd = rfile.readline().strip()
    # d.github_user = usr
    # d.github_password = pwd
    # d.organization = 'NMGRLData'
    # d.add_experiment('Irradiation-NM-273')
# ============= EOF =============================================
|
We've all had those moments.
You park the car, toss the keys on the seat and lock the door.
You pay for dinner with a credit card and leave the card in the folder.
You grab the peanut butter from the pantry, slap some on a piece of bread and stick the jar back in the refrigerator.
Sometimes we just space out. Human nature.
But pity the poor soul who pulls one of those stunts with $100 worth of gift cards purchased by a spouse.
Charles Minick did just that at Christmastime, and he's still trying to atone.
His wife, Susan, gave him several greeting cards to mail and one card to hand-deliver to his stepsister. When Minick arrived at the Ellet post office, he not only tossed the greeting cards in the mail but threw in the unaddressed envelope as well.
That one had his stepsister's name on the outside and contained a $50 gift card to the Flower Factory and another to Gabriel Brothers.
Minick figured it wouldn't be more than a minor inconvenience. He'd just return the next morning and someone would surely fish it out for him. Wrong.
When he came back, he was told the mail had already been sent to Cleveland for sorting. So he called Cleveland. When he was told it would take at least several days to retrieve it, he reimbursed his wife.
After repeated calls to Cleveland over many weeks, Minick was still out of luck. So in early March he contacted his favorite columnist, bemoaning what he saw as the demise of customer service.
Well, not so fast. As of this writing, Minick is halfway home.
A couple of days after I contacted a U.S. Postal Service official in Cleveland, Minick got a call saying his envelope and one of the gift certificates had been found and were being mailed back to him.
We're not sure what happened to the other card. In the best-case scenario it was forwarded to the Mail Recovery Center in Atlanta and eventually will be returned.
Here's the way these things are supposed to work, according to regional USPS spokesman David Van Allen.
Mail that's undeliverable and has no return address is considered dead and is sent to Atlanta, where employees open and examine the contents. If the lost items are worth more than $25 and the owner can't be immediately identified, they are held for three months. All envelopes containing cash and gift cards with some identifying information are held for six months.
The MRC won't search for anything under $25 or for prescription drugs, checks, money orders, consumable items or cell phones.
Now, given the volume of stuff that is dropped into mailboxes each day, it's impressive that at least some of the material classified as "Loose in Mail" is recovered. Yet in two recent local cases — three if you count Minick's partial victory — the system worked.
Jane Greenland, customer relations coordinator for the Akron post office, forwarded the names of the satisfied customers, both of whom confirmed her claim.
Akron resident Shiva Kumar Madishetty mailed a package last fall that contained a pair of North Face shoes, a gift to her best friend. Although a label on the package carried both the friend's address and the return address, somewhere along the line the label was torn off.
It happens. Mail shoots through machines on high-speed conveyor belts, then slides down chutes into a pile. A label can be scraped off when it rubs against other boxes or the equipment itself.
A month after mailing her package, Madishetty realized her friend hadn't received it. She contacted the Akron office, which put her in touch with the Atlanta center. Although the recovery took nearly six months, Madishetty was thrilled when her problem was resolved.
In another case, a sorting machine gobbled up an envelope — that happens, too — that had been mailed by Melinda Clutter of Medina.
Clutter sent a Christmas card, a gift card and a check to both of her young godchildren. Two days before Christmas, she heard that one of the girls had gotten her present but the other received an empty envelope.
As Clutter was heading out the door to send another gift card via FedEx overnight, she checked her mail and discovered the contents of her original envelope had been returned to her. The Akron office was able to identify her because the check with her name and address had survived its encounter with the sorting machine.
Sometimes the post office screws up. Sometimes the customer screws up. But unlike some screw-ups, these aren't necessarily fatal.
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
import urllib.parse as urlparse # Es muy lento en PY2. En PY3 es nativo
else:
import urlparse # Usamos el nativo de PY2 que es más rápido
import re
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from bs4 import BeautifulSoup
host = 'https://www.pornxbit.com'
# gounlimited, woolf, openload
def mainlist(item):
    """Build the channel's root menu: listing categories plus the search entry."""
    logger.info()
    menu = [("Peliculas", "lista", host + "/full-movie/?asgtbndr=1"),
            ("Nuevos", "lista", host + "/porn-videos/?filter=latest&asgtbndr=1"),
            ("Mas vistos", "lista", host + "/porn-videos/?filter=most-viewed&asgtbndr=1"),
            ("Mejor valorado", "lista", host + "/porn-videos/?filter=popular&asgtbndr=1"),
            ("Mas largo", "lista", host + "/porn-videos/?filter=longest&asgtbndr=1"),
            ("PornStar", "categorias", host + "/actors/"),
            ("Categorias", "categorias", host + "/categories/")]
    itemlist = [item.clone(title=title, action=action, url=url) for title, action, url in menu]
    itemlist.append(item.clone(title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*; return the resulting listing, [] on error."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "%s/?s=%s" % (host, texto)
    try:
        return lista(item)
    except Exception:
        # was a bare "except:" (also caught SystemExit/KeyboardInterrupt);
        # sys is already imported at module level
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """List category/actor entries found on *item.url*, plus a pagination item."""
    logger.info()
    itemlist = []
    soup = create_soup(item.url).find('div', class_='videos-list')
    matches = soup.find_all('article', id=re.compile(r"^post-\d+"))

    for elem in matches:
        url = elem.a['href']
        title = elem.a['title']
        thumbnail = elem.img['src']
        plot = ""
        itemlist.append(item.clone(action="lista", title=title, url=url,
                                   thumbnail=thumbnail, plot=plot))

    # pagination: link following the 'current' page marker
    next_page = soup.find('a', class_='current')
    if next_page:
        next_page = next_page.parent.find_next_sibling("li").a['href']
        # resolve possibly-relative pagination links (consistent with lista())
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="categorias", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page))
    return itemlist
def create_soup(url, referer=None, unescape=False):
    """Download *url* (optionally with a Referer header) and parse it into BeautifulSoup."""
    logger.info()
    if referer:
        response = httptools.downloadpage(url, headers={'Referer': referer})
    else:
        response = httptools.downloadpage(url)
    data = response.data
    if unescape:
        data = scrapertools.unescape(data)
    return BeautifulSoup(data, "html5lib", from_encoding="utf-8")
def lista(item):
    """List the videos found on *item.url*, decorating titles with duration/quality,
    plus a pagination item."""
    logger.info()
    itemlist = []
    soup = create_soup(item.url).find('main')
    matches = soup.find_all('article', class_=re.compile(r"^post-\d+"))
    for elem in matches:
        url = elem.a['href']
        # normalize the unicode en-dash used in some titles
        title = elem.a['title'].replace("–", "-")
        thumbnail = elem.img['data-src']
        time = elem.find('span', class_='duration')
        quality = elem.find('span', class_='hd-video')
        if time:
            time = time.text.strip()
        else:
            time = ""
        if quality:
            quality = quality.text
            title = "[COLOR yellow]%s[/COLOR] [COLOR red]%s[/COLOR] %s" % (time,quality,title)
        else:
            title = "[COLOR yellow]%s[/COLOR] %s" % (time,title)
        plot = ""
        itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=thumbnail,
                                   plot=plot, fanart=thumbnail, contentTitle=title ))
    # pagination: link following the 'current' page marker, resolved against item.url
    next_page = soup.find('a', class_='current')
    if next_page:
        next_page = next_page.parent.find_next_sibling("li").a['href']
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page) )
    return itemlist
def play(item):
    """Resolve the playable server URLs from the page's embedded iframes."""
    logger.info()
    player = create_soup(item.url).find('div', class_='responsive-player')
    itemlist = [item.clone(action="play", title="%s", contentTitle=item.title, url=iframe['src'])
                for iframe in player.find_all('iframe')]
    # fill each title's %s placeholder with the detected server name
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist
|
This cornbread is traditionally baked in a pre-heated cast-iron skillet to create a crispy brown crust, but an 8 x 8 baking dish will also work.
Pre-heat oven to 425°. If using a cast iron skillet, put it into the oven to pre-heat. Put a piece of foil or some other oven-proof material on the handle as a reminder not to touch it without a potholder. If using a different container, wait until a few minutes before the batter is ready to pre-heat.
Sour the milk: add 2 tablespoons of vinegar to the milk and set aside.
In a mixing bowl, beat the eggs well. Add oil or melted butter and the sugar or honey. In a separate bowl, stir the Masa Harina De Maiz, cornmeal, salt, soda and baking powder together. Add the soured milk to the eggs, mix well, and stir in the dry ingredients and mix lightly.
Remove the hot skillet from the oven, add oil or butter to coat the pan and pour in the batter. Return to oven and bake until set and golden brown, 20-25 minutes. Serve warm or room temperature, with butter, applesauce, or syrup.
"Thank you ever so much for this recipe, Ive been paying $8 a loaf for spelt or kamut bread for my son because he cant have much gluten. Every recipe Ive come across for a GF bread corn or not has far too many ingredients that just aren't what you have in the cupboard ready, there too expensive and cost more to make than buying the loaf already made. But not yours its simple, its delicious and best of all my son will eat it. So thanx again for sharing this budget saving scrumptious corn bread recipe I'm telling the world xoxoxo"
"Perfect cornbread - tasted just like grandma used to make. Only change that I made was using buttermilk instead of regular milk + vinegar."
|
#!/usr/bin/python
#
# Genetic Programming algorithm for for evolving
# 3-qubit entanglement production quantum circuit
#
# Copyright (C) 2006 Robert Nowotniak <rnowotniak@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# based on:
# [Rub00] Ben I. P. Rubinstein. Evolving quantum circuits using genetic programming
#
from random import choice,randint
from qclib import *
from copy import deepcopy as dc
import sys
class Node:
    ''' Genetic Programming Tree Node: one gate choice in a circuit stage. '''
    def __init__(self, type, target, control):
        # Gate kind: 'T', 'H', 'I' or 'CNot'.
        # 'T' is the pi/8 gate (shifts the phase by the Pi/4 angle).
        self.type = type
        # target/control registers encoded as bit strings
        self.target = target
        self.control = control
    def __repr__(self):
        return '({0}, {1}, {2})'.format(self.type, self.target, self.control)
def randNode(qubits = 3):
    ''' Generate random GP Tree Node.

    The target/control registers are random bit strings of length *qubits*;
    phenotype() later reduces them modulo the qubit count.
    '''
    # range instead of py2-only xrange for py2/py3 compatibility
    return Node(
            choice(('I', 'H', 'T', 'CNot')),
            ''.join([choice(['0', '1']) for x in range(qubits)]),
            ''.join([choice(['0', '1']) for x in range(qubits)]))
def randGenotype(qubits = 3, length = 4):
    ''' Generate random genotype (GP Tree): a list of *length* random nodes. '''
    # range instead of py2-only xrange for py2/py3 compatibility
    return [randNode(qubits) for i in range(length)]
def phenotype(genotype):
    ''' Transforms genotype into phenotypes (QCircuits) space.

    Each Node becomes one circuit Stage: the target/control bit strings are
    decoded (mod the qubit count) into wire indices; a CNot with distinct
    control/target becomes a controlled gate, anything else a single-qubit
    gate padded with identities. Gate/Stage/QCircuit come from qclib.
    '''
    stages = []
    for n in genotype:
        qubits = len(n.target)
        # decode bit strings to wire indices within range
        trgt = int(n.target, 2) % qubits
        ctrl = int(n.control, 2) % qubits
        if n.type == 'CNot' and ctrl != trgt:
            cnot = CNot(ctrl, trgt)
            gates = [cnot]
            gates += [I] * (qubits - cnot.size)
            gates.reverse()
        else:
            # single-qubit gate on the target wire, identities elsewhere
            gates = [I] * (qubits - trgt - 1)
            if n.type == 'H':
                gates.append(h)
            elif n.type == 'I':
                gates.append(I)
            elif n.type == 'CNot':
                # degenerate CNot (ctrl == trgt) degrades to a plain Not
                gates.append(Not())
            elif n.type == 'T':
                gates.append(T)
            else:
                raise Exception()
            gates += [I] * (qubits - len(gates))
        s = Stage(*gates)
        stages.append(s)
    return QCircuit(*stages)
# Target computation: start in |000>, aim for the 3-qubit GHZ-like state
# (|000> + |111>) scaled by s2 -- s2 presumably 1/sqrt(2) from qclib, TODO confirm.
# NOTE(review): `input` shadows the builtin of the same name.
input = Ket(0, 3) # |000>
expected = s2 * Ket(0, 3) + s2 * Ket(7, 3)
qubits = 3
def fitness(indiv):
    ''' Error of a candidate circuit: sum of absolute entry-wise differences
    between the state it produces from |000> and the target state.
    Lower is better. '''
    produced = indiv(input)
    return sum(abs(produced.matrix - expected.matrix))
poplen = 100   # population size
elitism = 5    # number of best genotypes copied unchanged each generation
nstages = 5    # circuit stages (genes) per genotype
Ngen = 100     # number of generations
pc = 0.7       # crossover probability
pm = 0.03      # mutation probability
nm = 2         # genes mutated per mutation operation
# Generate random population
population = []
for i in xrange(poplen):
    population.append(randGenotype(qubits = qubits, length = nstages))
f = open('log.txt', 'w')  # per-epoch fitness statistics log
print population
for epoch in xrange(Ngen):
print 'epoch ' + str(epoch)
fvalues = []
for i in xrange(poplen):
fvalues.append(fitness(phenotype(population[i])))
# for roulette selection
sects = [-v for v in fvalues]
m = min(sects)
if m < 0:
sects = [s - m + (0.01 * abs(m)) for s in sects]
sects /= sum(sects)
# accumulated probabilities
for i in xrange(1, poplen):
sects[i] = sects[i - 1] + sects[i]
sects[-1] = 1.0
if best == None or min(fvalues) < best_val:
best_val = min(fvalues)
best = population[fvalues.index(best_val)]
f.write('%d %f %f %f %f\n' % (epoch, best_val, min(fvalues), max(fvalues), sum(fvalues) / len(fvalues)))
newpop = []
# elitism
if elitism > 0:
ranking = {}
for i in xrange(poplen):
ranking[i] = fvalues[i]
kvs = ranking.items()
kvs = [(v,k) for (k,v) in kvs]
kvs.sort()
kvs = [(k,v) for (v,k) in kvs]
for e in xrange(elitism):
newpop.append(dc(population[kvs[e][0]]))
while len(newpop) < poplen:
# select genetic operation probabilistically
r = random()
if r <= pm:
op = 'mutation'
elif r <= pm + pc:
op = 'crossover'
else:
op = 'reproduction'
# select two individuals by roulette
r = random()
for j in xrange(len(sects)):
if r <= sects[j]:
indiv1 = j
break
r = random()
for j in xrange(len(sects)):
if r <= sects[j]:
indiv2 = j
break
if op == 'reproduction':
newpop.append(dc(population[indiv1]))
elif op == 'crossover':
par1 = indiv1
par2 = indiv2
# crossover type
crosstype = choice(('gate', 'target', 'control'))
if crosstype == 'gate':
cp = randint(1, nstages - 1)
child1 = dc(population[par1][:cp] + population[par2][cp:])
child2 = dc(population[par2][:cp] + population[par1][cp:])
elif crosstype == 'target':
child1 = dc(population[par1])
child2 = dc(population[par2])
g1 = choice(child1)
g2 = choice(child2)
cp = randint(0, len(g1.target))
# crossover target qubit binary strings
control1 = g1.target[:cp] + g2.target[cp:]
control2 = g2.target[:cp] + g1.target[cp:]
g1.target = control1
g2.target = control2
elif crosstype == 'control':
child1 = dc(population[par1])
child2 = dc(population[par2])
g1 = choice(child1)
g2 = choice(child2)
cp = randint(0, len(g1.control))
# crossover control qubit binary strings
target1 = g1.target[:cp] + g2.target[cp:]
target2 = g2.target[:cp] + g1.target[cp:]
g1.target = target1
g2.target = target2
else:
assert(False)
# add the offspring to new population
newpop.append(child1)
newpop.append(child2)
elif op == 'mutation':
# mutation
child = dc(population[indiv1])
done = []
for i in xrange(nm):
while True:
gi = choice(xrange(len(child)))
if gi not in done:
break
done.append(gi)
child[gi] = randNode(qubits = qubits)
newpop.append(child)
else:
# NOT REACHABLE
assert(False)
population = newpop
print best_val
print best
f.close()
|
Tutorials on buying Bitcoin: how to buy Bitcoin with a credit card.
Abra is the only global cryptocurrency app that allows you to buy,.
Reliable Exchange Bitcoin To Bank Account instantly, Wire transfer bitcoin to checking account US bank of america chase wells fargo,.
Lock the price before sending your bitcoins to us and receive a guaranteed amount of EUR or USD to your bank or PayPal account.
|
"""
Communication class
"""
import sys
import glob
import serial
import time
class Communicator:
    """
    Class that's responsible for communication with the arduino.

    All members are class-level: the class itself acts as a singleton
    holding one open serial connection at a time.
    """
    class CommunicationFaultException(Exception):
        """
        Exception raised when the device answers with unexpected content.
        """
        def __init__(self, expectedMsg, msg):
            """
            constructor
            @param expectedMsg expected content of the message
            @param msg the content of the message you got
            """
            # BUG FIX: the zero-argument super() is already bound to the
            # instance; the original also passed `self`, polluting the
            # exception args with the exception object itself.
            super().__init__(
                'Expected content: "{0}" | Content you got: "{1}"'
                .format(expectedMsg, msg))

    class CompressedClipTooLong(Exception):
        """
        Exception for clips whose compressed form exceeds the device buffer.
        """
        def __init__(self):
            """
            constructor
            """
            # BUG FIX: same superfluous `self` argument as above.
            super().__init__('Compressed clip is too long!')

    # Currently open serial connection (None while no transmission runs).
    serialPort = None

    @classmethod
    def getPorts(cls):
        """
        returns a list of available serial ports hosting a Nightsky device
        @return a list with available ports
        @see https://stackoverflow.com/questions/12090503/listing-available-
        com-ports-with-python
        """
        if sys.platform.startswith('win'):
            ports = ['COM' + str(i + 1) for i in range(256)]
        elif sys.platform.startswith('linux') or sys.platform.startswith(
                'cygwin'):
            # this is to exclude your current terminal "/dev/tty"
            ports = glob.glob('/dev/tty[A-Za-z]*')
        elif sys.platform.startswith('darwin'):
            ports = glob.glob('/dev/tty.*')
        else:
            raise EnvironmentError('Unsupported platform')
        # First pass: keep only ports that can be opened at all.
        possiblePorts = []
        for port in ports:
            try:
                s = serial.Serial(port)
                s.close()
                possiblePorts.append(port)
            except (OSError, serial.SerialException):
                pass
        # Second pass: ping each candidate and keep those answering 'nsd1'.
        result = []
        for port in possiblePorts:
            try:
                s = serial.Serial(port, 9600, timeout=2, writeTimeout=2)
                try:
                    time.sleep(2)  # Sleep for windows
                    s.write(b'ping')
                    pong = s.read(4)
                finally:
                    # BUG FIX: the probe connection was never closed.
                    s.close()
                if pong == b'nsd1':
                    result.append(port)
            except serial.SerialTimeoutException:
                pass
        return result

    @classmethod
    def start(cls, port):
        """
        Starts a transmission.
        @param port port of the Nightsky device
        @raise CommunicationFaultException when the helo response is wrong
        """
        cls.serialPort = serial.Serial(port)
        time.sleep(2)  # Sleep for windows
        cls.serialPort.write(b'helo')
        heloResp = cls.serialPort.read(4)
        if heloResp != b'helo':
            cls.serialPort.close()
            raise cls.CommunicationFaultException(b'helo', heloResp)

    @classmethod
    def transmitFrame(cls, frame):
        """
        Transmits a frame.
        @param frame compressed frame as bytes
        @raise CompressedClipTooLong when the device reports its buffer full
        @raise CommunicationFaultException on any other unexpected answer
        """
        cls.serialPort.write(frame)
        resp = cls.serialPort.read(4)
        # 'done' during transmission means the device buffer overflowed.
        if resp == b'done':
            raise cls.CompressedClipTooLong()
        if resp != b'ok  ':
            raise cls.CommunicationFaultException(b'ok  ', resp)

    @classmethod
    def end(cls):
        """
        Ends the transmission.
        @raise CommunicationFaultException when the final answer is not 'done'
        """
        cls.serialPort.write(b'\x00\x00')
        doneResp = cls.serialPort.read(4)  # Wait for "done" from device
        cls.serialPort.close()
        if doneResp != b'done':
            raise cls.CommunicationFaultException(b'done', doneResp)
|
Your fabulous friend with the greatest place with that relaxing vibe and beauty everywhere you look? She pays attention to the details.
She makes sure to have a bottle of sauvignon blanc chilled for Jane (who only drinks white wine) even though everyone else will be enjoying Pinot Noir on this brisk October evening.
She artfully arranges autumn leaves gathered on a neighborhood walk around a candle on the kitchen bar. The details make the difference between something nice and something FABULOUS!
If your first thought is, “I don’t have time for details,” fear not! The most time-consuming part of adding those special details to your space comes at the beginning, when you are selecting your furniture. Great furniture gives your space “good bones,” the structure which forms the foundation of your space. Once you have these “good bones” in place, incorporating little details takes no more time than clearing off your coffee table for your next party (and I do that with a sweep of my arm and a closed-eye fling of the pile into my study).
For example, picking a simple backless bar stool may seem easy. However, finding the perfect backless bar stool takes a moment of consideration.
Take the example of our RAD backless bar stool.
The curve in the seat of the stool pushes your body into a more upright posture, supporting your back as you relax with your glass of wine. Chamfers allow room for your legs to drape painlessly over the side whether sitting straight on or straddling the stool. Double tenoned joints lend strength to the piece, leading to decades of support for you and your guests.
All of these details lend comfort, value and beauty to this humble backless bar stool.
Once you have that piece in place, you only need to snag a few autumn leaves on your daily walk or pick up some tasteful gourds during your regular grocery shopping to add that perfect autumnal detail to your bar for your next wine tasting.
So let’s raise our glasses to talking with your best friends until the wee hours of Saturday night!
…And to the details that make that moment possible.
LA Bourgeois discovered an artist who happened to work in furniture when she joined Brian Boggs Chairmakers. Inspired to unleash her own inner “furniture nerd,” she combines her knowledge of woodworking, interior design and writing to convey the passion she sees around her each day.
|
from webapp2_extras import security
from handlers import base
from library import messages
from models.profile import Profile
from forms.profile import ProfileForm
from forms.profile_update import ProfileUpdateForm
class EditorHandler(base.BaseHandler):
    """CRUD request handler for editor profiles (list/create/update/delete)."""

    def create(self):
        """Create a new editor: a webapp2 auth user plus a Profile entity."""
        form = ProfileForm(self.request.POST)
        if self.request.method == 'POST' and form.validate():
            name = ' '.join([form.first_name.data,
                             form.last_name.data])
            # Create the webapp2_extras.auth user.
            model = self.auth.store.user_model
            ok, user = model.create_user(form.data['email'],
                                         password_raw=form.data['password'])
            if not ok:
                self.session.add_flash(messages.EDITOR_CREATE_ERROR,
                                       level='error')
                return self.redirect_to('editors.list')
            # Create the profile.
            profile = Profile(name=name,
                              email=form.data['email'],
                              is_editor=True,
                              auth_user_id=user.key.id())
            profile.put()
            # Force reload of profile object
            Profile.get(profile.key())
            self.session.add_flash(messages.EDITOR_CREATE_SUCCESS)
            return self.redirect_to('editors.list')
        return self.render_to_response('editors/form.haml', {'form': form})

    def delete(self, id):
        """Delete the editor with the given id; flash and redirect."""
        editor = Profile.get_by_id(int(id))
        if not editor or not editor.is_editor:
            self.session.add_flash(messages.EDITOR_NOT_FOUND, level='error')
            return self.redirect_to('editors.list')
        editor.delete()
        self.session.add_flash(messages.EDITOR_DELETE_SUCCESS)
        return self.redirect_to('editors.list')

    def list(self):
        """Render the list of all editor profiles."""
        editors = Profile.all().filter('is_editor = ', True)
        return self.render_to_response('editors/list.haml', {'editors': editors})

    def update(self, id):
        """Update an existing editor's name, email and password."""
        editor = Profile.get_by_id(int(id))
        if not editor or not editor.is_editor:
            self.session.add_flash(messages.EDITOR_NOT_FOUND, level='error')
            # BUG FIX: the missing `return` here let execution fall through
            # with `editor` set to None (or a non-editor) and crash below.
            return self.redirect_to('editors.list')
        form = ProfileUpdateForm(self.request.POST, obj=editor)
        form.user_id = editor.key().id()
        if self.request.method == 'GET':
            names = editor.name.split(' ')
            form.first_name.data = names[0]
            # Robustness: a single-word name no longer raises IndexError.
            form.last_name.data = names[1] if len(names) > 1 else ''
            form.profile_id = editor.key().id()
        if self.request.method == 'POST' and form.validate():
            # Access to the user model is only needed in this section.
            user = editor.get_auth_user()
            editor.name = ' '.join([form.first_name.data, form.last_name.data])
            if form.email.data != editor.email:
                # Keep the auth_ids list in sync with the new address.
                user.auth_ids.remove(editor.email)
                user.auth_ids.append(form.email.data)
                editor.email = form.email.data
            if form.password.data:
                user.password = security.generate_password_hash(form.password.data,
                                                                length=12)
            editor.put()
            user.put()
            # Force reload of profile object
            Profile.get(editor.key())
            self.session.add_flash(messages.EDITOR_UPDATE_SUCCESS)
            return self.redirect_to('editors.list')
        return self.render_to_response('editors/form.haml', {'form': form})
|
Our 'India' t-shirt is a tribute to the country where our products are made. Sewn in our fair trade factory in Kolkata, India. Screen printed at BLOC Screen Printing in Price Hill, OH.
BLOC Screen Printing is a screen printing shop in Price Hill, OH that provides job training and employs individuals who would otherwise have great difficulty in finding work. Working at BLOC Screen Printing gives individuals the opportunity to learn an employable trade. BLOC mentors their employees, and gives them the skills needed to re-integrate into their community.
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2014 7Gates Interactive Technologies
# <http://www.7gates.co>
# @author Erdem Uney
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
    # Partner extension adding the tax-office field used on invoices.
    _inherit = "res.partner"
    _columns = {
        'vat_dept': fields.char('Tax Department', size=32, help="Tax Identification Department."),
    }
    def _commercial_fields(self, cr, uid, context=None):
        # Propagate vat_dept from the commercial partner like the other
        # commercial fields handled by the parent implementation.
        commercial = super(res_partner, self)._commercial_fields(cr, uid, context=context)
        return commercial + ['vat_dept']
|
£25k-£32k base salary, £40k-£50k OTE in year 1.
* Earn huge bonuses from your team performance.
Ready to take on a bigger challenge? Join a recruitment company who have established contract and perm teams with IT. They're the #1 recruiter in Knutsford!
* Manage a team of 3 and hire 2x more of your own.
* Lead from the front and develop the team.
* There are lots of accounts to manage, and they have a dedicated BD function.
* You'll want to get your teeth stuck into management.
* Caring of colleagues and a big fan of building teams.
* You should have recruited in a professional, white-collar market. Ideally with tech.
|
"""Convert to and from Roman numerals
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/05 21:57:20 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
#Define exceptions
class RomanError(Exception): pass
class OutOfRangeError(RomanError): pass
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass

#Define digit mapping (ordered largest-first for greedy conversion)
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))

def toRoman(n):
    """convert integer to Roman numeral

    Raises NotIntegerError for non-integer input and OutOfRangeError for
    values outside 1..3999 (the representable Roman range). The original
    defined these exceptions but never raised them, silently returning ""
    for 0 and garbage-prone results for other invalid input.
    """
    if not isinstance(n, int):
        raise NotIntegerError("non-integers can not be converted")
    if not (0 < n < 4000):
        raise OutOfRangeError("number out of range (must be 1..3999)")
    result = ""
    # Greedy: repeatedly emit the largest numeral whose value still fits.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result
def fromRoman(s):
    """convert Roman numeral to integer

    Implements the previously-stubbed (`pass`) conversion: greedily
    consumes numerals from romanNumeralMap and raises
    InvalidRomanNumeralError for empty input or leftover characters.
    Note: unambiguous but non-canonical forms such as 'IIII' are accepted.
    """
    if not s:
        raise InvalidRomanNumeralError('Input can not be blank')
    result = 0
    index = 0
    for numeral, integer in romanNumeralMap:
        while s[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    if index != len(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)
    return result
|
December 23rd will be one of the memorable days in the history of OCC. For the first time since the inception in August, we had more folks who are founders/co founders of companies than those who are planning to. There were 7 startups and the discussions that ensued was simply of real quality. There were two key insights from this meeting. Read on for more.
We had an informal discussions for the first forty minutes then we decided to get on a round where each of the founders talk about their startups.
DyAnsys is a medical instrumentation company. Gunasekaran a co founder is an active participant of both OCC Chennai and OCC Bangalore was present. DyAnsys is mainly into manufacturing and marketing low cost medical instruments.
udrool is to launch it's products in first quarter next year. The co founder Vaasu had some great insights into Marketing and why all startups should have a marketing guy in the founding team. Later on in the discussion he provided some great insights. udrool is the company which has a few products in pipeline and has technological offerings from a consumer perspective in the domain of sales, content aggregation etc.
adventure advertising solutions is a web design, graphic design, marketing, brand building company. Harsha one of the co founders is a very passionate guy. These folks have done some good work and are on an expansion path.
Steadfast has a product called Pactus is "an application aimed at SMBs handling product sales, service and contracts. It is an “easy to install and simple to use” CRM Software".We had Anand, one of the co founders with us. An organically built company, now on the way to move to the next level.
flipkart is an online book store. It boasts of pretty huge collection of books and decent UI for the site. The founders Binny, Sachin launched this a few months back and have done a good job so far.
maxheap is a technology company and they are launching a product in the sphere of social networking. They launched a networking site for residents of an apartment by the name Common Floor. An interesting concept of building a site for an existing community of apartment owners. Sumit and Lalit the cofounders have done a good job and with the vision they have for the product let's keep our fingers crossed and watch them.
muziboo is the original music sharing site which promises to get you an audience. Prateek and Nithya, who recently got featured in the Economic Times for their startup, promise to provide an audience for your latent musical talents.
i) Include a Marketing person in your founding team. Most of us being techies ignore this fundamental requirement that we need good marketing folks on the floor. Vaasu has rightly pointed out that there is a huge gap in Indian startups in the founders abilities in technology and marketing.
ii) This is one of the favorite topics around. What social networking models will work out. maxHeap with their CommonFloor is doing something that I haven't seen any of the Indian social networking startups do. Provide a platform for already existing apartment communities. This and ensuing discussion let me to the following key point on what kind of social networks will succeed in India.
Traditionally we in India have a a large number of communities. My family network, my work network, and a lot of other networks. We are different from the western society in that, we are a community based society than an individual based society. However, what we see in the social networking space is companies trying to push the US specific individual social networking platforms in our community based society. What we need is products which identify the problems which existing communities face and address those issues. CommonFloor is one step in this direction. I am sure companies which figure this out will be the ones which will make a mark.
iii) The other hot discussion topic is the gap between the VCs and the angels in India. We all agree that there need to be many more angels than VCs. But most of the discussions stop at discussing this point rather than offering a solution. Vaasu has a very good idea which I feel is very practical and will take the entrepreneurial activity in this country to a different level. For now, read on to see how Vaasu addresses the issue.
"India needs not USD 40 billion VC funds, but 40 million angel funds. There is an imbalance that needs to be addressed in the Indian entrepreneurial ecosystem.
google had a Ram Shriram much before they had a VC. What India needs are many more such Ram(s). Where are they ? There are just a handful of angels or angel firms to to the dozens of VCs. A clear imbalance in the ecosystem.
A proposal should be made to the Govt of India to give a tax break for anyone or any company that makes a seed investment with a ceiling of USD 1 million. Or be even bolder and pardon black money that gets invested as seed capital. The administrative mechanism can work on similar lines of giving money to charity.
This will inspire the many hidden Rams, who want a little incentive to make the leap. Bodies like TIE and NEN can channel these seed funds, as individual angels might not know the process.
We need the support of the press to champion this cause along with Entrepreneurs. Imagine an India bustling with entrepreneurs ! Isn't that what built America ?
Brood over it. A simple but very practical stuff according to me.
I will write about this in more detail in a later post.
The take aways are good, esp the one with the inclusion of marketing guy among the founders.
Do you know Google was turned down by a few VC's before they got funding, and that too for queer reasons.
Very nice description Ram. You just recorded it all. Hats off to you. I must say that you are carrying over this group very nicely.
Beautifully summarized. Just to remind you ramji, lets have an OCC calender.
Interesting writeup Ramjee. Owing to my other priorities, i have been pretty irregular @OCC meets since last 4-6 weeks.
Hope to catch you all at the next OCC next year.
Excellent traction .. have been missing on account of Client Visits for our startup "Outdu"... will catch up in Early Jan.
Did anyone visit TIE Summit 2007 in Delhi?
Thats execellent summary of the things happened at OCC Ramjee.
May be I should remember you..You have not blogged since past 1 month...Where are you?
|
# coding=utf-8
__author__ = 'Anatoli Kalysch'
import pip
import sys
from os import getcwd, system, remove
from shutil import copyfile
def do(action, dependency):
    """Run a pip command for a single dependency.

    @param action pip sub-command, e.g. 'install' or 'uninstall'
    @param dependency name of the package to act on
    @return the pip process exit code (0 on success)
    """
    import subprocess
    # BUG FIX: pip.main() was removed in pip 10; the supported way to drive
    # pip programmatically is to invoke it as `python -m pip ...`.
    return subprocess.call([sys.executable, '-m', 'pip', action, dependency])
def usage():
    """Print the command-line usage string for this setup script."""
    print "Usage: python setup.py <install | uninstall>"
dependencies = ["distorm3", 'idacute']
if __name__ == '__main__':
print '[*] Starting dependency handling!'
stub_name = 'VMAttack_plugin_stub.py'
for dependency in dependencies:
try:
if sys.argv[1] in ["install", "uninstall"]:
retval = do(sys.argv[1], dependency)
else:
retval = do("install", dependency)
if retval == 0:
continue
else:
print '[!] An error occured! Please resolve issues with dependencies and try again.'
except IndexError:
usage()
sys.exit(1)
try:
if sys.argv[1] == 'uninstall':
with open('install_dir') as f:
ida_dir = f.read()
if ida_dir:
remove(ida_dir + stub_name)
sys.exit(0)
except:
pass
print '[*] Setting up environment and installing Plugin.'
# set up environment variable on Windows: setx Framework C:\path\to\Framework\
plugin_dir = getcwd()
system('setx VMAttack %s' % plugin_dir)
# copy stub into the IDA PRO Plugin directory
ida_dir = raw_input('Please input full path to the IDA *plugin* folder (e.g. X:\IDA\plugins\): ')
if not ida_dir.endswith(r'\\'):
ida_dir += r'\\'
with open('install_dir', 'w') as f:
f.write(ida_dir)
copyfile(stub_name, ida_dir+stub_name)
print '[*] Install complete. All Done!'
|
Bright Road Recovery is an outpatient treatment facility for individuals recovering from eating disorders. It is located in beautiful and historic Claremont, California. The facility itself is a picturesque ranch-style colonial building, nestled in idyllic Claremont Village. Bright Road Recovery is in close proximity to the campuses of the Claremont Colleges, and also easily accessible via freeway or train system.
Bright Road Recovery offers treatment plans tailored to the individual needs of each client, with both Partial Hospitalization Programs (PHP) and Intensive Outpatient Programs (IOP) available. The PHP is designed to be a high level of recovery support, while allowing clients the flexibility of continuing to work or remain in school. Clients in the PHP have access to six hours of medical and psychological support every day. During this time, they engage in activities that include individual and group therapy as well as meal support. Individuals in the IOP have access to treatment for three hours on weeknights, and three hours on the weekends. The IOP also includes meal support and two groups.
The staff at Bright Road Recovery is comprised of registered therapists and dietitians, who use a number of methodologies to help clients on the road to recovery. Treatment modalities vary by client but CBT is implemented most often. Care for dual diagnosis of disorders like depression and substance abuse is available. There are no 12-step programs on site. Treatment at Bright Road is ongoing, and generally around three months long.
Bright Road also offers nutrition counseling and meal support programs for individuals looking for a less restrictive form of treatment. Specialized treatment for teens is also available. Canine therapy is a part of the treatment at Bright Road, and clients can enjoy the adorable calming comfort of the facility’s resident dogs.
Bright Road Recovery offers a solid array of evidence-based care, dual diagnosis support and nutritional counseling. Clients ready to address disordered eating in a calm, peaceful setting with flexible programming would do well by seeking assistance through this network.
Bright Road Recovery Cost: $9,600 (30 days, PHP). Reach Bright Road Recovery by phone at (909) 994-1436.
Do you have a complaint or review of Bright Road Recovery to add? Use the comments area below to add your Bright Road Recovery review.
|
from app.db import db
from werkzeug import generate_password_hash
from libs.tools import code_generator
class User(db.Document):
    """Application user stored in MongoDB, Flask-Login compatible."""
    name = db.StringField(required=True)
    state = db.StringField(required=True)
    code = db.StringField(required=True)
    active = db.BooleanField(required=True, default=True)
    password = db.StringField()
    email = db.StringField(required=True)
    rol = db.StringField(required=True)
    protected = db.BooleanField(required=True, default=False)
    deleted = db.BooleanField(required=True, default=False)

    def generate_password(self):
        """Replace the plaintext password with its salted hash, in place."""
        self.password = generate_password_hash(self.password)

    def generate_code(self):
        # BUG FIX (docs): the original docstring was a copy-paste of the
        # password one.
        """Generate a fresh 30-character hex code for this user."""
        self.code = code_generator(size=30, hexdigits=True)

    def is_active(self):
        """True, as all users are active."""
        return True

    def get_id(self):
        """Return the document id as a string to satisfy Flask-Login."""
        return str(self.id)

    def is_authenticated(self):
        """Return True if the user is authenticated.

        BUG FIX: the model defines no `authenticated` field, so the original
        `return self.authenticated` always raised AttributeError. Following
        the Flask-Login convention, real (non-anonymous) users report True.
        """
        return True

    def is_anonymous(self):
        """False, as anonymous users aren't supported."""
        return False
|
M. Q. J. Cars Ltd was founded in 1977, originally as a family business, until Mark Quin-Jarvis decided to venture out on his own in 1980. We concentrate on used vehicles of high quality, with a great dedication to client satisfaction, and have established a reputation for providing friendly advice.
|
#!/usr/bin/env python3
#GI_TYPELIB_PATH=$PREFIX/lib/girepository-1.0/ ./recurrence-type.py
###############################################################################
#
# Copyright (C) 2015 William Yu <williamyu@gnome.org>
#
# This library is free software: you can redistribute it and/or modify it
# under the terms of version 2.1. of the GNU Lesser General Public License
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from gi.repository import ICalGLib
weekday = ICalGLib.RecurrenceType.day_day_of_week (0);
assert (weekday == ICalGLib.RecurrenceTypeWeekday.NO_WEEKDAY);
weekday = ICalGLib.RecurrenceType.day_day_of_week (1);
assert (weekday == ICalGLib.RecurrenceTypeWeekday.SUNDAY_WEEKDAY);
assert (ICalGLib.RecurrenceType.day_position(15) == 1);
assert (ICalGLib.RecurrenceType.day_position(16) == 2);
assert (ICalGLib.RecurrenceType.day_position(25) == 3);
string = "COUNT=10;FREQ=DAILY";
recurrence = ICalGLib.RecurrenceType.from_string (string);
assert (recurrence.as_string_r() == "FREQ=DAILY;COUNT=10");
by_second = recurrence.get_by_second();
# The value is dependent on the libical version.
assert len(by_second) == 61 or len(by_second) == 62;
by_minute = recurrence.get_by_minute();
assert len(by_minute) == 61;
by_hour = recurrence.get_by_hour();
assert len(by_hour) == 25;
by_day = recurrence.get_by_day();
# The value is dependent on the libical version.
assert len(by_day) == 364 or len(by_day) == 386;
by_month_day = recurrence.get_by_month_day();
assert len(by_month_day) == 32;
by_year_day = recurrence.get_by_year_day();
# The value is dependent on the libical version.
assert len(by_year_day) == 367 or len(by_year_day) == 386;
by_week_no = recurrence.get_by_week_no();
# The value is dependent on the libical version.
assert len(by_week_no) == 54 or len(by_week_no) == 56;
by_month = recurrence.get_by_month();
# The value is dependent on the libical version.
assert len(by_month) == 13 or len(by_month) == 14;
by_set_pos = recurrence.get_by_set_pos();
# The value is dependent on the libical version.
assert len(by_set_pos) == 367 or len(by_set_pos) == 386;
recurrence.set_by_second(0, 1);
by_second = recurrence.get_by_second();
assert by_second[0] == 1;
recurrence = ICalGLib.RecurrenceType.from_string (string);
assert (ICalGLib.recur_string_to_weekday ("MO") == ICalGLib.RecurrenceTypeWeekday.MONDAY_WEEKDAY);
start = 100000;
result = ICalGLib.recur_expand_recurrence (string, start, 10);
secs_per_day = 24*60*60;
for i in range (0, 9):
assert (result[i] == start + i*secs_per_day);
string = "19970101T183248Z/19970102T071625Z";
period = ICalGLib.PeriodType.from_string (string);
start = period.get_start();
iter = ICalGLib.RecurIterator.new (recurrence, start);
timetype = iter.next();
day = timetype.get_day();
ref = 1;
while day != 0:
assert (day == ref);
ref += 1;
timetype = iter.next();
day = timetype.get_day();
|
Karen Brown was born in Connecticut. She is the author of a novel, The Longings of Wayward Girls (July 2013), and two short story collections, Pins & Needles (July 2013) and Little Sinners and Other Stories, winner of the Prairie Schooner Book Prize, and named a Best Book of 2012 by Publishers Weekly. Her work has been featured in The PEN/O. Henry Prize Stories, Best American Short Stories, The New York Times, and Good Housekeeping, and in many literary journals. She teaches creative writing and literature at the University of South Florida.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011,2012,2013 American Registry for Internet Numbers
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
# IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Generated Sat Aug 31 15:00:09 2013 by generateDS.py version 2.10a.
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
# Select the best available ElementTree implementation at import time:
# lxml first, then the stdlib C/pure-Python trees, then the legacy
# standalone elementtree packages.
etree_ = None
Verbose_import_ = False
(
    XMLParser_import_none, XMLParser_import_lxml,
    XMLParser_import_elementtree
) = range(3)
# Records which of the candidate libraries was actually imported.
XMLParser_import_library = None
try:
    # lxml
    from lxml import etree as etree_
    XMLParser_import_library = XMLParser_import_lxml
    if Verbose_import_:
        print("running with lxml.etree")
except ImportError:
    try:
        # cElementTree from Python 2.5+
        import xml.etree.cElementTree as etree_
        XMLParser_import_library = XMLParser_import_elementtree
        if Verbose_import_:
            print("running with cElementTree on Python 2.5+")
    except ImportError:
        try:
            # ElementTree from Python 2.5+
            import xml.etree.ElementTree as etree_
            XMLParser_import_library = XMLParser_import_elementtree
            if Verbose_import_:
                print("running with ElementTree on Python 2.5+")
        except ImportError:
            try:
                # normal cElementTree install
                import cElementTree as etree_
                XMLParser_import_library = XMLParser_import_elementtree
                if Verbose_import_:
                    print("running with cElementTree")
            except ImportError:
                try:
                    # normal ElementTree install
                    import elementtree.ElementTree as etree_
                    XMLParser_import_library = XMLParser_import_elementtree
                    if Verbose_import_:
                        print("running with ElementTree")
                except ImportError:
                    raise ImportError(
                        "Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document with the ElementTree library selected above.

    When lxml is in use and the caller did not supply a parser, the
    ElementTree-compatible parser is substituted so that comments are
    ignored, matching plain ElementTree behavior.
    """
    use_lxml = XMLParser_import_library == XMLParser_import_lxml
    if use_lxml and 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
    from generatedssuper import GeneratedsSuper
except ImportError:
    # No site-specific override module available; define the default
    # superclass for all generated binding classes. It provides the
    # format/validate/parse helpers that the generated code calls.
    class GeneratedsSuper(object):
        """Default base class supplying gds_* helpers for generated bindings.

        The gds_format_* methods render Python values as XML text, the
        gds_validate_* methods sanity-check parsed text (calling
        raise_parse_error on failure), and the gds_parse_* classmethods
        convert XML date/time strings to datetime objects.
        """
        # Matches a trailing timezone offset such as '+05:30' or '-14:00'.
        tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')

        class _FixedOffsetTZ(datetime_.tzinfo):
            # Minimal fixed-offset tzinfo implementation (this generated
            # code targets Pythons without datetime.timezone).
            def __init__(self, offset, name):
                self.__offset = datetime_.timedelta(minutes=offset)
                self.__name = name

            def utcoffset(self, dt):
                return self.__offset

            def tzname(self, dt):
                return self.__name

            def dst(self, dt):
                return None

        def gds_format_string(self, input_data, input_name=''):
            return input_data

        def gds_validate_string(self, input_data, node, input_name=''):
            return input_data

        def gds_format_base64(self, input_data, input_name=''):
            return base64.b64encode(input_data)

        def gds_validate_base64(self, input_data, node, input_name=''):
            return input_data

        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data

        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data

        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_integer_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    # Fix: the generated original used float() here, which
                    # silently accepted non-integers such as '1.5'.
                    int(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of integers')
            return input_data

        def gds_format_float(self, input_data, input_name=''):
            return '%f' % input_data

        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data

        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data

        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data

        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data

        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data

        def gds_format_boolean(self, input_data, input_name=''):
            # XML Schema booleans are lowercase ('true'/'false').
            return ('%s' % input_data).lower()

        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data

        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data

        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(
                        node,
                        'Requires sequence of booleans '
                        '("true", "1", "false", "0")')
            return input_data

        def gds_validate_datetime(self, input_data, node, input_name=''):
            return input_data

        def gds_format_datetime(self, input_data, input_name=''):
            """Render a datetime as ISO 8601, with fractional seconds and a
            'Z' / '+HH:MM' / '-HH:MM' suffix only when applicable."""
            if input_data.microsecond == 0:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    # Fractional seconds without the leading '0.'.
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue

        @classmethod
        def gds_parse_datetime(cls, input_data):
            """Parse an ISO 8601 datetime string, honoring a trailing 'Z'
            or '+HH:MM'/'-HH:MM' timezone offset."""
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            if len(input_data.split('.')) > 1:
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt

        def gds_validate_date(self, input_data, node, input_name=''):
            return input_data

        def gds_format_date(self, input_data, input_name=''):
            _svalue = '%04d-%02d-%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
            )
            try:
                # datetime.date has no tzinfo attribute; the AttributeError
                # path makes this work for both date and datetime inputs.
                if input_data.tzinfo is not None:
                    tzoff = input_data.tzinfo.utcoffset(input_data)
                    if tzoff is not None:
                        total_seconds = tzoff.seconds + (86400 * tzoff.days)
                        if total_seconds == 0:
                            _svalue += 'Z'
                        else:
                            if total_seconds < 0:
                                _svalue += '-'
                                total_seconds *= -1
                            else:
                                _svalue += '+'
                            hours = total_seconds // 3600
                            minutes = (total_seconds - (hours * 3600)) // 60
                            _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            except AttributeError:
                pass
            return _svalue

        @classmethod
        def gds_parse_date(cls, input_data):
            """Parse an ISO 8601 date string, honoring a trailing timezone
            marker, and return a datetime.date."""
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
            dt = dt.replace(tzinfo=tz)
            return dt.date()

        def gds_validate_time(self, input_data, node, input_name=''):
            return input_data

        def gds_format_time(self, input_data, input_name=''):
            if input_data.microsecond == 0:
                _svalue = '%02d:%02d:%02d' % (
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%02d:%02d:%02d.%s' % (
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue

        @classmethod
        def gds_parse_time(cls, input_data):
            """Parse an ISO 8601 time string and return a datetime.time."""
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            if len(input_data.split('.')) > 1:
                dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt.time()

        def gds_str_lower(self, instring):
            return instring.lower()

        def get_path_(self, node):
            """Build a '/'-separated path from the document root to node."""
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        # Strips a Clark-notation '{namespace}' prefix from a tag name.
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')

        def get_path_list_(self, node, path_list):
            # NOTE(review): node.getparent() is an lxml API; plain
            # ElementTree elements do not provide it -- confirm callers.
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)

        def get_class_obj_(self, node, default_class=None):
            """Resolve an xsi:type attribute to a class in this module,
            falling back to default_class (lxml nsmap required)."""
            class_obj1 = default_class
            if 'xsi' in node.nsmap:
                classname = node.get('{%s}type' % node.nsmap['xsi'])
                if classname is not None:
                    names = classname.split(':')
                    if len(names) == 2:
                        classname = names[1]
                    class_obj2 = globals().get(classname)
                    if class_obj2 is not None:
                        class_obj1 = class_obj2
            return class_obj1

        def gds_build_any(self, node, type_name=None):
            # Hook for xs:any content; the default implementation ignores it.
            return None

        @classmethod
        def gds_reverse_node_mapping(cls, mapping):
            # .items() works on both Python 2 and 3 (iteritems() is 2-only).
            return dict(((v, k) for k, v in mapping.items()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
##     banner = 'Dropping into IPython',
##     exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
#    ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Encoding applied when exported text is .encode()d for output.
ExternalEncoding = 'ascii'
# Splits a Clark-notation tag '{uri}local'; the last group is the local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Whitespace-run pattern; its use is not visible in this part of the file.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures (namespace URI, local name) from a Clark-notation tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write `level` four-space indent units to outfile.

    Does nothing when pretty-printing is disabled.
    """
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape &, < and > in `inStr` for use as XML element text.

    Non-string input is first converted with '%s' formatting; falsy input
    yields the empty string.
    """
    if not inStr:
        return ''
    # `basestring` exists only on Python 2; fall back to `str` elsewhere.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    s1 = inStr if isinstance(inStr, string_types) else '%s' % inStr
    # Fix: the generated text had these as no-op replacements
    # (e.g. replace('&', '&')); restore the intended XML entity escaping.
    # '&' must be escaped first so it does not re-escape the entities.
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape `inStr` and wrap it in quotes for use as an XML attribute.

    Prefers double quotes; uses single quotes when the value contains a
    double quote, and falls back to &quot;-escaping when both quote
    characters are present.
    """
    # `basestring` exists only on Python 2; fall back to `str` elsewhere.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    s1 = inStr if isinstance(inStr, string_types) else '%s' % inStr
    # Fix: the generated text had these as no-op replacements; restore the
    # intended XML entity escaping ('&' first to avoid double-escaping).
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            # Both quote styles present: escape double quotes instead.
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return `inStr` rendered as a Python string literal.

    Chooses single or double quoting based on which quote characters the
    text contains, and triple-quoting when it spans multiple lines.
    """
    text = inStr
    if "'" not in text:
        return "'''%s'''" % text if '\n' in text else "'%s'" % text
    # Text contains a single quote: switch to double quoting, escaping any
    # embedded double quotes.
    if '"' in text:
        text = text.replace('"', '\\"')
    return '"""%s"""' % text if '\n' in text else '"%s"' % text
def get_all_text_(node):
    """Collect the text immediately under `node`: its .text followed by
    each child's .tail, in document order."""
    pieces = [node.text] if node.text is not None else []
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up an attribute on `node`, resolving an optional 'prefix:name'
    qualifier through the node's namespace map.

    Returns None when the attribute is absent or the prefix is unknown.
    NOTE: the prefixed path relies on node.nsmap, an lxml-only API.
    """
    parts = attr_name.split(':')
    if len(parts) == 1:
        return node.attrib.get(attr_name)
    if len(parts) == 2:
        prefix, name = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return node.attrib.get('{%s}%s' % (uri, name, ))
    return None
class GDSParseError(Exception):
    """Raised by raise_parse_error() when parsed XML content is invalid."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for `node`, appending element (and, under lxml,
    source-line) information to `msg`."""
    if XMLParser_import_library == XMLParser_import_lxml:
        # Only lxml elements carry a .sourceline attribute.
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Holds one unit of mixed XML content: raw text, a simple typed value,
    or a nested complex object, tagged with category/content_type codes.

    Used by generated classes to preserve the ordering of mixed content.
    """
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
    def __init__(self, category, content_type, name, value):
        # category/content_type are the class constants above; name is the
        # element name (empty for plain text); value is the payload.
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type argument is ignored; the stored
        # value is returned -- likely a generator quirk, confirm callers.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace, pretty_print=True):
        """Write this content unit to outfile as XML text."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            # NOTE(review): namespace and name are passed in the opposite
            # order from this method's own signature -- verify against the
            # generated classes' export(outfile, level, namespace_, name_).
            self.value.export(outfile, level, namespace, name, pretty_print)
    def exportSimple(self, outfile, level, name):
        # Render a simple typed value as <name>value</name>, formatting
        # according to the stored content_type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeBase64:
            outfile.write('<%s>%s</%s>' % (
                self.name, base64.b64encode(self.value), self.name))
    def to_etree(self, element):
        """Attach this content unit to an ElementTree element, appending
        text to the correct .text/.tail slot to preserve ordering."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                if len(element) > 0:
                    if element[-1].tail is None:
                        element[-1].tail = self.value
                    else:
                        element[-1].tail += self.value
                else:
                    if element.text is None:
                        element.text = self.value
                    else:
                        element.text += self.value
        elif self.category == MixedContainer.CategorySimple:
            subelement = etree_.SubElement(element, '%s' % self.name)
            subelement.text = self.to_etree_simple()
        else:    # category == MixedContainer.CategoryComplex
            self.value.to_etree(element)
    def to_etree_simple(self):
        # Format the simple value as text, mirroring exportSimple().
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif (self.content_type == MixedContainer.TypeInteger or
                self.content_type == MixedContainer.TypeBoolean):
            text = '%d' % self.value
        elif (self.content_type == MixedContainer.TypeFloat or
                self.content_type == MixedContainer.TypeDecimal):
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        return text
    def exportLiteral(self, outfile, level, name):
        """Write this content unit as a Python constructor expression."""
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s",\n' % (
                    self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Describes one member of a generated class: its name, its XML data
    type (a single type or a chain of types), and a container flag."""

    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_data_type(self, data_type):
        self.data_type = data_type

    def get_data_type_chain(self):
        # Returns the raw data_type, which may be a list of types.
        return self.data_type

    def get_data_type(self):
        """Return the effective type: the last of a type chain, or
        'xs:string' when the chain is empty."""
        chain = self.data_type
        if not isinstance(chain, list):
            return chain
        return chain[-1] if chain else 'xs:string'

    def set_container(self, container):
        self.container = container

    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class poc(GeneratedsSuper):
    """Generated binding for the `poc` (point of contact) XML element.

    Fields mirror the child elements handled by buildChildren() below;
    registrationDate/updateDate are datetimes, asns/iso3166_1/emails/nets/
    orgs/phones/comment/streetAddress are nested generated objects, and the
    remaining fields are plain strings. termsOfUse is an XML attribute.
    """
    # Hook for subclassing via factory(); see generateDS documentation.
    subclass = None
    superclass = None
    def __init__(self, termsOfUse=None, registrationDate=None, ref=None, note=None, asns=None, city=None, companyName=None, iso3166_1=None, firstName=None, handle=None, lastName=None, emails=None, middleName=None, nets=None, orgs=None, phones=None, postalCode=None, comment=None, iso3166_2=None, streetAddress=None, updateDate=None, anytypeobjs_=None):
        self.termsOfUse = _cast(None, termsOfUse)
        # Date fields accept either a datetime or an ISO 8601 string
        # (note: string form without timezone suffix only).
        if isinstance(registrationDate, basestring):
            initvalue_ = datetime_.datetime.strptime(registrationDate, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = registrationDate
        self.registrationDate = initvalue_
        self.ref = ref
        self.note = note
        self.asns = asns
        self.city = city
        self.companyName = companyName
        self.iso3166_1 = iso3166_1
        self.firstName = firstName
        self.handle = handle
        self.lastName = lastName
        self.emails = emails
        self.middleName = middleName
        self.nets = nets
        self.orgs = orgs
        self.phones = phones
        self.postalCode = postalCode
        self.comment = comment
        self.iso3166_2 = iso3166_2
        self.streetAddress = streetAddress
        if isinstance(updateDate, basestring):
            initvalue_ = datetime_.datetime.strptime(updateDate, '%Y-%m-%dT%H:%M:%S')
        else:
            initvalue_ = updateDate
        self.updateDate = initvalue_
        self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed.
        if poc.subclass:
            return poc.subclass(*args_, **kwargs_)
        else:
            return poc(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors -------------------------------------------
    def get_registrationDate(self): return self.registrationDate
    def set_registrationDate(self, registrationDate): self.registrationDate = registrationDate
    def get_ref(self): return self.ref
    def set_ref(self, ref): self.ref = ref
    def get_note(self): return self.note
    def set_note(self, note): self.note = note
    def get_asns(self): return self.asns
    def set_asns(self, asns): self.asns = asns
    def get_city(self): return self.city
    def set_city(self, city): self.city = city
    def get_companyName(self): return self.companyName
    def set_companyName(self, companyName): self.companyName = companyName
    def get_iso3166_1(self): return self.iso3166_1
    def set_iso3166_1(self, iso3166_1): self.iso3166_1 = iso3166_1
    def get_firstName(self): return self.firstName
    def set_firstName(self, firstName): self.firstName = firstName
    def get_handle(self): return self.handle
    def set_handle(self, handle): self.handle = handle
    def get_lastName(self): return self.lastName
    def set_lastName(self, lastName): self.lastName = lastName
    def get_emails(self): return self.emails
    def set_emails(self, emails): self.emails = emails
    def get_middleName(self): return self.middleName
    def set_middleName(self, middleName): self.middleName = middleName
    def get_nets(self): return self.nets
    def set_nets(self, nets): self.nets = nets
    def get_orgs(self): return self.orgs
    def set_orgs(self, orgs): self.orgs = orgs
    def get_phones(self): return self.phones
    def set_phones(self, phones): self.phones = phones
    def get_postalCode(self): return self.postalCode
    def set_postalCode(self, postalCode): self.postalCode = postalCode
    def get_comment(self): return self.comment
    def set_comment(self, comment): self.comment = comment
    def get_iso3166_2(self): return self.iso3166_2
    def set_iso3166_2(self, iso3166_2): self.iso3166_2 = iso3166_2
    def get_streetAddress(self): return self.streetAddress
    def set_streetAddress(self, streetAddress): self.streetAddress = streetAddress
    def get_updateDate(self): return self.updateDate
    def set_updateDate(self, updateDate): self.updateDate = updateDate
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def get_termsOfUse(self): return self.termsOfUse
    def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
    def hasContent_(self):
        # True when any child element would be emitted (attributes alone
        # do not count as content).
        if (
            self.registrationDate is not None or
            self.ref is not None or
            self.note is not None or
            self.asns is not None or
            self.city is not None or
            self.companyName is not None or
            self.iso3166_1 is not None or
            self.firstName is not None or
            self.handle is not None or
            self.lastName is not None or
            self.emails is not None or
            self.middleName is not None or
            self.nets is not None or
            self.orgs is not None or
            self.phones is not None or
            self.postalCode is not None or
            self.comment is not None or
            self.iso3166_2 is not None or
            self.streetAddress is not None or
            self.updateDate is not None or
            self.anytypeobjs_ is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='poc', namespacedef_='', pretty_print=True):
        """Write this object (opening tag, attributes, children, closing
        tag) as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='poc')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Self-closing tag when there are no child elements.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='poc'):
        if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
    def exportChildren(self, outfile, level, namespace_='v1:', name_='poc', fromsubclass_=False, pretty_print=True):
        # Emit each non-None field in schema order; simple fields inline,
        # complex fields via their own export().
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.registrationDate is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sregistrationDate>%s</%sregistrationDate>%s' % (namespace_, self.gds_format_datetime(self.registrationDate, input_name='registrationDate'), namespace_, eol_))
        if self.ref is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sref>%s</%sref>%s' % (namespace_, self.gds_format_string(quote_xml(self.ref).encode(ExternalEncoding), input_name='ref'), namespace_, eol_))
        if self.note is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%snote>%s</%snote>%s' % (namespace_, self.gds_format_string(quote_xml(self.note).encode(ExternalEncoding), input_name='note'), namespace_, eol_))
        if self.asns is not None:
            self.asns.export(outfile, level, namespace_, name_='asns', pretty_print=pretty_print)
        if self.city is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%scity>%s</%scity>%s' % (namespace_, self.gds_format_string(quote_xml(self.city).encode(ExternalEncoding), input_name='city'), namespace_, eol_))
        if self.companyName is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%scompanyName>%s</%scompanyName>%s' % (namespace_, self.gds_format_string(quote_xml(self.companyName).encode(ExternalEncoding), input_name='companyName'), namespace_, eol_))
        if self.iso3166_1 is not None:
            self.iso3166_1.export(outfile, level, namespace_, name_='iso3166-1', pretty_print=pretty_print)
        if self.firstName is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sfirstName>%s</%sfirstName>%s' % (namespace_, self.gds_format_string(quote_xml(self.firstName).encode(ExternalEncoding), input_name='firstName'), namespace_, eol_))
        if self.handle is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%shandle>%s</%shandle>%s' % (namespace_, self.gds_format_string(quote_xml(self.handle).encode(ExternalEncoding), input_name='handle'), namespace_, eol_))
        if self.lastName is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%slastName>%s</%slastName>%s' % (namespace_, self.gds_format_string(quote_xml(self.lastName).encode(ExternalEncoding), input_name='lastName'), namespace_, eol_))
        if self.emails is not None:
            self.emails.export(outfile, level, namespace_, name_='emails', pretty_print=pretty_print)
        if self.middleName is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%smiddleName>%s</%smiddleName>%s' % (namespace_, self.gds_format_string(quote_xml(self.middleName).encode(ExternalEncoding), input_name='middleName'), namespace_, eol_))
        if self.nets is not None:
            self.nets.export(outfile, level, namespace_, name_='nets', pretty_print=pretty_print)
        if self.orgs is not None:
            self.orgs.export(outfile, level, namespace_, name_='orgs', pretty_print=pretty_print)
        if self.phones is not None:
            self.phones.export(outfile, level, namespace_, name_='phones', pretty_print=pretty_print)
        if self.postalCode is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%spostalCode>%s</%spostalCode>%s' % (namespace_, self.gds_format_string(quote_xml(self.postalCode).encode(ExternalEncoding), input_name='postalCode'), namespace_, eol_))
        if self.comment is not None:
            self.comment.export(outfile, level, namespace_, name_='comment', pretty_print=pretty_print)
        if self.iso3166_2 is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%siso3166-2>%s</%siso3166-2>%s' % (namespace_, self.gds_format_string(quote_xml(self.iso3166_2).encode(ExternalEncoding), input_name='iso3166-2'), namespace_, eol_))
        if self.streetAddress is not None:
            self.streetAddress.export(outfile, level, namespace_, name_='streetAddress', pretty_print=pretty_print)
        if self.updateDate is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%supdateDate>%s</%supdateDate>%s' % (namespace_, self.gds_format_datetime(self.updateDate, input_name='updateDate'), namespace_, eol_))
        if self.anytypeobjs_ is not None:
            # NOTE(review): no name_ is passed here, so the child's default
            # element name is used -- confirm this matches the schema.
            self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='poc'):
        """Write this object as a Python constructor-argument listing."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            showIndent(outfile, level)
            outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.registrationDate is not None:
            showIndent(outfile, level)
            outfile.write('registrationDate=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.registrationDate, input_name='registrationDate'))
        if self.ref is not None:
            showIndent(outfile, level)
            outfile.write('ref=%s,\n' % quote_python(self.ref).encode(ExternalEncoding))
        if self.note is not None:
            showIndent(outfile, level)
            outfile.write('note=%s,\n' % quote_python(self.note).encode(ExternalEncoding))
        if self.asns is not None:
            showIndent(outfile, level)
            outfile.write('asns=model_.asns(\n')
            self.asns.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.city is not None:
            showIndent(outfile, level)
            outfile.write('city=%s,\n' % quote_python(self.city).encode(ExternalEncoding))
        if self.companyName is not None:
            showIndent(outfile, level)
            outfile.write('companyName=%s,\n' % quote_python(self.companyName).encode(ExternalEncoding))
        if self.iso3166_1 is not None:
            showIndent(outfile, level)
            outfile.write('iso3166_1=model_.iso3166_1(\n')
            self.iso3166_1.exportLiteral(outfile, level, name_='iso3166_1')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.firstName is not None:
            showIndent(outfile, level)
            outfile.write('firstName=%s,\n' % quote_python(self.firstName).encode(ExternalEncoding))
        if self.handle is not None:
            showIndent(outfile, level)
            outfile.write('handle=%s,\n' % quote_python(self.handle).encode(ExternalEncoding))
        if self.lastName is not None:
            showIndent(outfile, level)
            outfile.write('lastName=%s,\n' % quote_python(self.lastName).encode(ExternalEncoding))
        if self.emails is not None:
            showIndent(outfile, level)
            outfile.write('emails=model_.emails(\n')
            self.emails.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.middleName is not None:
            showIndent(outfile, level)
            outfile.write('middleName=%s,\n' % quote_python(self.middleName).encode(ExternalEncoding))
        if self.nets is not None:
            showIndent(outfile, level)
            outfile.write('nets=model_.nets(\n')
            self.nets.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.orgs is not None:
            showIndent(outfile, level)
            outfile.write('orgs=model_.orgs(\n')
            self.orgs.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.phones is not None:
            showIndent(outfile, level)
            outfile.write('phones=model_.phones(\n')
            self.phones.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.postalCode is not None:
            showIndent(outfile, level)
            outfile.write('postalCode=%s,\n' % quote_python(self.postalCode).encode(ExternalEncoding))
        if self.comment is not None:
            showIndent(outfile, level)
            outfile.write('comment=model_.comment(\n')
            self.comment.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.iso3166_2 is not None:
            showIndent(outfile, level)
            outfile.write('iso3166_2=%s,\n' % quote_python(self.iso3166_2).encode(ExternalEncoding))
        if self.streetAddress is not None:
            showIndent(outfile, level)
            outfile.write('streetAddress=model_.streetAddress(\n')
            self.streetAddress.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.updateDate is not None:
            showIndent(outfile, level)
            outfile.write('updateDate=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.updateDate, input_name='updateDate'))
        if self.anytypeobjs_ is not None:
            showIndent(outfile, level)
            outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
            self.anytypeobjs_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from a parsed XML element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # groups()[-1] is the tag's local name without its namespace.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('termsOfUse', node)
        if value is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            self.termsOfUse = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the child element's local name; complex children are
        # built via their generated classes' factory()/build().
        if nodeName_ == 'registrationDate':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.registrationDate = dval_
        elif nodeName_ == 'ref':
            ref_ = child_.text
            ref_ = self.gds_validate_string(ref_, node, 'ref')
            self.ref = ref_
        elif nodeName_ == 'note':
            note_ = child_.text
            note_ = self.gds_validate_string(note_, node, 'note')
            self.note = note_
        elif nodeName_ == 'asns':
            obj_ = asns.factory()
            obj_.build(child_)
            self.set_asns(obj_)
        elif nodeName_ == 'city':
            city_ = child_.text
            city_ = self.gds_validate_string(city_, node, 'city')
            self.city = city_
        elif nodeName_ == 'companyName':
            companyName_ = child_.text
            companyName_ = self.gds_validate_string(companyName_, node, 'companyName')
            self.companyName = companyName_
        elif nodeName_ == 'iso3166-1':
            obj_ = iso3166_1.factory()
            obj_.build(child_)
            self.set_iso3166_1(obj_)
        elif nodeName_ == 'firstName':
            firstName_ = child_.text
            firstName_ = self.gds_validate_string(firstName_, node, 'firstName')
            self.firstName = firstName_
        elif nodeName_ == 'handle':
            handle_ = child_.text
            handle_ = self.gds_validate_string(handle_, node, 'handle')
            self.handle = handle_
        elif nodeName_ == 'lastName':
            lastName_ = child_.text
            lastName_ = self.gds_validate_string(lastName_, node, 'lastName')
            self.lastName = lastName_
        elif nodeName_ == 'emails':
            obj_ = emails.factory()
            obj_.build(child_)
            self.set_emails(obj_)
        elif nodeName_ == 'middleName':
            middleName_ = child_.text
            middleName_ = self.gds_validate_string(middleName_, node, 'middleName')
            self.middleName = middleName_
        elif nodeName_ == 'nets':
            obj_ = nets.factory()
            obj_.build(child_)
            self.set_nets(obj_)
        elif nodeName_ == 'orgs':
            obj_ = orgs.factory()
            obj_.build(child_)
            self.set_orgs(obj_)
        elif nodeName_ == 'phones':
            # NOTE(review): builds a `phone` object (singular) for the
            # 'phones' element, unlike the other plural children -- verify
            # against the schema / the phone class definition.
            obj_ = phone.factory()
            obj_.build(child_)
            self.set_phones(obj_)
        elif nodeName_ == 'postalCode':
            postalCode_ = child_.text
            postalCode_ = self.gds_validate_string(postalCode_, node, 'postalCode')
            self.postalCode = postalCode_
        elif nodeName_ == 'comment':
            obj_ = comment.factory()
            obj_.build(child_)
            self.set_comment(obj_)
        elif nodeName_ == 'iso3166-2':
            iso3166_2_ = child_.text
            iso3166_2_ = self.gds_validate_string(iso3166_2_, node, 'iso3166_2')
            self.iso3166_2 = iso3166_2_
        elif nodeName_ == 'streetAddress':
            obj_ = streetAddress.factory()
            obj_.build(child_)
            self.set_streetAddress(obj_)
        elif nodeName_ == 'updateDate':
            sval_ = child_.text
            dval_ = self.gds_parse_datetime(sval_)
            self.updateDate = dval_
        else:
            # Unrecognized children fall through to the xs:any hook.
            obj_ = self.gds_build_any(child_, 'poc')
            if obj_ is not None:
                self.set_anytypeobjs_(obj_)
# end class poc
class asns(GeneratedsSuper):
    """Generated binding for the <asns> element.

    Holds an optional limitExceeded marker, an optional asnRef child
    and a wildcard (anyType) child, plus a termsOfUse XML attribute.

    NOTE(review): generateDS-style auto-generated code -- manual edits
    may be overwritten if the bindings are regenerated.
    """
    subclass = None
    superclass = None
    def __init__(self, termsOfUse=None, limitExceeded=None, asnRef=None, anytypeobjs_=None):
        # termsOfUse is an XML attribute; the rest are child elements.
        self.termsOfUse = _cast(None, termsOfUse)
        self.limitExceeded = limitExceeded
        self.asnRef = asnRef
        self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been plugged in.
        if asns.subclass:
            return asns.subclass(*args_, **kwargs_)
        else:
            return asns(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors (generated API -- kept for backward compatibility).
    def get_limitExceeded(self): return self.limitExceeded
    def set_limitExceeded(self, limitExceeded): self.limitExceeded = limitExceeded
    def get_asnRef(self): return self.asnRef
    def set_asnRef(self, asnRef): self.asnRef = asnRef
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def get_termsOfUse(self): return self.termsOfUse
    def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
    def hasContent_(self):
        """Return True when any child element is populated (the
        termsOfUse attribute alone does not count as content)."""
        if (
            self.limitExceeded is not None or
            self.asnRef is not None or
            self.anytypeobjs_ is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='asns', namespacedef_='', pretty_print=True):
        """Serialize this element and its children as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='asns')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='asns'):
        # already_processed guards against double emission when
        # subclasses chain up through this method.
        if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
    def exportChildren(self, outfile, level, namespace_='v1:', name_='asns', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.limitExceeded is not None:
            self.limitExceeded.export(outfile, level, namespace_, name_='limitExceeded', pretty_print=pretty_print)
        if self.asnRef is not None:
            self.asnRef.export(outfile, level, namespace_, name_='asnRef', pretty_print=pretty_print)
        if self.anytypeobjs_ is not None:
            self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='asns'):
        """Write this object as Python constructor-literal syntax."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            showIndent(outfile, level)
            outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.limitExceeded is not None:
            showIndent(outfile, level)
            outfile.write('limitExceeded=model_.limitExceeded(\n')
            self.limitExceeded.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.asnRef is not None:
            showIndent(outfile, level)
            outfile.write('asnRef=model_.asnRef(\n')
            self.asnRef.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.anytypeobjs_ is not None:
            showIndent(outfile, level)
            outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
            self.anytypeobjs_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('termsOfUse', node)
        if value is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            self.termsOfUse = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'limitExceeded':
            obj_ = limitExceeded.factory()
            obj_.build(child_)
            self.set_limitExceeded(obj_)
        elif nodeName_ == 'asnRef':
            obj_ = asnRef.factory()
            obj_.build(child_)
            self.set_asnRef(obj_)
        else:
            # Unrecognized tags land in the wildcard (anyType) slot.
            obj_ = self.gds_build_any(child_, 'asns')
            if obj_ is not None:
                self.set_anytypeobjs_(obj_)
# end class asns
class iso3166_1(GeneratedsSuper):
    """Generated binding for the <iso3166-1> element: an ISO 3166-1
    country entry with two/three letter codes, a name and an E.164
    telephone prefix, all as simple string children.
    """
    subclass = None
    superclass = None
    def __init__(self, code2=None, code3=None, name=None, e164=None):
        self.code2 = code2
        self.code3 = code3
        self.name = name
        self.e164 = e164
    def factory(*args_, **kwargs_):
        # Honor a registered subclass override, if present.
        cls = iso3166_1.subclass or iso3166_1
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_code2(self):
        return self.code2
    def set_code2(self, code2):
        self.code2 = code2
    def get_code3(self):
        return self.code3
    def set_code3(self, code3):
        self.code3 = code3
    def get_name(self):
        return self.name
    def set_name(self, name):
        self.name = name
    def get_e164(self):
        return self.e164
    def set_e164(self, e164):
        self.e164 = e164
    def hasContent_(self):
        # Content exists as soon as any child element is set.
        return any(
            getattr(self, tag) is not None
            for tag in ('code2', 'code3', 'name', 'e164')
        )
    def export(self, outfile, level, namespace_='v1:', name_='iso3166-1', namespacedef_='', pretty_print=True):
        """Serialize this element and its children as XML to outfile."""
        eol_ = '\n' if pretty_print else ''
        showIndent(outfile, level, pretty_print)
        extra_ = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra_))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='iso3166-1')
        if not self.hasContent_():
            # Nothing inside: self-closing tag.
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='iso3166-1'):
        # <iso3166-1> carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='v1:', name_='iso3166-1', fromsubclass_=False, pretty_print=True):
        eol_ = '\n' if pretty_print else ''
        # Emit the simple-text children in schema order.
        for tag in ('code2', 'code3', 'name', 'e164'):
            value = getattr(self, tag)
            if value is None:
                continue
            showIndent(outfile, level, pretty_print)
            text_ = self.gds_format_string(quote_xml(value).encode(ExternalEncoding), input_name=tag)
            outfile.write('<%s%s>%s</%s%s>%s' % (namespace_, tag, text_, namespace_, tag, eol_))
    def exportLiteral(self, outfile, level, name_='iso3166-1'):
        """Write this object as Python constructor-literal syntax."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        for tag in ('code2', 'code3', 'name', 'e164'):
            value = getattr(self, tag)
            if value is not None:
                showIndent(outfile, level)
                outfile.write('%s=%s,\n' % (tag, quote_python(value).encode(ExternalEncoding)))
    def build(self, node):
        """Populate this object from an ElementTree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for element_ in node:
            # Strip any namespace prefix from the tag before dispatch.
            tag_ = Tag_pattern_.match(element_.tag).groups()[-1]
            self.buildChildren(element_, node, tag_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # All four children are plain validated strings; anything else
        # is silently ignored (matches the generated schema binding).
        if nodeName_ in ('code2', 'code3', 'name', 'e164'):
            text_ = self.gds_validate_string(child_.text, node, nodeName_)
            setattr(self, nodeName_, text_)
# end class iso3166_1
class emails(GeneratedsSuper):
    """Generated binding for the <emails> element: a flat list of
    e-mail address strings serialized as repeated <email> children.
    """
    subclass = None
    superclass = None
    def __init__(self, email=None):
        # Fresh list per instance; never share a mutable default.
        self.email = [] if email is None else email
    def factory(*args_, **kwargs_):
        # Honor a registered subclass override, if present.
        cls = emails.subclass or emails
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_email(self):
        return self.email
    def set_email(self, email):
        self.email = email
    def add_email(self, value):
        self.email.append(value)
    def insert_email(self, index, value):
        # NOTE: replaces the entry at *index* (generated-API quirk).
        self.email[index] = value
    def hasContent_(self):
        return bool(self.email)
    def export(self, outfile, level, namespace_='v1:', name_='emails', namespacedef_='', pretty_print=True):
        """Serialize this element and its children as XML to outfile."""
        eol_ = '\n' if pretty_print else ''
        showIndent(outfile, level, pretty_print)
        extra_ = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra_))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='emails')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='emails'):
        # <emails> carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='v1:', name_='emails', fromsubclass_=False, pretty_print=True):
        eol_ = '\n' if pretty_print else ''
        for address_ in self.email:
            showIndent(outfile, level, pretty_print)
            text_ = self.gds_format_string(quote_xml(address_).encode(ExternalEncoding), input_name='email')
            outfile.write('<%semail>%s</%semail>%s' % (namespace_, text_, namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='emails'):
        """Write this object as Python constructor-literal syntax."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('email=[\n')
        for address_ in self.email:
            showIndent(outfile, level + 1)
            outfile.write('%s,\n' % quote_python(address_).encode(ExternalEncoding))
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for element_ in node:
            tag_ = Tag_pattern_.match(element_.tag).groups()[-1]
            self.buildChildren(element_, node, tag_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'email':
            self.email.append(self.gds_validate_string(child_.text, node, 'email'))
# end class emails
class nets(GeneratedsSuper):
    """Generated binding for the <nets> element.

    Holds an optional limitExceeded marker, an optional netRef child
    and a wildcard (anyType) child, plus a termsOfUse XML attribute.
    Mirrors the structure of the asns/orgs collection classes.

    NOTE(review): generateDS-style auto-generated code -- manual edits
    may be overwritten if the bindings are regenerated.
    """
    subclass = None
    superclass = None
    def __init__(self, termsOfUse=None, limitExceeded=None, netRef=None, anytypeobjs_=None):
        # termsOfUse is an XML attribute; the rest are child elements.
        self.termsOfUse = _cast(None, termsOfUse)
        self.limitExceeded = limitExceeded
        self.netRef = netRef
        self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been plugged in.
        if nets.subclass:
            return nets.subclass(*args_, **kwargs_)
        else:
            return nets(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors (generated API -- kept for backward compatibility).
    def get_limitExceeded(self): return self.limitExceeded
    def set_limitExceeded(self, limitExceeded): self.limitExceeded = limitExceeded
    def get_netRef(self): return self.netRef
    def set_netRef(self, netRef): self.netRef = netRef
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def get_termsOfUse(self): return self.termsOfUse
    def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
    def hasContent_(self):
        """Return True when any child element is populated (the
        termsOfUse attribute alone does not count as content)."""
        if (
            self.limitExceeded is not None or
            self.netRef is not None or
            self.anytypeobjs_ is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='nets', namespacedef_='', pretty_print=True):
        """Serialize this element and its children as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='nets')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='nets'):
        # already_processed guards against double emission when
        # subclasses chain up through this method.
        if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
    def exportChildren(self, outfile, level, namespace_='v1:', name_='nets', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.limitExceeded is not None:
            self.limitExceeded.export(outfile, level, namespace_, name_='limitExceeded', pretty_print=pretty_print)
        if self.netRef is not None:
            self.netRef.export(outfile, level, namespace_, name_='netRef', pretty_print=pretty_print)
        if self.anytypeobjs_ is not None:
            self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='nets'):
        """Write this object as Python constructor-literal syntax."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            showIndent(outfile, level)
            outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.limitExceeded is not None:
            showIndent(outfile, level)
            outfile.write('limitExceeded=model_.limitExceeded(\n')
            self.limitExceeded.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.netRef is not None:
            showIndent(outfile, level)
            outfile.write('netRef=model_.netRef(\n')
            self.netRef.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.anytypeobjs_ is not None:
            showIndent(outfile, level)
            outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
            self.anytypeobjs_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('termsOfUse', node)
        if value is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            self.termsOfUse = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'limitExceeded':
            obj_ = limitExceeded.factory()
            obj_.build(child_)
            self.set_limitExceeded(obj_)
        elif nodeName_ == 'netRef':
            obj_ = netRef.factory()
            obj_.build(child_)
            self.set_netRef(obj_)
        else:
            # Unrecognized tags land in the wildcard (anyType) slot.
            obj_ = self.gds_build_any(child_, 'nets')
            if obj_ is not None:
                self.set_anytypeobjs_(obj_)
# end class nets
class orgs(GeneratedsSuper):
    """Generated binding for the <orgs> element.

    Holds an optional limitExceeded marker and a repeatable list of
    orgPocLinkRef children, plus a termsOfUse XML attribute.

    NOTE(review): generateDS-style auto-generated code -- manual edits
    may be overwritten if the bindings are regenerated.
    """
    subclass = None
    superclass = None
    def __init__(self, termsOfUse=None, limitExceeded=None, orgPocLinkRef=None):
        # termsOfUse is an XML attribute; orgPocLinkRef is a repeated
        # child element, so it defaults to a fresh list per instance.
        self.termsOfUse = _cast(None, termsOfUse)
        self.limitExceeded = limitExceeded
        if orgPocLinkRef is None:
            self.orgPocLinkRef = []
        else:
            self.orgPocLinkRef = orgPocLinkRef
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been plugged in.
        if orgs.subclass:
            return orgs.subclass(*args_, **kwargs_)
        else:
            return orgs(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Accessors (generated API -- kept for backward compatibility).
    def get_limitExceeded(self): return self.limitExceeded
    def set_limitExceeded(self, limitExceeded): self.limitExceeded = limitExceeded
    def get_orgPocLinkRef(self): return self.orgPocLinkRef
    def set_orgPocLinkRef(self, orgPocLinkRef): self.orgPocLinkRef = orgPocLinkRef
    def add_orgPocLinkRef(self, value): self.orgPocLinkRef.append(value)
    # NOTE: "insert" here replaces the entry at index (generated quirk).
    def insert_orgPocLinkRef(self, index, value): self.orgPocLinkRef[index] = value
    def get_termsOfUse(self): return self.termsOfUse
    def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
    def hasContent_(self):
        """Return True when any child element is populated (the
        termsOfUse attribute alone does not count as content)."""
        if (
            self.limitExceeded is not None or
            self.orgPocLinkRef
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='orgs', namespacedef_='', pretty_print=True):
        """Serialize this element and its children as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='orgs')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='orgs'):
        # already_processed guards against double emission when
        # subclasses chain up through this method.
        if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
    def exportChildren(self, outfile, level, namespace_='v1:', name_='orgs', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.limitExceeded is not None:
            self.limitExceeded.export(outfile, level, namespace_, name_='limitExceeded', pretty_print=pretty_print)
        for orgPocLinkRef_ in self.orgPocLinkRef:
            orgPocLinkRef_.export(outfile, level, namespace_, name_='orgPocLinkRef', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='orgs'):
        """Write this object as Python constructor-literal syntax."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            showIndent(outfile, level)
            outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.limitExceeded is not None:
            showIndent(outfile, level)
            outfile.write('limitExceeded=model_.limitExceeded(\n')
            self.limitExceeded.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        # The list child is always written, even when empty.
        showIndent(outfile, level)
        outfile.write('orgPocLinkRef=[\n')
        level += 1
        for orgPocLinkRef_ in self.orgPocLinkRef:
            showIndent(outfile, level)
            outfile.write('model_.orgPocLinkRef(\n')
            orgPocLinkRef_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag before dispatch.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('termsOfUse', node)
        if value is not None and 'termsOfUse' not in already_processed:
            already_processed.add('termsOfUse')
            self.termsOfUse = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'limitExceeded':
            obj_ = limitExceeded.factory()
            obj_.build(child_)
            self.set_limitExceeded(obj_)
        elif nodeName_ == 'orgPocLinkRef':
            obj_ = orgPocLinkRef.factory()
            obj_.build(child_)
            self.orgPocLinkRef.append(obj_)
# end class orgs
class comment(GeneratedsSuper):
    """Generated binding for the <comment> element: an ordered
    sequence of <line> child objects.
    """
    subclass = None
    superclass = None
    def __init__(self, line=None):
        # Fresh list per instance; never share a mutable default.
        self.line = [] if line is None else line
    def factory(*args_, **kwargs_):
        # Honor a registered subclass override, if present.
        cls = comment.subclass or comment
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_line(self):
        return self.line
    def set_line(self, line):
        self.line = line
    def add_line(self, value):
        self.line.append(value)
    def insert_line(self, index, value):
        # NOTE: replaces the entry at *index* (generated-API quirk).
        self.line[index] = value
    def hasContent_(self):
        return bool(self.line)
    def export(self, outfile, level, namespace_='v1:', name_='comment', namespacedef_='', pretty_print=True):
        """Serialize this element and its children as XML to outfile."""
        eol_ = '\n' if pretty_print else ''
        showIndent(outfile, level, pretty_print)
        extra_ = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra_))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='comment')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='comment'):
        # <comment> carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='v1:', name_='comment', fromsubclass_=False, pretty_print=True):
        # Each child handles its own indentation and line ending.
        for line_ in self.line:
            line_.export(outfile, level, namespace_, name_='line', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='comment'):
        """Write this object as Python constructor-literal syntax."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('line=[\n')
        for line_ in self.line:
            showIndent(outfile, level + 1)
            outfile.write('model_.line(\n')
            line_.exportLiteral(outfile, level + 1)
            showIndent(outfile, level + 1)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for element_ in node:
            tag_ = Tag_pattern_.match(element_.tag).groups()[-1]
            self.buildChildren(element_, node, tag_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'line':
            item_ = line.factory()
            item_.build(child_)
            self.line.append(item_)
# end class comment
class streetAddress(GeneratedsSuper):
    """Generated binding for the <streetAddress> element: an ordered
    sequence of <line> child objects.
    """
    subclass = None
    superclass = None
    def __init__(self, line=None):
        # Fresh list per instance; never share a mutable default.
        self.line = [] if line is None else line
    def factory(*args_, **kwargs_):
        # Honor a registered subclass override, if present.
        cls = streetAddress.subclass or streetAddress
        return cls(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_line(self):
        return self.line
    def set_line(self, line):
        self.line = line
    def add_line(self, value):
        self.line.append(value)
    def insert_line(self, index, value):
        # NOTE: replaces the entry at *index* (generated-API quirk).
        self.line[index] = value
    def hasContent_(self):
        return bool(self.line)
    def export(self, outfile, level, namespace_='v1:', name_='streetAddress', namespacedef_='', pretty_print=True):
        """Serialize this element and its children as XML to outfile."""
        eol_ = '\n' if pretty_print else ''
        showIndent(outfile, level, pretty_print)
        extra_ = ' ' + namespacedef_ if namespacedef_ else ''
        outfile.write('<%s%s%s' % (namespace_, name_, extra_))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='streetAddress')
        if not self.hasContent_():
            outfile.write('/>%s' % (eol_,))
            return
        outfile.write('>%s' % (eol_,))
        self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='streetAddress'):
        # <streetAddress> carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='v1:', name_='streetAddress', fromsubclass_=False, pretty_print=True):
        # Each child handles its own indentation and line ending.
        for line_ in self.line:
            line_.export(outfile, level, namespace_, name_='line', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='streetAddress'):
        """Write this object as Python constructor-literal syntax."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('line=[\n')
        for line_ in self.line:
            showIndent(outfile, level + 1)
            outfile.write('model_.line(\n')
            line_.exportLiteral(outfile, level + 1)
            showIndent(outfile, level + 1)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for element_ in node:
            tag_ = Tag_pattern_.match(element_.tag).groups()[-1]
            self.buildChildren(element_, node, tag_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'line':
            item_ = line.factory()
            item_.build(child_)
            self.line.append(item_)
# end class streetAddress
class limitExceeded(GeneratedsSuper):
    """Generated binding for the <limitExceeded> element.

    A simple-content element: its text value (valueOf_) indicates
    whether the result set was truncated, and the ``limit`` XML
    attribute (coerced to int) gives the registry's result cap.

    NOTE(review): generateDS-style auto-generated code; the accessor
    interface is kept intact for compatibility.
    """
    subclass = None
    superclass = None
    def __init__(self, limit=None, valueOf_=None):
        # limit is an XML attribute (cast to int); valueOf_ holds the
        # element's text content.
        self.limit = _cast(int, limit)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been plugged in.
        if limitExceeded.subclass:
            return limitExceeded.subclass(*args_, **kwargs_)
        else:
            return limitExceeded(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_limit(self): return self.limit
    def set_limit(self, limit): self.limit = limit
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        """Return True when the element has non-empty text content."""
        if (
            self.valueOf_
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='limitExceeded', namespacedef_='', pretty_print=True):
        """Serialize this element as XML to outfile."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='limitExceeded')
        if self.hasContent_():
            outfile.write('>')
            # Simple content: write the text value inline, no indent.
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='limitExceeded'):
        if self.limit is not None and 'limit' not in already_processed:
            already_processed.add('limit')
            outfile.write(' limit="%s"' % self.gds_format_integer(self.limit, input_name='limit'))
    def exportChildren(self, outfile, level, namespace_='v1:', name_='limitExceeded', fromsubclass_=False, pretty_print=True):
        # Simple-content element: no child elements to emit.
        pass
    def exportLiteral(self, outfile, level, name_='limitExceeded'):
        """Write this object as Python constructor-literal syntax."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        # valueOf_ is always written, even when empty/None.
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.limit is not None and 'limit' not in already_processed:
            already_processed.add('limit')
            showIndent(outfile, level)
            outfile.write('limit=%d,\n' % (self.limit,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        """Populate this object from an ElementTree element node."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read the ``limit`` attribute, coercing it to int."""
        value = find_attr_value_('limit', node)
        if value is not None and 'limit' not in already_processed:
            already_processed.add('limit')
            try:
                self.limit = int(value)
            # BUGFIX: use 'except ... as ...' -- valid on Python 2.6+
            # and Python 3, unlike the old 'except ValueError, exp'
            # comma form, which is a SyntaxError on Python 3.
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Simple-content element: child elements are not expected.
        pass
# end class limitExceeded
class asnRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, handle=None, name=None, valueOf_=None):
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if asnRef.subclass:
return asnRef.subclass(*args_, **kwargs_)
else:
return asnRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='asnRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='asnRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='asnRef'):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='asnRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='asnRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write the attributes as Python keyword-argument literals."""
        if self.handle is not None and 'handle' not in already_processed:
            already_processed.add('handle')
            showIndent(outfile, level)
            outfile.write('handle="%s",\n' % (self.handle,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
    def build(self, node):
        """Populate this object from an ElementTree ``node``."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        # Element text (including tails of children) becomes valueOf_.
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read the ``handle`` and ``name`` XML attributes from ``node``."""
        value = find_attr_value_('handle', node)
        if value is not None and 'handle' not in already_processed:
            already_processed.add('handle')
            self.handle = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class asnRef
class netRef(GeneratedsSuper):
    """Generated binding for the ``netRef`` element: a reference to a
    network, carried as element text plus ``startAddress``, ``endAddress``,
    ``handle`` and ``name`` XML attributes.
    """
    subclass = None
    superclass = None
    def __init__(self, startAddress=None, endAddress=None, handle=None, name=None, valueOf_=None):
        # _cast(None, x) passes the value through unchanged; all
        # attributes are kept as the strings parsed from the XML.
        self.startAddress = _cast(None, startAddress)
        self.endAddress = _cast(None, endAddress)
        self.handle = _cast(None, handle)
        self.name = _cast(None, name)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        """Create an instance, honoring an installed subclass override."""
        if netRef.subclass:
            return netRef.subclass(*args_, **kwargs_)
        else:
            return netRef(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_startAddress(self): return self.startAddress
    def set_startAddress(self, startAddress): self.startAddress = startAddress
    def get_endAddress(self): return self.endAddress
    def set_endAddress(self, endAddress): self.endAddress = endAddress
    def get_handle(self): return self.handle
    def set_handle(self, handle): self.handle = handle
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        """Return True when this element carries (truthy) text content."""
        if (
            self.valueOf_
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='netRef', namespacedef_='', pretty_print=True):
        """Write this element as XML to ``outfile`` at indent ``level``."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='netRef')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No text content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='netRef'):
        """Write each set XML attribute exactly once."""
        if self.startAddress is not None and 'startAddress' not in already_processed:
            already_processed.add('startAddress')
            outfile.write(' startAddress=%s' % (self.gds_format_string(quote_attrib(self.startAddress).encode(ExternalEncoding), input_name='startAddress'), ))
        if self.endAddress is not None and 'endAddress' not in already_processed:
            already_processed.add('endAddress')
            outfile.write(' endAddress=%s' % (self.gds_format_string(quote_attrib(self.endAddress).encode(ExternalEncoding), input_name='endAddress'), ))
        if self.handle is not None and 'handle' not in already_processed:
            already_processed.add('handle')
            outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
    def exportChildren(self, outfile, level, namespace_='v1:', name_='netRef', fromsubclass_=False, pretty_print=True):
        # netRef defines no child elements.
        pass
    def exportLiteral(self, outfile, level, name_='netRef'):
        """Write this element as Python literal (constructor-style) code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write the attributes as Python keyword-argument literals."""
        if self.startAddress is not None and 'startAddress' not in already_processed:
            already_processed.add('startAddress')
            showIndent(outfile, level)
            outfile.write('startAddress="%s",\n' % (self.startAddress,))
        if self.endAddress is not None and 'endAddress' not in already_processed:
            already_processed.add('endAddress')
            showIndent(outfile, level)
            outfile.write('endAddress="%s",\n' % (self.endAddress,))
        if self.handle is not None and 'handle' not in already_processed:
            already_processed.add('handle')
            showIndent(outfile, level)
            outfile.write('handle="%s",\n' % (self.handle,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
    def exportLiteralChildren(self, outfile, level, name_):
        # netRef defines no child elements.
        pass
    def build(self, node):
        """Populate this object from an ElementTree ``node``."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read each XML attribute from ``node`` at most once."""
        value = find_attr_value_('startAddress', node)
        if value is not None and 'startAddress' not in already_processed:
            already_processed.add('startAddress')
            self.startAddress = value
        value = find_attr_value_('endAddress', node)
        if value is not None and 'endAddress' not in already_processed:
            already_processed.add('endAddress')
            self.endAddress = value
        value = find_attr_value_('handle', node)
        if value is not None and 'handle' not in already_processed:
            already_processed.add('handle')
            self.handle = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # netRef defines no child elements.
        pass
class orgPocLinkRef(GeneratedsSuper):
    """Generated binding for the ``orgPocLinkRef`` element: a link from an
    organization to a point of contact, carried as element text plus the
    related-POC description/handle/name/function attributes.
    """
    subclass = None
    superclass = None
    def __init__(self, relPocDescription=None, handle=None, name=None, relPocHandle=None, relPocName=None, relPocFunction=None, valueOf_=None):
        # _cast(None, x) passes the value through unchanged; all
        # attributes are kept as the strings parsed from the XML.
        self.relPocDescription = _cast(None, relPocDescription)
        self.handle = _cast(None, handle)
        self.name = _cast(None, name)
        self.relPocHandle = _cast(None, relPocHandle)
        self.relPocName = _cast(None, relPocName)
        self.relPocFunction = _cast(None, relPocFunction)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        """Create an instance, honoring an installed subclass override."""
        if orgPocLinkRef.subclass:
            return orgPocLinkRef.subclass(*args_, **kwargs_)
        else:
            return orgPocLinkRef(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_relPocDescription(self): return self.relPocDescription
    def set_relPocDescription(self, relPocDescription): self.relPocDescription = relPocDescription
    def get_handle(self): return self.handle
    def set_handle(self, handle): self.handle = handle
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_relPocHandle(self): return self.relPocHandle
    def set_relPocHandle(self, relPocHandle): self.relPocHandle = relPocHandle
    def get_relPocName(self): return self.relPocName
    def set_relPocName(self, relPocName): self.relPocName = relPocName
    def get_relPocFunction(self): return self.relPocFunction
    def set_relPocFunction(self, relPocFunction): self.relPocFunction = relPocFunction
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        """Return True when this element carries (truthy) text content."""
        if (
            self.valueOf_
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='orgPocLinkRef', namespacedef_='', pretty_print=True):
        """Write this element as XML to ``outfile`` at indent ``level``."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='orgPocLinkRef')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No text content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='orgPocLinkRef'):
        """Write each set XML attribute exactly once."""
        if self.relPocDescription is not None and 'relPocDescription' not in already_processed:
            already_processed.add('relPocDescription')
            outfile.write(' relPocDescription=%s' % (self.gds_format_string(quote_attrib(self.relPocDescription).encode(ExternalEncoding), input_name='relPocDescription'), ))
        if self.handle is not None and 'handle' not in already_processed:
            already_processed.add('handle')
            outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.relPocHandle is not None and 'relPocHandle' not in already_processed:
            already_processed.add('relPocHandle')
            outfile.write(' relPocHandle=%s' % (self.gds_format_string(quote_attrib(self.relPocHandle).encode(ExternalEncoding), input_name='relPocHandle'), ))
        if self.relPocName is not None and 'relPocName' not in already_processed:
            already_processed.add('relPocName')
            outfile.write(' relPocName=%s' % (self.gds_format_string(quote_attrib(self.relPocName).encode(ExternalEncoding), input_name='relPocName'), ))
        if self.relPocFunction is not None and 'relPocFunction' not in already_processed:
            already_processed.add('relPocFunction')
            outfile.write(' relPocFunction=%s' % (self.gds_format_string(quote_attrib(self.relPocFunction).encode(ExternalEncoding), input_name='relPocFunction'), ))
    def exportChildren(self, outfile, level, namespace_='v1:', name_='orgPocLinkRef', fromsubclass_=False, pretty_print=True):
        # orgPocLinkRef defines no child elements.
        pass
    def exportLiteral(self, outfile, level, name_='orgPocLinkRef'):
        """Write this element as Python literal (constructor-style) code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write the attributes as Python keyword-argument literals."""
        if self.relPocDescription is not None and 'relPocDescription' not in already_processed:
            already_processed.add('relPocDescription')
            showIndent(outfile, level)
            outfile.write('relPocDescription="%s",\n' % (self.relPocDescription,))
        if self.handle is not None and 'handle' not in already_processed:
            already_processed.add('handle')
            showIndent(outfile, level)
            outfile.write('handle="%s",\n' % (self.handle,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name="%s",\n' % (self.name,))
        if self.relPocHandle is not None and 'relPocHandle' not in already_processed:
            already_processed.add('relPocHandle')
            showIndent(outfile, level)
            outfile.write('relPocHandle="%s",\n' % (self.relPocHandle,))
        if self.relPocName is not None and 'relPocName' not in already_processed:
            already_processed.add('relPocName')
            showIndent(outfile, level)
            outfile.write('relPocName="%s",\n' % (self.relPocName,))
        if self.relPocFunction is not None and 'relPocFunction' not in already_processed:
            already_processed.add('relPocFunction')
            showIndent(outfile, level)
            outfile.write('relPocFunction="%s",\n' % (self.relPocFunction,))
    def exportLiteralChildren(self, outfile, level, name_):
        # orgPocLinkRef defines no child elements.
        pass
    def build(self, node):
        """Populate this object from an ElementTree ``node``."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read each XML attribute from ``node`` at most once."""
        value = find_attr_value_('relPocDescription', node)
        if value is not None and 'relPocDescription' not in already_processed:
            already_processed.add('relPocDescription')
            self.relPocDescription = value
        value = find_attr_value_('handle', node)
        if value is not None and 'handle' not in already_processed:
            already_processed.add('handle')
            self.handle = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('relPocHandle', node)
        if value is not None and 'relPocHandle' not in already_processed:
            already_processed.add('relPocHandle')
            self.relPocHandle = value
        value = find_attr_value_('relPocName', node)
        if value is not None and 'relPocName' not in already_processed:
            already_processed.add('relPocName')
            self.relPocName = value
        value = find_attr_value_('relPocFunction', node)
        if value is not None and 'relPocFunction' not in already_processed:
            already_processed.add('relPocFunction')
            self.relPocFunction = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # orgPocLinkRef defines no child elements.
        pass
class phone(GeneratedsSuper):
    """Generated binding for the ``phone`` element.

    NOTE(review): the generated model is self-referential -- a phone's
    only child is another ``phone`` element. This mirrors the generator's
    output for the schema; confirm against the XSD if it looks wrong.
    """
    subclass = None
    superclass = None
    def __init__(self, phone=None):
        # Single optional nested phone element.
        self.phone = phone
    def factory(*args_, **kwargs_):
        """Create an instance, honoring an installed subclass override."""
        if phone.subclass:
            return phone.subclass(*args_, **kwargs_)
        else:
            return phone(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_phone(self): return self.phone
    def set_phone(self, phone): self.phone = phone
    def hasContent_(self):
        """Return True when the nested phone child is present."""
        if (
            self.phone is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='phone', namespacedef_='', pretty_print=True):
        """Write this element as XML to ``outfile`` at indent ``level``."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='phone')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='phone'):
        # phone defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='v1:', name_='phone', fromsubclass_=False, pretty_print=True):
        """Recursively export the nested phone child, if any."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.phone is not None:
            self.phone.export(outfile, level, namespace_, name_='phone', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='phone'):
        """Write this element as Python literal (constructor-style) code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # phone defines no XML attributes.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        """Write the nested phone child as literal constructor code."""
        if self.phone is not None:
            showIndent(outfile, level)
            outfile.write('phone=model_.phone(\n')
            self.phone.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree ``node``."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # phone defines no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build a nested phone object for each <phone> child."""
        if nodeName_ == 'phone':
            obj_ = phone.factory()
            obj_.build(child_)
            self.set_phone(obj_)
class line(GeneratedsSuper):
    """Generated binding for the ``line`` element (mixed content): a
    numbered line of text, e.g. one line of a street address or comment.

    Mixed content is captured twice: ``valueOf_`` holds the flattened
    text, while ``content_`` holds MixedContainer nodes preserving order.
    """
    subclass = None
    superclass = None
    def __init__(self, number=None, valueOf_=None, mixedclass_=None, content_=None):
        # number is coerced to int by _cast; the element text stays a string.
        self.number = _cast(int, number)
        self.valueOf_ = valueOf_
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
        # NOTE(review): duplicate assignment of valueOf_ below is an
        # artifact of the code generator; it is harmless.
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        """Create an instance, honoring an installed subclass override."""
        if line.subclass:
            return line.subclass(*args_, **kwargs_)
        else:
            return line(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_number(self): return self.number
    def set_number(self, number): self.number = number
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def hasContent_(self):
        """Return True when this element carries (truthy) text content."""
        if (
            self.valueOf_
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='line', namespacedef_='', pretty_print=True):
        """Write this element as XML; mixed content always gets open/close tags."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='line')
        outfile.write('>')
        self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='line'):
        """Write the integer ``number`` attribute, if set."""
        if self.number is not None and 'number' not in already_processed:
            already_processed.add('number')
            outfile.write(' number="%s"' % self.gds_format_integer(self.number, input_name='number'))
    def exportChildren(self, outfile, level, namespace_='v1:', name_='line', fromsubclass_=False, pretty_print=True):
        # line defines no element children (text only).
        pass
    def exportLiteral(self, outfile, level, name_='line'):
        """Write this element as Python literal (constructor-style) code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write the ``number`` attribute as a Python keyword literal."""
        if self.number is not None and 'number' not in already_processed:
            already_processed.add('number')
            showIndent(outfile, level)
            outfile.write('number=%d,\n' % (self.number,))
    def exportLiteralChildren(self, outfile, level, name_):
        # line defines no element children (text only).
        pass
    def build(self, node):
        """Populate this object from an ElementTree ``node``.

        Leading text is also recorded as a MixedContainer text node to
        preserve the mixed-content ordering.
        """
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read and validate the integer ``number`` attribute."""
        value = find_attr_value_('number', node)
        if value is not None and 'number' not in already_processed:
            already_processed.add('number')
            try:
                self.number = int(value)
            except ValueError, exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Record trailing text (tails) of children as mixed content."""
        if not fromsubclass_ and child_.tail is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
        pass
class type_(GeneratedsSuper):
    """Generated binding for the ``type`` element: a description/code
    pair. The trailing underscore avoids shadowing the ``type`` builtin.
    """
    subclass = None
    superclass = None
    def __init__(self, description=None, code=None):
        # Both children are optional simple string elements.
        self.description = description
        self.code = code
    def factory(*args_, **kwargs_):
        """Create an instance, honoring an installed subclass override."""
        if type_.subclass:
            return type_.subclass(*args_, **kwargs_)
        else:
            return type_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_description(self): return self.description
    def set_description(self, description): self.description = description
    def get_code(self): return self.code
    def set_code(self, code): self.code = code
    def hasContent_(self):
        """Return True when either child element is present."""
        if (
            self.description is not None or
            self.code is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='v1:', name_='type', namespacedef_='', pretty_print=True):
        """Write this element as XML to ``outfile`` at indent ``level``."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='type')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='type'):
        # type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='v1:', name_='type', fromsubclass_=False, pretty_print=True):
        """Write <description> and <code> child elements, if set."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.description is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
        if self.code is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%scode>%s</%scode>%s' % (namespace_, self.gds_format_string(quote_xml(self.code).encode(ExternalEncoding), input_name='code'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='type'):
        """Write this element as Python literal (constructor-style) code."""
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # type defines no XML attributes.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        """Write the child elements as Python keyword literals."""
        if self.description is not None:
            showIndent(outfile, level)
            outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
        if self.code is not None:
            showIndent(outfile, level)
            outfile.write('code=%s,\n' % quote_python(self.code).encode(ExternalEncoding))
    def build(self, node):
        """Populate this object from an ElementTree ``node``."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # type defines no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Read <description> and <code> child text into attributes."""
        if nodeName_ == 'description':
            description_ = child_.text
            description_ = self.gds_validate_string(description_, node, 'description')
            self.description = description_
        elif nodeName_ == 'code':
            code_ = child_.text
            code_ = self.gds_validate_string(code_, node, 'code')
            self.code = code_
# Map XML root-tag names to their generated binding classes; tags not
# listed here fall back to a module-global class of the same name.
# NOTE(review): 'phones' maps to the singular `phone` class -- looks like
# generator output; confirm against the schema.
GDSClassesMapping = {
    'phones': phone,
}
# Command-line help text for the standalone parser entry point.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print command-line usage to stdout and exit with status 1."""
    # Function-call form prints identically on Python 2 (single argument)
    # and is valid Python 3 syntax, unlike the bare print statement.
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Resolve an ElementTree root into ``(tag name, binding class)``.

    The class is looked up first in GDSClassesMapping, then among the
    module globals; ``None`` when neither knows the tag.
    """
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    klass = GDSClassesMapping.get(tag)
    if klass is None:
        # Fall back to a module-level binding class of the same name.
        klass = globals().get(tag)
    return tag, klass
def parse(inFileName):
    """Parse an XML file, echo it back to stdout, and return the root
    binding object.

    Unknown root elements default to the <poc> binding. The DOM is
    released before export so it can be garbage-collected.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'poc'
        rootClass = poc
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    # Fix: the xmlns URI must be quoted, otherwise the exported opening
    # tag contains an unquoted attribute value (malformed XML).
    rootObj.export(
        sys.stdout, 0, name_=rootTag,
        namespacedef_='xmlns="http://www.arin.net/whoisrws/core/v1"',
        pretty_print=True)
    return rootObj
def parseEtree(inFileName):
    """Parse an XML file and also rebuild it as an lxml/etree tree.

    Returns ``(rootObj, rootElement, mapping, reverse_mapping)`` where
    the mappings link binding objects to their etree nodes.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root element: default to the <poc> binding.
        rootTag = 'poc'
        rootClass = poc
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    mapping = {}
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
    reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
    content = etree_.tostring(
        rootElement, pretty_print=True,
        xml_declaration=True, encoding="utf-8")
    sys.stdout.write(content)
    sys.stdout.write('\n')
    return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString):
    """Parse XML from an in-memory string, echo it back to stdout, and
    return the root binding object.

    Unknown root elements default to the <poc> binding; the export is
    always written with the root tag name "poc".
    """
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    roots = get_root_tag(rootNode)
    rootClass = roots[1]
    if rootClass is None:
        rootClass = poc
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    # Fix: the xmlns URI must be quoted, otherwise the exported opening
    # tag contains an unquoted attribute value (malformed XML).
    rootObj.export(
        sys.stdout, 0, name_="poc",
        namespacedef_='xmlns="http://www.arin.net/whoisrws/core/v1"')
    return rootObj
def parseLiteral(inFileName):
    """Parse an XML file and emit equivalent Python constructor code to
    stdout, then return the root binding object."""
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root element: default to the <poc> binding.
        rootTag = 'poc'
        rootClass = poc
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('#from poc import *\n\n')
    sys.stdout.write('import poc as model_\n\n')
    sys.stdout.write('rootObj = model_.rootTag(\n')
    rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
    sys.stdout.write(')\n')
    return rootObj
def main():
    """Command-line entry point: expects one argument, the XML path."""
    argv = sys.argv[1:]
    if len(argv) == 1:
        parse(argv[0])
    else:
        # Wrong argument count: print usage and exit(1).
        usage()
# Run the parser only when invoked as a script, not when imported.
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
__all__ = [
"asnRef",
"asns",
"comment",
"emails",
"iso3166_1",
"limitExceeded",
"line",
"netRef",
"nets",
"orgPocLinkRef",
"orgs",
"phone",
"poc",
"streetAddress",
"type_"
]
|
Gassing during the growth of bacteriological infection.
Whittaker, J. C. and Francis, Brian J. and Whittaker, J. (1996) Gassing during the growth of bacteriological infection. Student, 1 (3). pp. 211-214.
|
import json
import logging
from django.db import transaction
from rest_framework import mixins, serializers, status, viewsets
from rest_framework.response import Response
from mkt.api.authentication import RestOAuthAuthentication
from mkt.api.authorization import GroupPermission
from mkt.api.base import CORSMixin
from .forms import MonolithForm
from .models import MonolithRecord
# Module-level logger for the monolith API endpoints.
logger = logging.getLogger('z.monolith')
class MonolithSerializer(serializers.ModelSerializer):
    """Serializer for MonolithRecord rows.

    The record's ``value`` column is stored as a JSON string; it is
    decoded to native Python data on output.
    """
    class Meta:
        model = MonolithRecord

    def transform_value(self, obj, value):
        # Decode the JSON-encoded payload for API responses.
        return json.loads(value)
class MonolithViewSet(CORSMixin, mixins.DestroyModelMixin,
                      mixins.ListModelMixin, mixins.RetrieveModelMixin,
                      viewsets.GenericViewSet):
    """List, retrieve and bulk-delete MonolithRecord metrics.

    Access is restricted to holders of the Monolith:API group permission
    using OAuth authentication; CORS allows GET and DELETE.
    """
    cors_allowed_methods = ('get', 'delete')
    permission_classes = [GroupPermission('Monolith', 'API')]
    authentication_classes = [RestOAuthAuthentication]
    serializer_class = MonolithSerializer

    def get_queryset(self):
        """Filter records by optional key/start/end query parameters."""
        form = MonolithForm(self.request.QUERY_PARAMS)
        if not form.is_valid():
            # NOTE(review): returning a Response from get_queryset is
            # unusual -- DRF expects a queryset here; confirm callers
            # handle this (it likely surfaces as a 500, not a 400).
            return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
        key = form.cleaned_data['key']
        start = form.cleaned_data['start']
        end = form.cleaned_data['end']
        qs = MonolithRecord.objects.all()
        if key:
            qs = qs.filter(key=key)
        # Half-open interval: start inclusive, end exclusive.
        if start is not None:
            qs = qs.filter(recorded__gte=start)
        if end is not None:
            qs = qs.filter(recorded__lt=end)
        return qs

    # NOTE(review): transaction.commit_on_success was removed in later
    # Django versions (replaced by transaction.atomic) -- verify the
    # Django version pinned by this project.
    @transaction.commit_on_success
    def delete(self, request, *args, **kwargs):
        """Bulk-delete all records matching the current filters."""
        qs = self.filter_queryset(self.get_queryset())
        logger.info('Deleting %d monolith resources' % qs.count())
        qs.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
York Earwaker wrote 8 years ago.
ROCRIS wrote 9 years ago.
pressure of a static fluid?
Saew wrote 9 years ago.
to the one about Friction?
amr wrote 9 years ago.
Carlos Rodriguez wrote 9 years ago.
about friction. It is possible to post the friction lecture?
CosmoLearning is promoting these materials solely for nonprofit educational purposes, and to recognize contributions made by Walter Lewin (Prof. Lewin) to online education. We do not host or upload any copyrighted materials, including videos hosted on video websites like YouTube*, unless with explicit permission from the author(s). All intellectual property rights are reserved to Prof. Lewin and involved parties. CosmoLearning is not endorsed by Prof. Lewin, and we are not affiliated with him, unless otherwise specified. Any questions, claims or concerns regarding this content should be directed to its creator(s).
|
import reads
import writes
import re
import MySQLdb
#Run in the command line: "python server.py"
#Open browser and go to http://localhost:8888/
#Go to http://localhost:8888/recreate/ to reset database to example
def URLS():
yield ("^/$", writes.login)
yield ("^/logout/$", writes.logout)
yield ("^/manufacturer/$", reads.manufacturer)
yield ("^/manufacturer/[0-9]+/$", reads.manufacturer_id)
yield ("^/manufacturer/[0-9]+/edit/$", writes.manufacturer_id_edit)
yield ("^/manufacturer/add/$", writes.manufacturer_add)
yield ("^/product/$", reads.product)
yield ("^/product/[0-9]+/$", reads.product_id)
yield ("^/product/[0-9]+/edit/$", writes.product_id_edit)
yield ("^/product/add/$", writes.product_add)
yield ("^/product/mass-assign/$", writes.product_mass_assign)
yield ("^/distributor/$", reads.distributor)
yield ("^/distributor/[0-9]+/$", reads.distributor_id)
yield ("^/distributor/[0-9]+/edit/$", writes.distributor_id_edit)
yield ("^/distributor/add/$", writes.distributor_add)
yield ("^/seller/$", reads.seller)
yield ("^/seller/[0-9]+/$", reads.seller_id)
yield ("^/seller/[0-9]+/edit/$", writes.seller_id_edit)
yield ("^/seller/[0-9]+/commission/$", writes.seller_id_commission)
yield ("^/seller/add/$", writes.seller_add)
yield ("^/order/$", reads.order)
yield ("^/order/[0-9]+/$", reads.order_id)
yield ("^/order/[0-9]+/edit/$", writes.order_id_edit)
yield ("^/order/[0-9]+/fulfill/$", writes.order_id_fulfill)
yield ("^/order/[0-9]+/unfulfill/$", writes.order_id_unfulfill)
yield ("^/order/[0-9]+/commission/$", writes.order_id_commission)
yield ("^/order/add/$", writes.order_add)
yield ("^/pay/$", writes.pay)
yield ("^/customer/$", reads.customer)
yield ("^/customer/[0-9]+/$", reads.customer_id)
yield ("^/recreate/$", writes.recreate)
yield ("^/reset/$", writes.recreate)
yield ("^/reload/$", writes.recreate)
def app(req, resp):
    """Minimal WSGI application: dispatch PATH_INFO through URLS().

    On a match, a MySQL connection and cursor are attached to ``req``
    for the page handler. The connection is now closed in a ``finally``
    block so it is released even when the handler raises (the previous
    version leaked the connection on error); commit still happens only
    after a successful render.
    """
    for url, page in URLS():
        if re.match(url, req['PATH_INFO']):
            req['db'] = MySQLdb.connect(host="localhost", user="solaruser",
                                        passwd="solarpassword", db="solar")
            req['cur'] = req['db'].cursor()
            try:
                status, headers, data = page(req)
                resp(status, headers)
                req['cur'].close()
                req['db'].commit()
            finally:
                req['db'].close()
            return [data]
    # No route matched.
    resp('404 Not Found', [('Content-type', 'text/plain')])
    return ["404 Not Found"]
# Start the development HTTP server on port 8888 (blocks forever).
# NOTE(review): this runs at import time; the script is meant to be
# executed directly ("python server.py"), not imported.
from wsgiref.simple_server import make_server
make_server('', 8888, app).serve_forever()
|
Maureen Egan is a Director in the Financial Advisory Services Group and specializes in complex commercial litigation and forensic investigations.
Maureen has over 20 years of experience analyzing complex financial, accounting and economic issues throughout the dispute resolution process, with an emphasis on breach of contract and fiduciary duty claims, partnership and shareholder disputes, and purchase price adjustments and breach of representation and warranty claims arising from mergers and acquisitions.
Additionally, Maureen has served as the lead case manager on federal district, bankruptcy, Delaware Chancery, state court and arbitration actions requiring the calculation of lost profits and lost value damages, the evaluation of insolvency and the forensic reconstruction of financial activity over multiple years. She has also managed forensic investigations involving internal and external fraud.
Maureen began her career as a staff accountant in regional accounting firms, providing audit and tax services to small and midsized privately held companies. Her experience covers a wide range of industries, including manufacturing, retail and wholesale product distribution, financial services, construction, real estate, professional service firms and electric utilities.
Maureen currently serves as a committee member of the New York Chapter of Women of EisnerAmper.
Yoga, hiking, cooking, watching my son play soccer.
Getting it done the right way.
Habitat for Humanity, because home is where it all begins.
EisnerAmper LLP is among the nation’s largest full-service advisory and accounting firms providing audit, accounting, and tax services to a broad range of clients. Our Forensic, Litigation and Valuation Services Group specializes in complex commercial litigation and forensic investigations and develop strategies throughout the dispute resolution process. Our experts lead projects on bankruptcy matters with an emphasis on lost value, insolvency, business valuation and forensic investigation reconstruction.
|
import os
import sys
import datetime
from collections import namedtuple
import functools
import glob
import logging
import subprocess
import time
from PyQt5.QtCore import (
QObject,
QProcess,
pyqtSignal,
QTimer,
QProcessEnvironment,
)
from . import wheels
from . import settings
from . import config
wheels_dirpath = os.path.dirname(wheels.__file__)
logger = logging.getLogger(__name__)
class SplashLogHandler(logging.NullHandler):
    """
    A log handler with a single purpose: forward every log record, line by
    line, through the Qt signal supplied at construction time (e.g. for
    display on a splash screen).
    """

    def __init__(self, emitter):
        """
        Store the Qt signal through which formatted log output will be sent.
        """
        super().__init__()
        self.setLevel(logging.DEBUG)
        self.emitter = emitter

    def emit(self, record):
        """
        Format the record and send each of its lines via the Qt signal.
        """
        created = datetime.datetime.fromtimestamp(record.created)
        for line in record.getMessage().splitlines():
            self.emitter.emit(
                "[{}]({}) - {}".format(record.levelname, created, line)
            )

    def handle(self, record):
        """
        Bypass NullHandler's no-op handling and emit the record directly.
        """
        self.emit(record)
class Process(QObject):
    """
    Use the QProcess mechanism to run a subprocess asynchronously

    This will interact well with Qt Gui objects, eg by connecting the
    `output` signals to an `QTextEdit.append` method and the `started`
    and `finished` signals to a `QPushButton.setEnabled`.

    eg::
        import sys
        from PyQt5.QtCore import *
        from PyQt5.QtWidgets import *

        class Example(QMainWindow):
            def __init__(self):
                super().__init__()
                textEdit = QTextEdit()
                self.setCentralWidget(textEdit)
                self.setGeometry(300, 300, 350, 250)
                self.setWindowTitle('Main window')
                self.show()
                self.process = Process()
                self.process.output.connect(textEdit.append)
                self.process.run(sys.executable, ["-u", "-m", "pip", "list"])

        def main():
            app = QApplication(sys.argv)
            ex = Example()
            sys.exit(app.exec_())
    """

    # Qt signals emitted as the subprocess starts, produces output and
    # finishes; callers connect their own slots to these.
    started = pyqtSignal()
    output = pyqtSignal(str)
    finished = pyqtSignal()
    # Optional bundle of callables to connect to the three signals above;
    # all default to None so callers may supply any subset.
    Slots = namedtuple("Slots", ["started", "output", "finished"])
    Slots.__new__.__defaults__ = (None, None, None)
    def __init__(self):
        super().__init__()
        #
        # Always run unbuffered and with UTF-8 IO encoding
        #
        self.environment = QProcessEnvironment.systemEnvironment()
        self.environment.insert("PYTHONUNBUFFERED", "1")
        self.environment.insert("PYTHONIOENCODING", "utf-8")
    def _set_up_run(self, **envvars):
        """
        Create the QProcess with merged stdout/stderr and an environment
        consisting of the base environment plus any extra envvars.
        """
        self.process = QProcess()
        environment = QProcessEnvironment(self.environment)
        for k, v in envvars.items():
            environment.insert(k, v)
        self.process.setProcessEnvironment(environment)
        self.process.setProcessChannelMode(QProcess.MergedChannels)
    def run_blocking(self, command, args, wait_for_s=30.0, **envvars):
        """
        Run the command synchronously: start it, wait up to `wait_for_s`
        seconds for it to finish, and return everything it wrote.
        """
        self._set_up_run(**envvars)
        self.process.start(command, args)
        self.wait(wait_for_s=wait_for_s)
        output = self.data()
        return output
    def run(self, command, args, **envvars):
        """
        Run the command asynchronously, forwarding its lifecycle and output
        via the `started`, `output` and `finished` signals.
        """
        logger.info(
            "About to run %s with args %s and envvars %s",
            command,
            args,
            envvars,
        )
        self._set_up_run(**envvars)
        self.process.readyRead.connect(self._readyRead)
        self.process.started.connect(self._started)
        self.process.finished.connect(self._finished)
        partial = functools.partial(self.process.start, command, args)
        # NOTE(review): the start is deferred via a 1ms single-shot timer —
        # presumably so it happens inside the Qt event loop after the caller
        # has connected its slots; confirm.
        QTimer.singleShot(
            1,
            partial,
        )
    def wait(self, wait_for_s=30.0):
        """
        Block for up to `wait_for_s` seconds waiting for the process to end.

        Raises VirtualEnvironmentError if the process crashed.
        """
        # NOTE(review): waitForFinished takes msecs as an int; passing a
        # float relies on implicit conversion — confirm.
        finished = self.process.waitForFinished(1000 * wait_for_s)
        #
        # If finished is False, it could be because of an error
        # or because we've already finished before starting to wait!
        #
        if (
            not finished
            and self.process.exitStatus() == self.process.CrashExit
        ):
            raise VirtualEnvironmentError("Some error occurred")
    def data(self):
        """Return everything the process has written, decoded as UTF-8."""
        return self.process.readAll().data().decode("utf-8")
    def _started(self):
        # Re-emit QProcess's started signal as our own
        self.started.emit()
    def _readyRead(self):
        # Drain the process output and re-emit it as a stripped string
        self.output.emit(self.data().strip())
    def _finished(self):
        # Re-emit QProcess's finished signal as our own
        self.finished.emit()
class Pip(object):
    """
    Proxy for various pip commands

    While this is a fairly useful abstraction in its own right, it exists
    at least initially to assist in testing, so pip invocations can be
    mocked out.
    """

    def __init__(self, pip_executable):
        self.executable = pip_executable
        self.process = Process()

    def run(
        self, command, *args, wait_for_s=30.0, slots=Process.Slots(), **kwargs
    ):
        """
        Run a pip command with args, treating kwargs as Posix switches.

        eg run("install", "flask", upgrade=True)
        """
        #
        # Keyword args become command-line switches. A boolean value marks
        # a yes/no flag: True adds --switch, False adds --no-switch; any
        # other value adds --switch followed by the value itself.
        #
        params = [command, "--disable-pip-version-check"]
        for name, value in kwargs.items():
            flag = name.replace("_", "-")
            if value is False:
                flag = "no-" + flag
            params.append("--" + flag)
            if not isinstance(value, bool):
                params.append(str(value))
        params.extend(args)
        if slots.output is None:
            # Headless: run synchronously and hand back collected output
            return self.process.run_blocking(
                self.executable, params, wait_for_s=wait_for_s
            )
        # Otherwise run asynchronously, wiring up whichever slots were given
        if slots.started:
            self.process.started.connect(slots.started)
        self.process.output.connect(slots.output)
        if slots.finished:
            self.process.finished.connect(slots.finished)
        self.process.run(self.executable, params)

    def install(self, packages, slots=Process.Slots(), **kwargs):
        """
        Use pip to install a package or packages.

        A single string installs one package; any other iterable installs
        each of its members. Any kwargs are passed as command-line switches.
        A value of None indicates a switch without a value (eg --upgrade).
        """
        names = [packages] if isinstance(packages, str) else list(packages)
        return self.run(
            "install", *names, wait_for_s=180.0, slots=slots, **kwargs
        )

    def uninstall(self, packages, slots=Process.Slots(), **kwargs):
        """
        Use pip to uninstall a package or packages

        A single string uninstalls one package; any other iterable
        uninstalls each of its members. Any kwargs are passed as
        command-line switches. A value of None indicates a switch without
        a value (eg --upgrade).
        """
        names = [packages] if isinstance(packages, str) else list(packages)
        return self.run(
            "uninstall",
            *names,
            wait_for_s=180.0,
            slots=slots,
            yes=True,
            **kwargs
        )

    def freeze(self):
        """
        Use pip to return a list of installed packages

        NB this is fairly trivial but is pulled out principally for
        testing purposes
        """
        return self.run("freeze")

    def list(self):
        """
        Use pip to return a list of installed packages

        NB this is fairly trivial but is pulled out principally for
        testing purposes
        """
        return self.run("list")

    def installed(self):
        """
        Yield tuples of (package_name, version)

        pip list gives a more consistent view of name/version
        than pip freeze which uses different annotations for
        file-installed wheels and editable (-e) installs
        """
        iterlines = iter(self.list().splitlines())
        #
        # The first two lines are headers
        #
        # cf https://lgtm.com/rules/11000086/
        #
        try:
            next(iterlines)
            next(iterlines)
        except StopIteration:
            raise VirtualEnvironmentError("Unable to parse installed packages")
        for line in iterlines:
            #
            # Some lines have a third location element; ignore it
            #
            name, version = line.split()[:2]
            yield name, version
class VirtualEnvironmentError(Exception):
    """
    Raised when the virtual environment is missing, broken, or cannot be
    created or used.
    """

    pass
class VirtualEnvironment(object):
    """
    Represents and contains methods for manipulating a virtual environment.
    """

    Slots = Process.Slots

    def __init__(self, dirpath=None):
        """
        Set up the object and decide which directory to use: the one passed
        in, the one remembered in the settings file, or a freshly-generated
        one, in that order of preference.
        """
        self.process = Process()
        self._is_windows = sys.platform == "win32"
        self._bin_extension = ".exe" if self._is_windows else ""
        self.settings = settings.VirtualEnvironmentSettings()
        self.settings.init()
        dirpath_to_use = (
            dirpath or self.settings.get("dirpath") or self._generate_dirpath()
        )
        logger.info("Using dirpath: %s", dirpath_to_use)
        self.relocate(dirpath_to_use)

    def __str__(self):
        return "<%s at %s>" % (self.__class__.__name__, self.path)

    @staticmethod
    def _generate_dirpath():
        """
        Construct a unique virtual environment folder

        To avoid clashing with previously-created virtual environments,
        construct one which includes the Python version and a timestamp
        """
        return "%s-%s-%s" % (
            config.VENV_DIR,
            "%s%s" % sys.version_info[:2],
            time.strftime("%Y%m%d-%H%M%S"),
        )

    def reset_pip(self):
        """
        Re-create the pip proxy, eg after the venv has been relocated.
        """
        self.pip = Pip(self.pip_executable)

    def relocate(self, dirpath):
        """
        Relocate sets up variables for, eg, the expected location and name of
        the Python and Pip binaries, but doesn't access the file system. That's
        done by code in or called from `create`
        """
        self.path = str(dirpath)
        self.name = os.path.basename(self.path)
        self._bin_directory = os.path.join(
            self.path, "scripts" if self._is_windows else "bin"
        )
        #
        # Pip and the interpreter will be set up when the virtualenv is created
        #
        self.interpreter = os.path.join(
            self._bin_directory, "python" + self._bin_extension
        )
        self.pip_executable = os.path.join(
            self._bin_directory, "pip" + self._bin_extension
        )
        self.reset_pip()
        logger.debug(
            "Virtual environment set up %s at %s", self.name, self.path
        )
        # Remember the location so the same venv is re-used next time
        self.settings["dirpath"] = self.path

    def run_python(self, *args, slots=Process.Slots()):
        """
        Run the referenced Python interpreter with the passed in args

        If slots are supplied for the starting, output or finished signals
        they will be used; otherwise it will be assumed that this is running
        headless and the process will be run synchronously and output collected
        will be returned when the process is complete
        """
        if slots.output:
            if slots.started:
                self.process.started.connect(slots.started)
            self.process.output.connect(slots.output)
            if slots.finished:
                self.process.finished.connect(slots.finished)
            self.process.run(self.interpreter, args)
            return self.process
        else:
            return self.process.run_blocking(self.interpreter, args)

    def _directory_is_venv(self):
        """
        Determine whether a directory appears to be an existing venv

        There appears to be no canonical way to achieve this. Often the
        presence of a pyvenv.cfg file is enough, but this isn't always there.
        Specifically, on debian it's not when created by virtualenv. So we
        fall back to finding an executable python command where we expect
        """
        if os.path.isfile(os.path.join(self.path, "pyvenv.cfg")):
            return True
        #
        # On windows os.access X_OK is close to meaningless, but it will
        # succeed for executable files (and everything else). On Posix it
        # does distinguish executable files
        #
        if os.access(self.interpreter, os.X_OK):
            return True
        return False

    def ensure_and_create(self, emitter=None):
        """
        Ensure the venv is usable, re-creating it (up to three attempts)
        when it is not; raise VirtualEnvironmentError on ultimate failure.

        If an emitter is provided, this will be used by a custom log handler
        to display logging events onto a splash screen.
        """
        splash_handler = None
        if emitter:
            splash_handler = SplashLogHandler(emitter)
            logger.addHandler(splash_handler)
            logger.info("Added log handler.")
        n_retries = 3
        try:
            for n in range(n_retries):
                try:
                    logger.debug(
                        "Checking virtual environment; attempt #%d.", 1 + n
                    )
                    self.ensure()
                except VirtualEnvironmentError:
                    new_dirpath = self._generate_dirpath()
                    logger.debug(
                        "Creating new virtual environment at %s.", new_dirpath
                    )
                    self.relocate(new_dirpath)
                    self.create()
                else:
                    logger.info("Virtual environment already exists.")
                    return
            # If we get here, there's a problem creating the virtual
            # environment, so attempt to signal this via the logger, wait
            # for the log to be displayed in the splash screen and then exit
            # via the exception.
            logger.error("Unable to create a working virtual environment.")
            raise VirtualEnvironmentError(
                "Unable to create a working virtual environment."
            )
        finally:
            # Always detach the splash handler; previously it was removed
            # only on the failure path and leaked on success.
            if splash_handler:
                logger.removeHandler(splash_handler)

    def ensure(self):
        """
        Ensure that virtual environment exists and is in a good state.
        """
        self.ensure_path()
        self.ensure_interpreter()
        self.ensure_interpreter_version()
        self.ensure_pip()
        self.ensure_key_modules()

    def ensure_path(self):
        """
        Ensure that the virtual environment path exists and is a valid venv.
        """
        if not os.path.exists(self.path):
            message = "%s does not exist." % self.path
            logger.error(message)
            raise VirtualEnvironmentError(message)
        elif not os.path.isdir(self.path):
            message = "%s exists but is not a directory." % self.path
            logger.error(message)
            raise VirtualEnvironmentError(message)
        elif not self._directory_is_venv():
            message = "Directory %s exists but is not a venv." % self.path
            logger.error(message)
            raise VirtualEnvironmentError(message)
        logger.info("Virtual Environment found at: %s", self.path)

    def ensure_interpreter(self):
        """
        Ensure there is an interpreter of the expected name at the expected
        location, given the platform and naming conventions.

        NB if the interpreter is present as a symlink to a system interpreter
        (likely for a venv) but the link is broken, then os.path.isfile will
        fail as though the file wasn't there. Which is what we want in these
        circumstances.
        """
        if os.path.isfile(self.interpreter):
            logger.info("Interpreter found at: %s", self.interpreter)
        else:
            message = (
                "Interpreter not found where expected at: %s"
                % self.interpreter
            )
            logger.error(message)
            raise VirtualEnvironmentError(message)

    def ensure_interpreter_version(self):
        """
        Ensure that the venv interpreter matches the version of Python running
        Mu.

        This is necessary because otherwise we'll have mismatched wheels etc.
        """
        current_version = "%s%s" % sys.version_info[:2]
        #
        # Can't use self.run_python as we're not yet within the Qt UI loop
        #
        process = subprocess.run(
            [
                self.interpreter,
                "-c",
                'import sys; print("%s%s" % sys.version_info[:2])',
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            check=True,
        )
        venv_version = process.stdout.decode("utf-8").strip()
        if current_version == venv_version:
            logger.info("Both interpreters at version %s", current_version)
        else:
            message = (
                "Mu interpreter at version %s; venv interpreter at version %s."
                % (current_version, venv_version)
            )
            logger.error(message)
            raise VirtualEnvironmentError(message)

    def ensure_key_modules(self):
        """
        Ensure that the venv interpreter is able to load key modules.
        """
        for module, *_ in wheels.mode_packages:
            logger.debug("Verifying import of: %s", module)
            try:
                subprocess.run(
                    [self.interpreter, "-c", "import %s" % module],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    check=True,
                )
            except subprocess.CalledProcessError:
                message = "Failed to import: %s" % module
                logger.error(message)
                raise VirtualEnvironmentError(message)

    def ensure_pip(self):
        """
        Ensure that pip is available.
        """
        if os.path.isfile(self.pip_executable):
            logger.info("Pip found at: %s", self.pip_executable)
        else:
            message = (
                "Pip not found where expected at: %s" % self.pip_executable
            )
            logger.error(message)
            raise VirtualEnvironmentError(message)

    def create(self):
        """
        Create a new virtualenv at the referenced path.
        """
        logger.info("Creating virtualenv: {}".format(self.path))
        logger.info("Virtualenv name: {}".format(self.name))
        env = dict(os.environ)
        subprocess.run(
            [
                sys.executable,
                "-m",
                "virtualenv",
                "-p",
                sys.executable,
                "-q",
                self.path,
            ],
            check=True,
            env=env,
        )
        # Set the path to the interpreter
        self.install_baseline_packages()
        self.register_baseline_packages()
        self.install_jupyter_kernel()

    def install_jupyter_kernel(self):
        """
        Install a Jupyter kernel for Mu (the name of the kernel indicates this
        is a Mu related kernel).
        """
        kernel_name = '"Python/Mu ({})"'.format(self.name)
        logger.info("Installing Jupyter Kernel: %s", kernel_name)
        return self.run_python(
            "-m",
            "ipykernel",
            "install",
            "--user",
            "--name",
            self.name,
            "--display-name",
            kernel_name,
        )

    def install_baseline_packages(self):
        """
        Install all packages needed for non-core activity.

        Each mode needs one or more packages to be able to run: pygame zero
        mode needs pgzero and its dependencies; web mode needs Flask and so on.

        We intend to ship with all the necessary wheels for those packages so
        no network access is needed. But if the wheels aren't found, because
        we're not running from an installer, then just pip install in the
        usual way.

        --upgrade is currently used with a thought to upgrade-releases of Mu.
        """
        logger.info("Installing baseline packages.")
        logger.info(
            "%s %s",
            wheels_dirpath,
            "exists" if os.path.isdir(wheels_dirpath) else "does not exist",
        )
        #
        # This command should install the baseline packages, picking up the
        # precompiled wheels from the wheels path
        #
        # For dev purposes (where we might not have the wheels) warn where
        # the wheels are not already present and download them
        #
        wheel_filepaths = glob.glob(os.path.join(wheels_dirpath, "*.whl"))
        if not wheel_filepaths:
            # logger.warn is a deprecated alias; use warning
            logger.warning(
                "No wheels found in %s; downloading...", wheels_dirpath
            )
            wheels.download()
            wheel_filepaths = glob.glob(os.path.join(wheels_dirpath, "*.whl"))
        if not wheel_filepaths:
            raise VirtualEnvironmentError(
                "No wheels in %s; try `python -mmu.wheels`" % wheels_dirpath
            )
        self.reset_pip()
        logger.debug(self.pip.install(wheel_filepaths))

    def register_baseline_packages(self):
        """
        Keep track of the baseline packages installed into the empty venv.
        """
        self.reset_pip()
        packages = list(self.pip.installed())
        self.settings["baseline_packages"] = packages

    def baseline_packages(self):
        """
        Return the list of baseline packages.
        """
        return self.settings.get("baseline_packages")

    def install_user_packages(self, packages, slots=Process.Slots()):
        """
        Install user defined packages.
        """
        logger.info("Installing user packages: %s", ", ".join(packages))
        self.reset_pip()
        self.pip.install(
            packages,
            slots=slots,
            upgrade=True,
        )

    def remove_user_packages(self, packages, slots=Process.Slots()):
        """
        Remove user defined packages.
        """
        logger.info("Removing user packages: %s", ", ".join(packages))
        self.reset_pip()
        self.pip.uninstall(
            packages,
            slots=slots,
        )

    def installed_packages(self):
        """
        List all the third party modules installed by the user in the venv
        containing the referenced Python interpreter.
        """
        logger.info("Discovering installed third party modules in venv.")
        #
        # FIXME: Basically we need a way to distinguish between installed
        # baseline packages and user-added packages. The baseline_packages
        # in this class (or, later, from modes) are just the top-level classes:
        # flask, pgzero etc. But they bring in many others further down. So:
        # we either need to keep track of what's installed as part of the
        # baseline install; or to keep track of what's installed by users.
        # And then we have to hold those in the settings file
        # The latter is probably easier.
        #
        baseline_packages = [
            name for name, version in self.baseline_packages()
        ]
        self.reset_pip()
        user_packages = [
            package
            for package, version in self.pip.installed()
            if package not in baseline_packages
        ]
        logger.info(user_packages)
        return baseline_packages, user_packages
#
# Create a singleton virtual environment to be used throughout
# the application. NB instantiating this reads (and may write) the
# settings file as a side effect of import.
#
venv = VirtualEnvironment()
|
Stratford-Lee Inn Our records show that this inn is closed.
Touch Of The Past Our records show that this inn is closed.
Spring Grove bed and breakfast travel guide for romantic, historic and adventure b&b's. Browse through the iLoveInns.com database of Spring Grove, Minnesota bed and breakfasts and country inns to find detailed listings that include room rates, special deals and area activities. You can click on the 'check rates and availability' button to contact the innkeeper.
|
import abjad
import collections
from abjad.tools import abctools
from abjad.tools import mathtools
from abjad.tools import rhythmmakertools
class MusicSpecifierSequence(abctools.AbjadValueObject):
    r'''A music specifier sequence.

    ::

        >>> sequence_a = consort.MusicSpecifierSequence(
        ...     music_specifiers='music',
        ...     )
        >>> print(format(sequence_a))
        consort.tools.MusicSpecifierSequence(
            music_specifiers=('music',),
            )

    ::

        >>> sequence_b = consort.MusicSpecifierSequence(
        ...     application_rate='phrase',
        ...     music_specifiers=['one', 'two', 'three'],
        ...     )
        >>> print(format(sequence_b))
        consort.tools.MusicSpecifierSequence(
            application_rate='phrase',
            music_specifiers=('one', 'two', 'three'),
            )

    '''

    ### CLASS VARIABLES ###

    __slots__ = (
        '_application_rate',
        '_music_specifiers',
        )

    ### INITIALIZER ###

    def __init__(
        self,
        application_rate=None,
        music_specifiers=None,
        ):
        # NB: a falsy-but-not-None application_rate (eg '') is coerced to
        # 'phrase' before validation; only 'division' and 'phrase' are valid.
        if application_rate is not None:
            application_rate = application_rate or 'phrase'
            assert application_rate in ('division', 'phrase')
        if music_specifiers is None:
            music_specifiers = [None]
        # A bare (non-sequence) specifier, or a single string, is wrapped
        # into a one-element sequence.
        if not isinstance(music_specifiers, collections.Sequence) or \
            isinstance(music_specifiers, str):
            music_specifiers = [music_specifiers]
        music_specifiers = tuple(music_specifiers)
        #music_specifiers = abjad.CyclicTuple(music_specifiers)
        assert len(music_specifiers)
        self._application_rate = application_rate
        self._music_specifiers = music_specifiers

    ### SPECIAL METHODS ###

    def __call__(
        self,
        durations=None,
        layer=None,
        division_mask_seed=0,
        division_masks=None,
        padding=None,
        seed=None,
        start_offset=None,
        timespan_specifier=None,
        voice_name=None,
        ):
        """
        Build a TimespanList of PerformedTimespans, one per pair of
        consecutive offsets derived from `durations`, cycling through the
        music specifiers and optionally applying division masks and
        silent-timespan padding.
        """
        import consort
        timespans = abjad.TimespanList()
        timespan_specifier = timespan_specifier or \
            consort.TimespanSpecifier()
        seed = seed or 0
        division_mask_seed = division_mask_seed or 0
        # Drop zero-length durations before computing offsets
        durations = [_ for _ in durations if _]
        offsets = mathtools.cumulative_sums(durations, start_offset)
        if not offsets:
            return timespans
        offset_pair_count = len(offsets) - 1
        if offset_pair_count == 1:
            offset_pair_count = 2  # make patterns happy
        iterator = consort.iterate_nwise(offsets)
        for i, offset_pair in enumerate(iterator):
            start_offset, stop_offset = offset_pair
            # Choose the specifier cyclically; `seed` only advances per
            # division when application_rate == 'division' (see below)
            music_specifier = self[seed % len(self)]
            timespan = consort.PerformedTimespan(
                forbid_fusing=timespan_specifier.forbid_fusing,
                forbid_splitting=timespan_specifier.forbid_splitting,
                layer=layer,
                minimum_duration=timespan_specifier.minimum_duration,
                music_specifier=music_specifier,
                start_offset=start_offset,
                stop_offset=stop_offset,
                voice_name=voice_name,
                )
            if not division_masks:
                timespans.append(timespan)
            else:
                # A SustainMask (or no matching mask) keeps the timespan;
                # a SilenceMask drops it.
                output_mask = division_masks.get_matching_pattern(
                    i, offset_pair_count + 1, rotation=division_mask_seed)
                if output_mask is None:
                    timespans.append(timespan)
                elif isinstance(output_mask, rhythmmakertools.SustainMask):
                    timespans.append(timespan)
                elif isinstance(output_mask, rhythmmakertools.SilenceMask):
                    pass
            division_mask_seed += 1
            if self.application_rate == 'division':
                seed += 1
        if padding:
            # Surround each contiguous shard of timespans with silent
            # timespans of `padding` duration on either side.
            silent_timespans = abjad.TimespanList()
            for shard in timespans.partition(True):
                silent_timespan_one = consort.SilentTimespan(
                    layer=layer,
                    start_offset=shard.start_offset - padding,
                    stop_offset=shard.start_offset,
                    voice_name=voice_name,
                    )
                silent_timespans.append(silent_timespan_one)
                silent_timespan_two = consort.SilentTimespan(
                    layer=layer,
                    start_offset=shard.stop_offset,
                    stop_offset=shard.stop_offset + padding,
                    voice_name=voice_name,
                    )
                silent_timespans.append(silent_timespan_two)
            silent_timespans.compute_logical_or()
            for timespan in timespans:
                # NOTE(review): result of the subtraction is discarded —
                # presumably TimespanList.__sub__ mutates silent_timespans
                # in place to carve out the performed timespan; confirm
                # against abjad's TimespanList semantics.
                silent_timespans - timespan
            timespans.extend(silent_timespans)
            timespans.sort()
        return timespans

    def __getitem__(self, item):
        return self._music_specifiers[item]

    def __len__(self):
        return len(self._music_specifiers)

    ### PUBLIC METHODS ###

    def transpose(self, expr):
        """
        Return a new sequence with every music specifier transposed by `expr`.
        """
        music_specifiers = [_.transpose(expr) for _ in self.music_specifiers]
        return abjad.new(
            self,
            music_specifiers=music_specifiers,
            )

    ### PUBLIC PROPERTIES ###

    @property
    def application_rate(self):
        return self._application_rate

    @property
    def music_specifiers(self):
        return self._music_specifiers
|
Royal Enfield North America puts on its best face for the United States.
Royal Enfield Milwaukee Grand Opening is from 1 to 7 p.m. Saturday, Sept. 10, 2016 at 226 N. Water St., Milwaukee, Wis. Free to the public.
Today's the grand opening for the Royal Enfield North America showroom in Milwaukee, Wis. It's the continental headquarters as well as a showcase for Royal Enfield motorcycles and gear.
"Wife and I stopped by the RENA Milwaukee corporate office and showroom. They occupy a space of approximately (by my calculations) 7,500 square feet. The showroom and accessories sales area is about 2,500 square feet and is in the front of the space facing the street.
Royal Enfield models line up in spacious showroom.
"We met the PR ladies and they are busy squaring away their marketing... and meeting with dealers showing up for meetings.
"I ran into a gentleman from Pennsylvania interested in getting Royal Enfield into his motorcycle showroom.
Royal Enfield's Milwaukee showroom looks inviting.
"I asked them about the twin and they are candid about moving what they make now and marketing the bikes at a very select and targeted audience: the back road wanderers, the secondary road travellers, people running errands and shade tree mechanics.
Nostalgia, bare brick displays a pride in the past.
...while the Himalayan in the parking lot hints at the future.
Good to see they are thriving and attempting to break into the US market, as they have in UK where they have opened up similar premises.
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from warnings import warn
from email.Utils import formatdate
from twisted.python import log
from zope.interface import implements
from buildbot.process.buildstep import LoggingBuildStep, LoggedRemoteCommand
from buildbot.interfaces import BuildSlaveTooOldError, IRenderable
from buildbot.status.builder import SKIPPED
class _ComputeRepositoryURL(object):
    implements(IRenderable)

    def __init__(self, repository):
        self.repository = repository

    def getRenderingFor(self, build):
        '''
        Compute the repository URL, based on the parameter the source step
        took and the Change 'repository' property.
        '''
        sourcestamp = build.getSourceStamp()
        repository = self.repository
        if not repository:
            return str(sourcestamp.repository)
        if callable(repository):
            return str(build.render(repository(sourcestamp.repository)))
        if isinstance(repository, dict):
            return str(build.render(repository.get(sourcestamp.repository)))
        if isinstance(repository, (str, unicode)):
            try:
                return str(repository % sourcestamp.repository)
            except TypeError:
                # that's the backward compatibility case
                return build.render(repository)
        return str(build.render(repository))
class Source(LoggingBuildStep):
"""This is a base class to generate a source tree in the buildslave.
Each version control system has a specialized subclass, and is expected
to override __init__ and implement computeSourceRevision() and
startVC(). The class as a whole builds up the self.args dictionary, then
starts a LoggedRemoteCommand with those arguments.
"""
renderables = [ 'workdir' ]
# if the checkout fails, there's no point in doing anything else
haltOnFailure = True
flunkOnFailure = True
notReally = False
branch = None # the default branch, should be set in __init__
def __init__(self, workdir=None, mode='update', alwaysUseLatest=False,
timeout=20*60, retry=None, **kwargs):
"""
@type workdir: string
@param workdir: local directory (relative to the Builder's root)
where the tree should be placed
@type mode: string
@param mode: the kind of VC operation that is desired:
- 'update': specifies that the checkout/update should be
performed directly into the workdir. Each build is performed
in the same directory, allowing for incremental builds. This
minimizes disk space, bandwidth, and CPU time. However, it
may encounter problems if the build process does not handle
dependencies properly (if you must sometimes do a 'clean
build' to make sure everything gets compiled), or if source
files are deleted but generated files can influence test
behavior (e.g. python's .pyc files), or when source
directories are deleted but generated files prevent CVS from
removing them. When used with a patched checkout, from a
previous buildbot try for instance, it will try to "revert"
the changes first and will do a clobber if it is unable to
get a clean checkout. The behavior is SCM-dependent.
- 'copy': specifies that the source-controlled workspace
should be maintained in a separate directory (called the
'copydir'), using checkout or update as necessary. For each
build, a new workdir is created with a copy of the source
tree (rm -rf workdir; cp -R -P -p copydir workdir). This
doubles the disk space required, but keeps the bandwidth low
(update instead of a full checkout). A full 'clean' build
is performed each time. This avoids any generated-file
build problems, but is still occasionally vulnerable to
problems such as a CVS repository being manually rearranged
(causing CVS errors on update) which are not an issue with
a full checkout.
- 'clobber': specifies that the working directory should be
deleted each time, necessitating a full checkout for each
build. This insures a clean build off a complete checkout,
avoiding any of the problems described above, but is
bandwidth intensive, as the whole source tree must be
pulled down for each build.
- 'export': is like 'clobber', except that e.g. the 'cvs
export' command is used to create the working directory.
This command removes all VC metadata files (the
CVS/.svn/{arch} directories) from the tree, which is
sometimes useful for creating source tarballs (to avoid
including the metadata in the tar file). Not all VC systems
support export.
@type alwaysUseLatest: boolean
@param alwaysUseLatest: whether to always update to the most
recent available sources for this build.
Normally the Source step asks its Build for a list of all
Changes that are supposed to go into the build, then computes a
'source stamp' (revision number or timestamp) that will cause
exactly that set of changes to be present in the checked out
tree. This is turned into, e.g., 'cvs update -D timestamp', or
'svn update -r revnum'. If alwaysUseLatest=True, bypass this
computation and always update to the latest available sources
for each build.
The source stamp helps avoid a race condition in which someone
commits a change after the master has decided to start a build
but before the slave finishes checking out the sources. At best
this results in a build which contains more changes than the
buildmaster thinks it has (possibly resulting in the wrong
person taking the blame for any problems that result), at worst
is can result in an incoherent set of sources (splitting a
non-atomic commit) which may not build at all.
@type retry: tuple of ints (delay, repeats) (or None)
@param retry: if provided, VC update failures are re-attempted up
to REPEATS times, with DELAY seconds between each
attempt. Some users have slaves with poor connectivity
to their VC repository, and they say that up to 80% of
their build failures are due to transient network
failures that could be handled by simply retrying a
couple times.
"""
LoggingBuildStep.__init__(self, **kwargs)
self.addFactoryArguments(workdir=workdir,
mode=mode,
alwaysUseLatest=alwaysUseLatest,
timeout=timeout,
retry=retry,
)
assert mode in ("update", "copy", "clobber", "export")
if retry:
delay, repeats = retry
assert isinstance(repeats, int)
assert repeats > 0
self.args = {'mode': mode,
'timeout': timeout,
'retry': retry,
'patch': None, # set during .start
}
# This will get added to args later, after properties are rendered
self.workdir = workdir
self.alwaysUseLatest = alwaysUseLatest
# Compute defaults for descriptions:
description = ["updating"]
descriptionDone = ["update"]
if mode == "clobber":
description = ["checkout"]
# because checkingouting takes too much space
descriptionDone = ["checkout"]
elif mode == "export":
description = ["exporting"]
descriptionDone = ["export"]
self.description = description
self.descriptionDone = descriptionDone
def setStepStatus(self, step_status):
LoggingBuildStep.setStepStatus(self, step_status)
def setDefaultWorkdir(self, workdir):
self.workdir = self.workdir or workdir
def describe(self, done=False):
if done:
return self.descriptionDone
return self.description
def computeSourceRevision(self, changes):
    """Derive the revision to check out from a list of Changes.

    The base implementation always returns None, meaning "use the latest
    sources" (-rHEAD).  Subclasses override this to do something more
    precise: VC systems with repository-wide change numbers (SVN, P4)
    can take the maximum number across the changes; systems without
    them (CVS) build a timestamp from the latest Change, the Build's
    treeStableTimer, and an optional self.checkoutDelay.
    """
    return None
def start(self):
    """Work out branch/revision/patch from the SourceStamp and launch VC."""
    if self.notReally:
        # Debug/dry-run mode: log a fake success and skip the real work.
        log.msg("faking %s checkout/update" % self.name)
        self.step_status.setText(["fake", self.name, "successful"])
        self.addCompleteLog("log",
                            "Faked %s checkout/update 'successful'\n" \
                            % self.name)
        return SKIPPED

    # Allow workdir to be WithProperties
    self.args['workdir'] = self.workdir

    # what source stamp would this build like to use?
    s = self.build.getSourceStamp()
    # if branch is None, then use the Step's "default" branch
    branch = s.branch or self.branch
    # if revision is None, use the latest sources (-rHEAD)
    revision = s.revision
    if not revision and not self.alwaysUseLatest:
        revision = self.computeSourceRevision(s.changes)
        # the revision property is currently None, so set it to something
        # more interesting
        if revision is not None:
            self.setProperty('revision', str(revision), "Source")

    # if patch is None, then do not patch the tree after checkout

    # 'patch' is None or a tuple of (patchlevel, diff, root)
    # root is optional.
    patch = s.patch
    if patch:
        self.addCompleteLog("patch", patch[1])

    if self.alwaysUseLatest:
        # forced "latest" overrides whatever revision was computed above
        revision = None
    self.startVC(branch, revision, patch)
def commandComplete(self, cmd):
    """Record the slave-reported revision as the 'got_revision' property.

    The slave may report 'got_revision' several times; only the most
    recent value is kept.  Nothing is set when the slave did not report
    a revision at all.
    """
    # IDIOM FIX: dict.has_key() was removed in Python 3; the 'in'
    # operator is equivalent and also works on Python 2.
    if "got_revision" in cmd.updates:
        got_revision = cmd.updates["got_revision"][-1]
        if got_revision is not None:
            self.setProperty("got_revision", str(got_revision), "Source")
class BK(Source):
    """I perform BitKeeper checkout/update operations."""

    name = 'bk'

    renderables = [ 'bkurl', 'baseURL' ]

    def __init__(self, bkurl=None, baseURL=None,
                 directory=None, extra_args=None, **kwargs):
        """
        @type  bkurl: string
        @param bkurl: the URL which points to the BitKeeper server.

        @type  baseURL: string
        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{bkurl} and C{baseURL}.
        """
        self.bkurl = _ComputeRepositoryURL(bkurl)
        self.baseURL = _ComputeRepositoryURL(baseURL)
        self.extra_args = extra_args
        Source.__init__(self, **kwargs)
        # 'directory' is only recorded for factory reconstruction; it is
        # not used directly by this step.
        self.addFactoryArguments(bkurl=bkurl,
                                 baseURL=baseURL,
                                 directory=directory,
                                 extra_args=extra_args,
                                 )
        if bkurl and baseURL:
            raise ValueError("you must use exactly one of bkurl and baseURL")

    def computeSourceRevision(self, changes):
        # BUG FIX: 'changes' is a list of Change objects (Source.start
        # passes s.changes), so the original 'changes.revision' would
        # raise AttributeError.  Follow the convention of the other VC
        # steps (Git, Repo) and use the most recent change's revision;
        # return None (meaning "latest") when there are no changes.
        if not changes:
            return None
        return changes[-1].revision

    def startVC(self, branch, revision, patch):
        warnings = []
        slavever = self.slaveVersion("bk")
        if not slavever:
            m = "slave does not have the 'bk' command"
            raise BuildSlaveTooOldError(m)

        if self.bkurl:
            assert not branch # we need baseURL= to use branches
            self.args['bkurl'] = self.bkurl
        else:
            self.args['bkurl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch
        self.args['branch'] = branch
        if self.extra_args is not None:
            self.args['extra_args'] = self.extra_args

        # annotate the step description with what is being built
        revstuff = []
        revstuff.append("[branch]")
        if revision is not None:
            revstuff.append("r%s" % revision)
        if patch is not None:
            revstuff.append("[patch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("bk", self.args)
        self.startCommand(cmd, warnings)
class CVS(Source):
    """I do CVS checkout/update operations.

    Note: if you are doing anonymous/pserver CVS operations, you will need
    to manually do a 'cvs login' on each buildslave before the slave has any
    hope of success. XXX: fix then, take a cvs password as an argument and
    figure out how to do a 'cvs login' on each build
    """

    name = "cvs"

    renderables = [ "cvsroot" ]

    #progressMetrics = ('output',)
    #
    # additional things to track: update gives one stderr line per directory
    # (starting with 'cvs server: Updating ') (and is fairly stable if files
    # is empty), export gives one line per directory (starting with 'cvs
    # export: Updating ') and another line per file (starting with U). Would
    # be nice to track these, requires grepping LogFile data for lines,
    # parsing each line. Might be handy to have a hook in LogFile that gets
    # called with each complete line.

    # NOTE(review): the mutable list defaults ([]) below are only read, never
    # mutated here, so they are harmless — but they are still shared across
    # instances; confirm before mutating them anywhere.
    def __init__(self, cvsroot=None, cvsmodule="",
                 global_options=[], branch=None, checkoutDelay=None,
                 checkout_options=[], export_options=[], extra_options=[],
                 login=None,
                 **kwargs):
        """
        @type  cvsroot: string
        @param cvsroot: CVS Repository from which the source tree should
                        be obtained. '/home/warner/Repository' for local
                        or NFS-reachable repositories,
                        ':pserver:anon@foo.com:/cvs' for anonymous CVS,
                        'user@host.com:/cvs' for non-anonymous CVS or
                        CVS over ssh. Lots of possibilities, check the
                        CVS documentation for more.

        @type  cvsmodule: string
        @param cvsmodule: subdirectory of CVS repository that should be
                          retrieved

        @type  login: string or None
        @param login: if not None, a string which will be provided as a
                      password to the 'cvs login' command, used when a
                      :pserver: method is used to access the repository.
                      This login is only needed once, but must be run
                      each time (just before the CVS operation) because
                      there is no way for the buildslave to tell whether
                      it was previously performed or not.

        @type  branch: string
        @param branch: the default branch name, will be used in a '-r'
                       argument to specify which branch of the source tree
                       should be used for this checkout. Defaults to None,
                       which means to use 'HEAD'.

        @type  checkoutDelay: int or None
        @param checkoutDelay: if not None, the number of seconds to put
                              between the last known Change and the
                              timestamp given to the -D argument. This
                              defaults to exactly half of the parent
                              Build's .treeStableTimer, but it could be
                              set to something else if your CVS change
                              notification has particularly weird
                              latency characteristics.

        @type  global_options: list of strings
        @param global_options: these arguments are inserted in the cvs
                               command line, before the
                               'checkout'/'update' command word. See
                               'cvs --help-options' for a list of what
                               may be accepted here.  ['-r'] will make
                               the checked out files read only. ['-r',
                               '-R'] will also assume the repository is
                               read-only (I assume this means it won't
                               use locks to insure atomic access to the
                               ,v files).

        @type  checkout_options: list of strings
        @param checkout_options: these arguments are inserted in the cvs
                               command line, after 'checkout' but before
                               branch or revision specifiers.

        @type  export_options: list of strings
        @param export_options: these arguments are inserted in the cvs
                               command line, after 'export' but before
                               branch or revision specifiers.

        @type  extra_options: list of strings
        @param extra_options: these arguments are inserted in the cvs
                               command line, after 'checkout' or 'export' but before
                               branch or revision specifiers.
        """
        self.checkoutDelay = checkoutDelay
        self.branch = branch
        self.cvsroot = _ComputeRepositoryURL(cvsroot)

        Source.__init__(self, **kwargs)
        self.addFactoryArguments(cvsroot=cvsroot,
                                 cvsmodule=cvsmodule,
                                 global_options=global_options,
                                 checkout_options=checkout_options,
                                 export_options=export_options,
                                 extra_options=extra_options,
                                 branch=branch,
                                 checkoutDelay=checkoutDelay,
                                 login=login,
                                 )

        self.args.update({'cvsmodule': cvsmodule,
                          'global_options': global_options,
                          'checkout_options':checkout_options,
                          'export_options':export_options,
                          'extra_options':extra_options,
                          'login': login,
                          })

    def computeSourceRevision(self, changes):
        # CVS has no global revision numbers, so "revision" is a -D
        # timestamp string derived from the newest triggering Change.
        if not changes:
            return None
        lastChange = max([c.when for c in changes])
        if self.checkoutDelay is not None:
            when = lastChange + self.checkoutDelay
        else:
            # No explicit delay: split the difference between the last
            # change and the last build-request submission time.
            lastSubmit = max([br.submittedAt for br in self.build.requests])
            when = (lastChange + lastSubmit) / 2
        return formatdate(when)

    def startVC(self, branch, revision, patch):
        if self.slaveVersionIsOlderThan("cvs", "1.39"):
            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                log.msg(m)
                raise BuildSlaveTooOldError(m)

        if self.slaveVersionIsOlderThan("cvs", "2.10"):
            if self.args['extra_options'] or self.args['export_options']:
                m = ("This buildslave (%s) does not support export_options "
                     "or extra_options arguments to the CVS step."
                     % (self.build.slavename))
                log.msg(m)
                raise BuildSlaveTooOldError(m)
            # the unwanted args are empty, and will probably be ignored by
            # the slave, but delete them just to be safe
            del self.args['export_options']
            del self.args['extra_options']

        if branch is None:
            branch = "HEAD"
        self.args['cvsroot'] = self.cvsroot
        self.args['branch'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        if self.args['branch'] == "HEAD" and self.args['revision']:
            # special case. 'cvs update -r HEAD -D today' gives no files
            # TODO: figure out why, see if it applies to -r BRANCH
            self.args['branch'] = None

        # deal with old slaves
        warnings = []
        slavever = self.slaveVersion("cvs", "old")

        if slavever == "old":
            # 0.5.0
            if self.args['mode'] == "export":
                self.args['export'] = 1
            elif self.args['mode'] == "clobber":
                self.args['clobber'] = 1
            elif self.args['mode'] == "copy":
                self.args['copydir'] = "source"
            self.args['tag'] = self.args['branch']
            assert not self.args['patch'] # 0.5.0 slave can't do patch

        cmd = LoggedRemoteCommand("cvs", self.args)
        self.startCommand(cmd, warnings)
class SVN(Source):
    """I perform Subversion checkout/update operations."""

    name = 'svn'
    # Token in baseURL that gets substituted with the branch name.
    branch_placeholder = '%%BRANCH%%'

    renderables = [ 'svnurl', 'baseURL' ]

    def __init__(self, svnurl=None, baseURL=None, defaultBranch=None,
                 directory=None, username=None, password=None,
                 extra_args=None, keep_on_purge=None, ignore_ignores=None,
                 always_purge=None, depth=None, **kwargs):
        """
        @type  svnurl: string
        @param svnurl: the URL which points to the Subversion server,
                       combining the access method (HTTP, ssh, local file),
                       the repository host/port, the repository path, the
                       sub-tree within the repository, and the branch to
                       check out. Use exactly one of C{svnurl} and C{baseURL}.

        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{svnurl} and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly. It will simply be appended
                              to C{baseURL} and the result handed to
                              the SVN command.

        @type  username: string
        @param username: username to pass to svn's --username

        @type  password: string
        @param password: password to pass to svn's --password
        """
        if not 'workdir' in kwargs and directory is not None:
            # deal with old configs
            warn("Please use workdir=, not directory=", DeprecationWarning)
            kwargs['workdir'] = directory

        self.svnurl = svnurl and _ComputeRepositoryURL(svnurl)
        self.baseURL = _ComputeRepositoryURL(baseURL)
        self.branch = defaultBranch
        self.username = username
        self.password = password
        self.extra_args = extra_args
        self.keep_on_purge = keep_on_purge
        self.ignore_ignores = ignore_ignores
        self.always_purge = always_purge
        self.depth = depth

        Source.__init__(self, **kwargs)
        self.addFactoryArguments(svnurl=svnurl,
                                 baseURL=baseURL,
                                 defaultBranch=defaultBranch,
                                 directory=directory,
                                 username=username,
                                 password=password,
                                 extra_args=extra_args,
                                 keep_on_purge=keep_on_purge,
                                 ignore_ignores=ignore_ignores,
                                 always_purge=always_purge,
                                 depth=depth,
                                 )

        if svnurl and baseURL:
            raise ValueError("you must use either svnurl OR baseURL")

    def computeSourceRevision(self, changes):
        # Use the highest (most recent) repository-wide revision number
        # among the changes; fall back to "latest" if any change lacks a
        # numeric revision.
        if not changes or None in [c.revision for c in changes]:
            return None
        lastChange = max([int(c.revision) for c in changes])
        return lastChange

    def checkCompatibility(self):
        ''' Handle compatibility between old slaves/svn clients '''

        slavever = self.slaveVersion("svn", "old")

        if not slavever:
            m = "slave does not have the 'svn' command"
            raise BuildSlaveTooOldError(m)

        if self.slaveVersionIsOlderThan("svn", "1.39"):
            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (self.args['branch'] != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                raise BuildSlaveTooOldError(m)

        if (self.depth is not None) and self.slaveVersionIsOlderThan("svn","2.9"):
            m = ("This buildslave (%s) does not support svn depth "
                 "arguments. Refusing to build. "
                 "Please upgrade the buildslave." % (self.build.slavename))
            raise BuildSlaveTooOldError(m)

        if (self.username is not None or self.password is not None) \
        and self.slaveVersionIsOlderThan("svn", "2.8"):
            m = ("This buildslave (%s) does not support svn usernames "
                 "and passwords. "
                 "Refusing to build. Please upgrade the buildslave to "
                 "buildbot-0.7.10 or newer." % (self.build.slavename,))
            raise BuildSlaveTooOldError(m)

    def getSvnUrl(self, branch, revision, patch):
        ''' Compute the svn url that will be passed to the svn remote command '''
        if self.svnurl:
            # fixed-URL mode: branches are not supported
            return self.svnurl
        else:
            if branch is None:
                m = ("The SVN source step belonging to builder '%s' does not know "
                     "which branch to work with. This means that the change source "
                     "did not specify a branch and that defaultBranch is None." \
                     % self.build.builder.name)
                raise RuntimeError(m)

            computed = self.baseURL

            # Either substitute the placeholder or append the branch name.
            if self.branch_placeholder in self.baseURL:
                return computed.replace(self.branch_placeholder, branch)
            else:
                return computed + branch

    def startVC(self, branch, revision, patch):
        warnings = []

        self.checkCompatibility()

        self.args['svnurl'] = self.getSvnUrl(branch, revision, patch)
        self.args['revision'] = revision
        self.args['patch'] = patch
        self.args['always_purge'] = self.always_purge

        #Set up depth if specified
        if self.depth is not None:
            self.args['depth'] = self.depth

        if self.username is not None:
            self.args['username'] = self.username
        if self.password is not None:
            self.args['password'] = self.password

        if self.extra_args is not None:
            self.args['extra_args'] = self.extra_args

        # annotate the step description with branch/revision/patch info
        revstuff = []
        #revstuff.append(self.args['svnurl'])
        if self.args['svnurl'].find('trunk') == -1:
            revstuff.append("[branch]")
        if revision is not None:
            revstuff.append("r%s" % revision)
        if patch is not None:
            revstuff.append("[patch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("svn", self.args)
        self.startCommand(cmd, warnings)
class Darcs(Source):
    """Check out a source tree from a Darcs repository at 'repourl'.

    Darcs has no concept of file modes. This means the eXecute-bit will be
    cleared on all source files. As a result, you may need to invoke
    configuration scripts with something like:

    C{s(step.Configure, command=['/bin/sh', './configure'])}
    """

    name = "darcs"

    renderables = [ 'repourl', 'baseURL' ]

    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
                 **kwargs):
        """
        @type  repourl: string
        @param repourl: the URL which points at the Darcs repository. This
                        is used as the default branch. Using C{repourl} does
                        not enable builds of alternate branches: use
                        C{baseURL} to enable this. Use either C{repourl} or
                        C{baseURL}, not both.

        @param baseURL: if branches are enabled, this is the base URL to
                        which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of
                        C{repourl} and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly. It will simply be appended to
                              C{baseURL} and the result handed to the
                              'darcs pull' command.
        """
        self.repourl = _ComputeRepositoryURL(repourl)
        self.baseURL = _ComputeRepositoryURL(baseURL)
        self.branch = defaultBranch
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(repourl=repourl,
                                 baseURL=baseURL,
                                 defaultBranch=defaultBranch,
                                 )
        # Darcs cannot export a tree without VC metadata.
        assert self.args['mode'] != "export", \
               "Darcs does not have an 'export' mode"
        if repourl and baseURL:
            raise ValueError("you must provide exactly one of repourl and"
                             " baseURL")

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("darcs")
        if not slavever:
            m = "slave is too old, does not know about darcs"
            raise BuildSlaveTooOldError(m)

        if self.slaveVersionIsOlderThan("darcs", "1.39"):
            if revision:
                # TODO: revisit this once we implement computeSourceRevision
                m = "0.6.6 slaves can't handle args['revision']"
                raise BuildSlaveTooOldError(m)

            # the slave doesn't know to avoid re-using the same sourcedir
            # when the branch changes. We have no way of knowing which branch
            # the last build used, so if we're using a non-default branch and
            # either 'update' or 'copy' modes, it is safer to refuse to
            # build, and tell the user they need to upgrade the buildslave.
            if (branch != self.branch
                and self.args['mode'] in ("update", "copy")):
                m = ("This buildslave (%s) does not know about multiple "
                     "branches, and using mode=%s would probably build the "
                     "wrong tree. "
                     "Refusing to build. Please upgrade the buildslave to "
                     "buildbot-0.7.0 or newer." % (self.build.slavename,
                                                   self.args['mode']))
                raise BuildSlaveTooOldError(m)

        if self.repourl:
            assert not branch # we need baseURL= to use branches
            self.args['repourl'] = self.repourl
        else:
            self.args['repourl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        # annotate the step description when building a non-default branch
        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("darcs", self.args)
        self.startCommand(cmd)
class Git(Source):
    """Check out a source tree from a git repository 'repourl'."""

    name = "git"

    renderables = [ 'repourl' ]

    def __init__(self, repourl=None,
                 branch="master",
                 submodules=False,
                 ignore_ignores=None,
                 reference=None,
                 shallow=False,
                 progress=False,
                 **kwargs):
        """
        @type  repourl: string
        @param repourl: the URL which points at the git repository

        @type  branch: string
        @param branch: The branch or tag to check out by default. If
                       a build specifies a different branch, it will
                       be used instead of this.

        @type  submodules: boolean
        @param submodules: Whether or not to update (and initialize)
                       git submodules.

        @type  reference: string
        @param reference: The path to a reference repository to obtain
                          objects from, if any.

        @type  shallow: boolean
        @param shallow: Use a shallow or clone, if possible

        @type  progress: boolean
        @param progress: Pass the --progress option when fetching. This
                         can solve long fetches getting killed due to
                         lack of output, but requires Git 1.7.2+.
        """
        Source.__init__(self, **kwargs)
        self.repourl = _ComputeRepositoryURL(repourl)
        self.branch = branch
        self.addFactoryArguments(repourl=repourl,
                                 branch=branch,
                                 submodules=submodules,
                                 ignore_ignores=ignore_ignores,
                                 reference=reference,
                                 shallow=shallow,
                                 progress=progress,
                                 )
        self.args.update({'submodules': submodules,
                          'ignore_ignores': ignore_ignores,
                          'reference': reference,
                          'shallow': shallow,
                          'progress': progress,
                          })

    def computeSourceRevision(self, changes):
        # Assume changes arrive in order; build the most recent one.
        if not changes:
            return None
        return changes[-1].revision

    def startVC(self, branch, revision, patch):
        self.args['branch'] = branch
        self.args['repourl'] = self.repourl
        self.args['revision'] = revision
        self.args['patch'] = patch

        # check if there is any patchset we should fetch from Gerrit
        try:
            # GerritChangeSource sets this property directly
            self.args['gerrit_branch'] = self.build.getProperty("event.patchSet.ref")
            self.setProperty("gerrit_branch", self.args['gerrit_branch'])
        except KeyError:
            try:
                # forced build: user supplies "<change-number>/<patchset>"
                change = self.build.getProperty("gerrit_change").split('/')
                if len(change) == 2:
                    self.args['gerrit_branch'] = "refs/changes/%2.2d/%d/%d" \
                        % (int(change[0]) % 100, int(change[0]), int(change[1]))
                    self.setProperty("gerrit_branch", self.args['gerrit_branch'])
            # BUG FIX: this was a bare 'except:', which silently swallowed
            # *every* exception, including KeyboardInterrupt/SystemExit and
            # genuine bugs.  Only the expected failures are benign: the
            # property may be missing (KeyError) or malformed (ValueError
            # from int()).
            except (KeyError, ValueError):
                pass

        slavever = self.slaveVersion("git")
        if not slavever:
            raise BuildSlaveTooOldError("slave is too old, does not know "
                                        "about git")
        cmd = LoggedRemoteCommand("git", self.args)
        self.startCommand(cmd)
class Repo(Source):
    """Check out a source tree from a repo repository described by manifest."""

    name = "repo"

    renderables = [ "manifest_url" ]

    def __init__(self,
                 manifest_url=None,
                 manifest_branch="master",
                 manifest_file="default.xml",
                 tarball=None,
                 **kwargs):
        """
        @type  manifest_url: string
        @param manifest_url: The URL which points at the repo manifests repository.

        @type  manifest_branch: string
        @param manifest_branch: The manifest branch to check out by default.

        @type  manifest_file: string
        @param manifest_file: The manifest to use for sync.
        """
        Source.__init__(self, **kwargs)
        self.manifest_url = _ComputeRepositoryURL(manifest_url)
        self.addFactoryArguments(manifest_url=manifest_url,
                                 manifest_branch=manifest_branch,
                                 manifest_file=manifest_file,
                                 tarball=tarball,
                                 )
        self.args.update({'manifest_branch': manifest_branch,
                          'manifest_file': manifest_file,
                          'tarball': tarball,
                          })

    def computeSourceRevision(self, changes):
        # Assume changes arrive in order; build the most recent one.
        if not changes:
            return None
        return changes[-1].revision

    def parseDownloadProperty(self, s):
        """
        lets try to be nice in the format we want
        can support several instances of "repo download proj number/patch" (direct copy paste from gerrit web site)
        or several instances of "proj number/patch" (simpler version)
        This feature allows integrator to build with several pending interdependant changes.
        returns list of repo downloads sent to the buildslave
        """
        import re
        # IDIOM FIX: compare against None with 'is', not '=='.
        if s is None:
            return []
        re1 = re.compile("repo download ([^ ]+) ([0-9]+/[0-9]+)")
        re2 = re.compile("([^ ]+) ([0-9]+/[0-9]+)")
        re3 = re.compile("([^ ]+)/([0-9]+/[0-9]+)")
        ret = []
        # Try each accepted syntax in turn; every match is cut out of the
        # string so the looser patterns cannot re-match the same text.
        for cur_re in [re1, re2, re3]:
            res = cur_re.search(s)
            while res:
                ret.append("%s %s" % (res.group(1), res.group(2)))
                s = s[:res.start(0)] + s[res.end(0):]
                res = cur_re.search(s)
        return ret

    def startVC(self, branch, revision, patch):
        self.args['manifest_url'] = self.manifest_url

        # only master has access to properties, so we must implement this here.
        downloads = []

        # download patches based on GerritChangeSource events
        for change in self.build.allChanges():
            # IDIOM FIX: has_key() was removed in Python 3; 'in' is the
            # equivalent membership test and also works on Python 2.
            if ("event.type" in change.properties and
                change.properties["event.type"] == "patchset-created"):
                downloads.append("%s %s/%s"% (change.properties["event.change.project"],
                                              change.properties["event.change.number"],
                                              change.properties["event.patchSet.number"]))

        # download patches based on web site forced build properties:
        # "repo_d", "repo_d0", .., "repo_d9"
        # "repo_download", "repo_download0", .., "repo_download9"
        # (range() is used instead of xrange() for Python 3 compatibility;
        # on Python 2 it just builds a ten-element list.)
        for propName in ["repo_d"] + ["repo_d%d" % i for i in range(0, 10)] + \
                        ["repo_download"] + ["repo_download%d" % i for i in range(0, 10)]:
            try:
                s = self.build.getProperty(propName)
                downloads.extend(self.parseDownloadProperty(s))
            except KeyError:
                pass

        if downloads:
            self.args["repo_downloads"] = downloads
            self.setProperty("repo_downloads", downloads)

        slavever = self.slaveVersion("repo")
        if not slavever:
            raise BuildSlaveTooOldError("slave is too old, does not know "
                                        "about repo")

        cmd = LoggedRemoteCommand("repo", self.args)
        self.startCommand(cmd)

    def commandComplete(self, cmd):
        """Export the list of patches the slave actually downloaded."""
        # IDIOM FIX: has_key() -> 'in' (see startVC).
        if "repo_downloaded" in cmd.updates:
            repo_downloaded = cmd.updates["repo_downloaded"][-1]
            if repo_downloaded:
                self.setProperty("repo_downloaded", str(repo_downloaded), "Source")
class Bzr(Source):
    """Fetch sources from a bzr (Bazaar) branch.

    Exactly one of C{repourl} (a single fixed branch) or C{baseURL}
    (a prefix to which a per-build branch name is appended) must be
    supplied.
    """

    name = "bzr"

    renderables = [ 'repourl', 'baseURL' ]

    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
                 forceSharedRepo=None,
                 **kwargs):
        """
        @param repourl: URL of the bzr branch to build.  Using C{repourl}
                        disables alternate-branch builds; use C{baseURL}
                        for those instead.  Supply either C{repourl} or
                        C{baseURL}, never both.

        @param baseURL: base URL to which a branch name is appended when
                        branches are enabled (should usually end in a
                        slash).  Mutually exclusive with C{repourl}.

        @param defaultBranch: branch to build when the Build does not name
                              one explicitly; appended to C{baseURL} and
                              handed to the 'bzr checkout pull' command.

        @param forceSharedRepo: if True, turn the working directory into a
                                bzr shared repository when it is not one
                                already.  A shared repository greatly
                                reduces the history data downloaded when
                                not using update/copy mode, or when using
                                update/copy mode with several branches.
        """
        self.repourl = _ComputeRepositoryURL(repourl)
        self.baseURL = _ComputeRepositoryURL(baseURL)
        self.branch = defaultBranch
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(repourl=repourl,
                                 baseURL=baseURL,
                                 defaultBranch=defaultBranch,
                                 forceSharedRepo=forceSharedRepo
                                 )
        self.args.update({'forceSharedRepo': forceSharedRepo})
        if repourl and baseURL:
            raise ValueError("you must provide exactly one of repourl and"
                             " baseURL")

    def computeSourceRevision(self, changes):
        """Build against the highest bzr revno among the changes."""
        if not changes:
            return None
        return max(int(change.revision) for change in changes)

    def startVC(self, branch, revision, patch):
        if not self.slaveVersion("bzr"):
            raise BuildSlaveTooOldError(
                "slave is too old, does not know about bzr")

        if self.repourl:
            # a fixed repourl is branchless; branches need baseURL=
            assert not branch
            self.args['repourl'] = self.repourl
        else:
            self.args['repourl'] = self.baseURL + branch
        self.args['revision'] = revision
        self.args['patch'] = patch

        # annotate the step description with branch/revision info
        notes = []
        if branch is not None and branch != self.branch:
            notes.append("[" + branch + "]")
        if revision is not None:
            notes.append("r%s" % revision)
        self.description.extend(notes)
        self.descriptionDone.extend(notes)

        self.startCommand(LoggedRemoteCommand("bzr", self.args))
class Mercurial(Source):
    """Check out a source tree from a mercurial repository 'repourl'."""

    name = "hg"

    renderables = [ 'repourl', 'baseURL' ]

    def __init__(self, repourl=None, baseURL=None, defaultBranch=None,
                 branchType='dirname', clobberOnBranchChange=True, **kwargs):
        """
        @type  repourl: string
        @param repourl: the URL which points at the Mercurial repository.
                        This uses the 'default' branch unless defaultBranch is
                        specified below and the C{branchType} is set to
                        'inrepo'. It is an error to specify a branch without
                        setting the C{branchType} to 'inrepo'.

        @param baseURL: if 'dirname' branches are enabled, this is the base URL
                        to which a branch name will be appended. It should
                        probably end in a slash. Use exactly one of C{repourl}
                        and C{baseURL}.

        @param defaultBranch: if branches are enabled, this is the branch
                              to use if the Build does not specify one
                              explicitly.
                              For 'dirname' branches, It will simply be
                              appended to C{baseURL} and the result handed to
                              the 'hg update' command.
                              For 'inrepo' branches, this specifies the named
                              revision to which the tree will update after a
                              clone.

        @param branchType: either 'dirname' or 'inrepo' depending on whether
                           the branch name should be appended to the C{baseURL}
                           or the branch is a mercurial named branch and can be
                           found within the C{repourl}

        @param clobberOnBranchChange: boolean, defaults to True. If set and
                                      using inrepos branches, clobber the tree
                                      at each branch change. Otherwise, just
                                      update to the branch.
        """
        self.repourl = _ComputeRepositoryURL(repourl)
        self.baseURL = _ComputeRepositoryURL(baseURL)
        self.branch = defaultBranch
        self.branchType = branchType
        self.clobberOnBranchChange = clobberOnBranchChange
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(repourl=repourl,
                                 baseURL=baseURL,
                                 defaultBranch=defaultBranch,
                                 branchType=branchType,
                                 clobberOnBranchChange=clobberOnBranchChange,
                                 )
        if repourl and baseURL:
            raise ValueError("you must provide exactly one of repourl and"
                             " baseURL")

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("hg")
        if not slavever:
            raise BuildSlaveTooOldError("slave is too old, does not know "
                                        "about hg")

        if self.repourl:
            # we need baseURL= to use dirname branches
            assert self.branchType == 'inrepo' or not branch
            self.args['repourl'] = self.repourl
            if branch:
                self.args['branch'] = branch
        else:
            # dirname branches: the branch name is part of the URL
            self.args['repourl'] = self.baseURL + (branch or '')
        self.args['revision'] = revision
        self.args['patch'] = patch
        self.args['clobberOnBranchChange'] = self.clobberOnBranchChange
        self.args['branchType'] = self.branchType

        # annotate the step description when building a non-default branch
        revstuff = []
        if branch is not None and branch != self.branch:
            revstuff.append("[branch]")
        self.description.extend(revstuff)
        self.descriptionDone.extend(revstuff)

        cmd = LoggedRemoteCommand("hg", self.args)
        self.startCommand(cmd)

    def computeSourceRevision(self, changes):
        if not changes:
            return None
        # without knowing the revision ancestry graph, we can't sort the
        # changes at all. So for now, assume they were given to us in sorted
        # order, and just pay attention to the last one. See ticket #103 for
        # more details.
        if len(changes) > 1:
            log.msg("Mercurial.computeSourceRevision: warning: "
                    "there are %d changes here, assuming the last one is "
                    "the most recent" % len(changes))
        return changes[-1].revision
class P4(Source):
    """ P4 is a class for accessing perforce revision control"""

    name = "p4"

    renderables = [ 'p4base' ]

    # NOTE(review): the mutable default p4extra_views=[] is only read here,
    # never mutated, so it is harmless — but it is shared across instances;
    # confirm before mutating it anywhere.
    def __init__(self, p4base=None, defaultBranch=None, p4port=None, p4user=None,
                 p4passwd=None, p4extra_views=[], p4line_end='local',
                 p4client='buildbot_%(slave)s_%(builder)s', **kwargs):
        """
        @type  p4base: string
        @param p4base: A view into a perforce depot, typically
                       "//depot/proj/"

        @type  defaultBranch: string
        @param defaultBranch: Identify a branch to build by default. Perforce
                              is a view based branching system. So, the branch
                              is normally the name after the base. For example,
                              branch=1.0 is view=//depot/proj/1.0/...
                              branch=1.1 is view=//depot/proj/1.1/...

        @type  p4port: string
        @param p4port: Specify the perforce server to connection in the format
                       <host>:<port>. Example "perforce.example.com:1666"

        @type  p4user: string
        @param p4user: The perforce user to run the command as.

        @type  p4passwd: string
        @param p4passwd: The password for the perforce user.

        @type  p4extra_views: list of tuples
        @param p4extra_views: Extra views to be added to
                              the client that is being used.

        @type  p4line_end: string
        @param p4line_end: value of the LineEnd client specification property

        @type  p4client: string
        @param p4client: The perforce client to use for this buildslave.
        """
        self.p4base = _ComputeRepositoryURL(p4base)
        self.branch = defaultBranch
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(p4base=p4base,
                                 defaultBranch=defaultBranch,
                                 p4port=p4port,
                                 p4user=p4user,
                                 p4passwd=p4passwd,
                                 p4extra_views=p4extra_views,
                                 p4line_end=p4line_end,
                                 p4client=p4client,
                                 )
        self.args['p4port'] = p4port
        self.args['p4user'] = p4user
        self.args['p4passwd'] = p4passwd
        self.args['p4extra_views'] = p4extra_views
        self.args['p4line_end'] = p4line_end
        # kept as a template; interpolated per-build in setBuild()
        self.p4client = p4client

    def setBuild(self, build):
        Source.setBuild(self, build)
        # Expand the client-name template with this build's slave/builder so
        # each (slave, builder) pair gets a distinct perforce client.
        self.args['p4client'] = self.p4client % {
            'slave': build.slavename,
            'builder': build.builder.name,
        }

    def computeSourceRevision(self, changes):
        # P4 changelist numbers are repository-wide: build the highest one.
        if not changes:
            return None
        lastChange = max([int(c.revision) for c in changes])
        return lastChange

    def startVC(self, branch, revision, patch):
        slavever = self.slaveVersion("p4")
        assert slavever, "slave is too old, does not know about p4"
        # copy self.args so the per-build additions don't accumulate
        args = dict(self.args)
        args['p4base'] = self.p4base
        args['branch'] = branch or self.branch
        args['revision'] = revision
        args['patch'] = patch
        cmd = LoggedRemoteCommand("p4", args)
        self.startCommand(cmd)
class P4Sync(Source):
    """This is a partial solution for using a P4 source repository. You are
    required to manually set up each build slave with a useful P4
    environment, which means setting various per-slave environment variables,
    and creating a P4 client specification which maps the right files into
    the slave's working directory. Once you have done that, this step merely
    performs a 'p4 sync' to update that workspace with the newest files.

    Each slave needs the following environment:

    - PATH: the 'p4' binary must be on the slave's PATH
    - P4USER: each slave needs a distinct user account
    - P4CLIENT: each slave needs a distinct client specification

    You should use 'p4 client' (?) to set up a client view spec which maps
    the desired files into $SLAVEBASE/$BUILDERBASE/source .
    """

    name = "p4sync"

    def __init__(self, p4port, p4user, p4passwd, p4client, **kwargs):
        # The pre-built client workspace is the whole point of this step, so
        # only mode=copy is supported; other modes would require view
        # management that this step deliberately does not do.
        assert kwargs['mode'] == "copy", "P4Sync can only be used in mode=copy"
        self.branch = None
        Source.__init__(self, **kwargs)
        self.addFactoryArguments(p4port=p4port,
                                 p4user=p4user,
                                 p4passwd=p4passwd,
                                 p4client=p4client,
                                 )
        # Everything the slave-side command needs is passed straight through.
        self.args.update({'p4port': p4port,
                          'p4user': p4user,
                          'p4passwd': p4passwd,
                          'p4client': p4client,
                          })

    def computeSourceRevision(self, changes):
        """Return the most recent (highest) change number, or None."""
        if not changes:
            return None
        return max(int(change.revision) for change in changes)

    def startVC(self, branch, revision, patch):
        """Launch the remote 'p4sync' command on the slave."""
        slavever = self.slaveVersion("p4sync")
        assert slavever, "slave is too old, does not know about p4"
        self.startCommand(LoggedRemoteCommand("p4sync", self.args))
class Monotone(Source):
    """Check out a source tree from a monotone repository 'repourl'."""

    name = "mtn"
    renderables = [ 'repourl' ]

    def __init__(self, repourl=None, branch=None, progress=False, **kwargs):
        """
        @type  repourl: string
        @param repourl: the URI which points at the monotone repository.

        @type  branch: string
        @param branch: The branch or tag to check out by default. If
                       a build specifies a different branch, it will
                       be used instead of this.

        @type  progress: boolean
        @param progress: Pass the --ticker=dot option when pulling. This
                         can solve long fetches getting killed due to
                         lack of output.
        """
        # Validate required arguments before anything uses them, so a
        # missing value fails with the intended ValueError rather than
        # whatever _ComputeRepositoryURL(None) might raise first.
        if (not repourl):
            raise ValueError("you must provide a repository uri in 'repourl'")
        if (not branch):
            raise ValueError("you must provide a default branch in 'branch'")
        Source.__init__(self, **kwargs)
        self.repourl = _ComputeRepositoryURL(repourl)
        self.addFactoryArguments(repourl=repourl,
                                 branch=branch,
                                 progress=progress,
                                 )
        self.args.update({'branch': branch,
                          'progress': progress,
                          })

    def startVC(self, branch, revision, patch):
        """Start the remote 'mtn' command; a per-build branch overrides
        the configured default."""
        slavever = self.slaveVersion("mtn")
        if not slavever:
            raise BuildSlaveTooOldError("slave is too old, does not know "
                                        "about mtn")
        self.args['repourl'] = self.repourl
        if branch:
            self.args['branch'] = branch
        self.args['revision'] = revision
        self.args['patch'] = patch
        cmd = LoggedRemoteCommand("mtn", self.args)
        self.startCommand(cmd)

    def computeSourceRevision(self, changes):
        """Pick a revision from *changes*, assuming they arrived sorted."""
        if not changes:
            return None
        # without knowing the revision ancestry graph, we can't sort the
        # changes at all. So for now, assume they were given to us in sorted
        # order, and just pay attention to the last one. See ticket #103 for
        # more details.
        if len(changes) > 1:
            log.msg("Monotone.computeSourceRevision: warning: "
                    "there are %d changes here, assuming the last one is "
                    "the most recent" % len(changes))
        return changes[-1].revision
|
Sigma-1 receptors potentiate epidermal growth factor signaling towards neuritogenesis: Potential relation to lipid raft reconstitution.
Sigma-1 receptor upregulation after chronic methamphetamine self-administration in rats: A study with yoked controls.
Lack of association between sigma receptor gene variants and schizophrenia.
Small molecule antagonists of the sigma-1 receptor cause selective release of the death program in tumour and self-reliant cells and inhibit tumor growth in vitro and in vivo.
Modulation of the firing activity of female dorsal raphe nucleus serotonergic neurons by neuroactive steroids.
Sigma receptors: Biology and therapeutic potential.
The sigma receptor ligand (+)pentazocine prevents apoptotic retinal ganglion cell death induced in vitro by homocysteine and glutamate.
Functional polymorphisms in the σ1 receptor gene associated with alcoholism.
Sigma-1 receptor antagonists attenuate antidepressant-like effect induced by co-administration of 1,3-di-o-tolylguanidine (DTG) and memantine in the forced swimming test in rats.
Protein kinase C-dependent potentiation of intracellular calcium influx by σ1 receptor agonists in rat hippocampal neurons.
|
"""
This test file will run through some XBlock test scenarios regarding the
recommender system
"""
import json
import itertools
import StringIO
from ddt import ddt, data
from copy import deepcopy
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config
from courseware.tests.helpers import LoginEnrollmentTestCase
from courseware.tests.factories import GlobalStaffFactory
from lms.djangoapps.lms_xblock.runtime import quote_slashes
MODULESTORE_CONFIG = mixed_store_config(settings.COMMON_TEST_DATA_ROOT, {}, include_xml=False)
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
class TestRecommender(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Base class for Recommender XBlock tests.

    Builds a course containing two recommender XBlocks, creates two student
    accounts and one global-staff user, and provides helpers for invoking
    the XBlocks' AJAX handlers and checking their responses.
    """
    STUDENTS = [
        {'email': 'view@test.com', 'password': 'foo'},
        {'email': 'view2@test.com', 'password': 'foo'}
    ]
    XBLOCK_NAMES = ['recommender', 'recommender_second']

    def setUp(self):
        # Chain up first: ModuleStoreTestCase / LoginEnrollmentTestCase (and
        # Django's TestCase) do essential per-test fixture setup.  The
        # original version omitted this call, unlike every subclass's setUp.
        super(TestRecommender, self).setUp()
        self.course = CourseFactory.create(
            display_name='Recommender_Test_Course'
        )
        self.chapter = ItemFactory.create(
            parent=self.course, display_name='Overview'
        )
        self.section = ItemFactory.create(
            parent=self.chapter, display_name='Welcome'
        )
        self.unit = ItemFactory.create(
            parent=self.section, display_name='New Unit'
        )
        self.xblock = ItemFactory.create(
            parent=self.unit,
            category='recommender',
            display_name='recommender'
        )
        self.xblock2 = ItemFactory.create(
            parent=self.unit,
            category='recommender',
            display_name='recommender_second'
        )
        self.course_url = reverse(
            'courseware_section',
            kwargs={
                'course_id': self.course.id.to_deprecated_string(),
                'chapter': 'Overview',
                'section': 'Welcome',
            }
        )
        # Two real resource URLs double as the resources' ids in the tests.
        self.resource_urls = [
            (
                "https://courses.edx.org/courses/MITx/3.091X/"
                "2013_Fall/courseware/SP13_Week_4/"
                "SP13_Periodic_Trends_and_Bonding/"
            ),
            (
                "https://courses.edx.org/courses/MITx/3.091X/"
                "2013_Fall/courseware/SP13_Week_4/SP13_Covalent_Bonding/"
            )
        ]
        self.test_recommendations = {
            self.resource_urls[0]: {
                "title": "Covalent bonding and periodic trends",
                "url": self.resource_urls[0],
                "description": (
                    "http://people.csail.mit.edu/swli/edx/"
                    "recommendation/img/videopage1.png"
                ),
                "descriptionText": (
                    "short description for Covalent bonding "
                    "and periodic trends"
                )
            },
            self.resource_urls[1]: {
                "title": "Polar covalent bonds and electronegativity",
                "url": self.resource_urls[1],
                "description": (
                    "http://people.csail.mit.edu/swli/edx/"
                    "recommendation/img/videopage2.png"
                ),
                "descriptionText": (
                    "short description for Polar covalent "
                    "bonds and electronegativity"
                )
            }
        }
        for idx, student in enumerate(self.STUDENTS):
            username = "u{}".format(idx)
            self.create_account(username, student['email'], student['password'])
            self.activate_user(student['email'])
        self.staff_user = GlobalStaffFactory()

    def get_handler_url(self, handler, xblock_name=None):
        """
        Get url for the specified xblock handler
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        return reverse('xblock_handler', kwargs={
            'course_id': self.course.id.to_deprecated_string(),
            'usage_id': quote_slashes(self.course.id.make_usage_key('recommender', xblock_name).to_deprecated_string()),
            'handler': handler,
            'suffix': ''
        })

    def enroll_student(self, email, password):
        """
        Student login and enroll for the course
        """
        self.login(email, password)
        self.enroll(self.course, verify=True)

    def enroll_staff(self, staff):
        """
        Staff login and enroll for the course
        """
        email = staff.email
        password = 'test'
        self.login(email, password)
        self.enroll(self.course, verify=True)

    def initialize_database_by_id(self, handler, resource_id, times, xblock_name=None):
        """
        Call a ajax event (vote, delete, endorse) on a resource by its id
        several times
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        url = self.get_handler_url(handler, xblock_name)
        for _ in range(0, times):
            self.client.post(url, json.dumps({'id': resource_id}), '')

    def call_event(self, handler, resource, xblock_name=None):
        """
        Call a ajax event (add, edit, flag, etc.) by specifying the resource
        it takes; return the decoded JSON response.
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        url = self.get_handler_url(handler, xblock_name)
        resp = self.client.post(url, json.dumps(resource), '')
        return json.loads(resp.content)

    def check_event_response_by_element(self, handler, resource, resp_key, resp_val, xblock_name=None):
        """
        Call the event specified by the handler with the resource, and check
        whether the element (resp_key) in response is as expected (resp_val)
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        resp = self.call_event(handler, resource, xblock_name)
        self.assertEqual(resp[resp_key], resp_val)
        # The courseware page must still render after the handler ran.
        self.assert_request_status_code(200, self.course_url)
class TestRecommenderCreateFromEmpty(TestRecommender):
    """
    Check whether we can add resources to an empty database correctly
    """
    def _post_initial_resources(self):
        """
        Build the standard initial configuration and POST it to the
        'import_resources' handler.

        Returns a (response, configuration) pair so callers can assert on
        both the server reply and the exact JSON that was uploaded.
        """
        initial_configuration = {
            'flagged_accum_resources': {},
            'endorsed_recommendation_reasons': [],
            'endorsed_recommendation_ids': [],
            'deendorsed_recommendations': {},
            'recommendations': self.test_recommendations[self.resource_urls[0]]
        }
        # The handler expects a file upload; fake one with StringIO.
        f_handler = StringIO.StringIO(json.dumps(initial_configuration, sort_keys=True))
        f_handler.name = 'import_resources'
        url = self.get_handler_url('import_resources')
        resp = self.client.post(url, {'file': f_handler})
        return resp, initial_configuration

    def test_add_resource(self):
        """
        Verify the addition of new resource is handled correctly
        """
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        # Check whether adding new resource is successful
        for resource_id, resource in self.test_recommendations.iteritems():
            for xblock_name in self.XBLOCK_NAMES:
                result = self.call_event('add_resource', resource, xblock_name)
                # A fresh resource starts with zero votes and echoes back
                # every submitted field plus its id.
                expected_result = {
                    'Success': True,
                    'upvotes': 0,
                    'downvotes': 0,
                    'id': resource_id
                }
                for field in resource:
                    expected_result[field] = resource[field]
                self.assertDictEqual(result, expected_result)
                self.assert_request_status_code(200, self.course_url)

    def test_import_resources_by_student(self):
        """
        Test the function for importing all resources into the Recommender
        by a student.
        """
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        # Students are not allowed to import; the handler must refuse.
        resp, _ = self._post_initial_resources()
        self.assertEqual(resp.content, 'NOT_A_STAFF')
        self.assert_request_status_code(200, self.course_url)

    def test_import_resources(self):
        """
        Test the function for importing all resources into the Recommender.
        """
        self.enroll_staff(self.staff_user)
        # Staff import succeeds and the handler echoes the configuration.
        resp, initial_configuration = self._post_initial_resources()
        self.assertEqual(resp.content, json.dumps(initial_configuration, sort_keys=True))
        self.assert_request_status_code(200, self.course_url)
class TestRecommenderWithResources(TestRecommender):
    """
    Check whether we can add/edit/flag/export resources correctly
    """
    def setUp(self):
        # call the setUp function from the superclass
        super(TestRecommenderWithResources, self).setUp()
        # Two known resource ids (the resource urls double as ids) plus one
        # id that is never added, for exercising the "not existing" paths.
        self.resource_id = self.resource_urls[0]
        self.resource_id_second = self.resource_urls[1]
        self.non_existing_resource_id = 'An non-existing id'
        self.set_up_resources()
    def set_up_resources(self):
        """
        Set up resources and enroll staff
        """
        self.logout()
        self.enroll_staff(self.staff_user)
        # Add resources, assume correct here, tested in test_add_resource
        for resource, xblock_name in itertools.product(self.test_recommendations.values(), self.XBLOCK_NAMES):
            self.call_event('add_resource', resource, xblock_name)
    def generate_edit_resource(self, resource_id):
        """
        Based on the given resource (specified by resource_id), this function
        generate a new one for testing 'edit_resource' event
        """
        resource = {"id": resource_id}
        # Append " edited" to every field of the baseline resource so the
        # edit is visible in each field.
        edited_recommendations = {
            key: value + " edited" for key, value in self.test_recommendations[self.resource_id].iteritems()
        }
        resource.update(edited_recommendations)
        return resource
    def test_add_redundant_resource(self):
        """
        Verify the addition of a redundant resource (url) is rejected
        """
        # The same url with a fragment (plain or url-encoded) still counts
        # as a duplicate.
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource = deepcopy(self.test_recommendations[self.resource_id])
            resource['url'] += suffix
            result = self.call_event('add_resource', resource)
            expected_result = {
                'Success': False,
                'error': (
                    'The resource you are attempting to '
                    'provide has already existed'
                ),
                'dup_id': self.resource_id
            }
            for field in resource:
                expected_result[field] = resource[field]
                expected_result['dup_' + field] = self.test_recommendations[self.resource_id][field]
            self.assertDictEqual(result, expected_result)
            self.assert_request_status_code(200, self.course_url)
    def test_add_deendorsed_resource(self):
        """
        Verify the addition of a deendorsed resource (url) is rejected
        """
        self.call_event('deendorse_resource', {"id": self.resource_id, 'reason': ''})
        err_msg = 'The resource you are attempting to provide has been de-endorsed by staff, because: .*'
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource = deepcopy(self.test_recommendations[self.resource_id])
            resource['url'] += suffix
            resp = self.call_event('add_resource', resource)
            self.assertRegexpMatches(resp['error'], err_msg)
            self.assert_request_status_code(200, self.course_url)
    def test_edit_resource_non_existing(self):
        """
        Edit a non-existing resource
        """
        resp = self.call_event(
            'edit_resource', self.generate_edit_resource(self.non_existing_resource_id)
        )
        self.assertEqual(resp['error'], 'The selected resource is not existing')
        self.assert_request_status_code(200, self.course_url)
    def test_edit_redundant_resource(self):
        """
        Check whether changing the url to the one of 'another' resource is
        rejected
        """
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource = self.generate_edit_resource(self.resource_id)
            resource['url'] = self.resource_id_second + suffix
            resp = self.call_event('edit_resource', resource)
            self.assertEqual(resp['error'], 'The resource you are attempting to provide has already existed')
            self.assertEqual(resp['dup_id'], self.resource_id_second)
            self.assert_request_status_code(200, self.course_url)
    def test_edit_deendorsed_resource(self):
        """
        Check whether changing the url to the one of a deendorsed resource is
        rejected
        """
        self.call_event('deendorse_resource', {"id": self.resource_id_second, 'reason': ''})
        err_msg = 'The resource you are attempting to provide has been de-endorsed by staff, because: .*'
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource = self.generate_edit_resource(self.resource_id)
            resource['url'] = self.resource_id_second + suffix
            resp = self.call_event('edit_resource', resource)
            self.assertRegexpMatches(resp['error'], err_msg)
            self.assertEqual(resp['dup_id'], self.resource_id_second)
            self.assert_request_status_code(200, self.course_url)
    def test_edit_resource(self):
        """
        Check whether changing the content of resource is successful
        """
        resp = self.call_event(
            'edit_resource', self.generate_edit_resource(self.resource_id)
        )
        self.assertEqual(resp['Success'], True)
        self.assert_request_status_code(200, self.course_url)
    def test_edit_resource_same_url(self):
        """
        Check whether changing the content (except for url) of resource is successful
        """
        resource = self.generate_edit_resource(self.resource_id)
        # Keeping the same url (modulo fragment suffix) must not trigger the
        # duplicate check.
        for suffix in ['', '#IAmSuffix', '%23IAmSuffix']:
            resource['url'] = self.resource_id + suffix
            resp = self.call_event('edit_resource', resource)
            self.assertEqual(resp['Success'], True)
            self.assert_request_status_code(200, self.course_url)
    def test_edit_then_add_resource(self):
        """
        Check whether we can add back an edited resource
        """
        self.call_event('edit_resource', self.generate_edit_resource(self.resource_id))
        # Test
        resp = self.call_event('add_resource', self.test_recommendations[self.resource_id])
        self.assertEqual(resp['id'], self.resource_id)
        self.assert_request_status_code(200, self.course_url)
    def test_edit_resources_in_different_xblocks(self):
        """
        Check whether changing the content of resource is successful in two
        different xblocks
        """
        resource = self.generate_edit_resource(self.resource_id)
        for xblock_name in self.XBLOCK_NAMES:
            resp = self.call_event('edit_resource', resource, xblock_name)
            self.assertEqual(resp['Success'], True)
            self.assert_request_status_code(200, self.course_url)
    def test_flag_resource_wo_reason(self):
        """
        Flag a resource as problematic, without providing the reason
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': ''}
        # Test
        self.check_event_response_by_element('flag_resource', resource, 'reason', '')
    def test_flag_resource_w_reason(self):
        """
        Flag a resource as problematic, with providing the reason
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
        # Test
        self.check_event_response_by_element('flag_resource', resource, 'reason', 'reason 0')
    def test_flag_resource_change_reason(self):
        """
        Flag a resource as problematic twice, with different reasons
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
        self.call_event('flag_resource', resource)
        # Test
        resource['reason'] = 'reason 1'
        resp = self.call_event('flag_resource', resource)
        # Re-flagging reports the previous reason alongside the new one.
        self.assertEqual(resp['oldReason'], 'reason 0')
        self.assertEqual(resp['reason'], 'reason 1')
        self.assert_request_status_code(200, self.course_url)
    def test_flag_resources_in_different_xblocks(self):
        """
        Flag resources as problematic in two different xblocks
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
        # Test
        for xblock_name in self.XBLOCK_NAMES:
            self.check_event_response_by_element('flag_resource', resource, 'reason', 'reason 0', xblock_name)
    def test_flag_resources_by_different_users(self):
        """
        Different users can't see the flag result of each other
        """
        resource = {'id': self.resource_id, 'isProblematic': True, 'reason': 'reason 0'}
        self.call_event('flag_resource', resource)
        self.logout()
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        # Test
        resp = self.call_event('flag_resource', resource)
        # The second user won't see the reason provided by the first user
        self.assertNotIn('oldReason', resp)
        self.assertEqual(resp['reason'], 'reason 0')
        self.assert_request_status_code(200, self.course_url)
    def test_export_resources(self):
        """
        Test the function for exporting all resources from the Recommender.
        """
        self.call_event('deendorse_resource', {"id": self.resource_id, 'reason': ''})
        self.call_event('endorse_resource', {"id": self.resource_id_second, 'reason': ''})
        # Test
        resp = self.call_event('export_resources', {})
        # Deendorsed resources leave the active list and move to the
        # deendorsed bucket; endorsed ones are listed in both places.
        self.assertIn(self.resource_id_second, resp['export']['recommendations'])
        self.assertNotIn(self.resource_id, resp['export']['recommendations'])
        self.assertIn(self.resource_id_second, resp['export']['endorsed_recommendation_ids'])
        self.assertIn(self.resource_id, resp['export']['deendorsed_recommendations'])
        self.assert_request_status_code(200, self.course_url)
@ddt
class TestRecommenderVoteWithResources(TestRecommenderWithResources):
    """
    Check whether we can vote resources correctly
    """
    # NOTE: the redundant setUp override (which only called super) was
    # removed; the inherited TestRecommenderWithResources.setUp is used.

    @data(
        {'event': 'recommender_upvote'},
        {'event': 'recommender_downvote'}
    )
    def test_vote_resource_non_existing(self, test_case):
        """
        Vote a non-existing resource
        """
        resource = {"id": self.non_existing_resource_id, 'event': test_case['event']}
        self.check_event_response_by_element('handle_vote', resource, 'error', 'The selected resource is not existing')

    @data(
        {'event': 'recommender_upvote', 'new_votes': 1},
        {'event': 'recommender_downvote', 'new_votes': -1}
    )
    def test_vote_resource_once(self, test_case):
        """
        Vote a resource
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])

    @data(
        {'event': 'recommender_upvote', 'new_votes': 0},
        {'event': 'recommender_downvote', 'new_votes': 0}
    )
    def test_vote_resource_twice(self, test_case):
        """
        Vote a resource twice; the second identical vote undoes the first.
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        # Test
        self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])

    @data(
        {'event': 'recommender_upvote', 'new_votes': 1},
        {'event': 'recommender_downvote', 'new_votes': -1}
    )
    def test_vote_resource_thrice(self, test_case):
        """
        Vote a resource thrice; votes toggle, so an odd count sticks.
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        for _ in range(0, 2):
            self.call_event('handle_vote', resource)
        # Test
        self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])

    @data(
        {'event': 'recommender_upvote', 'event_second': 'recommender_downvote', 'new_votes': -1},
        {'event': 'recommender_downvote', 'event_second': 'recommender_upvote', 'new_votes': 1}
    )
    def test_switch_vote_resource(self, test_case):
        """
        Switch the vote of a resource
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        # Test
        resource['event'] = test_case['event_second']
        self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])

    @data(
        {'event': 'recommender_upvote', 'new_votes': 1},
        {'event': 'recommender_downvote', 'new_votes': -1}
    )
    def test_vote_different_resources(self, test_case):
        """
        Vote two different resources; the second keeps its own tally.
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        # Test
        resource['id'] = self.resource_id_second
        self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])

    @data(
        {'event': 'recommender_upvote', 'new_votes': 1},
        {'event': 'recommender_downvote', 'new_votes': -1}
    )
    def test_vote_resources_in_different_xblocks(self, test_case):
        """
        Vote two resources in two different xblocks; state is per-xblock.
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        # Test
        self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'], self.XBLOCK_NAMES[1])

    @data(
        {'event': 'recommender_upvote', 'new_votes': 2},
        {'event': 'recommender_downvote', 'new_votes': -2}
    )
    def test_vote_resource_by_different_users(self, test_case):
        """
        Vote resource by two different users; votes accumulate across users.
        """
        resource = {"id": self.resource_id, 'event': test_case['event']}
        self.call_event('handle_vote', resource)
        self.logout()
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        # Test
        self.check_event_response_by_element('handle_vote', resource, 'newVotes', test_case['new_votes'])
@ddt
class TestRecommenderStaffFeedbackWithResources(TestRecommenderWithResources):
    """
    Check whether we can deendorse/endorse resources correctly
    """
    # NOTE: the redundant setUp override (which only called super) was
    # removed; the inherited TestRecommenderWithResources.setUp is used.

    @data('deendorse_resource', 'endorse_resource')
    def test_deendorse_or_endorse_resource_non_existing(self, test_case):
        """
        Deendorse/endorse a non-existing resource
        """
        resource = {"id": self.non_existing_resource_id, 'reason': ''}
        self.check_event_response_by_element(test_case, resource, 'error', 'The selected resource is not existing')

    @data(
        {'handler': 'deendorse_resource', 'key': 'Success', 'val': True},
        {'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
    )
    def test_deendorse_or_endorse_resource_once(self, test_case):
        """
        Deendorse/endorse a resource
        """
        resource = {"id": self.resource_id, 'reason': ''}
        self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])

    @data(
        {'handler': 'deendorse_resource', 'key': 'error', 'val': 'The selected resource is not existing'},
        {'handler': 'endorse_resource', 'key': 'status', 'val': 'undo endorsement'}
    )
    def test_deendorse_or_endorse_resource_twice(self, test_case):
        """
        Deendorse/endorse a resource twice: deendorsing removes the resource
        (second call fails); endorsing toggles.
        """
        resource = {"id": self.resource_id, 'reason': ''}
        self.call_event(test_case['handler'], resource)
        # Test
        self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])

    @data(
        {'handler': 'deendorse_resource', 'key': 'error', 'val': 'The selected resource is not existing'},
        {'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
    )
    def test_endorse_resource_thrice(self, test_case):
        # NOTE(review): for naming consistency with its siblings this should
        # arguably be test_deendorse_or_endorse_resource_thrice; the name is
        # kept to avoid breaking test selection by name.
        """
        Deendorse/endorse a resource thrice
        """
        resource = {"id": self.resource_id, 'reason': ''}
        for _ in range(0, 2):
            self.call_event(test_case['handler'], resource)
        # Test
        self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])

    @data(
        {'handler': 'deendorse_resource', 'key': 'Success', 'val': True},
        {'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
    )
    def test_deendorse_or_endorse_different_resources(self, test_case):
        """
        Deendorse/endorse two different resources
        """
        self.call_event(test_case['handler'], {"id": self.resource_id, 'reason': ''})
        # Test
        resource = {"id": self.resource_id_second, 'reason': ''}
        self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])

    @data(
        {'handler': 'deendorse_resource', 'key': 'Success', 'val': True},
        {'handler': 'endorse_resource', 'key': 'status', 'val': 'endorsement'}
    )
    def test_deendorse_or_endorse_resources_in_different_xblocks(self, test_case):
        """
        Deendorse/endorse two resources in two different xblocks
        """
        self.call_event(test_case['handler'], {"id": self.resource_id, 'reason': ''})
        # Test
        resource = {"id": self.resource_id, 'reason': ''}
        self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'], self.XBLOCK_NAMES[1])

    @data(
        {'handler': 'deendorse_resource', 'key': 'error', 'val': 'Deendorse resource without permission'},
        {'handler': 'endorse_resource', 'key': 'error', 'val': 'Endorse resource without permission'}
    )
    def test_deendorse_or_endorse_resource_by_student(self, test_case):
        """
        Deendorse/endorse resource by a student (forbidden)
        """
        self.logout()
        self.enroll_student(self.STUDENTS[0]['email'], self.STUDENTS[0]['password'])
        # Test
        resource = {"id": self.resource_id, 'reason': ''}
        self.check_event_response_by_element(test_case['handler'], resource, test_case['key'], test_case['val'])
@ddt
class TestRecommenderFileUploading(TestRecommender):
    """
    Check whether we can handle file uploading correctly
    """
    # NOTE: the redundant setUp override (which only called super) was
    # removed; the inherited TestRecommender.setUp is used.

    def attempt_upload_file_and_verify_result(self, test_case, xblock_name=None):
        """
        Running on a test case, creating a temp file, uploading it by
        calling the corresponding ajax event, and verifying that upload
        happens or is rejected as expected.

        test_case must supply 'magic_number' (hex string of the file's
        leading bytes), 'mimetypes', 'suffixes' (file extension) and
        'response_regexp' (expected server reply).
        """
        if xblock_name is None:
            xblock_name = TestRecommender.XBLOCK_NAMES[0]
        # Fake a file whose content starts with the given magic bytes.
        f_handler = StringIO.StringIO(test_case['magic_number'].decode('hex'))
        f_handler.content_type = test_case['mimetypes']
        f_handler.name = 'file' + test_case['suffixes']
        url = self.get_handler_url('upload_screenshot', xblock_name)
        resp = self.client.post(url, {'file': f_handler})
        self.assertRegexpMatches(resp.content, test_case['response_regexp'])
        self.assert_request_status_code(200, self.course_url)

    # Magic numbers used below: 89504e470d0a1a0a = PNG signature,
    # 474946383761/474946383961 = GIF87a/GIF89a, ffd8...ffd9 = JPEG markers.
    @data(
        {
            'suffixes': '.csv',
            'magic_number': 'ffff',
            'mimetypes': 'text/plain',
            'response_regexp': 'FILE_TYPE_ERROR'
        },  # Upload file with wrong extension name
        {
            'suffixes': '.gif',
            'magic_number': '89504e470d0a1a0a',
            'mimetypes': 'image/gif',
            'response_regexp': 'FILE_TYPE_ERROR'
        },  # PNG signature with .gif extension
        {
            'suffixes': '.jpg',
            'magic_number': '89504e470d0a1a0a',
            'mimetypes': 'image/jpeg',
            'response_regexp': 'FILE_TYPE_ERROR'
        },  # PNG signature with .jpg extension
        {
            'suffixes': '.png',
            'magic_number': '474946383761',
            'mimetypes': 'image/png',
            'response_regexp': 'FILE_TYPE_ERROR'
        },  # GIF signature with .png extension
        {
            'suffixes': '.jpg',
            'magic_number': '474946383761',
            'mimetypes': 'image/jpeg',
            'response_regexp': 'FILE_TYPE_ERROR'
        },  # GIF signature with .jpg extension
        {
            'suffixes': '.png',
            'magic_number': 'ffd8ffd9',
            'mimetypes': 'image/png',
            'response_regexp': 'FILE_TYPE_ERROR'
        },  # JPEG signature with .png extension
        {
            'suffixes': '.gif',
            'magic_number': 'ffd8ffd9',
            'mimetypes': 'image/gif',
            'response_regexp': 'FILE_TYPE_ERROR'
        }  # JPEG signature with .gif extension
    )
    def test_upload_screenshot_wrong_file_type(self, test_case):
        """
        Verify the file uploading fails correctly when file with wrong type
        (extension/magic number) is provided
        """
        self.enroll_staff(self.staff_user)
        # Upload file with wrong extension name or magic number
        self.attempt_upload_file_and_verify_result(test_case)
        self.assert_request_status_code(200, self.course_url)

    @data(
        {
            'suffixes': '.png',
            'magic_number': '89504e470d0a1a0a',
            'mimetypes': 'image/png',
            'response_regexp': 'fs://.*.png'
        },
        {
            'suffixes': '.gif',
            'magic_number': '474946383961',
            'mimetypes': 'image/gif',
            'response_regexp': 'fs://.*.gif'
        },
        {
            'suffixes': '.gif',
            'magic_number': '474946383761',
            'mimetypes': 'image/gif',
            'response_regexp': 'fs://.*.gif'
        },
        {
            'suffixes': '.jpg',
            'magic_number': 'ffd8ffd9',
            'mimetypes': 'image/jpeg',
            'response_regexp': 'fs://.*.jpeg'
        }
    )
    def test_upload_screenshot_correct_file_type(self, test_case):
        """
        Verify the file type checking in the file uploading method is
        successful.
        """
        self.enroll_staff(self.staff_user)
        # Upload file with correct extension name and magic number
        self.attempt_upload_file_and_verify_result(test_case)
        self.assert_request_status_code(200, self.course_url)
|
Johnson, Laschober & Associates utilizes the resources of the Landscape Architecture and Civil Engineering Departments in a team approach for creative and cost effective land planning. Our Landscape Architects have designed in complex environments including wetlands and have obtained the permitting necessary to design around wetlands.
Johnson, Laschober & Associates, P.C. (JLA) is a professional architecture, engineering and landscape architecture design firm conveniently located in Augusta, Georgia and Charleston, South Carolina.
|
#################################################################################
##____ ___ _ _ ____ ___ _____ ____ ___
#| _ \_ _| \ | | __ ) / _ \_ _| |___ \ / _ \
#| |_) | || \| | _ \| | | || | __) || | | |
#| __/| || |\ | |_) | |_| || | / __/ | |_| |
#|_|__|___|_|_\_|____/_\___/_|_| __|_____(_)___/_____ ___ ___ _ _
#| _ \ | _ \ / _ \ / ___| | ____| _ \_ _|_ _|_ _/ _ \| \ | |
#| |_) |____| |_) | | | | | | _| | | | | | | | | | | | | \| |
#| __/_____| _ <| |_| | |___ | |___| |_| | | | | | | |_| | |\ |
#|_| |_| \_\\___/ \____| |_____|____/___| |_| |___\___/|_| \_|
##
## A P-ROC Project by Dan Myers, Copyright 2013-2014
## Built on the PyProcGame Framework from Adam Preble and Gerry Stellenberg
## Thanks to Scott Danesi for his Earthshaker Project, which is my starting point
#################################################################################
#################################################################################
## __ _____ ____ ______________ ___ __ __
## / |/ / / / / / /_ __/ _/ __ )/ | / / / /
## / /|_/ / / / / / / / / // __ / /| | / / / /
## / / / / /_/ / /___/ / _/ // /_/ / ___ |/ /___/ /___
## /_/ /_/\____/_____/_/ /___/_____/_/ |_/_____/_____/
##
#################################################################################
import procgame.game
from procgame import *
import pinproc
from random import choice
from random import seed
class Multiball(game.Mode):
	"""Multiball mode: manages the two eyeball ball locks, the visor motor,
	and the start / intro / run / stop sequence of multiball.

	Lock state is persisted per-player through game.utilities player stats
	('lock1_lit', 'lock2_lit', 'balls_locked', ...) and mirrored into
	instance attributes by getUserStats().
	"""
	def __init__(self, game, priority):
		super(Multiball, self).__init__(game, priority)
		# Mirrors of the per-player stats; the authoritative values live in
		# the player stats and are re-read by getUserStats().
		self.ballsLocked = 0
		self.ballLock1Lit = False
		self.ballLock2Lit = False
		#self.ballLock3Lit = False
		# True while the multiball intro light/sound show is playing.
		self.multiballStarting = False
		#self.multiballIntroLength = 11.287
	def mode_started(self):
		# Refresh lock state and lamps whenever the mode is (re)started.
		self.getUserStats()
		self.update_lamps()
		return super(Multiball, self).mode_started()
	def mode_stopped(self):
		# Clear multiball bookkeeping when the mode is removed.
		self.stopMultiball()
		pass
	def update_lamps(self):
		# Lamp handling is currently commented out; this only logs the refresh.
		print "Update Lamps: Multiball"
		#self.disableLockLamps()
		#if (self.ballLock1Lit == True):
		#self.game.lamps.dropHoleLock.schedule(schedule=0xFF00FF00, cycle_seconds=0, now=True)
		#self.game.lamps.rightRampLock.schedule(schedule=0x00FF00FF, cycle_seconds=0, now=True)
		#print "Lock 1 is Lit"
		#elif (self.ballLock2Lit == True):
		#self.game.lamps.dropHoleLock.schedule(schedule=0xFF00FF00, cycle_seconds=0, now=True)
		#self.game.lamps.rightRampLock.schedule(schedule=0x00FF00FF, cycle_seconds=0, now=True)
		#print "Lock 2 is Lit"
		#elif (self.ballLock3Lit == True):
		#self.game.lamps.dropHoleLock.schedule(schedule=0xFF00FF00, cycle_seconds=0, now=True)
		#self.game.lamps.rightRampLock.schedule(schedule=0x00FF00FF, cycle_seconds=0, now=True)
		#print "Lock 3 is Lit"
	def open_visor(self):
		# Start the visor motor and light both eyeball locks (persisted).
		self.game.coils.visorMotor.enable()
		self.ballLock1Lit = True
		self.game.utilities.set_player_stats('lock1_lit', self.ballLock1Lit)
		self.ballLock2Lit = True
		self.game.utilities.set_player_stats('lock2_lit',self.ballLock2Lit)
	#def disableLockLamps(self):
	#self.game.lamps.rightRampLock.disable()
	#self.game.lamps.ejectLock.disable()
	#self.game.lamps.dropHoleLock.disable()
	def getUserStats(self):
		# Re-read the current player's lock state into instance attributes
		# and log it for debugging.
		self.ballLock1Lit = self.game.utilities.get_player_stats('lock1_lit')
		self.ballLock2Lit = self.game.utilities.get_player_stats('lock2_lit')
		#self.ballLock3Lit = self.game.utilities.get_player_stats('lock3_lit')
		self.ballsLocked = self.game.utilities.get_player_stats('balls_locked')
		print "Lock 1: " + str(self.game.utilities.get_player_stats('lock1_lit'))
		print "Lock 2: " + str(self.game.utilities.get_player_stats('lock2_lit'))
		#print "Lock 3: " + str(self.game.utilities.get_player_stats('lock3_lit'))
		print "Balls Locked: " + str(self.game.utilities.get_player_stats('balls_locked'))
	#def liteLock(self,callback):
	#self.callback = callback
	#if (self.ballsLocked == 0):
	#self.game.utilities.set_player_stats('lock1_lit',True)
	#print "Setting Ball 1 Lock to Lit"
	#self.getUserStats()
	#elif (self.ballsLocked == 1):
	#self.game.utilities.set_player_stats('lock2_lit',True)
	#self.getUserStats()
	#elif (self.ballsLocked == 2):
	#self.game.utilities.set_player_stats('lock3_lit',True)
	#self.getUserStats()
	#self.update_lamps()
	def lockLeftEyeBall(self):
		# Ball locked in the left eye: award score/display, launch a
		# replacement ball, and start multiball once both locks are filled.
		self.game.sound.play('ball_lock_1')
		self.game.utilities.set_player_stats('ball1_locked',True)
		self.game.utilities.set_player_stats('balls_locked',self.game.utilities.get_player_stats('balls_locked') + 1)
		self.game.utilities.set_player_stats('lock1_lit',False)
		self.getUserStats()
		self.update_lamps()
		self.game.utilities.displayText(100,'LEFT', 'EYE','IS','MADE',seconds=3,justify='center')
		self.game.utilities.score(1000)
		self.game.lampctrlflash.play_show('skillshot', repeat=False, callback=self.game.update_lamps)
		self.game.trough.launch_balls(num=1)
		self.ballLock1Lit = False
		#self.callback()
		if self.game.utilities.get_player_stats('balls_locked')==2:
			self.startMultiball()
	def lockRightEyeBall(self):
		# Mirror of lockLeftEyeBall for the right eye lock.
		self.game.sound.play('ball_lock_2')
		self.game.utilities.set_player_stats('ball2_locked',True)
		self.game.utilities.set_player_stats('balls_locked',self.game.utilities.get_player_stats('balls_locked') + 1)
		self.game.utilities.set_player_stats('lock2_lit',False)
		self.getUserStats()
		self.update_lamps()
		self.game.utilities.displayText(100,'RIGHT', 'EYE','IS','MADE',seconds=3,justify='center')
		self.game.utilities.score(1000)
		self.game.lampctrlflash.play_show('skillshot', repeat=False, callback=self.game.update_lamps)
		self.game.trough.launch_balls(num=1)
		self.ballLock2Lit = False
		#self.callback()
		if self.game.utilities.get_player_stats('balls_locked')==2:
			self.startMultiball()
	def startMultiball(self):
		# Flag multiball as running, clear the lock stats, then run the intro.
		self.multiballStarting = True
		self.game.utilities.set_player_stats('multiball_running',True)
		self.resetMultiballStats()
		#self.game.collect_mode.incrementActiveZoneLimit()
		self.getUserStats()
		self.update_lamps()
		self.multiballIntro()
	def multiballIntro(self):
		# Kill GI and music for the intro show, then schedule the main
		# multiball run 8 seconds out.
		self.cancel_delayed('dropReset')
		self.game.utilities.disableGI()
		self.game.sound.stop_music()
		#self.game.lampctrlflash.play_show('multiball_intro_1', repeat=False)
		#self.game.utilities.randomLampPulse(100)
		# Sound FX #
		self.game.sound.play('multiball_1')
		self.game.sound.play_music('multiball_loop'+ str(self.game.ball),loops=1,music_volume=.5)
		#Short Out Noises
		#self.delay(delay=2,handler=self.game.sound.play,param='short_out_2')
		#self.delay(delay=3,handler=self.game.sound.play,param='short_out_1')
		#self.delay(delay=4.5,handler=self.game.sound.play,param='short_out_1')
		#self.delay(delay=6,handler=self.game.sound.play,param='short_out_2')
		#self.delay(delay=8,handler=self.game.sound.play,param='short_out_1')
		#self.delay(delay=9,handler=self.game.sound.play,param='short_out_2')
		#self.delay(delay=10,handler=self.game.sound.play,param='short_out_1')
		#self.game.coils.quakeMotor.schedule(schedule=0x08080808,cycle_seconds=-1,now=True)
		self.resetMultiballStats()
		self.delay(delay=8,handler=self.multiballRun)
	def multiballRun(self):
		# Restore GI, loop the multiball music, and kick out any balls
		# sitting in the eject saucers (with matching flasher pulses).
		self.game.utilities.enableGI()
		#self.game.coils.quakeMotor.patter(on_time=15,off_time=100)
		#self.game.utilities.enableMultiballQuake()
		#self.game.sound.play('centerRampComplete')
		self.game.sound.play_music('multiball_loop'+ str(self.game.ball),loops=-1,music_volume=.6)
		#self.game.utilities.acCoilPulse(coilname='singleEjectHole_LeftInsertBDFlasher',pulsetime=50)
		#self.game.utilities.acFlashPulse('singleEjectHole_LeftInsertBDFlasher')
		if self.game.switches.rightEyeball.is_active()==True:
			self.game.utilities.acCoilPulse(coilname='rightEyeballEject_SunFlasher',pulsetime=50)
		if self.game.switches.leftEyeball.is_active()==True:
			self.game.utilities.acCoilPulse(coilname='leftEyeballEject_LeftPlayfieldFlasher',pulsetime=50)
		if self.game.switches.singleEject.is_active()==True:
			self.game.utilities.acCoilPulse(coilname='singleEjectHole_LeftInsertBDFlasher',pulsetime=50)
		#self.game.trough.launch_balls(num=2)
		self.multiballStarting = False
		self.game.update_lamps()
	def stopMultiball(self):
		# Clear the multiball flag and lock stats; called from mode_stopped.
		self.game.utilities.set_player_stats('multiball_running',False)
		#self.game.utilities.set_player_stats('jackpot_lit',False)
		self.game.utilities.setBallInPlay(True)
		#self.game.sound.stop_music()
		#self.game.sound.play_music('main'+ str(self.game.ball),loops=1,music_volume=.5)
		self.resetMultiballStats()
		#self.game.bonusmultiplier_mode.incrementBonusMultiplier()
		self.game.update_lamps()
		#self.game.coils.quakeMotor.disable()
		#self.callback()
	def resetMultiballStats(self):
		# Zero out all per-player lock bookkeeping and re-sync attributes.
		self.game.utilities.set_player_stats('lock1_lit',False)
		self.game.utilities.set_player_stats('lock2_lit',False)
		self.game.utilities.set_player_stats('lock3_lit',False)
		self.game.utilities.set_player_stats('balls_locked',0)
		self.getUserStats()
	#def sw_underPlayfieldDrop1_active(self, sw):
	#if (self.ballLock1Lit == True):
	#self.lockBall1()
	#elif (self.ballLock2Lit == True):
	#self.lockBall2()
	#elif (self.ballLock3Lit == True):
	#self.startMultiball()
	#else:
	#pass
	#def sw_ballPopperBottom_closed(self, sw):
	#if(self.multiballStarting == True):
	#return procgame.game.SwitchStop
	#else:
	#return procgame.game.SwitchContinue
	#def sw_outhole_closed_for_500ms(self, sw):
	##if (self.game.trough.num_balls_in_play == 2):
	##Last ball - Need to stop multiball
	##self.stopMultiball()
	#return procgame.game.SwitchContinue
	def sw_leftEyeball_closed_for_100ms(self, sw):
		# Left eye saucer hit: lock the ball only if its lock is lit.
		if (self.ballLock1Lit == True):
			self.lockLeftEyeBall()
		return procgame.game.SwitchContinue
	def sw_rightEyeball_closed_for_100ms(self, sw):
		# Right eye saucer hit: lock the ball only if its lock is lit.
		if (self.ballLock2Lit == True):
			self.lockRightEyeBall()
		return procgame.game.SwitchContinue
	#EJECTS/EYEBALLS
	#rightEyeball:
	#number: S42
	#label: 'Right Eye Eject'
	#leftEyeball:
	#number: S41
	#label: 'Left Eye Eject'
	def sw_visorClosed_open_for_100ms(self, sw):
		# Visor has left the fully-closed position: run the motor and
		# light the locks.
		self.open_visor()
		return procgame.game.SwitchContinue
	def sw_visorOpen_closed_for_100ms(self, sw):
		# NOTE(review): this fires when the visor reaches fully OPEN, yet it
		# calls open_visor() again (which enables the motor). Confirm the
		# motor is stopped elsewhere, otherwise it may keep running.
		self.open_visor()
		return procgame.game.SwitchContinue
#visorOpen:
#number: S67
#visorClosed:
#number: S66
|
WHEN ARE YOU ELIGIBLE FOR EARLY TERMINATION OF PROBATION?
A defendant is eligible to petition for early termination of probation after completing a minimum of one third of the probation term or 2 years, whichever comes first.
WHO CAN QUALIFY FOR EARLY TERMINATION FOR PROBATION?
Current on the payment of all court costs, fines and restitution.
WHAT OFFENSES DON’T QUALIFY FOR EARLY TERMINATION OF PROBATION?
THE LAW OFFICE OF RON BARON CAN ASSIST WITH THE FILING OF A PETITION FOR EARLY TERMINATION OF PROBATION.
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ExportConfigurationList(ListResource):
    """List resource for per-type bulk-export configuration.

    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.
    """

    def __init__(self, version):
        """Set up the list resource.

        :param Version version: Version that contains the resource

        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationList
        """
        super(ExportConfigurationList, self).__init__(version)
        # This list resource has no path parameters of its own.
        self._solution = {}

    def get(self, resource_type):
        """Build an ExportConfigurationContext for one communication type.

        :param resource_type: The type of communication - Messages, Calls

        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
        """
        return ExportConfigurationContext(self._version, resource_type=resource_type)

    def __call__(self, resource_type):
        """Alias for :meth:`get`, allowing the list to be called directly.

        :param resource_type: The type of communication - Messages, Calls

        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
        """
        return self.get(resource_type)

    def __repr__(self):
        """Return a machine friendly representation.

        :rtype: str
        """
        return '<Twilio.Bulkexports.V1.ExportConfigurationList>'
class ExportConfigurationPage(Page):
    """One page of ExportConfiguration records.

    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.
    """

    def __init__(self, version, response, solution):
        """Wrap a single API response page.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API

        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationPage
        """
        super(ExportConfigurationPage, self).__init__(version, response)
        # Carry the parent list's path solution through to instances.
        self._solution = solution

    def get_instance(self, payload):
        """Turn one raw payload dict into an ExportConfigurationInstance.

        :param dict payload: Payload response from the API

        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
        """
        return ExportConfigurationInstance(self._version, payload)

    def __repr__(self):
        """Return a machine friendly representation.

        :rtype: str
        """
        return '<Twilio.Bulkexports.V1.ExportConfigurationPage>'
class ExportConfigurationContext(InstanceContext):
    """Context for fetching/updating the export configuration of one type.

    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.
    """

    def __init__(self, version, resource_type):
        """Bind the context to one communication type.

        :param Version version: Version that contains the resource
        :param resource_type: The type of communication - Messages, Calls

        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
        """
        super(ExportConfigurationContext, self).__init__(version)
        self._solution = {'resource_type': resource_type}
        self._uri = '/Exports/{resource_type}/Configuration'.format(resource_type=resource_type)

    def fetch(self):
        """Fetch the ExportConfigurationInstance.

        :returns: The fetched ExportConfigurationInstance
        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri)
        return ExportConfigurationInstance(
            self._version,
            payload,
            resource_type=self._solution['resource_type'],
        )

    def update(self, enabled=values.unset, webhook_url=values.unset,
               webhook_method=values.unset):
        """Update the ExportConfigurationInstance.

        :param bool enabled: Whether files are automatically generated
        :param unicode webhook_url: URL targeted at export
        :param unicode webhook_method: Whether to GET or POST to the webhook url

        :returns: The updated ExportConfigurationInstance
        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
        """
        data = values.of({
            'Enabled': enabled,
            'WebhookUrl': webhook_url,
            'WebhookMethod': webhook_method,
        })
        payload = self._version.update(method='POST', uri=self._uri, data=data)
        return ExportConfigurationInstance(
            self._version,
            payload,
            resource_type=self._solution['resource_type'],
        )

    def __repr__(self):
        """Return a machine friendly representation.

        :rtype: str
        """
        details = ' '.join(['{}={}'.format(k, v) for k, v in self._solution.items()])
        return '<Twilio.Bulkexports.V1.ExportConfigurationContext {}>'.format(details)
class ExportConfigurationInstance(InstanceResource):
    """One export configuration record.

    PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution.
    """

    def __init__(self, version, payload, resource_type=None):
        """Build the instance from an API payload.

        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
        """
        super(ExportConfigurationInstance, self).__init__(version)
        # Marshal exactly the fields this resource exposes.
        self._properties = {
            key: payload.get(key)
            for key in ('enabled', 'webhook_url', 'webhook_method',
                        'resource_type', 'url')
        }
        # The context is created lazily on the first proxied call; when no
        # resource_type was supplied, fall back to the one in the payload.
        self._context = None
        self._solution = {
            'resource_type': resource_type or self._properties['resource_type'],
        }

    @property
    def _proxy(self):
        """Return (building on first use) the context that performs all
        instance actions on behalf of this instance.

        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationContext
        """
        if self._context is None:
            self._context = ExportConfigurationContext(
                self._version,
                resource_type=self._solution['resource_type'],
            )
        return self._context

    @property
    def enabled(self):
        """:returns: Whether files are automatically generated
        :rtype: bool
        """
        return self._properties['enabled']

    @property
    def webhook_url(self):
        """:returns: URL targeted at export
        :rtype: unicode
        """
        return self._properties['webhook_url']

    @property
    def webhook_method(self):
        """:returns: Whether to GET or POST to the webhook url
        :rtype: unicode
        """
        return self._properties['webhook_method']

    @property
    def resource_type(self):
        """:returns: The type of communication - Messages, Calls
        :rtype: unicode
        """
        return self._properties['resource_type']

    @property
    def url(self):
        """:returns: The URL of this resource.
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """Fetch the ExportConfigurationInstance.

        :returns: The fetched ExportConfigurationInstance
        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
        """
        return self._proxy.fetch()

    def update(self, enabled=values.unset, webhook_url=values.unset,
               webhook_method=values.unset):
        """Update the ExportConfigurationInstance.

        :param bool enabled: Whether files are automatically generated
        :param unicode webhook_url: URL targeted at export
        :param unicode webhook_method: Whether to GET or POST to the webhook url

        :returns: The updated ExportConfigurationInstance
        :rtype: twilio.rest.bulkexports.v1.export_configuration.ExportConfigurationInstance
        """
        return self._proxy.update(
            enabled=enabled,
            webhook_url=webhook_url,
            webhook_method=webhook_method,
        )

    def __repr__(self):
        """Return a machine friendly representation.

        :rtype: str
        """
        details = ' '.join(['{}={}'.format(k, v) for k, v in self._solution.items()])
        return '<Twilio.Bulkexports.V1.ExportConfigurationInstance {}>'.format(details)
|
Celebrate Your Birthday : The New 400 Movie Theaters – Chicago, IL. We offer first run films at the best prices.
We’d like for you to come and celebrate your Birthday with us! Whether it’s turning 9 or 90 we welcome you, your friends, and family to come see a movie.
We offer competitive pricing and custom party packages to make your day as special as it can be!
To inquire about our various party packages and pricing, contact us at 400theaterevents@gmail.com. Please note that responses may take up to three business days due to the high volume of inquiries we receive.
|
"""Compatibility code for using magicbus with various versions of Python.
Magic Bus 3.3 is compatible with Python versions 2.7+. This module provides a
useful abstraction over the differences between Python versions, sometimes by
preferring a newer idiom, sometimes an older one, and sometimes a custom one.
In particular, Python 2 uses str and '' for byte strings, while Python 3
uses str and '' for unicode strings. We will call each of these the 'native
string' type for each version. Because of this major difference, this module
provides new 'bytestr', 'unicodestr', and 'nativestr' attributes, as well as
the function: 'ntob', which translates native strings (of type 'str') into
byte strings regardless of Python version.
"""
import sys
# Version switch: define the py3k flag, the native-string aliases, and ntob()
# with the behavior appropriate to the running interpreter.
if sys.version_info >= (3, 0):
    py3k = True
    # Python 3 has no 'basestring'; accept both bytes and str in its place.
    basestring = (bytes, str)
    unicodestr = str
    def ntob(n, encoding='ISO-8859-1'):
        """Return the given native string as a byte string in the given
        encoding."""
        # In Python 3, the native string type is unicode
        return n.encode(encoding)
else:
    # Python 2
    py3k = False
    basestring = basestring
    unicodestr = unicode
    def ntob(n, encoding='ISO-8859-1'):
        """Return the given native string as a byte string in the given
        encoding."""
        # In Python 2, the native string type is bytes. Assume it's already
        # in the given encoding, which for ISO-8859-1 is almost always what
        # was intended.
        return n
# Prefer the Python 3 module locations; fall back to the Python 2 names.
try:
    from http.server import HTTPServer, BaseHTTPRequestHandler as HTTPHandler
except ImportError:
    from BaseHTTPServer import HTTPServer
    from BaseHTTPServer import BaseHTTPRequestHandler as HTTPHandler
try:
    from http.client import HTTPConnection
except ImportError:
    from httplib import HTTPConnection
import threading
try:
    # The 'thread' module was renamed '_thread' in Python 3.
    from _thread import get_ident as get_thread_ident
except ImportError:
    from thread import get_ident as get_thread_ident
# threading's private _Timer class became the public threading.Timer class
# in Python 3.3; pick whichever exists on this interpreter.
if sys.version_info >= (3, 3):
    TimerClass = threading.Timer
else:
    TimerClass = threading._Timer
|
...what is hiding in your carpet?
...how much time you will spend cleaning it?
...how much it will cost you?
Do you want to get rid of it?
We will install unfinished or prefinished floors for you.
We will sand, stain, finish, refinish and repair.
We are a professional hardwood flooring company with U.S. and European experience. Our prices start at $1.89 per sq. ft. for installation or $1.89 for sanding and finishing, including 3 coats of oil-based Poly. We are dust-free, insured, licensed and our job is 100% guaranteed.
Check out my interview, Everything You Need to Know About Hardwood Floors on North Carolina Homes, one of the top sites for North Carolina real estate, including Mooresville, NC homes for sale. North Carolina Homes also services South Carolina real estate and Georgia real estate.
|
# Copyright 2018 The TensorFlow Global Objectives Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for global objectives util functions."""
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from global_objectives import util
def weighted_sigmoid_cross_entropy(targets, logits, weight):
  """NumPy reference implementation of weighted sigmoid cross entropy.

  The positive (target) term is scaled by `weight`; the negative term is
  unweighted. Used to check the TF implementation elementwise.
  """
  # log(1 + exp(-x)) is the loss contribution of a positive label.
  positive_term = np.log(1.0 + np.exp(-logits))
  # log(1 + 1/exp(-x)) == log(1 + exp(x)): contribution of a negative label.
  negative_term = np.log(1.0 + 1.0 / np.exp(-logits))
  return weight * targets * positive_term + (1.0 - targets) * negative_term
def hinge_loss(labels, logits):
  """Per-datapoint hinge loss (no reduction).

  Mostly mirrors tensorflow.python.ops.losses, but returns the loss for each
  datapoint instead of a scalar.
  """
  labels = tf.to_float(labels)
  ones = tf.ones_like(labels)
  # Map {0, 1} labels onto {-1, +1}.
  signed_labels = tf.subtract(2 * labels, ones)
  # Elementwise hinge: max(0, 1 - y * logits).
  return tf.nn.relu(tf.subtract(ones, tf.multiply(signed_labels, logits)))
class WeightedSigmoidCrossEntropyTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for util.weighted_sigmoid_cross_entropy_with_logits."""

  def testTrivialCompatibilityWithSigmoidCrossEntropy(self):
    """Tests compatibility with unweighted function with weight 1.0."""
    x_shape = [300, 10]
    targets = np.random.random_sample(x_shape).astype(np.float32)
    logits = np.random.randn(*x_shape).astype(np.float32)
    weighted_loss = util.weighted_sigmoid_cross_entropy_with_logits(
        targets,
        logits)
    expected_loss = (
        tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits(
            logits, targets))
    with self.test_session():
      self.assertAllClose(expected_loss.eval(),
                          weighted_loss.eval(),
                          atol=0.000001)

  def testNonTrivialCompatibilityWithSigmoidCrossEntropy(self):
    """Tests use of an arbitrary weight (4.12)."""
    x_shape = [300, 10]
    targets = np.random.random_sample(x_shape).astype(np.float32)
    logits = np.random.randn(*x_shape).astype(np.float32)
    weight = 4.12
    # Same weight for positives and negatives scales the whole loss.
    weighted_loss = util.weighted_sigmoid_cross_entropy_with_logits(
        targets,
        logits,
        weight,
        weight)
    expected_loss = (
        weight *
        tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits(
            logits, targets))
    with self.test_session():
      self.assertAllClose(expected_loss.eval(),
                          weighted_loss.eval(),
                          atol=0.000001)

  def testDifferentSizeWeightedSigmoidCrossEntropy(self):
    """Tests correctness on 3D tensors.

    Tests that the function works as expected when logits is a 3D tensor and
    targets is a 2D tensor.
    """
    targets_shape = [30, 4]
    logits_shape = [targets_shape[0], targets_shape[1], 3]
    targets = np.random.random_sample(targets_shape).astype(np.float32)
    logits = np.random.randn(*logits_shape).astype(np.float32)
    weight_vector = [2.0, 3.0, 13.0]
    loss = util.weighted_sigmoid_cross_entropy_with_logits(targets,
                                                           logits,
                                                           weight_vector)
    with self.test_session():
      loss = loss.eval()
      # Each slice along the last axis must match the NumPy reference with
      # its corresponding weight.
      for i in range(0, len(weight_vector)):
        expected = weighted_sigmoid_cross_entropy(targets, logits[:, :, i],
                                                  weight_vector[i])
        self.assertAllClose(loss[:, :, i], expected, atol=0.000001)

  @parameterized.parameters((300, 10, 0.3), (20, 4, 2.0), (30, 4, 3.9))
  def testWeightedSigmoidCrossEntropy(self, batch_size, num_labels, weight):
    """Tests thats the tf and numpy functions agree on many instances."""
    x_shape = [batch_size, num_labels]
    targets = np.random.random_sample(x_shape).astype(np.float32)
    logits = np.random.randn(*x_shape).astype(np.float32)
    with self.test_session():
      loss = util.weighted_sigmoid_cross_entropy_with_logits(
          targets,
          logits,
          weight,
          1.0,
          name='weighted-loss')
      expected = weighted_sigmoid_cross_entropy(targets, logits, weight)
      self.assertAllClose(expected, loss.eval(), atol=0.000001)

  def testGradients(self):
    """Tests that weighted loss gradients behave as expected."""
    # All logits are multiplied by this constant so that gradients with
    # respect to it aggregate the per-example gradients.
    dummy_tensor = tf.constant(1.0)

    positives_shape = [10, 1]
    positives_logits = dummy_tensor * tf.Variable(
        tf.random_normal(positives_shape) + 1.0)
    positives_targets = tf.ones(positives_shape)
    positives_weight = 4.6
    positives_loss = (
        tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits(
            positives_logits, positives_targets) * positives_weight)

    negatives_shape = [190, 1]
    negatives_logits = dummy_tensor * tf.Variable(
        tf.random_normal(negatives_shape))
    negatives_targets = tf.zeros(negatives_shape)
    negatives_weight = 0.9
    negatives_loss = (
        tf.contrib.nn.deprecated_flipped_sigmoid_cross_entropy_with_logits(
            negatives_logits, negatives_targets) * negatives_weight)

    all_logits = tf.concat([positives_logits, negatives_logits], 0)
    all_targets = tf.concat([positives_targets, negatives_targets], 0)
    weighted_loss = tf.reduce_sum(
        util.weighted_sigmoid_cross_entropy_with_logits(
            all_targets, all_logits, positives_weight, negatives_weight))
    weighted_gradients = tf.gradients(weighted_loss, dummy_tensor)
    expected_loss = tf.add(
        tf.reduce_sum(positives_loss),
        tf.reduce_sum(negatives_loss))
    expected_gradients = tf.gradients(expected_loss, dummy_tensor)
    with tf.Session() as session:
      tf.global_variables_initializer().run()
      grad, expected_grad = session.run(
          [weighted_gradients, expected_gradients])
      self.assertAllClose(grad, expected_grad)

  def testDtypeFlexibility(self):
    """Tests the loss on inputs of varying data types."""
    shape = [20, 3]
    logits = np.random.randn(*shape)
    targets = tf.truncated_normal(shape)
    positive_weights = tf.constant(3, dtype=tf.int64)
    negative_weights = 1
    loss = util.weighted_sigmoid_cross_entropy_with_logits(
        targets, logits, positive_weights, negative_weights)
    with self.test_session():
      # np.float was a deprecated alias of the builtin float (removed in
      # NumPy 1.24); compare against np.float64, which it resolved to.
      self.assertEqual(loss.eval().dtype, np.float64)
class WeightedHingeLossTest(tf.test.TestCase):
  """Tests for util.weighted_hinge_loss, checked against the local
  hinge_loss reference implementation."""
  def testTrivialCompatibilityWithHinge(self):
    # Tests compatibility with unweighted hinge loss.
    x_shape = [55, 10]
    logits = tf.constant(np.random.randn(*x_shape).astype(np.float32))
    targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.3))
    weighted_loss = util.weighted_hinge_loss(targets, logits)
    expected_loss = hinge_loss(targets, logits)
    with self.test_session():
      self.assertAllClose(expected_loss.eval(), weighted_loss.eval())
  def testLessTrivialCompatibilityWithHinge(self):
    # Tests compatibility with a constant weight for positives and negatives.
    x_shape = [56, 11]
    logits = tf.constant(np.random.randn(*x_shape).astype(np.float32))
    targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.7))
    # Partial harmonic sum, used as a single shared weight; an equal weight
    # on positives and negatives must simply scale the unweighted loss.
    weight = 1.0 + 1.0/2 + 1.0/3 + 1.0/4 + 1.0/5 + 1.0/6 + 1.0/7
    weighted_loss = util.weighted_hinge_loss(targets, logits, weight, weight)
    expected_loss = hinge_loss(targets, logits) * weight
    with self.test_session():
      self.assertAllClose(expected_loss.eval(), weighted_loss.eval())
  def testNontrivialCompatibilityWithHinge(self):
    # Tests compatibility with different positive and negative weights.
    x_shape = [23, 8]
    logits_positives = tf.constant(np.random.randn(*x_shape).astype(np.float32))
    logits_negatives = tf.constant(np.random.randn(*x_shape).astype(np.float32))
    targets_positives = tf.ones(x_shape)
    targets_negatives = tf.zeros(x_shape)
    logits = tf.concat([logits_positives, logits_negatives], 0)
    targets = tf.concat([targets_positives, targets_negatives], 0)
    raw_loss = util.weighted_hinge_loss(targets,
                                        logits,
                                        positive_weights=3.4,
                                        negative_weights=1.2)
    loss = tf.reduce_sum(raw_loss, 0)
    # Expected value: each half weighted independently, then summed.
    positives_hinge = hinge_loss(targets_positives, logits_positives)
    negatives_hinge = hinge_loss(targets_negatives, logits_negatives)
    expected = tf.add(tf.reduce_sum(3.4 * positives_hinge, 0),
                      tf.reduce_sum(1.2 * negatives_hinge, 0))
    with self.test_session():
      self.assertAllClose(loss.eval(), expected.eval())
  def test3DLogitsAndTargets(self):
    # Tests correctness when logits is 3D and targets is 2D.
    targets_shape = [30, 4]
    logits_shape = [targets_shape[0], targets_shape[1], 3]
    targets = tf.to_float(
        tf.constant(np.random.random_sample(targets_shape) > 0.7))
    logits = tf.constant(np.random.randn(*logits_shape).astype(np.float32))
    weight_vector = [1.0, 1.0, 1.0]
    loss = util.weighted_hinge_loss(targets, logits, weight_vector)
    with self.test_session():
      loss_value = loss.eval()
      # Each slice along the last axis must equal the 2D hinge loss.
      for i in range(len(weight_vector)):
        expected = hinge_loss(targets, logits[:, :, i]).eval()
        self.assertAllClose(loss_value[:, :, i], expected)
class BuildLabelPriorsTest(tf.test.TestCase):
  """Tests for util.build_label_priors (a stateful update op)."""
  def testLabelPriorConsistency(self):
    # Checks that, with zero pseudocounts, the returned label priors reproduce
    # label frequencies in the batch.
    batch_shape = [4, 10]
    labels = tf.Variable(
        tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.678)))
    label_priors_update = util.build_label_priors(
        labels=labels, positive_pseudocount=0, negative_pseudocount=0)
    expected_priors = tf.reduce_mean(labels, 0)
    with self.test_session():
      tf.global_variables_initializer().run()
      self.assertAllClose(label_priors_update.eval(), expected_priors.eval())
  def testLabelPriorsUpdate(self):
    # Checks that the update of label priors behaves as expected.
    batch_shape = [1, 5]
    labels = tf.Variable(
        tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.4)))
    label_priors_update = util.build_label_priors(labels)
    # Running sums mirroring the op's internal accumulators; the initial
    # values (1 and 2) match the op's default pseudocounts, as also seen in
    # testLabelPriorsUpdateWithWeights below.
    label_sum = np.ones(shape=batch_shape)
    weight_sum = 2.0 * np.ones(shape=batch_shape)
    with self.test_session() as session:
      tf.global_variables_initializer().run()
      for _ in range(3):
        label_sum += labels.eval()
        weight_sum += np.ones(shape=batch_shape)
        expected_posteriors = label_sum / weight_sum
        # NOTE: evaluating the update op advances its internal state, so the
        # order of eval() calls inside this loop is significant.
        label_priors = label_priors_update.eval().reshape(batch_shape)
        self.assertAllClose(label_priors, expected_posteriors)
        # Re-initialize labels to get a new random sample.
        session.run(labels.initializer)
  def testLabelPriorsUpdateWithWeights(self):
    # Checks the update of label priors with per-example weights.
    batch_size = 6
    num_labels = 5
    batch_shape = [batch_size, num_labels]
    labels = tf.Variable(
        tf.to_float(tf.greater(tf.random_uniform(batch_shape), 0.6)))
    weights = tf.Variable(tf.random_uniform(batch_shape) * 6.2)
    update_op = util.build_label_priors(labels, weights=weights)
    # Expected posterior includes the default pseudocounts: +1 on the
    # weighted label counts, +2 on the weight total.
    expected_weighted_label_counts = 1.0 + tf.reduce_sum(weights * labels, 0)
    expected_weight_sum = 2.0 + tf.reduce_sum(weights, 0)
    expected_label_posteriors = tf.divide(expected_weighted_label_counts,
                                          expected_weight_sum)
    with self.test_session() as session:
      tf.global_variables_initializer().run()
      # Run the update and the expected value in a single session.run so
      # both see the same random variable values.
      updated_priors, expected_posteriors = session.run(
          [update_op, expected_label_posteriors])
      self.assertAllClose(updated_priors, expected_posteriors)
class WeightedSurrogateLossTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for util.weighted_surrogate_loss name-based dispatch."""

  @parameterized.parameters(
      ('hinge', util.weighted_hinge_loss),
      ('xent', util.weighted_sigmoid_cross_entropy_with_logits))
  def testCompatibilityLoss(self, loss_name, loss_fn):
    """weighted_surrogate_loss(name) matches calling the named loss directly."""
    x_shape = [28, 4]
    logits = tf.constant(np.random.randn(*x_shape).astype(np.float32))
    targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.5))
    positive_weights = 0.66
    negative_weights = 11.1
    expected_loss = loss_fn(
        targets,
        logits,
        positive_weights=positive_weights,
        negative_weights=negative_weights)
    computed_loss = util.weighted_surrogate_loss(
        targets,
        logits,
        loss_name,
        positive_weights=positive_weights,
        negative_weights=negative_weights)
    with self.test_session():
      self.assertAllClose(expected_loss.eval(), computed_loss.eval())

  def testSurrogateError(self):
    """An unknown surrogate name raises ValueError."""
    # Renamed from 'testSurrogatgeError' (typo); test discovery is unaffected
    # because the 'test' prefix is preserved.
    x_shape = [7, 3]
    logits = tf.constant(np.random.randn(*x_shape).astype(np.float32))
    targets = tf.to_float(tf.constant(np.random.random_sample(x_shape) > 0.5))
    with self.assertRaises(ValueError):
      util.weighted_surrogate_loss(logits, targets, 'bug')
# Run the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
    tf.test.main()
|
Carpet twist is the number of turns used when creating the carpet fiber or yarn. Generally, the more twists on a fiber, the better performance of the carpet. Fibers that are twisted more tightly have a different "feel" than looser twists.
How accurate is a rating system?
A rating system is developed by the manufacturer and is based on its individual standards. Due to discrepancies in testing methods and customer reviews, these rating systems cannot be compared across manufacturers.
Can you compare products solely based on technical specifications?
While technical specifications can give you a general idea, there are a variety of factors that play into each carpet. Two carpets may have the same specifications but if they're made from different fibers they could act completely different in the same environment.
If I can bend the carpet and see the backing, is it low quality?
Not at all! All carpets, excluding woven carpets, are made with spaces between the tufts. When checking for thickness or density, place the carpet on a flat surface and stick your fingers in the pile. Generally, denser carpets perform better.
What is the difference between engineered and solid hardwood?
Engineered hardwood is composed of multiple layers of plywood that are firmly pressed together to create a solid, stable core. Layers of hardwood are then placed on the top and bottom of the core, helping to reinforce the board.
Solid hardwood is exactly how it sounds. Each plank is created from a single piece of solid wood.
Engineered hardwood can be installed on any level of the house. The flexibility of the layers allows engineered hardwoods to be placed in basements, over concrete subfloors and even over radiant heating systems.
Solid hardwoods are more susceptible to changes in moisture and temperature so solid hardwood floors are only recommended for rooms at the ground level or above.
How do I clean my hardwood floor?
First of all, never use a wet mop on a hardwood surface. It will raise the grain and may permanently damage the surface. Refer to the manufacturer for its recommended cleaning products.
How Important is room temperature and humidity?
Solid hardwood floors are more susceptible to temperature and humidity changes than engineered hardwood floors, but all floors are affected by drastic climate changes. During hot and humid months, a dehumidifier may be required while in colder, dryer months a humidifier may be necessary.
Can I repair a chipped floor?
Yes. If the chip is smaller than an inch in diameter, you can use a filling putty to fix the damage. If it is too large to repair with putty, replacing the plank may be necessary. Always refer to the manufacturer's installation guide for additional information and recommendations.
Where can laminate be installed?
The flexibility and durability of laminate allows the flooring to be installed anywhere in the house including basements, bathrooms and mudrooms.
Can I install laminate over my current flooring?
Yes - for the most part. Laminate floors can be installed over a variety of floors like ceramic tile, vinyl and hardwood. We recommend having a professional installer inspect your floor and make sure it is suitable for this type of installation.
Can pets damage laminate floors?
Yes. While laminate is resistant to scratches and scrapes, untrimmed nails and claws can damage laminate flooring.
How do I repair scratches in my laminate floor?
For surface scratches, there are special color matching pencils that may be used to cover up any blemishes.
For deeper scratches and chips, you can replace the whole plank without disrupting the rest of the floor. While there are tools to aid in plank replacement, we recommend the replacement being performed by professional installer.
Luxury vinyl tile (LVT) is a premium vinyl that showcases visuals, warranties and installation options that can't be offered in traditional vinyl offerings.
Where can I install LVT in my home?
LVT's flexibility allows it to be installed on all levels of the home and over most subfloors. We recommend having a professional installer survey your subfloor before installing any new LVT.
How do I clean my LVT floor?
To find appropriate cleaning products, we recommend referring to your LVT manufacturer for suggested products. Make sure not to use any detergents, abrasive cleaners or "mop and shine" products. Never use paste wax or solvent-based polishes when trying to restore a shine to your floor.
Can scratches and dents be repaired?
If damaged, the area can be repaired by replacing the tile or plank. Make sure to clean the subfloor and remove any dirt or debris before replacing the plank.
What could cause ceramic tile to crack?
There are a few possibilities but most often if the crack continues from one tile to another - through the grout joint - there is a problem with the substrate. There is also the possibility of a fractured tile. Both problems are able to be solved but we recommend having a professional installer handle the repairs.
Where should I install glazed versus unglazed tile and why?
Unglazed tiles are recommended for most horizontal projects on both the interior and exterior of your home. Porcelain tiles are recommended for any situations that may require horizontal placement or freeze/thaw conditions.
What kind of tile should I use in the kitchen?
Generally, manufacturers recommend double glazed or glazes fired at higher temperatures due to increased scratch resistance.
While not required, we strongly recommend the use of a rug pad. These pads protect your floors from scratching or color transfer as well as extra slip resistance.
Can I use my rug outside?
It depends on the type of rug. Certain rug types and materials are made specifically for outdoor use.
What type of rug should I use for high traffic areas?
While many rugs work well in high traffic areas, woven and printed rugs tend to perform the best.
What is the best rug to use with pets?
A variety of rugs work well with pets but we recommend either woven or printed rugs. We advise against looped carpets as the loops tend to be pulled or snagged by larger pets.
|
#!/usr/bin/env python
'''
SubSynco - a tool for synchronizing subtitle files
Copyright (C) 2015 da-mkay
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gdk
from gi.repository import Gst
from gi.repository import Gtk
# Needed for window.get_xid(), xvimagesink.set_window_handle(),
# respectively:
from gi.repository import GdkX11, GstVideo
import ctypes
import re
import sys
# Import TimeClbFilter so that the plugin gets registered:
from subsynco.gst.filter import TimeClbFilter
from subsynco.media.text_formatter import TextFormatter
from subsynco.utils.logger import Logger
class MultimediaPlayer(object):
    """GStreamer-backed video player rendering into a Gtk.DrawingArea.

    A playbin is wrapped with a custom bin (timeclbfilter ! textoverlay !
    autovideosink) so that playback position can be reported through a
    callback and subtitles can be overlaid on the video.
    """

    def __init__(self, drawing_area):
        self._drawing_area = drawing_area
        self._subtitle = None
        self._position_changed_callback = None
        self._duration_changed_callback = None
        self._subtitle_list = None
        self._cur_subtitle = None
        self._duration = None
        self._position = 0
        self._file_uri = None
        self._drawing_area.connect('realize', self._on_video_realize)
        self._drawing_area.connect('unrealize', self._on_video_unrealize)
        self._drawing_area.connect('draw', self._on_video_draw)
        # GStreamer setup
        # ---------------
        self._player = Gst.ElementFactory.make('playbin', 'MultimediaPlayer')
        # PlayBin uses autovideosink by default but we need to wrap it
        # in a Bin so that we can use timeclbfilter and textoverlay.
        video_sink = Gst.ElementFactory.make('autovideosink')
        # Create the following bin:
        #   timeclbfilter ! textoverlay ! autovideosink
        # video_bin is then set as self._player's video-sink
        self._textoverlay = Gst.ElementFactory.make('textoverlay',
                                                    'textoverlay')
        timeclbfilter = Gst.ElementFactory.make('timeclbfilter',
                                                'timeclbfilter')
        video_bin = Gst.Bin.new('timer-text-video-bin')
        video_bin.add(timeclbfilter)
        video_bin.add(self._textoverlay)
        video_bin.add(video_sink)
        # Expose the filter's sink pad as the bin's sink pad so playbin
        # can link to it.
        sink_pad = Gst.GhostPad.new('sink',
                                    timeclbfilter.get_static_pad('sink'))
        video_bin.add_pad(sink_pad)
        timeclbfilter.link(self._textoverlay)
        self._textoverlay.link(video_sink)
        timeclbfilter.set_timer_callback(self._on_timer_tick)
        self._textoverlay.set_property('font-desc', 'Sans 28')
        self._textoverlay.set_property('color', 0xffffe400)
        self._textoverlay.set_property('outline-color', 0xff333333)
        self._player.set_property('video-sink', video_bin)
        bus = self._player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect('message', self._on_player_message)
        bus.connect('sync-message::element', self._on_player_sync_message)
        self._text_formatter = TextFormatter()

    def _on_timer_tick(self, nanos):
        """Called by timeclbfilter with the current stream time in ns."""
        self._position = nanos
        # If a SubtitleList is set we show/hide the subtitles here
        # based on the time.
        if (self._subtitle_list is not None):
            millis = nanos / 1000000
            __, subtitle = self._subtitle_list.get_subtitle(millis)
            # Only touch the overlay when the active subtitle changes.
            if (subtitle is not self._cur_subtitle):
                if (subtitle is None):
                    txt = ''
                else:
                    txt = self._text_formatter.fix_format(subtitle.text,
                                                          pango_markup=True)
                self._textoverlay.set_property('text', txt)
                self._cur_subtitle = subtitle
        # Invoke users position_changed callback if any.
        if (self._position_changed_callback is not None):
            self._position_changed_callback(nanos)

    def _on_video_realize(self, widget):
        """Grab the native window handle used by the video sink.

        The window handle must be retrieved in the GUI-thread and before
        playing the pipeline.
        """
        video_window = self._drawing_area.get_property('window')
        if sys.platform == 'win32':
            # On Windows we need a "hack" to get the native window
            # handle.
            # See http://stackoverflow.com/questions/23021327/how-i-can-
            # get-drawingarea-window-handle-in-gtk3/27236258#27236258
            if not video_window.ensure_native():
                Logger.error(
                    _('[Player] Video playback requires a native window'))
                return
            ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
            ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object]
            video_window_gpointer = ctypes.pythonapi.PyCapsule_GetPointer(
                video_window.__gpointer__, None)
            gdkdll = ctypes.CDLL('libgdk-3-0.dll')
            self._video_window_handle = gdkdll.gdk_win32_window_get_handle(
                video_window_gpointer)
        else:
            self._video_window_handle = video_window.get_xid()

    def _on_video_unrealize(self, widget):
        # To prevent race conditions when closing the window while
        # playing
        self._player.set_state(Gst.State.NULL)

    def _on_video_draw(self, drawing_area, cairo_context):
        """This method is called when the player's DrawingArea emits the
        draw-signal.

        Usually the playbin will render the currently opened video in
        the DrawingArea. But if no video is opened we take care of
        drawing.
        """
        if self._file_uri is not None:
            # A video is already opened. So playbin will take care of
            # showing the video inside the DrawingArea.
            return False
        # No video is opened. So we draw a simple black background.
        width = drawing_area.get_allocated_width()
        height = drawing_area.get_allocated_height()
        cairo_context.rectangle(0, 0, width, height)
        cairo_context.set_source_rgb(0.15, 0.15, 0.15)
        cairo_context.fill()
        # Centered placeholder text.
        text = _('no video loaded')
        cairo_context.set_font_size(14)
        x_bearing, y_bearing, txt_width, txt_height, x_advance, y_advance = (
            cairo_context.text_extents(text))
        cairo_context.move_to(width/2 - txt_width/2 - x_bearing, height/2 -
                              y_bearing/2)
        cairo_context.set_source_rgb(1.0, 1.0, 1.0)
        cairo_context.show_text(text)
        return True

    def _on_player_message(self, bus, message):
        """Handle asynchronous messages from the pipeline bus."""
        if message.type == Gst.MessageType.EOS:
            # We pause the video instead of stop, because we may still
            # want to seek.
            self.pause()
        elif message.type == Gst.MessageType.ERROR:
            self.stop()
            (err, debug) = message.parse_error()
            Logger.error(_('[Player] {}').format(err), debug)
        elif message.type == Gst.MessageType.ASYNC_DONE:
            # TODO Don't try to get duration at each ASYNC_DONE, only
            # on new file and real state change.
            self._query_duration()
        # TODO Gst.MessageType.DURATION_CHANGED: query_duration would
        # fail if ASYNC_DONE was not received

    def _on_player_sync_message(self, bus, message):
        """Attach the video sink to our native window handle.

        For more information see here:
        http://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-p
        lugins-base-libs/html/gst-plugins-base-libs-gstvideooverlay.ht
        ml
        """
        if message.get_structure() is None:
            return
        if not GstVideo.is_video_overlay_prepare_window_handle_message(message):
            return
        imagesink = message.src
        imagesink.set_property('force-aspect-ratio', True)
        imagesink.set_window_handle(self._video_window_handle)

    def _query_duration(self):
        """Query and cache the stream duration.

        Returns True if a duration is available (cached or freshly
        queried), False otherwise.
        """
        if self._duration is not None:
            return True
        ok, dur = self._player.query_duration(Gst.Format.TIME)
        if not ok:
            # BUG FIX: previously the (invalid) duration was cached and
            # the duration-changed callback fired even when the query
            # failed, so later calls wrongly reported success. Leave
            # self._duration unset so a later query can retry.
            return False
        self._duration = dur
        if (self._duration_changed_callback is not None):
            self._duration_changed_callback(self._duration)
        return True

    def set_position_changed_callback(self, callback):
        """Register a callback invoked with the position in nanoseconds."""
        self._position_changed_callback = callback

    def set_duration_changed_callback(self, callback):
        """Register a callback invoked with the duration in nanoseconds."""
        self._duration_changed_callback = callback

    def set_subtitle_list(self, subtitle_list):
        """Set the subsynco.media.subtitle.SubtitleList to be used for
        showing subtitles.
        """
        self._textoverlay.set_property('text', '')
        self._cur_subtitle = None
        self._subtitle_list = subtitle_list

    def pause(self):
        if self._file_uri is not None:
            self._player.set_state(Gst.State.PAUSED)

    def play(self):
        if self._file_uri is not None:
            self._player.set_state(Gst.State.PLAYING)

    def stop(self):
        """Stop playback and reset position/duration bookkeeping."""
        self._player.set_state(Gst.State.NULL)
        self._duration = None
        self._position = 0
        if (self._duration_changed_callback is not None):
            self._duration_changed_callback(0)
        if (self._position_changed_callback is not None):
            self._position_changed_callback(0)

    def set_file(self, file_uri):
        """Load a new media URI (or clear the player when None)."""
        self.stop()
        self._file_uri = file_uri
        if file_uri is None:
            # The DrawingArea may still show the old video (if any was
            # opened before). So we force a draw-signal which will lead
            # to a call to _on_video_draw.
            self._drawing_area.queue_draw()
        else:
            self._player.set_property('uri', file_uri)

    def seek(self, nanos):
        """Seek to an absolute position (clamped to [0, duration])."""
        if self._file_uri is None:
            return
        # The duration should have been already queried when the file
        # was loaded. However ensure that we have a duration!
        ok = self._query_duration()
        if not ok:
            Logger.warn(
                _('Warning - [Player] Failed to get duration. Seek aborted!'))
            return
        if (nanos < 0):
            nanos = 0
        elif (nanos > self._duration):
            nanos = self._duration  # TODO: duration is inaccurate!!!
        if (nanos == self._position):
            return
        ok = self._player.seek_simple(Gst.Format.TIME,
                                      Gst.SeekFlags.FLUSH | Gst.SeekFlags.ACCURATE,
                                      nanos)
        if not ok:
            Logger.warn(_('Warning - [Player] Failed to seek.'))

    def seek_relative(self, nanos):
        """Seek relative to the current position."""
        self.seek(self._position + nanos)
|
Date: 7. – 9. June 2019.
Maps: 1:10000, 1:7500 & 1:4000.
First start: 12:00 relay, 21:00 night sprint, 11:00 middle distance and 9:00 long distance.
In case of too few entries, organizer reserves the right to join the classes!
Late entries will be possible in case of vacant places at 50% higher price!
Control descriptions: On the map and in the start corridor. For relay only on map.
Information: questions via facebook page.
Accommodation: We will offer sport hall accommodation for price of 3 EUR per night.
All Alpe-Adria team members shall be offered simple accommodation (sport hall) for a discounted fee of 2 EUR per night from the day before the first competition until the last competition day. The maximum number of discounted accommodation places for one team is 30.
The Alpe-Adria meeting will be held on Friday, 7th June at 18:00. The location will be announced later.
Terrain: Fast continental terrain with plenty of streams and interesting relief details. The nice beech forest will be perfect for an exciting relay!
Special info: The codes of the control points will be printed on the map next to the control point numbers, separated with a “-” symbol. Control number 100 will be used 2 times during the course. You need to punch it every time you visit it. Control 100 is both the spectator control and the last control point. After competitors pass the spectator control they will have approximately 5 min of running left. A sample of the course and coding is presented in the photo below.
Terrain: Complex city centre with some open area. Since it is small area there will be map exchange for most of categories. Start and finish will be on the main city square. Prepare your headlamp!
Mapmakers: Milan Bílý, Roman Horký, Jan Picek. Revision 2019 Vladimir Tkalec.
Terrain: One of the nicest Croatian maps, with a mix of steep slopes around Jankovac lake and a detailed plateau with numerous depressions and holes.
|
__author__ = 'Philipp Lang'
from django import template
from dingos import DINGOS_TEMPLATE_FAMILY
from mantis_actionables.forms import TagForm
from mantis_actionables.models import Status
register = template.Library()
@register.filter
def lookup_status_processing(value):
    """Template filter: map a processing-status value to its display
    string, falling back to 'ERROR' for unknown values."""
    try:
        return Status.PROCESSING_MAP[value]
    except KeyError:
        return 'ERROR'
@register.filter
def lookup_status_tlp(value):
    """Template filter: map a TLP value to its display string, falling
    back to 'ERROR' for unknown values."""
    try:
        return Status.TLP_MAP[value]
    except KeyError:
        return 'ERROR'
@register.filter
def lookup_status_confidence(value):
    """Template filter: map a confidence value to its display string,
    falling back to 'ERROR' for unknown values."""
    try:
        return Status.CONFIDENCE_MAP[value]
    except KeyError:
        return 'ERROR'
@register.simple_tag()
def show_addTagInput_actionables(obj_id, curr_context):
    """Render the tag-input widget, annotated with the object id and
    current context as data attributes for the client-side code."""
    form = TagForm()
    widget = form.fields['tag'].widget
    widget.attrs.update({
        'data-obj-id': obj_id,
        'data-curr-context': curr_context,
    })
    return widget.render('tag', '')
@register.inclusion_tag('mantis_actionables/%s/includes/_ContextMetaDataWidget.html' % DINGOS_TEMPLATE_FAMILY)
def show_ContextMetaData(context_obj, widget_config=None):
    """Build the template context for the context-metadata widget.

    widget_config['action_buttons'] selects which buttons are shown;
    a missing/empty config defaults to edit + show_history.
    """
    # NOTE: an empty dict deliberately falls back to the default config.
    if not widget_config:
        widget_config = {'action_buttons': ['edit', 'show_history']}
    context = {'context_obj': context_obj}
    for name in ('edit', 'show_history', 'show_details'):
        context['%s_button' % name] = False
    for button in widget_config.get('action_buttons', []):
        context['%s_button' % button] = True
    return context
|
Leslie Satcher was born in Paris. Well, Texas that is, where she sang in local churches and schools an experience which she lists as one of her biggest influences. In 1989, she moved to Nashville, Tennessee to pursue her dream of being a country music singer, but found a niche writing the songs, for other artists. Many notable country music acts like Patty Loveless, Vince Gill, Willie Nelson, and Reba McEntire and Pam Tillis have recorded her songs. Her songwriting hits include: “I Said a Prayer” (Pam Tillis), or “When God-Fearin’ Women Get the Blues” (Martina McBride), while “Politically Uncorrect,” performed by Merle Haggard and Gretchen Wilson was nominated for a Grammy award.
With the release of her first album Love Letters in 2002 on Warner Brothers, and an independent release of Creation in 2005, she has continuously been met with critical acclaim. Even USA Today listed her as part of a “groundswell” in traditional country and bluegrass music. In 2008, two songs she co-wrote with Monty Holmes, “Troubadour” and “House Of Cash” were recorded by George Strait. In 2017, Leslie and The Electric Honey Badgers released the single “This Won’t Take Long”, featuring Vince Gill and Sheryl Crow.
Now in 2018, with the release of her full-length album 2 Days in Muscle Shoals, it's time once again for this electrifying artist to step into the spotlight. “Run It Down South” kicks off the album with guest artist Trisha Yearwood. The Electric Honey Badgers crank it up and give a no-holds-barred grooving good time. Satcher has a gritty bluesy voice that rustles the leaves with her powerful delivery. Satcher seamlessly sews pearls of lyrics together for a righteous storyline and she delivers it with soaring vocals to boot. With the addition of Yearwood on backing vocals the two harmonize with soulful conviction for a winning track.
“This Won’t Take Long,” featuring Vince Gill and Sheryl Crow has a reminiscence of Bonnie Raitt’s work with Don Was. Slick harmonies and jangling guitars, the tune is an anthem to love and its pursuit. Whereas “It Won’t Take Long,” featuring Yearwood once again as a guest artist gets back to Satcher’s roots and her commitment to her faith and unabashed conviction. The words ‘I’ve Always Known Who I Belong To,’ could not ring truer.
When “Chrome Halo” started to play I thought I was about to listen to Led Zeppelin's “Whole Lotta Love,” but then it quickly changed up to more of an elongated melody. Satcher uses an overdriven effect on her voice that gives the tune a rockish edge.
Overall, there is a nice blend of blues, country, singer-songwriter, and roots rock tunes that will fully satisfy your appetite. It is easy to see why Satcher has written for numerous country artists in the past; her ability to craft well-honed tunes with hook-laden melodies is a gift. It's nice to see her out front on this outing, as she certainly can hold her own as a leader. Worth the price of admission and more. Get yourself a copy.
|
from defaults import ONS_PEER_ROOT
from error import GladieterLibError
from defaults import ONS_SUPPORTED_REGEX
from xml.dom.minidom import parse
import urllib
import ons
def gtin_to_fqdn(gtin, peer_root=ONS_PEER_ROOT):
    """Build the ONS fully-qualified domain name for a GTIN.

    The first digit is kept in place, then digits 12 down to 1 follow in
    reverse order, each label separated by a dot, before the fixed
    'gtin.gs1.id.' suffix and the peer root.
    """
    labels = [gtin[0:1]]
    labels.extend(gtin[i:i + 1] for i in range(12, 0, -1))
    aus = ''.join(label + '.' for label in labels)
    return aus + 'gtin.gs1.id.' + peer_root
def regexp_to_uri(regexp):
    """Extract the URI from a NAPTR-style regexp field.

    The field looks like ``!pattern!replacement!``; splitting on ``!``
    puts the pattern at index 1 and the replacement (the URI) at index 2.
    Raises GladieterLibError when the pattern is not supported.
    """
    parts = regexp.split('!')
    # be picky!
    if parts[1] in ONS_SUPPORTED_REGEX:
        return parts[2]  # url
    raise GladieterLibError("Regexp not supported")
def is_product_recalled(gtin):
    """Check the EPCIS repository for a 'recalled' event for this GTIN.

    Resolves the EPCIS service URL via ONS, then polls it for holding/
    recalled events matching the GTIN. Returns True when a recall event
    is found, False otherwise -- including on any network or parse
    failure, so the check is best-effort.
    """
    fqdn = gtin_to_fqdn(str(gtin))
    o = ons.ONSServer()
    epcis = o.query_epcis(fqdn)
    uri = regexp_to_uri(epcis['regexp'])
    query_url = uri + "Service/Poll/SimpleEventQuery?" +\
        "EQ_bizStep=urn:epcglobal:cbv:bizstep:holding&EQ_disposition=urn:epcglobal:cbv:disp:recalled" +\
        "&MATCH_epc=urn:epc:id:gtin:" + str(gtin) + "&"
    try:
        xml = urllib.urlopen(query_url)
        dom = parse(xml)
        # Any <action> element means a recall event was recorded.
        return len(dom.getElementsByTagName('action')) > 0
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        return False
def get_gs1source_query_url(gtin):
    """Resolve the GS1 Source query URL for a GTIN via an ONS lookup."""
    fqdn = gtin_to_fqdn(str(gtin))
    server = ons.ONSServer()
    record = server.query_gs1source(fqdn)
    return regexp_to_uri(record['regexp'])
|
This is a pilot program to prevent Type 2 Diabetes in adults age 55 years and older who are at risk. The pilot program is based on the National Diabetes Prevention Program. Participants are provided with a health risk assessment to identify lifestyle factors, nutrition awareness and education and linked to exercise and fitness and support lifestyle improvement. Lifestyle Coaches will assist and empower participants.
The program is funded by the Ohio Commission on Minority Health.
The program operates weekly, Monday - Friday, 9:00 a.m. to 1:30 p.m.
This service transports seniors to and from Murtis Taylor's Senior program for meals and activities. Senior consumers are also transported to and from senior-oriented community programs/services and community resources.
Sewing Instruction is a skill-based service guided by a trained seamstress. Individualized and group instructions are provided.
|
import numpy
import logging
from sandbox.util.Sampling import Sampling
from sandbox.predictors.LibSVM import LibSVM
class SVMLeafRank(LibSVM):
    """
    A subclass of LibSVM which performs model selection (by AUC) before
    learning.
    """
    def __init__(self, paramDict, folds, sampleSize=None, numProcesses=1):
        """
        :param paramDict: grid of parameter values to search over
        :param folds: number of cross-validation folds
        :param sampleSize: number of randomly chosen examples to use for
            model selection (None means use all examples)
        :param numProcesses: number of processes for parallel selection
        """
        super(SVMLeafRank, self).__init__()
        self.paramDict = paramDict
        self.folds = folds
        self.chunkSize = 2
        # AUC is the metric used to compare parameter settings.
        self.setMetricMethod("auc2")
        self.sampleSize = sampleSize
        self.processes = numProcesses

    def generateLearner(self, X, y):
        """
        Train using the given examples and labels, and use model selection
        to find the best parameters.
        """
        if numpy.unique(y).shape[0] != 2:
            print(y)
            raise ValueError("Can only operate on binary data")

        # Do model selection first
        if self.sampleSize is None:  # FIX: idiomatic `is None` (was `== None`)
            idx = Sampling.crossValidation(self.folds, X.shape[0])
            learner, meanErrors = self.parallelModelSelect(X, y, idx, self.paramDict)
        else:
            # Model-select on a random subsample for speed.
            idx = Sampling.crossValidation(self.folds, self.sampleSize)
            inds = numpy.random.permutation(X.shape[0])[0:self.sampleSize]
            learner, meanErrors = self.parallelModelSelect(X[inds, :], y[inds], idx, self.paramDict)

        # Retrain on the full data with the best parameter setting.
        learner = self.getBestLearner(meanErrors, self.paramDict, X, y)
        return learner

    def getBestLearner(self, meanErrors, paramDict, X, y, idx=None):
        """
        As we are using AUC we look for the maximum metric value.
        """
        return super(SVMLeafRank, self).getBestLearner(meanErrors, paramDict, X, y, idx, best="max")

    def copy(self):
        """
        Return a new copied version of this object.
        """
        svm = SVMLeafRank(self.paramDict, self.folds, self.sampleSize)
        svm.setKernel(self.kernel, self.kernelParam)
        svm.setC(self.C)
        svm.setErrorCost(self.errorCost)
        svm.setPenalty(self.penalty)
        svm.setSvmType(self.type)
        svm.processes = self.processes
        svm.epsilon = self.epsilon
        svm.metricMethod = self.metricMethod
        svm.chunkSize = self.chunkSize
        svm.timeout = self.timeout
        # BUG FIX: was `svm.normModelSelect = svm.normModelSelect`, a
        # self-assignment that never copied the attribute from self.
        svm.normModelSelect = self.normModelSelect
        return svm
|
A refined five-arm candelabra pendant with gracious dual-layered glass diffusers and a long stem design. The Kimberly pendant is a timeless light that will add a touch of elegance to bathrooms, dining rooms and hallways. A companion sconce is also available to complete the room interior.
Dimensions Height: 15.25" X Width: 27 X Projection: 27"
|
##
# Copyright (C) 2013, 2014, 2015, 2016, 2017 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from datetime import datetime
import logging
from celery import chord
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import transaction
from django.utils import timezone
from pytz import utc
from inboxen.celery import app
from inboxen.models import Inbox
from inboxen.tasks import batch_delete_items, delete_inboxen_item
from inboxen.tickets.models import Question, Response
from inboxen.utils.tasks import chunk_queryset, create_queryset, task_group_skew
# Logger shared by all tasks in this module.
log = logging.getLogger(__name__)

# Inbox fields reset to their model defaults when an inbox is disowned.
INBOX_RESET_FIELDS = [
    "description",
    "disabled",
    "exclude_from_unified",
    "new",
    "pinned",
    "search_tsv",
    "user",
]

# Question fields scrubbed when deleting a user's account.
QUESTION_RESET_FIELDS = [
    "author",
    "subject",
    "body",
]

# Response fields scrubbed when deleting a user's account.
RESPONSE_RESET_FIELDS = [
    "author",
    "body",
]
def model_cleaner(instance, fields):
    """Reset the named model fields on *instance* back to their defaults.

    The instance is modified in place and NOT saved; callers save it.
    """
    meta = instance._meta
    for name in fields:
        default = meta.get_field(name).get_default()
        setattr(instance, name, default)
@app.task
@transaction.atomic()
def clean_questions(user_id):
    """Scrub the user's support questions: reset fields and zero the date."""
    for question in Question.objects.filter(author_id=user_id):
        model_cleaner(question, QUESTION_RESET_FIELDS)
        # Epoch timestamp removes any trace of when the question was asked.
        question.date = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
        question.save()
@app.task
@transaction.atomic()
def clean_responses(user_id):
    """Scrub the user's ticket responses by resetting their fields."""
    for response in Response.objects.filter(author_id=user_id):
        model_cleaner(response, RESPONSE_RESET_FIELDS)
        response.save()
@app.task(rate_limit="10/m", default_retry_delay=5 * 60) # 5 minutes
@transaction.atomic()
def disown_inbox(inbox_id):
    """Detach an inbox from its user and scrub its metadata.

    Email deletion is delegated to batch_delete_items tasks. Returns
    True if the inbox existed, False otherwise.
    """
    try:
        inbox = Inbox.objects.get(id=inbox_id)
    except Inbox.DoesNotExist:
        return False

    # delete emails in another task(s)
    batch_delete_items.delay("email", kwargs={'inbox__id': inbox.pk})

    # remove data from inbox
    model_cleaner(inbox, INBOX_RESET_FIELDS)
    inbox.deleted = True
    # Reset the creation date to the epoch so it reveals nothing.
    inbox.created = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
    inbox.save()

    return True
@app.task(ignore_result=True)
@transaction.atomic()
def finish_delete_user(result, user_id):
    """Chord callback: delete the user once all their inboxes are gone.

    Raises if any inbox still references the user, which aborts the
    deletion rather than leaving orphaned data.
    """
    has_inboxes = Inbox.objects.filter(user__id=user_id).only('id').exists()
    user = get_user_model().objects.get(id=user_id)
    if has_inboxes:
        raise Exception("User {0} still has inboxes!".format(user.username))
    log.info("Deleting user %s", user.username)
    user.delete()
@app.task(ignore_result=True)
@transaction.atomic()
def delete_account(user_id):
    """Kick off full deletion of a user account.

    Disables login immediately, then fans out tasks to disown every
    inbox and scrub ticket data; finish_delete_user runs once all of
    them have completed.
    """
    # first we need to make sure the user can't login
    user = get_user_model().objects.get(id=user_id)
    user.set_unusable_password()
    user.is_active = False
    user.save()

    # get ready to delete all inboxes
    inboxes = user.inbox_set.only('id')
    inbox_tasks = [disown_inbox.s(inbox.id) for inbox in inboxes]
    question_tasks = [clean_questions.s(user_id), clean_responses.s(user_id)]
    delete_chord = chord(inbox_tasks + question_tasks, finish_delete_user.s(user_id))
    delete_chord.apply_async()

    log.info("Deletion tasks for %s sent off", user.username)
@app.task
def user_suspended():
    """Dispatch the configured USER_SUSPEND_TASKS.

    Each settings entry is (delta_start, delta_end, task_name); the
    deltas bound the last_login window the named task should operate
    on. A None on either side makes the window open-ended.
    """
    now = timezone.now()
    for delta_start, delta_end, function in settings.USER_SUSPEND_TASKS:
        kwargs = {}
        if delta_start is None:
            kwargs["last_login__gt"] = now - delta_end
        elif delta_end is None:
            kwargs["last_login__lt"] = now - delta_start
        else:
            kwargs["last_login__range"] = (now - delta_end, now - delta_start)

        task = app.tasks[function]
        # The dispatched task receives the ORM filters as its `kwargs` arg.
        task.apply_async(kwargs={"kwargs": kwargs})
@app.task
def user_suspended_disable_emails(kwargs):
    """Turn off email receiving for profiles whose user matches *kwargs*.

    *kwargs* are last_login filters; they are re-rooted onto the
    profile's user relation before querying.
    """
    profile_filter = dict(("user__%s" % key, value) for key, value in kwargs.items())
    profiles = create_queryset("userprofile", kwargs=profile_filter)
    profiles.update(receiving_emails=False)
@app.task
def user_suspended_delete_emails(kwargs, batch_number=500, chunk_size=10000, delay=20):
    """Delete emails of suspended users in skewed task batches.

    *kwargs* are last_login filters, re-rooted onto the email's inbox
    user before querying.
    """
    kwargs = {"inbox__user__%s" % k: v for k, v in kwargs.items()}
    emails = create_queryset("email", kwargs=kwargs)

    for idx, chunk in chunk_queryset(emails, chunk_size):
        email_tasks = delete_inboxen_item.chunks([("email", i) for i in chunk], batch_number).group()
        # Stagger the task groups so the queue isn't flooded all at once.
        task_group_skew(email_tasks, start=(idx + 1) * delay, step=delay)
        email_tasks.delay()
@app.task
def user_suspended_delete_user(kwargs, batch_number=500, chunk_size=10000, delay=20):
    """Queue delete_account tasks for every user matching *kwargs*."""
    users = create_queryset(get_user_model(), kwargs=kwargs)

    for idx, chunk in chunk_queryset(users, chunk_size):
        user_tasks = delete_account.chunks([(i,) for i in chunk], batch_number).group()
        # Stagger the task groups so the queue isn't flooded all at once.
        task_group_skew(user_tasks, start=idx + 1, step=delay)
        user_tasks.delay()
@app.task
def user_suspended_delete_user_never_logged_in(kwargs, batch_number=500, chunk_size=10000, delay=20):
    """Delete users who never logged in, filtering on date_joined instead."""
    # Rewrite last_login filters to date_joined and require no login ever.
    kwargs = {k.replace("last_login", "date_joined"): v for k, v in kwargs.items()}
    kwargs["last_login__isnull"] = True
    # Reuse the plain deletion task body with the rewritten filters.
    user_suspended_delete_user(kwargs, batch_number, chunk_size, delay)
|
Doug, Becker and Chad celebrate the listeners on the inaugural Fan Appreciation Podcast.
Brooklinen.com - Get $20 off AND free shipping when you use promo code STANHOPE at Brooklinen.com.
Closing song “Party Time”, by The Mattoid. Available on iTunes.
|
#!/usr/local/bin/python
# Code Fights Float Range Problem
from itertools import count, takewhile
def floatRange(start, stop, step):
    """Return [start, start+step, ...] for values strictly below stop.

    Values are produced by repeated addition, so ordinary floating-point
    accumulation error applies.
    """
    values = []
    current = start
    while current < stop:
        values.append(current)
        current += step
    return values
def main():
    """Exercise floatRange against a table of (start, stop, step, expected)."""
    tests = [
        [-0.9, 0.45, 0.2, [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3]],
        [1.5, 1.5, 10, []],
        [1, 2, 1.5, [1]],
        [-21.11, 21.11, 1.11,
         [-21.11, -20, -18.89, -17.78, -16.67, -15.56, -14.45, -13.34,
          -12.23, -11.12, -10.01, -8.9, -7.79, -6.68, -5.57, -4.46, -3.35,
          -2.24, -1.13, -0.02, 1.09, 2.2, 3.31, 4.42, 5.53, 6.64, 7.75,
          8.86, 9.97, 11.08, 12.19, 13.3, 14.41, 15.52, 16.63, 17.74, 18.85,
          19.96, 21.07]],
        [0, 1, 0.5,
         [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
          0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]]
    ]
    for start, stop, step, expected in tests:
        actual = floatRange(start, stop, step)
        if expected == actual:
            print("PASSED: floatRange({}, {}, {}) returned {}"
                  .format(start, stop, step, actual))
        else:
            print(("FAILED: floatRange({}, {}, {}) returned {},"
                   "answer: {}").format(start, stop, step, actual, expected))


if __name__ == '__main__':
    main()
|
In doing this series of posts about Game of Thrones I have read and watched enough theories on Reddit and YouTube that my head is likely to explode before Season Eight premieres on April 14. What I have finally decided after all that “research” is that there are some theories that are more plausible than others but that more than likely my streak of correct predictions (i.e., Harry Potter, Left Behind, etc.) will be broken.
In the Song of Ice and Fire series by George R. R. Martin, popularly known as Game of Thrones, dreams and visions have import. They can be significant, and they have predicted and may still predict future events, although characters have sometimes dismissed the dreams or misinterpreted visions. Following are excerpts (without commentary) from the five published books that may give clues as to what will happen in the final season of the HBO series, which premieres on April 14.
Fans of George R. R. Martin’s epic fantasy know that the TV show Game of Thrones, based on the Song of Ice and Fire book series, is due to premiere its final season on April 14. For the obsessed, theories abound (plausible and wild) about how it all ends, which we’ll get into in a bit.
|
"""
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import logging
import os
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject, cached_property
from django.utils.version import get_version_tuple
logger = logging.getLogger('django.contrib.gis')
def load_geos():
    """Locate and load the GEOS-C shared library, returning a ctypes CDLL.

    Honours ``settings.GEOS_LIBRARY_PATH`` when set; otherwise searches a
    per-platform list of candidate names via ``ctypes.util.find_library``.
    Raises ImportError when no usable library can be found.
    """
    # A custom library path from settings takes precedence over discovery.
    try:
        from django.conf import settings
        lib_path = settings.GEOS_LIBRARY_PATH
    except (AttributeError, EnvironmentError,
            ImportError, ImproperlyConfigured):
        lib_path = None
    if lib_path:
        lib_names = None
    elif os.name == 'nt':
        lib_names = ['geos_c', 'libgeos_c-1']  # Windows builds
    elif os.name == 'posix':
        lib_names = ['geos_c', 'GEOS']  # *NIX builds
    else:
        raise ImportError('Unsupported OS "%s"' % os.name)
    # find_library saves us from spelling out every platform's extension
    # (libgeos_c.so, .so.1, .dylib, ...).
    if lib_names:
        for candidate in lib_names:
            lib_path = find_library(candidate)
            if lib_path is not None:
                break
    # Still nothing: give the user an actionable error.
    if lib_path is None:
        raise ImportError(
            'Could not find the GEOS library (tried "%s"). '
            'Try setting GEOS_LIBRARY_PATH in your settings.' %
            '", "'.join(lib_names)
        )
    # The C interface (CDLL) works for both *NIX and Windows; see the GEOS
    # C API headers for the function signatures configured below.
    geos_lib = CDLL(lib_path)
    # Prototypes for the reentrant init/finish routines.  They are not
    # called here -- they get attached to a GEOS context handle later, in
    # geos/prototypes/threadsafe.py.
    geos_lib.initGEOS_r.restype = CONTEXT_PTR
    geos_lib.finishGEOS_r.argtypes = [CONTEXT_PTR]
    # Explicit restype keeps 32- and 64-bit platforms in agreement.
    geos_lib.GEOSversion.restype = c_char_p
    return geos_lib
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)


def notice_h(fmt, lst):
    """GEOS notice callback: format the message and log it as a warning."""
    template = fmt.decode()
    argument = lst.decode()
    try:
        message = template % argument
    except TypeError:
        # Not every notice carries a formatting argument; log it verbatim.
        message = template
    logger.warning('GEOS_NOTICE: %s\n', message)


notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)


def error_h(fmt, lst):
    """GEOS error callback: format the message and log it as an error."""
    template = fmt.decode()
    argument = lst.decode()
    try:
        message = template % argument
    except TypeError:
        # No formatting argument accompanied this error; log it verbatim.
        message = template
    logger.error('GEOS_ERROR: %s\n', message)


error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
    """Opaque handle for a GEOSGeometry; never dereferenced from Python."""
    pass
class GEOSPrepGeom_t(Structure):
    """Opaque handle for a prepared geometry (GEOSPreparedGeometry)."""
    pass
class GEOSCoordSeq_t(Structure):
    """Opaque handle for a GEOS coordinate sequence."""
    pass
class GEOSContextHandle_t(Structure):
    """Opaque handle for a reentrant GEOS context."""
    pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# The library itself is loaded lazily, on first attribute access.
lgeos = SimpleLazyObject(load_geos)
class GEOSFuncFactory:
    """
    Lazy loading of GEOS functions.

    Subclasses may declare ``argtypes``/``restype``/``errcheck`` as class
    attributes; keyword arguments to the constructor override them per
    instance.  The underlying ctypes function is resolved on first call.
    """
    argtypes = None
    restype = None
    errcheck = None

    def __init__(self, func_name, *args, restype=None, errcheck=None, argtypes=None, **kwargs):
        self.func_name = func_name
        # Only shadow the class-level defaults when a value was supplied.
        for attr, value in (('restype', restype),
                            ('errcheck', errcheck),
                            ('argtypes', argtypes)):
            if value is not None:
                setattr(self, attr, value)
        self.args = args
        self.kwargs = kwargs

    def __call__(self, *args, **kwargs):
        # Delegate straight to the lazily-resolved ctypes function.
        return self.func(*args, **kwargs)

    @cached_property
    def func(self):
        """Resolve, configure, and cache the thread-safe GEOS function."""
        from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
        geos_func = GEOSFunc(self.func_name)
        geos_func.argtypes = self.argtypes or []
        geos_func.restype = self.restype
        if self.errcheck:
            geos_func.errcheck = self.errcheck
        return geos_func
def geos_version():
    """Return the version of the GEOS library as a bytestring.

    The value comes straight from GEOSversion(); callers that need text
    must decode it (see geos_version_tuple()).
    """
    return lgeos.GEOSversion()
def geos_version_tuple():
    """Return the GEOS version as a tuple (major, minor, subminor)."""
    version_text = geos_version().decode()
    return get_version_tuple(version_text)
|
Word on the tax professional “street” is that tax filing numbers are down across the board so far. That is from data released by software providers (though this year, of all years, I simply don’t understand why somebody would use software), as well as overall numbers from the IRS, AND those who are using a professional.
It seems that many Ashland, KY taxpayers are a little skittish about all of these changes.
But that being the case on a broad level, we at Team Ernest P. Sharp II, CPA are in the midst of one of the best tax seasons we’ve ever experienced. From a bunch of wonderful new Ashland, KY clients, to the gratification we’ve experienced from helping long-standing clients keep MORE than they thought was possible … well, suffice it to say that we love what we get to do.
So thank you for your referrals, and for continuing to trust us as you have been. And if you haven’t gotten started on it yet … well, having a pro in your corner can make a big difference. So take advantage! Shoot us an email or give us a call: (606) 324-5655. We’ll walk you through whatever you need.
If you work, or have worked, for somebody else, you know a W-2 form is essential when filing your taxes. Without it, you’re stepping into the batter’s box without a bat.
The good part is that the IRS has made an effort to streamline their customer service questions through a toll-free number: (800) 829-1040. Calling this number is your first step toward receiving a substitute W-2 which you’ll need ASAP.
If you’re low on patience with the IRS, and your employer has yet to deliver your W-2, there are a couple more forms you could fill out.
Form 4852 is a “create your own” W-2 form, and you can use some of the information you prepared for the IRS, and therefore have on hand, to fill it out.
And if, for instance, you forgot to mark down a bonus you had received (in addition to your income from the year before), I don’t want you to panic.
Form 1040X isn’t as intimidating as it sounds. It will help you record the changes necessary to your W-2, which the IRS will fix after you mail it in.
Again, if you complete these steps or have further questions, please give me a call. Trying to file your taxes without a W-2 is not a happy feeling … but you’re not alone.
I commend any effort you take to resolve this issue on your own, but don’t hesitate to reach out for help. Talking with people like you is why I love my job.
|
#!/usr/bin/env python
# Copyright (c) 2008-14 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future_builtins import *
from PyQt4.QtCore import (QPointF, QSize, Qt)
from PyQt4.QtCore import pyqtSignal as Signal
from PyQt4.QtGui import (QApplication, QBrush, QColor, QFontMetricsF,
QFrame, QLabel, QLinearGradient, QPainter, QPolygon,
QSizePolicy, QSpinBox, QWidget)
class YPipeWidget(QWidget):
    """A Y-shaped pipe widget in which two input flows merge into one output.

    Each arm of the Y has a spin box giving its flow in litres/second; a
    label under the stem shows the combined flow.  The arms and stem are
    painted with vertical gradients whose colors reflect each input's
    contribution to the mix.
    """
    # Emitted as (left flow, right flow) whenever either spin box changes.
    value_changed = Signal(int, int)
    def __init__(self, leftFlow=0, rightFlow=0, maxFlow=100,
                 parent=None):
        super(YPipeWidget, self).__init__(parent)
        # Left-arm flow control.
        self.leftSpinBox = QSpinBox(self)
        self.leftSpinBox.setRange(0, maxFlow)
        self.leftSpinBox.setValue(leftFlow)
        self.leftSpinBox.setSuffix(" l/s")
        self.leftSpinBox.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
        self.leftSpinBox.valueChanged.connect(self.valueChanged)
        # Right-arm flow control.
        self.rightSpinBox = QSpinBox(self)
        self.rightSpinBox.setRange(0, maxFlow)
        self.rightSpinBox.setValue(rightFlow)
        self.rightSpinBox.setSuffix(" l/s")
        self.rightSpinBox.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
        self.rightSpinBox.valueChanged.connect(self.valueChanged)
        # Read-only label that displays the combined flow.
        self.label = QLabel(self)
        self.label.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
        self.label.setAlignment(Qt.AlignCenter)
        fm = QFontMetricsF(self.font())
        # Wide enough for the largest possible reading.
        self.label.setMinimumWidth(fm.width(" 999 l/s "))
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,
                                       QSizePolicy.Expanding))
        self.setMinimumSize(self.minimumSizeHint())
        # Initialize the label text and emit the initial values.
        self.valueChanged()
    def valueChanged(self):
        """Refresh the total label, emit value_changed, and repaint."""
        a = self.leftSpinBox.value()
        b = self.rightSpinBox.value()
        self.label.setText("{0} l/s".format(a + b))
        self.value_changed.emit(a, b)
        self.update()
    def values(self):
        """Return the (left, right) flow values."""
        return self.leftSpinBox.value(), self.rightSpinBox.value()
    def minimumSizeHint(self):
        """Base the minimum size on the spin box's own dimensions."""
        return QSize(self.leftSpinBox.width() * 3,
                     self.leftSpinBox.height() * 5)
    def resizeEvent(self, event=None):
        """Re-position the child widgets relative to the new size."""
        fm = QFontMetricsF(self.font())
        # Center the label near the bottom edge.
        x = (self.width() - self.label.width()) / 2
        y = self.height() - (fm.height() * 1.5)
        # NOTE(review): with true division x and y are floats; PyQt4's
        # QWidget.move expects ints -- confirm sip coerces them here.
        self.label.move(x, y)
        # Spin boxes sit near the top, one quarter in from each side.
        y = self.height() / 60.0
        x = (self.width() / 4.0) - self.leftSpinBox.width()
        self.leftSpinBox.move(x, y)
        x = self.width() - (self.width() / 4.0)
        self.rightSpinBox.move(x, y)
    def paintEvent(self, event=None):
        """Paint the Y pipe in a 100x100 logical coordinate system."""
        LogicalSize = 100.0
        def logicalFromPhysical(length, side):
            # Convert a physical length into logical units for a viewport
            # whose side is `side` pixels.
            return (length / side) * LogicalSize
        fm = QFontMetricsF(self.font())
        # Vertical band available for the pipe: below the spin boxes ...
        ymargin = ((LogicalSize / 30.0) +
                   logicalFromPhysical(self.leftSpinBox.height(),
                                       self.height()))
        # ... and above the label.
        ymax = (LogicalSize -
                logicalFromPhysical(fm.height() * 2, self.height()))
        width = LogicalSize / 4.0
        # Corner points of the Y: a-b top of the left arm, d-e top of the
        # right arm, c the junction, f-g-h-i the stem.
        cx, cy = LogicalSize / 2.0, LogicalSize / 3.0
        ax, ay = cx - (2 * width), ymargin
        bx, by = cx - width, ay
        dx, dy = cx + width, ay
        ex, ey = cx + (2 * width), ymargin
        fx, fy = cx + (width / 2), cx + (LogicalSize / 24.0)
        gx, gy = fx, ymax
        hx, hy = cx - (width / 2), ymax
        ix, iy = hx, fy
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        # Keep the drawing square and centered in the widget.
        side = min(self.width(), self.height())
        painter.setViewport((self.width() - side) / 2,
                            (self.height() - side) / 2, side, side)
        painter.setWindow(0, 0, LogicalSize, LogicalSize)
        painter.setPen(Qt.NoPen)
        # Left arm: white fading to red when there is any left flow.
        gradient = QLinearGradient(QPointF(0, 0),
                                   QPointF(0, 100))
        gradient.setColorAt(0, Qt.white)
        a = self.leftSpinBox.value()
        gradient.setColorAt(1, (Qt.red if a != 0 else Qt.white))
        painter.setBrush(QBrush(gradient))
        # NOTE(review): these coordinates are floats under true division;
        # PyQt4's QPolygon takes a flat list of ints -- confirm conversion.
        painter.drawPolygon(QPolygon([ax, ay, bx, by, cx, cy, ix, iy]))
        # Right arm: white fading to blue when there is any right flow.
        gradient = QLinearGradient(QPointF(0, 0), QPointF(0, 100))
        gradient.setColorAt(0, Qt.white)
        b = self.rightSpinBox.value()
        gradient.setColorAt(1, (Qt.blue if b != 0
                                else Qt.white))
        painter.setBrush(QBrush(gradient))
        painter.drawPolygon(QPolygon([cx, cy, dx, dy, ex, ey, fx, fy]))
        # Stem: red/blue blend proportional to each input's share of the mix.
        if (a + b) == 0:
            color = QColor(Qt.white)
        else:
            ashare = (a / (a + b)) * 255.0
            bshare = 255.0 - ashare
            color = QColor(ashare, 0, bshare)
        gradient = QLinearGradient(QPointF(0, 0), QPointF(0, 100))
        gradient.setColorAt(0, Qt.white)
        gradient.setColorAt(1, color)
        painter.setBrush(QBrush(gradient))
        painter.drawPolygon(QPolygon(
            [cx, cy, fx, fy, gx, gy, hx, hy, ix, iy]))
        # Outline the pipe edges in black.
        painter.setPen(Qt.black)
        painter.drawPolyline(QPolygon([ax, ay, ix, iy, hx, hy]))
        painter.drawPolyline(QPolygon([gx, gy, fx, fy, ex, ey]))
        painter.drawPolyline(QPolygon([bx, by, cx, cy, dx, dy]))
if __name__ == "__main__":
    import sys

    def report(a, b):
        """Echo the two flow values whenever the widget emits them."""
        print(a, b)

    app = QApplication(sys.argv)
    form = YPipeWidget()
    form.value_changed.connect(report)
    form.setWindowTitle("YPipe")
    form.move(0, 0)
    form.show()
    form.resize(400, 400)
    app.exec_()
|
If you have hair loss from any medical condition such as Chemotherapy, Alopecia, Trichotillomania, psoriasis or other medical conditions that has caused you to lose your hair, you may be eligible to receive a full or partial refund or financial assistance for purchasing a "Full Cranial Prosthesis." You might also be eligible to receive a voucher or grant from one of the organizations listed below.
Our system is designed to meet the specific needs of women suffering from hair loss. Made from 100% Remy Human Hair, our system gives a natural scalp appearance no matter where it is parted or styled.
Request Your Health Insurance Provider to Cover the Cost of Your Cranial Prosthesis. The prescription must be for a full Cranial Prosthesis this is very important!
Note: You MUST use this terminology "cranial prosthesis” because this is the terminology used for the system or hair piece and it might enable it to be covered under your medical insurance policy.
DO NOT request coverage for a wig because more than likely, your claim will be denied. You MUST request a "cranial prosthesis."
If you plan to bill your health insurance for payment, please let us know in advance so that we can make sure your invoice reads correctly; otherwise, your health insurance provider might not pay.
Note: You MUST follow all instructions given to you by your health insurance provider or you may not qualify for the reimbursement. Make sure you keep copies of all of your documentation and invoices for tax exempt purposes.
The Volumizer Integration System is an advanced method that provides a solution for thinning or fine hair. Women with thin or fine hair commonly have concerns about their appearance. Most solutions offered today create further hair loss or damage. Our unique Integration systems offer top of the head hair enhancement without shaving or bonding. This system is worn for 6-8 weeks at a time and can be used for up to 12 months. This is the perfect solution for women looking for a little more top of the head coverage.
A consultation is necessary to assess your suitability and requirements for this method. We will give you a quote once we have discussed your needs and desired end results.
Please contact us to set up an appointment for a consultation.
Please note, for a price quote by email or over the phone, it is impossible for us to give quotes because of the customized nature of each hair extensions service. Prices will vary from person to person. By coming in for a consultation, we determine your hair texture, desired length, budget, the type of extensions that can be applied. The pricing for Hair Extensions will depend on the level of service required, which is determined by the type and length of the hair extensions, and the condition of your natural hair.
Consultations are $50. Please visit our consultation page here.
|
"""
Django settings for foodie project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two directory levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded in source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'i-$0s)*3+m%d_kw4c&f6h+5a_k8f$bco=4gg2xm-=88l33!(d!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty list is fine while DEBUG is True; must be populated for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'randomizer',  # project-local app
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'foodie.urls'
WSGI_APPLICATION = 'foodie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Development default: file-based SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
|
The Feniex Flasher is a versatile 4-channel LED flasher that offers 40 patterns in a compact footprint. Its solid-state design allows for compatibility with both positive and ground switched LED modules. Compatible with all emergency response vehicles, the Flasher’s three modes make it flexible enough to function as both a warning and/or indication controller.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.