text
stringlengths 29
850k
|
|---|
import numpy
import theano
import theano.tensor as T
rng = numpy.random
N = 400
feats = 784
D = (rng.randn(N, feats).astype(theano.config.floatX),
rng.randint(size=N,low=0, high=2).astype(theano.config.floatX))
training_steps = 10000
# Declare Theano symbolic variables
x = T.matrix("x")
y = T.vector("y")
w = theano.shared(rng.randn(feats).astype(theano.config.floatX), name="w")
b = theano.shared(numpy.asarray(0., dtype=theano.config.floatX), name="b")
x.tag.test_value = D[0]
y.tag.test_value = D[1]
#print "Initial model:"
#print w.get_value(), b.get_value()
# Construct Theano expression graph
p_1 = 1 / (1 + T.exp(-T.dot(x, w)-b)) # Probability of having a one
prediction = p_1 > 0.5 # The prediction that is done: 0 or 1
xent = -y*T.log(p_1) - (1-y)*T.log(1-p_1) # Cross-entropy
cost = xent.mean() + 0.01*(w**2).sum() # The cost to optimize
gw,gb = T.grad(cost, [w,b])
# Compile expressions to functions
train = theano.function(
inputs=[x,y],
outputs=[prediction, xent],
updates={w:w-0.01*gw, b:b-0.01*gb},
name = "train")
predict = theano.function(inputs=[x], outputs=prediction,
name = "predict")
if any([x.op.__class__.__name__ in ['Gemv', 'CGemv', 'Gemm', 'CGemm'] for x in
train.maker.fgraph.toposort()]):
print 'Used the cpu'
elif any([x.op.__class__.__name__ in ['GpuGemm', 'GpuGemv'] for x in
train.maker.fgraph.toposort()]):
print 'Used the gpu'
else:
print 'ERROR, not able to tell if theano used the cpu or the gpu'
print train.maker.fgraph.toposort()
for i in range(training_steps):
pred, err = train(D[0], D[1])
#print "Final model:"
#print w.get_value(), b.get_value()
print "target values for D"
print D[1]
print "prediction on D"
print predict(D[0])
|
... a thin little headband accessorizing her simple hair bun.
I haven't thought to throw on a headband with a bun in so long, but I think it adds such a nice subtle something-something to make the hairstyle more interesting--especially when it's in a color that closely matches the hair, like Dita's is (keeps things from looking too cluttered). It's also an excellent way to tame those wayward baby hairs around the fringe of your face for a sleeker finish.
What do you think about wearing a little headband with a bun? Is this look for you, or not?
|
from .actions import parse_actions, parse_commands
class Tile(object):
    """A named tile with per-side action and command lookup tables.

    The raw ``front``/``back`` and ``*_commands`` inputs are run through
    the project parsers exactly once, at construction time.
    """

    def __init__(self, name, front=None, back=None,
                 front_commands=None, back_commands=None):
        self.name = name
        # Parsed (x, y) -> action/command mappings, one table per side.
        self.front = parse_actions(front)
        self.back = parse_actions(back)
        self.front_commands = parse_commands(front_commands)
        self.back_commands = parse_commands(back_commands)

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<Tile %s>' % self.name.upper()

    def action(self, x, y, side):
        """Return the action at (x, y) on ``side``, or None if absent."""
        if side == 'front':
            table = self.front
        else:
            table = self.back
        return table.get((x, y))

    def command(self, x, y, side):
        """Return the command at (x, y) on ``side``, or None if absent."""
        if side == 'front':
            table = self.front_commands
        else:
            table = self.back_commands
        return table.get((x, y))

    def on_board(self, color):
        """Create the board-placed version of this tile."""
        return BoardTile(self, color)
class BoardTile(Tile):
    """A Tile placed on the board: adds a color and tracks which side is up.

    Bug fix: the original ``__init__`` called ``Tile.__init__`` without the
    required ``name`` argument (an unconditional TypeError) and passed the
    parent's *already parsed* tables back through ``parse_actions`` /
    ``parse_commands``.  We now copy the parent's parsed state directly.
    """

    def __init__(self, parent, color):
        # Share the parent's already-parsed action/command tables.
        self.name = parent.name
        self.front = parent.front
        self.back = parent.back
        self.front_commands = parent.front_commands
        self.back_commands = parent.back_commands
        self.color = color
        self.side = 'front'  # tiles start face up

    def flip(self):
        """Turn the tile over to its other side."""
        self.side = 'back' if self.side == 'front' else 'front'

    def action(self, x, y):
        # Delegate to Tile.action using whichever side is currently up.
        return super(BoardTile, self).action(x, y, self.side)

    def command(self, x, y):
        # Delegate to Tile.command using whichever side is currently up.
        return super(BoardTile, self).command(x, y, self.side)
|
Steak night should be fun and fancy, but it doesn’t have to be complicated. In this dish, both the steaks and the sides are put in the oven to cook, so there’s very little hands-on time (and fewer dishes to wash!). You should keep a timer handy, to be sure your steaks are cooked to your liking. Apple and sweet potato, baked together until tender and sweet, pair nicely with the juicy grass-fed sirloin. A quick sour cream sauce made with Dijon mustard has a tanginess that complements every single bite.
Suggested beer pairing Kellerbier A steak served with apples and sweet potatoes screams German beer—or at least German-style. For this dish, we are going to suggest a slightly lesser-known style of German Lager known as a kellerbier, aka "cellar beer". North Carolina's own Burial Brewing makes such a beer under the moniker Blood Tusk: a hazy yellow lager traditionally hopped with lots of biscuit flavor from the malt. These flavors are like adding a thick slice of bread to go with the steak, apples, and sweet potato. Granted, finding a kellerbier might be difficult, so we suggest a traditional lager as a substitute.
Suggested wine pairing Sangiovese Blend - Tuscany, Italy There are soft cherry and wild berry notes in a super Tuscan blend. I also get red apple skin that I think will go well with the steak and apple combination in this dish.
• Peel sweet potato. Cut in 1/4-inch dice.
• Peel apple. Cut in 1/4-inch dice.
• On a baking sheet, toss sweet potato with 1/2 teaspoon olive oil and 1/4 teaspoon PeachDish Salt. Roast 10 minutes.
• Add apple to baking sheet. Sprinkle thyme over sweet potato and apple. Roast until tender, about 10 minutes.
While potato roasts, season steak on all sides with a total 1/4 teaspoon PeachDish Salt.
In a bowl, whisk together sour cream, mustard and vinegar.
• Transfer steak to a plate. Let rest.
• Divide sweet potato mixture between 2 plates.
• Divide steak between plates. Drizzle with sauce, and enjoy!
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urllib2
import xbmc
import xbmcgui
import xbmcplugin
XbmcTRteam='http://XbcmTR.com'  # team homepage; not referenced anywhere in this chunk
def CATEGORIES():
    """Build the plugin's root menu: one info entry plus cleanup entries."""
    # (title, callback url, mode, icon) for each menu entry, in display order.
    entries = [
        ('[COLOR red][B]>> INFOYU OKUYUNUZ <<[/B][/COLOR] ',
         "INFO(name)", '7', 'http://www.kanal23.com/dosya/unlem-1.jpg'),
        ('[COLOR orange][B]>> [/B][/COLOR][COLOR beige][B]PACKAGES TEMIZLIGI - 1 -[/B][/COLOR] ',
         "MAINDEL(name)", '1', 'http://ryayla.com/uploads/images/okey.png'),
        ('[COLOR orange][B]>> [/B][/COLOR][COLOR beige][B]CACHE TEMIZLIGI - 2 -[ Sadece PC ][/B][/COLOR] ',
         "MAINDEL2(name)", '2', 'http://ryayla.com/uploads/images/okey.png'),
        ('[COLOR yellow][B]>> [/B][/COLOR][COLOR yellow][B]! Apple TV & Android BOX Cache Temizligi ![/B][/COLOR] ',
         "MAINDEL3(name)", '11', 'http://ryayla.com/uploads/images/okey.png'),
    ]
    for title, target, mode, icon in entries:
        addDir(title, target, mode, icon)
def MAINDEL(name):
    """Confirm with the user, then delete every file in Kodi's packages folder.

    Bug fix: the original except handler called ``xbmcgui.Dialog(e)`` --
    ``Dialog()`` takes no constructor arguments, so any deletion error
    raised a second exception instead of being reported.
    """
    dialog = xbmcgui.Dialog()
    ret = dialog.yesno('DreamTR Team UYARI', 'PACKAGES temizliginden Eminmisiniz ! ', '', '', 'No', 'Yes')
    if ret:
        import os
        folder = xbmc.translatePath(os.path.join('special://home/addons/packages/', ''))
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                # Report the failure and keep deleting the remaining files.
                dialog.ok('!!! Packages !!!', str(e))
    # Completion notice is shown regardless of confirmation, matching the
    # original control flow.
    dialog.ok('!!! Packages !!!', "[COLOR beige]Packages Temizliginiz Bitmistir[/COLOR]", "[COLOR pink]iyi kullanimlar.[/COLOR]")
def MAINDEL2(name):
    """Confirm with the user, then delete every file in Kodi's cache folder.

    Bug fix: ``xbmcgui.Dialog(e)`` in the original except handler is
    invalid (Dialog() takes no arguments); errors are now shown via ok().
    """
    dialog = xbmcgui.Dialog()
    ret = dialog.yesno('DreamTR Team UYARI', 'CACHE temizliginden Eminmisiniz ! ', '', '', 'No', 'Yes')
    if ret:
        import os
        folder = xbmc.translatePath(os.path.join('special://home/cache', ''))
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                # Report the failure and keep deleting the remaining files.
                dialog.ok('Temizlendi Uyarisi !!!', str(e))
    # Completion notice, shown regardless of confirmation (as originally).
    dialog.ok('Temizlendi Uyarisi !!!', "[COLOR beige]Temizliginiz basariyla bitmistir[/COLOR]", "[COLOR pink]iyi kullanimlar.[/COLOR]")
def MAINDEL3(name):
    """Confirm, then delete every file in Kodi's temp folder (Apple TV /
    Android box cache).

    Bug fix: ``xbmcgui.Dialog(e)`` in the original except handler is
    invalid (Dialog() takes no arguments); errors are now shown via ok().
    """
    dialog = xbmcgui.Dialog()
    ret = dialog.yesno('DreamTR Team UYARI', 'Apple TV & Android Box - CACHE temizliginden Eminmisiniz ! ', '', '', 'No', 'Yes')
    if ret:
        import os
        folder = xbmc.translatePath(os.path.join('special://home/temp', ''))
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                # Report the failure and keep deleting the remaining files.
                dialog.ok('Temizlendi Uyarisi !!!', str(e))
    # Completion notice, shown regardless of confirmation (as originally).
    dialog.ok('Temizlendi Uyarisi !!!', "[COLOR beige]Temizliginiz basariyla bitmistir[/COLOR]", "[COLOR pink]iyi kullanimlar.[/COLOR]")
def MAINDEL4(name):
    """Confirm, then delete the stored Dream add-on credentials (all files
    under its addon_data folder).

    Bug fix: ``xbmcgui.Dialog(e)`` in the original except handler is
    invalid (Dialog() takes no arguments); errors are now shown via ok().
    """
    dialog = xbmcgui.Dialog()
    ret = dialog.yesno('!!Dream Sifrenizi Silmek icin Eminmisiniz !!', '! Dream Sifrelerinizi Silmek istediginizden Eminmisiniz !! ', '', '', 'No', 'Yes')
    if ret:
        import os
        folder = xbmc.translatePath(os.path.join('special://home/userdata/addon_data/plugin.video.dream-clup', ''))
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                # Report the failure and keep deleting the remaining files.
                dialog.ok('!Dream Sifreniz Silindi!', str(e))
    # Completion notice, shown regardless of confirmation (as originally).
    dialog.ok('!Dream Sifreniz Silindi!', "[COLOR beige]Dream Sifreler Silindi ![/COLOR]", "[COLOR pink]iyi kullanimlar.[/COLOR]")
def MAINDEL5(name):
    """Confirm, then delete the stored MagicTR add-on credentials (all
    files under its addon_data folder).

    Bug fix: ``xbmcgui.Dialog(e)`` in the original except handler is
    invalid (Dialog() takes no arguments); errors are now shown via ok().
    """
    dialog = xbmcgui.Dialog()
    ret = dialog.yesno('!!MagicTR Sifrenizi Silmek icin Eminmisiniz !!', '! MagicTR Sifrelerinizi Silmek istediginizden Eminmisiniz !! ', '', '', 'No', 'Yes')
    if ret:
        import os
        folder = xbmc.translatePath(os.path.join('special://home/userdata/addon_data/plugin.video.magicTR', ''))
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                # Report the failure and keep deleting the remaining files.
                dialog.ok('!MagicTR Sifreniz Silindi!', str(e))
    # Completion notice, shown regardless of confirmation (as originally).
    dialog.ok('!MagicTR Sifreniz Silindi!', "[COLOR beige]MagicTR Sifreler Silindi ![/COLOR]", "[COLOR pink]iyi kullanimlar.[/COLOR]")
def INFO(url):
    # Show an informational dialog.  ``url`` carries the menu entry's title
    # text (CATEGORIES passes the name through the ``url`` query parameter).
    try:
        # Rebuild the root menu behind the dialog.
        CATEGORIES()
        dialog = xbmcgui.Dialog()
        i = dialog.ok(url, "[COLOR beige]XBMC daha hizli ve sorunsuz kullanmaniz icindir.[/COLOR]","[COLOR yellow]Bu Islemleri SIK SIK YAPINIZ !![/COLOR]")
    except:
        # Best-effort UI: silently ignore any dialog/menu-build error.
        pass
def get_params():
    """Parse Kodi's query string (``sys.argv[2]``) into a parameter dict.

    Returns a ``{key: value}`` dict, or an empty list when no query string
    is present (kept for compatibility with existing callers).

    Fixes two bugs in the original:
    * the trailing-'/' strip used an off-by-one slice (``len-2``), and
    * it was applied to ``params`` while the split used ``cleanedparams``,
      so the strip had no effect at all.
    """
    param = []
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        cleanedparams = paramstring.replace('?', '')
        # Drop a single trailing '/' (Kodi sometimes appends one).
        if cleanedparams.endswith('/'):
            cleanedparams = cleanedparams[:-1]
        param = {}
        for pair in cleanedparams.split('&'):
            kv = pair.split('=')
            if len(kv) == 2:
                param[kv[0]] = kv[1]
    return param
def addLink(name, url, iconimage):
    """Add a non-folder (playable) item to the plugin directory listing."""
    item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png",
                            thumbnailImage=iconimage)
    item.setInfo(type="Video", infoLabels={"Title": name})
    # Return whatever Kodi reports for the add operation.
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url,
                                       listitem=item)
def addDir(name, url, mode, iconimage):
    """Add a folder item whose plugin URL encodes url/mode/name as query
    parameters (decoded later by get_params)."""
    target = (sys.argv[0]
              + "?url=" + urllib.quote_plus(url)
              + "&mode=" + str(mode)
              + "&name=" + urllib.quote_plus(name))
    item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png",
                            thumbnailImage=iconimage)
    item.setInfo(type="Video", infoLabels={"Title": name})
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=target,
                                       listitem=item, isFolder=True)
# ---------------------------------------------------------------------------
# Plugin entry point: decode the query-string parameters handed to the
# add-on by Kodi, then dispatch to the handler selected by ``mode``.
# ---------------------------------------------------------------------------
params=get_params()
url=None
name=None
mode=None
# Every parameter is optional; a missing key just leaves the default None.
try:
    url=urllib.unquote_plus(params["url"])
except:
    pass
try:
    name=urllib.unquote_plus(params["name"])
except:
    pass
try:
    mode=int(params["mode"])
except:
    pass
# Log the decoded request (appears in the Kodi log).
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
if mode==None or url==None or len(url)<1:
    # No usable parameters: first invocation -> show the root menu.
    print ""
    CATEGORIES()
elif mode==1:
    print ""+url
    MAINDEL(name)
elif mode==2:
    print ""+url
    MAINDEL2(name)
elif mode==11:
    print ""+url
    MAINDEL3(name)
elif mode==12:
    # NOTE(review): modes 12/13 are dispatched here, but CATEGORIES() never
    # creates entries for them -- presumably reachable only via hand-built
    # URLs; confirm before removing.
    print ""+url
    MAINDEL4(name)
elif mode==13:
    print ""+url
    MAINDEL5(name)
elif mode==7:
    print ""+url
    INFO(url)
# Tell Kodi the directory listing is complete.
xbmcplugin.endOfDirectory(int(sys.argv[1]))
|
At Copper Canyon Boutique Hotel we make group travel the best experience. Whether it’s a work group, schools or family trips, our hotel in Chihuahua, Mexico will offer you the best group rates and the ideal services to satisfy the needs of each and every traveler.
Traveling to Chihuahua and need 10 or more rooms? We have a special rate for you!
Fill out the following form and we’ll contact you as soon as possible.
|
#!/usr/bin/python
from datetime import datetime, timedelta
import os
from constants import MessageCodes, Sensors, Statuses, DebugLevels
import db
from workerthread import WorkerThread
class SystemMonitor(WorkerThread):
    """Periodic worker that checks disk space, available memory and CPU
    temperature, raising WARNING/ALERT statuses at fixed thresholds, and
    reports system uptime via getcapabilities()."""

    # Scheduling constants consumed by WorkerThread -- presumably seconds;
    # TODO(review): confirm units against workerthread.py.
    RUNTIME = 900
    EXCEPTION_TIMEOUT = 60
    FRIENDLY_NAME = "System Monitor"

    def __init__(self, inQueue, outQueue):
        # Remember start time so getcapabilities() can report uptime.
        self.initTime = datetime.now()
        super(SystemMonitor, self).__init__(inQueue, outQueue)

    def dowork(self):
        """Run one monitoring pass over all sensors."""
        # Disk space: ALERT under 100 MB, WARNING under 1000 MB.
        diskSpace = self.readsensor(Sensors.DISK_SPACE)
        if diskSpace < 100*10**6:
            self.setstatus(Statuses.ALERT, "Diskspace {0}MB below 100MB".format(int(diskSpace/10**6)))
        elif diskSpace < 1000*10**6:
            self.setstatus(Statuses.WARNING, "Diskspace {0}MB below 1000MB".format(int(diskSpace/10**6)))

        # Memory: ALERT under 30%, WARNING under 50%.
        # Bug fix: the original messages said "below 25%" / "below 30%",
        # contradicting the actual thresholds tested (30 / 50).
        availableMemory = self.readsensor(Sensors.AVAILABLE_MEMORY)
        if availableMemory < 30:
            self.setstatus(Statuses.ALERT, "Available memory {0}% below 30%".format(availableMemory))
        elif availableMemory < 50:
            self.setstatus(Statuses.WARNING, "Available memory {0}% below 50%".format(availableMemory))

        # TODO: We can add a CPU fan if necessary
        # CPU temperature: ALERT above 70, WARNING above 60.
        cpuTemp = self.readsensor(Sensors.CPU_TEMP)
        if cpuTemp > 70:
            self.setstatus(Statuses.ALERT, "CPU temp {0} above 70".format(cpuTemp))
        elif cpuTemp > 60:
            self.setstatus(Statuses.WARNING, "CPU temp {0} above 60".format(cpuTemp))

    def getcapabilities(self, request):
        """Add the SYSTEM_UP_TIME reading, then defer to the base class."""
        upTime = datetime.now() - self.initTime
        self.sensorReadings['SYSTEM_UP_TIME'] = {
            MessageCodes.VALUE: upTime,
            MessageCodes.FRIENDLY_VALUE: self.formatTimeDelta(upTime),
            MessageCodes.FRIENDLY_NAME: 'System Up Time'
        }
        return super(SystemMonitor, self).getcapabilities(request)

    def formatTimeDelta(self, upTime):
        """Render a timedelta as e.g. '2 days, 3:04:05' ('1 day, ...')."""
        daysPart = "{0} day, ".format(upTime.days) if upTime.days == 1 else "{0} days, ".format(upTime.days)
        # timedelta(seconds=...) stringifies as H:MM:SS.
        timePart = timedelta(seconds=upTime.seconds)
        return daysPart + str(timePart)

    def setup(self):
        """Nothing to do"""
        return

    def teardown(self, message):
        """Nothing to do"""
        return
|
Hyster C160 J30 40xmt Service Shop Manual Forklift Workshop Repair Book - Hyster C160 (J30-40XMT) Shop Repair Manual. The Service workshop repair manual offers detailed servicing instructions and will give you complete step by step information on repair, servicing, preventative maintenance & troubleshooting procedures for your Hyster Fork Lift.. Our C160 Hyster workshop manuals contain in-depth maintenance, service and repair information. Get your eManual now! Hyster C160 (J30-40XMT) Service Shop Manual Forklift Workshop Repair Book. £26.99. VIEW DETAILS. Hyster C160 (J30XMT J35XMT J40XMT) Forklift Complete Workshop Service Repair Manual Hyster J30-35 40XMT C160 Forklift. hyster c114 (ej1.25-1.75xl europe) service shop manual forklift workshop repair book: hyster c160 (j30-40xmt) service shop manual forklift workshop repair book: hyster c203 (a1.00-1.50xl europe) service shop manual forklift workshop repair book: hyster d098 (e3.50xl, e4.00xl, e4.50xlxls, e5.50xl europe) service shop manual forklift workshop.
HYSTER C160 (J30XMT J35XMT J40XMT) FORKLIFT repair manual & service manual is in pdf format so it will work with computers including WIN, MAC etc.You can Easily view, Navigate, print, Zoom in/out as per your requirements. We accept Paypal and All Credit Cards.. 2.STAGE FULL FREEIIFT VISTA 3STAGE FULL VISTA Mast Dimensions & Battery Specifications. hyster forklift factory repair manual download here hyster forklift factory repair manual covers all repairs. covers: class iv space j30bs j30-35-40xmt j30-35-40xmt2 temp space j40-50a, j60as j40-60xl j40-65xm j40-65xm2 n30xmh n30xmh2 class ii narrow -.
Hyster J25, J30, J35B, J30BS (B160) Forklift Workshop Service Repair Manual The Hyster J25, J30, J35B, J30BS (B160) Forklift Workshop Manual we provide is a Complete Informational book in an all-device compatible PDF Format. This service manual has easy-to-read and apply section-wise instructions to repair every part. HYSTER forklift truck SERIAL PREFIX C160 MODEL J30-35-40XMT parts and service (workshop) manual 3550162 C174 HYSTER forklift truck SERIAL PREFIX C174 MODEL R30XMS parts and service (workshop) manual 3550163 C176 HYSTER forklift truck SERIAL PREFIX C176 MODEL HI-RACKER R40EH parts and service (workshop) manual 3550164 C117. Here is our extensive Hyster forklift manual library (PDF formats) that includes the Hyster forklift manuals for repair, maintenance, parts and service. It is a book that is indesentibe for warehouse managers. All warehouse pros that operate a Hyster need this PDF manual for their warehouse or Hyster forklift-supported operation..
Hyster Service Manuals. Hyster A177 (H2.00XL H2.50XL H3.00XL Europe) Forklift Service Repair Workshop Manual Hyster B160 (J25B, J35B, J30BS) Electric Forklift Service Repair Workshop Manual. Hyster C160 (J30XMT, J35XMT, J40XMT) Electric Forklift Service Repair Workshop Manual.. hyster parts and service manuals full Admin 16:20 forklifts hyster hyster parts and service parts manual repair repair manual service manual shop manual training workshop manual Admin. C160 J30-35-40XMT 7/98 B168 J40-50-60XL 12/95 A216 J40-50-60-65XM 11/99 B114 E20-25B/E30BS 3/96 Related products for Hyster Forklift PDF: includes workshop repair manual, detailed technical information about the engines forklifts Hyster. 50$ Catalogs exchange. Let us know if you have any catalog for exchange. Important Information..
Covers: Hyster J30XMT J35XMT J40XMT (C160/F160) Forklift models Format: PDF files (zipped) Compatibility: Windows/Mac computers File size: 54mb Note: Linked HTML table of contents. Instant digital download only – no printed copy or CD-ROM media. This service manual contains detailed maintenance, service, repair and troubleshooting procedures for J30XMT J35XMT J40XMT (C160/F160) Forklifts.. Hyster C160 (J30XMT, J35XMT, J40XMT) Electric Forklift Service Repair Workshop Manual Go ahead to take this service manual.Please contact us if with any problems. This manual can be used by anyone from a first time owner/amateur to a professional technician.Easy to read type,And give you all the information needed to do the procedure correctly. Find great deals on eBay for hyster forklift manuals. Shop with confidence. Skip to main content. eBay: HYSTER R30 C/CR/CA Forklift Service Manual repair shop overhaul book ORDERMASTER. Pre-owned. AU $167.15. HYSTER E25 E30 E35 E40 E50 E60 J50 J60 FORKLIFT Service Repair Manual book shop. Pre-owned. AU $278.60. From United States. 10%.
Hyster C114 E25 35xl Service Shop Manual Forklift Workshop Repair Book Ebook Hyster C114 E25 35xl Service Shop Manual Forklift Workshop Repair Book currently available at idosweets.co.uk for review only, if you need complete manual forklift workshop repair book hyster c160 j30 40xmt service shop manual. HYSTER FORKLIFT TRUCK WORKSHOP SERVICE MANUAL MASSIVE File ending in: rar Estimated download time: 8.79 Minutes Recognized HYSTER FORKLIFT TRUCK WORKSHOP SERVICE REPAIR SHOP MAINTENANCE MANUAL This covers in detail all service and repairs of ALL the models listed below: J30-35-40XMT J30-35-40XMT2 TEMP SPACE J40-50A, J60AS J40-60XL. Electronic catalog HYSTER CHALLENGER H360H, H400H, H400H-EC5, H450H, H450H-EC6 FORKLIFT SERVICE + PARTS MANUAL includes an electronic spare parts catalog for forklifts Hyster, contains repair and service manuals, electronic programs to help diagnose, instruction manuals for forklifts Hyster..
Repair manual for Hyster Class 4 Internal Combustion Engine Trucks – Cushion Tire Hyster C187 (S40XL S50XL S60XL) Forklift . Download COMPLETE Service & Repair Manual for HYSTER C187 (S40XL S50XL S60XL) FORKLIFT. It covers every single detail on your HYSTER C187 (S40XL S50XL S60XL) FORKLIFT. This manual very useful in the treatment and repair.. Hyster Class 1 Electric Motor Rider Trucks Repair Manuals 2013 (HTML+PDF), workshop manuals and wiring diagrams for Hyster fork lift and Hyster Class 1 lift trucks., Hyster Lift Truck, HYSTER, hyster. Find great deals on eBay for Hyster Forklift Manual in Manuals & Books. Shop with confidence. Find great deals on eBay for Hyster Forklift Manual in Manuals & Books. Shop with confidence. Skip to main content. eBay Logo 70, 80, 100B. Hyster Forklift Service Repair Manual. HYSTER FORKLIFT PARTS MANUAL H800E . $73.60. Buy It Now. HYSTER.
Hyster C114 E25 35xl Service Shop Manual Forklift Workshop Repair Book PDF Kindle. Hyster C160 J30 40xmt Service Shop Manual Forklift Workshop Repair Book PDF ePub. Gina Wilson All Things Algebra 2016 Answers PDF Download Gives the readers many references and knowledge that bring positive influence in the future.. Hyster Service Manual: FREE HYSTER H3.0FT FORKLIFT SERVICE REPAIR MANUAL Forklift Service Repair Workshop Manual DOWNLOAD. Repair Manuals Brisbane Workshop Automobile Business Fork Pneumatic Tire Hyster Forklift Original Factory Hyster Forklift Service Repair Manual is a Complete Informational Book. This Service Manual has easy-to-read. Hyster C210 (N30XMH) Forklift Service Repair Workshop Manual Go ahead to take this service manual.Please contact to us if with any problems. This manual can be used by anyone from a first time owner/amateur to a professional technician.Easy to read type,And give you all the information needed to do the procedure correctly.Keep this shop manual.
Hyster f160 (j30 xmt j35xmt j40xmt) forklift service repair factory manual instant download 1. Hyster F160 (J30XMT J35XMT J40XMT)Forklift Service Repair Factory ManualINSTANT DOWNLOADINSTANT DOWNLOADRepair manual for Hyster Class 1 Electric Motor Rider - Hyster F160(J30XMT J35XMT J40XMT) ForkliftHyster F160 (J30XMT J35XMT J40XMT) Forklift Service RepairFactory Manual. Hyster Forklift PDF is a collection of PDF files in the shell of HTML. This set includes electronic parts manuals, repair manuals, workshop manuals, fitting instructions, technical specifications, detailed wiring circuits for forklifts Hyster. The program is fairly simple.. Get access to the Hyster J40XL (B168) forklift repair manual which include service, electrical and maintenance information in PDF format. Get access to the Hyster J40XL (B168) forklift repair manual which include service, electrical and maintenance information in PDF format. Hyster E25XL, E30XL, E35XL Forklift Repair Manual (C114) Hyster.
Original Factory Hyster N005 (H80FT, H90FT, H100FT, H110FT, H120FT) Forklift Service Repair Manual is a Complete Informational Book. This Service Manual has easy-to-read text sections with top quality diagrams and instructions.. Misc. Tractors Hyster UC-30 YC-40 HC-50 Manual Service, Repair & Owners Operators Manuals Shop. Allis Chalmers: Avery: Bobcat: Hyster J30-40XMT (F160) Diagrams & Operators Manual, 113 pages: $43.95 $42.19 This riding lawn mower repair manual includes service and maintenance procedures for riding mowers built prior to 1992. Coverage. Some manuals combine types (i.e. Parts + Operators, etc.), but the basic function of the manual types remain the same and are as follows: Service Manual (SVC) - The service manual (a.k.a. Shop manual or Repair manual) tells you how to take the Hyster Forklift apart, fix it, and put it back together. It is written in the language of a mechanic.
Auto Repair Manuals Thứ Hai, 19 tháng 3, 2018. Hyster Forklift J30-35-40XMT Electric Service & Parts Manual C-F160 Hyster Forklift J30-35-40XMT Electric Service & Parts Manual C-F160 Hyster Forklift J30-35-40XMT Electric Service & Parts Manual C-F160 Size: 67.5 mb Language: English Type: pdf. Find great deals on eBay for hyster forklift 40. Shop with confidence.. Hyster FULL RANGE OF REPAIR MANUAL. Leave a reply. Hyster Workshop, service & repair manuals download: Hyster B227 (HR45-25 HR45-27 HR45-31 HR45-36L HR45-40LS HR45-40S HR45-45LSX) Diesel Counter Balanced Truck Service Repair Factory Manual INSTANT DOWNLOAD Hyster C160 (J30XMT J35XMT J40XMT) Forklift Service Repair Factory Manual INSTANT.
Hyster E114 E25 40z Service Shop Manual Forklift Workshop Repair Book Hyster E024 S135ft S155ft Forklift Service Repair Factory Manual Instant Hyster C160 J30 40xmt Service Shop Manual Forklift Workshop Repair Book Hyosung Gf125 Factory Service Repair Manual. Clark Forklift Workshop Service and Repair Manual C60-80 D.SERVICE MANUAL CRARK C60-80 DRATED CAPACI. parts manual, parts book, workshop service repair manual Crown. . $190.00 Hyster C160 (J30-40XMT) Repair manual for Hyster Class 1 Electric Motor Rider C160 (J30-40XMT. Highly professional laptop diagnostic tool and new hyster repair manuals pdf 2016 for full set version laptop diagnostic tools of various models provided by sattv1244 are your best choice for.
Shop Repair Manual. The Service workshop repair manual offers detailed servicing instructions and will give you complete step by step information on repair, servicing, preventative maintenance & troubleshooting procedures for your Hyster Fork Lift. Free Hyster J30-35 40XMT C160 Forklift Service Repair Manual Download. Free Hyster Electric. hyster c174 (r30xms) forklift service repair workshop manual+ parts manual download this manual contains information and data to this model. has specs, diagrams, and. Hyster L005 (H70XM-H120XM) Forklift Service Repair Workshop Manual DOWNLOAD Repair manual for Hyster Class 5 Internal Combustion Engine Trucks - Pneumatic Tire Hyster L005 (H70XM-H120XM) Forklift Original Factory Hyster L005 (H70XM-H120XM) Forklift Service Repair Manual is Download Now Similar manuals: Hyster L005 (H70XM-H120XM) Forklift.
HYSTER FORKLIFT FACTORY REPAIR MANUAL - Download: Service - hyster forklift factory repair manual covers all repairs. related: j25a, j30as, j35a j25-35b, j30bs j30-35-40xmt j30-35-40xmt2 temp space j40-50a, j60as. Repair manual for Hyster Class 5 Internal Combustion Engine Trucks - Pneumatic Tire Hyster J006 (H135FT, H155FT) Forklift Original Factory Hyster J006 (H135FT, H155FT) Forklift Service Repair Manual is a Complete Informational Book. This Service Manual has easy-to-read text sections with top quality diagrams and instru. Original Factory Hyster J006 (H 135FT, H 155FT) Forklift Service Repair Manual is a Complete Informational Book. This Service Manual has easy-to-read text sections with top quality diagrams and instructions. Trust Hyster J006 (H 135FT, H 155FT) Forklift Service Repair Manual will give you everything you need to do the job. Save time and money by doing it yourself, with the confidence only.
HYSTER FORKLIFT FACTORY REPAIR MANUAL HYSTER FORKLIFT FACTORY REPAIR MANUAL Forklift Parts Manual is a Complete Informational Book. This Parts Manual has easy-to-read text sections with top quality diagrams and instruction Hyster G006 (H135-155XL) Forklift Service Repair Workshop Manual DOWNLOAD Repair manual for Hyster Class 5. Hyster J30-35 40XMT C160 Shop Repair Manual. The Service workshop repair manual offers detailed servicing instructions and will give you complete This Hyster manual is a complete service repair manual in one PDF file. With a book marking to the left-hand HYSTER CLASS 1 FORKLIFT REPAIR.
|
"""
Some codes from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import math
import json
import random
import scipy.misc
import numpy as np
from time import gmtime, strftime
# -----------------------------
# new added functions for pix2pix
def load_data(image_path, image_size, input_c_dim, output_c_dim, is_train=False):
    """Load one combined sample image and return it as a single array of
    shape (image_size, image_size, input_c_dim + output_c_dim).

    The source file stores all channels of the input and target images
    side by side along the width axis.  Each channel slice is zero-padded
    to a square canvas, resized, cropped (randomly when is_train, centered
    otherwise) and rescaled to [-1, 1].
    """
    input_img = imread(image_path)
    # Split the side-by-side channels into individual 2-D slices.
    images = np.split(input_img, input_c_dim + output_c_dim, axis=1)
    # Padding budget: `offset` extra pixels in total, `half_offset` per side.
    half_offset = 8
    offset = half_offset * 2
    hypersize = image_size + offset  # resize target, before cropping
    fullsize = 512 + offset          # padded square canvas size
    # Random crop origin (training augmentation); shared by all channels so
    # input and target stay aligned.
    h1 = int(np.ceil(np.random.uniform(1e-2, offset)))
    w1 = int(np.ceil(np.random.uniform(1e-2, offset)))
    conv = []
    for image in images:
        # Zero-pad the slice to a centered fullsize x fullsize square
        # (first along axis 1, then along axis 0).
        top = int((fullsize - image.shape[1]) / 2)
        bottom = fullsize - image.shape[1] - top
        image = np.append(np.zeros((image.shape[0], top)), image, axis=1)
        image = np.append(image, np.zeros((image.shape[0], bottom)), axis=1)
        left = int((fullsize - image.shape[0]) / 2)
        right = fullsize - image.shape[0] - left
        image = np.append(np.zeros((left, image.shape[1])), image, axis=0)
        image = np.append(image, np.zeros((right, image.shape[1])), axis=0)
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; this
        # code requires an old SciPy (or a port to PIL/imageio).
        tmp = scipy.misc.imresize(image, [hypersize, hypersize], interp='nearest')
        if is_train:
            # Random crop for augmentation.
            image = tmp[h1:h1+image_size, w1:w1+image_size]
        else:
            # Deterministic center crop for evaluation.
            image = tmp[half_offset:half_offset+image_size, half_offset:half_offset+image_size]
        # imresize output is in 0..255; map to [-1, 1].
        image = image/127.5 - 1.
        conv.append(image)
    # Stack the per-channel slices along a new last axis.
    return np.stack(conv, axis=2)
# -----------------------------
def save_images(images, size, image_path):
    """Map images from [-1, 1] back to [0, 1] and write them as one grid."""
    restored = inverse_transform(images)
    return imsave(restored, size, image_path)
def imread(path):
    """Read the image at ``path`` as a float64 numpy array.

    Fix: ``np.float`` was removed in NumPy 1.24; ``np.float64`` is exactly
    what the old alias resolved to, so behavior is unchanged.
    NOTE(review): ``scipy.misc.imread`` itself was removed in SciPy 1.2 --
    running on a modern SciPy requires ``imageio.imread`` instead.
    """
    return scipy.misc.imread(path).astype(np.float64)
def merge(images, size):
    """Tile a batch of images into one (size[0] rows x size[1] cols) grid.

    ``images`` is an array of shape (batch, h, w, c); images fill the grid
    row by row.  Generalized: the output now uses the batch's own channel
    count (``images.shape[3]``) instead of hard-coding 3, which is
    backward compatible for the original RGB inputs.
    """
    h, w = images.shape[1], images.shape[2]
    grid = np.zeros((h * size[0], w * size[1], images.shape[3]))
    for idx, image in enumerate(images):
        col = idx % size[1]   # column within the grid
        row = idx // size[1]  # row within the grid
        grid[row*h:row*h+h, col*w:col*w+w, :] = image
    return grid
def imsave(images, size, path):
    """Tile the batch into a grid and write it to ``path`` via scipy."""
    grid = merge(images, size)
    return scipy.misc.imsave(path, grid)
def inverse_transform(images):
    """Map pixel values from the network's [-1, 1] range back to [0, 1]."""
    return (images + 1.) / 2.
|
Our current global food system is a web of pharmaceuticals, confinement and resource-intense concentration of crop, animal and food production. And there’s now a higher mortality rate related to obesity than to hunger. What if farms, food processors and restaurants worked together in a seamless system, local and small? Tech entrepreneur-turned-farmer Chris Baggott explains how, ten years from now, the burgers at every neighborhood diner can be sustainably grown, processed, sold and eaten—and how a beautifully simple system could go global.
Chris is an email marketing leader and content marketing innovator. He co-founded two companies that both sold to publicly traded companies in 2013: ExactTarget and Compendium Software. He writes a highly praised blog, which was called the “Best of the Web” by Forbes and quoted by several publications, including the Wall Street Journal, Forbes, and ADWEEK. Chris also authored the Wiley book: Email Marketing By the Numbers.
A native of Pittsburgh, Chris now resides near Indianapolis with his wife and four children.
|
""" Remove duplicated Gmail Message objects and tighten constraints for Gmail messages.
Revision ID: 4b4c5579c083
Revises: 4b4674f1a726
Create Date: 2014-07-17 00:01:09.410292
"""
# revision identifiers, used by Alembic.
revision = '4b4c5579c083'
down_revision = '4b4674f1a726'
from alembic import op
from sqlalchemy import func
def upgrade():
    """Tighten Gmail-message constraints and remove duplicated messages.

    Steps, in order:
    1. Recreate the messagecontactassociation foreign keys with
       ON DELETE CASCADE so deleting a contact/message cleans up its
       association rows.
    2. Same for the imapuid -> message foreign key.
    3. Delete duplicate Message rows sharing (thread_id, g_msgid),
       keeping one representative per group.
    4. Add a unique index to enforce (thread_id, g_msgid) uniqueness
       going forward.
    """
    # Recreate the association FKs with ON DELETE CASCADE.
    op.drop_constraint('messagecontactassociation_ibfk_1',
                       'messagecontactassociation', type_='foreignkey')
    op.drop_constraint('messagecontactassociation_ibfk_2',
                       'messagecontactassociation', type_='foreignkey')
    op.create_foreign_key('messagecontactassociation_ibfk_1',
                          'messagecontactassociation', 'contact',
                          ['contact_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key('messagecontactassociation_ibfk_2',
                          'messagecontactassociation', 'message',
                          ['message_id'], ['id'], ondelete='CASCADE')
    op.drop_constraint('imapuid_ibfk_2', 'imapuid', type_='foreignkey')
    op.create_foreign_key('imapuid_ibfk_2', 'imapuid', 'message',
                          ['message_id'], ['id'], ondelete='CASCADE')

    # Imported here (not at module top) so the application models are only
    # loaded when the migration actually runs.
    from inbox.models import Message
    from inbox.models.session import session_scope

    with session_scope(versioned=False) \
            as db_session:
        # Find (thread_id, g_msgid) groups with more than one message and
        # pick one id per group to keep; delete the rest below.
        groups = db_session.query(
            Message.id, Message.thread_id, Message.g_msgid)\
            .filter(~Message.g_msgid.is_(None))\
            .group_by(Message.thread_id, Message.g_msgid).having(
                func.count(Message.id) > 1).all()
        for message_id, thread_id, g_msgid in groups:
            print "deleting duplicates of ({}, {}), saving {}".format(
                thread_id, g_msgid, message_id)
            db_session.query(Message).filter(
                Message.thread_id == thread_id,
                Message.g_msgid == g_msgid,
                Message.id != message_id).delete()

    # With duplicates gone, enforce uniqueness at the schema level.
    op.execute('ALTER TABLE message ADD UNIQUE INDEX ix_message_thread_id_g_msgid (thread_id, g_msgid)')
def downgrade():
    """No-op: the duplicate rows deleted by upgrade() cannot be restored,
    so this migration is irreversible."""
    return None
|
A proper bald eagle scowl.
Usually I am the watcher. Today I found myself being the observed species by this osprey.
The tiny fluffs of feathers on each side of this double-crested cormorant’s head are his “crests”.
Only the mature double-crested cormorant has the crests.
There are none on the immature bird at the lower right.
This is a first for me and my camera – a great horned owl.
Truly a mouthful…for this great blue heron.
Posted in birds, Eagles of Jordan Lake, fish, Jordan Lake, Jordan Lake State Park, ospreys of Jordan Lake, photography, raptors, wild birds and tagged as American bald eagle, bald eagle, bald eagles of Jordan Lake, double-crested cormorant, eagles of Jordan Lake, great blue heron, great blue heron fishing, great horned owl, Jordan Lake, Jordan Lake State Park, osprey, ospreys of Jordan Lake, owl.
|
# -*- coding:utf-8 -*-
"""
start.py获取详细页html源码的程序
存放在数据库code_0507里面
start2.py对详细页html源码解析
存放在数据库code_0507里面
功能:先爬取……后解析……
"""
import redis
import time
from process.html_save import FetchDetailHtml
from process.list_page import SummaryFetchParse
def main():
    """
    Crawl the detail-page HTML for each configured city and store it in the
    code_0507 database (via FetchDetailHtml / SummaryFetchParse).
    """
    # Redis db 1 is used as shared state between the two stages; start clean.
    r = redis.Redis(host = 'localhost', port = 6379, db = 1)
    r.flushdb()
    url_set = set()
    # Henan + Xinjiang provinces
    # Hubei + Hunan + Heilongjiang + Jilin + Guangdong provinces
    # Sichuan + Yunnan + Guizhou + Shaanxi provinces
    # List of city name abbreviations to crawl
    city_name_list = ['bt','chifeng','erds','hu','sjz','xj','changji','bygl','yili','aks',
                      'ks','hami','klmy','betl','tlf','ht',
                      'shz','kzls','ale','wjq','tmsk',
                      'ganzhou','nc','liuzhou','qinzhou','haikou',
                      'zz','luoyang','xx',]
    # Map of city abbreviation -> total number of list pages for that city
    city_dict = {'bt':24,'chifeng':32,'erds':13,'hu':48,'sjz':70,'xj':70,'changji':6,'bygl':7,'yili':5,'aks':6,
                 'ks':4,'hami':4,'klmy':4,'betl':4,'tlf':4,'ht':4,
                 'shz':6,'kzls':3,'ale':3,'wjq':3,'tmsk':3,
                 'ganzhou':31,'nc':70,'liuzhou':46,'qinzhou':5,'haikou':23,
                 'zz':70,'luoyang':28,'xx':12,}
    while len(city_name_list) > 0:
        city_name = city_name_list.pop()
        print "======城市%s爬取开始======"%(city_name)
        page_nums = city_dict.get(city_name)
        # Pages are fetched in batches of two (start..end inclusive).
        page_num_start = 1
        page_num_end = 2
        while page_num_start <= page_nums:
            # Stage 1: collect detail-page URLs from the list pages.
            p1 = SummaryFetchParse(r, url_set, page_num_start, page_num_end)
            p1.get_city_list(city_name)
            # Stage 2: download and persist the detail pages just queued.
            p2 = FetchDetailHtml(r, url_set)
            p2.parser_to_save()
            print "*********本次爬取结束***********"
            # Throttle: pause one minute between two-page batches.
            time.sleep(60*1)
            page_num_start = page_num_end + 1
            page_num_end = page_num_end + 2
        print "=====城市%s爬取结束====="%(city_name)
# Script entry point.
if __name__ == "__main__":
    main()
|
BOTOX® Cosmetic (a purified protein produced by the Clostridium botulinum bacteria) has been used safely by doctors for many years. BOTOX® Cosmetic has been used for wrinkle therapy since the early 1990's.
BOTOX® Cosmetic has grown to become one of the most popular cosmetic procedures. There are many reasons for this. It attacks wrinkles at their source. If you develop a frown line, putting a substance in to fill that line is not as effective without the use of BOTOX® Cosmetic to stop the frowning. With BOTOX® Cosmetic, the negative image of frowning is stopped, and the lines created by frowning are improved. The same applies to forehead lines and crow's feet: if you can stop the cause of wrinkles then you will help to prevent them from returning or worsening.
BOTOX® Cosmetic works by relaxing muscles that cause lines or wrinkles. It can therefore work only on areas that can benefit from this effect. These tend to occur in people who are expressive with their facial movements.
Another area where BOTOX® Cosmetic is used is the upper lip. If someone tends to purse their lips when they speak they can develop vertical lines on the upper lip. These are commonly mistaken for smoker's lines. BOTOX® Cosmetic can help prevent these creases from forming, and improve their appearance. It can also help other substances used to help plump these lines last longer.
If one develops perioral lines, or as they're commonly called, oral commissures, usually there is a turning down of the outside corners of the mouth. With two simple injections BOTOX® Cosmetic can relax these muscles, creating a more turned-up mouth and a less noticeable line. It also creates a happier appearance on the face.
Necklines are usually vertical or horizontal folds of the skin. There is a fine muscle underneath the skin in the neck, and relaxing this muscle allows the skin to tighten resulting in significant improvement in this area.
|
#pylint: disable = F0401
from helpers import *
from time import time as now
from time import sleep
from sys import exc_info
import thread
import org.bukkit.inventory.ItemStack as ItemStack
import org.bukkit.Bukkit as Bukkit
from basecommands import simplecommand
@hook.event("player.PlayerJoinEvent", "monitor")
def on_join(event):
    """
    Greets joining players: broadcasts a welcome banner for first-time
    players, and teleports anyone who logged in inside a portal block back
    to the world spawn.
    """
    player = event.getPlayer()
    # send welcome broadcast only for players we have never seen before
    if not player.hasPlayedBefore():
        broadcast("utils.greet_new", "\n&a&lPlease welcome &f" + player.getDisplayName() + " &a<o Redstoner!\n")
        # clear out some eventual crap before (push older chat off-screen)
        msg(player, " \n \n \n \n \n \n \n \n \n \n \n \n ")
        msg(player, "  &4Welcome to the Redstoner Server!")
        msg(player, "  &6Before you ask us things, take a quick")
        msg(player, "  &6look at &a&nredstoner.com/info")
        msg(player, " \n&6thank you and happy playing ;)")
        msg(player, " \n \n")
    # teleport to spawn when spawning inside portal: check the block at the
    # player's feet and at eye level for any *PORTAL* material
    loginloc = player.getLocation().getBlock().getType()
    headloc = player.getEyeLocation().getBlock().getType()
    if "PORTAL" in [str(headloc), str(loginloc)]:
        msg(player, "&4Looks like you spawned in a portal... Let me help you out")
        msg(player, "&6You can use /back if you &nreally&6 want to go back")
        player.teleport(player.getWorld().getSpawnLocation())
"""
This code fixes /up 0 destroying/replacing blocks in plots that are not yours.
If you use //up, this is caught by plotme and cancelled if you are not allowed to build.
However, if you use //up, WorldEdit does the following on "low" priority:
* Change the command to /up with the same arguments
* Run another event with /up but its cancelled (dunno why it does this)
Keep in mind that, on "lowest" priority, PlotMe might cancel events.
"""
# Fix: this flag was initialized as the int 0 but is assigned/read as a
# boolean everywhere else; use False for consistency (same truthiness).
dup = False  # Set when someone used //up; consumed by cmd_event2 below.

@hook.event("player.PlayerCommandPreprocessEvent", "lowest")
def cmd_event(event):
    """
    Remembers, at lowest priority (i.e. before PlotMe may cancel the event),
    that the incoming command was WorldEdit's //up, so the follow-up /up that
    WorldEdit dispatches can be distinguished from a directly typed /up.
    """
    global dup
    if event.getMessage().split(" ")[0] in ("//up", "/worldedit:/up"):
        dup = True
@hook.event("player.PlayerCommandPreprocessEvent", "normal")
def cmd_event2(event):
    """
    Companion to cmd_event: lets through the /up that WorldEdit itself
    dispatches after //up, but re-routes a directly typed /up through //up so
    plot protection (PlotMe) gets a chance to cancel it.
    """
    global dup
    args = event.getMessage().split(" ")
    if args[0].lower() in ("/up", "/worldedit:up"):
        if dup: #If plotme cancelled this, it will not matter. This lets it through but PlotMe doesn't.
            dup = False
        elif not event.isCancelled():
            # Directly typed /up: cancel and re-issue as //up so the normal
            # WorldEdit -> PlotMe permission flow applies.
            event.setCancelled(True)
            event.getPlayer().chat("//up " + " ".join(args[1:]))
""" Disabled while builder can't access Trusted
@hook.event("player.PlayerGameModeChangeEvent", "low")
def on_gamemode(event):
user = event.getPlayer()
if str(event.getNewGameMode()) != "SPECTATOR" and user.getWorld().getName() == "Trusted" and not user.hasPermission("mv.bypass.gamemode.Trusted"):
event.setCancelled(True)
"""
@hook.event("player.PlayerBedEnterEvent")
def on_bed_enter(event):
    """When someone enters a bed in a survival world, mark every player in
    that world as sleeping-ignored so one sleeper can skip the night."""
    bed_world = event.getPlayer().getWorld()
    if bed_world.getName() not in ("Survival_1", "TrustedSurvival_1"):
        return
    for sleeper in bed_world.getPlayers():
        sleeper.setSleepingIgnored(True)
@hook.event("player.PlayerTeleportEvent")
def on_player_teleport(event):
    """
    Disable spectator teleportation: cancels teleports whose cause is
    SPECTATE unless the player holds the utils.tp.spectate permission.
    """
    player = event.getPlayer()
    if not event.isCancelled() and str(event.getCause()) == "SPECTATE" and not player.hasPermission("utils.tp.spectate"):
        event.setCancelled(True)
        msg(event.getPlayer(), "&cSpectator teleportation is disabled")
@hook.event("block.BlockFromToEvent", "highest")
def on_flow(event):
    """Stops liquids in the Creative world from flowing into (and thereby
    breaking) redstone components."""
    if event.isCancelled():
        return
    target = event.getToBlock()
    if target.getWorld().getName() != "Creative":
        return
    if rs_material_broken_by_flow(str(target.getType())):
        event.setCancelled(True)
def rs_material_broken_by_flow(material):
    """Returns True if `material` names a redstone component that liquid flow
    would break: the plain names REDSTONE/LEVER/TRIPWIRE, any DIODE_* variant,
    names whose second part is TORCH/WIRE/BUTTON/HOOK, and three-part
    *_COMPARATOR_* names."""
    if material in ("REDSTONE", "LEVER", "TRIPWIRE"):
        return True
    pieces = material.split("_")
    if len(pieces) < 2:
        return False
    if pieces[0] == "DIODE":
        return True
    if pieces[1] in ("TORCH", "WIRE", "BUTTON", "HOOK"):
        return True
    return len(pieces) == 3 and pieces[1] == "COMPARATOR"
@simplecommand("sudo",
               usage = "<player> [cmd..]",
               description = "Makes <player> write [cmd..] in chat",
               amin = 2,
               helpNoargs = True)
def on_sudo_command(sender, command, label, args):
    """
    /sudo <player|server|console> [cmd..]
    Makes the target chat (or run, when prefixed with /) the given text.
    Returns None on success, or an error string for simplecommand to show.
    """
    target = args[0]
    cmd = " ".join(args[1:])
    msg(sender, "&2[SUDO] &rRunning '&e%s&r' as &3%s" % (cmd, target))
    is_cmd = cmd[0] == "/"  # amin=2 guarantees cmd is non-empty
    is_console = target.lower() in ["server", "console"]
    if is_console:
        # Console has no chat; dispatch directly (strip the leading slash).
        server.dispatchCommand(server.getConsoleSender(), cmd[1:] if is_cmd else cmd)
        return None
    target_player = server.getPlayer(target)
    # Members of the `pythoners` whitelist cannot be sudo'd (they can run
    # /pyeval, so injecting chat into them would be a privilege escalation).
    if target_player and uid(target_player) not in pythoners:
        target_player.chat(cmd)
        return None
    return "&cPlayer %s not found!" % target
@simplecommand("me",
               usage = "[message..]",
               description = "Sends a message in third person",
               helpNoargs = True)
def on_me_command(sender, command, label, args):
    """
    /me [message..]
    Broadcasts the message in third person ("- <name> <msg>") to everyone
    with the utils.me permission; color codes are honored only when the
    sender has essentials.chat.color.
    """
    text = colorify("&7- %s &7%s " % (sender.getDisplayName() if isinstance(sender, Player) else "&9CONSOLE", u"\u21E6"))
    broadcast("utils.me", text + " ".join(args), usecolor = sender.hasPermission("essentials.chat.color"))
    return None
@hook.command("pluginversions")
def on_pluginversions_command(sender, command, label, args):
    """
    /pluginversions
    print all plugins + versions; useful when updating plugins
    """
    try:
        plugin_header(sender, "Plugin versions")
        # The comprehension already yields PluginDescriptionFile objects, so
        # below we must NOT call getDescription() again (previously this
        # crashed the sort and the loop printed a stale comprehension var).
        plugins = [pl.getDescription() for pl in list(ArrayList(java_array_to_list(server.getPluginManager().getPlugins())))]
        plugins.sort(key = lambda desc: desc.getName())
        msg(sender, "&3Listing all " + str(len(plugins)) + " plugins and their version:")
        for plugin in plugins:
            msg(sender, "&6" + plugin.getName() + "&r: &e" + plugin.getVersion())
        return True
    except:
        # Bare except kept deliberately: Jython code can also receive Java
        # exceptions that would slip past `except Exception`.
        error(trace())
@hook.command("echo")
def on_echo_command(sender, command, label, args):
    """
    /echo
    Echoes the arguments back to the sender (replacement for Essentials'
    echo, which sometimes prints mail alerts as well).
    """
    text = " ".join(args)
    msg(sender, text.replace("\\n", "\n"))
def eval_thread(sender, code):
    """
    Worker for /pyeval: evaluates `code` and reports the result (or the
    exception) back to the sender, then terminates this thread.

    NOTE(review): eval() of player-supplied input is deliberate here — the
    /pyeval command is restricted to the `pythoners` whitelist — but it
    remains inherently unsafe by design.
    """
    try:
        result = eval(code)
        msg(sender, ">>> %s: %s" % (colorify("&3") + type(result).__name__, colorify("&a") + unicode(result) + "\n "), usecolor = False)
    except:
        e = exc_info()[1]
        # Old-style Jython/Java exceptions may lack __class__; fall back to type().
        try:
            eclass = e.__class__
        except AttributeError:
            eclass = type(e)
        msg(sender, ">>> %s: %s" % (eclass.__name__, e) + "\n ", False, "c")
    thread.exit()
# UUIDs of players allowed to run /pyeval (and protected from /sudo chat
# injection in on_sudo_command).
pythoners = [
    "e452e012-2c82-456d-853b-3ac8e6b581f5", # Nemes
    "ae795aa8-6327-408e-92ab-25c8a59f3ba1", # jomo
    "305ccbd7-0589-403e-a45b-d791dcfdee7d"  # PanFritz
]
@simplecommand("pyeval",
               usage = "[code..]",
               description = "Runs python [code..] and returns the result",
               helpNoargs = True)
def on_pyeval_command(sender, command, label, args):
    """
    /pyeval [code..]
    Whitelisted users only: evaluates the code on a separate thread (see
    eval_thread) so long-running expressions don't block the server thread.
    """
    if is_player(sender) and uid(sender) not in pythoners:
        return noperm(sender)
    # Echo the code back before evaluating it.
    msg(sender, " ".join(args), False, "e")
    thread.start_new_thread(eval_thread, (sender, " ".join(args)))
    return None
@simplecommand("tempadd",
               usage = "<user> <group> [duration]",
               description = "Temporarily adds <user> to <group> for \n[duration] minutes. Defaults to 1 week.",
               helpNoargs = True,
               helpSubcmd = True,
               amin = 2,
               amax = 3)
def tempadd_command(sender, command, label, args):
    """
    /tempadd <user> <group> [minutes]
    Temporarily adds a user to a permission group through pex; the pex
    lifetime argument is passed in seconds, hence the * 60 below.
    """
    # The sender may only manage groups they have membership permission for.
    if not sender.hasPermission("permissions.manage.membership." + args[1]):
        return "&cYou do not have permission to manage that group!"
    if len(args) == 3:
        if not args[2].isdigit():
            return "&cThats not a number!"
        duration = int(args[2]) * 60  # minutes -> seconds
    else:
        duration = 604800  # default: one week, in seconds
    if duration <= 0:
        return "&cThats too short!"
    cmd = "pex user %s group add %s * %s" % (args[0], args[1], duration)
    runas(sender, cmd)
    # Render the duration back as days/hours/minutes for the feedback line.
    m, s = divmod(duration, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    return "&aAdded to group for %dd%dh%dm" % (d, h, m)
@hook.command("modules")
def on_modules_command(sender, command, label, args):
    """
    /modules
    Lists all configured modules; loaded ones are prefixed &a (green),
    unloaded ones &c (red).
    """
    plugin_header(sender, "Modules")
    msg(sender, ", ".join([(("&a" if mod in shared["modules"] else "&c") + mod) for mod in shared["load_modules"]]))
""" Something I'm planning for schematics
@hook.event("player.PlayerCommandPreprocessEvent", "low")
def on_command(event):
msg = " ".split(event.getMessage())
if len(msg) < 3:
return
if msg[0].lower() not in ("/schematic", "/schem"):
return
if msg[1].lower() not in ("save", "load"):
return
msg[2] = event.getPlayer().getName() + "/" + msg[2]
"""
|
Many interpreters understand anathema and anathema simply as different spellings of the same word that may be used interchangeably. If that were true, there would be no point in including these words in a book of synonyms. Like heurema and heurema and epithema and epithema, anathema and anathema probably were once no more than different pronunciations of the same word that eventually came to be spelled in two different ways. And in such cases it is not unusual for words with slightly different spellings to develop different meanings and so to become independent. For example, one member in each of the following pairs of words began as a variant spelling of the other: the Greek thrasos and tharsos, the Latin Thrax (Thracian) and threx (a gladiator), the German rechtlich (just) and redlich (upright), the French harnais (armor) and harnois (harness), and the English fray and frey, allay and alloy, and mettle and metal. Anathema and anathema share that same type of derivation.
Earnest debate about the different meanings of anathema and anathema occurred even among the early Hellenists. Salmasius, for example, was among those who argued that the words had distinct meanings, at least as they were used in Hellenistic Greek; Beza was among those who denied such a distinction. Perhaps the truth lies somewhere in between, though nearer to one side than to the other. After weighing all the evidence, the most reasonable conclusion is that anathema and anathema have distinct meanings that were recognized and observed by many but not by all.
In classical Greek anathema is the predominant form and the only one that Attic writers permitted. It was the technical word for costly offerings that were presented to the gods and then suspended or otherwise displayed in their temples. These offerings were separated from all common and profane uses and were openly dedicated to the honor of the deity to whom they were originally presented.
When the Hebrew Scriptures were translated into Greek, however, a new meaning was needed for anathema, because the Scriptures spoke of two ways in which objects might be holy, that is, set apart for God and devoted to him. The children of Israel were devoted to God, and he was glorified in them; the wicked Canaanites were devoted to God, and he was glorified on them. Persons and things might be herem—they might be devoted to God for good or for evil. There was such a thing as being "accursed to the Lord." Part of the spoil of a city might be consecrated to the Lord in his treasury and a part utterly destroyed, though each part was dedicated to him. These distinct concepts were expressed by using anathema and anathema. Those who believe that separation from God is the central idea of anathema are not able to trace a common meaning between it and anathema, which plainly refers to separation to God, or to show the point at which these words diverge. Those who believe that separation to God is implied in both cases face no such difficulty.
In the Septuagint and Apocrypha anathema and anathemawere used in distinct ways. Because of the variety of readings in the various editions, however, it is difficult to determine if the distinction between them was universally observed or to know how consistently the distinction between them was observed. In Tischendorf's critical edition of the Septuagint (Strong's #1850), however, the distinction between the two words is maintained in many passages, though that is not the case in some earlier editions of the Septuagint. In the New Testament anathema is always used to express the sacrum (sacred thing) that is pleasing to God, while anathema is used to refer to things that deserve God's wrath. These words are not used frequently enough in the New Testament, however, to convince an opponent of this view. Anathema occurs only once: "Then, as some spoke of the temple, how it was adorned with beautiful stones and donations [anathemasi]"( Luke 21:5). Anathema occurs no more than six times ( Acts 23:14; Romans 9:3; 1 Corinthians 12:3; 1 Corinthians 16:22; Galatians 1:8-9), and its use in these passages confirms the distinction made above.
Some of the Greek fathers neglected this distinction. Others, however, observed it implicitly, and some explicitly recognized the distinction and accurately and precisely traced its development.
Let us summarize our findings. Based on similar phenomena in all languages, it is probable that anathema and anathema gradually developed distinct meanings. In Scripture the two ways that persons and things may be dedicated to God—for good or for evil—are described by using these two slightly different forms of the same word. Every New Testament use of these words maintains this distinction. The later ecclesiastical books also maintain this distinction, though not perfectly. I conclude, therefore, that the sacred writers of the New Testament deliberately used anathema and anathema in different senses. Luke used anathema ( Luke 21:5) because he intended to express that which was dedicated to God for its own honor as well as for God's glory. Paul used anathema in the sense of that which is devoted to God's honor (as were the Canaanites of old) but to its own destruction. And in the end, every intelligent being who is capable of knowing and loving God and who has been called to this knowledge must be either anathema or anathema to him: as acceptable and consecrated to himself, or as detestable to him and his wrath and as owed and subject to punishment.
Trench, Richard C. Entry for 'Gift'. Synonyms of the New Testament. https://www.studylight.org/lexicons/trench/34. 1854.
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
# Dependency imports
import numpy as np
import numpy as onp # Avoid JAX rewrite. # pylint: disable=reimported
import six
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
from tensorflow_probability.python.internal.backend.numpy import nest
from tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape
try: # May not be available, not a core dep for TFP.
import wrapt # pylint: disable=g-import-not-at-top
except ImportError:
wrapt = None
# Public API of this module: NumPy/JAX stand-ins for the same-named `tf.*`
# symbols.
__all__ = [
    'bitcast',
    'broadcast_dynamic_shape',
    'broadcast_static_shape',
    'broadcast_to',
    'cast',
    'clip_by_value',
    'constant',
    'control_dependencies',
    'convert_to_tensor',
    'custom_gradient',
    'device',
    'enable_v2_behavior',
    'ensure_shape',
    'executing_eagerly',
    'get_static_value',
    'identity',
    'init_scope',
    'is_tensor',
    'name_scope',
    'newaxis',
    'register_tensor_conversion_function',
    'stop_gradient',
    'GradientTape',
    'Module',
    'Tensor',
    'Variable',
    # 'gradients',
]
# Backend selector; presumably rewritten to True when this file is generated
# for the JAX backend — TODO confirm against the build tooling.
JAX_MODE = False

if JAX_MODE:
  import jax  # pylint: disable=g-import-not-at-top
class _NullContext(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions.
def _broadcast_static_shape(shape_x, shape_y):
  """Reimplements `tf.broadcast_static_shape` in JAX/NumPy.

  Returns the broadcasted TensorShape of `shape_x` and `shape_y`, or an
  unknown TensorShape if either rank is unknown; raises ValueError (with a
  TF-style message) when the shapes are incompatible.
  """
  # Unknown rank on either side -> unknown result shape.
  if (tensor_shape.TensorShape(shape_x).ndims is None or
      tensor_shape.TensorShape(shape_y).ndims is None):
    return tensor_shape.TensorShape(None)
  shape_x = tuple(tensor_shape.TensorShape(shape_x).as_list())
  shape_y = tuple(tensor_shape.TensorShape(shape_y).as_list())
  try:
    if JAX_MODE:
      error_message = 'Incompatible shapes for broadcasting'
      return tensor_shape.TensorShape(lax.broadcast_shapes(shape_x, shape_y))
    # NumPy path: let np.broadcast compute the result on zero-filled dummies.
    error_message = ('shape mismatch: objects cannot be broadcast to'
                     ' a single shape')
    return tensor_shape.TensorShape(
        np.broadcast(np.zeros(shape_x), np.zeros(shape_y)).shape)
  except ValueError as e:
    # Match TF error message
    if error_message in str(e):
      raise ValueError(
          'Incompatible shapes for broadcasting: {} and {}'.format(
              shape_x, shape_y))
    raise
def _broadcast_dynamic_shape(shape_x, shape_y):
  """Reimplements `tf.broadcast_dynamic_shape` in JAX/NumPy."""
  # Shapes are always statically known here, so delegate and tensorize.
  return convert_to_tensor(_broadcast_static_shape(shape_x, shape_y))

# Convenience alias used elsewhere in the backend.
broadcast_shape = _broadcast_static_shape
def _constant(value, dtype=None, shape=None, name='Const'):  # pylint: disable=unused-argument
  """Implements `tf.constant`: converts `value`, optionally imposing `shape`
  (scalars are filled; NOTE(review): non-scalars are reshaped, which unlike
  TF requires the element count to match — confirm acceptable)."""
  x = convert_to_tensor(value, dtype=dtype)
  if shape is None:
    return x
  if not x.shape:
    # Scalar input: broadcast-fill to the requested shape.
    return np.full(shape, x)
  return np.reshape(x, shape)
def _control_dependencies(control_inputs):
  """Eagerly invokes any callables in `control_inputs`, then returns a no-op
  context manager (execution is already eager, so ordering is trivial)."""
  for dependency in (control_inputs or ()):
    if callable(dependency):
      dependency()
  return _NullContext()
# Maps Python type -> conversion function used by _convert_to_tensor.
tensor_conversion_registry = {}


def register_tensor_conversion_function(base_type, conversion_func):
  """Registers `conversion_func` to convert instances of `base_type`."""
  # No priority system like TensorFlow yet
  tensor_conversion_registry[base_type] = conversion_func
def _convert_to_tensor(value, dtype=None, dtype_hint=None, name=None):  # pylint: disable=unused-argument
  """Emulates tf.convert_to_tensor.

  Already-tensor values are returned (possibly cast in NumPy mode); other
  values go through the conversion registry, trying `dtype_hint` first and
  falling back to `dtype`.
  """
  dtype = utils.numpy_dtype(dtype)
  dtype_hint = utils.numpy_dtype(dtype_hint)
  if is_tensor(value) and not isinstance(value, Variable):
    # In NumPy mode, we are lenient on the dtype compatibility check because
    # some codepaths rely on flexible conversion from int/float64 to 32.
    if dtype is not None and value.dtype != dtype:
      if JAX_MODE:
        raise TypeError(('Tensor conversion requested dtype {} for array with '
                         'dtype {}: {}').format(dtype, value.dtype, value))
      return value.astype(dtype)
    return value
  conversion_func = tensor_conversion_registry.get(type(value),
                                                   _default_convert_to_tensor)
  ret = None
  if dtype is None and dtype_hint is not None:
    # The hint is best-effort: failures fall through to the plain conversion.
    try:
      ret = conversion_func(value, dtype=dtype_hint)
    except (TypeError, ValueError):
      pass
  if ret is None:
    ret = conversion_func(value, dtype=dtype)
  return ret
def _infer_dtype(value, default_dtype):
  """Guesses an object's dtype.

  Mirrors TF defaults: Python ints map to int32, floats to float32, complex
  to complex128; sequences are inspected element-wise with `default_dtype`
  as the last resort.
  """
  # Need to check for onp type first because onp types are subclasses of Python
  # types.
  if hasattr(value, 'dtype'):
    # Duck-typing onp types
    return value.dtype
  elif isinstance(value, bool):
    return np.bool_
  elif isinstance(value, six.integer_types):
    return np.int32
  elif isinstance(value, float):
    return np.float32
  elif isinstance(value, complex):
    return np.complex128
  elif isinstance(value, (tuple, list)):
    # Try inferring the type from items in the object if possible.
    for v in nest.flatten(value):
      if hasattr(v, 'dtype'):
        return v.dtype
    try:  # Finally fall back to raw types (int, bool).
      return _infer_dtype(value[0], default_dtype)
    except (IndexError, TypeError):
      return default_dtype
  raise ValueError(('Attempt to convert a value ({})'
                    ' with an unsupported type ({}) to a Tensor.').format(
                        value, type(value)))
class _Int64ToInt32Error(TypeError):
"""Error thrown when trying to convert an int64 to int32."""
def __init__(self, int_value):
self.int_value = int_value
super(_Int64ToInt32Error, self).__init__('Overflow when casting an int64 to'
' an int32.')
class _FloatToIntError(TypeError):
  """Error thrown when trying to convert a float to an int."""
  # Unlike _Int64ToInt32Error, no payload is attached; raise sites pass a
  # human-readable message instead.
def _is_int64(value):
return value > onp.iinfo(onp.int32).max or value < onp.iinfo(onp.int32).min
def _default_convert_to_tensor(value, dtype=None):
  """Default tensor conversion function for array, bool, int, float, and complex."""
  inferred_dtype = _infer_dtype(value, np.float32)
  # When a dtype is provided, we can go ahead and try converting to the dtype
  # and force overflow/underflow if an int64 is converted to an int32.
  if dtype is not None:
    try:
      return _default_convert_to_tensor_with_dtype(value, dtype)
    except _Int64ToInt32Error as e:
      # Force conversion to int32 if requested
      return e.int_value
  # If no dtype is provided, we try the inferred dtype and fallback to int64 or
  # float32 depending on the type of conversion error we see.
  try:
    return _default_convert_to_tensor_with_dtype(value, inferred_dtype)
  except _Int64ToInt32Error as e:
    # Value too wide for int32: widen to int64 instead.
    return np.array(value, dtype=np.int64)
  except _FloatToIntError as e:
    # Float mixed into an int-inferred context: promote to float32.
    return np.array(value, dtype=np.float32)
class TypeConversionError(TypeError):
  """Raised when a Python value cannot be converted to the requested dtype."""

  def __init__(self, value, dtype):
    message = 'Cannot convert {} to array of dtype {}'.format(value, dtype)
    super(TypeConversionError, self).__init__(message)
class MixedTypesError(ValueError):
  """Raised when a Python sequence mixes incompatible element types."""

  def __init__(self):
    message = "Can't convert Python sequence with mixed types to Tensor."
    super(MixedTypesError, self).__init__(message)
def _default_convert_to_tensor_with_dtype(value, dtype,
                                          error_if_mismatch=False):
  """Converts a value to a tensor with a given dtype.

  Args:
    value: An object to be converted to tensor.
    dtype: A NPTF dtype.
    error_if_mismatch: Enables a stricter check for use when converting an
      iterable from a tensor.

  Returns:
    A tensor.

  Raises:
    TypeConversionError: If type conversion fails.
    MixedTypesError: If types are mismatched in an iterable context.
    ValueError: If object isn't convertible to tensor.
    _Int64ToInt32Error: If trying to convert an int64 to an int32.
    _FloatToIntError: If trying to convert a float to an int.
  """
  is_arraylike = hasattr(value, 'dtype')

  if is_arraylike:
    # Duck-typed for `onp.array`/`onp.generic`
    arr = np.array(value)
    if dtype is not None:
      # arr.astype(None) forces conversion to float64
      return arr.astype(dtype)
    return arr
  elif isinstance(value, complex):
    dtype_compatible = np.issubdtype(dtype, np.complexfloating)
    if not dtype_compatible:
      if error_if_mismatch:
        raise MixedTypesError()
      raise TypeConversionError(value, dtype)
  elif isinstance(value, bool):
    # Bool check needs to happen before int check because bools are instances of
    # int.
    dtype_compatible = (dtype == np.bool_ or np.issubdtype(dtype, np.integer)
                        or np.issubdtype(dtype, np.floating))
    if not dtype_compatible:
      if error_if_mismatch:
        raise MixedTypesError()
      # Bug fix: this branch previously raised a bare `TypeError(value, dtype)`
      # (garbled two-arg message); use TypeConversionError like every sibling
      # branch and as documented above. Still a TypeError for callers.
      raise TypeConversionError(value, dtype)
  elif isinstance(value, six.integer_types):
    if error_if_mismatch and not (np.issubdtype(dtype, np.integer)
                                  or np.issubdtype(dtype, np.floating)):
      raise MixedTypesError()
    if dtype == np.int32 and _is_int64(value):
      raise _Int64ToInt32Error(np.array(value, dtype=dtype))
    if dtype == np.bool_:
      # Can't downcast an int to a bool
      raise TypeConversionError(value, dtype)
  elif isinstance(value, float):
    if error_if_mismatch and not (np.issubdtype(dtype, np.integer)
                                  or np.issubdtype(dtype, np.floating)):
      raise MixedTypesError()
    if np.issubdtype(dtype, np.integer):
      raise _FloatToIntError(
          'Cannot convert {} to array of dtype {}'.format(value, dtype))
    if not (np.issubdtype(dtype, np.floating)
            or np.issubdtype(dtype, np.complexfloating)):
      raise TypeConversionError(value, dtype)
  else:
    # Try to iterate through object and throw ValueError if we can't.
    if hasattr(value, '__getitem__'):
      ret = []
      error_in_list = False
      for v in value:
        # After the first element, mismatched types raise MixedTypesError.
        ret.append(_default_convert_to_tensor_with_dtype(
            v, dtype, error_if_mismatch=error_in_list))
        error_in_list = True
      value = ret
    else:
      raise ValueError(
          ('Attempting to convert a value {} with an'
           ' unsupported type {} to a Tensor.').format(value, type(value)))
  return np.array(value, dtype=dtype)
@contextlib.contextmanager
def _init_scope():
  """No-op stand-in for `tf.init_scope`; there is no graph to escape."""
  yield
# --- Begin Public Functions --------------------------------------------------
class GradientTape(object):
  """tf.GradientTape stub.

  Present only so code importing the symbol type-checks; any attempt to
  construct one fails immediately.
  """

  def __init__(self, persistent=False, watch_accessed_variables=True):  # pylint: disable=unused-argument
    # Fail fast: gradients are unavailable in this backend.
    raise NotImplementedError('GradientTape not currently supported in JAX and '
                              'NumPy backends.')

  def __enter__(self):
    return self

  def __exit__(self, typ, value, traceback):  # pylint: disable=unused-argument
    pass

  def watch(self, tensor):  # pylint: disable=unused-argument
    pass

  def gradient(self, target, sources, output_gradients=None,  # pylint: disable=unused-argument
               unconnected_gradients=None):  # pylint: disable=unused-argument
    raise NotImplementedError

  def batch_jacobian(self, target, source,  # pylint: disable=unused-argument
                     unconnected_gradients=None,  # pylint: disable=unused-argument
                     parallel_iterations=None, experimental_use_pfor=True):  # pylint: disable=unused-argument
    raise NotImplementedError
# Public bindings; utils.copy_docstring attaches the matching tf.* docstring.
bitcast = utils.copy_docstring(
    'tf.bitcast',
    lambda input, type, name=None: convert_to_tensor(  # pylint: disable=g-long-lambda
        input, dtype_hint=type).view(type))

broadcast_dynamic_shape = utils.copy_docstring(
    'tf.broadcast_dynamic_shape', _broadcast_dynamic_shape)

broadcast_static_shape = utils.copy_docstring(
    'tf.broadcast_static_shape', _broadcast_static_shape)

broadcast_to = utils.copy_docstring(
    'tf.broadcast_to',
    lambda input, shape, name=None: np.broadcast_to(input, shape))
def _cast(x, dtype):
x = np.asarray(x)
if (np.issubdtype(x.dtype, np.complexfloating) and
not np.issubdtype(dtype, np.complexfloating)):
x = np.real(x)
return x.astype(dtype)
# More public bindings wrapping the private implementations above.
cast = utils.copy_docstring(
    'tf.cast',
    lambda x, dtype, name=None: _cast(x, utils.numpy_dtype(dtype)))

clip_by_value = utils.copy_docstring(
    'tf.clip_by_value',
    lambda t, clip_value_min, clip_value_max, name=None:  # pylint: disable=g-long-lambda
    np.clip(t, clip_value_min, clip_value_max))

constant = utils.copy_docstring(
    'tf.constant',
    _constant)

control_dependencies = utils.copy_docstring(
    'tf.control_dependencies',
    _control_dependencies)

convert_to_tensor = utils.copy_docstring(
    'tf.convert_to_tensor',
    _convert_to_tensor)
def _custom_gradient(f):
  """JAX implementation of tf.custom_gradient.

  `f` must return `(value, vjp)`; in NumPy mode only `value` is used, while
  in JAX mode the pair is adapted to `jax.custom_gradient`.
  """
  if not JAX_MODE:
    # Numpy backend ignores custom gradients, so we do too.
    return lambda *args, **kwargs: f(*args, **kwargs)[0]

  @jax.custom_gradient
  @functools.wraps(f)
  def wrapped(*args, **kwargs):
    value, vjp = f(*args, **kwargs)
    def vjp_(cts_out):
      cts_in = vjp(cts_out)
      # JAX expects cotangents as a tuple, not a list.
      if isinstance(cts_in, list):
        cts_in = tuple(cts_in)
      return cts_in
    return value, vjp_
  return wrapped
custom_gradient = utils.copy_docstring(
    'tf.custom_gradient', _custom_gradient)

# Device placement is meaningless here; ignore the spec and do nothing.
device = lambda _: _NullContext()
def _ensure_shape(x, shape, name=None):  # pylint: disable=unused-argument
  """Checks that `x` has a shape compatible with `shape` and returns `x`."""
  actual = tensor_shape.TensorShape(x.shape)
  expected = tensor_shape.TensorShape(shape)
  if not expected.is_compatible_with(actual):
    template = 'Shape of tensor x {} is not compatible with expected shape {}'
    raise ValueError(template.format(actual, expected))
  return x
ensure_shape = utils.copy_docstring(
    'tf.ensure_shape', _ensure_shape)

# This backend is always eager.
executing_eagerly = utils.copy_docstring(
    'tf.executing_eagerly',
    lambda: True)
def _get_static_value_jax(tensor, partial=False):
  """JAX implementation of tf.get_static_value.

  Tracers, variables, and modules have no static value; ndarrays are
  materialized as raw NumPy arrays; anything else passes through.
  """
  del partial
  if isinstance(tensor, jax.core.Tracer):
    return None
  if isinstance(tensor, NumpyVariable):
    return None
  if isinstance(tensor, Module):
    return None
  if isinstance(tensor, np.ndarray):
    # Force a concrete (non-JAX) NumPy array.
    return onp.array(tensor)
  return tensor
def _get_static_value_numpy(tensor, partial=False):
  """NumPy implementation of tf.get_static_value.

  Variables and modules have no static value; everything else is already a
  concrete value and is returned unchanged.
  """
  del partial  # Unused: NumPy values are always fully known.
  if isinstance(tensor, (NumpyVariable, Module)):
    return None
  return tensor
# Pick the backend-appropriate implementation at import time.
get_static_value = utils.copy_docstring(
    'tf.get_static_value',
    _get_static_value_jax if JAX_MODE else _get_static_value_numpy)

identity = utils.copy_docstring(
    'tf.identity',
    lambda input, name=None: np.array(input))

is_tensor = utils.copy_docstring(
    'tf.is_tensor',
    lambda x: isinstance(x, Tensor))

init_scope = utils.copy_docstring('tf.init_scope', _init_scope)
class name_scope(object):  # pylint: disable=invalid-name
  """A context manager for use when defining a Python op.

  Mirrors `tf.name_scope`: ops created inside the scope would share the
  scope's name as a prefix. In this backend there is no graph, so entering
  the scope simply yields the scope name.

  Example:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope("MyOp") as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```
  """

  def __init__(self, name, *args, **kwargs):
    # Extra positional/keyword arguments exist only for API compatibility.
    del args, kwargs
    self._name = name

  @property
  def name(self):
    return self._name

  def __enter__(self):
    return self._name

  def __exit__(self, exc_type, exc_value, exc_traceback):
    # Falsy return: never suppress exceptions raised inside the scope.
    return False
newaxis = np.newaxis

if JAX_MODE:
  from jax import lax  # pylint: disable=g-import-not-at-top
  # tf.stop_gradient: JAX has a real gradient-blocking primitive.
  stop_gradient = utils.copy_docstring(
      'tf.stop_gradient',
      lambda input, name=None: lax.stop_gradient(input))
else:
  # tf.stop_gradient: plain NumPy has no autodiff, so copying is sufficient.
  stop_gradient = utils.copy_docstring(
      'tf.stop_gradient',
      lambda input, name=None: np.array(input))
def _convert_tensorshape_to_tensor(value, dtype=None):
  """Copied from TF's TensorShape conversion."""
  if not value.is_fully_defined():
    raise ValueError(
        'Cannot convert a partially known TensorShape to a Tensor: {}'.format(
            value))
  value_list = value.as_list()
  # Any dimension >= 2**31 forces an int64 result; the offending dimension is
  # kept for the error message below (0 means "none found").
  int64_value = next((dim for dim in value_list if dim >= 2**31), 0)
  if dtype is None:
    dtype = np.int64 if int64_value else np.int32
  else:
    if dtype not in (np.int32, np.int64):
      raise TypeConversionError(value, dtype)
    if dtype == np.int32 and int64_value:
      raise ValueError('Cannot convert a TensorShape to dtype int32; '
                       'a dimension is too large ({})'.format(int64_value))
  return convert_to_tensor(value_list, dtype=dtype)


register_tensor_conversion_function(tensor_shape.TensorShape,
                                    _convert_tensorshape_to_tensor)
def _convert_dimension_to_tensor(value, dtype=None):
  """Converts a TF Dimension to an integer tensor (int32 by default)."""
  if dtype is None:
    dtype = np.int32
  if dtype not in (np.int32, np.int64):
    raise TypeConversionError(value, dtype)
  return convert_to_tensor(tensor_shape.dimension_value(value), dtype=dtype)


register_tensor_conversion_function(tensor_shape.Dimension,
                                    _convert_dimension_to_tensor)
class NumpyVariable(getattr(wrapt, 'ObjectProxy', object)):
  """Stand-in for tf.Variable."""

  __slots__ = ('initializer',)

  # pylint: disable=unused-argument
  def __init__(
      self,
      initial_value=None,
      trainable=True,
      validate_shape=True,
      caching_device=None,
      name=None,
      variable_def=None,
      dtype=None,
      import_scope=None,
      constraint=None,
      shape=None):
    # Most tf.Variable arguments are accepted only for API compatibility;
    # just `initial_value`, `dtype` and `name` have any effect here.
    assert constraint is None
    v = convert_to_tensor(initial_value)
    if dtype is not None:
      v = v.astype(utils.numpy_dtype(dtype))
    # ObjectProxy.__init__ installs the wrapped object, so the variable
    # behaves like the underlying array for most operations.
    super(NumpyVariable, self).__init__(v)
    # `_self_` prefix is wrapt's convention for storing state on the proxy
    # itself rather than on the wrapped object.
    self._self_name = name
    self.initializer = None
  # pylint: enable=unused-argument

  @property
  def name(self):
    # Fall back to the object id when no name was supplied.
    return self._self_name if self._self_name is not None else str(id(self))

  def __array__(self, dtype=None):
    if dtype is not None:
      dtype = utils.numpy_dtype(dtype)
      return self.__wrapped__.__array__(dtype)
    # Passing in dtype=None to __array__ has differing behavior in numpy.
    # When an `np.ndarray` has `.__array__(None)` invoked, the array is casted
    # to `float64`. Thus we handle this case separately.
    return self.__wrapped__.__array__()

  def assign(self, value, **_):
    # Re-running ObjectProxy.__init__ swaps the wrapped array in place.
    super(NumpyVariable, self).__init__(onp.array(value, dtype=self.dtype))
    return self

  def assign_add(self, value, **_):
    super(NumpyVariable, self).__init__(
        onp.array(self, dtype=self.dtype) + onp.array(value, dtype=self.dtype))
    return self

  def assign_sub(self, value, **_):
    super(NumpyVariable, self).__init__(
        onp.array(self, dtype=self.dtype) - onp.array(value, dtype=self.dtype))
    return self
if JAX_MODE:
  # Teach JAX to treat NumpyVariable exactly like a plain ndarray when it
  # crosses into traced/compiled computations.
  jax.interpreters.xla.canonicalize_dtype_handlers[NumpyVariable] = (
      jax.interpreters.xla.canonicalize_dtype_handlers[onp.ndarray])
  jax.interpreters.xla.pytype_aval_mappings[NumpyVariable] = (
      jax.interpreters.xla.pytype_aval_mappings[onp.ndarray])
  jax.core.pytype_aval_mappings[NumpyVariable] = (
      jax.core.pytype_aval_mappings[onp.ndarray])
def _convert_variable_to_tensor(value, dtype=None):
  # Unwrap the proxy and convert the underlying array value.
  return convert_to_tensor(value.__wrapped__, dtype=dtype)


register_tensor_conversion_function(NumpyVariable, _convert_variable_to_tensor)

# Public alias matching `tf.Variable`.
Variable = NumpyVariable
class _TensorMeta(type(np.ndarray)):
  """Metaclass so `isinstance(x, Tensor)` matches the backend's array types."""

  @classmethod
  def __instancecheck__(cls, instance):
    if JAX_MODE:
      # Under JAX, "tensors" are device arrays or tracers (inside jit/grad).
      return isinstance(instance, (jax.xla.DeviceArray,
                                   jax.core.Tracer))
    return isinstance(instance, np.ndarray)
class Tensor(six.with_metaclass(_TensorMeta)):
  """Stand-in for `tf.Tensor`; instance checks are delegated to _TensorMeta."""

  # Operator dunders that a tf.Tensor-like class is expected to support
  # (mirrors the attribute of the same name on tf.Tensor).
  OVERLOADABLE_OPERATORS = frozenset((
      # Binary.
      '__add__',
      '__radd__',
      '__sub__',
      '__rsub__',
      '__mul__',
      '__rmul__',
      '__truediv__',
      '__rtruediv__',
      '__floordiv__',
      '__rfloordiv__',
      '__mod__',
      '__rmod__',
      '__lt__',
      '__le__',
      '__gt__',
      '__ge__',
      '__ne__',
      '__eq__',
      '__and__',
      '__rand__',
      '__or__',
      '__ror__',
      '__xor__',
      '__rxor__',
      '__getitem__',
      '__pow__',
      '__rpow__',
      # Unary.
      '__invert__',
      '__neg__',
      '__abs__',
      '__matmul__',
      '__rmatmul__'
  ))
class Module(object):
  """Minimal drop-in replacement for `tf.Module`.

  Only a name is tracked; the variable-tracking hooks used by TF are stubbed
  out to return empty collections.
  """

  # tf.Module subclasses use this to exclude attributes from tracking;
  # nothing is tracked in this backend, so it stays empty.
  _TF_MODULE_IGNORED_PROPERTIES = frozenset()

  def __init__(self, name):
    self._name = name

  def _no_dependency(self, value):
    # No dependency tracking in this backend; return the value untouched.
    return value

  @property
  def variables(self):
    return []

  @property
  def trainable_variables(self):
    return []
enable_v2_behavior = lambda: None
|
The rain is pouring down in Fort William so that may well be the end of an amazing run of weather. Not to worry – I just bought a new kayak so I’ve been waiting to get some use out of it! My last post from September is from a couple of days on Skye with Alfie and Oliver. I have been to Skye a fair bit but always headed to the Cuillin to climb, so this time we decided to get after some classics on the sea cliffs of Neist, Kilt Rock and Flodigarry.
I drove up from Glasgow after a couple of days working for Tayler Made Adventures. The light was stunning as I drove through Ballachuillish and I stopped to get this shot looking out at one of my favourite mountains – Garbh Bheinn, Ardgour. I love driving into Fort William from this direction.
A Sign of things to come at Kilt Rock!
After being heavily disappointed that the Cal Mac Ferry didn't do bacon rolls, nor did anywhere on Skye (!), we headed to Neist. Above – Oliver leading Midas Touch as the sun slowly comes around the crag.
Alfie “aspirant mountain guide” Tipler heading off up one of the 3star E1’s “Security Risk”. On “Security Risk” watched by Oliver.
Photo courtesy of Oliver. Last route of the day “Wall Street” me belaying Alfie – probably one of the best single pitch E2’s I’ve climbed Last light at Neist Lighthouse. We went for a wander after climbing and I was shocked to see all the photographers lined up along the cliff edge. While I think there is a lot of merit in “getting the shot” at the perfect time as you sit out there I was a bit thrown by the plethora of tripods, lenses, filters, partners giving minutes till sundown, wifes handing out sandwiches etc. Why not be out there all day with a camera and a lens and watch the light change. The above shot was taken from the top of the hill above Neist (where none of the photographers were) and below from the cliff edge with approximately 12 other photographers. I was smiling inside when the light went flat at the last minute! Next day it was off to Kilt Rock first thing. I actually took this as we were leaving – another party abseiling into the route that we had just climbed – the awesome “Grey Panther” Oliver and I on the belay of Grey Panther Oliver checking I am in fact belaying as well as taking photos.
Next up it was off to Flodigarry just up the coast. Alfie and Oliver at the base of “Spantastic” (the pillar that Alfie is leaning on). The 1996 guidebook states “The pillar has a narrow base that may not support it much longer. This is perhaps the only route in the country which attracts a weight limit – more reassuring to make an ascent at high tide when the sea can help cushion a fall”! We decided that it was Alfie's lead – I don't think he’d read the bit about a weight limit! This route is given HVS 5a. I think it would be easier to call it 4b and say that the leader should have a complete lack of imagination as to the consequences of a fall! Great fun.
|
import subprocess
import os
import sys
import commands
import numpy as np
import pyroms
import pyroms_toolbox
from remap_bio_woa import remap_bio_woa
from remap_bio_glodap import remap_bio_glodap
# Source directories for the WOA13 and GLODAP climatology input files.
data_dir_woa = '/archive/u1/uaf/kate/COBALT/'
data_dir_glodap = '/archive/u1/uaf/kate/COBALT/'
# Output directory for the generated climatology files.
dst_dir='./'

# Source grid: GFDL CM2.1 B-grid; destination grid: NWGOA3 ROMS grid.
src_grd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('/archive/u1/uaf/kate/COBALT/GFDL_CM2.1_grid.nc', name='ESM2M_NWGOA3')
dst_grd = pyroms.grid.get_ROMS_grid('NWGOA3')
# define all tracer stuff
# Three parallel tables indexed together: short name, long name and units for
# each COBALT biogeochemical tracer.
list_tracer = ['alk', 'cadet_arag', 'cadet_calc', 'dic', 'fed', 'fedet', 'fedi', 'felg', 'fesm', 'ldon', 'ldop', 'lith', 'lithdet', 'nbact', 'ndet', 'ndi', 'nlg', 'nsm', 'nh4', 'no3', 'o2', 'pdet', 'po4', 'srdon', 'srdop', 'sldon', 'sldop', 'sidet', 'silg', 'sio4', 'nsmz', 'nmdz', 'nlgz']
# NOTE(review): 'cadet_arag' and 'cadet_calc' both carry the long name
# 'Detrital CaCO3' -- confirm whether aragonite/calcite should be
# distinguished in the long names.
tracer_longname = ['Alkalinity', 'Detrital CaCO3', 'Detrital CaCO3', 'Dissolved Inorganic Carbon', 'Dissolved Iron', 'Detrital Iron', 'Diazotroph Iron', 'Large Phytoplankton Iron', 'Small Phytoplankton Iron', 'labile DON', 'labile DOP', 'Lithogenic Aluminosilicate', 'lithdet', 'bacterial', 'ndet', 'Diazotroph Nitrogen', 'Large Phytoplankton Nitrogen', 'Small Phytoplankton Nitrogen', 'Ammonia', 'Nitrate', 'Oxygen', 'Detrital Phosphorus', 'Phosphate', 'Semi-Refractory DON', 'Semi-Refractory DOP', 'Semilabile DON', 'Semilabile DOP', 'Detrital Silicon', 'Large Phytoplankton Silicon', 'Silicate', 'Small Zooplankton Nitrogen', 'Medium-sized zooplankton Nitrogen', 'large Zooplankton Nitrogen']
tracer_units = ['mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'g/kg', 'g/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg', 'mol/kg']
#------- WOA13 ---------------------------------
# Tracers overwritten with WOA13 monthly climatology: indices 19, 20, 22, 29
# correspond to no3, o2, po4 and sio4 in the tables above.
id_tracer_update_woa = [19,20,22,29]
list_tracer_update_woa = []
tracer_longname_update_woa = []
tracer_units_update_woa = []
# Echo the tracer names that will be processed.
for idtra in id_tracer_update_woa:
    print list_tracer[idtra]

for idtra in id_tracer_update_woa:
    # add to tracer update
    list_tracer_update_woa.append(list_tracer[idtra])
    tracer_longname_update_woa.append(tracer_longname[idtra])
    tracer_units_update_woa.append(tracer_units[idtra])

# For each month: regrid each tracer, append the result into the monthly
# climatology file with ncks, then remove the intermediate file.
for mm in np.arange(12):
    clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
    print '\nBuild CLIM file for month', mm
    for ktr in np.arange(len(list_tracer_update_woa)):
        ctra = list_tracer_update_woa[ktr]
        if ctra == 'sio4':
            # The WOA source files name silicate 'si'.
            ctra = 'si'
        mydict = {'tracer':list_tracer_update_woa[ktr],'longname':tracer_longname_update_woa[ktr],'units':tracer_units_update_woa[ktr],'file':data_dir_woa + ctra + '_WOA13-CM2.1_monthly.nc', \
                  'frame':mm}
        remap_bio_woa(mydict, src_grd, dst_grd, dst_dir=dst_dir)
        out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_woa[ktr] + '.nc'
        # ncks -A appends the remapped variable into the climatology file.
        command = ('ncks', '-a', '-A', out_file, clim_file)
        subprocess.check_call(command)
        os.remove(out_file)
#--------- GLODAP -------------------------------
# Tracers overwritten with GLODAP climatology: indices 0 and 3 correspond to
# alk and dic in the tables above.
id_tracer_update_glodap = [0,3]
list_tracer_update_glodap = []
tracer_longname_update_glodap = []
tracer_units_update_glodap = []
# Echo the tracer names that will be processed.
for idtra in id_tracer_update_glodap:
    print list_tracer[idtra]

for idtra in id_tracer_update_glodap:
    # add to tracer update
    list_tracer_update_glodap.append(list_tracer[idtra])
    tracer_longname_update_glodap.append(tracer_longname[idtra])
    tracer_units_update_glodap.append(tracer_units[idtra])

# NOTE(review): the GLODAP source files are annual ('_annual.nc') yet 'frame'
# is set to the month index -- confirm remap_bio_glodap handles this.
for mm in np.arange(12):
    clim_file = dst_dir + dst_grd.name + '_clim_bio_GFDL+WOA+GLODAP_m' + str(mm+1).zfill(2) + '.nc'
    print '\nBuild CLIM file for month', mm
    for ktr in np.arange(len(list_tracer_update_glodap)):
        ctra = list_tracer_update_glodap[ktr]
        mydict = {'tracer':list_tracer_update_glodap[ktr],'longname':tracer_longname_update_glodap[ktr],'units':tracer_units_update_glodap[ktr],'file':data_dir_glodap + ctra + '_GLODAP-ESM2M_annual.nc', \
                  'frame':mm}
        remap_bio_glodap(mydict, src_grd, dst_grd, dst_dir=dst_dir)
        out_file = dst_dir + dst_grd.name + '_clim_bio_' + list_tracer_update_glodap[ktr] + '.nc'
        # ncks -A appends the remapped variable into the climatology file.
        command = ('ncks', '-a', '-A', out_file, clim_file)
        subprocess.check_call(command)
        os.remove(out_file)
|
^ The average commercial electricity rate in Geneva, IL is 7.99¢/kWh.
^ The average residential electricity rate in Geneva, IL is 9.56¢/kWh.
^ The average industrial electricity rate in Geneva, IL is 6.74¢/kWh.
The average commercial electricity rate in Geneva is 7.99¢/kWh.
This average (commercial) electricity rate in Geneva is equal to the Illinois average rate of 7.99¢/kWh.
The average (commercial) electricity rate in Geneva is 20.81% less than the national average rate of 10.09¢/kWh. Commercial rates in the U.S. range from 6.86¢/kWh to 34.88¢/kWh.
The average residential electricity rate in Geneva is 9.56¢/kWh.
This average (residential) electricity rate in Geneva is 15.99% less than the Illinois average rate of 11.38¢/kWh.
The average (residential) electricity rate in Geneva is 19.53% less than the national average rate of 11.88¢/kWh. Residential rates in the U.S. range from 8.37¢/kWh to 37.34¢/kWh.
The average industrial electricity rate in Geneva is 6.74¢/kWh.
This average (industrial) electricity rate in Geneva is 16.21% greater than the Illinois average rate of 5.8¢/kWh.
The average (industrial) electricity rate in Geneva is 1.05% greater than the national average rate of 6.67¢/kWh. Industrial rates in the U.S. range from 4.13¢/kWh to 30.82¢/kWh.
Geneva is a city in Kane County, Illinois, and has a population of approximately 21,495.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-19 15:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Relaxes the `musicians` model fields: bio/company/genre/location/social
    # become optional (blank=True, null=True, default=None); phone remains
    # required in forms but is nullable at the database level.

    dependencies = [
        ('talent', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='musicians',
            name='bio',
            field=models.TextField(blank=True, default=None, max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='musicians',
            name='company',
            field=models.CharField(blank=True, default=None, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='musicians',
            name='genre',
            field=models.CharField(blank=True, default=None, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='musicians',
            name='location',
            field=models.CharField(blank=True, default=None, max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='musicians',
            name='phone',
            field=models.CharField(default=None, max_length=15, null=True),
        ),
        migrations.AlterField(
            model_name='musicians',
            name='social',
            field=models.CharField(blank=True, default=None, max_length=200, null=True),
        ),
    ]
|
We analyzed Julgamento.clipesebandas.com.br page load time and found that the first response time was 92 ms and then it took 379 ms to load all DOM resources and completely render a web page. This is an excellent result, as only a small number of websites can load faster.
HTML content can be minified and compressed by a website’s server. The most efficient way is to compress content using GZIP, which reduces the amount of data travelling through the network between server and browser. This page needs its HTML code to be minified, as minification can save 467 B, which is 14% of the original size. It is highly recommended that the content of this web page be compressed using GZIP, as it can save up to 2.1 kB or 61% of the original size.
CSS files minification is very important to reduce a web page rendering time. The faster CSS files can load, the earlier a page can be rendered. Julgamento.clipesebandas.com.br needs all CSS files to be minified and compressed as it can save up to 24.4 kB or 80% of the original size.
Our browser made a total of 5 requests to load all elements on the main page. We found that all of those requests were addressed to Julgamento.clipesebandas.com.br and no external sources were called. The less responsive or slowest element that took the longest time to load (92 ms) belongs to the original domain Julgamento.clipesebandas.com.br.
As for redirects, our browser was forwarded to http://julgamento.clipesebandas.com.br/users/sign_in before it reached this domain.
This IP address is dedicated to Julgamento.clipesebandas.com.br. This is the best domain hosting practice.
Language claimed in HTML meta tag should match the language actually used on the web page. Otherwise Julgamento.clipesebandas.com.br can be misinterpreted by Google and other search engines. Our service has detected that Portuguese is used on the page, and it matches the claimed language. Our system also found out that Julgamento.clipesebandas.com.br main page’s claimed encoding is utf-8. Use of this encoding format is the best practice as the main page visitors from all over the world won’t have any issues with symbol transcription.
Julgamento.clipesebandas.com.br has no SSL certificate. Web browsing can be safer with HTTPS connection, so we suggest that it should be obtained for this site.
Country of origin for 100% of all visits is Brazil. It lies approximately 3690 miles away from the server location (United States) and such a long distance can negatively affect website speed, as it takes some time for data to travel back and forth between those places. That’s why one of the best ways to speed up Julgamento.clipesebandas.com.br page load time for the majority of users is moving the server to Brazil or just closer to the user base.
|
# -*- coding: utf-8 -*-
import json
import datetime
import urllib2
from mock import Mock, patch
from flask import session
from werkzeug.datastructures import ImmutableMultiDict
from purchasing.users.models import User
from purchasing.data.contracts import ContractBase
from purchasing.data.contract_stages import ContractStage, ContractStageActionItem
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.opportunities.models import Opportunity
from purchasing.extensions import mail
from purchasing.conductor.util import assign_a_contract
from purchasing_test.factories import ContractTypeFactory, DepartmentFactory, CategoryFactory
from purchasing_test.test_base import BaseTestCase
from purchasing_test.util import (
insert_a_contract, insert_a_stage, insert_a_flow,
insert_a_role, insert_a_user
)
class TestConductorSetup(BaseTestCase):
    """Shared fixtures for conductor tests: users, stages, flows, contracts."""

    def setUp(self):
        super(TestConductorSetup, self).setUp()
        # create a conductor and general staff person
        self.county_type = ContractTypeFactory.create(**{
            'name': 'County', 'allow_opportunities': True, 'managed_by_conductor': True
        })
        self.department = DepartmentFactory.create(**{'name': 'test department'})
        self.conductor_role_id = insert_a_role('conductor')
        self.staff_role_id = insert_a_role('staff')
        self.conductor = insert_a_user(email='foo@foo.com', role=self.conductor_role_id)
        self.staff = insert_a_user(email='foo2@foo.com', role=self.staff_role_id)
        self.conductor2 = insert_a_user(email='foo3@foo.com', role=self.conductor_role_id)
        # create three stages, and set up a flow between them
        self.stage1 = insert_a_stage(
            name='stage1', send_notifs=True, post_opportunities=True,
            default_message='i am a default message'
        )
        self.stage2 = insert_a_stage(name='stage2', send_notifs=True, post_opportunities=False)
        self.stage3 = insert_a_stage(name='stage3', send_notifs=False, post_opportunities=False)
        self.flow = insert_a_flow(stage_ids=[self.stage1.id, self.stage2.id, self.stage3.id])
        self.flow2 = insert_a_flow(name='test2', stage_ids=[self.stage1.id, self.stage3.id, self.stage2.id])
        self.simple_flow = insert_a_flow(name='simple', stage_ids=[self.stage1.id])
        # create two contracts
        self.contract1 = insert_a_contract(
            contract_type=self.county_type, description='scuba supplies', financial_id=123,
            expiration_date=datetime.date.today(), properties=[{'key': 'Spec Number', 'value': '123'}],
            is_visible=True, department=self.department, has_metrics=True
        )
        self.contract2 = insert_a_contract(
            contract_type=self.county_type, description='scuba repair', financial_id=456,
            expiration_date=datetime.date.today() + datetime.timedelta(120),
            properties=[{'key': 'Spec Number', 'value': '456'}],
            is_visible=True, has_metrics=True
        )
        self.category = CategoryFactory.create()
        self.login_user(self.conductor)
        # URL templates, filled with (contract id, contract-stage id).
        self.detail_view = '/conductor/contract/{}/stage/{}'
        self.transition_view = '/conductor/contract/{}/stage/{}/'

    def assign_contract(self, flow=None, contract=None, start_time=None):
        """Assign a contract to the conductor; returns the child (work) contract."""
        flow = flow if flow else self.flow
        contract = contract if contract else self.contract1
        start_time = start_time if start_time else datetime.datetime.now()
        assign_a_contract(contract, flow, self.conductor, start_time=start_time)
        return contract.children[0]

    def get_current_contract_stage_id(self, contract, old_stage=None):
        """Return the ContractStage id for the contract's current (or a given) stage."""
        if contract.current_stage_id is None:
            return -1
        if not old_stage:
            stage = ContractStage.query.filter(
                contract.current_stage_id == ContractStage.stage_id,
                contract.id == ContractStage.contract_id
            ).first()
        else:
            stage = ContractStage.query.filter(
                old_stage.id == ContractStage.stage_id,
                contract.id == ContractStage.contract_id
            ).first()
        return stage.id

    def build_detail_view(self, contract, old_stage=None):
        """Build a detail-view URL, preferring the contract's child clone if any."""
        contract = contract.children[0] if len(contract.children) > 0 else contract
        return self.detail_view.format(
            contract.id, self.get_current_contract_stage_id(contract, old_stage)
        )

    def tearDown(self):
        super(TestConductorSetup, self).tearDown()
        session.clear()
class TestConductor(TestConductorSetup):
render_templates = True
    def test_conductor_contract_list(self):
        """Index lists both contracts and is reachable only by conductors."""
        index_view = self.client.get('/conductor', follow_redirects=True)
        self.assert200(index_view)
        self.assert_template_used('conductor/index.html')
        # we have 2 contracts
        _all = self.get_context_variable('_all')
        self.assertEquals(len(_all), 2)
        # we can't get to the page normally
        self.logout_user()
        index_view = self.client.get('/conductor', follow_redirects=True)
        self.assert200(index_view)
        # it should redirect us to the home page
        self.assert_template_used('public/home.html')
        self.login_user(self.staff)
        index_view = self.client.get('/conductor', follow_redirects=True)
        self.assert200(index_view)
        # it should redirect us to the home page
        self.assert_template_used('public/home.html')
    def test_conductor_start_new(self):
        """Starting a brand-new contract creates its stages and enters stage 1."""
        self.assertEquals(ContractStage.query.count(), 0)
        self.assert200(self.client.get('/conductor/contract/new'))
        self.client.post('/conductor/contract/new', data={
            'description': 'totally new wow', 'flow': self.flow.id,
            'assigned': self.conductor.id, 'department': self.department.id
        })
        # one ContractStage per stage in the flow
        self.assertEquals(ContractStage.query.count(), len(self.flow.stage_order))
        self.assertEquals(
            ContractBase.query.filter(ContractBase.description == 'totally new wow').first().current_stage_id,
            self.flow.stage_order[0]
        )
        # the two fixtures plus the newly created contract
        self.assertEquals(ContractBase.query.count(), 3)
    def test_conductor_start_existing(self):
        """Starting work on an existing contract clones it as a child contract."""
        start_work_url = '/conductor/contract/{}/start'.format(self.contract1.id)
        old_contract_id = self.contract1.id
        old_description = self.contract1.description
        self.assertEquals(ContractStage.query.count(), 0)
        self.assert200(self.client.get(start_work_url))
        # form is pre-populated from the existing contract
        self.assertEquals(self.get_context_variable('form').description.data, self.contract1.description)
        self.client.post(start_work_url, data={
            'description': 'updated!', 'flow': self.flow.id,
            'assigned': self.conductor.id, 'department': self.department.id
        })
        old_contract = ContractBase.query.get(old_contract_id)
        new_contract = ContractBase.query.get(old_contract_id).children[0]
        # the original is untouched; only the child clone carries the edits
        self.assertEquals(old_contract.description, old_description)
        self.assertEquals(new_contract.description, 'updated!')
        self.assertEquals(ContractStage.query.count(), len(self.flow.stage_order))
        self.assertEquals(new_contract.current_stage_id, self.flow.stage_order[0])
        self.assertEquals(ContractBase.query.count(), 3)
def test_conductor_modify_start(self):
assign = self.assign_contract()
start_url = '/conductor/contract/{}/start'.format(self.contract1.id)
# should load successfully on first stage
self.assert200(self.client.get(start_url))
self.client.post(start_url, data={
'description': 'totally new wow', 'flow': self.flow.id,
'assigned': self.conductor.id, 'department': self.department.id,
'start': datetime.datetime.now() - datetime.timedelta(1)
})
transition_url = self.build_detail_view(assign) + '/transition'
self.client.get(transition_url)
# should redirect on not-first stage
self.assertTrue(self.client.get(start_url).status_code, 302)
    def test_conductor_contract_assign(self):
        """Assigning builds the flow's stages and records the assignee."""
        self.assertEquals(ContractStage.query.count(), 0)
        assign = self.assign_contract()
        self.assertEquals(ContractStage.query.count(), len(self.flow.stage_order))
        self.assertEquals(assign.current_stage_id, self.flow.stage_order[0])
        self.assertEquals(assign.assigned_to, self.conductor.id)
        # re-assigning shouldn't cause problems
        self.assign_contract()
    def test_conductor_assign_unstarted_contract(self):
        """Only conductor-role users may be assigned an unstarted contract."""
        self.client.get('/conductor/contract/{}/assign/{}'.format(
            self.contract1.id, self.staff.id
        ))
        self.assert_flashes(
            'That user does not have the right permissions to be assigned a contract', 'alert-danger'
        )
        self.assertTrue(self.contract1.assigned is None)
        self.client.get('/conductor/contract/{}/assign/{}'.format(
            self.contract1.id, self.conductor.id
        ))
        self.assert_flashes(
            'Successfully assigned {} to {}!'.format(self.contract1.description, self.conductor.email),
            'alert-success'
        )
        self.assertTrue(self.contract1.assigned is not None)
        self.assertEquals(self.contract1.assigned, self.conductor)
    def test_conductor_reassign_in_progress(self):
        """An in-progress contract can be handed to a different conductor."""
        self.assign_contract(contract=self.contract1)
        self.client.get('/conductor/contract/{}/assign/{}'.format(
            self.contract1.id, self.conductor2.id
        ))
        self.assert_flashes(
            'Successfully assigned {} to {}!'.format(self.contract1.description, self.conductor2.email),
            'alert-success'
        )
        self.assertTrue(self.contract1.assigned is not None)
        self.assertEquals(self.contract1.assigned, self.conductor2)
    def test_conductor_contract_detail_view(self):
        """Detail view renders for conductors, redirects bare URLs, blocks others."""
        self.assert404(self.client.get(self.detail_view.format(999, 999)))
        assign = self.assign_contract()
        detail_view_url = self.build_detail_view(assign)
        detail = self.client.get(self.build_detail_view(assign))
        self.assert200(detail)
        self.assert_template_used('conductor/detail.html')
        self.assertEquals(self.get_context_variable('active_tab'), '#activity')
        # freshly assigned, so the current stage is also the active one
        self.assertEquals(
            self.get_context_variable('current_stage').id,
            self.get_context_variable('active_stage').id
        )
        self.assertEquals(len(self.get_context_variable('actions')), 1)
        # make sure the redirect works
        redir = self.client.get('/conductor/contract/{}'.format(assign.id))
        self.assertEquals(redir.status_code, 302)
        self.assertEquals(redir.location, 'http://localhost' + detail_view_url)
        self.logout_user()
        # make sure we can't get to it unless we are the right user
        detail = self.client.get(detail_view_url, follow_redirects=True)
        self.assert200(detail)
        # it should redirect us to the home page
        self.assert_template_used('public/home.html')
        self.login_user(self.staff)
        detail = self.client.get(detail_view_url, follow_redirects=True)
        self.assert200(detail)
        # it should redirect us to the home page
        self.assert_template_used('public/home.html')
    def test_conductor_contract_transition(self):
        """A transition closes stage 1 and opens stage 2; stage 3 stays untouched."""
        assign = self.assign_contract()
        transition_url = self.build_detail_view(assign) + '/transition'
        transition = self.client.get(transition_url)
        self.assertEquals(transition.status_code, 302)
        new_page = self.client.get(self.build_detail_view(assign))
        # stage2 has post_opportunities=False, so no "post" tab is rendered
        self.assertTrue('<a href="#post" aria-controls="post" role="tab" data-toggle="tab">' not in new_page.data)
        contract_stages = ContractStage.query.all()
        for stage in contract_stages:
            if stage.stage_id == self.stage1.id:
                self.assertTrue(stage.entered is not None and stage.exited is not None)
            elif stage.stage_id == self.stage2.id:
                self.assertTrue(stage.entered is not None and stage.exited is None)
            elif stage.stage_id == self.stage3.id:
                self.assertTrue(stage.entered is None and stage.exited is None)
    def test_conductor_auto_fix_dates(self):
        """Transitioning re-aligns a mismatched exit/enter timestamp pair."""
        two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)
        assign = self.assign_contract(start_time=two_days_ago)
        transition_url = self.build_detail_view(assign) + '/transition'
        self.client.post(transition_url, data={'complete': two_days_ago})
        self.client.post(transition_url, data={'complete': two_days_ago})
        revert_url = self.build_detail_view(assign) + '/transition?destination={}'
        # reverting with a later completion date knocks the timestamps out of sync
        self.client.post(revert_url.format(self.stage2.id), data={
            'complete': datetime.datetime.now() - datetime.timedelta(days=1)
        })
        contract_stage_1 = ContractStage.query.filter(ContractStage.stage_id == self.stage1.id).first()
        contract_stage_2 = ContractStage.query.filter(ContractStage.stage_id == self.stage2.id).first()
        self.assertNotEquals(contract_stage_1.exited, contract_stage_2.entered)
        # a plain transition should repair the mismatch automatically
        self.client.get(transition_url)
        self.assertEquals(contract_stage_1.exited, contract_stage_2.entered)
def test_conductor_transition_complete_date_validation(self):
assign = self.assign_contract()
transition_url = self.build_detail_view(assign) + '/transition'
early = self.client.post(transition_url, data={
'complete': datetime.datetime.now() - datetime.timedelta(days=1)
}, follow_redirects=True)
self.assertTrue('Invalid date (before step start)' in early.data)
late = self.client.post(transition_url, data={
'complete': datetime.datetime.now() + datetime.timedelta(days=1)
}, follow_redirects=True)
self.assertTrue('Invalid date (in future)' in late.data)
contract_stages = ContractStage.query.all()
for stage in contract_stages:
if stage.id == self.stage1.id:
self.assertTrue(stage.entered is not None and stage.exited is not None)
elif stage.id == self.stage2.id:
self.assertTrue(stage.entered is not None and stage.exited is None)
elif stage.id == self.stage3.id:
self.assertTrue(stage.entered is None and stage.exited is None)
    def test_conductor_directed_transition(self):
        """Reverting to an earlier stage clears later stages' enter/exit times."""
        assign = self.assign_contract()
        self.assertEquals(ContractStageActionItem.query.count(), 1)
        # transition to the third stage
        transition_url = self.build_detail_view(assign) + '/transition'
        self.client.get(transition_url)
        self.assertEquals(ContractStageActionItem.query.count(), 3)
        self.client.get(transition_url)
        self.assertEquals(ContractStageActionItem.query.count(), 5)
        self.assertEquals(assign.current_stage_id, self.stage3.id)
        revert_url = self.build_detail_view(assign) + '/transition?destination={}'
        # revert to the original stage
        self.client.get(revert_url.format(self.stage1.id))
        self.assertEquals(ContractStageActionItem.query.count(), 6)
        self.assertEquals(assign.current_stage_id, self.stage1.id)
        # only stage 1 is (re-)entered; all exit timestamps are cleared
        self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage1.id).first().entered is not None)
        self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage2.id).first().entered is None)
        self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage3.id).first().entered is None)
        self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage1.id).first().exited is None)
        self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage2.id).first().exited is None)
        self.assertTrue(ContractStage.query.filter(ContractStage.stage_id == self.stage3.id).first().exited is None)
    def test_conductor_link_directions(self):
        """Completed stages render read-only; future stages 404."""
        assign = self.assign_contract()
        self.client.get(self.detail_view.format(assign.id, assign.get_current_stage().id) + '/transition')
        # assert the current stage is stage 2
        redir = self.client.get('/conductor/contract/{}'.format(assign.id))
        self.assertEquals(redir.status_code, 302)
        self.assertEquals(redir.location, 'http://localhost' + self.build_detail_view(assign))
        # assert we can/can't go the correct locations
        old_view = self.client.get(self.build_detail_view(assign, old_stage=self.stage1))
        self.assert200(old_view)
        self.assertTrue('This stage has been completed.' in old_view.data)
        self.assert200(self.client.get(self.build_detail_view(assign, old_stage=self.stage2)))
        self.assert404(self.client.get(self.build_detail_view(assign, old_stage=self.stage3)))
def test_conductor_flow_switching(self):
    """Switching a contract to a different flow resets stages and the action log.

    Walks a contract one transition into the default flow, switches it to
    flow2, then switches back, asserting after each step that:
    - flow_id/current_stage_id track the newly active flow
    - the ContractStageActionItem log is rewritten to match the new flow
    - stages belonging to the abandoned flow have entered/exited cleared
    """
    assign = self.assign_contract()
    self.client.get(self.detail_view.format(assign.id, assign.get_current_stage().id) + '/transition')
    # we should have three actions -- entered, exited, entered
    self.assertEquals(ContractStageActionItem.query.count(), 3)
    self.client.get(self.detail_view.format(assign.id, self.stage2.id) +
        '/flow-switch/{}'.format(self.flow2.id))
    # assert that we have been updated appropriately
    self.assertEquals(assign.flow_id, self.flow2.id)
    self.assertEquals(assign.current_stage_id, self.flow2.stage_order[0])
    # assert that the action log has been properly cleaned
    new_actions = ContractStageActionItem.query.all()
    self.assertEquals(len(new_actions), 2)
    flow_switch_action, entered_action = 0, 0
    for i in new_actions:
        if i.action_type == 'entered':
            entered_action += 1
        elif i.action_type == 'flow_switch':
            flow_switch_action += 1
    self.assertEquals(entered_action, 1)
    self.assertEquals(flow_switch_action, 1)
    # assert that the old contract stages from the previous flow
    # have had their enter/exit times cleared
    old_stages = ContractStage.query.filter(
        ContractStage.flow_id == self.flow.id,
        ContractStage.contract_id == assign.id
    ).all()
    for i in old_stages:
        self.assertTrue(i.entered is None)
        self.assertTrue(i.exited is None)
    # assert that you can transition back to the original flow
    current_stage = ContractStage.query.filter(
        ContractStage.stage_id == assign.current_stage_id,
        ContractStage.contract_id == assign.id,
        ContractStage.flow_id == assign.flow_id
    ).first()
    # switch back to the first stage
    self.client.get(
        self.detail_view.format(assign.id, current_stage.id) +
        '/flow-switch/{}'.format(self.flow.id)
    )
    # assert that our contract properties work as expected
    self.assertEquals(assign.flow_id, self.flow.id)
    self.assertEquals(assign.current_stage_id, self.flow.stage_order[0])
    # assert that the actions were logged correctly
    new_actions = ContractStageActionItem.query.all()
    self.assertEquals(len(new_actions), 3)
    flow_switch_action, entered_action, restarted_action = 0, 0, 0
    for i in new_actions:
        if i.action_type == 'entered':
            entered_action += 1
        elif i.action_type == 'flow_switch':
            flow_switch_action += 1
        elif i.action_type == 'restarted':
            restarted_action += 1
    self.assertEquals(entered_action, 1)
    self.assertEquals(flow_switch_action, 2)
    # NOTE(review): switching back apparently does not log a 'restarted'
    # action even though the counter is tracked -- confirm intended
    self.assertEquals(restarted_action, 0)
@patch('urllib2.urlopen')
def test_url_validation(self, urlopen):
    """The url-exists endpoint reports reachability of a submitted URL.

    urlopen is mocked so successive getcode() calls yield: 200, then raise
    HTTPError(404), then raise URLError. Expected endpoint statuses:
    - payload without a 'url' key -> 404 (getcode is presumably never
      consulted, since the next post consumes the 200 -- confirm)
    - reachable url (200)         -> 200
    - HTTPError from the server   -> 404
    - URLError (connection-level) -> 500
    """
    mock_open = Mock()
    mock_open.getcode.side_effect = [
        200,
        urllib2.HTTPError('', 404, 'broken', {}, file),
        urllib2.URLError('')
    ]
    urlopen.return_value = mock_open
    post_url = '/conductor/contract/{}/edit/url-exists'.format(self.contract1.id)
    # no 'url' key at all -> 404
    post1 = self.client.post(
        post_url, data=json.dumps(dict(no_url='')),
        headers={'Content-Type': 'application/json;charset=UTF-8'}
    )
    self.assertEquals(json.loads(post1.data).get('status'), 404)
    # mocked 200 response -> 200
    post2 = self.client.post(
        post_url, data=json.dumps(dict(url='works')),
        headers={'Content-Type': 'application/json;charset=UTF-8'}
    )
    self.assertEquals(json.loads(post2.data).get('status'), 200)
    # mocked HTTPError -> 404
    post3 = self.client.post(
        post_url, data=json.dumps(dict(url='doesnotwork')),
        headers={'Content-Type': 'application/json;charset=UTF-8'}
    )
    self.assertEquals(json.loads(post3.data).get('status'), 404)
    # mocked URLError -> 500
    post4 = self.client.post(
        post_url, data=json.dumps(dict(url='doesnotwork')),
        headers={'Content-Type': 'application/json;charset=UTF-8'}
    )
    self.assertEquals(json.loads(post4.data).get('status'), 500)
def test_conductor_contract_post_note(self):
    """Notes posted via the activity form are logged and rendered.

    Also verifies notes cannot be posted to a stage that has not been
    started, nor to a contract that has not been assigned.
    """
    assign = self.assign_contract()
    # assigning the contract creates the initial logged action
    self.assertEquals(ContractStageActionItem.query.count(), 1)
    detail_view_url = self.build_detail_view(assign)
    self.client.post(detail_view_url + '?form=activity', data=dict(
        note='a test note!'
    ))
    self.assertEquals(ContractStageActionItem.query.count(), 2)
    detail_view = self.client.get(detail_view_url)
    self.assertEquals(len(self.get_context_variable('actions')), 2)
    self.assertTrue('a test note!' in detail_view.data)
    # make sure you can't post notes to an unstarted stage
    self.assert404(self.client.post(
        self.build_detail_view(assign, old_stage=self.stage3) + '?form=activity',
        data=dict(note='a test note!')
    ))
    # make sure you can't post a note to an unstarted contract
    self.assert404(self.client.post(
        self.build_detail_view(self.contract2) + '?form=activity',
        data=dict(note='a test note!')
    ))
def test_delete_note(self):
    """Conductor users can delete notes; anonymous users are redirected.

    Deleting a nonexistent note flashes a warning instead of erroring, and
    a logged-out user hitting the delete URL lands on the public home page
    with no notes removed.
    """
    assign = self.assign_contract()
    self.assertEquals(ContractStageActionItem.query.count(), 1)
    detail_view_url = self.build_detail_view(assign)
    self.client.post(detail_view_url + '?form=activity', data=dict(
        note='a test note!'
    ))
    self.client.post(detail_view_url + '?form=activity', data=dict(
        note='a second test note!'
    ))
    first_note = ContractStageActionItem.query.filter(
        ContractStageActionItem.action_type == 'activity'
    ).first()
    # assignment action + two notes
    self.assertEquals(ContractStageActionItem.query.count(), 3)
    self.client.get('/conductor/contract/1/stage/1/note/{}/delete'.format(first_note.id))
    self.assertEquals(ContractStageActionItem.query.count(), 2)
    # deleting a note that doesn't exist should only flash a warning
    self.client.get('/conductor/contract/1/stage/1/note/100/delete')
    self.assert_flashes("That note doesn't exist!", 'alert-warning')
    self.logout_user()
    # make sure you can't delete notes randomly
    self.assert200(
        self.client.get('/conductor/contract/1/stage/1/note/1/delete', follow_redirects=True)
    )
    self.assertEquals(ContractStageActionItem.query.count(), 2)
    self.assert_template_used('public/home.html')
def test_conductor_stage_default_message(self):
    """The stage's default message is rendered on the stage detail view."""
    assign = self.assign_contract()
    # assignment creates exactly one logged action
    self.assertEquals(ContractStageActionItem.query.count(), 1)
    response = self.client.get(self.build_detail_view(assign))
    self.assertTrue('i am a default message' in response.data)
def test_conductor_send_update(self):
    """The update form validates recipients and sends mail with cc support.

    - an invalid address is rejected and no action is logged
    - a valid post sends one message, logs one action, and carries the
      expected number of recipients and cc addresses
    """
    assign = self.assign_contract()
    self.assertEquals(ContractStageActionItem.query.count(), 1)
    detail_view_url = self.build_detail_view(assign)
    # make sure the form validators work
    bad_post = self.client.post(detail_view_url + '?form=update', data=dict(
        send_to='bademail', subject='test', body='test'
    ), follow_redirects=True)
    self.assertEquals(ContractStageActionItem.query.count(), 1)
    self.assertEquals(bad_post.status_code, 200)
    self.assertTrue('One of the supplied emails is invalid' in bad_post.data)
    with mail.record_messages() as outbox:
        good_post = self.client.post(detail_view_url + '?form=update', data=dict(
            send_to='foo@foo.com; foo2@foo.com', subject='test', body='test',
            send_to_cc='foo3@foo.com'
        ), follow_redirects=True)
        self.assertEquals(len(outbox), 1)
        self.assertEquals(ContractStageActionItem.query.count(), 2)
        self.assertTrue('test' in outbox[0].subject)
        self.assertTrue('with the subject' in good_post.data)
        # BUG FIX: these four assertions previously used
        # assertTrue(len(...), n), where n is silently treated as the
        # failure *message*, so they always passed for any non-empty list.
        self.assertEquals(len(outbox[0].cc), 1)
        self.assertEquals(len(outbox[0].recipients), 2)
        good_post_ccs = self.client.post(detail_view_url + '?form=update', data=dict(
            send_to='foo@foo.com', subject='test', body='test',
            send_to_cc='foo3@foo.com; foo4@foo.com'
        ), follow_redirects=True)
        self.assertEquals(len(outbox), 2)
        self.assertEquals(ContractStageActionItem.query.count(), 3)
        self.assertTrue('test' in outbox[1].subject)
        self.assertTrue('with the subject' in good_post_ccs.data)
        self.assertEquals(len(outbox[1].cc), 2)
        self.assertEquals(len(outbox[1].recipients), 1)
def test_conductor_post_to_beacon(self):
    """Posting the opportunity form creates an Opportunity and logs an action."""
    assign = self.assign_contract()
    detail_view_url = self.build_detail_view(assign)
    old_view = self.client.get(detail_view_url)
    # the assigned department should already render on the page
    self.assertTrue(self.department.name in old_view.data)
    self.client.post(detail_view_url + '?form=post', data={
        'contact_email': self.conductor.email, 'title': 'foobar', 'description': 'barbaz',
        'planned_publish': datetime.date.today() + datetime.timedelta(1),
        'planned_submission_start': datetime.date.today() + datetime.timedelta(2),
        'planned_submission_end': datetime.datetime.today() + datetime.timedelta(days=2),
        'department': self.department.id,
        'subcategories-{}'.format(self.category.id): 'on',
        'opportunity_type': self.county_type.id
    })
    # one opportunity created, and the post is logged as a second action
    self.assertEquals(Opportunity.query.count(), 1)
    self.assertEquals(ContractStageActionItem.query.count(), 2)
    detail_view = self.client.get(detail_view_url)
    self.assertEquals(len(self.get_context_variable('actions')), 2)
    self.assertTrue('barbaz' in detail_view.data)
def test_edit_contract_metadata(self):
    """Posting the metadata form logs an action and updates the contract."""
    assign = self.assign_contract()
    url = self.build_detail_view(assign, self.stage1) + '?form=update-metadata'
    self.client.post(url, data=dict(financial_id=999))
    # the metadata update is logged alongside the original assignment action
    self.assertEquals(ContractStageActionItem.query.count(), 2)
    for action in ContractStageActionItem.query.all():
        self.assertTrue(action.action_detail is not None)
    # the financial id is stored back as a string
    self.assertEquals(assign.financial_id, '999')
def test_edit_contract_complete(self):
    """The three completion steps redirect until their prerequisites are met.

    With a freshly assigned (not yet completed) contract:
    - /edit/contract redirects back to the conductor detail view
    - /edit/company and /edit/contacts redirect back to /edit/contract,
      presumably because the prior step's session data is missing -- see
      test_contract_completion_session_set
    """
    assign = self.assign_contract(flow=self.simple_flow)
    should_redir = self.client.get('/conductor/contract/{}/edit/contract'.format(assign.id))
    self.assertEquals(should_redir.status_code, 302)
    self.assertEquals(
        should_redir.location,
        'http://localhost/conductor/contract/{}'.format(assign.id)
    )
    should_redir = self.client.get('/conductor/contract/{}/edit/company'.format(assign.id))
    self.assertEquals(should_redir.status_code, 302)
    self.assertEquals(
        should_redir.location,
        'http://localhost/conductor/contract/{}/edit/contract'.format(assign.id)
    )
    should_redir = self.client.get('/conductor/contract/{}/edit/contacts'.format(assign.id))
    self.assertEquals(should_redir.status_code, 302)
    self.assertEquals(
        should_redir.location,
        'http://localhost/conductor/contract/{}/edit/contract'.format(assign.id)
    )
def test_contract_completion_session_set(self):
    """Completing each edit step stores per-contract data in the session.

    After the final stage transition, posting the contract form sets
    'contract-<id>' in the session and unlocks the company step; posting
    the company form sets 'companies-<id>' and unlocks the contacts step.
    """
    with self.client as c:
        assign = self.assign_contract(flow=self.simple_flow)
        transition_url = self.build_detail_view(assign) + '/transition'
        self.client.get(transition_url)
        # the simple flow has one stage, so one transition completes it
        self.assertTrue(assign.completed_last_stage())
        self.assert200(c.get('/conductor/contract/{}/edit/contract'.format(assign.id)))
        c.post('conductor/contract/{}/edit/contract'.format(assign.id), data=dict(
            expiration_date=datetime.date(2020, 1, 1), spec_number='abcd',
            description='foo'
        ))
        self.assertTrue(session['contract-{}'.format(assign.id)] is not None)
        self.assert200(c.get('/conductor/contract/{}/edit/company'.format(assign.id)))
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u'1234'),
            ('companies-0-company_name', u'__None'),
            ('companies-0-controller_number', u''),
            ('companies-0-new_company_name', u'test')
        ]))
        self.assertTrue(session['companies-{}'.format(assign.id)] is not None)
        self.assert200(c.get('/conductor/contract/{}/edit/contacts'.format(assign.id)))
def test_edit_contract_form_validators(self):
    """The company form rejects ambiguous or mismatched company rows.

    A row must describe either an existing company (name + controller
    number) or a brand-new one (new name + new controller number), never a
    mix of both. The 'companies-<id>' session key is only set on a valid
    post, so its presence/absence is the pass/fail signal throughout.
    """
    with self.client as c:
        assign = self.assign_contract(flow=self.simple_flow)
        transition_url = self.build_detail_view(assign) + '/transition'
        self.client.get(transition_url)
        # set contract session variable so we can post to the company endpoint
        c.post('conductor/contract/{}/edit/contract'.format(assign.id), data=dict(
            expiration_date=datetime.date(2020, 1, 1), spec_number='abcd',
            description='foo'
        ))
        self.assertTrue('companies-{}'.format(assign.id) not in session.keys())
        # assert you can't set both controller numbers
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u'1234'),
            ('companies-0-company_name', u'__None'),
            ('companies-0-controller_number', u'1234'),
            ('companies-0-new_company_name', u'')
        ]))
        self.assertTrue('companies-{}'.format(assign.id) not in session.keys())
        # assert you can't set both company names
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u''),
            ('companies-0-company_name', u'foobar'),
            ('companies-0-controller_number', u''),
            ('companies-0-new_company_name', u'foobar')
        ]))
        self.assertTrue('companies-{}'.format(assign.id) not in session.keys())
        # assert you can't set mismatched names/numbers
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u''),
            ('companies-0-company_name', u''),
            ('companies-0-controller_number', u'1234'),
            ('companies-0-new_company_name', u'foobar')
        ]))
        self.assertTrue('companies-{}'.format(assign.id) not in session.keys())
        # assert new works
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u'1234'),
            ('companies-0-company_name', u''),
            ('companies-0-controller_number', u''),
            ('companies-0-new_company_name', u'foobar')
        ]))
        self.assertTrue(session['companies-{}'.format(assign.id)] is not None)
        session.pop('companies-{}'.format(assign.id))
        # assert old works
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u''),
            ('companies-0-company_name', u'foobar'),
            ('companies-0-controller_number', u'1234'),
            ('companies-0-new_company_name', u'')
        ]))
        self.assertTrue(session['companies-{}'.format(assign.id)] is not None)
        session.pop('companies-{}'.format(assign.id))
        # assert multiple companies work
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u''),
            ('companies-0-company_name', u'foobar'),
            ('companies-0-controller_number', u'1234'),
            ('companies-0-new_company_name', u''),
            ('companies-1-new_company_controller_number', u'1234'),
            ('companies-1-company_name', u''),
            ('companies-1-controller_number', u''),
            ('companies-1-new_company_name', u'foobar2')
        ]))
        self.assertTrue(session['companies-{}'.format(assign.id)] is not None)
        session.pop('companies-{}'.format(assign.id))
def test_actual_contract_completion(self):
    """Finishing contract/company/contacts finalizes the child contract.

    After the final contacts post: the parent is archived and hidden, the
    child becomes visible with the new description, and a new ContractBase
    row exists (three in total).
    """
    with self.client as c:
        self.assertTrue(self.contract1.is_visible)
        assign = self.assign_contract(flow=self.simple_flow)
        transition_url = self.build_detail_view(assign) + '/transition'
        self.client.get(transition_url)
        c.post('conductor/contract/{}/edit/contract'.format(assign.id), data=dict(
            expiration_date=datetime.date(2020, 1, 1), spec_number='abcd',
            description='foo'
        ))
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u'1234'),
            ('companies-0-company_name', u''),
            ('companies-0-controller_number', u''),
            ('companies-0-new_company_name', u'foobar')
        ]))
        c.post('/conductor/contract/{}/edit/contacts'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-contacts-0-first_name', 'foo'),
            ('companies-0-contacts-0-last_name', 'bar'),
            ('companies-0-contacts-0-phone_number', '123-456-7890'),
            ('companies-0-contacts-0-email', 'foo@foo.com'),
        ]))
        self.assertTrue(assign.parent.is_archived)
        self.assertFalse(assign.parent.is_visible)
        self.assertTrue(assign.is_visible)
        self.assertEquals(ContractBase.query.count(), 3)
        self.assertEquals(assign.description, 'foo')
        # the parent's description gets an '[Archived]' suffix
        self.assertEquals(assign.parent.description, 'scuba supplies [Archived]')
def test_actual_contract_completion_multi_company(self):
    """Completion with multiple companies fans out into child contracts.

    Three company rows are posted (two sharing controller number 1234),
    which should yield two child contracts under the archived parent.
    """
    with self.client as c:
        self.assertTrue(self.contract1.is_visible)
        assign = self.assign_contract(flow=self.simple_flow)
        transition_url = self.build_detail_view(assign) + '/transition'
        self.client.get(transition_url)
        c.post('conductor/contract/{}/edit/contract'.format(assign.id), data=dict(
            expiration_date=datetime.date(2020, 1, 1), spec_number='abcd',
            description='foo'
        ))
        # note: rows are deliberately posted out of order (0, 2, 1)
        c.post('conductor/contract/{}/edit/company'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-new_company_controller_number', u'1234'),
            ('companies-0-company_name', u''),
            ('companies-0-controller_number', u''),
            ('companies-0-new_company_name', u'foobar'),
            ('companies-2-new_company_controller_number', u'5678'),
            ('companies-2-company_name', u''),
            ('companies-2-controller_number', u''),
            ('companies-2-new_company_name', u'foobar3'),
            ('companies-1-new_company_controller_number', u'1234'),
            ('companies-1-company_name', u''),
            ('companies-1-controller_number', u''),
            ('companies-1-new_company_name', u'foobar2'),
        ]))
        c.post('/conductor/contract/{}/edit/contacts'.format(assign.id), data=ImmutableMultiDict([
            ('companies-0-contacts-0-first_name', 'foo'),
            ('companies-0-contacts-0-last_name', 'bar'),
            ('companies-0-contacts-0-phone_number', '123-456-7890'),
            ('companies-0-contacts-0-email', 'foo@foo.com'),
            ('companies-1-contacts-0-first_name', 'foo'),
            ('companies-1-contacts-0-last_name', 'bar'),
            ('companies-1-contacts-0-phone_number', '123-456-7890'),
            ('companies-1-contacts-0-email', 'foo@foo.com'),
            ('companies-2-contacts-0-first_name', 'foo'),
            ('companies-2-contacts-0-last_name', 'bar'),
            ('companies-2-contacts-0-phone_number', '123-456-7890'),
            ('companies-2-contacts-0-email', 'foo@foo.com'),
        ]))
        # we should create two new contract objects
        self.assertEquals(ContractBase.query.count(), 4)
        self.assertTrue(assign.parent.is_archived)
        self.assertFalse(assign.parent.is_visible)
        # two of the contracts should be children of our parent contract
        children = assign.parent.children
        self.assertEquals(len(children), 2)
        for child in children:
            self.assertTrue(child.is_visible)
            self.assertEquals(child.description, 'foo')
            self.assertEquals(child.parent.description, 'scuba supplies [Archived]')
            self.assertEquals(assign.assigned, child.assigned)
            if child.financial_id == 1234:
                # companies sharing controller number 1234 are presumably
                # grouped onto the same child -- confirm against the
                # completion view's grouping logic
                self.assertEquals(len(child.companies), 2)
def test_contract_extension(self):
    """Extending a contract edits the parent in place, not the child.

    /extend redirects to the parent's edit form; posting it updates the
    parent's expiration date and redirects to the conductor index, while
    the in-progress child contract keeps its stage and stays hidden.
    """
    assign = self.assign_contract()
    detail_view_url = self.build_detail_view(assign)
    extend = self.client.get(detail_view_url + '/extend')
    self.assertEquals(extend.status_code, 302)
    self.assertEquals(
        extend.location,
        'http://localhost/conductor/contract/{}/edit/contract'.format(assign.parent.id)
    )
    extend_post = self.client.post('conductor/contract/{}/edit/contract'.format(assign.parent.id), data=dict(
        expiration_date=datetime.date.today(), spec_number='1234',
        description=assign.parent.description
    ))
    self.assertEquals(extend_post.status_code, 302)
    self.assertEquals(
        extend_post.location,
        'http://localhost/conductor/'
    )
    self.assertEquals(assign.parent.expiration_date, datetime.date.today())
    # our child contract should be untouched
    self.assertEquals(assign.current_stage_id, self.flow.stage_order[0])
    self.assertTrue(assign.parent.is_visible)
    self.assertFalse(assign.is_visible)
|
Waltz Me To Heaven: Shopping Event At Fig!
I am completely obsessed with this outfit from Fig! Maybe it's all the talk of Coachella, but I would love to be sitting on a blanket listening to some good music in this outfit (minus the wedges). I fell in love with this Pepin top on the hanger, and I like it even more on. The detailing is beautiful, and it has a fun, boho-chic vibe. The jeans are Current/Elliott, and I could not love them more! They are so comfortable. I have to admit, the shoes are mine from last year, but I pulled some below that I think will complete the outfit nicely.
We are excited to announce that we are partnering with Fig (inside Duh) this Thursday 4/16 between 4-7 pm for a night of bubbly, bites and spring shopping! The girls at Fig are generously offering 10% off all purchases for the evening, and there will be a drawing for a fun item. You don't want to miss it!
|
import sublime
import sublime_plugin
from ..SublimeCscope import DEBUG
from . import indexer
# These commands should trigger a state change event in the indexer
PROJECT_COMMANDS = ('prompt_add_folder',
'prompt_open_project_or_workspace',
'prompt_switch_project_or_workspace',
'prompt_select_workspace',
'open_recent_project_or_workspace')
class EventListener(sublime_plugin.EventListener):
    """Monitors events from the editor and tries to figure out
    when it is meaningful to notify the indexers."""

    def __init__(self):
        super().__init__()
        # id of the window that currently has focus (0 == not yet known)
        self._curr_active_window = 0
        # window ids that have a project-related command in flight
        # (spelling fixed from _project_command_in_progres)
        self._project_command_in_progress = []
        # debounce markers: repeated save/close events for the same buffer
        # within 1 second are ignored
        self._last_saved_buffer = None
        self._last_closed_buffer = None

    def _check_active_window(self):
        """Return True if the active window changed since the last call."""
        curr_active_window = sublime.active_window().id()

        # don't notify any change the first time
        if self._curr_active_window == 0:
            self._curr_active_window = curr_active_window
            return False

        prev_active_window = self._curr_active_window
        self._curr_active_window = curr_active_window

        # A change in active window can mean that a new window was created,
        # a window was closed or the user switched between windows.
        if prev_active_window != curr_active_window:
            return True

        return False

    def _clear_last_saved_buffer(self):
        self._last_saved_buffer = None

    def _clear_last_closed_buffer(self):
        self._last_closed_buffer = None

    def _find_open_file(self, file_name):
        """Return True if file_name is open in any Sublime window."""
        for win in sublime.windows():
            if win.find_open_file(file_name):
                return True
        return False

    def on_post_save(self, view):
        self._check_active_window()
        file_name = view.file_name()
        if not view.is_scratch() and file_name:
            # ignore multiple calls for the same buffer for 1 second.
            if file_name != self._last_saved_buffer:
                self._last_saved_buffer = file_name
                indexer.buffer_promoted(file_name)
                sublime.set_timeout_async(self._clear_last_saved_buffer, 1000)

    def on_close(self, view):
        self._check_active_window()
        file_name = view.file_name()
        if not view.is_scratch() and file_name:
            # only send buffer demoted if all views into the buffer have been
            # closed.
            if file_name != self._last_closed_buffer and not self._find_open_file(file_name):
                self._last_closed_buffer = file_name
                indexer.buffer_demoted(file_name)
                sublime.set_timeout_async(self._clear_last_closed_buffer, 1000)

    def on_activated(self, view):
        focus_changed = self._check_active_window()
        window_id = view.window().id() if view.window() else 0
        # a pending project command completing also warrants a notification
        proj_command_complete = False
        if window_id in self._project_command_in_progress:
            proj_command_complete = True
            self._project_command_in_progress.remove(window_id)
        if window_id and (focus_changed or proj_command_complete):
            indexer.window_state_changed()

    def on_window_command(self, win, cmd_name, args):
        self._check_active_window()
        if not win.id():
            return

        if cmd_name in PROJECT_COMMANDS:
            if win.id() not in self._project_command_in_progress:
                self._project_command_in_progress.append(win.id())
            else:
                # BUG FIX: the original passed a format string with no
                # arguments, printing the raw "%s"/"%d" placeholders.
                print("Got command %s from win: %d while other already in progress"
                      % (cmd_name, win.id()))
        elif cmd_name == 'refresh_folder_list':
            indexer.refresh(win)
        elif cmd_name == 'remove_folder':
            indexer.window_state_changed()
|
Garza K M, Sebzda E, Ohteki T, and Ohashi P S. Immunologically Mediated Endocrine Diseases. Lippincott Williams & Wilkins, 2002. 01: Immunotherapies: role of peptides in the generation and regulation of autoimmunity.
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class CloudServiceRolesOperations(object):
    """CloudServiceRolesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    NOTE: this file is generated by AutoRest (see the file header); manual
    changes will be lost if the code is regenerated.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2021_03_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def get(
        self,
        role_name,  # type: str
        resource_group_name,  # type: str
        cloud_service_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.CloudServiceRole"
        """Gets a role from a cloud service.

        :param role_name: Name of the role.
        :type role_name: str
        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param cloud_service_name: Name of the cloud service.
        :type cloud_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CloudServiceRole, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2021_03_01.models.CloudServiceRole
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CloudServiceRole"]
        # default ARM status-code -> exception mapping; callers may extend or
        # override it with an 'error_map' kwarg
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'roleName': self._serialize.url("role_name", role_name, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'cloudServiceName': self._serialize.url("cloud_service_name", cloud_service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CloudServiceRole', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles/{roleName}'}  # type: ignore

    def list(
        self,
        resource_group_name,  # type: str
        cloud_service_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.CloudServiceRoleListResult"]
        """Gets a list of all roles in a cloud service. Use nextLink property in the response to get the
        next page of roles. Do this till nextLink is null to fetch all the roles.

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param cloud_service_name: Name of the cloud service.
        :type cloud_service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CloudServiceRoleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.CloudServiceRoleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CloudServiceRoleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request (from the templated URL) or a
            # follow-up request from the service-provided continuation link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'cloudServiceName': self._serialize.url("cloud_service_name", cloud_service_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is a complete URL that already embeds its query
                # string, so no extra parameters are added here
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('CloudServiceRoleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles'}  # type: ignore
|
Like many working men and women in the U.S., the staff of political campaigns and state parties experience pressures on the job, and like all workers in America, these campaign staffers deserve to have a strong voice on the job. The Teamsters Union has a proven record when it comes to delivering on strong contracts for all of our members. The Teamsters are proud to welcome our newest members – the Teamster Campaign Workers.
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
from collections import OrderedDict
"""
Main scanning functions.
Note: this API is unstable and still evolving.
"""
def extract_archives(location, recurse=True):
    """
    Extract recursively any archives found at location and yield an iterable of
    ExtractEvents.

    :param location: path to a file or directory to scan for archives.
    :param recurse: presumably controls whether extraction descends into
        archives found inside other archives -- confirm in extractcode.extract.

    NOTE(review): an earlier version of this docstring described a `verbose`
    flag that is not part of this function's signature; it has been removed.
    """
    from extractcode.extract import extract
    from extractcode import default_kinds
    for xevent in extract(location, kinds=default_kinds, recurse=recurse):
        yield xevent
def get_copyrights(location):
    """
    Yield an iterable of dictionaries of copyright data detected in the file
    at location. Each item contains a list of copyright statements and a
    start and end line.
    """
    from cluecode.copyrights import detect_copyrights

    for detection in detect_copyrights(location):
        copyrights, authors, _years, holders, start_line, end_line = detection
        if not copyrights:
            continue
        # FIXME: we should call this copyright instead, and yield one item
        # per statement
        yield OrderedDict([
            ('statements', copyrights),
            ('holders', holders),
            ('authors', authors),
            ('start_line', start_line),
            ('end_line', end_line),
        ])
def get_emails(location):
    """
    Yield an iterable of dictionaries of emails detected in the file at
    location.
    """
    from cluecode.finder import find_emails

    for found_email, line in find_emails(location):
        if not found_email:
            continue
        # One dict per detected email; emails are single-line, so the
        # start and end lines coincide.
        yield OrderedDict([
            ('email', found_email),
            ('start_line', line),
            ('end_line', line),
        ])
def get_urls(location):
    """
    Yield an iterable of dictionaries of urls detected in the file at
    location.
    """
    from cluecode.finder import find_urls

    for found_url, line in find_urls(location):
        if not found_url:
            continue
        # One dict per detected URL; URLs are single-line, so the start
        # and end lines coincide.
        yield OrderedDict([
            ('url', found_url),
            ('start_line', line),
            ('end_line', line),
        ])
# URL template: a license key is substituted in to build a link to the
# public DejaCode license library page for that license.
DEJACODE_LICENSE_URL = 'https://enterprise.dejacode.com/license_library/Demo/{}/'
def get_licenses(location, minimum_score=100):
    """
    Yield an iterable of dictionaries of license data detected in the file at
    location for each detected license.
    minimum_score is the minimum score threshold from 0 to 100. The default
    of 100 means only exact licenses will be detected. With any value below
    100, approximate license results are included. Note that the minimum
    length for an approximate match is four words.
    """
    from licensedcode.models import get_license
    from licensedcode.detect import get_license_matches

    for match in get_license_matches(location, minimum_score=minimum_score):
        position = match.query_position
        # A single rule can reference several license keys; emit one
        # result per key.
        for license_key in match.rule.licenses:
            lic = get_license(license_key)
            yield OrderedDict([
                ('key', lic.key),
                ('score', match.score),
                ('short_name', lic.short_name),
                ('category', lic.category),
                ('owner', lic.owner),
                ('homepage_url', lic.homepage_url),
                ('text_url', lic.text_urls[0] if lic.text_urls else ''),
                ('dejacode_url', DEJACODE_LICENSE_URL.format(lic.key)),
                ('spdx_license_key', lic.spdx_license_key),
                ('spdx_url', lic.spdx_url),
                ('start_line', position.start_line),
                ('end_line', position.end_line),
            ])
def get_file_infos(location):
    """
    Return a list of dictionaries of informations collected from the file or
    directory at location.
    """
    from commoncode import fileutils
    from commoncode import filetype
    from commoncode.hash import sha1, md5
    from typecode import contenttype
    # Single content-type probe reused for every type-dependent field below.
    T = contenttype.get_type(location)
    is_file = T.is_file
    is_dir = T.is_dir
    infos = OrderedDict()
    infos['type'] = filetype.get_type(location, short=False)
    infos['name'] = fileutils.file_name(location)
    # NOTE(review): the `cond and value or fallback` idiom below returns the
    # fallback whenever `value` is falsy — e.g. a directory containing zero
    # files reports files_count None rather than 0. Preserved as-is.
    infos['extension'] = is_file and fileutils.file_extension(location) or ''
    infos['date'] = is_file and filetype.get_last_modified_date(location) or None
    infos['size'] = T.size
    # Hashes only make sense for regular files, not directories.
    infos['sha1'] = is_file and sha1(location) or None
    infos['md5'] = is_file and md5(location) or None
    infos['files_count'] = is_dir and filetype.get_file_count(location) or None
    infos['mime_type'] = is_file and T.mimetype_file or None
    infos['file_type'] = is_file and T.filetype_file or None
    infos['programming_language'] = is_file and T.programming_language or None
    infos['is_binary'] = is_file and T.is_binary or None
    infos['is_text'] = is_file and T.is_text or None
    infos['is_archive'] = is_file and T.is_archive or None
    infos['is_media'] = is_file and T.is_media or None
    infos['is_source'] = is_file and T.is_source or None
    infos['is_script'] = is_file and T.is_script or None
    # One-element list matches the shape of the other get_*_infos helpers.
    return [infos]
def get_package_infos(location):
    """
    Return a list of dictionaries of package information
    collected from the location or an empty list.
    """
    from packagedcode.recognize import recognize_packaged_archives

    package = recognize_packaged_archives(location)
    # Empty list when no packaged archive is recognized at this location.
    return [package.as_dict(simple=True)] if package else []
|
You guys notice those 10-Step “How-To” Articles? The ones that give different tips or ways to improve something; ranging from hair to relationships. I came across one the other day that advised its readers on how to be happy. And as much as I find these types of articles interesting, I feel they are somewhat misleading…They say things like: Smile, Be Compassionate, Travel, etc.
But what if I just don’t feel like smiling? I’m tired, on my way home from a long, stressful day at work and the only thing I want to do is swan dive into my bed…there would probably be nothing more annoying at that point than someone telling me to smile. Being compassionate is a great quality to have, but what’s the line between compassion and doing more for others than for yourself? Which can be more detrimental than beneficial after a certain point and with a certain type of person. Traveling, which I’m sure is on the top of everyone’s bucket list, but what if we simply can’t afford to go anywhere? What then??
What irks me about these articles is that they act as though a monumental accomplishment in life, like being happy, can be achieved in a certain number of steps. Everyone has an opinion on what works, but that is based on their own perspective and life experience, which can be completely different from your own. The definition of happiness and how to get there means something different to everyone. For me, finding happiness has been a journey that is ongoing. I can say that I am in an exponentially better place than I have been in the past, but I can't say I'm where I want to be yet. The biggest thing I've learned is that happiness is not a constant state. There are some days when you'll feel like dancing, and other days you'll feel like crying, and there is nothing wrong with that. As long as you recognize the good and bad in life, appreciate the highs, and acknowledge the lows, I feel like you're on the right track. Anyone who says they're happy 100% of the time is probably not being completely honest with themselves.
I’ve also learned that your surroundings affect your attitude which affects how happy you can be. If you have people in your life that are always negative: they’re not supportive, they always doubt you, they’re always talking about something or someone; whether they’re a significant other or a best friend or your boss, that person is not healthy for you. Those How-To articles don’t mention the time and effort it takes to make necessary changes and commit to them which is hard for many of us who are a part of this generation that is so accustomed to quick fixes and instantaneous solutions. But the most important thing I’ve realized is that there is no quick fix to happiness. It’s not like a light switch that you can turn on. Toxic energies are contagious, and when someone is down, they bring you down to their level whether you want to be there or not. But if you keep pushing and trying to seek what makes you happy, you’ll get there.
|
"""An implementation of a doubly linked list in Python."""
class Node():
    """A single element of a doubly linked list."""

    def __init__(self, value=None, nxt=None, previous=None):
        """Store the value and the links to both neighbors."""
        self.value = value
        self.next = nxt
        self.previous = previous


class DbLinkedList():
    """A doubly linked list supporting push/append/pop/shift/remove."""

    def __init__(self, value=None):
        """Create a list, optionally seeded with one value.

        Note: a falsy seed value (0, '', None) is ignored, matching the
        original constructor contract.
        """
        self.head = None
        self.tail = None
        self.length = 0
        if value:
            self.push(value)

    def push(self, value=None):
        """Insert value at the head of the list."""
        new_node = Node(value, nxt=self.head)
        if self.length < 1:
            # First node is simultaneously head and tail.
            self.tail = new_node
        else:
            self.head.previous = new_node
        self.head = new_node
        self.length += 1

    def append(self, value):
        """Insert value at the tail of the list."""
        new_node = Node(value, None, self.tail)
        if self.length < 1:
            self.head = new_node
        else:
            self.tail.next = new_node
        self.tail = new_node
        self.length += 1

    def pop(self):
        """Remove and return the value at the head.

        Raises:
            ValueError: if the list is empty.
        """
        if not self.head:
            raise ValueError("Cannot pop from an empty list")
        returned_value = self.head.value
        self.head = self.head.next
        if self.head is not None:
            self.head.previous = None
        else:
            # Fixed: popping the only node used to dereference None;
            # also clear the now-stale tail pointer.
            self.tail = None
        self.length -= 1
        return returned_value

    def shift(self):
        """Remove and return the value at the tail.

        Raises:
            ValueError: if the list is empty.
        """
        if not self.tail:
            raise ValueError("Cannot shift from an empty list")
        returned_value = self.tail.value
        self.tail = self.tail.previous
        if self.tail is not None:
            self.tail.next = None
        else:
            # Fixed: shifting the only node used to dereference None;
            # also clear the now-stale head pointer.
            self.head = None
        self.length -= 1
        return returned_value

    def remove(self, value):
        """Remove the first node holding value.

        Raises:
            ValueError: if the list is empty or value is not present.
        """
        if not self.length:
            raise ValueError("Cannot remove from an empty list")
        if self.head.value == value:
            self.pop()
            return
        curr_node = self.head.next
        while curr_node is not None:
            if curr_node.value == value:
                if curr_node.next is None:
                    # Fixed: removing the tail used to dereference None.
                    self.shift()
                else:
                    curr_node.previous.next = curr_node.next
                    curr_node.next.previous = curr_node.previous
                    # Fixed: the original forgot to decrement length here.
                    self.length -= 1
                print("{} was removed".format(value))
                return
            curr_node = curr_node.next
        raise ValueError("{} not in the list".format(value))
|
Cycles up to 13 weeks have entered down phases on schedule, while the 6 month and 10-12 month cycles remain in up phases. Here’s how to play it.
Try Lee Adler’s Technical Trader and Liquidity Trader risk free for 90 days! Subscribe today and save! Subscription prices will increase when the Liquidity Trader website is formally launched within the next couple of weeks.
|
#! /usr/bin/env python
"""
usage: %(progname)s [args]
"""
import os, sys, string, time, getopt
from log import *
import odb
import MySQLdb
class Cursor(odb.Cursor):
    """MySQL-specific cursor wrapper for the odb abstraction layer."""

    def insert_id(self, tablename, colname):
        # MySQLdb exposes the last AUTO_INCREMENT id directly on the raw
        # cursor; the tablename/colname arguments are unused here but kept
        # for compatibility with the odb.Cursor interface.
        return self.cursor.insert_id()
class Connection(odb.Connection):
    """MySQL backend for the odb database abstraction layer.

    Wraps a MySQLdb connection and supplies the driver-specific hooks
    (escaping, schema introspection, table creation/alteration) that
    odb.Connection expects.
    """

    def __init__(self, host, user, passwd, db):
        """Open a MySQLdb connection to database `db` on `host` as `user`."""
        odb.Connection.__init__(self)
        self._conn = MySQLdb.connect(host=host, user=user, passwd=passwd, db=db)
        # Expose the driver's base exception so generic odb code can catch
        # backend errors without importing MySQLdb itself.
        self.SQLError = MySQLdb.Error

    def getConnType(self): return "mysql"

    def cursor(self):
        # Wrap the raw DB-API cursor in the odb-aware Cursor defined above.
        return Cursor(self._conn.cursor())

    def escape(self,str):
        # NOTE(review): parameter `str` shadows the builtin; kept unchanged
        # for interface compatibility. None passes through for SQL NULL.
        if str is None: return None
        return MySQLdb.escape_string(str)

    def listTables(self, cursor):
        """Return the list of table names in the current database."""
        cursor.execute("show tables")
        rows = cursor.fetchall()
        tables = []
        for row in rows:
            tables.append(row[0])
        return tables

    def listIndices(self, tableName, cursor):
        """Return the index names defined on tableName.

        Column index 2 of SHOW INDEX output is the Key_name (index name)
        column.
        """
        cursor.execute("show index from %s" % tableName)
        rows = cursor.fetchall()
        tables = map(lambda row: row[2], rows)
        return tables

    def listFieldsDict(self, table_name, cursor):
        """Return a dict mapping column name to its SHOW COLUMNS row."""
        sql = "show columns from %s" % table_name
        cursor.execute(sql)
        rows = cursor.fetchall()
        columns = {}
        for row in rows:
            colname = row[0]
            columns[colname] = row
        return columns

    def alterTableToMatch(self, table, cursor):
        """Add to the database any columns the app defines but the DB lacks.

        Columns are only ever added — never dropped or retyped.
        """
        invalidAppCols, invalidDBCols = table.checkTable()
        if not invalidAppCols: return
        defs = []
        for colname in invalidAppCols.keys():
            col = table.getColumnDef(colname)
            colname = col[0]
            coltype = col[1]
            options = col[2]
            defs.append(table._colTypeToSQLType(colname, coltype, options))
        defs = string.join(defs, ", ")
        sql = "alter table %s add column " % table.getTableName()
        sql = sql + "(" + defs + ")"
        print sql
        cursor.execute(sql)

    def createTable(self, sql, cursor):
        # Append the legacy storage-engine clause (TYPE=INNODB predates the
        # ENGINE=InnoDB syntax) and return the amended statement; the
        # caller is responsible for executing it.
        sql = sql + " TYPE=INNODB"
        return sql

    def supportsTriggers(self): return False
|
So how to preface this collection of images of the stunning West of Ireland Elopement of Saja & Dirk?
I could talk about how they got in touch first by replying to an Instagram Story of me filing my taxes by stuffing a fistful of receipts into an envelope for my poor suffering accountant. I could talk of how I realised a few emails into our discussions that the bride was an amazing wedding photographer herself. I could talk about how I then realised the person she was marrying was ALSO an amazing wedding photographer. I could mention how at this point I thought “oh f**k can I do this?” but trusted they loved my work and were up for an adventure. I could talk about how they hired one of my best friends to join them to provide beautiful acoustic music that floated out into the incredible landscape of Lough Inagh and filled the already electric air with atmosphere and emotion. I could talk about how I sang at HIS wedding, but that would be sidetracking. How about I just say very little and let these images speak for themselves?
Saja and Dirk are photographers, you can see their brilliant work here and here. They have a dog named Hugo and they brought him and their families from Germany to Connemara and eloped. Their ceremony was held at the foot of Lough Inagh Lodge and we then spent nearly 2 hours walking around taking portraits and having fun. They gave me wine and gin as presents.
Saja and Dirk – thanks for trusting me to capture this and see you guys at Islanders!
|
#!/usr/bin/env python
# coding: utf-8
###################################################################
# Script that uses the Calil API to check the lending availability
# of books held by the University of Tokyo library
# 2015.05.25 yk_tani
###################################################################
import urllib2, sys, json
try: isbn = sys.argv[1]  # ISBN given as the first command-line argument
except: isbn = '9784873112107'  # default value used when no argument is given
def pp(obj):  # pretty-print a dictionary/list that contains unicode, via eval
    # adapted from http://taichino.com/programming/1599
    # https://github.com/taichino/prettyprint
    # NOTE(review): eval of JSON-dumped text is fragile and unsafe if obj
    # can contain untrusted strings (e.g. embedded triple quotes).
    if isinstance(obj, list) or isinstance(obj, dict):
        orig = json.dumps(obj, indent=4)
        print eval("u'''%s'''" % orig).encode('utf-8')
    else:
        print obj
appkey = 'gci2015'
systemid = 'Univ_Tokyo'  # University of Tokyo library system by default

# Fetch holdings data from the Calil "check" API.
# Fixed: the user-supplied ISBN is now interpolated into the request (a
# value was hard-coded before, so the command-line argument was ignored),
# and the appkey is no longer wrapped in literal braces.
url = 'http://api.calil.jp/check?appkey=%s&isbn=%s&systemid=%s&format=json' % (
    appkey, isbn, systemid)
resp = urllib2.urlopen(url).read()

# Strip the JSONP wrapper ("callback(...);") so the payload is valid JSON.
resp = resp.replace('callback(', '', 1).replace(');', '', 1)

# Decode into a dictionary.
data = json.loads(resp)

# For each book, show the holding libraries and whether lending is possible.
for book in data["books"]:
    pp(data["books"][book][systemid]['libkey'])
|
Reasonable growth potential with acceptable track record.
Polymetal International plc operates as a precious metals mining company in Russia, Kazakhstan, East Asia, and Europe. The last earnings update was 30 days ago. More info.
POLY outperformed the Metals and Mining industry which returned -1.6% over the past year.
POLY outperformed the Market in Russian Federation which returned 7.9% over the past year.
Is Polymetal International undervalued based on future cash flows and its price relative to the stock market?
Here we compare the current share price of Polymetal International to its discounted cash flow analysis.value.
Below are the data sources, inputs and calculation used to determine the intrinsic value for Polymetal International.
The calculations below outline how an intrinsic value for Polymetal International is arrived at by discounting future cash flows to their present value using the 2 stage method. We try to start with analysts estimates of free cash flow, however if these are not available we use the most recent financial results. In the 1st stage we continue to grow the free cash flow over a 10 year period, with the growth rate trending towards the perpetual growth rate used in the 2nd stage. The 2nd stage assumes the company grows at a stable rate into perpetuity.
The current share price of Polymetal International is above its future cash flow value.
The amount the stock market is willing to pay for Polymetal International's earnings, growth and assets is considered below, and whether this is a fair price.
Are Polymetal International's earnings available for a low price, and how does this compare to other companies in the same industry?
** Primary Listing of Polymetal International.
Polymetal International is overvalued based on earnings compared to the RU Metals and Mining industry average.
Polymetal International is overvalued based on earnings compared to the Russian Federation market.
Does Polymetal International's expected growth come at a high price?
Polymetal International is poor value based on expected growth next year.
What value do investors place on Polymetal International's assets?
* Primary Listing of Polymetal International.
Polymetal International is overvalued based on assets compared to the RU Metals and Mining industry average.
Polymetal International has a total score of 0/6, see the detailed checks below.
How is Polymetal International expected to perform in the next 1 to 3 years based on estimates from 17 analysts?
Is Polymetal International expected to grow at an attractive rate?
Polymetal International's earnings growth is expected to exceed the low risk savings rate of 8.4%.
Polymetal International's earnings growth is expected to exceed the Russian Federation market average.
Polymetal International's revenue growth is expected to exceed the Russian Federation market average.
Polymetal International's earnings are expected to grow by 10.5% yearly, however this is not considered high growth (20% yearly).
Polymetal International's revenue is expected to grow by 7.5% yearly, however this is not considered high growth (20% yearly).
All data from Polymetal International Company Filings, last reported 3 months ago, and in Trailing twelve months (TTM) annual period rather than quarterly.
Polymetal International is expected to efficiently use shareholders’ funds in the future (Return on Equity greater than 20%).
Polymetal International has a total score of 4/6, see the detailed checks below.
How has Polymetal International performed over the past 5 years?
Below we compare Polymetal International's growth in the last year to its industry (Metals and Mining).
Polymetal International has delivered over 20% year on year earnings growth in the past 5 years.
Polymetal International's earnings growth has exceeded the RU Metals and Mining industry average in the past year (15.8% vs -2.7%).
Polymetal International's revenue and profit over the past 5 years is shown below, any years where they have experienced a loss will show up in red.
Whilst Polymetal International has efficiently used shareholders’ funds last year (Return on Equity greater than 20%), this is metric is skewed due to its high level of debt.
Polymetal International used its assets more efficiently than the RU Metals and Mining industry average last year based on Return on Assets.
Polymetal International's use of capital deteriorated last year versus 3 years ago (Return on Capital Employed).
Polymetal International has a total score of 3/6, see the detailed checks below.
How is Polymetal International's financial health and their level of debt?
The boxes below represent the relative size of what makes up Polymetal International's finances.
Polymetal International is able to meet its short term (1 year) commitments with its holdings of cash and other short term assets.
Polymetal International's long term commitments exceed its cash and other short term assets.
This treemap shows a more detailed breakdown of Polymetal International's finances. If any of them are yellow this indicates they may be out of proportion and red means they relate to one of the checks below.
All data from Polymetal International Company Filings, last reported 3 months ago.
Polymetal International's level of debt (135.9%) compared to net worth is high (greater than 40%).
The level of debt compared to net worth has increased over the past 5 years (62.1% vs 135.9% today).
Debt is well covered by operating cash flow (27%, greater than 20% of total debt).
What is Polymetal International's current dividend yield, its reliability and sustainability?
Current annual income from Polymetal International dividends. Estimated to be 5.5% next year.
If you bought RUB2,000 of Polymetal International shares you are expected to receive RUB90 in your first year as a dividend.
Polymetal International's pays a higher dividend yield than the bottom 25% of dividend payers in Russian Federation (3.26%).
Polymetal International's dividend is below the markets top 25% of dividend payers in Russian Federation (9.04%).
Purchase Polymetal International before the 'Buy Limit' to receive their next dividend payment.
Polymetal International has been paying a dividend for less than 10 years and during this time payments have been volatile (annual drop of over 20%).
Dividend payments have increased, but Polymetal International only paid a dividend in the past 7 years.
What portion of Polymetal International's earnings are paid to the shareholders as a dividend.
What is the CEO of Polymetal International's salary, the management and board of directors tenure and is there insider trading?
Mr. Vitaly N. Nesis has been Group Chief Executive Officer of Polymetal International Plc since December 1, 2014. Mr. Nesis served as the Chief Executive Officer of Joint Stock Company Polymetal since 2003 and served as its Member of the Management Board. From 2002 to 2003, he served as Chief Executive Officer of Vostsibugol. Mr. Nesis served as Chief Executive Officer at Polymetal International Plc since September 2011. He served as the General Manager of JSC, Vostsibugol (Irkutsk). He served as an Analyst in Merrill Lynch (USA) from 1997 to 1999. He served in the Moscow representation of McKinsey & Company office in Moscow from 1999 to 2000. From 2001 to 2002, he served as the Director/Head of the Investment Planning Department at SUAL Holding. He served as Strategic Development Director of JSC Ulyanovsk Car Factory, UAZ/Ulyanovsk Automobile Plant from 2000 to 2001. Mr. Nesis has been an Executive Director of Polymetal International Plc since September 29, 2011. He served as an Executive Director at Joint Stock Company Polymetal from June 2004 to 2012. Mr. Nesis graduated from Yale University in 1997 with a BA in Economics and received an MA in Mining Economics from St. Petersburg Mining Institute.
Vitaly's compensation has increased by more than 20% in the past year.
Insufficient data for Vitaly to establish whether their remuneration is reasonable compared to companies of similar size in .
The average tenure for the Polymetal International management team is over 5 years, this suggests they are a seasoned and experienced team.
The tenure for the Polymetal International board of directors is about average.
Polymetal International has a total score of 0/6, this is not included on the snowflake, see the detailed checks below.
Polymetal International plc operates as a precious metals mining company in Russia, Kazakhstan, East Asia, and Europe. The company operates in four segments: Magadan, Ural, Khabarovsk, and Kazakhstan. It is involved in the exploration, extraction, processing, reclamation, and other related activities of gold, silver, copper, zinc, and platinum group metals. The company’s flagship project is the Kyzyl project located in the East Kazakhstan region, Kazakhstan. Polymetal International plc was founded in 1998 and is headquartered in Saint Petersburg, Russia.
|
#!/usr/bin/env python2
# Copyright 2011, 2012 Alexandre Gravier (al.gravier@gmail)
# This file is part of PyCogMo.
# PyCogMo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyCogMo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyCogMo. If not, see <http://www.gnu.org/licenses/>.
""" Functions and classes wrapping or complementing PyNN
functionality.
"""
import csv
import itertools
import functools
from math import isnan, ceil
import magic
import math
import numpy
import operator
from PIL import Image
import pyNN.brian as pynnn
import SimPy.Simulation as sim
import types
from utils import LOGGER, is_square, splice
class InvalidFileFormatError(Exception):
    """Raised when an input file's MIME type is not a supported format."""

    def __init__(self, mime_type, mime_subtype):
        self._mime_type = mime_type
        self._mime_subtype = mime_subtype

    def __str__(self):
        return "%s files of type %s are not supported." % (
            self._mime_type, self._mime_subtype)
class InvalidMatrixShapeError(Exception):
    """Raised when input data dimensions differ from the required shape."""

    def __init__(self, req_dim1, req_dim2, prov_dim1, prov_dim2):
        self._required = (req_dim1, req_dim2)
        self._provided = (prov_dim1, prov_dim2)

    def __str__(self):
        args = self._required + self._provided
        return ("The required input data shape should be "
                "%s,%s, but the shape of the data provided is "
                "%s,%s.") % args
class SimulationError(Exception):
    """Error raised for invalid simulation states or arguments."""

    def __init__(self, msg):
        self._message = msg

    def __str__(self):
        return self._message
def presynaptic_outputs(unit, projection, t=None):
    """Returns the vector of all firing rates of units in the
    presynaptic population that are connected to the given unit. The t
    parameter can be set to restrict the computation to activity
    younger than t units of time. The presynaptic population must have
    a registered rate encoder with records."""
    pre_population = projection.pre
    post_population = projection.post
    if unit not in post_population:
        raise SimulationError("Unit not found in post-synaptic "
                              "population.")
    unit_index = post_population.id_to_index(unit)
    # NOTE(review): get_rate_encoder is defined elsewhere in this module;
    # it appears to return an encoder exposing `idx` (last record index)
    # and get_rate_for_unit_index — confirm against its definition.
    renc = get_rate_encoder(pre_population)
    if renc.idx < 0:
        raise SimulationError(
            "Cannot compute presynaptic activation because the "
            "rate encoder of the presynaptic population does not "
            "contain any record.")
    # A NaN weight marks a non-connected pair, so the boolean in each
    # (index, connected) pair below is True only for real connections.
    connectivity = projection.get('weight', 'array')
    connectivity_to_unit = \
        [(i, not math.isnan(connectivity[i][unit_index]))
         for i in xrange(len(connectivity))]
    # Collect rates only for presynaptic units connected to `unit`.
    rates = numpy.array(
        [renc.get_rate_for_unit_index(i, t) for i, _
         in itertools.ifilter((lambda v: v[1]), connectivity_to_unit)])
    return rates
class Weights(object):
    """Wraps a 2D array of floating-point numbers that has the same
    dimensions as the connectivity matrix between the two populations
    of neurons connected. Non-connected units i and j have
    weights[i][j] == NaN. Initial weights should be input (and are
    internally stored) in nA or micro-Siemens. As they need to be
    normalized for the purpose of learning, max_weight needs to be
    provided. It is model-specific and should reflect the maximum
    conductance of a synapse/group of synaptic connections from one
    cell to the other. It is the physical value corresponding to the
    normalized weight value of 1 between 2 cells.

    All methods and properties return normalized weights unless
    specified otherwise."""

    def __init__(self, weights_array, max_weight):
        # Weights are stored normalized (physical value / max_weight).
        self._max_weight = max_weight * 1.
        self._weights = numpy.array(weights_array) / self._max_weight
        self._update_shape()

    # TODO: use max_weight for hard bounding here and make soft bounded
    # learning functions in nettraining.
    def __eq__(self, other):
        """Equality on non-normalized values; NaNs at the same position
        compare equal. A Weights operand must also share max_weight;
        lists and numpy arrays are compared elementwise."""
        internalw = None
        if isinstance(other, numpy.ndarray):
            internalw = other
        elif isinstance(other, list):
            internalw = numpy.array(other)
        elif not isinstance(other, Weights):
            return False
        else:
            if other.max_weight != self.max_weight:
                return False
            internalw = other.non_normalized_numpy_weights
        if len(numpy.atleast_1d(self.non_normalized_numpy_weights)) != \
                len(numpy.atleast_1d(internalw)):
            return False
        n_r = len(self.non_normalized_numpy_weights)
        for i in range(n_r):
            l = len(numpy.atleast_1d(self.non_normalized_numpy_weights[i]))
            if l != len(numpy.atleast_1d(internalw[i])):
                return False
            for j in range(l):
                v1 = self.non_normalized_numpy_weights[i][j]
                v2 = internalw[i][j]
                if (isnan(v1) and isnan(v2)):
                    continue
                if v1 != v2:
                    return False
        return True

    def _update_shape(self):
        """Cache the matrix dimensions (dim2 is 0 for 1D arrays)."""
        shape = self._weights.shape
        self._dim1 = shape[0]
        if len(shape) > 1:
            self._dim2 = shape[1]
        else:
            self._dim2 = 0

    @property
    def max_weight(self):
        return self._max_weight

    @property
    def shape(self):
        return self._dim1, self._dim2

    @property
    def non_normalized_weights(self):
        return (self._weights * self._max_weight).tolist()

    @non_normalized_weights.setter
    def non_normalized_weights(self, weights_array):
        if isinstance(weights_array, numpy.ndarray):
            self._weights = weights_array / self._max_weight
        elif isinstance(weights_array, list):
            self._weights = numpy.array(weights_array) / self._max_weight
        elif isinstance(weights_array, Weights):
            self._weights = weights_array.normalized_numpy_weights
        else:
            raise TypeError("Weights can be assigned to "
                            "numpy.ndarray, common.pynn_utils.Weights,"
                            " or list types.")
        self._update_shape()

    @property
    def flat_non_normalized_weights(self):
        return list(itertools.chain.from_iterable(
            (self._weights * self._max_weight).tolist()))

    @flat_non_normalized_weights.setter
    def flat_non_normalized_weights(self, w):
        wr = numpy.reshape(w, self.shape)
        self._weights = wr / self._max_weight

    @property
    def non_normalized_numpy_weights(self):
        return self._weights * self._max_weight

    @non_normalized_numpy_weights.setter
    def non_normalized_numpy_weights(self, w):
        self._weights = w / self._max_weight
        self._update_shape()

    @property
    def normalized_numpy_weights(self):
        return self._weights

    @normalized_numpy_weights.setter
    def normalized_numpy_weights(self, w):
        self._weights = w
        self._update_shape()

    def __getitem__(self, i):
        return self._weights[i]

    def set_normalized_weight(self, i, j, w):
        self._weights[i][j] = w

    def set_non_normalized_weight(self, i, j, w):
        self._weights[i][j] = w / self._max_weight

    def _apply_binary_scalar_operator(self, operator, other):
        """Return a new Weights whose normalized matrix is
        operator(self, other) applied elementwise.

        `other` may be a Weights with the same max_weight, or any
        indexable 2D structure (list, numpy array) of the same shape.

        Fixes over the original implementation:
        * the incompatible-max_weight ValueError is actually raised;
        * NameError on the undefined `initializer` removed;
        * the operator is applied for Weights operands too (the old
          code always subtracted, so Weights + Weights was wrong);
        * identity checks instead of `== None` (ambiguous on arrays).
        """
        if isinstance(other, Weights):
            if other._max_weight != self._max_weight:
                raise ValueError("Operation not possible as operands have "
                                 "incompatible maximum conductances.")
            other_values = other._weights
            oshape = other.shape
        elif hasattr(other, '__getitem__'):
            other_values = other
            oshape = tuple(numpy.shape(other))
        else:
            raise TypeError("Second operand could not be interpreted "
                            "as an array of weights.")
        if oshape != self.shape:
            raise ValueError("Operation not possible as operands have "
                             "incompatible shapes.")
        r = numpy.zeros(self.shape)
        for x in range(self._dim1):
            for y in range(self._dim2):
                r[x][y] = operator(self._weights[x][y], other_values[x][y])
        w = Weights([0], max_weight=self._max_weight)
        w._dim1, w._dim2 = self.shape
        w._weights = r
        return w

    def __add__(self, other):
        return self._apply_binary_scalar_operator(operator.add, other)

    def __sub__(self, other):
        return self._apply_binary_scalar_operator(operator.sub, other)

    def __radd__(self, other):
        return self._apply_binary_scalar_operator(operator.add, other)

    def __rsub__(self, other):
        return self._apply_binary_scalar_operator(lambda a, b: b - a, other)

    def get_normalized_weights_vector(self, target_idx):
        """Returns the weights vector to unit target_idx (target unit
        index in target population). NaNs (weights of connections from
        non-connected units) are omitted."""
        w_with_nans = [self._weights[i][target_idx]
                       for i in range(self._dim1)]
        # Comprehension instead of py2-only itertools.ifilterfalse.
        return [w for w in w_with_nans if not math.isnan(w)]

    def set_normalized_weights_vector(self, target_idx, weights):
        """Sets the weights vector to unit target_idx (target unit
        index in target population). The weight vector should have as
        many elements as connected units (no NaN allowed)."""
        wi = 0
        try:
            for i in range(self._dim1):
                if not math.isnan(self._weights[i][target_idx]):
                    self._weights[i][target_idx] = weights[wi]
                    wi += 1
        except IndexError:
            raise SimulationError("Dimension mismatch (not enough elements "
                                  "to assign to weights vector).")
        if wi < len(weights):
            raise SimulationError("Dimension mismatch (too many elements "
                                  "to assign to weights vector).")

    def __repr__(self):
        "Prints the weights, mostly for debug purposes"
        old_printopt = numpy.get_printoptions()
        try:
            import sys
            # sys.maxsize exists in both py2 and py3 (sys.maxint is py2-only).
            numpy.set_printoptions(threshold=sys.maxsize, suppress=True)
            import os
            rows, columns = map(int, os.popen('stty size', 'r').read().split())
            r = "Weights(weights_array= \\\n%s, max_weight=%r)" % \
                (numpy.array_str(a=self._weights,
                                 max_line_width=columns - 5,
                                 precision=2),
                 self._max_weight)
        finally:
            numpy.set_printoptions(**old_printopt)
        return r
def get_weights(proj, max_weight):
    """Returns a Weights object with the values of the weights of the
    projection. Use max_w to setup the maximal conductance in micro-S
    or current in nA."""
    weights_array = proj.getWeights(format='array')
    return Weights(weights_array, max_weight=max_weight)
def set_weights(proj, w):
    """Sets the weights of the projection to the internal (non-normalized)
    values in w."""
    # Guard clause: only Weights instances carry the flat weights view.
    if not isinstance(w, Weights):
        raise TypeError("Requires an argument of class Weights.")
    proj.setWeights(w.flat_non_normalized_weights)
def read_input_data(file_path, dim1, dim2, m=None):
    """Reads a dim1 x dim2 input array from an image or a plain-text CSV
    file, dispatching on the file's MIME type.

    The libmagic file identifier can be passed as argument m (used for
    testing). Raises InvalidFileFormatError for unsupported MIME types."""
    if m is None:  # `is None`, not `== None`: identity test for the default
        m = magic.Magic(mime=True)
    mime = m.from_file(file_path)
    mime = mime.lower().split('/')
    float_array = None
    if mime[0] == 'image':
        float_array = read_image_data(file_path)
    elif mime[0] == 'text':
        if mime[1] == 'plain':
            float_array = read_csv_data(file_path)
        else:
            raise InvalidFileFormatError(mime[0], mime[1])
    else:
        raise InvalidFileFormatError(mime[0], mime[1])
    # Reject arrays with the wrong shape or non-real content.
    verify_input_array(float_array, dim1, dim2)
    return float_array
def read_image_data(file_path):
    """Loads an image as a 2D array of floats normalized to [0, 1].
    Raises IOError if the file is not an image."""
    im = Image.open(file_path)
    # if im.size != (dim1, dim2):
    #     raise InvalidMatrixShapeError((dim1, dim2), im.size)
    byte_array = numpy.array(im.convert("L")) # grayscale, [0 255]
    # Normalize 8-bit grayscale values to [0, 1] floats.
    norm_array = byte_array / 255.
    return norm_array
def read_csv_data(file_path):
    """Raises IOError if the file is not a CSV file."""
    rows = []
    try:
        with open(file_path, 'rb') as f:
            for record in csv.reader(f):
                # Skip empty rows, as itertools.ifilter(None, ...) would.
                if record:
                    rows.append(map(float, record))
        return numpy.array(rows)
    except ValueError as e:
        raise IOError(str(e))
def verify_input_array(float_array, dim1, dim2):
    """Checks that float_array is a dim1 x dim2 matrix of real numbers.

    Raises InvalidMatrixShapeError on a dimension mismatch and TypeError
    if any element is not a real number."""
    d1 = len(float_array)
    if d1 != dim1:
        # Column count may be ragged/unchecked at this point ("unknown").
        # (Fixed typo: was "unkown".)
        raise InvalidMatrixShapeError(dim1, dim2, d1, "unknown")
    for r in float_array:
        d2 = len(r)
        if d2 != dim2:
            raise InvalidMatrixShapeError(dim1, dim2, d1, d2)
        real = numpy.isreal(r)
        # numpy.isreal returns a bare bool for scalars and an ndarray for
        # sequences; reduce the ndarray case with .all().
        if not isinstance(real, bool):
            real = real.all()
        if not real:  # row test
            raise TypeError("The input array contains invalid data.")
class InputSample(object):
    """Wraps a 2D array of normalized floating-point numbers that has
    the same dimensions as the InputLayer to which it is
    presented. The data can be an array, or copied from an object with
    [][] accessor, loaded from a file, uniformly initialized to the
    same value, or initialized by a user-provided function."""
    # implement an [][] accessor
    def __init__(self, dim1, dim2, initializer, expand=True):
        """The initializer can be an array, an object with [][]
        accessor, a file path (string), a single floating point number
        within [0,1] (the array is uniformly initialized to the same
        value), or a user-provided callable that takes two integers x
        and y in [0, dim1[ and [0, dim2[ respectively, and returns the
        value to be stored in the array at [x][y]. The optional
        parameter expand affects the case where the initializer is a
        callable, an object with __getitem__, or a single number. In
        those case, setting expand to False prevents the
        precomputation of the whole array, and the InputSample
        accessor encapsulate the function call, the object accessor,
        or always returns the given number. If expand is True, the
        InputSample created is mutable. If expand is False, the
        InputSample is immutable."""
        self._array = []
        # Default accessors read/write the backing 2D list; the lazy
        # (expand=False) branches below replace them.
        self._getitem = lambda k: self._array[k]
        self._setitem = self._assign_to_array
        if isinstance(initializer, basestring):
            # String initializer: treated as a file path.
            try:
                self._array = read_input_data(initializer, dim1, dim2)
            except IOError as e:
                LOGGER.error("Could not read file %s.", initializer)
                raise e
        elif isinstance(initializer, types.FileType):
            raise TypeError("Pass a string with the filepath to the "
                            "InputSample initializer, instead of a "
                            "file descriptor.")
        elif isinstance(initializer, list):
            # Lists are adopted directly (not copied).
            self._array = initializer
        elif hasattr(initializer, '__getitem__'):
            if expand:
                # Copy element by element into a fresh 2D list.
                for x in xrange(dim1):
                    self._array.append([])
                    for y in xrange(dim2):
                        self._array[x].append(initializer[x][y])
            else:
                # Keep the object itself and forward accesses to it.
                self._array = initializer
                self._setitem = self._raise_immutable
        elif hasattr(initializer, '__call__'):
            # to restrict to functions:
            # isinstance(initializer,
            #            (types.FunctionType, types.BuiltinFunctionType))
            if expand:
                # Precompute initializer(x, y) for every cell.
                for x in xrange(dim1):
                    self._array.append([])
                    for y in xrange(dim2):
                        self._array[x].append(initializer(x,y))
            else:
                # Lazy variant: row access returns a shim whose column
                # access calls the initializer on demand.
                class InitCont(object):
                    def __init__(self, x):
                        self._x = x
                    def __getitem__(self, y):
                        return initializer(self._x, y)
                self._getitem = lambda x: InitCont(x)
                self._setitem = self._raise_immutable
        self._dim1 = dim1
        self._dim2 = dim2
        if expand:
            # Only precomputed arrays can be shape/content checked here.
            verify_input_array(self._array, dim1, dim2)
    def _raise_immutable(self, *args):
        """Stand-in setter for immutable samples (expand=False)."""
        raise TypeError("Attempted change of state on an "
                        "immutable InputSample (created with "
                        "expand=False)")
    def _assign_to_array(self, k, v):
        """Default setter: write through to the backing array."""
        self._array[k] = v
    def __getitem__(self, k):
        return self._getitem(k)
    def __setitem__(self, k, v):
        self._setitem(k, v)
    @property
    def shape(self):
        """(dim1, dim2) tuple of this sample's dimensions."""
        return self._dim1, self._dim2
class RectilinearLayerAdapter(object):
    """Base class adapting PyNN layers."""
    def __init__(self, pynn_pop, dim1, dim2):
        # Each cell holds [auxiliary_slot, pynn_unit]; units are laid
        # out row-major in the underlying population (index = x*dim2+y).
        self.unit_adapters_mat = [
            [[None, pynn_pop[x * dim2 + y]] for y in xrange(dim2)]
            for x in xrange(dim1)
        ]
        self._dim1 = dim1
        self._dim2 = dim2
        self.pynn_population = pynn_pop
    @property
    def shape(self):
        """(dim1, dim2) of the adapted layer."""
        return self._dim1, self._dim2
    def __getitem__(self, i):
        """Row accessor into the adapters matrix."""
        return self.unit_adapters_mat[i]
    def get_unit(self, i, j):
        """Returns the PyNN unit at coordinates (i, j)."""
        return self.unit_adapters_mat[i][j][1]
# Default stimulation scale in nA for an input sample value of 1.
INPUT_LAYER_MAX_NAMP_DEFAULT = 100
class RectilinearInputLayer(RectilinearLayerAdapter):
    """Wraps a 2D array of electrodes with the same dimensions (dim1,
    dim2) as the PyNN population in which it injects current. The
    stimulation scale can be adjusted by providing the max input
    amplitude in nA."""
    def __init__(self, pynn_pop, dim1, dim2, max_namp=INPUT_LAYER_MAX_NAMP_DEFAULT):
        super(RectilinearInputLayer, self).__init__(pynn_pop, dim1, dim2)
        # Current, in nA, corresponding to an input sample value of 1.
        self.input_scaling = max_namp
    # DCSources have to be recreated each time.
    def apply_input(self, sample, start_time, duration,
                    max_namp = None, dcsource_class = pynnn.DCSource):
        """Given a sample of type InputSample and of same shape as the
        input layer, and a duration, creates and connects electrodes
        that apply the input specified by the input sample matrix to
        the input population. A max_namp value can be specified in
        nanoamperes to override the max current corresponding to an
        input value of 1 given at construction time. dcsource_class is
        here as a primitive dependency injection facility, for
        testing."""
        if max_namp == None:
            max_namp = self.input_scaling
        # TODO: Common current source for cells what should get the
        # exact same input
        for x in xrange(self._dim1):
            for y in xrange(self._dim2):
                # Will the GC collect the electrodes? Does PyNN delete
                # them after use?
                # One DC source per unit, scaled by the sample value,
                # active on [start_time, start_time + duration].
                self.unit_adapters_mat[x][y][0] = \
                    dcsource_class({"amplitude": max_namp * sample[x][y],
                                    "start" : start_time,
                                    "stop" : start_time+duration})
                self.unit_adapters_mat[x][y][0].inject_into(
                    [self.unit_adapters_mat[x][y][1]])
class RectilinearOutputRateEncoder(RectilinearLayerAdapter):
    """Keeps track of the weighted averages on a sliding window of the
    output rates of all units in the topographically rectilinear
    population of units. The update period can be overridden at update
    time."""
    # Default width of the sliding window in simulator time units. The
    # weight of past rates in activity calculation decreases linearly
    # so that it is 0 when window_width old, and 1 for sim.now()
    DEFAULT_WINDOW_WIDTH = 100
    DEFAULT_UPDATE_PERIOD = 10
    def __init__(self, pynn_pop, dim1, dim2,
                 update_period=DEFAULT_UPDATE_PERIOD,
                 window_width=DEFAULT_WINDOW_WIDTH):
        super(RectilinearOutputRateEncoder, self).__init__(pynn_pop, dim1, dim2)
        self.window_width = window_width
        self.update_period = update_period
        # the number of records needs to be one more than requested
        # because we are interested in the rate of firing, which is
        # the difference in total number of spikes fired between now
        # and 1 update period ago. In general, we need n+1 data points
        # to determine n such differences.
        self.hist_len = int(ceil(self.window_width/self.update_period)) + 1
        for x in xrange(self._dim1):
            for y in xrange(self._dim2):
                # Per-unit circular buffer of cumulative spike counts.
                # dtype=int is what dtype=numpy.int meant (the alias was
                # removed in modern numpy).
                self.unit_adapters_mat[x][y][0] = \
                    numpy.zeros(self.hist_len, dtype=int)
        self.idx = -1  # index of the most recent record; -1 = no update yet
        self.update_history = None # initialized at first update
    def extend_capacity(self, idx):
        """Adds one cell to all logging structures at position idx, and
        increments self.hist_len."""
        if idx == 0:
            # Edge case: extension at the end of the records
            idx = self.hist_len
        for x in xrange(self._dim1):
            for y in xrange(self._dim2):
                self.unit_adapters_mat[x][y][0] = numpy.concatenate(
                    (self.unit_adapters_mat[x][y][0][:idx],
                     [-1],
                     self.unit_adapters_mat[x][y][0][idx:]))
        self.update_history = numpy.concatenate(
            (self.update_history[:idx], [-1], self.update_history[idx:]))
        self.hist_len += 1
    def make_hist_weights_vec(self, update_history=None, window_width=None, idx=None):
        """ Returns the ndarray of weights by which to multiply the
        rates history vector to calculate the weighted recent activity
        of the unit. Parameters are the update times array
        (update_history), the rate averaging window width
        (window_width), and the current time index in the update times
        array (idx). If update_history is not provided,
        self.update_history is used. If window_width is not provided,
        self.window_width is used. If idx is not provided, self.idx
        is used. The weight for the oldest rate is the head of the
        array. The sum of weights is 1 if the update_history array
        covers at least the duration of window_width."""
        # `is None`, not `== None`: update_history may be an ndarray, and
        # `== None` broadcasts element-wise instead of testing the default.
        if update_history is None:
            update_history = self.update_history
        if idx is None:
            idx = self.idx
        if window_width is None:
            window_width = self.window_width
        # Unroll the circular buffer so the oldest entry comes first.
        update_hist = numpy.append(update_history[idx+1:],
                                   update_history[:idx+1])
        update_dt = numpy.diff(update_hist)
        cumsum_dt = update_dt[::-1].cumsum()[::-1] # reversed cumulative sum
        last_t = update_hist[-1]
        cutoff_t = last_t - window_width
        # l_h/r_h: left/right heights of the trapezoid that the linearly
        # decaying weight forms over each update interval.
        l_h = 1 - cumsum_dt / (window_width * 1.)
        r_h = 1 - (numpy.append(cumsum_dt[1:], [0]) / (window_width * 1.))
        areas = numpy.fromiter(
            itertools.imap(lambda i, x:
                # in window -> area; out -> 0; border -> triangle
                (l_h[i] + r_h[i]) * update_dt[i] if x <= window_width
                else max(abs(r_h[i]) * (update_hist[i + 1] - cutoff_t), 0),
                itertools.count(), cumsum_dt),
            float)
        return areas / window_width
    def advance_idx(self):
        """Moves the current-record pointer one slot forward (circular)."""
        self.idx = self.next_idx
    @property
    def next_idx(self):
        """Index one slot after self.idx in the circular buffer."""
        return self.idx_offset(1)
    @property
    def last_update_time(self):
        """Timestamp of the latest update, or None before any update."""
        if self.update_history is not None:
            return self.update_history[self.idx]
        return None
    @property
    def previous_idx(self):
        """Index one slot before self.idx in the circular buffer."""
        return self.idx_offset(-1)
    def idx_offset(self, offset):
        """Returns the value of the index with the (positive or negative)
        offset added."""
        return (self.idx + offset) % self.hist_len
    # The data structure for the rate history of one unit is a
    # circular list of rates, and an integer index (self.idx, common
    # to all units) pointing to the most recent record. The size of
    # this list is determined in __init__ by the window_width and
    # update_period. Each unit's history is kept in the
    # RectilinearLayerAdapter's unit_adapters_mat[x][y][0]. There is
    # an additional circular list of updates timestamps for testing.
    # We assume that the necessary recorders have been set up.
    def update_rates(self, t_now):
        """t_now is the timestamp for the current rates being recorded."""
        if self.idx != -1:
            # Not the first update, so the state is consistent.
            dt = t_now - self.update_history[self.idx]
            if dt < 0:
                raise SimulationError("update_rates was called with a past "
                                      "update time. Only monotonic updates "
                                      "are supported.")
            if dt == 0.:
                # It's a re-update of the current record! Let's rewind history!
                self.idx = self.previous_idx
            elif dt < self.update_period:
                # Premature update -> we may need to increase the arrays length
                # to have enough place to cover the full window width.
                # The total time covered by the rate log after idx increment
                # will be:
                total_covered_dt = t_now - \
                    self.update_history[self.next_idx]
                if total_covered_dt < self.window_width:
                    # The arrays are insufficient to cover the whole window
                    # width. We need to extend all arrays by one (add one entry
                    # to all logging structures).
                    self.extend_capacity(self.next_idx)
        else:
            # First update:
            # Initialize the update times log to past values to have a
            # consistent state without having to wait for the whole update
            # window to have been crawled once.
            self.update_history = t_now - self.update_period * \
                numpy.array([0] + range(self.hist_len-1, 0, -1))
        self.advance_idx()
        self.update_history[self.idx] = t_now
        rec = self.pynn_population.get_spike_counts()
        for x in xrange(self._dim1):
            for y in xrange(self._dim2):
                self.unit_adapters_mat[x][y][0][self.idx] = \
                    rec.get(self.pynn_population[x*self._dim2+y])
    def get_rates(self, t=None):
        """Returns the matrix of units weighted firing rates for the
        last t time units, or for the whole window width of this rate
        encoder if t is not specified."""
        r = numpy.zeros((self._dim1, self._dim2), dtype=float)
        for x in xrange(self._dim1):
            for y in xrange(self._dim2):
                r[x][y] = self.get_rate(x, y, t=t)
        return r
    def get_rate_for_unit_index(self, unit_index, t=None):
        """Returns the weighted rate of the unit at flat (row-major)
        index unit_index in the population."""
        # Units are laid out row-major (index = x * dim2 + y, see
        # update_rates and get_unit), so both coordinates must be
        # recovered with dim2. Dividing by dim1, as the code previously
        # did, is wrong for non-square layers.
        return self.get_rate(unit_index // self._dim2,
                             unit_index % self._dim2,
                             t=t)
    def get_rate(self, x, y, t=None):
        """Returns the weighted firing rate of unit (x, y)."""
        return self.f_rate(self.unit_adapters_mat[x][y][0], t=t)
    def f_rate(self, np_a, t=None, update_history=None):
        """Returns the weighted average of the rates recorded in the
        differences of the array np_a. The t parameter can be used to
        silence rate information older than t units of time, which is
        necessary to select the firing rate pertaining to one event
        only. If now-t does not fall on a recording boundary, the more
        recent boundary is used, otherwise the rate recording may be
        contaminated by spikes older than t. If that leaves no record
        available (i.e. t < age of previous record), an error is
        raised.

        The update_history parameter overrides the rate encoder's
        update history, it should only be used for testing."""
        if update_history is None:
            update_history = self.update_history
        # Unroll the circular buffers so the oldest entries come first.
        update_hist = numpy.append(update_history[self.idx+1:],
                                   update_history[:self.idx+1])
        cut_i = 0
        if t is not None:
            cut_t = sim.now() - t
            cut_i = numpy.searchsorted(update_hist, cut_t, side='left')
            # t must not be in the last interval:
            if cut_i >= len(update_hist) - 1:
                raise SimulationError("The rate encoder resolution is "
                                      "insufficient to get any rate "
                                      "data on the requested period.")
        update_hist = update_hist[cut_i:]
        update_dt = numpy.diff(update_hist) * 1.
        np_a = numpy.append(np_a[self.idx+1:], np_a[:self.idx+1])
        np_a = np_a[cut_i:]
        rates = numpy.diff(np_a)
        window_width = min(sum(update_dt), self.window_width) if t is not None \
            else self.window_width
        return self.make_hist_weights_vec(update_history=update_hist,
                                          window_width=window_width,
                                          idx=len(update_hist)
                                          ).dot(rates / update_dt)
    def __repr__(self):
        "Returns a string representation for debugging."
        old_printopt = numpy.get_printoptions()
        try:
            import sys
            numpy.set_printoptions(threshold=sys.maxint, suppress=True)
            import os
            rows, columns = map(int, os.popen('stty size', 'r').read().split())
            # We don't return the rates in self.unit_adapters_mat
            array_str = numpy.array_str(a=self.update_history,
                                        max_line_width=columns-26,
                                        precision=2) \
                if self.update_history is not None \
                else None
            r = (
                "RectilinearOutputRateEncoder(\n"
                "  self.pynn_pop = %r\n"
                "  self.shape = %r\n"
                "  self.window_width = %r\n"
                "  self.update_period = %r\n"
                "  self.hist_len = %r\n"
                "  self.idx = %r\n"
                "  self.update_history = %s\n"
                ")" ) % \
                (self.pynn_population,
                 (self._dim1, self._dim2),
                 self.window_width,
                 self.update_period,
                 self.hist_len,
                 self.idx,
                 array_str)
        finally:
            numpy.set_printoptions(**old_printopt)
        return r
def rectilinear_shape(population):
    """Returns the (dim1, dim2) shape of a PyNN population, derived from
    its unit positions when available, falling back to assuming a square
    layout otherwise.

    Raises TypeError if the shape can neither be retrieved nor guessed."""
    try:
        pos = population.positions
    except Exception:
        # No position information; only square populations can be guessed.
        # (Was `except Exception, e`: Python-2-only syntax, and the bound
        # exception was never used.)
        LOGGER.warning(("Could not retrieve units positions for population "
                        "%s; assuming square shape."), population.label)
        if not is_square(population.size):
            # Interpolate the label into the message; previously the format
            # string and the label were passed as two separate TypeError
            # arguments, so the message was never formatted.
            raise TypeError(("The shape population %s is not square and could "
                             "neither be retrieved nor guessed.")
                            % population.label)
        dim1 = dim2 = int(math.sqrt(population.size))
    else:
        # positions[1] holds one axis' coordinates, positions[0] the other;
        # the number of distinct values along each axis gives the shape.
        dim1 = len(set(pos[1]))
        dim2 = len(set(pos[0]))
    return (dim1, dim2)
# WARNING / TODO: The following function reveals a design flaw in
# pycogmo. PyNN is insufficient and its networks should be
# encapsulated along with more metadata.
def population_adpater_provider(pop_prov_dict,
                                provided_class,
                                population):
    """Factory function providing an adapter of the specified class
    for the population parameter. pop_prov_dict is a dictionary taking
    a (population, provided_class) tuple as key, and returning an
    instance of provided_class initialized with 3 arguments: the
    population, its size in the first dimension, and its size in the
    second dimension."""
    key = (population, provided_class)
    # `in` instead of dict.has_key: has_key is Python-2-only and was
    # removed in Python 3; the semantics are identical.
    if key in pop_prov_dict:
        return pop_prov_dict[key]
    LOGGER.warning("No %s for population %s, creating one.",
                   provided_class.__name__, population.label)
    dim1, dim2 = rectilinear_shape(population)
    inst = provided_class(population, dim1, dim2)
    # setdefault keeps the first registered instance should the same key
    # have been inserted in the meantime.
    return pop_prov_dict.setdefault(key, inst)
# Shared cache of layer adapters, keyed by (population, adapter_class).
POP_ADAPT_DICT = {}
# Memoized accessors binding the provider to the shared cache and to one
# concrete adapter class each.
get_input_layer = functools.partial(population_adpater_provider,
                                    POP_ADAPT_DICT,
                                    RectilinearInputLayer)
get_input_layer.__doc__ = ("Provides a unique input layer for the "
                           "given population.")
get_rate_encoder = functools.partial(population_adpater_provider,
                                     POP_ADAPT_DICT,
                                     RectilinearOutputRateEncoder)
get_rate_encoder.__doc__ = ("Provides a unique rectilinear output rate "
                            "encoder for the given population.")
def enable_recording(*p):
    """Turns on spike recorders for all populations in parameter"""
    for population in p:
        population.record(to_file=False)
|
PROVIDENCE — The Providence Place Mall was briefly evacuated on Labor Day after someone pulled a fire alarm.
Mall security told WBSM News that a person pulled the fire alarm at around 2:30 this afternoon, and the mall was evacuated.
The all-clear sign was given just after 3 p.m., and the mall has re-opened and is following its normal holiday schedule.
|
#!/usr/bin/env python
import sys, os, subprocess, urllib, logging
log = logging.getLogger(__name__)
# Bail out early on unsupported interpreters: this script targets
# Python 2.4-2.7 only.
major, minor = [int(i) for i in sys.version_info[:2]]
if major != 2 or minor < 4:
    sys.exit("""Please install python. Versions 2.4, 2.5, 2.6, and 2.7 should all work
If you have python installed, make sure that \"python\" is on your PATH
""")
# Bootstrap the virtualenv with go-pylons.py if it does not exist yet.
if not os.path.exists('mydevenv/bin/activate'):
    if not os.path.exists('go-pylons.py'):
        try:
            sys.stderr.write('Downloading go-pylons.py\n')
            o = open('go-pylons.py', 'w')
            o.write(urllib.urlopen('http://pylonshq.com/download/1.0/go-pylons.py').read())
            o.close()
        except Exception:
            # The previous code re-raised here, which made the helpful
            # sys.exit message below unreachable dead code; report the
            # friendly hint instead of a bare traceback.
            sys.exit("""The script needs go-pylons.py but the attempt to download it using urllib failed.
Please, download the go-pylons.py script from
http://pylonshq.com/download/1.0/go-pylons.py
and then place the downloaded script in this directory.
""")
    sys.stderr.write('Running go-pylons.py\n')
    if subprocess.call([sys.executable, 'go-pylons.py', '--no-site-packages', 'mydevenv']) != 0:
        sys.exit(1)
# Clone phyloplumber from GitHub unless a checkout is already present.
if not os.path.exists('phyloplumber'):
    try:
        result = subprocess.call(['git', 'clone', 'git://github.com/mtholder/phyloplumber.git'])
    except:
        # git itself could not be launched (e.g. not installed / not on PATH).
        sys.exit("""The attempt to pull down the latest version of phyloplumber using git failed.
If you do not have git installed, you can download it from http://git-scm.com
If you have installed git, make sure that it is on your path.""")
    if result != 0:
        # git ran but the clone failed.
        sys.exit(1)
# Substitution values for the generated shell scripts ('pp' = parent dir).
string_args = {'pp' : os.path.abspath(os.curdir) }
# Write a small env script that exports the phyloplumber directories and
# activates the virtualenv; skipped if it already exists.
if not os.path.exists('phyloplumber_env.sh'):
    sys.stdout.write("Creating phyloplumber_env.sh bash script\n")
    o = open('phyloplumber_env.sh', 'w')
    o.write('''#!/bin/sh
export PHYLOPLUMBER_PARENT="%(pp)s"
export PHYLOPLUMBER_ROOT=${PHYLOPLUMBER_PARENT}/phyloplumber
source ${PHYLOPLUMBER_PARENT}/mydevenv/bin/activate
''' % string_args)
    o.close()
# Prefer an existing dendropy checkout (lowercase or CamelCase directory
# name); remember the chosen name for the generated scripts.
if os.path.exists('dendropy'):
    string_args['dd'] = 'dendropy'
else:
    string_args['dd'] = 'DendroPy'
# Clone DendroPy from GitHub only if no checkout exists under the chosen name.
if not os.path.exists(string_args['dd']):
    try:
        result = subprocess.call(['git', 'clone', 'git://github.com/jeetsukumaran/DendroPy.git'])
    except:
        # git itself could not be launched.
        sys.exit("""The attempt to pull down the latest version of dendropy using git failed.
If you do not have git installed, you can download it from http://git-scm.com
If you have installed git, make sure that it is on your path.""")
    if result != 0:
        sys.exit(1)
if sys.platform.upper().startswith('WIN'):
    # On Windows the shell steps cannot be driven from here; print the
    # remaining manual instructions and stop.
    sys.exit("""At this point you will need to execute the "%(pp)s/mydevenv/bin/activate.bat" script, then
1. run "easy_install sphinx"
2. change the working directory to phyloplumber and run "python setup.py develop"
3. change the working directory to %(dd)s and run "python setup.py develop"
to finish the installation process.
You will need to execute the
  "%(pp)s/mydevenv/bin/activate.bat"
script each time you launch the phyloplumber server.
""" % string_args)
else:
    # Generate and run a bash script that finishes the install inside the
    # virtualenv (sphinx, dendropy and phyloplumber "develop" installs).
    fn = 'finish_phyloplumber_installation.sh'
    o = open(fn, 'w')
    o.write('''#!/bin/sh
source phyloplumber_env.sh || exit 1
################################################################################
# Install sphinx to the devenv
################################################################################
easy_install sphinx
################################################################################
# Checkout dendropy and use "setup.py develop" command to install it the dev env
################################################################################
cd %(dd)s || exit 1
python setup.py develop || exit 1
cd ..
################################################################################
# install phyloplumber using the "setup.py develop" command
################################################################################
cd phyloplumber || exit 1
python setup.py develop || exit 1
cd ..
echo "phyloplumber_env.sh has been written. Whenever you want to work on phyloplumber"
echo "  from the command line, then (from a bash shell) source this file to "
echo "  configure your environment"
''' % string_args)
    o.close()
    result = subprocess.call(['/bin/sh', fn])
    if result == 0:
        # Success: the one-shot helper script is no longer needed.
        os.remove(fn)
    else:
        sys.exit(1)
|
Mark A. Socinski, MD: Most of the regimens that we use to treat advanced lung cancer are administered as every-3 weeks regimens. For instance, for nonsquamous patients, the 2 typical combinations we use are either carboplatin with paclitaxel or carboplatin with pemetrexed plus or minus bevacizumab. These are drugs that are given once every 3 weeks, and this does not necessarily require a clinic visit in-between the 3 weeks, so they have a level of convenience that’s actually quite nice.
Now, when you’re in the clinic, depending upon the efficiency of the clinic, these regimens would typically take 5 to 6 hours to administer from the time you get there and have your blood checked—the doctors, or healthcare professionals, have to check you out and make sure it’s okay to go with chemotherapy. You have to have your labs checked. Someone has to order the chemotherapy; it just doesn’t appear. The pharmacist has to mix it up. The nurse has to administer the drug. So, by the time all of that’s done with these sorts of regimens, we’re talking, typically, about 5 or 6 hours.
Edward S. Kim, MD: We’ve now decided on treatment. Let’s, for example, say it’s going to be carboplatin/paclitaxel and Avastin. So, what happens now? We make sure we have your labs, which means your chemistries from your blood, your white blood cell count, and your red cell count. We will draw blood to make sure that we know what those levels are. We will then administer the drugs. You get those via an intravenous method. So, usually we either put in a line, which my nurse will talk about to you, or an IV. And usually you spend a few hours in infusion taking these drugs. They’re given once every 3 weeks, so we’ll repeat this process again in 3 weeks—blood work and then as long as the blood work looks good, we’ll continue with the therapy.
After we finish 2 cycles (two 3-week segments), we will repeat CT scans or imaging. And that’s when we take our first assessment to see if the tumor is shrinking, growing, or has stayed the same. Two of those answers we like. We like 1 of those answers better than the other, but we still accept 2 of those answers. The third one, the growing one, we don’t like so much. But it still can occur in some patients. We will then, as long as the tumor is the same size or is shrinking, do the exact same thing again. We would then move to cycle 3 and cycle 4 and repeat the imaging.
If everything looks good after 4 cycles (so that’s 4 x 3, that’s 12 weeks), we then assess whether it is appropriate to now go on to maintenance, which is just the single drug Avastin. Or if the tumor continues to shrink and the patient is tolerating it really well, then we decide if we should give cycle 5 and cycle 6 of the 3 drugs. That’s a decision point that’s made after 4 cycles.
My nurse will be here. A nurse should be helping you and can answer questions. We usually have a pharmacist and an ACP (advanced care provider), a nurse practitioner, or a PA (physician’s assistant). We also have navigators, or social workers, that can help. So there is a huge team involved when delivering your care—and that’s just in the clinic. When you go to the infusion room, there’s also a nurse or infusion specialist who’s able to help you there, as well as a pharmacist who’s mixing the drugs. So it’s a very large team. It can, again, be a little confusing on who to ask what question to. I would encourage you not to ask every question to every person who comes to you, but to be specific in where to direct those questions. Anyone will help you, but our team, here in the clinic, is the quarterback. That’s where a lot of these questions are relevant, and questions can be directed to any one of our team members here in the clinic as we make those decisions on how we’re proceeding forward with treatment and with what drugs.
|
"Script to generate the general views of Haldis"
import os
from datetime import datetime, timedelta
import yaml
from typing import Optional
from flask import Flask, render_template, make_response
from flask import request, jsonify
from flask import Blueprint, abort
from flask import current_app as app
from flask import send_from_directory, url_for
from flask_login import login_required
from utils import first
from hlds.definitions import location_definitions
from hlds.models import Location
from models import Order
# import views
from views.order import get_orders
import json
from flask import jsonify
general_bp = Blueprint("general_bp", __name__)
# Load the theme configuration shipped next to this module, once at import.
with open(os.path.join(os.path.dirname(__file__), "themes.yml"), "r") as _stream:
    _theme_data = yaml.safe_load(_stream)
THEME_OPTIONS = _theme_data["options"]  # option name -> possible values mapping
THEMES = _theme_data["themes"]  # list of theme description dicts
@general_bp.route("/")
def home() -> str:
"Generate the home view"
prev_day = datetime.now() - timedelta(days=1)
recently_closed = get_orders(
((Order.stoptime > prev_day) & (Order.stoptime < datetime.now()))
)
return render_template(
"home.html", orders=get_orders(), recently_closed=recently_closed
)
def is_theme_active(theme, now):
    """Return True when `theme` should be selectable at datetime `now`.

    `static` themes are always active. `seasonal` themes carry "day/month"
    `start` and `end` strings and are active inside that yearly window,
    including windows that wrap over New Year (e.g. 1/12 - 7/1).
    Raises Exception for unknown theme types."""
    theme_type = theme["type"]
    if theme_type == "static":
        return True
    if theme_type == "seasonal":
        start_day, start_month = map(int, theme["start"].split("/"))
        end_day, end_month = map(int, theme["end"].split("/"))
        start_datetime = datetime(year=now.year, month=start_month, day=start_day)
        end_datetime = datetime(year=now.year, month=end_month, day=end_day)
        if start_datetime <= end_datetime:
            # Window contained in a single calendar year.
            return start_datetime <= now <= end_datetime
        # Window wraps over New Year. The previous implementation pinned the
        # start to now.year and pushed only the end to next year, so the
        # theme went inactive as soon as the year rolled over (e.g. Jan 3
        # for a Dec 1 - Jan 7 season).
        return now >= start_datetime or now <= end_datetime
    raise Exception("Unknown theme type {}".format(theme_type))
def get_theme_css(theme, options):
    """Resolve the on-disk CSS file for `theme` given the chosen `options`."""
    # Build filename: each option's chosen value is appended, yielding
    # something like mytheme_darkmode_heavy.css
    name_parts = [theme["file"]]
    for option in theme.get("options", []):
        theme_name = theme["name"]
        assert option in THEME_OPTIONS, f"Theme `{theme_name}` uses undefined option `{option}`"
        chosen_value = options[option]
        if chosen_value in THEME_OPTIONS[option]:
            name_parts.append(chosen_value)
        else:
            # Unknown or missing cookie value: fall back to the default.
            name_parts.append(THEME_OPTIONS[option]["_default"])
    filename = "_".join(name_parts) + ".css"
    return os.path.join(app.root_path, "static/css/themes/", filename)
def get_active_themes():
    """List the themes that are available right now, in definition order."""
    now = datetime.now()
    return list(filter(lambda theme: is_theme_active(theme, now), THEMES))
@general_bp.route("/theme.css")
def theme_css():
"Send appropriate CSS for current theme"
themes = get_active_themes()
theme_name = request.cookies.get("theme", None)
theme = first((t for t in themes if t["file"] == theme_name), default=themes[-1])
options = {
name: request.cookies.get("theme_" + name, None)
for name in ["atmosphere", "performance"]
}
path = get_theme_css(theme, options)
with open(path) as f:
response = make_response(f.read())
response.headers["Content-Type"] = "text/css"
return response
@general_bp.route("/current_theme.js")
def current_theme_js():
themes = get_active_themes()
selected_theme_name = request.cookies.get("theme", None)
matching_theme = first((t for t in themes if t["file"] == selected_theme_name))
cur_theme = matching_theme or themes[-1]
response = make_response(rf'''
var currentTheme = {json.dumps(cur_theme['file'])};
var currentThemeOptions = {json.dumps(cur_theme.get('options', []))};
''')
response.headers["Content-Type"] = "text/javascript"
# Theme name that is not valid at this moment: delete cookie
if matching_theme is None:
response.delete_cookie("theme", path="/")
return response
@general_bp.route("/map")
def map_view() -> str:
"Generate the map view"
return render_template("maps.html", locations=location_definitions)
@general_bp.route("/location")
def locations() -> str:
"Generate the location view"
return render_template("locations.html", locations=location_definitions)
@general_bp.route("/location/<location_id>")
def location(location_id) -> str:
"Generate the location view given an id"
loc = first(filter(lambda l: l.id == location_id, location_definitions))
if loc is None:
abort(404)
return render_template("location.html", location=loc, title=loc.name)
@general_bp.route("/location/<location_id>/<dish_id>")
def location_dish(location_id, dish_id) -> str:
loc: Optional[Location] = first(
filter(lambda l: l.id == location_id, location_definitions)
)
if loc is None:
abort(404)
dish = loc.dish_by_id(dish_id)
if dish is None:
abort(404)
return jsonify([
{
"type": c[0],
"id": c[1].id,
"name": c[1].name,
"description": c[1].description,
"options": [
{
"id": o.id,
"name": o.name,
"description": o.description,
"price": o.price,
"tags": o.tags,
}
for o in c[1].options
],
}
for c in dish.choices
])
@general_bp.route("/about/")
def about() -> str:
"Generate the about view"
return render_template("about.html")
@general_bp.route("/profile/")
@login_required
def profile() -> str:
"Generate the profile view"
return render_template("profile.html", themes_list=get_active_themes())
@general_bp.route("/favicon.ico")
def favicon() -> str:
    """Serve the favicon; the orange variant while an order is still open."""
    open_orders = get_orders((Order.stoptime > datetime.now()))
    icon = "favicon_orange.ico" if open_orders else "favicon.ico"
    return send_from_directory(
        os.path.join(app.root_path, "static"),
        icon,
        mimetype="image/x-icon",
    )
|
also leads the country for foreign buyers selling off their stake of real estate.
Long the top choice for international buyers in the U.S., Florida also leads the country for foreign buyers selling off their real estate stakes.
The state drew 22 percent of the country’s international buyers purchasing property and 26 percent of the country’s sell-offs by foreign owners for the 12 months ending in March, according to the National Association of Realtors.
The sales reflect an opportune time to sell with few listings, particularly for Canadian investors who bought years ago, said Lawrence Yun, chief economist for the association.
Orange County property ownership records, meanwhile, show foreign addresses dwindled last year from five years ago, when bottomed-out prices drew investors from other countries — especially Canada.
Orlando real estate broker Armando Perez said he helped a Canadian couple purchase a $50,000 house in Deltona for investment purposes about five years ago. Late last year, he helped them sell it for about $150,000.
Owners with addresses in Canada, the United Kingdom, Brazil, Venezuela, China, France, Colombia, Australia, Singapore and Chile — the top 10 origins of foreign owners — dropped to 432 last year from 1,124 five years earlier. The analysis serves only as an indicator and does not include properties owned by foreign buyers who live here part time or list addresses of U.S. lawyers, companies and property managers to handle tax-related correspondence.
“They’re selling because they have investments here and purchased at a good time and, now that our dollar is rising, they’re able to get more for their money,” said Perez, a former Orange County deputy who heads Heroes Real Estate Group LLC.
The international-buyer market is important beyond sparking Central Florida’s economy. It also boosts local and state property tax coffers.
Foreign buyers spent $19.4 billion in Florida during a 12-month period that ended in late 2016, according to a National Association of Realtors report in December specifically for Florida. That was down 18 percent from the previous year.
Rising home prices, a strong U.S. dollar and tough talk about immigration at the federal level have created more uncertainties for agents marketing to foreign buyers.
But the emergence of Orlando City soccer and expansions at theme parks have boosted Central Florida’s appeal, as have higher prices in South Florida, said Kevin Mays, development director at BTI Partners which is building out Grove Resort and Spa in southwest Orange County near Walt Disney World.
Competition for buyers has increased, with developers employing new tactics to turn their heads. The Grove has brought in celebrities such as Brazilian singer Ivete Sangalo to drive excitement with prospective buyers.
About to launch its second phase with about 300 residences, the Grove employs Mandarin- and Portuguese-speaking agents.
Buyers from China often seek value and a return on their investment, said Orlando real estate broker Gloria Chu, based in Dr. Phillips. They will spend $350,000 if they think they can get $2,200 in monthly rental income. Chinese buyers who speak English are even taking on property management duties to increase their net profits, she added.
“The schools here are not as good as those in Boston and New York but, compared dollar to dollar, those buyers are getting a better bargain here,” she said, adding that direct flights from Orlando to Shanghai or Beijing would draw more potential buyers to the Orlando area.
Obstacles to international purchases include immigration restrictions, property taxes, exchange rates, U.S. tax laws, and costs and maintenance fees, according to the national real estate association’s survey released earlier this month. The British pound lost ground against the U.S. dollar, for instance, during a 12-month period that ended in March. And in Venezuela, runaway inflation has drawn buyers looking for a safe haven for their cash, according to the survey-based report.
Some survey respondents expressed concern about potential policy changes on immigration, trade, and international relations, the real estate group reported.
Buyers are starting to understand the federal-level talk about tougher immigration is aimed at people who come into the country illegally rather than investors, said Aurelio Martin, loan officer at SecurityNational Mortgage in Lake Mary, where he and his wife Doris specialize in loans to foreign buyers purchasing new homes as investments.
“Some clients from Mexico, we have had to reassure them that they were buying here legally,” he said.
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.exceptions import ValidationError # noqa
from django.forms import forms
from django.forms import widgets
from django.utils.encoding import force_unicode
from django.utils.functional import Promise # noqa
from django.utils.html import conditional_escape
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
import netaddr
import re
# Quick sanity pattern: only hex digits, colons, dots and a slash may
# appear in an IPv4/IPv6 address or CIDR string.
ip_allowed_symbols_re = re.compile(r'^[a-fA-F0-9:/\.]+$')
# Bit flags for IPField.version; pass IPv4 | IPv6 (== 3) to accept both.
IPv4 = 1
IPv6 = 2
class IPField(forms.Field):
    """Form field for entering IP/range values, with validation.
    Supports IPv4/IPv6 in the format:
    .. xxx.xxx.xxx.xxx
    .. xxx.xxx.xxx.xxx/zz
    .. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
    .. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/zz
    and all compressed forms. Also the short forms
    are supported:
    xxx/yy
    xxx.xxx/yy
    .. attribute:: version
    Specifies which IP version to validate,
    valid values are 1 (fields.IPv4), 2 (fields.IPv6) or
    both - 3 (fields.IPv4 | fields.IPv6).
    Defaults to IPv4 (1)
    .. attribute:: mask
    Boolean flag to validate subnet masks along with IP address.
    E.g: 10.0.0.1/32
    .. attribute:: mask_range_from
    Subnet range limitation, e.g. 16
    That means the input mask will be checked to be in the range
    16:max_value. Useful to limit the subnet ranges
    to A/B/C-class networks.
    """
    invalid_format_message = _("Incorrect format for IP address")
    invalid_version_message = _("Invalid version for IP address")
    invalid_mask_message = _("Invalid subnet mask")
    # Maximum prefix length per address family.
    max_v4_mask = 32
    max_v6_mask = 128
    def __init__(self, *args, **kwargs):
        # Pop the custom kwargs before handing the rest to forms.Field.
        self.mask = kwargs.pop("mask", None)
        self.min_mask = kwargs.pop("mask_range_from", 0)
        self.version = kwargs.pop('version', IPv4)
        super(IPField, self).__init__(*args, **kwargs)
    def validate(self, value):
        """Parse and validate `value`; stores the parsed object in self.ip
        so that clean() can return its normalized string form."""
        super(IPField, self).validate(value)
        # An optional field may legitimately be left empty.
        if not value and not self.required:
            return
        try:
            # netaddr does the parsing; IPNetwork additionally accepts a
            # /prefix when subnet masks are allowed.
            if self.mask:
                self.ip = netaddr.IPNetwork(value)
            else:
                self.ip = netaddr.IPAddress(value)
        except Exception:
            raise ValidationError(self.invalid_format_message)
        # Check the parsed family against the allowed version bit mask.
        if not any([self.version & IPv4 > 0 and self.ip.version == 4,
                    self.version & IPv6 > 0 and self.ip.version == 6]):
            raise ValidationError(self.invalid_version_message)
        if self.mask:
            # Prefix length must lie within [min_mask, family maximum].
            if self.ip.version == 4 and \
                    not self.min_mask <= self.ip.prefixlen <= self.max_v4_mask:
                raise ValidationError(self.invalid_mask_message)
            if self.ip.version == 6 and \
                    not self.min_mask <= self.ip.prefixlen <= self.max_v6_mask:
                raise ValidationError(self.invalid_mask_message)
    def clean(self, value):
        """Return the normalized string of the value parsed by validate()."""
        super(IPField, self).clean(value)
        return str(getattr(self, "ip", ""))
class MultiIPField(IPField):
    """IPField variant that accepts a comma-separated list of addresses."""
    def validate(self, value):
        """Validate every comma-separated entry, remembering each one."""
        self.addresses = []
        if not value:
            # Delegate empty-value handling (required/optional) to IPField.
            super(MultiIPField, self).validate(value)
            return
        for address in value.split(','):
            super(MultiIPField, self).validate(address)
            self.addresses.append(address)
    def clean(self, value):
        """Return the validated addresses re-joined with commas."""
        super(MultiIPField, self).clean(value)
        return str(','.join(getattr(self, "addresses", [])))
class SelectWidget(widgets.Select):
    """Select widget able to attach data-xxx attributes to its options.
    .. attribute:: data_attrs
    Iterable of attribute names on the choice object that are
    serialized as data-xxx attributes of the rendered option.
    If passed ('id', ), the output is:
    <option data-id="123">option_value</option>
    where 123 is the value of choice_value.id
    .. attribute:: transform
    Optional callable producing the display text for an option
    from the option object.
    """
    def __init__(self, attrs=None, choices=(), data_attrs=(), transform=None):
        self.data_attrs = data_attrs
        self.transform = transform
        super(SelectWidget, self).__init__(attrs, choices)
    def render_option(self, selected_choices, option_value, option_label):
        option_value = force_unicode(option_value)
        if option_value in selected_choices:
            other_html = u' selected="selected"'
        else:
            other_html = ''
        # Rich option objects (anything but plain strings/lazy strings)
        # carry the extra data attributes and may be transformed.
        if not isinstance(option_label, (basestring, Promise)):
            for data_attr in self.data_attrs:
                raw_value = getattr(option_label, data_attr, "")
                data_value = conditional_escape(force_unicode(raw_value))
                other_html += ' data-%s="%s"' % (data_attr, data_value)
            if self.transform:
                option_label = self.transform(option_label)
        return u'<option value="%s"%s>%s</option>' % (
            escape(option_value), other_html,
            conditional_escape(force_unicode(option_label)))
|
Since 1956, innovation, quality and craftsmanship have been part of the Williams Sonoma legacy. We’re proud to bring you the perfect complement to our Williams Sonoma Stemware collection. This handsome decanter was designed in-house in collaboration with expert sommeliers, winemakers and chefs. Its classic shape efficiently aerates both red and white varietals of any age before serving. Decanting allows older wines, particularly reds, to fully display their full clarity and true color, and it gives young vintages a chance to breathe and develop a complexity that normally comes with aging.
Designed by Williams Sonoma and hand blown from clear, lead-free glass exclusively for us by master glass blowers.
Aerates red and white wines and removes sediments from older wines.
33.8-oz. cap.; 4 1/4" diam., 9 3/4" high.
This item is shipped from our warehouse by UPS to arrive within 3-5 business days of our receiving your order.
|
# StartTarget.py -- Lustre action class : start (mount) target
# Copyright (C) 2009-2013 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""
This module contains a FSAction class to start a Lustre target.
"""
import os
from ClusterShell.Task import task_self
from Shine.Configuration.Globals import Globals
from Shine.Lustre.Actions.Action import FSAction, Result
class StartTarget(FSAction):
    """
    File system target start action class.
    Lustre, since 1.6, starts a target simply by mounting it.
    """
    NAME = 'start'
    def __init__(self, target, **kwargs):
        """Initialize the start action for `target`.
        Optional kwargs: 'mount_options' and 'mount_paths' mappings,
        keyed by target type.
        """
        FSAction.__init__(self, target, **kwargs)
        self.mount_options = kwargs.get('mount_options')
        self.mount_paths = kwargs.get('mount_paths')
    def _already_done(self):
        """Return a Result object if the target is already mounted."""
        # Already done?
        if self.comp.is_started():
            return Result("%s is already started" % self.comp.label)
        # LBUG #18624
        if not self.comp.dev_isblk:
            task_self().set_info("fanout", 1)
        return None
    def _prepare_cmd(self):
        """Mount file system target."""
        # If there is a user-defined path
        if self.mount_paths and self.comp.TYPE in self.mount_paths:
            mount_path = self.mount_paths[self.comp.TYPE]
        else:
            # Default mount path
            mount_path = "/mnt/$fs_name/$type/$index"
        # Replace variables
        var_map = {'index': str(self.comp.index),
                   'dev' : os.path.basename(self.comp.dev)}
        if self.comp.journal:
            var_map['jdev'] = os.path.basename(self.comp.journal.dev)
        mount_path = self._vars_substitute(mount_path, var_map)
        #
        # Build command
        #
        command = ["mkdir -p \"%s\"" % mount_path]
        command += ["&& /bin/mount -t lustre"]
        # Loop devices handling
        if not self.comp.dev_isblk:
            command.append("-o loop")
        options = []
        # Mount options from configuration
        if self.mount_options and self.mount_options.get(self.comp.TYPE):
            options += [self.mount_options.get(self.comp.TYPE)]
        # Mount options from command line
        if self.addopts:
            options += [self.addopts]
        # When device detection order is variable, jdev could have a different
        # major/minor than the one it has on previous mount.
        # In this case, we must be sure we use the current one to avoid error.
        #
        # (Note: We can use `blkid' instead of jdev and extract the current
        # journal UUID if we have issue using directly jdev path.)
        if self.comp.journal:
            majorminor = os.stat(self.comp.journal.dev).st_rdev
            options += ["journal_dev=%#x" % majorminor]
        if len(options):
            command.append('-o ' + ','.join(options))
        command.append(self.comp.dev)
        command.append(mount_path)
        return command
    def needed_modules(self):
        """Return the list of kernel modules needed to start this target."""
        if Globals().lustre_version_is_smaller('2.4') or \
           not Globals().lustre_version_is_smaller('2.5'):
            return ['lustre', 'ldiskfs']
        else:
            # lustre 2.4 needs fsfilt_ldiskfs
            return ['lustre', 'fsfilt_ldiskfs']
|
Attorney General Loretta Lynch unveiled the U.S. Justice Department’s “expansive new strategy for combating fraudulent goods” at a roundtable in Boston on Friday.
The plan, Lynch said, will involve closer collaboration between the FBI and the businesses, entrepreneurs and other industry leaders whose intellectual property is most at risk. It will also include a $3.2 million payout to state and local law enforcement agencies in 10 jurisdictions across the country through the department’s Intellectual Property Enforcement Program.
“Through this new approach, we intend to provide information and resources to individuals and companies that will help them identify and disrupt attempts on their intellectual property, extend greater protection to American commerce as a whole and safeguard the health and safety of individual Americans,” Lynch said.
Hacking and cyber bullying are also among the major intellectual property issues the justice department intends to combat more aggressively, Lynch noted.
“High-profile instances of hacking — even against large companies like Sony and Target — have demonstrated the seriousness of the threat all businesses face and have underscored the potential for sophisticated adversaries to inflict real and lasting harm,” Lynch said.
Footwear organizations, such as the American Apparel and Footwear Association, have brought significant attention to the impact of the proliferation of counterfeit goods on the shoe industry.
This year, the AAFA aggressively targeted giant China-based e-tailer Alibaba Group, accusing it of allowing the sale of counterfeit goods on its Taobao platform.
In response to the allegations, an Alibaba spokesperson told FN that the company had enacted a list of rules and procedures to address the potential sale of counterfeit goods on its websites.
China continues to be the No. 1 source of counterfeit goods, according to the latest government data.
|
"""
A Rankine vapor power cycle
"""
import cantera as ct
# Cycle parameters (isentropic efficiencies are dimensionless fractions)
eta_pump = 0.6 # pump isentropic efficiency
eta_turbine = 0.8 # turbine isentropic efficiency
p_max = 8.0e5 # maximum cycle (boiler) pressure
def pump(fluid, p_final, eta):
    """Compress `fluid` adiabatically to pressure `p_final` with a pump of
    isentropic efficiency `eta`, and return the specific work input."""
    h_in, s_in = fluid.h, fluid.s
    # Ideal (isentropic) outlet state at the target pressure.
    fluid.SP = s_in, p_final
    w_ideal = fluid.h - h_in
    # A real pump needs more work than the ideal one.
    w_actual = w_ideal / eta
    fluid.HP = h_in + w_actual, p_final
    return w_actual
def expand(fluid, p_final, eta):
    """Expand `fluid` adiabatically to pressure `p_final` through a turbine
    of isentropic efficiency `eta`, and return the specific work output."""
    h_in, s_in = fluid.h, fluid.s
    # Ideal (isentropic) outlet state at the back pressure.
    fluid.SP = s_in, p_final
    w_ideal = h_in - fluid.h
    # A real turbine extracts less work than the ideal one.
    w_actual = w_ideal * eta
    fluid.HP = h_in - w_actual, p_final
    return w_actual
def printState(n, fluid):
    """Print a numbered banner followed by the fluid's full state report."""
    banner = '\n***************** State {0} ******************'.format(n)
    print(banner)
    print(fluid.report())
if __name__ == '__main__':
    # create an object representing water
    w = ct.Water()
    # start with saturated liquid water at 300 K (vapor quality X = 0)
    w.TX = 300.0, 0.0
    h1 = w.h
    p1 = w.P
    printState(1, w)
    # pump it adiabatically to p_max
    pump_work = pump(w, p_max, eta_pump)
    h2 = w.h
    printState(2, w)
    # heat it at constant pressure until it reaches the saturated vapor state
    # (vapor quality X = 1) at this pressure
    w.PX = p_max, 1.0
    h3 = w.h
    heat_added = h3 - h2
    printState(3, w)
    # expand back to p1
    turbine_work = expand(w, p1, eta_turbine)
    printState(4, w)
    # thermal efficiency: net work out divided by heat added
    eff = (turbine_work - pump_work)/heat_added
    print('efficiency = ', eff)
|
Allied Forces have discovered an ancient relic, known as the Hourglass, on a forgotten island. The treasure in combination with its key, the Dagger of Time, allows the user to control and manipulate time. Axis command has learned of this discovery and has assembled an immediate landing party to capture the relics. Allied Forces have secured the island and are ready for any attack. To advance, the Axis Forces will require anything that will facilitate their progress throughout the island palace and fortress.
The Axis force will have to move swiftly to infiltrate the Palace in the hope of overcoming it, eventually leading them to the Fortress that guards the relics. The Allies must desperately slow their progress throughout the Palace and its gardens. If the Allies fail, the Fortress is their last chance.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to show a list of files which had or still have
conflicts.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSlot, Qt, QPoint, QProcess, QTimer
# QWidget lives in QtWidgets in Qt5 (it was in QtGui only in Qt4);
# importing it from QtGui raises ImportError with PyQt5.
from PyQt5.QtWidgets import QWidget, QAbstractButton, QDialogButtonBox, \
    QHeaderView, QTreeWidgetItem, QLineEdit, QApplication
from E5Gui import E5MessageBox
from E5Gui.E5Application import e5App
from .Ui_HgConflictsListDialog import Ui_HgConflictsListDialog
import Utilities.MimeTypes
class HgConflictsListDialog(QWidget, Ui_HgConflictsListDialog):
    """
    Class implementing a dialog to show a list of files which had or still
    have conflicts.
    """
    # Custom item-data roles on column 0: StatusRole holds the raw status
    # letter ("U"/"R"), FilenameRole the absolute path of the file.
    StatusRole = Qt.UserRole + 1
    FilenameRole = Qt.UserRole + 2
    def __init__(self, vcs, parent=None):
        """
        Constructor
        @param vcs reference to the vcs object
        @param parent parent widget (QWidget)
        """
        super(HgConflictsListDialog, self).__init__(parent)
        self.setupUi(self)
        # Remembered window position, restored on the next show().
        self.__position = QPoint()
        self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)
        self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)
        self.conflictsList.headerItem().setText(
            self.conflictsList.columnCount(), "")
        self.conflictsList.header().setSortIndicator(0, Qt.AscendingOrder)
        self.refreshButton = self.buttonBox.addButton(
            self.tr("&Refresh"), QDialogButtonBox.ActionRole)
        self.refreshButton.setToolTip(
            self.tr("Press to refresh the list of conflicts"))
        self.refreshButton.setEnabled(False)
        self.vcs = vcs
        self.project = e5App().getObject("Project")
        # If a Mercurial client object is available, commands run through
        # it; otherwise a QProcess is used and its output parsed via slots.
        self.__hgClient = vcs.getClient()
        if self.__hgClient:
            self.process = None
        else:
            self.process = QProcess()
            self.process.finished.connect(self.__procFinished)
            self.process.readyReadStandardOutput.connect(self.__readStdout)
            self.process.readyReadStandardError.connect(self.__readStderr)
    def closeEvent(self, e):
        """
        Protected slot implementing a close event handler.
        @param e close event (QCloseEvent)
        """
        if self.__hgClient:
            if self.__hgClient.isExecuting():
                self.__hgClient.cancel()
        else:
            if self.process is not None and \
               self.process.state() != QProcess.NotRunning:
                # Ask the process to terminate; force-kill after 2 seconds.
                self.process.terminate()
                QTimer.singleShot(2000, self.process.kill)
                self.process.waitForFinished(3000)
        self.__position = self.pos()
        e.accept()
    def show(self):
        """
        Public slot to show the dialog.
        """
        if not self.__position.isNull():
            self.move(self.__position)
        super(HgConflictsListDialog, self).show()
    def start(self, path):
        """
        Public slot to start the conflicts list command.
        @param path name of directory to list conflicts for (string)
        """
        self.errorGroup.hide()
        QApplication.processEvents()
        self.intercept = False
        dname, fname = self.vcs.splitPath(path)
        # find the root of the repo by walking up until the vcs admin
        # directory (e.g. .hg) is found; give up at the drive root
        self.__repodir = dname
        while not os.path.isdir(
                os.path.join(self.__repodir, self.vcs.adminDir)):
            self.__repodir = os.path.dirname(self.__repodir)
            if os.path.splitdrive(self.__repodir)[1] == os.sep:
                return
        self.activateWindow()
        self.raise_()
        self.conflictsList.clear()
        self.__started = True
        self.__getEntries()
    def __getEntries(self):
        """
        Private method to get the conflict entries.
        """
        # Runs 'hg resolve --list' in the repository directory.
        args = self.vcs.initCommand("resolve")
        args.append('--list')
        if self.__hgClient:
            self.inputGroup.setEnabled(False)
            self.inputGroup.hide()
            out, err = self.__hgClient.runcommand(args)
            if err:
                self.__showError(err)
            if out:
                for line in out.splitlines():
                    self.__processOutputLine(line)
                    if self.__hgClient.wasCanceled():
                        break
            self.__finish()
        else:
            self.process.kill()
            self.process.setWorkingDirectory(self.__repodir)
            self.process.start('hg', args)
            procStarted = self.process.waitForStarted(5000)
            if not procStarted:
                self.inputGroup.setEnabled(False)
                self.inputGroup.hide()
                E5MessageBox.critical(
                    self,
                    self.tr('Process Generation Error'),
                    self.tr(
                        'The process {0} could not be started. '
                        'Ensure, that it is in the search path.'
                    ).format('hg'))
            else:
                self.inputGroup.setEnabled(True)
                self.inputGroup.show()
    def __finish(self):
        """
        Private slot called when the process finished or the user pressed
        the button.
        """
        if self.process is not None and \
           self.process.state() != QProcess.NotRunning:
            self.process.terminate()
            QTimer.singleShot(2000, self.process.kill)
            self.process.waitForFinished(3000)
        QApplication.restoreOverrideCursor()
        self.buttonBox.button(QDialogButtonBox.Close).setEnabled(True)
        self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(False)
        self.buttonBox.button(QDialogButtonBox.Close).setDefault(True)
        self.inputGroup.setEnabled(False)
        self.inputGroup.hide()
        self.refreshButton.setEnabled(True)
        self.__resizeColumns()
        self.__resort()
        # Update the action buttons for the current selection.
        self.on_conflictsList_itemSelectionChanged()
    @pyqtSlot(QAbstractButton)
    def on_buttonBox_clicked(self, button):
        """
        Private slot called by a button of the button box clicked.
        @param button button that was clicked (QAbstractButton)
        """
        if button == self.buttonBox.button(QDialogButtonBox.Close):
            self.close()
        elif button == self.buttonBox.button(QDialogButtonBox.Cancel):
            if self.__hgClient:
                self.__hgClient.cancel()
            else:
                self.__finish()
        elif button == self.refreshButton:
            self.on_refreshButton_clicked()
    def __procFinished(self, exitCode, exitStatus):
        """
        Private slot connected to the finished signal.
        @param exitCode exit code of the process (integer)
        @param exitStatus exit status of the process (QProcess.ExitStatus)
        """
        self.__finish()
    def __resort(self):
        """
        Private method to resort the tree.
        """
        self.conflictsList.sortItems(
            self.conflictsList.sortColumn(),
            self.conflictsList.header().sortIndicatorOrder())
    def __resizeColumns(self):
        """
        Private method to resize the list columns.
        """
        self.conflictsList.header().resizeSections(
            QHeaderView.ResizeToContents)
        self.conflictsList.header().setStretchLastSection(True)
    def __generateItem(self, status, name):
        """
        Private method to generate a conflict item in the conflicts list.
        @param status status of the file (string)
        @param name name of the file (string)
        """
        itm = QTreeWidgetItem(self.conflictsList)
        # Map hg's status letter to a translated display string.
        if status == "U":
            itm.setText(0, self.tr("Unresolved"))
        elif status == "R":
            itm.setText(0, self.tr("Resolved"))
        else:
            itm.setText(0, self.tr("Unknown Status"))
        itm.setText(1, name)
        itm.setData(0, self.StatusRole, status)
        itm.setData(0, self.FilenameRole, self.project.getAbsolutePath(name))
    def __readStdout(self):
        """
        Private slot to handle the readyReadStdout signal.
        It reads the output of the process, formats it and inserts it into
        the contents pane.
        """
        self.process.setReadChannel(QProcess.StandardOutput)
        while self.process.canReadLine():
            s = str(self.process.readLine(), self.vcs.getEncoding(),
                    'replace').strip()
            self.__processOutputLine(s)
    def __processOutputLine(self, line):
        """
        Private method to process the lines of output.
        @param line output line to be processed (string)
        """
        # Line format: '<status letter> <filename>'; split only once so
        # filenames containing whitespace stay intact.
        status, filename = line.strip().split(None, 1)
        self.__generateItem(status, filename)
    @pyqtSlot()
    def on_refreshButton_clicked(self):
        """
        Private slot to refresh the list of conflicts.
        """
        self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)
        self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)
        self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)
        self.inputGroup.setEnabled(True)
        self.inputGroup.show()
        self.refreshButton.setEnabled(False)
        self.start(self.__repodir)
    def __readStderr(self):
        """
        Private slot to handle the readyReadStderr signal.
        It reads the error output of the process and inserts it into the
        error pane.
        """
        if self.process is not None:
            s = str(self.process.readAllStandardError(),
                    self.vcs.getEncoding(), 'replace')
            self.__showError(s)
    def __showError(self, out):
        """
        Private slot to show some error.
        @param out error to be shown (string)
        """
        self.errorGroup.show()
        self.errors.insertPlainText(out)
        self.errors.ensureCursorVisible()
    def on_passwordCheckBox_toggled(self, isOn):
        """
        Private slot to handle the password checkbox toggled.
        @param isOn flag indicating the status of the check box (boolean)
        """
        if isOn:
            self.input.setEchoMode(QLineEdit.Password)
        else:
            self.input.setEchoMode(QLineEdit.Normal)
    @pyqtSlot()
    def on_sendButton_clicked(self):
        """
        Private slot to send the input to the mercurial process.
        """
        input = self.input.text()
        input += os.linesep
        # Never echo passwords into the error pane.
        if self.passwordCheckBox.isChecked():
            self.errors.insertPlainText(os.linesep)
            self.errors.ensureCursorVisible()
        else:
            self.errors.insertPlainText(input)
            self.errors.ensureCursorVisible()
        self.process.write(input)
        self.passwordCheckBox.setChecked(False)
        self.input.clear()
    def on_input_returnPressed(self):
        """
        Private slot to handle the press of the return key in the input field.
        """
        self.intercept = True
        self.on_sendButton_clicked()
    def keyPressEvent(self, evt):
        """
        Protected slot to handle a key press event.
        @param evt the key press event (QKeyEvent)
        """
        # Swallow the key event that triggered on_input_returnPressed.
        if self.intercept:
            self.intercept = False
            evt.accept()
            return
        super(HgConflictsListDialog, self).keyPressEvent(evt)
    @pyqtSlot(QTreeWidgetItem, int)
    def on_conflictsList_itemDoubleClicked(self, item, column):
        """
        Private slot to open the double clicked entry.
        @param item reference to the double clicked item (QTreeWidgetItem)
        @param column column that was double clicked (integer)
        """
        self.on_editButton_clicked()
    @pyqtSlot()
    def on_conflictsList_itemSelectionChanged(self):
        """
        Private slot to handle a change of selected conflict entries.
        """
        selectedCount = len(self.conflictsList.selectedItems())
        unresolved = resolved = 0
        for itm in self.conflictsList.selectedItems():
            status = itm.data(0, self.StatusRole)
            if status == "U":
                unresolved += 1
            elif status == "R":
                resolved += 1
        self.resolvedButton.setEnabled(unresolved > 0)
        self.unresolvedButton.setEnabled(resolved > 0)
        self.reMergeButton.setEnabled(unresolved > 0)
        # Editing is only offered for a single selected text file.
        self.editButton.setEnabled(
            selectedCount == 1 and
            Utilities.MimeTypes.isTextFile(
                self.conflictsList.selectedItems()[0].data(
                    0, self.FilenameRole)))
    @pyqtSlot()
    def on_resolvedButton_clicked(self):
        """
        Private slot to mark the selected entries as resolved.
        """
        names = [
            itm.data(0, self.FilenameRole)
            for itm in self.conflictsList.selectedItems()
            if itm.data(0, self.StatusRole) == "U"
        ]
        if names:
            self.vcs.hgResolved(names)
            self.on_refreshButton_clicked()
    @pyqtSlot()
    def on_unresolvedButton_clicked(self):
        """
        Private slot to mark the selected entries as unresolved.
        """
        names = [
            itm.data(0, self.FilenameRole)
            for itm in self.conflictsList.selectedItems()
            if itm.data(0, self.StatusRole) == "R"
        ]
        if names:
            self.vcs.hgResolved(names, unresolve=True)
            self.on_refreshButton_clicked()
    @pyqtSlot()
    def on_reMergeButton_clicked(self):
        """
        Private slot to re-merge the selected entries.
        """
        names = [
            itm.data(0, self.FilenameRole)
            for itm in self.conflictsList.selectedItems()
            if itm.data(0, self.StatusRole) == "U"
        ]
        if names:
            self.vcs.hgReMerge(names)
    @pyqtSlot()
    def on_editButton_clicked(self):
        """
        Private slot to open the selected file in an editor.
        """
        itm = self.conflictsList.selectedItems()[0]
        filename = itm.data(0, self.FilenameRole)
        if Utilities.MimeTypes.isTextFile(filename):
            e5App().getObject("ViewManager").getEditor(filename)
|
Ilhan Omar is telling the truth. How is that anti-Semitic?
What moves the wheels of American politics? Is it a dedicated tireless commitment to public service? A strong desire to better the lives of constituents? A genuine ideology? Maybe sometimes, in the odd rare case. But more often that not, it’s money.
Money funds elections, it funds events all over Washington, it funds lobbyists who work tirelessly to make their cause seem like the only thing worth caring about at any given moment. Single issue partisan groups like the NRA, J Street and Emily’s List spent over $300m in 2018, over $230m of which went directly to candidates. Call me naive, but it seems possible that those donations, often vital to win closely contested districts, could perhaps have an impact on those candidate’s views once elected.
PACs and lobbyists wouldn’t splurge money on politicians if it didn’t have an impact. The sheer volume of the money flying around the Beltway means that if a politician doesn’t take money from someone, they’re behind. So why is it so controversial for Minnesota congresswoman Ilhan Omar to suggest that some people’s views on Israel may be shaped by a pro-Israel lobby?
Kevin McCarthy is decidedly not Jewish. Accusing him of being oversensitive to criticism of Israel is not anti-Semitic. Neither is describing how the pro-Israel lobby works. The activity of AIPAC and other groups like them is completely valid, but they shouldn’t be above criticism: this is America, no one is. Besides, Omar didn’t accuse American Jews of anything untoward. She just said that pro-Israel groups spend a lot of money in Washington, which is a fact.
I don’t know if deep down Ilhan Omar is an anti-Semite. Her support for the BDS movement and her 2012 tweet saying Israel has ‘hypnotized’ the world make it pretty clear that she’s no big fan of Zionism, but it seems like the ones making the connection between American Jews and Israel are her critics, looking for an excuse to take a shot at her. Compared to those on the British left, her comments are tepid, by no means beyond the pale.
She didn’t imply for example, that American Zionists didn’t understand irony, she didn’t refer to Hamas as her ‘friends’ or refuse to accept a widely held definition of anti-Semitism because it didn’t suit her personal views.
Ilhan Omar may not like Israel. She may hate the idea of the Jewish state and bristle at the presence of a strong pro-Israel lobby in Washington. But none of those things are crimes. When Jewish supporters of Israel take every attack on their views as a threat to Jewish life, when they play the anti-Semitism card the second AIPAC is questioned, it cements in the minds of casual observers that Israel is the only thing they care about.
Plenty of Jewish people don’t support Israel, and plenty do. Instantly branding every critique of Israel or pro-Israel groups as an anti-Semitic attack does neither of them any favors.
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cta_event.proto
#
# Auto-generated module defining the `cta_event.CTAEvent` message.
# Hand edits will be lost on regeneration; change cta_event.proto instead.
import sys
# Py2/Py3 shim: descriptor blobs are native str on Python 2 and must be
# latin-1-encoded bytes on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor reconstructed from the serialized cta_event.proto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='cta_event.proto',
  package='cta_event',
  serialized_pb=_b('\n\x0f\x63ta_event.proto\x12\tcta_event\"2\n\x08\x43TAEvent\x12\x14\n\x0ctelescope_id\x18\x01 \x02(\r\x12\x10\n\x04\x64\x61ta\x18\x04 \x03(\x02\x42\x02\x10\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Message descriptor for CTAEvent:
#   field 1: required uint32 `telescope_id` (type=13/uint32, label=2/required)
#   field 4: repeated float `data` (type=2/float, label=3/repeated, packed)
_CTAEVENT = _descriptor.Descriptor(
  name='CTAEvent',
  full_name='cta_event.CTAEvent',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='telescope_id', full_name='cta_event.CTAEvent.telescope_id', index=0,
      number=1, type=13, cpp_type=3, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data', full_name='cta_event.CTAEvent.data', index=1,
      number=4, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=30,
  serialized_end=80,
)
DESCRIPTOR.message_types_by_name['CTAEvent'] = _CTAEVENT
# Concrete message class generated via the reflection metaclass.
CTAEvent = _reflection.GeneratedProtocolMessageType('CTAEvent', (_message.Message,), dict(
  DESCRIPTOR = _CTAEVENT,
  __module__ = 'cta_event_pb2'
  # @@protoc_insertion_point(class_scope:cta_event.CTAEvent)
  ))
_sym_db.RegisterMessage(CTAEvent)
# `\020\001` marks the repeated `data` field as packed on the wire.
_CTAEVENT.fields_by_name['data'].has_options = True
_CTAEVENT.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
|
IDNA Convert is a pure PHP implementation of IDNA, which allows to convert internationalized domain names.
It implements most of the relevant RFCs such as RFC3490, RFC3491, RFC3492 and RFC3454 as well as RFC5890, RFC5891, RFC5892, RFC5893 and RFC5894.
See the documentation to learn how to work with IDNA Convert.
See the demo page for a live preview of the class.
Go to the downloads page to find out how to acquire it through Composer or GitHub.
See the about page for more info about the author and contributors.
|
"""
Provides the command-line interface for pymake2.
"""
#---------------------------------------
# IMPORTS
#---------------------------------------
import os
import sys
from pymake2 import report
from pymake2.cli import info, options
from pymake2.core import makeconf
from pymake2.core.exceptions import NoSuchTargetError
from pymake2.core.maker import Maker
from pymake2.core.target import Target
from pymake2.utils import color
#---------------------------------------
# CONSTANTS
#---------------------------------------
# Exit code when a fatal error has been encountered.
EXIT_FATAL = -1
# Exit code when all went well.
EXIT_MAKE_OK = 0
# Exit code when there was nothing to do.
EXIT_NO_MAKE = 1
#---------------------------------------
# GLOBALS
#---------------------------------------
# Pymake exit code.  NOTE(review): never reassigned in this module; the
# final `sys.exit(exit_code)` in pymake2() is currently commented out.
exit_code = EXIT_MAKE_OK
#---------------------------------------
# FUNCTIONS
#---------------------------------------
def fatal(s, *args):
    """Print a formatted fatal-error message (red unless color is
    disabled) and abort the process with EXIT_FATAL."""
    message = "fatal: " + s.format(*args)
    if not options.disable_color:
        message = color.red(message)
    println(message)
    sys.exit(EXIT_FATAL)
def println(s=None, *args):
    """Print *s* formatted with *args*, or a blank line when *s* is falsy.

    Uses function-call syntax so the module can be imported under both
    Python 2 (where ``print(expr)`` prints the single parenthesized
    expression) and Python 3; the original ``print s`` statement was a
    SyntaxError on Python 3.
    """
    if s:
        print(s.format(*args))
    else:
        # print("") emits a bare newline on both Python 2 and 3, unlike
        # print() which would print an empty tuple on Python 2.
        print("")
def pymake2(conf=None, args=None):
    """Entry point for the pymake2 command-line interface.

    :param conf: Optional configuration, either a plain dict (converted via
                 makeconf.from_dict) or an existing configuration object.
    :param args: Optional argument list used instead of the real command
                 line; argv[0] is prepended so indexing stays uniform.
    """
    args = sys.argv if args is None else [sys.argv[0]] + args
    # Keep arguments beginning with two hyphens.
    opts = [arg for arg in args if arg.startswith('--')]
    # Keep arguments *not* beginning with two hyphens.
    args = [arg for arg in args if arg not in opts]
    # Parse command line options.  Must happen before the config merge
    # below, since it may populate options.conf.
    options.parse(opts)
    # Normalize a dict configuration into a makeconf object.
    if conf and isinstance(conf, dict):
        conf = makeconf.from_dict(conf)
    # Fall back to the command-line config, then to an empty config.
    conf = conf or options.conf or makeconf.from_dict({})
    # A config given on the command line is merged on top of the result.
    if options.conf:
        conf = makeconf.merge(conf, options.conf)
    # Validate targets and abort early on reported errors.
    Maker.inst().check_targets()
    report_problems()
    # Remaining positional arguments (after argv[0]) name the targets.
    targets = args[1:]
    if not targets:
        targets = [ None ]  # None requests the default target.
    for name in targets:
        if not name and not Maker.inst().def_target:
            println("\nNo target specified and there is no default target.")
            info.print_targets()
            sys.exit(EXIT_NO_MAKE)
        try:
            Maker.inst().make(name, conf)
        except NoSuchTargetError as e:
            fatal("no such target: '{}'", e.target_name)
    #sys.exit(exit_code)
def report_problems():
    """Print all reported problems (errors in red, warnings in yellow
    unless color is disabled) and abort if any problem is an error."""
    found_error = False
    for problem in report.problems():
        if problem.is_error:
            found_error = True
        text = problem.text
        if not options.disable_color:
            text = color.red(text) if problem.is_error else color.yellow(text)
        # Warnings can be suppressed; errors are always shown.
        if problem.is_error or not options.disable_warnings:
            println(text)
    if found_error:
        fatal("there were errors; aborting.")
|
PlanetHardware is a comprehensive, easy-to-navigate site with many CPU, mainboard, and monitor reviews, to name but a few product categories covered. Each section shows recent reviews, previews, and articles. The site also features roundups and shootouts of Intel and AMD CPUs.
|
import time
import numpy as np
from typing import NamedTuple
from sdc.io.csv_ext import to_varname
from sdc.tests.test_utils import *
class CallExpression(NamedTuple):
    """
    Describes one call expression to benchmark.

    code: function or method call as a string
    type_: type of function performed (Python, Numba, SDC)
    jitted: option indicating whether to jit call
    """
    code: str
    type_: str
    jitted: bool
class TestCase(NamedTuple):
    """
    Declarative description of one generated performance test.

    name: name of the API item, e.g. method, operator
    size: size of the generated data for tests
    params: method parameters in format 'par1, par2, ...'
    call_expr: call expression as a string, e.g. '(A+B).sum()' where A, B are Series or DF;
        may also be a list of CallExpression; derived automatically when None
    usecase_params: input parameters for usecase in format 'par1, par2, ...', e.g. 'data, other';
        derived automatically when None
    data_num: total number of generated data, e.g. 2 (data, other)
    input_data: input data for generating test data
    skip: flag for skipping a test
    """
    name: str
    size: list
    params: str = ''
    call_expr: str = None
    usecase_params: str = None
    data_num: int = 1
    input_data: list = None
    skip: bool = False
def to_varname_without_excess_underscores(string):
    """Convert *string* to a variable name and collapse runs of underscores."""
    parts = to_varname(string).split('_')
    return '_'.join(part for part in parts if part)
def generate_test_cases(cases, class_add, typ, prefix=''):
    """Attach one generated test method to *class_add* per entry in *cases*."""
    for case in cases:
        name_parts = ['test', typ, prefix, case.name, gen_params_wo_data(case)]
        method_name = to_varname_without_excess_underscores('_'.join(name_parts))
        setattr(class_add, method_name, gen_test(case, prefix))
def gen_params_wo_data(test_case):
    """Generate API item parameters without parameters with data, e.g. without parameter other"""
    # The first (data_num - 1) parameters carry extra data and are dropped.
    skip_count = test_case.data_num - 1
    return ', '.join(test_case.params.split(', ')[skip_count:])
def gen_usecase_params(test_case):
    """Generate usecase parameters based on method parameters and number of extra generated data"""
    extra_count = test_case.data_num - 1
    extras = test_case.params.split(', ')[:extra_count]
    return ', '.join(['data'] + extras)
def gen_call_expr(test_case, prefix):
    """Generate call expression based on method name and parameters and method prefix, e.g. str"""
    parts = ['data']
    if prefix:
        parts.append(prefix)
    parts.append('{}({})'.format(test_case.name, test_case.params))
    return '.'.join(parts)
def gen_test(test_case, prefix):
    """Build one test method for *test_case* by exec-ing a generated source string."""
    func_name = 'func'
    usecase = gen_usecase(test_case, prefix)
    if test_case.skip:
        skip = '@skip_numba_jit\n'
    else:
        skip = ''
    if test_case.params:
        test_name = f'{test_case.name}({test_case.params})'
    else:
        test_name = test_case.name
    func_text = f"""
{skip}def {func_name}(self):
    self._test_case(usecase, name='{test_name}', total_data_length={test_case.size},
                    data_num={test_case.data_num}, input_data={test_case.input_data})
"""
    # The generated body resolves `usecase` and `skip_numba_jit` from these globals.
    namespace = {}
    exec(func_text, {'usecase': usecase, 'skip_numba_jit': skip_numba_jit}, namespace)
    return namespace[func_name]
def create_func(usecase_params, call_expr):
    """Compile a timing wrapper around *call_expr* and return the function.

    The returned function takes *usecase_params* and returns a
    (elapsed_seconds, result) tuple.
    """
    func_name = 'func'
    func_text = f"""
def {func_name}({usecase_params}):
    start_time = time.time()
    res = {call_expr}
    finish_time = time.time()
    return finish_time - start_time, res
"""
    # Module globals supply `time` for the generated body.
    namespace = {}
    exec(func_text, globals(), namespace)
    return namespace[func_name]
def gen_usecase(test_case, prefix):
    """Create the usecase callable(s) for *test_case*.

    When the case has no explicit call expression, both the expression and
    the usecase parameters are derived from the case's metadata.  A list of
    CallExpression entries yields a list of descriptor dicts instead of a
    single function.
    """
    usecase_params = test_case.usecase_params
    call_expr = test_case.call_expr
    if call_expr is None:
        if usecase_params is None:
            usecase_params = gen_usecase_params(test_case)
        call_expr = gen_call_expr(test_case, prefix)
    if not isinstance(call_expr, list):
        return create_func(usecase_params, call_expr)
    return [
        {
            'func': create_func(usecase_params, item.code),
            'type_': item.type_,
            'jitted': item.jitted,
        }
        for item in call_expr
    ]
|
For skin care products, my summer fav is definitely the amazing Curél Hydra Therapy Wet Skin Moisturizer. You will notice its magical skin-soothing effects most dramatically after extreme sun like I experienced on my San Andres, Colombia and Cancun, Mexico trips. If you don’t know what a “wet skin moisturizer” is, read my post on it here!
Heavy sun exposure may give your skin a beautiful bronze glow but can also cause dryness, irritation, sunburn, peeling, or rough patches. To keep the glow and protect against the rest, I use an SPF with a minimum of 30 while in the sun, and I hydrate my skin by applying the Curél Hydra Therapy Wet Skin Moisturizer right out of the shower. This locks in all the moisture and helps instantly smooth, soften, and re-hydrate your skin.
I also love to apply this moisturizer after I exfoliate my skin. Salt or sugar scrubs are a great way to remove dead skin or build-up from the surface of your skin and also improve circulation to the skin to reduce the appearance of cellulite. In the summer I do these often, like once or twice a week, and finish it with my Curél Hydra Therapy Wet Skin Moisturizer!
PREVIOUSMY TOP 3 #LikeAGirl MOMENTS FROM RIO 2016!
NEXTTHE FEMME IN BLOOM BRUNCH TOUR – RSVP YOUR SEAT NOW!
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from operator import attrgetter
from flask import flash, jsonify, redirect, request
from werkzeug.exceptions import NotFound
from indico.core.db.sqlalchemy.protection import ProtectionMode, render_acl
from indico.core.permissions import (get_available_permissions, get_permissions_info, get_principal_permissions,
update_permissions)
from indico.modules.categories.models.roles import CategoryRole
from indico.modules.categories.util import serialize_category_role
from indico.modules.core.controllers import PrincipalsMixin
from indico.modules.events import Event
from indico.modules.events.controllers.base import RHAuthenticatedEventBase
from indico.modules.events.management.controllers.base import RHManageEventBase
from indico.modules.events.management.forms import EventProtectionForm
from indico.modules.events.management.views import WPEventProtection
from indico.modules.events.operations import update_event_protection
from indico.modules.events.roles.util import serialize_event_role
from indico.modules.events.sessions import COORDINATOR_PRIV_SETTINGS, session_settings
from indico.modules.events.sessions.operations import update_session_coordinator_privs
from indico.modules.events.util import get_object_from_args
from indico.util import json
from indico.util.i18n import _
from indico.util.marshmallow import PrincipalDict
from indico.web.args import use_rh_kwargs
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.forms.fields.principals import PermissionsField, serialize_principal
from indico.web.rh import RH
from indico.web.util import jsonify_template
class RHShowNonInheriting(RHManageEventBase):
    """Show a list of non-inheriting child objects."""

    def _process_args(self):
        RHManageEventBase._process_args(self)
        # The target object is the third element returned for the URL args.
        self.obj = get_object_from_args()[2]
        if self.obj is None:
            raise NotFound

    def _process(self):
        non_inheriting = self.obj.get_non_inheriting_objects()
        return jsonify_template('events/management/non_inheriting_objects.html',
                                objects=non_inheriting)
class RHEventACL(RHManageEventBase):
    """Display the inherited ACL of the event."""

    def _process(self):
        acl_markup = render_acl(self.event)
        return acl_markup
class RHEventACLMessage(RHManageEventBase):
    """Render the inheriting ACL message."""

    def _process(self):
        # The requested protection mode arrives as its enum member name.
        protection_mode = ProtectionMode[request.args['mode']]
        return jsonify_template('forms/protection_field_acl_message.html',
                                object=self.event, mode=protection_mode,
                                endpoint='event_management.acl')
class RHEventProtection(RHManageEventBase):
    """Show event protection."""

    def _process(self):
        """Render the protection settings form and apply it on submit."""
        form = EventProtectionForm(obj=FormDefaults(**self._get_defaults()), event=self.event)
        # Permissions the user may edit through the form widget.
        selectable_permissions = {k for k, v in get_available_permissions(Event).items() if v.user_selectable}
        user_permissions = [(p.principal, set(p.permissions)) for p in self.event.acl_entries]
        # ACL entries with only non-selectable permissions are shown read-only,
        # sorted by principal order and case-insensitive name.
        hidden_permissions = sorted((
            (principal, sorted(perms))
            for principal, perms in user_permissions
            if perms and not (perms & selectable_permissions)
        ), key=lambda x: (x[0].principal_order, x[0].name.lower()))
        form.permissions.hidden_permissions = [(p.name, perms) for p, perms in hidden_permissions]
        if form.validate_on_submit():
            # Persist ACL changes first, then the event-level settings.
            update_permissions(self.event, form)
            update_event_protection(self.event, {'protection_mode': form.protection_mode.data,
                                                 'own_no_access_contact': form.own_no_access_contact.data,
                                                 'access_key': form.access_key.data,
                                                 'visibility': form.visibility.data,
                                                 'public_regform_access': form.public_regform_access.data})
            self._update_session_coordinator_privs(form)
            flash(_('Protection settings have been updated'), 'success')
            return redirect(url_for('.protection', self.event))
        return WPEventProtection.render_template('event_protection.html', self.event, 'protection', form=form)

    def _get_defaults(self):
        """Collect the current protection settings used to pre-fill the form."""
        registration_managers = {p.principal for p in self.event.acl_entries
                                 if p.has_management_permission('registration', explicit=True)}
        event_session_settings = session_settings.get_all(self.event)
        # Only currently-enabled coordinator privileges are included.
        coordinator_privs = {name: event_session_settings[val] for name, val in COORDINATOR_PRIV_SETTINGS.items()
                             if event_session_settings.get(val)}
        permissions = [[serialize_principal(p.principal), list(get_principal_permissions(p, Event))]
                       for p in self.event.acl_entries]
        # Drop principals without any permissions.
        permissions = [item for item in permissions if item[1]]
        return dict({'protection_mode': self.event.protection_mode, 'registration_managers': registration_managers,
                     'access_key': self.event.access_key, 'visibility': self.event.visibility,
                     'own_no_access_contact': self.event.own_no_access_contact,
                     'public_regform_access': self.event.public_regform_access,
                     'permissions': permissions},
                    **coordinator_privs)

    def _update_session_coordinator_privs(self, form):
        """Save the session-coordinator privilege fields from the form."""
        data = {field: getattr(form, field).data for field in form.priv_fields}
        update_session_coordinator_privs(self.event, data)
class RHPermissionsDialog(RH):
    """Render the permissions dialog for the principal posted in the form."""

    def _process(self):
        principal = json.loads(request.form['principal'])
        field_type = PermissionsField.type_mapping[request.view_args['type']]
        permissions_tree = get_permissions_info(field_type)[1]
        return jsonify_template('events/management/permissions_dialog.html',
                                permissions_tree=permissions_tree,
                                permissions=request.form.getlist('permissions'),
                                principal=principal)
class RHEventPrincipals(PrincipalsMixin, RHAuthenticatedEventBase):
    # Parses the posted principals (users, groups, event/category roles,
    # registration forms, emails) into `values` and delegates the actual
    # processing to PrincipalsMixin.
    @use_rh_kwargs({
        'values': PrincipalDict(allow_groups=True, allow_external_users=True, allow_event_roles=True,
                                allow_category_roles=True, allow_registration_forms=True, allow_emails=True,
                                missing={})
    }, rh_context=('event',))
    def _process(self, values):
        self.values = values
        return PrincipalsMixin._process(self)
class RHEventRolesJSON(RHAuthenticatedEventBase):
    """Return the event's roles, ordered by code, as JSON."""

    def _process(self):
        roles = sorted(self.event.roles, key=attrgetter('code'))
        serialized = [serialize_event_role(role, legacy=False) for role in roles]
        return jsonify(serialized)
class RHCategoryRolesJSON(RHAuthenticatedEventBase):
    """Return the roles of the event's category as JSON."""

    def _process(self):
        roles = CategoryRole.get_category_roles(self.event.category)
        serialized = [serialize_category_role(role, legacy=False) for role in roles]
        return jsonify(serialized)
|
This entry was posted on Tuesday, August 14th, 2012 at 9:15 and is filed under Comics. You can follow any responses to this entry through the RSS 2.0 feed. You can leave a response, or trackback from your own site.
|
import numpy as np
class CategoryToNumeric(object):
    """
    Transform class that replaces a categorical value with a representative target value
    for instances that belong to that category. This technique is useful as a method to
    turn categorical features into numeric values for use in an estimator, and can be
    viewed as an alternative approach to one-hot encoding. Only suitable for regression
    tasks.

    Parameters
    ----------
    categorical_features : list
        A list of integers representing the column indices to apply the transform to.

    metric : {'mean', 'median', 'std'}, optional, default 'mean'
        The method used to calculate the replacement value for a category.

    Attributes
    ----------
    feature_map_ : dict
        Mapping of categorical to target values.
    """
    # Reductions available for computing a category's replacement value.
    # np.median is a module-level function because ndarrays expose .mean()
    # and .std() methods but no .median() method (the previous
    # y[...].median() call raised AttributeError for metric='median').
    _METRICS = {'mean': np.mean, 'median': np.median, 'std': np.std}

    def __init__(self, categorical_features, metric='mean'):
        self.categorical_features = categorical_features
        self.metric = metric
        self.feature_map_ = {}

    def fit(self, X, y):
        """
        Fit the transform using X as the training data and y as the label.

        Parameters
        ----------
        X : array-like
            Training input samples.

        y : array-like
            Target values.

        Raises
        ------
        ValueError
            If `metric` is not one of 'mean', 'median' or 'std'.
        """
        # Validate the metric once up front instead of on every category value.
        if self.metric not in self._METRICS:
            raise ValueError('Metric not recognized.')
        stat = self._METRICS[self.metric]
        for i in self.categorical_features:
            self.feature_map_[i] = {}
            for value in np.unique(X[:, i]):
                # Representative target statistic over the rows of this category.
                self.feature_map_[i][value] = stat(y[X[:, i] == value])

    def transform(self, X):
        """
        Apply the transform to the data.

        Parameters
        ----------
        X : array-like
            Input samples.  Every category in the listed columns must have
            been seen during fit, otherwise a KeyError is raised.
        """
        X_trans = np.copy(X)
        for i in self.categorical_features:
            # Compute all category masks before writing anything: writing while
            # scanning could re-replace rows whose replacement value collides
            # with a not-yet-processed category value.
            column = X_trans[:, i]
            masks = [(column == value, value) for value in np.unique(column)]
            for mask, value in masks:
                X_trans[mask, i] = self.feature_map_[i][value]
        return X_trans

    def fit_transform(self, X, y):
        """
        Wrapper method that calls fit and transform sequentially.

        Parameters
        ----------
        X : array-like
            Training input samples.

        y : array-like
            Target values.
        """
        self.fit(X, y)
        return self.transform(X)
|
master_trainer_mike says, "Hi gang! I'm back from London and ready to answer a plethora of questions."
master_trainer_mike says, "You do know what a plethora is right?"
master_trainer_mike says, "I would hate to think you were asking a plethora of questions if in fact you didn't know what a plethora was!"
master_trainer_mike says, "Nope. That would be far too silly (My Ditto is facing your LSSP'd card which ends up beinga Blaine's Ninetails and so on....)"
master_trainer_pat presents the speaker with question #158 from indigomaster33:will the darkness energy in expedition re wording stay as is? or will it be errated?
master_trainer_pat says, "It is a little different...but we won't go into details just yet."
master_trainer_pat says, "Gee, I don't know can anyone help this eager trainer?"
master_trainer_pat presents the speaker with question #160 from trekiev:When will the Worlds Professor Challenge results be posted on the website?
master_trainer_mike says, "That was a non-sanctioned event that was run as a benefit for the staff at Worlds. The results from the Sanctioned Pokemon Championship at Origins should be up soon (wrote a long article about it myself)."
master_trainer_mike says, "In other words, there are no plans for coverage on that event."
master_trainer_pat presents the speaker with question #162 from mt_icecold:Any news on Pokemon Expedition?
master_trainer_pat says, "What questions do you have about it?"
master_trainer_mike presents the speaker with question #161 from daytongl:Any idea of when the MPs or Profs can expect the marketing materials for BattleZone?
master_trainer_mike says, "We hope to have information up about that part of the Professor program next week."
master_trainer_mike presents the speaker with question #164 from mt_icecold:Yo mike, got any thoughts on what might be archetyped into the existing entei/cargo deck?
master_trainer_mike says, "Well, the Venusaur could be interesting. Combined with a Typhlosion and you could attach 3 fire energy to an active Magcargo per turn."
master_trainer_mike says, "Those are just clarifications for the players benefit. They are both Pokemon Powers (meaning Muk would shut them both off)."
master_trainer_mike says, "A Poke-BODY only affects the Pokemon that has it and is continuous."
master_trainer_mike says, "A Poke-POWER is an activated Pokemon Power that could have all sorts of effects."
master_trainer_mike says, "Poke_BODYs are not shut off when the Pokemon has a Special Condition on it as well."
master_trainer_pat presents the speaker with question #166 from lord_schweizer:If I use a professor elm trainer can I then put down a gold berry, focus band etc.?
master_trainer_pat says, "No, you cannot."
master_trainer_pat presents the speaker with question #167 from bigchuck001:Theres a big rumor going around saying that withthe new Darkness Energy, things like Murkrows feint attack will do 30+ damage to a BENCHED pokemon. Is this true?
master_trainer_mike says, "Good catch Alex!"
master_trainer_mike says, "That is true."
master_trainer_mike says, "There are a lot to check out. Don't have fave yet."
master_trainer_pat presents the speaker with question #169 from bulletpokemonmaster:Is Promo #50 the promo from the pokemon center New York? and is it possibly Unown R?
master_trainer_pat says, "Don't know. We have not heard any plans to release Unown R"
master_trainer_pat says, "I'm sure he's crushed."
master_trainer_mike presents the speaker with question #171 from snorlaxstampede:I am trying to get some tournaments running in my town. Do you have any suggestions to help me do this?
master_trainer_mike says, "Information on running and santioning tournaments is all available on the web at www.thedci.com"
master_trainer_mike says, "Also, you could contact Pokemon Professors in your area for help as wlel."
master_trainer_mike presents the speaker with question #172 from teamrocket4life:Any new details you can give us on BattleZone?
master_trainer_mike says, "It is moving forward and will start in November."
master_trainer_pat presents the speaker with question #175 from lizardotc:When is, or was, the first date that Expedition is allowed in sanctioned play?
master_trainer_pat presents the speaker with question #177 from chrisbo:So just to clarify, Muk's "Toxic Gas" will prevent Poke-Body powers, right?
master_trainer_pat presents the speaker with question #173 from trekiev:whats up with the experation date on the professor cards?
master_trainer_pat says, "Well, I think the idea is that you will need to re-test when they expire."
master_trainer_mike presents the speaker with question #174 from michel1be:Is Battlezone for USA only or is it for Europe too ?
master_trainer_mike says, "Different markets have their own plans. I believe it will be coming out in Europe as well though."
master_trainer_pat presents the speaker with question #176 from ikendall:If you had 2 Venusaurs on the bench, could you attach 3 energy per turn to your Active, or just 2, irrespective of the number of Venusaurs you have?
master_trainer_pat says, "I'm really not going to even try to answer that since I don't even have that card yet."
master_trainer_pat says, "It's impossible to get into the rules questions when you don't have the text of the card."
master_trainer_pat presents the speaker with question #179 from trekiev:darkness energy has been changed?! why?
master_trainer_pat says, "I don't know. It's news to me. Like I mentioned in the answer to the last question I don't have any cards yet. I did see a few and I think MIke has a few, but that's it."
master_trainer_pat presents the speaker with question #180 from snorlaxstampede:Will Dark Magcargos Bench Damage also be uped with the new Dark NRG text?
master_trainer_pat says, "Wait until you have the new cards in front of you to ask that please."
master_trainer_pat presents the speaker with question #181 from soslowpoke:So is expedition legal for use?
master_trainer_pat says, "On Sept 16th"
master_trainer_mike presents the speaker with question #178 from bulletpokemonmaster:Anything yet on Battle Zone? and when its suppose to start?
master_trainer_mike says, "It is a low cost, flexible event support kit that will provide participation and prize promo cards. It can be used for simplified league play as well if that is what the store chooses. It starts in November, more details to come."
master_trainer_pat says, "No, only the previously printed Pokemon with new attacks and Powers"
master_trainer_pat presents the speaker with question #183 from chrisbo:So from now on, we are to play Darkness Energy as written on the Expedition version regardless of which actual card we are using in our deck, correct?
master_trainer_mike says, "As of 9/16/02 yes."
master_trainer_mike says, "Sorry, we don't answer these questions until after the official release date."
master_trainer_pat presents the speaker with question #185 from bigdaddysnorlax:When the text for a previously released card changes in a new set, like Darkness energy, how are the older cards played?
master_trainer_pat says, "As with other games that we make, you will need to start using the new text as soon as the set is released"
master_trainer_pat says, "You got that special recognition last week."
master_trainer_mike presents the speaker with question #187 from lizardotc:Will Battle Zone supplement league, or will it REPLACE league?
master_trainer_mike presents the speaker with question #188 from bulletpokemonmaster:Will battle Zone have a chance for people to earn cards? or anything of that sort?
master_trainer_mike says, "Details to come gang."
master_trainer_mike presents the speaker with question #189 from lizardotc:Re: #175: Not long ago, you stated that there were no longer official release date for Pokémon. I take it that official release dates are now back... correct?
master_trainer_pat says, "No, official release dates are not back, but because some stores start selling the new set _way_ before the others has made it so that we need to have a date when the new set would be legal"
master_trainer_pat says, "So that this doesn't create an unfair playing field for those that can't get the new cards."
master_trainer_mike presents the speaker with question #190 from pigdeon_of_death:Is Exedition out yet?
master_trainer_mike says, "Mainly no, 1 store has an exclusive but the main release is Sept 15th."
master_trainer_mike says, "The theme decks."
master_trainer_mike presents the speaker with question #193 from sd_pokemom:Will those of us unfortunate to have Leagues which are behind in getting kits be able to get the kits we've missed, even when BattleZone starts?
master_trainer_mike says, "There will be a limited window to do so yes, but eventually we are trying to get everyone running at the same time."
master_trainer_pat presents the speaker with question #194 from mt_icecold:Will top 8 at worlds for both age groups get invited back enxt year?
master_trainer_pat says, "We have not worked that out yet."
master_trainer_pat presents the speaker with question #195 from lance313:Re: Expedition: How many different box toppers are there?
master_trainer_pat presents the speaker with question #196 from bigchuck001:If the Dark ruling sticks, I think something should be done about Murkrow. The card was broken in Unlimited already. Now it's like a necessity in every deck.
master_trainer_mike says, "The sky is falling the sky is falling!"
master_trainer_mike presents the speaker with question #197 from trekiev:i heard there aren't anymore 1st edition printings, how come?
master_trainer_mike says, "1st edition stopped with Neo Destiny. It didn't make sense as far as long term growth of the product."
master_trainer_mike says, "We have them in front of us and we won't talk about them."
master_trainer_mike says, "After 9/15 gang."
master_trainer_mike says, "Yay! Glad they finally are getting out."
master_trainer_mike presents the speaker with question #201 from soslowpoke:Oooh so now Base Set 2 Full Heal Removes all special conditions like the Expedition one?
master_trainer_mike presents the speaker with question #202 from teamrocket4life:Will the Coin Flipper on the Chansey E-Card (When used with the E-Reader) be legal for gameplay purposes?
master_trainer_pat presents the speaker with question #205 from bulletpokemonmaster:When a Pokemon Like Light Lanturns Pokemon Power changes the attack from Electric to Water, does it do 60 Damage to fire pokemon rather then 30?
master_trainer_pat says, "I think you're talking about Lanturn...it's Submerge Power can change it's type to water"
master_trainer_pat says, "So it does count as a water attack"
master_trainer_pat says, "You were mentioned...we posted your question..."
master_trainer_mike presents the speaker with question #208 from puritys_echidna:is the pokemon center going to get exclusive early release rights for sets from now on, or was expedition a one time deal?
master_trainer_mike presents the speaker with question #207 from soslowpoke:Any futher info on when the Theme deck challenge winners will be receiving their expedition boosters?
master_trainer_mike says, "We don't even have our allotment of the product yet. Once we do, we will send these prizes out."
master_trainer_pat presents the speaker with question #209 from soslowpoke:Hmm...so will the Echo Theme Deck (alakazam one) have all 6 basic energy in it to complement its Poke-Power?
master_trainer_pat says, "Hold your Horseas"
master_trainer_mike presents the speaker with question #210 from marril2000:any new info about the Pokemon Rewards System besides the holographic energy making a second apperence?
master_trainer_mike says, "When we have more details, you will have more details."
master_trainer_mike presents the speaker with question #211 from ikendall:How are the Prof cards being dealt with internationally? Have the been sent from the US, or will they be sent by local offices?
master_trainer_mike says, "We are sending them all out from here."
master_trainer_mike presents the speaker with question #212 from calathon:do we have to build robots for BattleZone?
master_trainer_pat says, "Giant Firebrathing Robots.."
master_trainer_mike presents the speaker with question #213 from prof_kris:Any idea about when the schedule for the next Challenge series will be out?
master_trainer_mike presents the speaker with question #214 from bigchuck001:I'm not sure if anyone asked allready so I'll ask now =P. Any news on when the new Gym Challenges and Stadium Challenges will start up?
master_trainer_mike says, "We are in the process of scheduling them now. It does take some time. We will try and tell you as soon as we can."
master_trainer_pat presents the speaker with question #215 from matthewssandslash:I heard a rumor about a WotC exclusive set... can you verify/trash that rumor?
master_trainer_pat says, "That's an old one. The last thing we heard was that prospects for that were not good."
master_trainer_mike says, "It is on the back burner. Hope to see it sometime."
master_trainer_mike presents the speaker with question #216 from gym_leader_blaine:so any more information for pokemon battlegrounds that is coming out?
master_trainer_mike says, "You mean BattleZone? Details to come soon."
master_trainer_mike presents the speaker with question #217 from lizardotc:During what month do you envision the new Challenge Series tournaments will begin?
master_trainer_mike says, "When we know, you will know."
master_trainer_pat presents the speaker with question #218 from teamrocket4life:Is it possible to have the important rules documents (Master Rules Document, Pokemon Floor Rules, etc.) available to download in a Palm OS format?
master_trainer_pat says, "Only if you're able to make the conversion."
master_trainer_mike presents the speaker with question #220 from snorlaxstampede:Will the locations for the new Challenge series be the same as last year or will you try to spread them out?
master_trainer_mike says, "We plan on trying to offer more Gym Challenges in a wider spread this next series."
master_trainer_mike presents the speaker with question #222 from darkleafchampion:I just got here, so sorry if this is repeated. I just checked my mail And got the Professor Card... Thank you!!!
master_trainer_mike says, "Glad you got it :)"
master_trainer_mike presents the speaker with question #223 from bulletpokemonmaster:Do you know if Wizards will make the next 2 Card E sets?
master_trainer_mike says, "Thats the plan!"
master_trainer_pat presents the speaker with question #224 from lucky_cal:Are the Pokémon Powers Drive Off (Light Arcanine) and Major Tsunami (Feraligatr) mandatory?
master_trainer_pat says, "Lt. Arcanine's Drive Off is written in a way that makes it mandatory if he is your active Pokemon."
master_trainer_mike says, "BTW, I wanted to thank all of the Pokemon Professors who worked and helped with Gen Con UK."
master_trainer_mike says, "You guys were great!"
master_trainer_mike says, "It is being updated as we speak and will be posted online soon."
master_trainer_pat presents the speaker with question #226 from lucky_cal:Re: 224 What if an active Lt. Arcanine or Feraligatr is retreated before the power is used?
master_trainer_pat says, "I don't think you would, but Mike says we better check with the rules team on this one."
master_trainer_pat presents the speaker with question #227 from bigchuck001:Ok, just to clarify because I wasnt sure on your response, would a Murkrow with a Darkness Energy and a Recycle Energy do 30 damage to a benched Pokemon when Expedition comes out?
master_trainer_pat says, "We're going to also clarify that with the rules team. Maybe that was a misprint and we'll have to play it like it's always been played."
master_trainer_pat presents the speaker with question #228 from calathon:If Brock's Ninetales has Shapeshifted into a Pokemon that does not have a Pokemon Power, does it still take damage from Pichu's Zzap!?
master_trainer_pat says, "Yes, it still has the Shapeshift Power, otherwise you wouldn't be able to keep attaching cards to B. Ninetales."
master_trainer_mike presents the speaker with question #229 from michel1be:If we were great, GenCon UK was great too. :-) We've had a lot of fun !
master_trainer_mike presents the speaker with question #230 from babayaga:Last week there was a ruling that a Confused Pokemon that failed a retreat attempt, then Evolved could try to retreat again. But wouldn't the Evolution cure the Status Effect?
master_trainer_mike says, "What are you saying? If you evolve, Confusion is cured and you can retreat."
master_trainer_mike says, "They do have normal card backs but they are so big they would clearly be marked cards in your deck."
master_trainer_pat presents the speaker with question #232 from sd_pokemom:Is this a white magnetic strip on the back of the professor card? It doesn't _feel_ like a signature strip; if it's magnetic, what info is encoded?
master_trainer_pat says, "It is purely for your viewing pleasure"
master_trainer_mike presents the speaker with question #233 from soslowpoke:Any info on when the first challenge series event will be or when event schedule will be up?
master_trainer_mike says, "We will tell you when we know."
master_trainer_mike presents the speaker with question #234 from soslowpoke:Hmm if there are only 4 boxtoppers in Expedition, and the numbers on the boxtoppers have a /12 on them, does this mean that there will be 4 in expedition, 4 in (dont want to spoil the name for anyone) carde2/3 and 4 in whatever set comes after that?
master_trainer_mike says, "Maybe, rabbit, maaaybe."
master_trainer_mike says, "No we haven't."
master_trainer_mike says, "Why? Does anyone actually play that card?"
master_trainer_mike presents the speaker with question #236 from snorlaxstampede:Do you know when the E-card Reader will be released.
master_trainer_mike says, "I have heard that there will be a small allotment released in November in the US and another small allotment near Xmas in the US."
master_trainer_mike says, "That might not be true though."
master_trainer_mike says, "Any other questions my friends?"
master_trainer_mike presents the speaker with question #238 from soslowpoke:Has DMTM shown you the card I made for him at GenCon yet?
master_trainer_mike presents the speaker with question #239 from prof_kris:Any cards considering being banned from TMP?
master_trainer_mike says, "Not at this time."
master_trainer_mike presents the speaker with question #240 from skywolf9653:Can you repeat any announcement? I got in late?
master_trainer_mike says, "Didn't have any, just answering questions this week."
master_trainer_mike presents the speaker with question #241 from teamrocket4life:Could there ever possibly be a bundle package with, say, the E-Card Reader and a Theme Deck?
master_trainer_mike says, "Probably not, Nintendo makes the reader, we make the cards, that isn't a likely combo."
master_trainer_mike presents the speaker with question #242 from pigdeon_of_death:If Brocks Ninetails has evolved pokes attached, can Recall let it use a Brock's Vulpix attack?
master_trainer_mike says, "Sorry guys, the game rules aren't clear. We will ask Japan and see what they say."
master_trainer_pat presents the speaker with question #244 from soslowpoke:Do you ever feel the urge to raid DMTM's office?
master_trainer_pat says, "Nope, it's been designated an official historic landmark and cannot be touched."
master_trainer_pat presents the speaker with question #245 from soslowpoke:Ever plan on doing "Fat Packs" for Pokemon?
master_trainer_pat says, "Hmmm...there will be some Pokemon Big Box sets at Toys R Us that include the Trainer Video and a bunch of packs(maybe even some international ones) for a good price"
master_trainer_mike presents the speaker with question #246 from pokemonwarrior:If there is an Ecogym in play, and i play Misty's wrath, and discard Energies, will they return to my hand?
master_trainer_mike says, "Nope, read Ecogym. It says if a Trainer discards a non-colorless energy from another player's Pokemon....."
master_trainer_mike says, "That isn't happening in this case."
master_trainer_mike presents the speaker with question #247 from lucky_cal:Last week it was ruled that Dk. Gengar's attack would Pull In a Benched Pokémon against Neutral Barrier Mew... but this contradicts similar previous rulings decisions.
master_trainer_mike says, "Actually it wouldn't work. Please disregard what was said last week."
master_trainer_mike presents the speaker with question #249 from snorlaxstampede:Why does expedition have 9 card boosters?
master_trainer_mike says, "Because e-cards cost more to make, we didn't raise the price of the packs, and each and every pack comes with a rare AND a parallel foil."
master_trainer_mike says, "Also 1 in 3 packs also come with a Holo card."
master_trainer_mike says, "So, 1 in 9 packs come with a rare, a parallel foil rare, AND a HOLO!!!!"
master_trainer_mike says, "Not bad huh?"
master_trainer_mike presents the speaker with question #250 from prof_kris:Will prizes at Stadium Challenges be from Expedition?
master_trainer_mike presents the speaker with question #251 from pokemonwarrior:Are the E-cards compatible for play without the reader?
master_trainer_mike says, "The Card-E cards are the next Pokemon TCG expansion."
master_trainer_mike says, "Play them as any other expansion."
master_trainer_mike says, "The card-e content is a nifty extra that comes with the cards but has nothing to do with playing the card game."
master_trainer_mike presents the speaker with question #252 from teamrocket4life:Could you ever bring back those cool little glass bead damage counters? Even if it raises the price of a theme deck a little?
master_trainer_mike says, "Sorry, too many people thought they were candy."
master_trainer_mike presents the speaker with question #255 from bigdaddysnorlax:Is it true that Expedition Boosters only contain 9 cards?
master_trainer_mike says, "Yup, but the 2 cards that were removed were commons."
master_trainer_pat presents the speaker with question #256 from soslowpoke:Ooooh this isn't Pokemon related, but do you know whether or not the next Harry Potter Expansion will have a new Lesson type?
master_trainer_mike presents the speaker with question #257 from ikendall:Any Professor events on the horizon?
master_trainer_mike says, "Again, we are working on all of the Pokemon event schedules right now."
master_trainer_mike presents the speaker with question #259 from pokemonwarrior:Are the E-cards required for the card reader?
master_trainer_mike says, "The e-reader only reads card-e cards!!!"
master_trainer_pat presents the speaker with question #260 from chrisbo:Do you guys know if the Trainer Video to be included in the "Big Box" has been updated, or is it the same one we've had for a couple of years now?
master_trainer_mike presents the speaker with question #261 from soslowpoke:How do 1 in 9 come with a holo, rare, and reverse foil?
master_trainer_mike says, "Wait and see."
master_trainer_mike says, "That ISN'T GUARANTEED BTW!!! Just doing the math."
master_trainer_mike presents the speaker with question #262 from lucky_cal:Will there be a difference in the parallel foil cards with the rares that have both holo and non-holo cersions?
master_trainer_mike presents the speaker with question #263 from snorlaxstampede:so Expedition commons will be rarer then commons from other sets.
master_trainer_mike says, "I suppose you could look at it that way."
master_trainer_mike presents the speaker with question #264 from prof_kris:Since Expedition packs have less cards, would the recommended number of drafting packs is 6?
master_trainer_mike says, "Realistically, to keep costs down you would probably still want to use 5, but if you could afford 6, you would get a better mix."
master_trainer_pat presents the speaker with question #265 from michel1be:Legendary Collection boosters are great for sealed play. Is it the same with the Expedition boosters ?
master_trainer_pat says, "LC really was set up more for sealed play. It's unlikely that Expedition will work that nicely"
master_trainer_mike presents the speaker with question #267 from pokemonwarrior:Are there any events for older players without the need to become a professor?
master_trainer_mike says, "Sure, play in any of the 6000 sanctioned Pokemon tournaments held every year, or play in any of the sanctioned Team events or any of the side events held at Pokemon premiere events or at leagues or soon at Battlezone."
master_trainer_pat presents the speaker with question #268 from soslowpoke:Are there still 3 uncommons per booster in expedition?
master_trainer_pat says, "I think its 2 actually"
master_trainer_pat presents the speaker with question #269 from teamrocket4life:Were the Expedition boosters printed in a way to make drafts easy (Like Legendary Collection was)?
master_trainer_mike says, "Ok gang, its 4 pm, thank you all for coming."
master_trainer_mike says, "More details on our programs to come on our web site as soon as we can get them up."
master_trainer_mike says, "2002-2003 is gonna be a lot of fun!"
master_trainer_pat says, "Hopefully Expedition will be in better circulation next Thurs"
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-10-23 18:45
from __future__ import unicode_literals
from django.db import migrations
def move_person_from_popolo(apps, schema_editor):
    """Copy every popolo Person row into the new people.Person table.

    Primary keys are preserved so existing references to the old rows stay
    valid, and the not_standing many-to-many links are carried across too.
    """
    OldPerson = apps.get_model("popolo", "Person")
    NewPerson = apps.get_model("people", "Person")
    # Scalar fields that are copied across verbatim from the old model.
    copied_fields = (
        "start_date",
        "end_date",
        "created_at",
        "updated_at",
        "name",
        "family_name",
        "given_name",
        "additional_name",
        "honorific_prefix",
        "honorific_suffix",
        "patronymic_name",
        "sort_name",
        "email",
        "gender",
        "birth_date",
        "death_date",
        "summary",
        "biography",
        "national_identity",
        "versions",
    )
    for old_person in OldPerson.objects.all():
        field_values = {
            name: getattr(old_person, name) for name in copied_fields
        }
        new_person = NewPerson.objects.create(
            pk=old_person.pk, **field_values
        )
        # M2M relations cannot be passed to create(); attach them afterwards.
        for election in old_person.not_standing.all():
            new_person.not_standing.add(election)
class Migration(migrations.Migration):
    """Data migration: move Person rows from the popolo app to people."""

    dependencies = [("people", "0003_add_person_model")]

    operations = [
        # Forward copies the rows; reverse is deliberately a no-op.
        migrations.RunPython(move_person_from_popolo, migrations.RunPython.noop),
        # Advance the PK sequence past the copied ids so later inserts
        # do not collide with the preserved primary keys.
        migrations.RunSQL(
            """
            SELECT setval('people_person_id_seq', COALESCE((SELECT MAX(id)+1
            FROM people_person), 1));
            """,
            migrations.RunSQL.noop,
        ),
    ]
|
These delicious cupcakes by Half Baked Harvest are seriously to die for. Death By Chocolate Cupcakes are moist, rich chocolate cupcakes filled with whipped cream and topped with really awesome frosting. Chocolate mousse frosting is what makes these cupcakes over the top. Besides having Kahlua in the cake and filling, these cupcakes are drizzled with chocolate-Kahlua sauce.
I make a dessert I also call death by chocolate. It is a layered dessert in a bowl. First with fudge brownie pieces on the bottom, then a layer of chocolate pudding, next a layer of whipped cream, then sprinkle candy pieces on top. (I use Reese's, Snickers, and any chocolate candy chopped up into small pieces.) Now you start your layers over again. You can make them as thick or thin as you like. I do two layers, so use half the brownies, half the pudding, half the whipped cream and candies.
|
import os

from ez_setup import use_setuptools
use_setuptools()

from setuptools import setup, find_packages

# The long description is the README minus its first line (the title),
# which would otherwise be repeated on the package index page.
with open("README.rst") as readme:
    long_description = readme.read()
long_description = long_description[long_description.find("\n") + 1:]

setup(
    name="custodian",
    packages=find_packages(),
    version="0.8.1",
    install_requires=["monty>=0.5.9", "six"],
    extras_require={"vasp, nwchem, qchem": ["pymatgen>=3.0.2"]},
    package_data={},
    author="Shyue Ping Ong, William Davidson Richards, Stephen Dacek, "
           "Xiaohui Qu",
    author_email="ongsp@ucsd.edu",
    maintainer="Shyue Ping Ong",
    url="https://github.com/materialsproject/custodian",
    license="MIT",
    description="A simple JIT job management framework in Python.",
    long_description=long_description,
    keywords=["jit", "just-in-time", "job", "management", "vasp"],
    classifiers=[
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Scientific/Engineering :: Chemistry",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    # Install every file under scripts/ as a command-line script.
    scripts=[
        os.path.join("scripts", script) for script in os.listdir("scripts")
    ],
)
|
Co-founder of the Zurich-based nonprofit organization nethood.org, which is involved in two EU Horizon2020 projects (2016–2018), MAZI — http://mazizone.eu, and netCommons — http://netcommons.eu.
This paper frames the role of community (wireless) networks, and other forms of grassroots DIY networking models, as complementary to the Internet communication infrastructures hosting local services for facilitating local interactions, as drivers for a more convivial and sustainable life in the city. Today, only a few Internet-based global corporations mediate our everyday online interactions, without respecting our rights to privacy, freedom of expression and self-determination; they depend for their own sustainability on the exploitation of the immense collected information and design power toward private, commercial and political objectives. But when communication is meant to take place between people in physical proximity, local community networks can provide an alternative infrastructure owned and designed by those concerned. The paper analyses four key reasons, practical, social, political, and scientific, why such DIY networks should be considered as a viable complementary infrastructure for local communications even when Internet access is available. Through analogies with other relevant domains of local action, namely complementary currencies and cooperative housing, I conclude by addressing the dichotomy between local action and global coordination. I advocate for the co-creation of convivial ICT tools for building local communities, or better hybrid spaces of local cooperation, which are larger in size than the small in “small is beautiful” and smaller, but in many cases more diverse, than recent imaginaries of the “multitude”.
Global Internet platforms like Google and Facebook become more and more efficient in managing vast amounts of information, which makes their users addicted and dependent on them, subject to manipulation and exploitation. But when communication is meant to be local — in public spaces, at the neighborhood or even at a city scale — there is no technical necessity to rely on such global platforms for mediating local interactions. Alternative options based on wireless technology do exist. They can empower citizens to build their own local networks and customize them according to their own needs for creating hybrid, digital and physical, urban spaces that are more inclusive, more intimate, and more convivial.
For example, using a Raspberry Pi and a Web server, a “self-appointed public character”, as Jane Jacobs called those that sustain the sidewalk’s social life, can activate a context-specific social application that invites passers-by and local residents for various hybrid interactions. The coverage, and thus the relevant applications, can further increase through the formation of a network of such devices, which can organically grow according to the voluntary contributions by individuals. The presence of this invisible digital space can be announced through physical urban interventions: a visible container of the device itself, a QR code, a poster, even through specific action like artistic performances or face-to-face communication. Anyone in proximity can join simply by selecting the wireless network name, or SSID, and by opening any web browser without the need for credentials or other identification, except from being there, and without the need for any Internet connection.
Indeed, the most typical reaction of someone introduced to the idea of a local community wireless network operating outside the Internet is to ask: “Why?” The Internet is robust, fast, and ubiquitous, they argue. Why invest in building isolated network infrastructures, subject to various forms of abuse, inefficiencies, and failures? And why would anyone wish to interact with strangers in immediate physical proximity anyway? In addition, there are important economic and political reasons why such technical solutions are not desirable, which makes the process of bringing them closer to the mainstream even more difficult.
Today such common infrastructures, where they exist, are mostly seen as gateways to the commercial Internet and they have been proven very valuable in providing broadband Internet access in rural areas not served by traditional ISPs, like in the case of Guifi.net, Sarantaporo.gr and many others, and also in underprivileged populations in urban areas, with most notable example the case of Freifunk.net. On the other extreme, they are also imagined by activists as the first step toward an utopian vision, a global alternative Internet made by the people for the people (Sandvig, 2004; Medosch, 2015).
This paper argues in favor of a more pragmatic approach in which DIY networking takes the form of a “tool for conviviality” à la Illich (1973), meant to serve only local communication needs, as a complementary infrastructure to the global Internet. It can be designed in a participatory way promoting, instead of replacing, face-to-face interactions; allowing a social learning process for understanding the complexities of the design of hybrid urban space; and enabling a more sustainable lifestyle.
Of course, the DIY networking approach entails new challenges and contradictions that need to be understood if we wish to avoid the evolution of this utopian scenario into a dystopia, as it seems to be happening in the case of the Internet (see Trèguer, et al., 2016). However, this paper chooses to focus more on the positive aspects of DIY networks and explains how they can play a role in resolving some of the tensions in the global vs. local debate.
I start with a short introduction to the concept of DIY networking and then analyze in depth four different reasons, practical, social, political, and scientific, why it is important to render DIY networking technologies popular, easy to install and customize, even when the Internet is widely available. Finally, drawing inspiration from success stories in the domains of complementary currencies and cooperative housing, I point to a strategy that focuses on the creation of more inclusive hybrid spaces in localities affirming differences; and at the same time on the design of global “ICT tools for conviviality” that can be easily replicated, appropriated, and negotiated by those concerned.
DIY networking has been recently used as an explicit “term” to characterize a variety of technical solutions that enable citizens to build and operate their own communication networks (Antoniadis, et al., 2014). These can range from large scale community networks to very dynamic ad hoc networks, built over time through the direct exchange of data between personal mobile devices. A DIY network could be also just a simple wireless access point (static or mobile), hosting a local application that is accessible only to those in physical proximity; an off-line or better off-the-cloud network (Dragona and Charitos, 2016).
Existing (wireless) community networks cover geographic areas of various sizes, ranging from a small urban neighbourhood (Gaved, 2011; Baldwin, 2011); to a small town like Leiden (van Oost, et al., 2009); or large city-regions like Athens (awmn.net) and Rome (ninux.org) or even wider areas like the extended Guifi.net network in Catalonia and Freifunk.net in Germany, both including optical fibre cables in their overall infrastructure. On the other hand, ad hoc or delay tolerant networks (DTN) have been mostly developed by the networking research community (e.g., Basagni, et al., 2013), driven mostly by the highly challenging intellectual and technical issues associated with the creation of networks over time, based on “contacts” of independent mobile devices. Finally, off-the-cloud networks are rooted in artistic and/or activist projects such as the PirateBox and Occupy.here (see Dragona and Charitos, 2016).
a new mode of communication that can attract curiosity and interest.
Most importantly, the term DIY networking attempts to emphasize a critical quality and distinguishing factor of WiFi networks: that they can operate outside the public Internet (Antoniadis, et al., 2014, 2008; Powell, 2006). However, not all DIY networking technologies are the same. In the case of wireless DIY networks, there are important differences to understand and distinguish, both for their technical and their social implications.
Figure 1: Examples of the required infrastructure to build a single-node DIY access network with (left) an omni-directional antenna in a small wireless router and (right) a backbone node of the Freifunk network depicting a directional antenna (up) and two sector antennas (bottom).
First, directional antennas can establish a wireless link between distant locations, possibly many kilometers away. This link could be imagined as a very long cable along the imaginary line connecting two locations, which needs to be clear of obstacles (walls, trees, etc), a line-of-sight. Such links are often called “backbone” links since they establish the wider coverage area of the network and are not accessible by end-users. As a social infrastructure, such antennas typically connect like-minded individuals or groups that live far away, which would need to coordinate and agree to create a link between them.
Second, an omni-directional antenna, attached to a router, spreads “cables”, radio signals, in all directions around it and makes it easy for many devices to connect at the same time and independently from their relative location (sector antennas lie between these two extremes restricting the signal inside a certain angle). In this case, the distance between the small antennas inside our devices and the omni-directional antenna can be much smaller, a few hundred meters depending on the environmental conditions. So, omni-directional antennas are more inclusive and can bring in contact people that are not aware of each other’s presence. Off-the-cloud networks are typically single-node access networks with an omni-directional antenna that are used for local-only communications.
Third, omni-directional or sector antennas can be also used to create direct links between devices, which are easier to setup (the antennas find each other automatically if they fall in each other’s range) and thus the corresponding networks are easier to expand, but they are more costly in terms of noise and interference. The corresponding links could be backbone links between fixed nodes as in “mesh” community wireless networks, like Freifunk.net and Wlan slovenija, or ad hoc links between mobile devices that happen to be in “contact”, as in the case of delay tolerant networks.
The potential applications for the latter case are rather complex to work properly and make sense mostly for very dynamic temporal applications, here and now. Real deployments for “civilians” have been scarce until now, with the exception of Qaul.net, the only running system that combines all the above communication modes for both artistic and practical use.
As a last remark, there is often criticism on the use of the term “Do It Yourself” to characterize collective action projects, such as the creation of a network. Alternative terms, more “collaborative”, include “Do It With Others”, “Do It Together”, or “Do It Ourselves”. The preference for the term DIY is first practical, since it is a common abbreviation that does not need explanation. But it also stresses the fact that although it is not possible to build a whole network by yourself, you can indeed build by yourself, or yourselves, one of its nodes. And even if this node is often built using off-the-shelf commercial equipment, it is still placed on your space, owned, installed, and maintained by you.
All these questions refer to one or both potential roles of community networks: Internet access vs. local services. As put by Alison Powell, “last mile vs. local innovation” or as the members of the Air-Stream community network in Adelaide describe their network, ‘Ournet, not the Internet’ (Jungnickel, 2014). It is also important to distinguish whether a certain community network is “introvert” or “extrovert”, i.e., whether its services are available only to those that contribute to the network creation, typically technology enthusiasts, or to the general public.
Figure 2 provides a rough taxonomy of existing initiatives according to these two variables, which provides a good starting point for understanding their differences and similarities. Of course, one cannot capture the whole complexity of this ecosystem with only two dimensions. For example, Figure 2 does not give any indication on the size of the different networks, nor the quality of the Internet access and/or local services offered. Moreover, community networks like Guifi.net and Freifunk.net have several instances (islands) with different characteristics, for example between urban and rural areas. Moreover, such characteristics change over time. Île sans Fils is an example of a community network whose “secondary goal is [was] variously expressed as ‘connecting Montrealers to one another,’ ‘creating community,’ or empowering individuals and fostering a sense of community” (Powell, 2008) but today it has shifted even more toward its primary goal which as stated on its latest logo is “Internet without wires, free”.
In many cases there are also discrepancies between how community networks represent themselves, how they are perceived by outsiders, and how they operate in practice. For example, Guifi.net is a special case of a community network that aims to clearly separate the ownership of the network infrastructure, as a commons, from the services offered on top by the different members, including Internet access (Baig, et al., 2015). However, in reality most people and especially outsiders perceive it as a network offering affordable Internet access and this is how it is mostly used.
Perhaps the most well-known example of a community network focusing on local services is the AWMN network in Athens. Numerous local services, such as Wtube, Woogle, etc., have been developed over the years and they are part of the identity and “pride” of its members (Lennett, et al., 2011). However, the “locality” of AWMN is mostly restricted to the members of the community and for outsiders it is useful only to the extent that it provides free access to the Internet (although Internet access is not included in the basic services offered by AWMN some, but very few, of its members do provide public access WiFi).
The focus of this paper is exclusively on extrovert community networks offering local services. Lennett, et al. (2011) mention that “There is tremendous potential for the creation of services and applications that build on municipal and community wireless networks. These include commercial applications such as real-time mapping, games and content portals as well as services intended to enhance e-government initiatives” and also “applications such as Voice over Internet Protocol (VoIP), streaming and Web-hosting”, but until today this potential has not yet really materialized and the question of the “killer local app” is still open.
Figure 2: A taxonomy of the most well known community networks according to the main service offered (Internet gateway vs. local applications) and openness of the community (introvert vs. extrovert).
Jonathan Baldwin (2011), during his master’s studies at Parsons School of Design in New York, tried to address these issues with the deployment of a small scale community network in Red Hook, a neighborhood in Brooklyn, N.Y. He designed a collaborative mapping tool called tidepools through a participatory process involving local residents. This bottom-up process for wireless community networks has then become the flagship project of the Open Technology Institute (OTI) and was tried also at Detroit Community Technology Project. Today, there is an ongoing effort to replicate it in more areas through the SEED grants project (Nucera, et al., 2016).
The question “Why?” has also been constantly posed for the better understood Internet access scenario. Why should a grassroots community be responsible for providing such a critical service to citizens? Wouldn’t a municipality be a more appropriate institution? And why not let the free market do the job?
This question has been sufficiently answered today through the evolution of networks like guifi.net and freifunk.net, which prove that community networks built from the bottom up constitute an organizational structure that can produce Internet solutions of high quality and performance, at low cost. Moreover, they also facilitate innovation, knowledge transfer, civic engagement, digital inclusion, and foster competition in the telecommunications market (Byrum, 2015).
But here comes a subtle issue that has become relevant only recently, after most of the existing literature on community networks has been produced. Assume that community networks become a mainstream organizational structure and they do succeed in bringing affordable broadband Internet connectivity everywhere. To which Internet will they enable us to connect? To an open and diverse network respecting our privacy and right to self-determination? Or to the big data laboratories of Facebook and Google?
Indeed, it is not an economic paradox that Facebook is trying to provide “basic” Internet connectivity everywhere in the world for “free” , and competes with Google over “Stratospheric Internet Plans” . Such platforms have the power to become the only online places that people visit even if it is to facilitate local interactions, with a huge price: our privacy and self-determination. This puts those DIY networking activists in an awkward position, fighting on the side of the global players for the same objective, Internet access for all, even if with different motivations and underlying values.
In this paper, I would like to highlight the reasons why DIY networking is a good idea even if the Internet is ubiquitous and free for everyone — a position that may appear extreme. While some years ago DIY networking was seen as a “counter power” to commercial ISPs, “disciplining the market” of Internet connectivity (Byrum, 2015; Lennett, et al., 2011; Medosch, 2015; De Filippi and Tréguer, 2015), today it could also be seen as a “counter power” to commercial Internet platforms, “disciplining the market” of location-based applications and services toward the common good.
In the following, I make two main assumptions: 1) there are different groups of people in physical proximity that wish to communicate through an ICT platform, exclusively or in addition to face-to-face interaction, 2) access to the Internet is abundant, subject only to costs and restrictions as in the most economically and technologically advanced cities in the world. Then, using representative examples of possible local applications, I analyse four key reasons why there are many different situations, groups of people in different locations, in which the ICT functionality needed for their communication makes sense to be hosted directly at the local access network and not in a remote server accessed through “the Internet”.
Take as a first working example, a workshop, a party, or a gathering, during which participants often want to share photos, videos and other material, such as slides and documents, and engage in various contextualized (in space and time) interactions.
Instead of using an Internet-based cloud platform, a more straightforward way to exchange information in such a setting would be to have the local wireless router hosting the corresponding service. People that would select the appropriately chosen network name, SSID, would be immediately part of an online community whose members are de facto in the same place. This is by far the most inclusive and convenient way to support the exchange of content between trusted individuals in a specific location; provided, of course, that the corresponding application is well-designed, robust, and free to install on one's own server. Such self-hosted applications, like OwnCloud, have only recently reached the desirable usability levels, and if they are optimized for use in such local environments they would present a credible alternative to Internet-based cloud platforms.
Indeed, Internet-based platforms, like Dropbox, have certain “objective” drawbacks for our working scenario. First, everyone should have registered, or register ad hoc, to the selected service, which excludes those that do not wish to do so; certain platforms have undesirable privacy, copyright or pricing policies for some, and others are reluctant to create yet another account on their colleague’s favorite platform just to share a few files. Such people might need to be unnecessarily excluded or forced to subscribe to a service they do not approve. Moreover, the network connectivity offered by a local WiFi network is always faster in both directions (download and upload) and more private than the corresponding Internet WiFi connection, which might be a rather important feature for large, and private, files.
Note also that, even if we have assumed above that Internet connectivity is not an issue, in reality, most of us have experienced connectivity problems in the most unexpected situations (e.g., visiting an institution with strict access policy, or a crowded place with a saturated Internet connection) that have forced us to share our slides through passing USB sticks over the table, eventually failing to leave the room with all the relevant content in our computers.
Seen from a long-term perspective, there are additional reasons why using a local network is a better solution when communication is meant to be local: resilience and sustainability. First, DIY networking enables the creation of network infrastructures offering alternative options in case of a natural disaster, as recently proved to be the case with the Red Hook WiFi initiative in Brooklyn, mentioned earlier, during Hurricane Sandy. Second, when a local service is available through a central server (managing multiple such services) various energy inefficiencies are introduced. Many people might prefer to use their 3G/4G/5G connections, which are much more energy consuming than local WiFi, data needs to be transferred over longer distances, stored, processed, analysed, and so on. It wouldn’t be surprising to realize that more energy is actually needed by a global platform to perform the tasks related to its commercial activities than the actual service. A small local network built only to serve a small group of people could be made to run only on locally generated renewable energy.
Of course, energy savings/costs are not so easy to calculate since technology affects multiple dimensions of our everyday life . And it is not easy to build infrastructures waiting for a disaster to happen. It would be much more effective if they have a role to play anyway. And for the case of community DIY networks, additional, political, social, and scientific reasons exist, as explained below.
From a political perspective, the more information and communication technologies (ICTs) play a central role in our everyday communications, the more critical it becomes who has authorship in their design, who owns the corresponding infrastructure and generated data, who takes important decisions, and according to which objectives. When these privileges are granted to corporations with an exclusively commercial orientation, the corresponding Internet platforms, even if they are very attractive and efficient in facilitating information sharing and other complex interactions, can severely undermine our privacy, independence, and quality of life.
To make this point clearer, let’s take another example of ICT-supported local interactions, this time between people that do not know each other, and/or they do not know that they are close by. Such applications that help people to get in contact with friends or strangers with common interests that happen to be in proximity, are often called location-based applications or locative media. There have been numerous, mostly failed, start-ups trying to develop such applications, with most notable exception perhaps Foursquare, one that managed to acquire the critical mass of users needed to make it meaningful.
In addition to the aforementioned practical drawbacks of Internet platforms compared to local wireless networks, this is a use case for which the choice of infrastructure has very important political implications. The reason is that for such location-based service to be offered by a remote Internet platform, often many miles away from the target location, all candidate users should have subscribed to the same service provider and installed the “app”. Thus, the provider should be a big corporation at the size of Facebook or Twitter. And it needs to be informed in real-time about the location of every person in the world, in order to know when just a few of them are in physical proximity. And even more worryingly, the access to all the additional data shared generates unprecedented knowledge and power in the hands of actors that have good reasons to use it against their users.
In contrast, all communications that take place in a local wireless network remain, in principle, local. The only way for an external entity to have access to the network is by placing on site physical devices connected to a surveillance infrastructure, which is very costly at large scale and difficult to remain unnoticed for long. Information leakages through individual devices, either intentionally by malicious users, or unintentionally through software and hardware backdoors, are nevertheless possible; those can never be deterministically excluded. But depending on the level of perceived risk, local communities can take precautions by engaging only in anonymous interactions, by regularly deleting the information stored, and more.
In addition to the significant psychological benefits, such as feelings of independence, this characteristic has become more and more important after the public awareness of the NSA surveillance programs and of the aggressive online profiling policies, increasingly discussed in the popular press (see also De Filippi and Tréguer, 2015). But privacy and surveillance are not the only threats posed by the global Internet platforms as mediators of our online activities. The political question is best framed around the “right to the city” concept, and if both the physical and digital are considered together, the “right to the hybrid city” (Antoniadis and Apostol, 2014).
Note that even if these online social networks have been positively connected with recent urban uprisings and political struggles for the “right to the city” (e.g., Gezi Park in Istanbul), they are themselves highly privatized spaces. Their owners have significant power over the design of important software details and the management of all collected data, ranging from multimedia content (e.g., photos and videos) to private information (e.g., location and profile) and patterns of activity (e.g., browsing patterns, reactions to stimulations, or “nudges”). This complete lack of ownership and control of these platforms on the users’ behalf poses significant additional threats. More specifically the possibility to manipulate behaviour (e.g., Tufekci, 2014), to exploit the labor of users (e.g., Fuchs and Sandoval, 2014) and other forms of digital hegemony (Dulong de Rosnay and Musiani, 2016), which, being less obvious, are more difficult to combat. Finally, the existence of complementary infrastructures for local communication can become really critical in cases of political crisis and exceptional situations.
However, as in the case of physical disasters, it is difficult to convince people to work hard against what is invisible (i.e., manipulation) or seems improbable (e.g., a coup d’état). So, there is often a need for an even more tangible reason why to build a local network. The alienation in cities is one of them, as discussed below.
Since the design of global Internet-based platforms is guided mostly by commercial interests, it aims to create addiction and maximize online “stickiness”. Such platforms can thus undermine face-to-face interactions and our everyday contact with difference. They contribute, explicitly or implicitly, to render invisible “the different others”, even if they may be standing next to us (Wilken, 2010).
Moreover, for reasons of efficiency and usability, there is a high degree of uniformity in design imposed by the most popular platforms, which further threatens diversity and social sustainability. This tendency is reinforced by the strong competitive advantages that these platforms enjoy, due to the critical mass required and the economies of scale involved, which makes the innovation at the grassroots level more and more difficult.
Let us now consider another form of location-based communication, this time more long-term, between those living in the same neighbourhood. There are today a wide variety of online neighbourhood community platforms, like NextDoor in the U.S. and peuplade in France, but also Facebook-based approaches like the Bologna-based Social Street movement in Italy. However, the tendency of many urbanites to protect their anonymity and autonomy, by avoiding difference and interactions with strangers, appears as an important barrier for the proliferation of such platforms. “I don’t really want to interact with my neighbours” is the answer of many people being introduced to this idea. Should we accept and respect such tendencies or try to reverse them in the name of social cohesion, conviviality, and collective awareness? And which type of ICT solutions should we invest in for this, if any?
In this regard, DIY networking has some characteristics that could help designers to resolve the tension between anonymity and identity in more desirable ways than the corresponding Internet-based solutions, i.e., to create a balance between the anonymity offered by modern cities, and the social control in traditional local communities, by generating ICT-mediated location-based collective awareness with low commitment in terms of time and privacy.
The most relevant metaphor here is the sidewalk, which Jane Jacobs (1961) praised as a place for essential informal interactions between strangers that can achieve a very delicate balance between privacy and public exposure. If carefully designed, hybrid ICT applications that enable spontaneous information sharing between strangers can offer new ways to support the role of the sidewalk in contemporary cities, for generating local knowledge and a sense of belonging. But instead of relying on private ICT platforms managed by commercial companies, DIY networking offers the option to stimulate and empower citizens to use their creativity for setting up local freely accessible networks hosting context-specific collective awareness applications.
There are unlimited options for the design of such applications that are more or less close to the sidewalk metaphor. The types of information to be shared and the exact framing would depend on the context, but could include simple demographics (spoken languages, occupation, or gender); general preferences or location-based ones (favourite places, commerce or artistic activities, books, films, and music); multimedia material (audio, pictures, videos); opinions and thoughts on interesting perhaps controversial questions, even sensitive personal information since the wireless medium could allow for purely anonymous interactions.
Since all potential users of a local wireless network are in de facto physical proximity, the option of anonymity, in addition to being technically feasible, is much less intimidating than in the case of global online platforms. This can facilitate playful and open interactions between people who would enjoy exchanging information with those in proximity but with “no private commitments” (Jacobs, 1961).
By construction, a DIY network needs to be set up and deployed by someone who has access to the built environment: a resident with a well-located balcony, an owner of a central store, a local institution with the authority to install street furniture. This can ensure that the local network is designed and customized by members of the community, ideally in an inclusive and convivial manner.
Being tangible infrastructure themselves, wireless networks can be naturally embedded in other artefacts and urban interventions, such as a public display, a colored bench, a phone booth, or even a mobile kiosk, and they can create naturally hybrid spaces that encourage ephemeral participation and playful engagement. This also enables the inclusion of non-users, as in the case of the Berlin Design Research Lab’s hybrid letterbox and polylogue .
Finally, a local ICT infrastructure which facilitates the communication exclusively between those that can easily meet face-to-face could be designed exactly for this purpose. Thus, energy efficiency will not be only a result of the lower energy required when communication takes place through a local wireless network (as described above), but also a product of people’s ability to spend more time for their social and psychological needs away from their computers and mobile devices.
Despite the numerous research studies and different technological solutions for the design of ICTs for communities, there is a long way to understanding the complexity introduced by the hybridity of space. For this, the question of interdisciplinarity becomes urgent. Social scientists need to become more aware of the capabilities of technology and they have to get involved in the design processes, while engineers need to get in touch with legitimate local social issues and their inherent complexity, going beyond simple optimization techniques and data analysis (see Antoniadis, et al., 2014).
DIY networks offer a great opportunity for people to understand important aspects of ICTs and learn by doing, in collaboration, the importance of design in shaping human behaviour, the role of (self-)governance, ownership and data management, and many more. Because of their novelty and constraints they can also become a very interesting “boundary object” that can facilitate interdisciplinary and transdisciplinary interactions as demonstrated by a series of related events (Antoniadis, et al., 2015) and different related communities formed over the last years that meet in various big events like the Transmediale festival in Berlin , organize workshops and conferences like Alternet and the International interdisciplinary conference on DIY networking , and produce collective publications like the special issue on alternative Internets in the Journal of Peer Production .
Moreover, two new EU Horizon2020 projects aim to make one step further and develop a transdisciplinary research framework, bringing together researchers and activists from different areas, around concrete case studies that can benefit from the use of DIY networking as a means rather than an end.
MAZI (meaning “together” in Greek), http://mazizone.eu, takes the perspective of existing grassroots initiatives whose goals are social and political in nature, and explores ways that DIY networking technologies can help pursue them. For this, it follows a transdisciplinary methodology that brings together different aspects of design (engineering, human-computer interaction, interaction design, design research, and urban design) around the development of a DIY networking toolkit, and four concrete pilot studies: Berlin’s urban garden prinzessinnengarten and neighbourhood academy, Zurich’s cooperative housing and living project Kraftwerk1, London’s network of local communities in Deptford, and the nomadic group unMonastery.
netCommons, http://netcommons.eu, sets as its starting point the existing large-scale (wireless) community networks in Europe, such as Guifi.net, Ninux.org, and Sarantaporo.gr which are perceived today mostly as gateways to the Internet. The project brings together communities of mostly engineers and technology enthusiasts, and experts on legal, economic, political, and urban aspects that can help those networks to become more resilient against recent developments that threaten their existence, and more inclusive and useful for the local communities around them (beyond Internet connectivity).
A difference between the above EU-funded projects and the SEED grants initiative in the US mentioned above, is that the European Commission and especially the CAPS (collective awareness platforms for sustainability and social innovation) platform try to keep a balance between research and action, between the involvement of academic institutions and civil society organizations. Such transdisciplinary collaborations around concrete case studies can lead to very productive win-win situations: Researchers can have access to valuable data for producing knowledge on the role of ICTs and their design, data that today are available only privately by big corporations; grassroots organizations and activists can have access to this knowledge for promoting their objectives toward the common good and also benefit from the research and innovation funding that is too precious to be invested only in industry-driven approaches like the smart city project.
The key premise behind this work is that one should carefully distinguish between the two main roles of a DIY network, Internet access vs. local services, if interdisciplinary and transdisciplinary interactions are to be productive and fruitful, and lead to the development of tools for communities that are both powerful and convivial (à la Illich). The focus here was on local services, as this is the role that is less understood. This lack of awareness reduces the motivation required to produce the appropriate tools that will enable grassroots efforts to compete, in terms of usability and marketing, with big corporations with enormous budgets and unlimited human resources.
This is the main reason why the arguments above rest on the assumption that access to the Internet is a non-existent problem. However, in reality this assumption does not hold, as in many cases like rural and disadvantaged urban areas, equitable Internet connectivity of good quality is still an unresolved issue. So, Internet access for all (Crowcroft, et al., 2015; Saldana, 2016), is indeed an additional reason why DIY networking is an important technology, and why people from different perspectives need to join forces to make it more accessible and better understood.
I propose here to clearly separate the two distinct roles of DIY networking, and conceive them as different but complementary services. This will reinforce various existing efforts that lie mostly on the one or the other side and avoid confusions and misconceptions. But to escape from the trap of “localism” (Sharzer, 2012), there is a need of a global vision and the necessary tools that can be easily replicated and customized, similarly to the concept of “Design global, manufacture local” proposed by Kostakis, et al. (2015). However, the global is a very powerful attractor, and also subject to various traps. Thus, it is critical to define the appropriate boundaries between these two domains of action.
This is nicely exemplified by the case of community or regional currencies (Kennedy, et al., 2012). More specifically, one of the most important design variables of a regional currency is its relation to the global economy, the so-called fiat/national currencies. Time Banks propose a completely different alternative model defining time as the main measure of value of one’s effort in community activities, and such an exchange is not possible at all. Other local currencies like Brixton and Bristol pound, as well as Bitcoin, are based on the value and prices defined by the global economy, and allow exchanges between the local and the fiat currency, sometimes with some “penalty”, hardly making them a true alternative. Between these two extremes, the most successful in terms of scale and impact currencies to date are mutual credit systems like the Swiss WIR and more recently the Italian Sardex.net (Littera, et al., forthcoming) that do not allow any conversion with fiat currencies while at the same time being perfectly compatible with the local regulations (e.g., tax payments in fiat currencies).
Independently from their design details, community currencies also face the same question “Why?” Why not just use fiat currencies that are perfectly capable of supporting the exchange of goods and services in localities, exactly as the Internet is capable of supporting local online interactions? The answer is similarly complex and multi-faceted as it is the case with DIY networking. It is important for this paper that, in this case, we need to very carefully distinguish between the global and the local, in order for the local aspect to be able to survive the global forces that try to undermine it, while at the same time contributing toward a global vision, an utopia.
P.M.’s 1985 book Bolo’bolo proposes such a “realistic utopia”, focusing on the question of ecology and sustainability, suggesting a layered organization of life based on a nucleus self-organized neighbourhood, a bolo, of approximately 500 people. Although utopian in nature, it has inspired numerous cooperative housing and living projects in Zurich like Kraftwerk1, Kalkbreite, and more (Apostol, 2015). A key principle of these initiatives is the “medium” instead of “small” scale of similar projects, and the requirement for diversity and inclusiveness in their membership. They have achieved rather radical objectives such as co-ownership, significant levels of sharing of common spaces, and democratic participation, but neither complementary currencies, nor DIY “complementary” networks, like the ones advocated in this paper, are part of these co-housing projects.
A possible reason behind this lack of integration between different innovative forms of local action is the fact that none of them is itself well understood and they all struggle to gain followers, often interacting more with like-minded people far away than the oft-reserved “neighbours”. This brings to mind the two types of wireless links described above, which express nicely the equal importance of the global and the local, and the tensions between them. First, the unidirectional long-distance links are important because they help the expansion of the network and the creation of a global community around the design of the appropriate tools. And second, the local access points, with their omni-directional or sector antennas, are equally important because they can bring in contact everyone in a certain location, with no coordination and interests that are not necessarily common. They can help to raise collective awareness and engage into the city project those that are not already converted.
Panayotis Antoniadis is the co-founder of the Zurich-based nonprofit organization nethood.org, which is involved in two EU Horizon2020 projects (2016–2018), MAZI — http://mazizone.eu, and netCommons — http://netcommons.eu. He has an interdisciplinary profile with background on the design and implementation of distributed systems (Computer Science Department, University of Crete), Ph.D. on the economics of peer-to-peer networks (Athens University of Economics and Business), post-doc on policies for the federation of shared virtualized infrastructures (UPMC Sorbonne Universites), and interdisciplinary research on the intersection of urban studies and computer science (ETH Zurich). Panayotis is currently active in the organization of interdisciplinary events that aim to bring together researchers, practitioners, and activists around the design of tools for self-organization in different areas of local action: DIY networking, cooperative housing, complementary currencies, social infrastructures, and community-supported agriculture.
6. The “off-line networks” community has gathered twice at the Transmediale Festival in 2015, https://transmediale.de/content/offline-networks-unite, and 2016 under a new more accurate label “off-the-cloud”, https://2016.transmediale.de/content/off-the-cloud-zone.
8. http://diynetworking.net; see also http://www.dagstuhl.de/en/program/calendar/semhp/?semnr=14042.
Panayotis Antoniadis, Ileana Apostol, Mark Gaved, Michael Smyth, and Andreas Unteidig, 2015. “DIY networking as a facilitator for interdisciplinary research on the hybrid city,” Proceedings of Hybrid City III Conference, pp. 65–73, and at http://nethood.org/publications/antoniadis_et_al_DIYnetworking_HybridCity2015.pdf, accessed 10 November 2016.
Panayotis Antoniadis and Ileana Apostol, 2014. “The right(s) to the hybrid city and the role of DIY networking,” Journal of Community Informatics, volume 10, number 3, at http://ci-journal.net/index.php/ciej/article/view/1092/1113, accessed 18 March 2016.
Panayotis Antoniadis, Jörg Ott, and Andrea Passarella (editors), 2014. “Do it yourself networking: An interdisciplinary approach,” Dagstuhl seminar 14042; Dagstuhl reports, volume 4, number 1, pp. 125–151, at http://drops.dagstuhl.de/opus/volltexte/2014/4538/, accessed 18 March 2016.
Ileana Apostol, 2015. “Urbanity and the right to difference,” Studies in History and Theory of Architecture, volume 3, at http://sita.uauim.ro/3/a/33/, accessed 10 November 2016.
Roger Baig, Ramon Roca, Felix Freitag, and Leandro Navarro, 2015. “guifi.net, a crowdsourced network infrastructure held in common,“ Computer Networks, volume 90, issue C, pp. 150–165.
doi: http://dx.doi.org/10.1016/j.comnet.2015.07.009, accessed 10 November 2016.
Jonathan Baldwin, 2011. ”TidePools: Social WiFi,“ Parsons School of Design, Master’s thesis in design and technology, at http://www.scribd.com/doc/94601219/TidePools-Social-WiFi-Thesis, accessed 18 March 2016.
Stefano Basagni, Marco Conti, Silvia Giordano, and Ivan Stojmenovic, 2013. Mobile ad hoc networking: Cutting edge directions. Second edition. Hoboken, N.J.: Wiley-IEEE Press.
Maria Bina, 2007. “Wireless community networks: A case of modern collective action,” Ph.D. dissertation, Athens University of Economics and Business, Department of Management Science and Technology, at http://www.wirelessresearch.eu/docs/thesis_bina.pdf, accessed 10 November 2016.
Greta Byrum, 2015. “What are community wireless networks for?” Journal of Community Informatics, volume 11, number 3, at http://ci-journal.net/index.php/ciej/article/view/1227/1167, accessed 18 March 2016.
Jon Crowcroft, Adam Wolisz, and Arjuna Sathiaseelan (editors), 2015. “Towards an affordable Internet access for everyone: The quest for enabling universal service commitment,” Dagstuhl Reports, volume 4, number 11, at http://drops.dagstuhl.de/opus/volltexte/2015/4971/, accessed 10 November 2016.
Primavera De Filippi and Félix Tréguer, 2015. “Expanding the Internet commons: The subversive potential of wireless community networks,” Journal of Peer Production, number 6, at http://peerproduction.net/issues/issue-6-disruption-and-the-law/peer-reviewed-articles/expanding-the-internet-commons-the-subversive-potential-of-wireless-community-networks/, accessed 10 November 2016.
Daphne Dragona and Dimitris Charitos, 2016. “Going off-the-cloud: The role of art in the development of a user-owned & controlled connected world,” Journal of Peer Production, number 9, at http://peerproduction.net/issues/issue-9-alternative-internets/peer-reviewed-papers/going-off-the-cloud/, accessed 10 November 2016.
Mélanie Dulong de Rosnay and Francesca Musiani, 2016. “Towards a (de)centralisation-based typology of peer production,” tripleC, volume 14, number 1, pp. 189–207, at http://www.triple-c.at/index.php/tripleC/article/view/728, accessed 10 November 2016.
Christian Fuchs and Marisol Sandoval, 2014. “Digital workers of the world unite! A framework for critically theorising and analysing digital labour,” tripleC, volume 12, number 2, at http://www.triple-c.at/index.php/tripleC/article/view/549, accessed 10 November 2016.
Mark Gaved, 2011. “An investigation into grassroots initiated networked communities as a means of addressing the digital divide,” Ph.D. thesis, Open University, at http://oro.open.ac.uk/29696/, accessed 10 November 2016.
Jane Jacobs, 1961. The death and life of great American cities. New York: Random House.
Katrina Jungnickel, 2014. DiY WiFi: Re-imagining connectivity. London: Palgrave Pivot.
Ivan Illich, 1973. Tools for conviviality. New York: Harper & Row.
Margrit Kennedy, Bernard Lietaer, and John Rogers, 2012. People money: The promise of regional currencies. Axminster, Devon: Triarchy Press.
Vasilis Kostakis, Vasilis Niarosa, George Dafermos, and Michel Bauwens, 2015. “Design global, manufacture local: Exploring the contours of an emerging productive model,” Futures, volume 73, pp. 126–135.
doi: http://dx.doi.org/10.1016/j.futures.2015.09.001, accessed 10 November 2016.
Benjamin Lennett, Laura Forlano, and Alison Powell, 2011. “From the digital divide to digital excellence,” New America Foundation (1 February), at https://www.newamerica.org/oti/policy-papers/from-the-digital-divide-to-digital-excellence/, accessed 10 November 2016.
Giuseppe Littera, Laura Sartori, Pailo Dini, and Panayotis Antoniadis, forthcoming. “From an idea to a scalable working model: Merging economic benefits with social values in Sardex,” International Journal of Community Currency Research, at https://ijccr.net; version at http://eprints.lse.ac.uk/59406/, accessed 10 November 2016.
Armin Medosch, 2015. “Cities of the sun: Urban revolutions and the network commons,” keynote talk at Hybrid City III Conference, Athens (17–19 September), at http://www.thenextlayer.org/node/1358, accessed 18 March 2016.
Diana Nucera, Ryan Gerety, and Andy Gunn, 2016. “Community technology retrospective: 2015 SEED grants” (26 January), at https://www.newamerica.org/oti/community-technology-retrospective-2015-seed-grants/, accessed 18 March 2016.
P.M., 1985. Bolo’bolo. New York: Semiotext(e).
Alison Powell, 2008. “WiFi publics: Producing community and technology,” Information, Communication & Society, volume 11, number 8, pp. 1,068–1,088.
doi: http://dx.doi.org/10.1080/13691180802258746, accessed 10 November 2016.
Alison Powell, 2006. “‘Last mile or local innovation?’ Canadian perspectives on community wireless networking as civic participation,” Canadian Research Alliance for Community Innovation and Networking (CRACIN), Working Papers, number 18, at http://hdl.handle.net/1807/32137, accessed 10 November 2016.
José Saldana (editor), 2016. “Alternative network deployments: Taxonomy, characterization, technologies and architectures,” https://datatracker.ietf.org/doc/rfc7962/, accessed 10 November 2016.
Christian Sandvig, 2012. “What are community networks an example of? A response,” In: Andrew Clement, Michael Gurstein, Graham Longford, Marita Moll, and Leslie Shade (editors). Connecting Canadians: Investigations in community informatics. Edmonton: AU Press, Athabasca University, pp. 133–142.
Christian Sandvig, 2004. “An initial assessment of cooperative action in WiFi networking,” Telecommunications Policy, volume 28, numbers 7–8, pp. 579–602.
doi: http://dx.doi.org/10.1016/j.telpol.2004.05.006, accessed 10 November 2016.
Gwen Shaffer, 2011. “Banding together for bandwidth: An analysis of survey results from wireless community network participants,” First Monday, volume 16, number 5, at http://firstmonday.org/article/view/3331/2956, accessed 10 November 2016.
doi: http://dx.doi.org/10.5210/fm.v16i5.3331, accessed 10 November 2016.
Greg Sharzer, 2012. No local: Why small-scale alternatives won’t change the world. Alresford, Hampshire: John Hunt Publishing.
Felix Trèguer, Panayotis Antoniadis, and Johan Söderberg, 2016. “Alt. vs. ctrl.: Editorial notes for the JoPP issue on alternative Internets,” Journal of Peer Production, number 9, at http://peerproduction.net/issues/issue-9-alternative-internets/editorial-notes/, accessed 10 November 2016.
Zeynep Tufekci, 2014. “Engineering the public: Big data, surveillance and computational politics,” First Monday, volume 19, number 7, at http://firstmonday.org/article/view/4901/4097, accessed 10 November 2016.
doi: http://dx.doi.org/10.5210/fm.v19i7.4901, accessed 10 November 2016.
Ellen van Oost, Stefan Verhaegh, and Nelly Oudshoorn, 2009. “From innovation community to community innovation: User-initiated innovation in Wireless Leiden,” Science, Technology, & Human Values, volume 34, number 2, pp. 182–205.
doi: http://dx.doi.org/10.1177/0162243907311556, accessed 10 November 2016.
Rowan Wilken, 2010. “A community of strangers? Mobile media, art, tactility and urban encounters with the other,” Mobilities, volume 5, number 4, pp. 449–478.
doi: http://dx.doi.org/10.1080/17450101.2010.510330, accessed 10 November 2016.
Received 6 November 2016; accepted 10 November 2016.
|
from __future__ import print_function, absolute_import
import os, sys, pydoc
import numpy as np # noqa (API import)
# Directory containing this file; used to locate a sibling 'param' checkout.
_cwd = os.path.abspath(os.path.split(__file__)[0])
# Put a source checkout of 'param' (kept next to this package) ahead of any
# installed version on sys.path before importing it.
sys.path.insert(0, os.path.join(_cwd, '..', 'param'))
import param
# Package version; the "$Format:%h$" placeholder is substituted with the
# commit hash by git-archive at export time.
__version__ = param.Version(release=(1,5,0), fpath=__file__,
                            commit="$Format:%h$", reponame='holoviews')
from .core import archive # noqa (API import)
from .core.dimension import OrderedDict, Dimension # noqa (API import)
from .core.boundingregion import BoundingBox # noqa (API import)
from .core.options import Options, Store, StoreOptions # noqa (API import)
from .core.layout import * # noqa (API import)
from .core.element import * # noqa (API import)
from .core.overlay import * # noqa (API import)
from .core.tree import * # noqa (API import)
from .core.spaces import * # noqa (API import)
from .interface import * # noqa (API import)
from .operation import ElementOperation, MapOperation, TreeOperation # noqa (API import)
from .element import * # noqa (API import)
# Suppress warnings generated by NumPy in matplotlib.
# Expected to be fixed in next matplotlib release.
import warnings
warnings.filterwarnings("ignore",
                        message="elementwise comparison failed; returning scalar instead")
# Load the IPython notebook extension when IPython is available; otherwise
# install a stub that raises a helpful error when called.
try:
    import IPython # noqa (API import)
    from .ipython import notebook_extension
except ImportError as e:
    class notebook_extension(param.ParameterizedFunction):
        """Stub used when IPython is not installed."""
        def __call__(self, *args, **opts):
            raise Exception("IPython notebook not available")
    # Only swallow the error when IPython itself is missing; re-raise any
    # other import failure. Python 2 reports "No module named IPython"
    # while Python 3 quotes the name ("No module named 'IPython'"), so the
    # old exact-match against the Python 2 message broke Python 3 installs
    # without IPython.
    if str(e) not in ("No module named IPython",
                      "No module named 'IPython'"):
        raise e
# A single holoviews.rc file may be executed if found.
# Candidate locations are tried in order; only the first file that can be
# opened is executed (note the `break` after a successful open/exec).
for rcfile in [os.environ.get("HOLOVIEWSRC", ''),
               "~/.holoviews.rc",
               "~/.config/holoviews/holoviews.rc"]:
    try:
        filename = os.path.expanduser(rcfile)
        with open(filename) as f:
            code = compile(f.read(), filename, 'exec')
            try:
                # Executed in this module's namespace, so an rc file can
                # call holoviews functions and set module-level state.
                exec(code)
            except Exception as e:
                # A broken rc file warns but never prevents import.
                print("Warning: Could not load %r [%r]" % (filename, str(e)))
        break
    except IOError:
        # Missing or unreadable candidate: silently try the next location.
        pass
def help(obj, visualization=True, ansi=True, backend='matplotlib',
         recursive=False, pattern=None):
    """
    Extended version of the built-in help that supports parameterized
    functions and objects. A pattern (regular expression) may be used to
    filter the output and if recursive is set to True, documentation for
    the supplied object is shown. Note that the recursive option will
    only work with an object instance and not a class.

    If ansi is set to False, all ANSI color codes are stripped out.
    """
    hint = ( "\nTo view the visualization options applicable to this "
             "object or class, use:\n\n"
             "   holoviews.help(obj, visualization=True)\n\n")
    info = Store.info(obj, ansi=ansi, backend=backend,
                      visualization=visualization,
                      recursive=recursive, pattern=pattern)
    if not info:
        # Nothing holoviews-specific to report: defer to standard help.
        pydoc.help(obj)
        return
    # Prepend the visualization hint only when visualization info was
    # explicitly switched off.
    prefix = hint if visualization is False else ''
    print(prefix + info)
|
I love love love this band and it doesn’t hurt that the lead singer, Julie Budet, is always fun to look at. Her style has a lot of color and a lot of humor in it, which of course I appreciate. There are often times when I find I align myself heavily with her style of dressing – when I feel like putting on the brightest, most obnoxious colors in my closet and dancing around to lesser-known ’80s hits or old school hip hop. For this reason, I made a collage of my favorite Julie pics below. Oh and this was kind of a fun video where Julie takes Myspace shopping.
If you like Yelle, check out http://www.yelleusa.com for tons of pictures, videos and information about her.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Arne Neumann <discoursegraphs.programming@arne.cl>
"""
This module contains code to generate figures of RST trees in Latex
(using the rst.sty package).
"""
# Python 2/3 compatibility
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import codecs
import string
import re
import nltk
from discoursegraphs.readwrite.rst.rs3.rs3tree import RSTTree
# Pseudo-relation name marking a nucleus shared by several satellites.
MULTISAT_RELNAME = 'MONONUC-MULTISAT'
# Latex templates from the rst.sty package. Raw strings are used so that
# backslashes in the Latex commands are taken literally instead of relying
# on invalid escape sequences (e.g. '\m', '\(') being passed through.
RSTSEGMENT_TEMPLATE = string.Template(r"""\rstsegment{$segment}""")  # \rstsegment{Foo}
NUC_TEMPLATE = string.Template(r"""{}{$nucleus}""")
SAT_TEMPLATE = string.Template(r"""{$relation}{$satellite}""")
MULTINUC_TEMPLATE = string.Template(r"""\multirel{$relation}$nucleus_segments""")
# Matches the start of an rst.sty subtree command ('\dirrel' or '\multirel').
RSTLATEX_TREE_RE = re.compile(r"\\(dirrel|multirel)")
class RSTLatexFileWriter(object):
    """Converts an RST tree into rst.sty Latex markup and, optionally,
    writes the result to a file.
    """
    def __init__(self, tree, output_filepath=None):
        self.tree = tree
        # Converted Latex representation of the tree.
        self.rstlatextree = rsttree2rstlatex(tree)
        if output_filepath is not None:
            self._write(output_filepath)

    def _write(self, path):
        """Write the Latex markup (with a trailing newline) to `path`."""
        with codecs.open(path, 'w', 'utf-8') as outfile:
            outfile.write(self.rstlatextree + '\n')

    def __str__(self):
        return self.rstlatextree
def is_nltktreelike(obj):
    """Return True iff the given object exposes a callable ``label``
    attribute, i.e. it behaves like an nltk.Tree.

    This is a "duck-typing" workaround, as most RST tree classes embed
    an nltk.Tree rather than inherit from it.
    """
    if not hasattr(obj, 'label'):
        return False
    return callable(obj.label)
def get_node_type(tree):
    """Returns the type of the root node of the given RST tree
    (one of 'N', 'S', 'relation' or 'edu').

    Raises ValueError for objects that are neither tree-like nor strings.
    """
    if is_nltktreelike(tree):
        if tree.label() in ('N', 'S'):
            return tree.label()
        else:
            # any other label is taken to be a relation name
            return 'relation'
    elif isinstance(tree, str):
        # Fixed: `basestring` only exists on Python 2 and raised a
        # NameError under Python 3. With `unicode_literals` in effect,
        # `str` covers the EDU text strings handled here.
        return 'edu'
    else:
        raise ValueError("Unknown tree/node type: {}".format(type(tree)))
def is_edu_segment(rstlatex_string):
    """Return True iff the given string contains no rst.sty subtree
    command (\\dirrel or \\multirel), i.e. it is a plain EDU segment."""
    subtree_match = RSTLATEX_TREE_RE.search(rstlatex_string)
    return subtree_match is None
def wrap_edu_segment(edu_segment):
    """Return the text of an elementary discourse unit (EDU) wrapped in
    the rst.sty segment command."""
    return RSTSEGMENT_TEMPLATE.substitute(segment=edu_segment)
def make_nucsat(relname, nuc_types, elements):
    """Render a standard RST relation (one nucleus, one satellite) as an
    rst.sty Latex string.

    `nuc_types` and `elements` are parallel two-element lists; each
    nuc_type is 'N' or 'S' and each element is an already-rendered
    subtree or plain EDU text.
    """
    assert len(elements) == 2 and len(nuc_types) == 2, \
        "A nucsat relation must have two elements."
    assert set(nuc_types) == set(['N', 'S']), \
        "A nucsat relation must consist of one nucleus and one satellite."

    parts = ["\\dirrel"]
    for nuc_type, element in zip(nuc_types, elements):
        # plain EDU text still needs to be wrapped in a segment command
        if is_edu_segment(element):
            element = wrap_edu_segment(element)
        if nuc_type == 'N':
            rendered = NUC_TEMPLATE.substitute(nucleus=element)
        else:
            rendered = SAT_TEMPLATE.substitute(satellite=element, relation=relname)
        parts.append(rendered)
    return "\n\t".join(parts)
return result
def make_multinuc(relname, nucleii):
    """Render a multi-nuclear RST relation as an rst.sty Latex string."""
    wrapped = [wrap_edu_segment(nuc) if is_edu_segment(nuc) else nuc
               for nuc in nucleii]
    # each nucleus becomes a braced group on its own (tab-indented) line
    nucleii_string = "\n\t" + "\n\t".join('{' + nuc + '}' for nuc in wrapped)
    return MULTINUC_TEMPLATE.substitute(relation=relname,
                                        nucleus_segments=nucleii_string)
def make_multisat(nucsat_tuples):
    """Creates a rst.sty Latex string representation of a multi-satellite RST subtree
    (i.e. merge a set of nucleus-satellite relations that share the same nucleus
    into one subtree).

    Each element of `nucsat_tuples` is a (relation name, list of 'N'/'S'
    markers, list of rendered elements) triple. All triples are assumed
    to share the same nucleus; only the first triple's nucleus is kept.
    """
    nucsat_tuples = [tup for tup in nucsat_tuples] # unpack the iterable, so we can check its length
    assert len(nucsat_tuples) > 1, \
        "A multisat relation bundle must contain more than one relation"
    result = "\dirrel\n\t"
    first_relation, remaining_relations = nucsat_tuples[0], nucsat_tuples[1:]
    relname, nuc_types, elements = first_relation
    # Position of the shared nucleus among the output segments. Satellites
    # from the remaining relations are placed before or after it depending
    # on their own position relative to the nucleus; `current_nucleus_pos`
    # tracks how the nucleus shifts as satellites are inserted before it.
    first_nucleus_pos = current_nucleus_pos = nuc_types.index('N')
    result_segments = []
    # add elements (nucleus and satellite) from first relation to resulting (sub)tree
    for i, nuc_type in enumerate(nuc_types):
        element = elements[i]
        if is_edu_segment(element):
            element = wrap_edu_segment(element)
        if nuc_type == 'N':
            result_segments.append(NUC_TEMPLATE.substitute(nucleus=element))
        else:
            result_segments.append(SAT_TEMPLATE.substitute(satellite=element, relation=relname))
    # reorder elements of the remaining relation and add them to the resulting (sub)tree
    for (relname, nuc_types, elements) in remaining_relations:
        for i, nuc_type in enumerate(nuc_types):
            if nuc_type == 'N': # all relations share the same nucleus, so we don't need to reprocess it.
                continue
            else:
                element = elements[i]
                if is_edu_segment(element):
                    element = wrap_edu_segment(element)
                result_segment = SAT_TEMPLATE.substitute(satellite=element, relation=relname)
                if i < first_nucleus_pos: # satellite comes before the nucleus
                    result_segments.insert(current_nucleus_pos, result_segment)
                    # the nucleus moved one slot to the right
                    current_nucleus_pos += 1
                else:
                    result_segments.append(result_segment)
    return result + '\n\t'.join(result_segments)
def rsttree2rstlatex(tree, indent_level=0):
    """Recursively convert an RST tree into an rst.sty Latex string.

    Parameters
    ----------
    tree : nltk.Tree-like or str
        an RST (sub)tree or a plain EDU string
    indent_level : int
        number of tabs by which the generated markup is indented

    Returns
    -------
    str
        rst.sty Latex markup for the given (sub)tree

    Raises
    ------
    ValueError
        if a node of unknown type is encountered
    """
    node_type = get_node_type(tree)
    if node_type == 'relation':
        relname = tree.label()
        # sanity check: all children of a relation must be marked 'N' or 'S'
        expected_types = set(['N', 'S'])
        child_node_types = [get_node_type(child) for child in tree]
        observed_types = set(child_node_types)
        unexpected_types = observed_types.difference(expected_types)
        assert unexpected_types == set(), \
            "Observed types ({}) contain unexpected types ({})".format(observed_types, unexpected_types)

        # render the subtrees below the N/S wrapper nodes
        subtree_strings = [rsttree2rstlatex(grandchild, indent_level=indent_level+1)
                           for child in tree
                           for grandchild in child]

        if observed_types == set('N'):  # relation only consists of nucleii
            return indent_tab(make_multinuc(relname=relname, nucleii=subtree_strings), indent_level)
        elif relname == MULTISAT_RELNAME:  # multiple relations sharing the same nucleus
            relations = [grandchild for child in tree for grandchild in child]
            relnames = [rel.label() for rel in relations]
            nuctypes_per_relation = [[elem.label() for elem in relation] for relation in relations]
            subtree_strings_per_relation = [[rsttree2rstlatex(elem[0]) for elem in relation] for relation in relations]
            nucsat_tuples = zip(relnames, nuctypes_per_relation, subtree_strings_per_relation)
            return indent_tab(make_multisat(nucsat_tuples), indent_level)
        else:  # a "normal" relation between one nucleus and one satellite
            assert len(child_node_types) == 2, "A nuc/sat relationship must consist of two elements"
            return indent_tab(make_nucsat(relname, child_node_types, subtree_strings), indent_level)
    elif node_type == 'edu':
        # normalize internal whitespace of the EDU text
        return " ".join(tree.split())
    elif node_type in ('N', 'S'):  # a single segment not in any relation
        # Bug fix: the previous inline template used "\rstsegment" in a
        # non-raw string, so Python parsed '\r' as a carriage return and
        # the emitted Latex command was broken. Reuse the module-level
        # template instead.
        return indent_tab(RSTSEGMENT_TEMPLATE.substitute(segment=tree[0]), indent_level)
    else:
        raise ValueError("Can't handle this node: {}".format(tree.label()))
def indent(text, amount, ch=' '):
    """Prefix every line of *text* with *amount* copies of *ch*."""
    prefix = ch * amount
    indented_lines = [prefix + line for line in text.splitlines(True)]
    return ''.join(indented_lines)
def indent_tab(text, number):
    """Indent every line of *text* with the given number of tab characters."""
    tabs = '\t' * number
    return ''.join(tabs + line for line in text.splitlines(True))
def write_rstlatex(tree, output_file=None):
    """Converts an RST tree into a rst.sty Latex string representation.

    NOTE(review): despite the name and docstring, this returns the
    RSTLatexFileWriter instance itself, not a Latex string -- presumably
    the writer emits to *output_file* as a side effect; confirm against
    the RSTLatexFileWriter implementation.
    """
    return RSTLatexFileWriter(tree, output_filepath=output_file)
|
Young girls mostly prefer to use a lip gloss to give an attractive shimmer and a hint of color to their lips. Also, girls often look better in a lip gloss than in a lipstick. On top of that, one can use a lip gloss on a daily basis and apply lipstick during special occasions only.
This beauty product is hugely popular amongst youngsters as it gives a natural look to their lips and it is very easy to apply also. But before applying a gloss, you need to buy the perfect shade and must have some knowledge regarding how to apply it.
As there are numerous brands of lip glosses with varying prices, you need to do your research properly before buying one. Be sure of the shade, texture and smoothness of the gloss and then only buy it.
When applying a gloss to your lips you need to bear in mind that its shininess will not remain forever. You need to re-apply gloss after a few hours to maintain the natural look of your lips.
To conclude, get a lip gloss that goes well with your lip color and lip tone and apply it with patience to give your lips a natural look.
This entry was posted on Friday, November 9th, 2012 at 10:13 am and is filed under Lips. You can follow any responses to this entry through the RSS 2.0 feed. You can skip to the end and leave a response. Pinging is currently not allowed.
|
from time import time
from zeus.utils.metrics import Counter, HitCounter, gauge
def test_hit_counter():
    now = int(time())
    counter = HitCounter(size=3)

    def assert_counts(total, last_1, last_2):
        # Check the all-bucket count plus the 1- and 2-bucket windows.
        assert counter.count(current_ts=now) == total
        assert counter.count(1, current_ts=now) == last_1
        assert counter.count(2, current_ts=now) == last_2

    counter.incr(current_ts=now)
    counter.incr(current_ts=now)
    assert_counts(2, 2, 2)

    now += 1
    counter.incr(current_ts=now)
    assert_counts(3, 1, 3)

    now += 1
    counter.incr(current_ts=now)
    assert_counts(4, 1, 2)

    now += 1
    counter.incr(current_ts=now)
    assert_counts(3, 1, 2)

    # dont incr here as it will force a truncate, and we just want to test
    # the fact that count skips invalid buckets
    now += 1
    assert_counts(2, 0, 1)

    now += 1
    assert_counts(1, 0, 0)

    now += 1
    assert_counts(0, 0, 0)
def test_gauge():
    # Nested gauges increment on enter and decrement on exit.
    active = Counter()
    with gauge(active):
        assert active.value == 1
        with gauge(active):
            assert active.value == 2
        assert active.value == 1
    assert active.value == 0
|
Velocity USA-built wheels for your 26"-wheeled Rivendell. The wheels are hand-assembled in Michigan, USA, with our-design Velocity Atlas rims, DT Swiss double butted spokes, and solid, dependable, Shimano Deore hubs. Includes QR skewers.
|
import sys
import pkgutil
import time
import re
from datetime import datetime
from email.utils import parsedate_tz, mktime_tz
from inbox.log import get_logger
from inbox.providers import providers
class ProviderSpecificException(Exception):
    # Marker exception type; judging by its name it is raised for errors
    # specific to a single mail provider's backend (no raisers visible in
    # this module -- confirm against callers).
    pass
def or_none(value, selector):
    """Apply *selector* to *value*, propagating None unchanged."""
    return None if value is None else selector(value)
def strip_plaintext_quote(text):
    """
    Strip out quoted text with no inline responses.

    If the message ends in a run of '>'-quoted lines, everything from the
    line *before* the first quoted line (assumed to be a quote header such
    as "On ... wrote:") onwards is dropped. If unquoted text follows a
    quoted block (an inline response), the text is returned unchanged.

    TODO: Make sure that the line before the quote looks vaguely like
    a quote header. May be hard to do in an internationalized manner?
    """
    found_quote = False
    lines = text.strip().splitlines()
    quote_start = None
    for i, line in enumerate(lines):
        if line.startswith('>'):
            found_quote = True
            if quote_start is None:
                quote_start = i
        else:
            found_quote = False

    if found_quote:
        # BUGFIX: clamp the slice bound at 0. When the text *started* with a
        # quote (quote_start == 0), lines[:quote_start - 1] was lines[:-1],
        # which wrongly kept all but the last quoted line.
        return '\n'.join(lines[:max(quote_start - 1, 0)])
    else:
        return text
def parse_ml_headers(headers):
    """
    Parse the mailing list headers described in RFC 4021,
    these headers are optional (RFC 2369).
    """
    ml_header_names = ('List-Archive', 'List-Help', 'List-Id', 'List-Owner',
                       'List-Post', 'List-Subscribe', 'List-Unsubscribe')
    # Missing headers are mapped to None via dict.get().
    return {name: headers.get(name) for name in ml_header_names}
def parse_references(references, in_reply_to):
    """
    Parse a References: header and return a list of MessageIDs.

    The returned list also contains the MessageID from In-Reply-To if
    that header is present and not already listed.

    Parameters
    ----------
    references: string
        the contents of the References header
    in_reply_to: string
        the contents of the In-Reply-To header

    Returns
    -------
    list of MessageIds (strings) or an empty list.
    """
    # In-Reply-To may carry trailing comment text; only the first
    # whitespace-separated token is the MessageID.
    replyto = in_reply_to.split()[0] if in_reply_to else in_reply_to

    if not references:
        return [replyto] if replyto else []

    references = references.split()
    # BUGFIX: only append a real MessageID. Previously, a falsy
    # in_reply_to value ('' or None) was appended verbatim whenever
    # References was non-empty, polluting the result list.
    if replyto and replyto not in references:
        references.append(replyto)
    return references
def dt_to_timestamp(dt):
    """Convert a naive UTC datetime to integer seconds since the epoch."""
    epoch = datetime(1970, 1, 1)
    return int((dt - epoch).total_seconds())
def get_internaldate(date, received):
    """Get the message date, falling back to the Received header's date."""
    if date is None:
        # The Received header ends with "; <date>".
        _, date = received.split(';')
    # All in UTC
    return datetime.utcfromtimestamp(mktime_tz(parsedate_tz(date)))
def timed(fn):
    """ A decorator for timing methods. """
    def timed_fn(self, *args, **kwargs):
        start_time = time.time()
        ret = fn(self, *args, **kwargs)

        # Some classes (e.g. gmail.py) don't define self.log; fall back to
        # the module-level logger. BUGFIX: the original `if self.log:` left
        # fn_logger unbound (NameError) when self.log existed but was falsy.
        fn_logger = getattr(self, 'log', None) or get_logger()
        fn_logger.info('[timer] {0} took {1:.3f} seconds.'.format(
            str(fn), float(time.time() - start_time)))
        return ret
    return timed_fn
# Based on: http://stackoverflow.com/a/8556471
def load_modules(base_name, base_path):
    """
    Imports all modules underneath `base_module` in the module tree.

    Note that if submodules are located in different directory trees, you
    need to use `pkgutil.extend_path` to make all the folders appear in
    the module's `__path__`.

    Returns
    -------
    list
        All the modules in the base module tree.
    """
    loaded = []
    for importer, module_name, _ in pkgutil.iter_modules(base_path):
        qualified_name = '{}.{}'.format(base_name, module_name)
        if qualified_name in sys.modules:
            # Already imported: reuse the cached module object.
            module = sys.modules[qualified_name]
        else:
            module = importer.find_module(module_name).load_module(
                qualified_name)
        loaded.append(module)
    return loaded
def register_backends(base_name, base_path):
    """
    Dynamically loads all packages contained within thread
    backends module, including those by other module install paths
    """
    mod_for = {}
    for module in load_modules(base_name, base_path):
        if not hasattr(module, 'PROVIDER'):
            continue
        provider_name = module.PROVIDER
        if provider_name != 'generic':
            mod_for[provider_name] = module
            continue
        # The 'generic' module backs every provider declared with type
        # 'generic' that hasn't already registered a specific module.
        for p_name, p in providers.iteritems():
            if p.get('type', None) == 'generic' and p_name not in mod_for:
                mod_for[p_name] = module
    return mod_for
def cleanup_subject(subject_str):
    """Clean-up a message subject-line.

    For instance, 'Re: Re: Re: Birthday party' becomes 'Birthday party'."""
    if subject_str is None:
        return ''
    # TODO consider expanding to all
    # http://en.wikipedia.org/wiki/List_of_email_subject_abbreviations
    prefix_pattern = r"(?i)^((re|fw|fwd|aw|wg):\s*)+"
    return re.sub(prefix_pattern, "", subject_str)
|
Zambia is one of Southern Africa’s landlocked countries, sharing its borders with the Democratic Republic of Congo, Tanzania, Malawi, Mozambique, Zimbabwe, Botswana, Namibia and Angola. The landscape can be described as flat terrain that rises to a plateau of 8,000 ft. in the east and the climate of Zambia is generally very pleasant. The capital of Zambia is Lusaka and other cities include Kitwe, Ndola, Livingstone and Kabwe.
At the border between Livingstone, Zambia, and Victoria Falls, Zimbabwe, visitors enjoy the sights of the incredible Victoria Falls and the beauty of the great Zambezi River. The river is Africa’s greatest waterway, with extensive rapids and long streams which sustain hundreds of species of wildlife.
Zambia is rich in culture and the population is made up of more than 70 different ethnic groups. Some of the ethnic groups are small and only two of these groups are big enough to constitute at least 10% of the population. The majority of the Zambian population is made up of the Bantu-speaking clan. Zambia’s cultural diversity is another reason that makes Zambia one of the great African destinations to be visited.
Group Size: Departs Daily on Request.
Zambia’s oldest park is also the country’s largest. Proclaimed in 1950 and covering 22,400 km², Kafue National Park’s game viewing, bird watching and fishing are simply superb. Although only two hours drive from Livingstone, Kafue National Park remains relatively untouched and unexplored – perfect for a secluded getaway. Large quantities of wildlife are sustained by the life giving habitats of grassland plains, woodland forests and lush wetland systems. The Park boasts an astonishing diversity of antelope – more than anywhere else in Africa – including the rare blue and yellow-backed duiker, sitatunga and lechwe. Kafue is also renowned for its leopard population, which are frequently seen on night-drives, and one of the largest populations of Wild Dog in Africa.
Situated on the Zambian side of the spectacular Victoria Falls (Zimbabwe’s border is on the other side), the charming colonial town of Livingstone offers excellent accommodation facilities and boasts numerous adrenaline activities. Named after the legendary missionary and explorer Dr. David Livingstone - the first European to discover, name and tell the rest of world about the mighty Victoria Falls - the town of Livingstone is the ideal base from which to explore the magnificent falls and the game-rich surrounding National Parks. It is the main gateway into the region with access via the Livingstone Airport and is only 6.2 miles from Victoria Falls.
With a reputation as one of the best places to view game in the whole of Africa, the South Luangwa region in Zambia is enjoying ever-increasing popularity. The National Park lifeblood is the Luangwa River that sustains over 400 species of birds and some 60 animal species, including all of the Big Five plus lesser-spotted animals such as genets, civets, servals and hyenas. While the region is on the rise, visitor numbers are still comparatively less than better known regions in Southern Africa, so guests enjoy a sense of privacy and seclusion.
What is it like to swim in the famous infinity pool?
A new US $50 UNIVISA system has been introduced for Zambia & Zimbabwe allowing one visa to access both countries.
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import List, Optional
from flask_appbuilder.security.sqla.models import User
from flask_babel import lazy_gettext as _
from superset.commands.base import BaseCommand
from superset.commands.exceptions import DeleteFailedError
from superset.dashboards.commands.exceptions import (
DashboardBulkDeleteFailedError,
DashboardBulkDeleteFailedReportsExistError,
DashboardForbiddenError,
DashboardNotFoundError,
)
from superset.dashboards.dao import DashboardDAO
from superset.exceptions import SupersetSecurityException
from superset.models.dashboard import Dashboard
from superset.reports.dao import ReportScheduleDAO
from superset.views.base import check_ownership
logger = logging.getLogger(__name__)
class BulkDeleteDashboardCommand(BaseCommand):
    """Bulk-delete dashboards after checking existence, reports and ownership."""

    def __init__(self, user: User, model_ids: List[int]):
        self._actor = user
        self._model_ids = model_ids
        self._models: Optional[List[Dashboard]] = None

    def run(self) -> None:
        """Validate and delete the dashboards.

        Raises:
            DashboardBulkDeleteFailedError: if the DAO delete fails.
            (plus whatever validate() raises)
        """
        self.validate()
        try:
            DashboardDAO.bulk_delete(self._models)
        except DeleteFailedError as ex:
            logger.exception(ex.exception)
            # Chain the underlying DAO error for easier debugging.
            raise DashboardBulkDeleteFailedError() from ex

    def validate(self) -> None:
        """Raise if any dashboard is missing, has reports, or isn't owned by the actor."""
        # Validate/populate model exists
        self._models = DashboardDAO.find_by_ids(self._model_ids)
        if not self._models or len(self._models) != len(self._model_ids):
            raise DashboardNotFoundError()
        # Check there are no associated ReportSchedules
        reports = ReportScheduleDAO.find_by_dashboard_ids(self._model_ids)
        if reports:
            report_names = [report.name for report in reports]
            raise DashboardBulkDeleteFailedReportsExistError(
                # BUGFIX: interpolate AFTER translation -- the untranslated
                # template must be the gettext key, otherwise the formatted
                # string never matches a catalog entry.
                _("There are associated alerts or reports: %s")
                % ",".join(report_names)
            )
        # Check ownership
        for model in self._models:
            try:
                check_ownership(model)
            except SupersetSecurityException as ex:
                raise DashboardForbiddenError() from ex
|
Come to "The Moon," and if your friends aren't already there you will soon be making new ones. We offer inside and outdoor dining in a family-friendly atmosphere.
We have a full bar with tasty specialty drinks, our Hole in the Neck wines, more than 50 craft and imported beers, and the friendliest servers in the world!
On Thursdays, Fridays and Saturdays and for special events, the Moon is Southwest Georgia's go-to music venue, often featuring the Bo Henry Band.
Harvest Moon is like a chameleon: Albany’s top family-friendly eatery magnificently transforms into a hopping nightlife destination as the evening progresses.
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper around a TTS system."""
import functools
import logging
import os
import subprocess
import tempfile
from aiy import i18n
# Path to a tmpfs directory to avoid SD card wear
TMP_DIR = '/run/user/%d' % os.getuid()
logger = logging.getLogger('tts')
def create_say(player):
    """Build a say(words) callable bound to *player* and the configured language."""
    language = i18n.get_language_code()
    return functools.partial(say, player, lang=language)
def say(player, words, lang='en-US', volume=60, pitch=130):
    """Say the given words with TTS.

    Args:
      player: To play the text-to-speech audio.
      words: string to say aloud.
      lang: language for the text-to-speech engine.
      volume: volume for the text-to-speech engine.
      pitch: pitch for the text-to-speech engine.
    """
    try:
        fd, tts_wav = tempfile.mkstemp(suffix='.wav', dir=TMP_DIR)
    except IOError:
        # TMP_DIR may be unavailable; fall back to the default temp dir.
        logger.exception('Using fallback directory for TTS output')
        fd, tts_wav = tempfile.mkstemp(suffix='.wav')
    os.close(fd)

    # Wrap the text in the volume/pitch markup understood by pico2wave.
    markup = '<volume level="{}"><pitch level="{}">{}</pitch></volume>'.format(
        volume, pitch, words)
    try:
        subprocess.call(['pico2wave', '--lang', lang, '-w', tts_wav, markup])
        player.play_wav(tts_wav)
    finally:
        # Always remove the temporary wav file, even if playback fails.
        os.unlink(tts_wav)
def _main():
    """Command-line harness: speak the words given as arguments."""
    import argparse

    from aiy import audio

    logging.basicConfig(level=logging.INFO)

    arg_parser = argparse.ArgumentParser(description='Test TTS wrapper')
    arg_parser.add_argument('words', nargs='*', help='Words to say')
    parsed = arg_parser.parse_args()

    if parsed.words:
        phrase = ' '.join(parsed.words)
        create_say(audio.get_player())(phrase)
# Allow running this module directly for a quick manual TTS check.
if __name__ == '__main__':
    _main()
|
Know what to do in case of a weather-related emergency.
Emergency preparedness and natural disasters.
Your home or work routines can be disrupted with little or no warning by natural disasters, fires or other catastrophic events. It’s important that you and your family are prepared as help may not always be available.
If you or your loved ones are faced with a weather-based emergency, determine the safest course of action and stay informed through radio, TV, internet or whatever is available. Before an emergency, you can prepare an emergency kit with at least 72 hours' worth of food and water; make sure your car has a kit as well.
Your home and car should have kits in case of an emergency.
If you are outdoors, move to a clear area or a safe building. When in the car, stay in the car. After the quake is over carefully assess damage and don’t enter buildings until you know it’s safe.
If the waters are high, make sure you and your family stay dry. Secure your appliances and turn off utilities like electricity. If you live in an area where flooding is common, you might want to invest in flood insurance. If you are driving, never drive through a standing pool of water. If you have to evacuate, return home only when authorities say it’s safe. Check for gas leaks, food spoilage and be aware of other hazards when returning home.
Before a hurricane, have a shelter in place and avoid traveling during flood, thunderstorm or tornado warnings. If you live in a high-rise, take shelter below the 10th floor. Hurricane season is June-November. If you are in an area at risk for hurricanes secure your property and consider investing in flood insurance. During a hurricane, evacuate when told to do so or if you are unable to evacuate go to an interior room and lie low. After a hurricane, assess the damage and be careful of post-emergency hazards like flooding, knocked-down electrical wires and fire.
Tornado season is March-June and there have been tornadoes reported in 48 continental states. Before a tornado hits practice emergency plans and have a shelter in place. Avoid traveling during thunderstorm, flood or tornado warnings. If a tornado does hit, lie low in an interior room at a low level such as a basement or a bathroom. If you’re driving, drive at a right angle to the tornado’s path and if you’re outside lie in a ditch or a flat, low area. After the tornado passes, let others know you’re ok, stay tuned for storm watches and warnings.
What you need for an emergency at home.
Be ready for a twister at any time.
How to be prepared in case of an earthquake.
Stay safe against a twister.
This entry was posted in Year Zero Survival Blog and tagged Blackout123, fema 123, fema 123 hoard, fema 123 item to hoard, fema 123 reviews, fema 123.com video, fema camps, fema region 3, homestead survival blog, how to survive, natural disaster, Preppers, Searches related to fema 123, SHTF, survival, survival forum, Survival gear, survival gear blog, survival instinct, survival skills, Urban Survival, urban survival blog, wilderness survival blog by staff-writer. Bookmark the permalink.
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generated client library for storage version v1."""
import os
import sys
from apitools.base.py import base_api
import gslib
from gslib.third_party.storage_apitools import storage_v1_messages as messages
class StorageV1(base_api.BaseApiClient):
  """Generated client library for service storage version v1."""

  # NOTE: apitools-generated code; edit with care (changes may be lost on
  # regeneration).
  MESSAGES_MODULE = messages

  _PACKAGE = u'storage'
  _SCOPES = [u'https://www.googleapis.com/auth/devstorage.full_control', u'https://www.googleapis.com/auth/devstorage.read_only', u'https://www.googleapis.com/auth/devstorage.read_write']
  _VERSION = u'v1'
  # Placeholder OAuth client credentials; real credentials are supplied at
  # runtime by the surrounding gsutil auth machinery.
  _CLIENT_ID = 'nomatter'
  _CLIENT_SECRET = 'nomatter'
  _USER_AGENT = 'apitools gsutil/%s (%s)' % (gslib.VERSION, sys.platform)
  # When invoked through the Cloud SDK wrapper, tag the user agent so the
  # backend can distinguish gcloud-driven traffic.
  if os.environ.get('CLOUDSDK_WRAPPER') == '1':
    _USER_AGENT += ' Cloud SDK Command Line Tool'
    if os.environ.get('CLOUDSDK_VERSION'):
      _USER_AGENT += ' %s' % os.environ.get('CLOUDSDK_VERSION')
  _CLIENT_CLASS_NAME = u'StorageV1'
  _URL_VERSION = u'v1'
  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               version=_VERSION):
    """Create a new storage handle."""
    url = url or u'https://www.googleapis.com/storage/v1/'
    super(StorageV1, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params)
    # API version requested by the caller (may differ from the default
    # class-level _VERSION).
    self._version = version
    # One service object per API resource collection.
    self.bucketAccessControls = self.BucketAccessControlsService(self)
    self.buckets = self.BucketsService(self)
    self.channels = self.ChannelsService(self)
    self.defaultObjectAccessControls = self.DefaultObjectAccessControlsService(self)
    self.objectAccessControls = self.ObjectAccessControlsService(self)
    self.objects = self.ObjectsService(self)
  class BucketAccessControlsService(base_api.BaseApiService):
    """Service class for the bucketAccessControls resource."""

    _NAME = u'bucketAccessControls'

    def __init__(self, client):
      super(StorageV1.BucketAccessControlsService, self).__init__(client)
      # Generated method configs: one ApiMethodInfo per API method, mapping
      # the method name to its HTTP verb, URL template and message types.
      self._method_configs = {
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'storage.bucketAccessControls.delete',
              ordered_params=[u'bucket', u'entity'],
              path_params=[u'bucket', u'entity'],
              query_params=[],
              relative_path=u'b/{bucket}/acl/{entity}',
              request_field='',
              request_type_name=u'StorageBucketAccessControlsDeleteRequest',
              response_type_name=u'StorageBucketAccessControlsDeleteResponse',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'storage.bucketAccessControls.get',
              ordered_params=[u'bucket', u'entity'],
              path_params=[u'bucket', u'entity'],
              query_params=[],
              relative_path=u'b/{bucket}/acl/{entity}',
              request_field='',
              request_type_name=u'StorageBucketAccessControlsGetRequest',
              response_type_name=u'BucketAccessControl',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'storage.bucketAccessControls.insert',
              ordered_params=[u'bucket'],
              path_params=[u'bucket'],
              query_params=[],
              relative_path=u'b/{bucket}/acl',
              request_field='<request>',
              request_type_name=u'BucketAccessControl',
              response_type_name=u'BucketAccessControl',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'storage.bucketAccessControls.list',
              ordered_params=[u'bucket'],
              path_params=[u'bucket'],
              query_params=[],
              relative_path=u'b/{bucket}/acl',
              request_field='',
              request_type_name=u'StorageBucketAccessControlsListRequest',
              response_type_name=u'BucketAccessControls',
              supports_download=False,
          ),
          'Patch': base_api.ApiMethodInfo(
              http_method=u'PATCH',
              method_id=u'storage.bucketAccessControls.patch',
              ordered_params=[u'bucket', u'entity'],
              path_params=[u'bucket', u'entity'],
              query_params=[],
              relative_path=u'b/{bucket}/acl/{entity}',
              request_field='<request>',
              request_type_name=u'BucketAccessControl',
              response_type_name=u'BucketAccessControl',
              supports_download=False,
          ),
          'Update': base_api.ApiMethodInfo(
              http_method=u'PUT',
              method_id=u'storage.bucketAccessControls.update',
              ordered_params=[u'bucket', u'entity'],
              path_params=[u'bucket', u'entity'],
              query_params=[],
              relative_path=u'b/{bucket}/acl/{entity}',
              request_field='<request>',
              request_type_name=u'BucketAccessControl',
              response_type_name=u'BucketAccessControl',
              supports_download=False,
          ),
          }

      # No resumable/multipart upload endpoints for this resource.
      self._upload_configs = {
          }

    def Delete(self, request, global_params=None):
      """Permanently deletes the ACL entry for the specified entity on the specified bucket.

      Args:
        request: (StorageBucketAccessControlsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (StorageBucketAccessControlsDeleteResponse) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Get(self, request, global_params=None):
      """Returns the ACL entry for the specified entity on the specified bucket.

      Args:
        request: (StorageBucketAccessControlsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BucketAccessControl) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Insert(self, request, global_params=None):
      """Creates a new ACL entry on the specified bucket.

      Args:
        request: (BucketAccessControl) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BucketAccessControl) The response message.
      """
      config = self.GetMethodConfig('Insert')
      return self._RunMethod(
          config, request, global_params=global_params)

    def List(self, request, global_params=None):
      """Retrieves ACL entries on the specified bucket.

      Args:
        request: (StorageBucketAccessControlsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BucketAccessControls) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Patch(self, request, global_params=None):
      """Updates an ACL entry on the specified bucket. This method supports patch semantics.

      Args:
        request: (BucketAccessControl) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BucketAccessControl) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Update(self, request, global_params=None):
      """Updates an ACL entry on the specified bucket.

      Args:
        request: (BucketAccessControl) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BucketAccessControl) The response message.
      """
      config = self.GetMethodConfig('Update')
      return self._RunMethod(
          config, request, global_params=global_params)
class BucketsService(base_api.BaseApiService):
"""Service class for the buckets resource."""
_NAME = u'buckets'
def __init__(self, client):
super(StorageV1.BucketsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.buckets.delete',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
relative_path=u'b/{bucket}',
request_field='',
request_type_name=u'StorageBucketsDeleteRequest',
response_type_name=u'StorageBucketsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.buckets.get',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
relative_path=u'b/{bucket}',
request_field='',
request_type_name=u'StorageBucketsGetRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.buckets.insert',
ordered_params=[u'project'],
path_params=[],
query_params=[u'predefinedAcl', u'predefinedDefaultObjectAcl', u'project', u'projection'],
relative_path=u'b',
request_field=u'bucket',
request_type_name=u'StorageBucketsInsertRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.buckets.list',
ordered_params=[u'project'],
path_params=[],
query_params=[u'maxResults', u'pageToken', u'prefix', u'project', u'projection'],
relative_path=u'b',
request_field='',
request_type_name=u'StorageBucketsListRequest',
response_type_name=u'Buckets',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.buckets.patch',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
relative_path=u'b/{bucket}',
request_field=u'bucketResource',
request_type_name=u'StorageBucketsPatchRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.buckets.update',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
relative_path=u'b/{bucket}',
request_field=u'bucketResource',
request_type_name=u'StorageBucketsUpdateRequest',
response_type_name=u'Bucket',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes an empty bucket.
Args:
request: (StorageBucketsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageBucketsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns metadata for the specified bucket.
Args:
request: (StorageBucketsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new bucket.
Args:
request: (StorageBucketsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves a list of buckets for a given project.
Args:
request: (StorageBucketsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Buckets) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates a bucket. This method supports patch semantics.
Args:
request: (StorageBucketsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
  """Updates a bucket.

  Args:
    request: (StorageBucketsUpdateRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments

  Returns:
    (Bucket) The response message.
  """
  # Delegate to the base service using this method's declarative config.
  return self._RunMethod(
      self.GetMethodConfig('Update'), request, global_params=global_params)
class ChannelsService(base_api.BaseApiService):
  """Service class for the channels resource."""

  # Resource name for this service.
  _NAME = u'channels'

  def __init__(self, client):
    super(StorageV1.ChannelsService, self).__init__(client)
    # Declarative wire-level configuration for each API method; the method
    # bodies below read these back via self.GetMethodConfig(<name>).
    self._method_configs = {
        'Stop': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'storage.channels.stop',
            ordered_params=[],
            path_params=[],
            query_params=[],
            relative_path=u'channels/stop',
            request_field='<request>',
            request_type_name=u'Channel',
            response_type_name=u'StorageChannelsStopResponse',
            supports_download=False,
        ),
    }
    # No media-upload endpoints for this resource.
    self._upload_configs = {
    }

  def Stop(self, request, global_params=None):
    """Stop watching resources through this channel.

    Args:
      request: (Channel) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StorageChannelsStopResponse) The response message.
    """
    config = self.GetMethodConfig('Stop')
    return self._RunMethod(
        config, request, global_params=global_params)
class DefaultObjectAccessControlsService(base_api.BaseApiService):
  """Service class for the defaultObjectAccessControls resource."""

  # Resource name for this service.
  _NAME = u'defaultObjectAccessControls'

  def __init__(self, client):
    super(StorageV1.DefaultObjectAccessControlsService, self).__init__(client)
    # Declarative wire-level configuration for each API method; the method
    # bodies below read these back via self.GetMethodConfig(<name>).
    self._method_configs = {
        'Delete': base_api.ApiMethodInfo(
            http_method=u'DELETE',
            method_id=u'storage.defaultObjectAccessControls.delete',
            ordered_params=[u'bucket', u'entity'],
            path_params=[u'bucket', u'entity'],
            query_params=[],
            relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
            request_field='',
            request_type_name=u'StorageDefaultObjectAccessControlsDeleteRequest',
            response_type_name=u'StorageDefaultObjectAccessControlsDeleteResponse',
            supports_download=False,
        ),
        'Get': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'storage.defaultObjectAccessControls.get',
            ordered_params=[u'bucket', u'entity'],
            path_params=[u'bucket', u'entity'],
            query_params=[],
            relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
            request_field='',
            request_type_name=u'StorageDefaultObjectAccessControlsGetRequest',
            response_type_name=u'ObjectAccessControl',
            supports_download=False,
        ),
        'Insert': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'storage.defaultObjectAccessControls.insert',
            ordered_params=[u'bucket'],
            path_params=[u'bucket'],
            query_params=[],
            relative_path=u'b/{bucket}/defaultObjectAcl',
            request_field='<request>',
            request_type_name=u'ObjectAccessControl',
            response_type_name=u'ObjectAccessControl',
            supports_download=False,
        ),
        'List': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'storage.defaultObjectAccessControls.list',
            ordered_params=[u'bucket'],
            path_params=[u'bucket'],
            query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
            relative_path=u'b/{bucket}/defaultObjectAcl',
            request_field='',
            request_type_name=u'StorageDefaultObjectAccessControlsListRequest',
            response_type_name=u'ObjectAccessControls',
            supports_download=False,
        ),
        'Patch': base_api.ApiMethodInfo(
            http_method=u'PATCH',
            method_id=u'storage.defaultObjectAccessControls.patch',
            ordered_params=[u'bucket', u'entity'],
            path_params=[u'bucket', u'entity'],
            query_params=[],
            relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
            request_field='<request>',
            request_type_name=u'ObjectAccessControl',
            response_type_name=u'ObjectAccessControl',
            supports_download=False,
        ),
        'Update': base_api.ApiMethodInfo(
            http_method=u'PUT',
            method_id=u'storage.defaultObjectAccessControls.update',
            ordered_params=[u'bucket', u'entity'],
            path_params=[u'bucket', u'entity'],
            query_params=[],
            relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
            request_field='<request>',
            request_type_name=u'ObjectAccessControl',
            response_type_name=u'ObjectAccessControl',
            supports_download=False,
        ),
    }
    # No media-upload endpoints for this resource.
    self._upload_configs = {
    }

  def Delete(self, request, global_params=None):
    """Permanently deletes the default object ACL entry for the specified entity on the specified bucket.

    Args:
      request: (StorageDefaultObjectAccessControlsDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StorageDefaultObjectAccessControlsDeleteResponse) The response message.
    """
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Get(self, request, global_params=None):
    """Returns the default object ACL entry for the specified entity on the specified bucket.

    Args:
      request: (StorageDefaultObjectAccessControlsGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControl) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Insert(self, request, global_params=None):
    """Creates a new default object ACL entry on the specified bucket.

    Args:
      request: (ObjectAccessControl) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControl) The response message.
    """
    config = self.GetMethodConfig('Insert')
    return self._RunMethod(
        config, request, global_params=global_params)

  def List(self, request, global_params=None):
    """Retrieves default object ACL entries on the specified bucket.

    Args:
      request: (StorageDefaultObjectAccessControlsListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControls) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Patch(self, request, global_params=None):
    """Updates a default object ACL entry on the specified bucket. This method supports patch semantics.

    Args:
      request: (ObjectAccessControl) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControl) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Update(self, request, global_params=None):
    """Updates a default object ACL entry on the specified bucket.

    Args:
      request: (ObjectAccessControl) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControl) The response message.
    """
    config = self.GetMethodConfig('Update')
    return self._RunMethod(
        config, request, global_params=global_params)
class ObjectAccessControlsService(base_api.BaseApiService):
  """Service class for the objectAccessControls resource."""

  # Resource name for this service.
  _NAME = u'objectAccessControls'

  def __init__(self, client):
    super(StorageV1.ObjectAccessControlsService, self).__init__(client)
    # Declarative wire-level configuration for each API method; the method
    # bodies below read these back via self.GetMethodConfig(<name>).
    self._method_configs = {
        'Delete': base_api.ApiMethodInfo(
            http_method=u'DELETE',
            method_id=u'storage.objectAccessControls.delete',
            ordered_params=[u'bucket', u'object', u'entity'],
            path_params=[u'bucket', u'entity', u'object'],
            query_params=[u'generation'],
            relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
            request_field='',
            request_type_name=u'StorageObjectAccessControlsDeleteRequest',
            response_type_name=u'StorageObjectAccessControlsDeleteResponse',
            supports_download=False,
        ),
        'Get': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'storage.objectAccessControls.get',
            ordered_params=[u'bucket', u'object', u'entity'],
            path_params=[u'bucket', u'entity', u'object'],
            query_params=[u'generation'],
            relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
            request_field='',
            request_type_name=u'StorageObjectAccessControlsGetRequest',
            response_type_name=u'ObjectAccessControl',
            supports_download=False,
        ),
        'Insert': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'storage.objectAccessControls.insert',
            ordered_params=[u'bucket', u'object'],
            path_params=[u'bucket', u'object'],
            query_params=[u'generation'],
            relative_path=u'b/{bucket}/o/{object}/acl',
            request_field=u'objectAccessControl',
            request_type_name=u'StorageObjectAccessControlsInsertRequest',
            response_type_name=u'ObjectAccessControl',
            supports_download=False,
        ),
        'List': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'storage.objectAccessControls.list',
            ordered_params=[u'bucket', u'object'],
            path_params=[u'bucket', u'object'],
            query_params=[u'generation'],
            relative_path=u'b/{bucket}/o/{object}/acl',
            request_field='',
            request_type_name=u'StorageObjectAccessControlsListRequest',
            response_type_name=u'ObjectAccessControls',
            supports_download=False,
        ),
        'Patch': base_api.ApiMethodInfo(
            http_method=u'PATCH',
            method_id=u'storage.objectAccessControls.patch',
            ordered_params=[u'bucket', u'object', u'entity'],
            path_params=[u'bucket', u'entity', u'object'],
            query_params=[u'generation'],
            relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
            request_field=u'objectAccessControl',
            request_type_name=u'StorageObjectAccessControlsPatchRequest',
            response_type_name=u'ObjectAccessControl',
            supports_download=False,
        ),
        'Update': base_api.ApiMethodInfo(
            http_method=u'PUT',
            method_id=u'storage.objectAccessControls.update',
            ordered_params=[u'bucket', u'object', u'entity'],
            path_params=[u'bucket', u'entity', u'object'],
            query_params=[u'generation'],
            relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
            request_field=u'objectAccessControl',
            request_type_name=u'StorageObjectAccessControlsUpdateRequest',
            response_type_name=u'ObjectAccessControl',
            supports_download=False,
        ),
    }
    # No media-upload endpoints for this resource.
    self._upload_configs = {
    }

  def Delete(self, request, global_params=None):
    """Permanently deletes the ACL entry for the specified entity on the specified object.

    Args:
      request: (StorageObjectAccessControlsDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StorageObjectAccessControlsDeleteResponse) The response message.
    """
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Get(self, request, global_params=None):
    """Returns the ACL entry for the specified entity on the specified object.

    Args:
      request: (StorageObjectAccessControlsGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControl) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Insert(self, request, global_params=None):
    """Creates a new ACL entry on the specified object.

    Args:
      request: (StorageObjectAccessControlsInsertRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControl) The response message.
    """
    config = self.GetMethodConfig('Insert')
    return self._RunMethod(
        config, request, global_params=global_params)

  def List(self, request, global_params=None):
    """Retrieves ACL entries on the specified object.

    Args:
      request: (StorageObjectAccessControlsListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControls) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Patch(self, request, global_params=None):
    """Updates an ACL entry on the specified object. This method supports patch semantics.

    Args:
      request: (StorageObjectAccessControlsPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControl) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Update(self, request, global_params=None):
    """Updates an ACL entry on the specified object.

    Args:
      request: (StorageObjectAccessControlsUpdateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (ObjectAccessControl) The response message.
    """
    config = self.GetMethodConfig('Update')
    return self._RunMethod(
        config, request, global_params=global_params)
class ObjectsService(base_api.BaseApiService):
  """Service class for the objects resource."""

  # Resource name for this service.
  _NAME = u'objects'

  def __init__(self, client):
    super(StorageV1.ObjectsService, self).__init__(client)
    # Declarative wire-level configuration for each API method; the method
    # bodies below read these back via self.GetMethodConfig(<name>).
    self._method_configs = {
        'Compose': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'storage.objects.compose',
            ordered_params=[u'destinationBucket', u'destinationObject'],
            path_params=[u'destinationBucket', u'destinationObject'],
            query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifMetagenerationMatch'],
            relative_path=u'b/{destinationBucket}/o/{destinationObject}/compose',
            request_field=u'composeRequest',
            request_type_name=u'StorageObjectsComposeRequest',
            response_type_name=u'Object',
            supports_download=True,
        ),
        'Copy': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'storage.objects.copy',
            ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
            path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
            query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'projection', u'sourceGeneration'],
            relative_path=u'b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}',
            request_field=u'object',
            request_type_name=u'StorageObjectsCopyRequest',
            response_type_name=u'Object',
            supports_download=True,
        ),
        'Delete': base_api.ApiMethodInfo(
            http_method=u'DELETE',
            method_id=u'storage.objects.delete',
            ordered_params=[u'bucket', u'object'],
            path_params=[u'bucket', u'object'],
            query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
            relative_path=u'b/{bucket}/o/{object}',
            request_field='',
            request_type_name=u'StorageObjectsDeleteRequest',
            response_type_name=u'StorageObjectsDeleteResponse',
            supports_download=False,
        ),
        'Get': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'storage.objects.get',
            ordered_params=[u'bucket', u'object'],
            path_params=[u'bucket', u'object'],
            query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
            relative_path=u'b/{bucket}/o/{object}',
            request_field='',
            request_type_name=u'StorageObjectsGetRequest',
            response_type_name=u'Object',
            supports_download=True,
        ),
        'Insert': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'storage.objects.insert',
            ordered_params=[u'bucket'],
            path_params=[u'bucket'],
            query_params=[u'contentEncoding', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'name', u'predefinedAcl', u'projection'],
            relative_path=u'b/{bucket}/o',
            request_field=u'object',
            request_type_name=u'StorageObjectsInsertRequest',
            response_type_name=u'Object',
            supports_download=True,
        ),
        'List': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'storage.objects.list',
            ordered_params=[u'bucket'],
            path_params=[u'bucket'],
            query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
            relative_path=u'b/{bucket}/o',
            request_field='',
            request_type_name=u'StorageObjectsListRequest',
            response_type_name=u'Objects',
            supports_download=False,
        ),
        'Patch': base_api.ApiMethodInfo(
            http_method=u'PATCH',
            method_id=u'storage.objects.patch',
            ordered_params=[u'bucket', u'object'],
            path_params=[u'bucket', u'object'],
            query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
            relative_path=u'b/{bucket}/o/{object}',
            request_field=u'objectResource',
            request_type_name=u'StorageObjectsPatchRequest',
            response_type_name=u'Object',
            supports_download=False,
        ),
        'Rewrite': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'storage.objects.rewrite',
            ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
            path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
            query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'maxBytesRewrittenPerCall', u'projection', u'rewriteToken', u'sourceGeneration'],
            relative_path=u'b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}',
            request_field=u'object',
            request_type_name=u'StorageObjectsRewriteRequest',
            response_type_name=u'RewriteResponse',
            supports_download=False,
        ),
        'Update': base_api.ApiMethodInfo(
            http_method=u'PUT',
            method_id=u'storage.objects.update',
            ordered_params=[u'bucket', u'object'],
            path_params=[u'bucket', u'object'],
            query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
            relative_path=u'b/{bucket}/o/{object}',
            request_field=u'objectResource',
            request_type_name=u'StorageObjectsUpdateRequest',
            response_type_name=u'Object',
            supports_download=True,
        ),
        'WatchAll': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'storage.objects.watchAll',
            ordered_params=[u'bucket'],
            path_params=[u'bucket'],
            query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
            relative_path=u'b/{bucket}/o/watch',
            request_field=u'channel',
            request_type_name=u'StorageObjectsWatchAllRequest',
            response_type_name=u'Channel',
            supports_download=False,
        ),
    }
    # Media-upload endpoints; read back via self.GetUploadConfig('Insert').
    self._upload_configs = {
        'Insert': base_api.ApiUploadInfo(
            accept=['*/*'],
            max_size=None,
            resumable_multipart=True,
            resumable_path=u'/resumable/upload/storage/' + self._client._version + '/b/{bucket}/o',
            simple_multipart=True,
            simple_path=u'/upload/storage/' + self._client._version + '/b/{bucket}/o',
        ),
    }

  def Compose(self, request, global_params=None, download=None):
    """Concatenates a list of existing objects into a new object in the same bucket.

    Args:
      request: (StorageObjectsComposeRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
      download: (Download, default: None) If present, download
          data from the request via this stream.

    Returns:
      (Object) The response message.
    """
    config = self.GetMethodConfig('Compose')
    return self._RunMethod(
        config, request, global_params=global_params,
        download=download)

  def Copy(self, request, global_params=None, download=None):
    """Copies an object to a specified location. Optionally overrides metadata.

    Args:
      request: (StorageObjectsCopyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
      download: (Download, default: None) If present, download
          data from the request via this stream.

    Returns:
      (Object) The response message.
    """
    config = self.GetMethodConfig('Copy')
    return self._RunMethod(
        config, request, global_params=global_params,
        download=download)

  def Delete(self, request, global_params=None):
    """Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.

    Args:
      request: (StorageObjectsDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (StorageObjectsDeleteResponse) The response message.
    """
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Get(self, request, global_params=None, download=None):
    """Retrieves an object or its metadata.

    Args:
      request: (StorageObjectsGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
      download: (Download, default: None) If present, download
          data from the request via this stream.

    Returns:
      (Object) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params,
        download=download)

  def Insert(self, request, global_params=None, upload=None, download=None):
    """Stores a new object and metadata.

    Args:
      request: (StorageObjectsInsertRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
      upload: (Upload, default: None) If present, upload
          this stream with the request.
      download: (Download, default: None) If present, download
          data from the request via this stream.

    Returns:
      (Object) The response message.
    """
    config = self.GetMethodConfig('Insert')
    # Insert is the only method on this service with a media-upload config.
    upload_config = self.GetUploadConfig('Insert')
    return self._RunMethod(
        config, request, global_params=global_params,
        upload=upload, upload_config=upload_config,
        download=download)

  def List(self, request, global_params=None):
    """Retrieves a list of objects matching the criteria.

    Args:
      request: (StorageObjectsListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Objects) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Patch(self, request, global_params=None):
    """Updates an object's metadata. This method supports patch semantics.

    Args:
      request: (StorageObjectsPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Object) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Rewrite(self, request, global_params=None):
    """Rewrites a source object to a destination object. Optionally overrides metadata.

    Args:
      request: (StorageObjectsRewriteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (RewriteResponse) The response message.
    """
    config = self.GetMethodConfig('Rewrite')
    return self._RunMethod(
        config, request, global_params=global_params)

  def Update(self, request, global_params=None, download=None):
    """Updates an object's metadata.

    Args:
      request: (StorageObjectsUpdateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
      download: (Download, default: None) If present, download
          data from the request via this stream.

    Returns:
      (Object) The response message.
    """
    config = self.GetMethodConfig('Update')
    return self._RunMethod(
        config, request, global_params=global_params,
        download=download)

  def WatchAll(self, request, global_params=None):
    """Watch for changes on all objects in a bucket.

    Args:
      request: (StorageObjectsWatchAllRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Channel) The response message.
    """
    config = self.GetMethodConfig('WatchAll')
    return self._RunMethod(
        config, request, global_params=global_params)
|
Let me begin by saying I like bread. Especially hot bread with butter spread on thick-cut slices. That's probably the reason I'll never manage the South Beach or Atkins diets. There is a problem, though, with homemade bread … I know you're saying, "What problem is there with homemade bread?" I'll tell you: it takes far too long! That's right — between the mixing, first rise, kneading, second rise and baking, a simple loaf of bread can take anywhere from two and a half hours to three hours and twenty-five minutes. That's far too long to wait for bread. After some reading, research and good old-fashioned trial and error, I figured out how to make bread from start to finish in a little over an hour! Yes, I did say an hour. The magic is using your microwave as a proofing box to help raise the dough. If you happen to be anti-microwave, stop reading right now — this article doesn't concern you. By using the microwave to proof your dough, you cut the rising time dramatically.
2 teaspoons vital wheat gluten (Find this in the baking aisle where the flours are.) Hodgson Mills and King Arthur are two companies that make it.
1. In a large bowl, dissolve the sugar in warm water, and then stir in the yeast. Allow to proof until the yeast resembles a creamy foam — usually about 15 minutes or so, depending on how warm your kitchen is.
4. Bake at 350 degrees F for 30 minutes.
Next time somebody tells you that making homemade bread takes too long, just smile and tell them this.
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sourcetracker._sourcetracker import (intersect_and_sort_samples,
collapse_source_data,
subsample_dataframe,
validate_gibbs_input,
validate_gibbs_parameters,
collate_gibbs_results,
get_samples,
generate_environment_assignments,
cumulative_proportions,
single_sink_feature_table,
ConditionalProbability,
gibbs_sampler, gibbs)
from sourcetracker._plot import plot_heatmap
class TestValidateGibbsInput(TestCase):
    """Tests for validate_gibbs_input coercion and error checking.

    Fix: `pd.util.testing.assert_frame_equal` was deprecated in pandas 0.25
    and removed in pandas 2.0; the supported public location is
    `pd.testing.assert_frame_equal`, used throughout below.
    """

    def setUp(self):
        # Shared sample/feature labels for the 5x4 test tables.
        self.index = ['s%s' % i for i in range(5)]
        self.columns = ['f%s' % i for i in range(4)]

    def test_no_errors_(self):
        # A table where nothing is wrong, no changes expected.
        data = np.random.randint(0, 10, size=20).reshape(5, 4)
        sources = pd.DataFrame(data.astype(np.int32), index=self.index,
                               columns=self.columns)
        exp_sources = pd.DataFrame(data.astype(np.int32), index=self.index,
                                   columns=self.columns)
        obs = validate_gibbs_input(sources)
        pd.testing.assert_frame_equal(obs, exp_sources)
        # Sources and sinks.
        sinks = pd.DataFrame(data, index=self.index, columns=self.columns)
        exp_sinks = pd.DataFrame(data.astype(np.int32), index=self.index,
                                 columns=self.columns)
        obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
        pd.testing.assert_frame_equal(obs_sources, exp_sources)
        pd.testing.assert_frame_equal(obs_sinks, exp_sinks)

    def test_float_data(self):
        # Data is float, expect rounding (values in [0, 1) floor to 0).
        data = np.random.uniform(0, 1, size=20).reshape(5, 4)
        sources = pd.DataFrame(data, index=self.index, columns=self.columns)
        exp_sources = pd.DataFrame(np.zeros(20).reshape(5, 4).astype(np.int32),
                                   index=self.index, columns=self.columns)
        obs_sources = validate_gibbs_input(sources)
        pd.testing.assert_frame_equal(obs_sources, exp_sources)
        # Values in [1, 2) floor to 1.
        data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 1.
        sources = pd.DataFrame(data, index=self.index, columns=self.columns)
        exp_sources = pd.DataFrame(np.ones(20).reshape(5, 4).astype(np.int32),
                                   index=self.index, columns=self.columns)
        obs_sources = validate_gibbs_input(sources)
        pd.testing.assert_frame_equal(obs_sources, exp_sources)
        # Sources and sinks.
        data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 5
        sinks = pd.DataFrame(data,
                             index=self.index,
                             columns=self.columns)
        exp_sinks = \
            pd.DataFrame(5 * np.ones(20).reshape(5, 4).astype(np.int32),
                         index=self.index,
                         columns=self.columns)
        obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
        pd.testing.assert_frame_equal(obs_sources, exp_sources)
        pd.testing.assert_frame_equal(obs_sinks, exp_sinks)

    def test_negative_data(self):
        # Values less than 0, expect errors.
        data = np.random.uniform(0, 1, size=20).reshape(5, 4) - 1.
        sources = pd.DataFrame(data,
                               index=self.index,
                               columns=self.columns)
        self.assertRaises(ValueError, validate_gibbs_input, sources)
        data = -1 * np.random.randint(0, 20, size=20).reshape(5, 4)
        sources = pd.DataFrame(data,
                               index=self.index,
                               columns=self.columns)
        self.assertRaises(ValueError, validate_gibbs_input, sources)
        # Sources and sinks: only the sinks table is negative here.
        data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1
        sources = pd.DataFrame(data.astype(np.int32),
                               index=self.index,
                               columns=self.columns)
        sinks = pd.DataFrame(-10 * data,
                             index=self.index,
                             columns=self.columns)
        self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)

    def test_nan_data(self):
        # nans, expect errors.
        data = np.random.uniform(0, 1, size=20).reshape(5, 4)
        data[3, 2] = np.nan
        sources = pd.DataFrame(data,
                               index=self.index,
                               columns=self.columns)
        self.assertRaises(ValueError, validate_gibbs_input, sources)
        # Sources and sinks: the nan is injected after sources is built, so
        # only the sinks table carries it.
        data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1.
        sources = pd.DataFrame(data,
                               index=self.index,
                               columns=self.columns)
        data[1, 3] = np.nan
        sinks = pd.DataFrame(data,
                             index=self.index,
                             columns=self.columns)
        self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)

    def test_non_numeric_data(self):
        # data contains at least some non-numeric columns, expect errors.
        data = np.random.randint(0, 10, size=20).reshape(5, 4)
        sources = pd.DataFrame(data.astype(np.int32),
                               index=self.index,
                               columns=self.columns)
        sources.iloc[2, 2] = '3.a'
        self.assertRaises(ValueError, validate_gibbs_input, sources)
        # Sources and sinks: '3' is a string even though it looks numeric.
        data = np.random.randint(0, 10, size=20).reshape(5, 4)
        sources = pd.DataFrame(data.astype(np.int32),
                               index=self.index,
                               columns=self.columns)
        sinks = pd.DataFrame(data.astype(np.int32),
                             index=self.index,
                             columns=self.columns)
        sinks.iloc[2, 2] = '3'
        self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)

    def test_columns_identical(self):
        # Columns are identical, no error expected.
        data = np.random.randint(0, 10, size=20).reshape(5, 4)
        sources = pd.DataFrame(data.astype(np.int32),
                               index=self.index,
                               columns=self.columns)
        data = np.random.randint(0, 10, size=200).reshape(50, 4)
        sinks = pd.DataFrame(data.astype(np.int32),
                             index=['s%s' % i for i in range(50)],
                             columns=self.columns)
        obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
        pd.testing.assert_frame_equal(obs_sources, sources)
        pd.testing.assert_frame_equal(obs_sinks, sinks)

    def test_columns_non_identical(self):
        # Columns are not identical, error expected.
        data = np.random.randint(0, 10, size=20).reshape(5, 4)
        sources = pd.DataFrame(data.astype(np.int32),
                               index=self.index,
                               columns=self.columns)
        data = np.random.randint(0, 10, size=200).reshape(50, 4)
        sinks = pd.DataFrame(data.astype(np.int32),
                             index=['s%s' % i for i in range(50)],
                             columns=['feature%s' % i for i in range(4)])
        self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
class TestValidateGibbsParams(TestCase):
    """Tests for validate_gibbs_parameters argument checking."""

    def test_acceptable_inputs(self):
        # All values acceptable, expect no errors.
        # (restarts, draws_per_restart, burnin, delay)
        run_params = (10, 1, 100, 1)
        self.assertTrue(validate_gibbs_parameters(.001, .1, 10, *run_params))
        # Zero is a legal value for alpha1, alpha2 and beta.
        self.assertTrue(validate_gibbs_parameters(0, 0, 0, *run_params))

    def test_not_acceptable_inputs(self):
        # Each argument tuple is (alpha1, alpha2, beta, restarts,
        # draws_per_restart, burnin, delay) with exactly one bad value.
        rejected = [
            (-.001, .1, 10, 10, 1, 100, 1),       # negative float param
            (.001, .1, 10, 0, 1, 100, 1),         # int param that is 0
            (.001, .1, 10, 1.34, 1, 100, 1),      # int param given as float
            (.001, .1, 10, '3.2232', 1, 100, 1),  # param given as string
            (np.nan, .1, 10, 3, 1, 100, 1),       # param given as nan
        ]
        for args in rejected:
            self.assertFalse(validate_gibbs_parameters(*args))
class TestIntersectAndSortSamples(TestCase):
    """Tests for aligning a sample-metadata table and a feature table on
    their shared sample IDs."""

    def test_partially_overlapping_tables(self):
        # Test an example where there are unshared samples present in both
        # feature and sample tables. Notice that order is different between
        # the samples that are shared between both tables. The order of
        # samples in the returned tables is set by the ordering done in
        # np.intersect1d.
        sdata_c1 = [3.1, 'red', 5]
        sdata_c2 = [3.6, 'yellow', 7]
        sdata_c3 = [3.9, 'yellow', -2]
        sdata_c4 = [2.5, 'red', 5]
        sdata_c5 = [6.7, 'blue', 10]
        samples = ['s1', 's4', 's2', 's3', 'sX']
        headers = ['pH', 'color', 'day']
        stable = pd.DataFrame([sdata_c1, sdata_c4, sdata_c2, sdata_c3,
                               sdata_c5], index=samples, columns=headers)
        # Feature table covers s3..s11, so only s3 and s4 are shared with
        # `stable`.
        fdata = np.arange(90).reshape(9, 10)
        samples = ['s%i' % i for i in range(3, 12)]
        columns = ['o%i' % i for i in range(1, 11)]
        ftable = pd.DataFrame(fdata, index=samples, columns=columns)
        # NOTE(review): the expected order ('s4' before 's3') matches the
        # order those samples appear in `stable`, not lexical order —
        # confirm against intersect_and_sort_samples' implementation.
        exp_ftable = pd.DataFrame(fdata[[1, 0], :], index=['s4', 's3'],
                                  columns=columns)
        exp_stable = pd.DataFrame([sdata_c4, sdata_c3], index=['s4', 's3'],
                                  columns=headers)
        obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
        pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
        pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
        # No shared samples, expect a ValueError.
        ftable.index = ['ss%i' % i for i in range(9)]
        self.assertRaises(ValueError, intersect_and_sort_samples, stable,
                          ftable)
        # All samples shared, expect no changes.
        fdata = np.arange(50).reshape(5, 10)
        samples = ['s1', 's4', 's2', 's3', 'sX']
        columns = ['o%i' % i for i in range(10)]
        ftable = pd.DataFrame(fdata, index=samples, columns=columns)
        exp_ftable = ftable.loc[stable.index, :]
        exp_stable = stable
        obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
        pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
        pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
class TestGetSamples(TestCase):
    """Tests for selecting sample IDs by a metadata category/value pair."""

    def tests(self):
        # Build a metadata frame containing mixed dtypes within columns.
        sample_metadata = pd.DataFrame.from_dict(
            {'sample_location': ['a', 'a', 'a', 'a', 'b'],
             'num_reps': [3, 2, 3, 1, 3],
             'color': ['red', 'red', 'blue', 255, 255]})
        sample_metadata.index = ['s1', 's2', 's3', 's4', 's5']
        # Each case: (category, value, sample IDs expected to match).
        cases = [('sample_location', 'b', ['s5']),
                 ('sample_location', 'a', ['s1', 's2', 's3', 's4']),
                 ('color', 255, ['s4', 's5']),
                 ('num_reps', 3, ['s1', 's3', 's5'])]
        for category, value, exp_ids in cases:
            obs = get_samples(sample_metadata, category, value)
            exp = pd.Index(exp_ids, dtype='object')
            pd.util.testing.assert_index_equal(obs, exp)
class TestCollapseSourceData(TestCase):
    """Tests for collapsing source samples that share a metadata value."""

    def test_example1(self):
        # Simple example with 'sum' as collapse mode.
        samples = ['sample1', 'sample2', 'sample3', 'sample4']
        category = 'pH'
        values = [3.0, 0.4, 3.0, 3.0]
        stable = pd.DataFrame(values, index=samples, columns=[category])
        fdata = np.array([[10, 50, 10, 70],
                          [0, 25, 10, 5],
                          [0, 25, 10, 5],
                          [100, 0, 10, 5]])
        ftable = pd.DataFrame(fdata, index=stable.index,
                              columns=map(str, np.arange(4)))
        source_samples = ['sample1', 'sample2', 'sample3']
        method = 'sum'
        obs = collapse_source_data(stable, ftable, source_samples, category,
                                   method)
        # sample2 (pH 0.4) stands alone; sample1 and sample3 (both pH 3.0)
        # are summed into one row.
        exp_data = np.vstack((fdata[1, :], fdata[0, :] + fdata[2, :]))
        exp_index = [0.4, 3.0]
        exp = pd.DataFrame(exp_data.astype(np.int32), index=exp_index,
                           columns=map(str, np.arange(4)))
        exp.index.name = 'collapse_col'
        pd.util.testing.assert_frame_equal(obs, exp)
        # Example with collapse mode 'mean'. This will cause non-integer
        # values to be present, which the validate_gibbs_input should catch.
        source_samples = ['sample1', 'sample2', 'sample3', 'sample4']
        method = 'mean'
        obs = collapse_source_data(stable, ftable, source_samples, category,
                                   method)
        exp_data = np.vstack((fdata[1, :],
                              fdata[[0, 2, 3], :].mean(0))).astype(np.int32)
        exp_index = [0.4, 3.0]
        exp = pd.DataFrame(exp_data.astype(np.int32), index=exp_index,
                           columns=map(str, np.arange(4)))
        exp.index.name = 'collapse_col'
        pd.util.testing.assert_frame_equal(obs, exp)

    def test_example2(self):
        # Test on another arbitrary example.
        data = np.arange(200).reshape(20, 10)
        oids = ['o%s' % i for i in range(20)]
        sids = ['s%s' % i for i in range(10)]
        ftable = pd.DataFrame(data.T, index=sids, columns=oids)
        _stable = \
            {'s4': {'cat1': '2', 'cat2': 'x', 'cat3': 'A', 'cat4': 'D'},
             's0': {'cat1': '1', 'cat2': 'y', 'cat3': 'z', 'cat4': 'D'},
             's1': {'cat1': '1', 'cat2': 'x', 'cat3': 'A', 'cat4': 'C'},
             's3': {'cat1': '2', 'cat2': 'y', 'cat3': 'z', 'cat4': 'A'},
             's2': {'cat1': '2', 'cat2': 'x', 'cat3': 'A', 'cat4': 'D'},
             's6': {'cat1': '1', 'cat2': 'y', 'cat3': 'z', 'cat4': 'R'},
             's5': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
             's7': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
             's9': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'},
             's8': {'cat1': '2', 'cat2': 'x', 'cat3': 'z', 'cat4': '0'}}
        stable = pd.DataFrame(_stable).T
        category = 'cat4'
        # Selected sources split into two cat4 groups: '0' (s9 only) and
        # 'D' (s4, s0, s2), which are collapsed separately.
        source_samples = ['s4', 's9', 's0', 's2']
        method = 'sum'
        obs = collapse_source_data(stable, ftable, source_samples, category,
                                   method)
        exp_index = np.array(['0', 'D'])
        exp_data = np.array([[9, 19, 29, 39, 49, 59, 69, 79, 89, 99, 109, 119,
                              129, 139, 149, 159, 169, 179, 189, 199],
                             [6, 36, 66, 96, 126, 156, 186, 216, 246, 276, 306,
                              336, 366, 396, 426, 456, 486, 516, 546, 576]],
                            dtype=np.int32)
        exp = pd.DataFrame(exp_data, index=exp_index, columns=oids)
        exp.index.name = 'collapse_col'
        pd.util.testing.assert_frame_equal(obs, exp)
class TestSubsampleDataframe(TestCase):
    """Tests for rarefaction (subsampling) of a per-sample feature table."""

    def _example_table(self):
        # Shared 4 x 4 example feature table used by the first two tests.
        counts = np.array([[10, 50, 10, 70],
                           [0, 25, 10, 5],
                           [0, 25, 10, 5],
                           [100, 0, 10, 5]])
        return pd.DataFrame(counts, index=['s1', 's2', 's3', 's4'],
                            columns=map(str, np.arange(4)))

    def test_no_errors_expected(self):
        # The PRNG draws happen in cython, so deterministic checks are hard;
        # we settle for verifying every row sums to the requested depth.
        n = 30
        obs = subsample_dataframe(self._example_table(), n)
        self.assertTrue((obs.sum(axis=1) == n).all())

    def test_subsample_with_replacement(self):
        # Same row-sum check as above, but sampling with replacement.
        n = 30
        obs = subsample_dataframe(self._example_table(), n, replace=True)
        self.assertTrue((obs.sum(axis=1) == n).all())

    def test_shape_doesnt_change(self):
        # Features dropped during rarefaction must not shrink the table.
        # With these counts the chance that no feature is lost is nearly 0,
        # so this reliably exercises the condition of interest.
        counts = np.array([[0, 0, 0, 1e4],
                           [0, 0, 1, 1e4],
                           [0, 1, 0, 1e4],
                           [1, 0, 0, 1e4]]).astype(int)
        ftable = pd.DataFrame(counts, index=['s1', 's2', 's3', 's4'],
                              columns=map(str, np.arange(4)))
        n = 10
        obs = subsample_dataframe(ftable, n)
        self.assertTrue((obs.sum(axis=1) == n).all())
        self.assertEqual(obs.shape, ftable.shape)
class TestDataAggregationFunctions(TestCase):
    '''Test that returned data is collated and written correctly.'''

    def test_cumulative_proportions(self):
        # 4 draws, 4 sources + unknown, 3 sinks. Each draw (row) of each
        # sink's envcounts sums to 150, so each sink has 600 assignments.
        sink1_envcounts = np.array([[10, 100, 15, 0, 25],
                                    [150, 0, 0, 0, 0],
                                    [30, 30, 30, 30, 30],
                                    [0, 11, 7, 35, 97]])
        sink2_envcounts = np.array([[100, 10, 15, 0, 25],
                                    [100, 0, 50, 0, 0],
                                    [0, 60, 30, 30, 30],
                                    [7, 11, 0, 35, 97]])
        sink3_envcounts = np.array([[100, 10, 10, 5, 25],
                                    [70, 20, 30, 30, 0],
                                    [10, 30, 50, 30, 30],
                                    [0, 27, 100, 20, 3]])
        all_envcounts = [sink1_envcounts, sink2_envcounts, sink3_envcounts]
        sink_ids = np.array(['sink1', 'sink2', 'sink3'])
        source_ids = np.array(['source1', 'source2', 'source3', 'source4'])
        cols = list(source_ids) + ['Unknown']
        # Expected proportions: per-source column sums divided by the 600
        # total assignments of each sink.
        prp_r1 = np.array([190, 141, 52, 65, 152]) / 600.
        prp_r2 = np.array([207, 81, 95, 65, 152]) / 600.
        prp_r3 = np.array([180, 87, 190, 85, 58]) / 600.
        prp_data = np.vstack([prp_r1, prp_r2, prp_r3])
        # Expected standard deviations of the per-draw proportions, one
        # entry per (sink, source) pair.
        prp_std_data = np.zeros((3, 5), dtype=np.float64)
        prp_std_data[0, 0] = (np.array([10, 150, 30, 0]) / 600.).std()
        prp_std_data[0, 1] = (np.array([100, 0, 30, 11]) / 600.).std()
        prp_std_data[0, 2] = (np.array([15, 0, 30, 7]) / 600.).std()
        prp_std_data[0, 3] = (np.array([0, 0, 30, 35]) / 600.).std()
        prp_std_data[0, 4] = (np.array([25, 0, 30, 97]) / 600.).std()
        prp_std_data[1, 0] = (np.array([100, 100, 0, 7]) / 600.).std()
        prp_std_data[1, 1] = (np.array([10, 0, 60, 11]) / 600.).std()
        prp_std_data[1, 2] = (np.array([15, 50, 30, 0]) / 600.).std()
        prp_std_data[1, 3] = (np.array([0, 0, 30, 35]) / 600.).std()
        prp_std_data[1, 4] = (np.array([25, 0, 30, 97]) / 600.).std()
        prp_std_data[2, 0] = (np.array([100, 70, 10, 0]) / 600.).std()
        prp_std_data[2, 1] = (np.array([10, 20, 30, 27]) / 600.).std()
        prp_std_data[2, 2] = (np.array([10, 30, 50, 100]) / 600.).std()
        prp_std_data[2, 3] = (np.array([5, 30, 30, 20]) / 600.).std()
        prp_std_data[2, 4] = (np.array([25, 0, 30, 3]) / 600.).std()
        exp_prp = pd.DataFrame(prp_data, index=sink_ids, columns=cols)
        exp_prp_std = pd.DataFrame(prp_std_data, index=sink_ids, columns=cols)
        obs_prp, obs_prp_std = cumulative_proportions(all_envcounts, sink_ids,
                                                      source_ids)
        pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
        pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)

    def test_single_sink_feature_table(self):
        # 4 draws, depth of sink = 10, 5 sources + Unknown.
        final_env_assignments = np.array([[5, 0, 0, 0, 2, 0, 1, 0, 3, 1],
                                          [1, 1, 3, 3, 2, 2, 1, 1, 1, 1],
                                          [4, 1, 4, 4, 4, 4, 1, 1, 3, 2],
                                          [2, 1, 0, 5, 5, 5, 5, 1, 0, 2]])
        # notice that each row is the same - they are determined by
        # `generate_taxon_sequence` before the `gibbs_sampler` runs.
        # NOTE(review): there are 5 identical rows here vs 4 draws above —
        # presumably only the row contents matter; confirm.
        final_taxon_assignments = \
            np.array([[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
                      [0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
                      [0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
                      [0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
                      [0, 3, 3, 227, 550, 550, 550, 999, 999, 1100]])
        # we are allowing more taxa than we have found in this sample, i.e.
        # the largest value in `final_taxon_assignments` will be smaller than
        # the largest index in the columns of the final table.
        nfeatures = 1250
        nsources = 5
        data = np.zeros((nsources + 1, nfeatures), dtype=np.int32)
        # for the purpose of this test code, I'll increment data taxa by
        # taxa.
        data[np.array([5, 1, 4, 2]), 0] += 1
        data[0, 3] += 3
        data[1, 3] += 3
        data[3, 3] += 1
        data[4, 3] += 1
        data[np.array([0, 3, 4, 5]), 227] += 1
        data[0, 550] += 1
        data[1, 550] += 3
        data[2, 550] += 3
        data[4, 550] += 2
        data[5, 550] += 3
        data[0, 999] += 2
        data[1, 999] += 4
        data[3, 999] += 2
        data[1, 1100] += 2
        data[2, 1100] += 2
        exp_sources = ['source%s' % i for i in range(nsources)] + ['Unknown']
        feature_ids = ['f%s' % i for i in range(1250)]
        exp = pd.DataFrame(data, index=exp_sources, columns=feature_ids)
        source_ids = np.array(['source%s' % i for i in range(nsources)])
        obs = single_sink_feature_table(final_env_assignments,
                                        final_taxon_assignments, source_ids,
                                        feature_ids)
        pd.util.testing.assert_frame_equal(obs, exp)

    def test_collate_gibbs_results(self):
        # We'll vary the depth of the sinks - simulating a situation where
        # the user has not rarefied.
        # We'll set:
        # draws = 4
        # sink_depths = [10, 15, 7]
        # sources = 5 (+1 unknown)
        final_env_counts_sink1 = np.array([[5, 2, 1, 1, 0, 1],
                                           [0, 6, 2, 2, 0, 0],
                                           [0, 3, 1, 1, 5, 0],
                                           [2, 2, 2, 0, 0, 4]])
        final_env_assignments_sink1 = \
            np.array([[5, 0, 0, 0, 2, 0, 1, 0, 3, 1],
                      [1, 1, 3, 3, 2, 2, 1, 1, 1, 1],
                      [4, 1, 4, 4, 4, 4, 1, 1, 3, 2],
                      [2, 1, 0, 5, 5, 5, 5, 1, 0, 2]])
        final_taxon_assignments_sink1 = \
            np.array([[0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
                      [0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
                      [0, 3, 3, 227, 550, 550, 550, 999, 999, 1100],
                      [0, 3, 3, 227, 550, 550, 550, 999, 999, 1100]])
        final_env_counts_sink2 = np.array([[5, 1, 3, 2, 0, 4],
                                           [1, 1, 4, 5, 1, 3],
                                           [4, 1, 3, 2, 3, 2],
                                           [2, 3, 3, 2, 1, 4]])
        final_env_assignments_sink2 = \
            np.array([[2, 5, 0, 5, 1, 5, 0, 0, 3, 0, 3, 5, 2, 2, 0],
                      [3, 2, 2, 3, 2, 3, 3, 5, 5, 1, 3, 4, 2, 0, 5],
                      [0, 2, 3, 2, 0, 0, 2, 4, 5, 4, 0, 5, 3, 1, 4],
                      [4, 3, 2, 1, 2, 5, 3, 5, 2, 0, 1, 0, 5, 1, 5]])
        # NOTE(review): each row below has 14 entries although sink2's depth
        # is listed as 15 (and the assignment rows above have 15 entries);
        # the zip used to build `ft2` truncates to the shorter sequence —
        # confirm this is intended.
        final_taxon_assignments_sink2 = \
            np.array([[7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249],
                      [7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249],
                      [7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249],
                      [7, 7, 7, 7, 8, 8, 8, 8, 250, 250, 250, 250, 1249, 1249]])
        final_env_counts_sink3 = np.array([[4, 2, 0, 0, 1, 0],
                                           [0, 3, 1, 0, 2, 1],
                                           [0, 0, 1, 1, 3, 2],
                                           [2, 1, 0, 3, 0, 1]])
        final_env_assignments_sink3 = \
            np.array([[4, 0, 0, 0, 1, 0, 1],
                      [1, 2, 1, 4, 5, 4, 1],
                      [4, 3, 5, 4, 4, 5, 2],
                      [3, 0, 1, 3, 3, 0, 5]])
        final_taxon_assignments_sink3 = \
            np.array([[3, 865, 865, 1100, 1100, 1100, 1249],
                      [3, 865, 865, 1100, 1100, 1100, 1249],
                      [3, 865, 865, 1100, 1100, 1100, 1249],
                      [3, 865, 865, 1100, 1100, 1100, 1249]])
        # Create expected proportion data.
        prp_data = np.zeros((3, 6), dtype=np.float64)
        prp_std_data = np.zeros((3, 6), dtype=np.float64)
        prp_data[0] = (final_env_counts_sink1.sum(0) /
                       final_env_counts_sink1.sum())
        prp_data[1] = (final_env_counts_sink2.sum(0) /
                       final_env_counts_sink2.sum())
        prp_data[2] = (final_env_counts_sink3.sum(0) /
                       final_env_counts_sink3.sum())
        prp_std_data[0] = \
            (final_env_counts_sink1 / final_env_counts_sink1.sum()).std(0)
        prp_std_data[1] = \
            (final_env_counts_sink2 / final_env_counts_sink2.sum()).std(0)
        prp_std_data[2] = \
            (final_env_counts_sink3 / final_env_counts_sink3.sum()).std(0)
        sink_ids = ['sink1', 'sink2', 'sink3']
        exp_sources = ['source%s' % i for i in range(5)] + ['Unknown']
        feature_ids = ['f%s' % i for i in range(1250)]
        exp_prp = pd.DataFrame(prp_data, index=sink_ids, columns=exp_sources)
        exp_prp_std = pd.DataFrame(prp_std_data, index=sink_ids,
                                   columns=exp_sources)
        # Create expected feature table data by replaying each (environment,
        # taxon) assignment pair into a per-source count table.
        ft1 = np.zeros((6, 1250), dtype=np.int32)
        for r, c in zip(final_env_assignments_sink1.ravel(),
                        final_taxon_assignments_sink1.ravel()):
            ft1[r, c] += 1
        exp_ft1 = pd.DataFrame(ft1, index=exp_sources, columns=feature_ids)
        ft2 = np.zeros((6, 1250), dtype=np.int32)
        for r, c in zip(final_env_assignments_sink2.ravel(),
                        final_taxon_assignments_sink2.ravel()):
            ft2[r, c] += 1
        exp_ft2 = pd.DataFrame(ft2, index=exp_sources, columns=feature_ids)
        ft3 = np.zeros((6, 1250), dtype=np.int32)
        for r, c in zip(final_env_assignments_sink3.ravel(),
                        final_taxon_assignments_sink3.ravel()):
            ft3[r, c] += 1
        exp_ft3 = pd.DataFrame(ft3, index=exp_sources, columns=feature_ids)
        exp_fts = [exp_ft1, exp_ft2, exp_ft3]
        # Prepare the inputs for passing to collate_gibbs_results
        all_envcounts = [final_env_counts_sink1, final_env_counts_sink2,
                         final_env_counts_sink3]
        all_env_assignments = [final_env_assignments_sink1,
                               final_env_assignments_sink2,
                               final_env_assignments_sink3]
        all_taxon_assignments = [final_taxon_assignments_sink1,
                                 final_taxon_assignments_sink2,
                                 final_taxon_assignments_sink3]
        # Test when create_feature_tables=True
        obs_prp, obs_prp_std, obs_fts = \
            collate_gibbs_results(all_envcounts, all_env_assignments,
                                  all_taxon_assignments, np.array(sink_ids),
                                  np.array(exp_sources[:-1]),
                                  np.array(feature_ids),
                                  create_feature_tables=True, loo=False)
        pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
        pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)
        for i in range(3):
            pd.util.testing.assert_frame_equal(obs_fts[i], exp_fts[i])
        # Test when create_feature_tables=False
        obs_prp, obs_prp_std, obs_fts = \
            collate_gibbs_results(all_envcounts, all_env_assignments,
                                  all_taxon_assignments, np.array(sink_ids),
                                  np.array(exp_sources[:-1]),
                                  np.array(feature_ids),
                                  create_feature_tables=False, loo=False)
        self.assertTrue(obs_fts is None)

    def test_collate_gibbs_results_loo(self):
        # We'll vary the depth of the sources - simulating a situation where
        # the user has not rarefied.
        # We'll set:
        # draws = 2
        # source_depths = [7, 4, 5]
        # sources = 3 (+1 Unknown)
        ec1 = np.array([[6, 0, 1],
                        [2, 2, 3]])
        ea1 = np.array([[0, 2, 0, 0, 0, 0, 0],
                        [0, 1, 0, 2, 1, 2, 2]])
        ta1 = np.array([[2, 2, 2, 4, 4, 4, 6],
                        [2, 2, 2, 4, 4, 4, 6]])
        ec2 = np.array([[1, 2, 1],
                        [2, 2, 0]])
        ea2 = np.array([[0, 1, 2, 1],
                        [0, 1, 1, 0]])
        ta2 = np.array([[3, 3, 3, 3],
                        [3, 3, 3, 3]])
        ec3 = np.array([[1, 2, 2],
                        [4, 0, 1]])
        ea3 = np.array([[1, 1, 0, 2, 2],
                        [0, 0, 0, 0, 2]])
        ta3 = np.array([[3, 3, 4, 5, 5],
                        [3, 3, 4, 5, 5]])
        # Create expected proportion data. In LOO mode the held-out source's
        # own column is 0 on its row.
        prp_data = np.array([[0, 8/14., 2/14., 4/14.],
                             [3/8., 0, 4/8., 1/8.],
                             [5/10., 2/10., 0, 3/10.]], dtype=np.float64)
        prp_std_data = np.zeros((3, 4), dtype=np.float64)
        prp_std_data[0, 1:] = (ec1 / ec1.sum()).std(0)
        prp_std_data[1, np.array([0, 2, 3])] = (ec2 / ec2.sum()).std(0)
        prp_std_data[2, np.array([0, 1, 3])] = (ec3 / ec3.sum()).std(0)
        exp_sources = ['source%s' % i for i in range(3)] + ['Unknown']
        feature_ids = ['f%s' % i for i in range(7)]
        exp_prp = pd.DataFrame(prp_data, index=exp_sources[:-1],
                               columns=exp_sources)
        exp_prp_std = pd.DataFrame(prp_std_data, index=exp_sources[:-1],
                                   columns=exp_sources)
        # Create expected feature table data.
        ft1 = np.array([[0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 4, 0, 3, 0, 1],
                        [0, 0, 1, 0, 1, 0, 0],
                        [0, 0, 1, 0, 2, 0, 1]], dtype=np.int64)
        ft2 = np.array([[0, 0, 0, 3, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 4, 0, 0, 0],
                        [0, 0, 0, 1, 0, 0, 0]], dtype=np.int64)
        ft3 = np.array([[0, 0, 0, 2, 2, 1, 0],
                        [0, 0, 0, 2, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 3, 0]], dtype=np.int64)
        exp_fts = [pd.DataFrame(ft1, index=exp_sources, columns=feature_ids),
                   pd.DataFrame(ft2, index=exp_sources, columns=feature_ids),
                   pd.DataFrame(ft3, index=exp_sources, columns=feature_ids)]
        # Prepare the inputs for passing to collate_gibbs_results
        all_envcounts = [ec1, ec2, ec3]
        all_env_assignments = [ea1, ea2, ea3]
        all_taxon_assignments = [ta1, ta2, ta3]
        # Test when create_feature_tables=True
        obs_prp, obs_prp_std, obs_fts = \
            collate_gibbs_results(all_envcounts, all_env_assignments,
                                  all_taxon_assignments,
                                  np.array(exp_sources[:-1]),
                                  np.array(exp_sources[:-1]),
                                  np.array(feature_ids),
                                  create_feature_tables=True, loo=True)
        pd.util.testing.assert_frame_equal(obs_prp, exp_prp)
        pd.util.testing.assert_frame_equal(obs_prp_std, exp_prp_std)
        for i in range(3):
            pd.util.testing.assert_frame_equal(obs_fts[i], exp_fts[i])
        # Test when create_feature_tables=False
        obs_prp, obs_prp_std, obs_fts = \
            collate_gibbs_results(all_envcounts, all_env_assignments,
                                  all_taxon_assignments,
                                  np.array(exp_sources[:-1]),
                                  np.array(exp_sources[:-1]),
                                  np.array(feature_ids),
                                  create_feature_tables=False, loo=True)
        self.assertTrue(obs_fts is None)
class TestBookkeeping(TestCase):
    '''Tests for fnxs which generate bookkeeping data for `gibbs_sampler`.'''

    def test_generate_environment_assignment(self):
        # Seed the PRNG so the random assignment draw is reproducible.
        np.random.seed(235234234)
        obs_assignments, obs_counts = generate_environment_assignments(100,
                                                                       10)
        exp_assignments = np.array(
            [7, 3, 4, 1, 5, 2, 6, 3, 6, 4, 4, 7, 8, 2, 7, 7, 9, 9, 4,
             7, 0, 3, 6, 5, 7, 2, 7, 1, 2, 4, 1, 7, 0, 7, 5, 2, 8, 5,
             3, 3, 1, 4, 3, 3, 8, 7, 7, 5, 2, 6, 0, 2, 4, 0, 0, 5, 9,
             8, 2, 8, 9, 9, 8, 7, 5, 8, 0, 9, 8, 6, 3, 2, 3, 7, 3, 8,
             4, 4, 9, 1, 6, 6, 0, 9, 2, 9, 9, 4, 2, 9, 0, 4, 1, 3, 4,
             0, 0, 9, 8, 3])
        exp_counts = np.array([10, 6, 11, 12, 12, 7, 7, 13, 10, 12])
        np.testing.assert_array_equal(obs_assignments, exp_assignments)
        np.testing.assert_array_equal(obs_counts, exp_counts)
class ConditionalProbabilityTests(TestCase):
    '''Unit test for the ConditionalProbability class.'''

    def setUp(self):
        # Create an object we can reuse for several of the tests: two
        # non-overlapping sources over 6 features.
        self.alpha1 = .5
        self.alpha2 = .001
        self.beta = 10
        self.source_data = np.array([[0, 0, 0, 100, 100, 100],
                                     [100, 100, 100, 0, 0, 0]])
        self.cp = ConditionalProbability(self.alpha1, self.alpha2, self.beta,
                                         self.source_data)

    def test_init(self):
        # The constructor must record the priors, the per-source feature
        # counts (m_xivs), per-source totals (m_vs), the number of
        # environments (V = 2 sources + unknown) and features (tau).
        exp_alpha1 = self.alpha1
        exp_alpha2 = self.alpha2
        exp_beta = self.beta
        exp_m_xivs = self.source_data
        exp_m_vs = np.array([[300], [300]])
        exp_V = 3
        exp_tau = 6
        exp_joint_probability = np.array([0, 0, 0])
        self.assertEqual(self.cp.alpha1, exp_alpha1)
        self.assertEqual(self.cp.alpha2, exp_alpha2)
        self.assertEqual(self.cp.beta, exp_beta)
        np.testing.assert_array_equal(self.cp.m_xivs, exp_m_xivs)
        np.testing.assert_array_equal(self.cp.m_vs, exp_m_vs)
        self.assertEqual(self.cp.V, exp_V)
        self.assertEqual(self.cp.tau, exp_tau)
        np.testing.assert_array_equal(self.cp.joint_probability,
                                      exp_joint_probability)

    def test_set_n(self):
        # `set_n` records the sink depth on the object.
        self.cp.set_n(500)
        self.assertEqual(self.cp.n, 500)

    def test_precalculate(self):
        alpha1 = .01
        alpha2 = .3
        beta = 35
        source_data = np.array([[10, 5, 2, 100],
                                [0, 76, 7, 3],
                                [9, 5, 0, 0],
                                [0, 38, 11, 401]])
        cp = ConditionalProbability(alpha1, alpha2, beta, source_data)
        n = 1300
        cp.set_n(n)
        cp.precalculate()
        # Calculated by hand.
        exp_known_p_tv = np.array(
            [[.085526316, .042805878, .017173636, .85449419],
             [.000116225, .883426313, .081473733, .034983728],
             [.641737892, .356837607, .000712251, .000712251],
             [.00002222, .084459159, .024464492, .891054129]])
        exp_denominator_p_v = 1299 + 35 * 5
        exp_known_source_cp = exp_known_p_tv / exp_denominator_p_v
        exp_alpha2_n = 390
        exp_alpha2_n_tau = 1560
        self.assertEqual(cp.denominator_p_v, exp_denominator_p_v)
        self.assertEqual(cp.alpha2_n, exp_alpha2_n)
        self.assertEqual(cp.alpha2_n_tau, exp_alpha2_n_tau)
        np.testing.assert_array_almost_equal(cp.known_p_tv, exp_known_p_tv)
        np.testing.assert_array_almost_equal(cp.known_source_cp,
                                             exp_known_source_cp)

    def test_calculate_cp_slice(self):
        # Test with non-overlapping two component mixture.
        n = 500
        self.cp.set_n(n)
        self.cp.precalculate()
        n_vnoti = np.array([305, 1, 193])
        m_xiVs = np.array([25, 30, 29, 10, 60, 39])
        m_V = 193  # == m_xiVs.sum() == n_vnoti[2]
        # Calculated by hand.
        exp_jp_array = np.array(
            [[9.82612e-4, 9.82612e-4, 9.82612e-4, .1975051, .1975051,
              .1975051],
             [6.897003e-3, 6.897003e-3, 6.897003e-3, 3.4313e-5, 3.4313e-5,
              3.4313e-5],
             [.049925736, .059715096, .057757224, .020557656, .118451256,
              .077335944]])
        obs_jp_array = np.zeros((3, 6))
        for i in range(6):
            obs_jp_array[:, i] = self.cp.calculate_cp_slice(i, m_xiVs[i], m_V,
                                                            n_vnoti)
        np.testing.assert_array_almost_equal(obs_jp_array, exp_jp_array)
        # Test using Dan's R code and some print statements. Using the same
        # data as specified in setup.
        # Print statesments are added starting at line 347 of
        # SourceTracker.r. The output is being used to compare the
        # p_tv * p_v calculation that we are making. Used the following
        # print statements:
        # print(sink)
        # print(taxon)
        # print(sources)
        # print(rowSums(sources))
        # print(envcounts)
        # print(p_v_denominator)
        # print('')
        # print(p_tv)
        # print(p_v)
        # print(p_tv * p_v)
        # Results of print statements
        # [1] 6
        # [1] 100 100 100 100 100 100
        # otu_1 otu_2 otu_3 otu_4 otu_5 otu_6
        # Source_1 0.5 0.5 0.5 100.5 100.5 100.5
        # Source_2 100.5 100.5 100.5 0.5 0.5 0.5
        # Unknown 36.6 29.6 29.6 37.6 26.6 31.6
        # Source_1 Source_2 Unknown
        # 303.0 303.0 191.6
        # [1] 213 218 198
        # [1] 629
        # [1] ""
        # Source_1 Source_2 Unknown
        # 0.331683168 0.001650165 0.164926931
        # [1] 0.3386328 0.3465819 0.3147854
        # Source_1 Source_2 Unknown
        # 0.1123187835 0.0005719173 0.0519165856
        # The sink is the sum of the source data, self.source_data.sum(1).
        cp = ConditionalProbability(self.alpha1, self.alpha2, self.beta,
                                    self.source_data)
        cp.set_n(600)
        cp.precalculate()
        # Taxon selected by R was 6, but R is 1-indexed and python is
        # 0-indexed.
        taxon_index = 5
        # Must subtract alpha2 * tau * n from the Unknown sum since the R
        # script adds these values to the 'Sources' matrix.
        unknown_sum = 188
        unknown_at_t5 = 31
        # Must subtract beta from each envcount because the R script adds
        # this value to the 'envcounts' matrix.
        envcounts = np.array([203, 208, 188])
        obs_jp = cp.calculate_cp_slice(taxon_index, unknown_at_t5,
                                       unknown_sum, envcounts)
        # From the final line of R results above.
        exp_jp = np.array([0.1123187835, 0.0005719173, 0.0519165856])
        np.testing.assert_array_almost_equal(obs_jp, exp_jp)
class TestGibbs(TestCase):
'''Unit tests for Gibbs based on seeding the PRNG and hand calculations.'''
def test_single_pass_gibbs_sampler(self):
# The data for this test was built by seeding the PRNG, and making the
# calculations that Gibb's would make, and then comparing the results.
restarts = 1
draws_per_restart = 1
burnin = 0
# Setting delay to 2 is the only way to stop the Sampler after a single
# pass.
delay = 2
alpha1 = .2
alpha2 = .1
beta = 3
source_data = np.array([[0, 1, 4, 10],
[3, 2, 1, 1]])
sink = np.array([2, 1, 4, 2])
# Make calculations using gibbs function.
np.random.seed(0)
cp = ConditionalProbability(alpha1, alpha2, beta, source_data)
obs_ec, obs_ea, obs_ta = gibbs_sampler(sink, cp, restarts,
draws_per_restart, burnin,
delay)
# Make calculation using handrolled.
np.random.seed(0)
choices = np.arange(3)
np.random.choice(choices, size=9, replace=True)
order = np.arange(9)
np.random.shuffle(order)
expected_et_pairs = np.array([[2, 0, 1, 2, 0, 1, 0, 1, 0],
[3, 2, 2, 2, 0, 0, 1, 2, 3]])
envcounts = np.array([4., 3., 2.])
unknown_vector = np.array([0, 0, 1, 1])
# Calculate known probabilty base as ConditionalProbability would.
denominator = np.array([[(15 + (4*.2)) * (8 + 3*3)],
[(7 + (4*.2)) * (8 + 3*3)]])
numerator = np.array([[0, 1, 4, 10],
[3, 2, 1, 1]]) + .2
known_env_prob_base = numerator / denominator
# Set up a sequence environment assignments vector. This would normally
# be handled by the Sampler class.
seq_env_assignments = np.zeros(9)
# Set up joint probability holder, normally handeled by
# ConditionalProbability class.
joint_prob = np.zeros(3)
for i, (e, t) in enumerate(expected_et_pairs.T):
envcounts[e] -= 1
if e == 2:
unknown_vector[t] -= 1
# Calculate the new probabilty as ConditionalProbability would.
joint_prob = np.zeros(3)
joint_prob[:-1] += envcounts[:-1] + beta
joint_prob[:-1] = joint_prob[:-1] * known_env_prob_base[:2, t]
joint_prob[-1] = (unknown_vector[t] + (9 * .1)) / \
(unknown_vector.sum() + (9 * .1 * 4))
joint_prob[-1] *= ((envcounts[2] + beta) / (8 + 3*3))
# Another call to the PRNG
new_e = np.random.choice(np.array([0, 1, 2]),
p=joint_prob/joint_prob.sum())
seq_env_assignments[i] = new_e
envcounts[new_e] += 1
if new_e == 2:
unknown_vector[t] += 1
# prps = envcounts / float(envcounts.sum())
# exp_mps = prps/prps.sum()
# Create taxon table like Sampler class would.
exp_ct = np.zeros((4, 3))
for i in range(9):
exp_ct[expected_et_pairs[1, i],
np.int(seq_env_assignments[i])] += 1
# np.testing.assert_array_almost_equal(obs_mps.squeeze(), exp_mps)
# np.testing.assert_array_equal(obs_ct.squeeze().T, exp_ct)
np.testing.assert_array_equal(obs_ec.squeeze(), envcounts)
np.testing.assert_array_equal(obs_ea.squeeze()[order],
seq_env_assignments)
np.testing.assert_array_equal(obs_ta.squeeze()[order],
expected_et_pairs[1, :])
def test_gibbs_params_bad(self):
# test gibbs when the parameters passed are bad
features = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
source1 = np.array([10, 10, 10, 0, 0, 0])
source2 = np.array([0, 0, 0, 10, 10, 10])
sources = pd.DataFrame(np.vstack((source1, source2)).astype(np.int32),
index=['source1', 'source2'], columns=features)
self.assertRaises(ValueError, gibbs, sources, alpha1=-.3)
def test_gibbs_data_bad(self):
# input has nans.
features = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
source1 = np.array([10, 10, 10, 0, 0, np.nan])
source2 = np.array([0, 0, 0, 10, 10, 10])
sources = pd.DataFrame(np.vstack((source1, source2)),
index=['source1', 'source2'], columns=features)
self.assertRaises(ValueError, gibbs, sources)
# features do not overlap.
features = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
source1 = np.array([10, 10, 10, 0, 0, 0])
source2 = np.array([0, 0, 0, 10, 10, 10])
sources = pd.DataFrame(np.vstack((source1, source2)),
index=['source1', 'source2'], columns=features)
features2 = ['o1', 'asdsadO2', 'o3', 'o4', 'o5', 'o6']
sink1 = np.array([10, 10, 10, 0, 0, 0])
sink2 = np.array([0, 0, 0, 10, 10, 10])
sinks = pd.DataFrame(np.vstack((sink1, sink2)),
index=['sink1', 'sink2'], columns=features2)
self.assertRaises(ValueError, gibbs, sources, sinks)
# there are negative counts.
sources.iloc[0, 2] = -10
self.assertRaises(ValueError, gibbs, sources)
# non-real data in input dataframe.
# copied from test of `validate_gibbs_input`.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=['f%s' % i for i in range(5)],
columns=['s%s' % i for i in range(4)])
sources.iloc[2, 2] = '3.a'
self.assertRaises(ValueError, validate_gibbs_input, sources)
def test_consistency_when_gibbs_seeded(self):
'''Test consistency of `gibbs` (without LOO) from run to run.
Notes
-----
The number of calls to the PRNG should be stable (and thus this test,
which is seeded, should not fail). Any changes made to the code which
cause this test to fail should be scrutinized very carefully.
If the number of calls to the PRNG has not been changed, then an error
has been introduced somewhere else in the code. If the number of calls
has been changed, the deterministic tests should fail as well, but
since they are a small example they might not fail (false negative).
This test is extensive (it does 201 loops through the entire
`gibbs_sampler` block).
'''
features = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
source1 = np.array([10, 10, 10, 0, 0, 0])
source2 = np.array([0, 0, 0, 10, 10, 10])
sink1 = .5*source1 + .5*source2
sinks = pd.DataFrame(sink1.reshape(1, 6).astype(np.int32),
index=['sink1'], columns=features)
sources = pd.DataFrame(np.vstack((source1, source2)).astype(np.int32),
index=['source1', 'source2'], columns=features)
np.random.seed(1042)
mpm, mps, fts = gibbs(sources, sinks, alpha1=.001, alpha2=.01, beta=1,
restarts=3, draws_per_restart=5, burnin=50,
jobs=2, delay=4, create_feature_tables=True)
possible_sources = ['source1', 'source2', 'Unknown']
vals = np.array([[0.44, 0.44666667, 0.11333333]])
exp_mpm = pd.DataFrame(vals, index=['sink1'], columns=possible_sources)
vals = np.array([[0.00824322, 0.00435465, 0.01047985]])
exp_mps = pd.DataFrame(vals, index=['sink1'], columns=possible_sources)
vals = np.array([[69, 64, 65, 0, 0, 0],
[0, 0, 0, 67, 70, 64],
[6, 11, 10, 8, 5, 11]], dtype=np.int32)
exp_fts = pd.DataFrame(vals, index=possible_sources, columns=features)
pd.util.testing.assert_frame_equal(mpm, exp_mpm)
pd.util.testing.assert_frame_equal(mps, exp_mps)
pd.util.testing.assert_frame_equal(fts[0], exp_fts)
def test_consistency_when_gibbs_loo_seeded(self):
'''Test consistency of `gibbs` (loo) from run to run.
Notes
-----
The number of calls to the PRNG should be stable (and thus this test,
which is seeded, should not fail). Any changes made to the code which
cause this test to fail should be scrutinized very carefully.
If the number of calls to the PRNG has not been changed, then an error
has been introduced somewhere else in the code. If the number of calls
has been changed, the deterministic tests should fail as well, but
since they are a small example they might not fail (false negative).
This test is extensive (it does 201 loops through the entire
`gibbs_sampler` block for each source).
'''
source1a = np.array([10, 10, 10, 0, 0, 0])
source1b = np.array([8, 8, 8, 2, 2, 2])
source2a = np.array([0, 0, 0, 10, 10, 10])
source2b = np.array([4, 4, 4, 6, 6, 6])
vals = np.vstack((source1a, source1b, source2a,
source2b)).astype(np.int32)
source_names = ['source1a', 'source1b', 'source2a', 'source2b']
feature_names = ['o1', 'o2', 'o3', 'o4', 'o5', 'o6']
sources = pd.DataFrame(vals, index=source_names, columns=feature_names)
np.random.seed(1042)
obs_mpm, obs_mps, obs_fts = gibbs(sources, sinks=None, alpha1=.001,
alpha2=.01, beta=1, restarts=3,
draws_per_restart=5, burnin=50,
delay=4, create_feature_tables=True)
vals = np.array([[0., 0.62444444, 0., 0.01555556, 0.36],
[0.68444444, 0., 0.09333333, 0.12666667, 0.09555556],
[0., 0.00888889, 0., 0.08222222, 0.90888889],
[0.19111111, 0.2, 0.5, 0., 0.10888889]])
exp_mpm = pd.DataFrame(vals, index=source_names,
columns=source_names + ['Unknown'])
vals = np.array([[0., 0.02406393, 0., 0.0015956, 0.02445387],
[0.0076923, 0., 0.00399176, 0.00824322, 0.00648476],
[0., 0.00127442, 0., 0.00622575, 0.00609752],
[0.00636175, 0.00786721, 0.00525874, 0., 0.00609752]])
exp_mps = pd.DataFrame(vals, index=source_names,
columns=source_names + ['Unknown'])
fts0_vals = np.array([[0, 0, 0, 0, 0, 0],
[93, 87, 101, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[3, 4, 0, 0, 0, 0],
[54, 59, 49, 0, 0, 0]])
fts1_vals = np.array([[113, 98, 97, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 15, 13, 14],
[5, 7, 11, 11, 12, 11],
[2, 15, 12, 4, 5, 5]])
fts2_vals = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 2, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 12, 12, 13],
[0, 0, 0, 136, 137, 136]])
fts3_vals = np.array([[28, 27, 31, 0, 0, 0],
[27, 24, 25, 3, 4, 7],
[0, 0, 0, 80, 71, 74],
[0, 0, 0, 0, 0, 0],
[5, 9, 4, 7, 15, 9]])
fts_vals = [fts0_vals, fts1_vals, fts2_vals, fts3_vals]
exp_fts = [pd.DataFrame(vals, index=source_names + ['Unknown'],
columns=feature_names) for vals in fts_vals]
pd.util.testing.assert_frame_equal(obs_mpm, exp_mpm)
pd.util.testing.assert_frame_equal(obs_mps, exp_mps)
for obs_fts, exp_fts in zip(obs_fts, exp_fts):
pd.util.testing.assert_frame_equal(obs_fts, exp_fts)
def test_gibbs_close_to_sourcetracker_1(self):
    '''This test is stochastic; occasional errors might occur.

    Notes
    -----
    This tests against the R-code SourceTracker version 1.0, using
    R version 2.15.3.
    '''
    # Three sources over 32 features. Each source is dominated by distinct
    # features (f6; f18 and f31; f26), so the expected sink mixing
    # proportions are easy to sanity-check by eye.
    sources_data = \
        np.array([[0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                   0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 0]],
                 dtype=np.int32)
    sources_names = ['source1', 'source2', 'source3']
    feature_names = ['f%i' % i for i in range(32)]
    sources = pd.DataFrame(sources_data, index=sources_names,
                           columns=feature_names)
    # Four near-identical sinks, each a mixture of the three source
    # signatures (counts on f6, f18, f26, f31 only).
    sinks_data = np.array([[0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0,
                            0, 0, 0, 385, 0, 0, 0, 0, 0, 0, 0, 350, 0, 0,
                            0, 0, 95],
                           [0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0,
                            0, 0, 0, 380, 0, 0, 0, 0, 0, 0, 0, 350, 0, 0,
                            0, 0, 100],
                           [0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0,
                            0, 0, 0, 378, 0, 0, 0, 0, 0, 0, 0, 350, 0, 0,
                            0, 0, 102],
                           [0, 0, 0, 0, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0,
                            0, 0, 0, 386, 0, 0, 0, 0, 0, 0, 0, 350, 0, 0,
                            0, 0, 94]], dtype=np.int32)
    sinks_names = ['sink1', 'sink2', 'sink3', 'sink4']
    sinks = pd.DataFrame(sinks_data, index=sinks_names,
                         columns=feature_names)
    # Only the mixing-proportion mean/std are compared here, so feature
    # tables are not generated (third return value is ignored).
    obs_mpm, obs_mps, _ = gibbs(sources, sinks, alpha1=.001, alpha2=.1,
                                beta=10, restarts=2, draws_per_restart=2,
                                burnin=5, delay=2,
                                create_feature_tables=False)
    # Expected proportions produced by SourceTracker 1.0 (R) on this data.
    exp_vals = np.array([[0.1695, 0.4781, 0.3497, 0.0027],
                         [0.1695, 0.4794, 0.3497, 0.0014],
                         [0.1693, 0.4784, 0.3499, 0.0024],
                         [0.1696, 0.4788, 0.3494, 0.0022]])
    exp_mpm = pd.DataFrame(exp_vals, index=sinks_names,
                           columns=sources_names + ['Unknown'])
    # Index/columns must match exactly; the proportions are only required
    # to agree within 0.01 because the Gibbs sampler is stochastic.
    pd.util.testing.assert_index_equal(obs_mpm.index, exp_mpm.index)
    pd.util.testing.assert_index_equal(obs_mpm.columns, exp_mpm.columns)
    np.testing.assert_allclose(obs_mpm.values, exp_mpm.values, atol=.01)
class PlotHeatmapTests(TestCase):
    """Smoke tests for plot_heatmap: each call must return without raising."""

    def setUp(self):
        """Build the small mixing-proportion DataFrame shared by all tests."""
        proportions = np.array(
            [[0., 0.62444444, 0., 0.01555556, 0.36],
             [0.68444444, 0., 0.09333333, 0.12666667, 0.09555556],
             [0., 0.00888889, 0., 0.08222222, 0.90888889],
             [0.19111111, 0.2, 0.5, 0., 0.10888889]])
        names = ['source1a', 'source1b', 'source2a', 'source2b']
        self.mpm = pd.DataFrame(proportions, index=names,
                                columns=names + ['Unknown'])

    def test_defaults(self):
        """plot_heatmap with default arguments returns a figure and axes."""
        fig, ax = plot_heatmap(self.mpm)

    def test_non_defaults(self):
        """plot_heatmap with custom colormap, labels and title succeeds."""
        fig, ax = plot_heatmap(self.mpm, cm=plt.cm.jet,
                               xlabel='Other 1', ylabel='Other 2',
                               title='Other 3')
# Run the unittest entry point when this file is executed as a script.
if __name__ == '__main__':
    main()
|
The exterior canvas is clean and beautiful. The leather piping has light wear at the bottom corners. The middle leather strip has signs of wear and minor hairline scratches. The handles have minor signs of wear and a few minor hairline scratches. The goldtone hardware is bright with hairline surface scratches. The interior has light signs of wear, but otherwise is clean and still in great condition.
|
"""
Depth First Search
------------------
Recursive implementation of the depth first search algorithm used to
traverse trees or graphs. Starts at a selected node (root) and explores the
branch as far as possible before backtracking.
Time Complexity: O(E + V)
E = Number of edges
V = Number of vertices (nodes)
Pseudocode: https://en.wikipedia.org/wiki/Depth-first_search
"""
def dfs(graph, start, path=None):
    """
    Depth first search that recursively builds the list of nodes reachable
    from ``start``, in the order they are first visited. Backtracking occurs
    only when the last node in the path is visited.

    :param graph: A dictionary mapping each node to a list of adjacent nodes.
    :param start: The node to start the recursive search with.
    :param path: Nodes already visited (used internally by the recursion).
    :rtype: A list of visited nodes, or None when ``start`` is absent from
        the graph or has no outgoing edges.
    """
    # Use a None sentinel instead of a mutable default argument, so no list
    # object is shared between top-level calls.
    if path is None:
        path = []
    # A node with no adjacency entry (or an empty/None one) has no traversal
    # of its own; signal that with None and let the caller record the node.
    if start not in graph or not graph[start]:
        return None
    path = path + [start]
    for edge in graph[start]:
        if edge in path:
            continue  # Already visited; don't loop forever on cycles.
        result = dfs(graph, edge, path)
        # Bug fix: a dead-end neighbor used to return None and clobber the
        # accumulated path. Record the dead-end node itself and keep going.
        path = result if result is not None else path + [edge]
    return path
|
Smart and defensible. That’s how best to describe Gov. Gavin Newsom’s decision to reassign all but one hundred of the 360 California National Guard troops stationed on the southern border, which undercuts President Donald Trump’s hyperbolic claim that there’s a “national emergency” at the border. Here in San Diego — easily the most important U.S. city along the border — our proximity to Mexico is a big economic plus, not the nightmare the president depicts.
Beyond that essential point, Newsom’s characterization of Trump’s deployment of border troops as “political theater” is borne out by news accounts from last April. That’s when the president stunned his aides with a series of tweets and statements declaring a need to militarize the border to protect against an influx of unauthorized immigrant criminals and drugs. If there was a border emergency, that was news to his national security team.
Trump went on to make the purported border “invasion” a serious theme in his fall campaigning for Republicans, capped together with his October determination to deploy more than 5,200 lively-obligation army troops to the border after stories that a caravan of migrants was headed toward the U.S. Whereas the GOP held on to the Senate, it misplaced 43 House seats, suggesting the president’s alarmism didn’t play properly.
Nonetheless, Trump has stayed the course with his invasion theme, forcing a 35-day partial government shutdown when Congress refused to approve his request for $5.7 billion for additional border walls. Another shutdown is possible on Friday when funding for many government agencies runs out. Given that the president couldn’t get wall funds from Congress when it was controlled by Republicans, he’s got to know the money won’t be forthcoming with Democrats in charge of the House. Something’s got to give. But such calculations don’t matter to a president who won the GOP nomination and the presidency in 2016 by railing against immigration — and who chose to hold a related rally in El Paso tonight.
Newsom and other excessive-profile Democrats must be making calculations of their own on the evolving immigration challenge. Whereas the entire numbers are far decrease than they was, January was the 12th straight month through which apprehensions on the Southwest border have been up a minimum of 50 % over the identical month of the earlier yr. And whereas these trying to illegally enter the U.S. prior to now have been typically single men in Mexico, they’re increasingly families from Honduras, Guatemala and El Salvador in search of asylum in America. In December, Homeland Security officers stated the variety of asylum seekers was about ninety three,000 in fiscal 2018 — a huge improve over the 55,584 reported in fiscal 2017. A Syracuse University undertaking that tracks knowledge on asylum seekers studies that fiscal 2018 was the sixth straight yr that the share of asylum seekers rejected by immigration judges had increased.
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker.util.google_api import API_DCM
from starthinker.util.cm import get_profile_for_api
def bulkdozer_test(config, task):
    """Verify that entities created by a Bulkdozer run exist in Campaign
    Manager with the expected field values.

    Args:
        config: StarThinker configuration object used for authentication.
        task: Recipe task dict. ``task['traffic']['verify']`` lists entities
            to check; each entry carries a ``type`` (CM service name), an
            ``id``, and a ``values`` dict of expected field values.

    Raises:
        ValueError: If any fetched CM entity field differs from its expected
            value.
    """
    print('testing bulkdozer')
    if 'verify' in task['traffic']:
        # Admin accounts route through the internal API variant.
        is_admin, profile_id = get_profile_for_api(
            config, task['auth'], task['traffic']['account_id'])
        for entity in task['traffic']['verify']:
            # Resolve the CM service endpoint dynamically from the entity type.
            service = getattr(
                API_DCM(config, task['auth'], internal=is_admin), entity['type'])
            cm_entity = service().get(profileId=profile_id, id=entity['id']).execute()
            # Iterate key/value pairs directly instead of re-indexing the dict.
            for key, expected in entity['values'].items():
                if expected != cm_entity[key]:
                    raise ValueError('%s %s expected to be %s, was %s' % (entity['type'], key, expected, cm_entity[key]))
|
THE 2019 NRL season draw is just days from being finalised — minus a historic US kick-off.
Todd Greenberg’s dream to launch the season in America has been put on hold as it proved too great a financial risk.
The NRL will continue to investigate ways to cover the costs of staging the season kick-off in the US for the start of 2020.
One much-anticipated match set for allocation is for the grand opening of the new Western Sydney Stadium, where Parramatta will call home.
The Eels have been told to prepare to host their first home game against Wests Tigers on Easter Monday in round six.
For now, you won’t catch either club or NRL officials confirming the date as the NSW government wants to grandstand the announcement.
The NRL has completed the 2019 season draw in near record time and, in a bid to beat the AFL to its draw release, Greenberg had contemplated making it public on Thursday.
However, it’s more likely the draw release will be postponed by a further week to avoid taking the spotlight and attention away from Australia’s eagerly awaited Test match with Tonga in New Zealand on Saturday.
I have included Titans games in itineraries for groups of Chinese tourists so I really want an early draw.
THE NRL has backed the Titans to rebound from the start of next season by handing them the sole stand-alone match in its first Magic Round at Suncorp Stadium in May.
The Titans and Sharks will meet on a Thursday night to launch the round in which all 16 clubs will play at Suncorp Stadium, with the NRL extending the concept from three days to four.
The Broncos will play Manly in the late match of a Friday double-header and the Cowboys take on South Sydney in the eighth and last match of the round to close a Sunday double-header.
Gold Coast finished 14th in 2018 and will have played eight matches before the Round 9 extravaganza in Brisbane, which will be the first edition of a multi-year contract between the NRL and State Government.
When the Magic Round was confirmed in June, NRL CEO Todd Greenberg said there would be matches played from Friday to Sunday, with Thursday night not mentioned.
Greenberg set an aim for three sellouts of the 52,500-seat Suncorp Stadium.
But the prospect now is of a smaller attendance on the Thursday night, a school and work night, when the Titans will play one of the less supported Sydney clubs.
The addition of the Thursday night match is a commercial matter related to broadcasting.
“The Magic Round is a great concept, especially playing at the best rugby league stadium in the world — I’m sure the fans will love it,’’ said Titans and Queensland forward Jai Arrow.
“Hopefully we are doing well by then and get the Titans fans there. All fans from different teams will be there.
The full NRL season draw will be released this morning.
The only match between two 2018 finals teams in the Magic Round is the Warriors and Dragons clash, the second of an enticing Saturday triple-header.
Both clubs have sizeable supporter bases in Queensland, as do Melbourne and Parramatta, who play the following game to end the triple-header.
Broncos forward Matt Gillett said southeast Queensland league fans would be more likely than those in Sydney to pay to watch matches not involving their club.
“It’s an opportunity to them to watch different teams, not just the home team,’’ Gillett said.
“A lot of teams in Brisbane have good support, such as the Storm and the Dragons.
The NRL will capitalise on the presence of all clubs by scheduling a series of public promotions and clinics leading up to and during the four-day festival of matches.
“It will be really good that we’d be asked to come to Brisbane a bit earlier,’’ Queensland forward Coen Hess said.
When the Cowboys play Souths, Hess might have played in front of bigger crowds only in his Origin career, the 2017 grand final and the 2016 double-header which was the precursor to the Magic Round initiative.
A crowd of 52,346 flocked to Suncorp for the 2016 double-header involving the Cowboys, Storm, Broncos and Sea Eagles.
“The Magic Round will be a pretty different atmosphere, with supporters there from every team,’’ Hess said.
Going to be some tough games to fill the stadium the first few home games.
An article on the "winners and losers" after the draw announcement. You'll have to scroll to the very bottom to find us.
We also have the equal least free to air games.
Last edited by JunctionBlock; 25-10-18 at 04:33 PM.
Cannot tell you how angry I am that our Home Game against Cronulla is at Lang Park on a Thursday night. Thanks Annesley, thanks NRL, good job.
Agree totally. This was supposed to be a three day event but obviously the NRL went off half baked and announced the three days before talking to Fox/Nine. So they had to scrape a game out to play on Thursday night. So for an event that was meant to capture attention, it will now open to an empty stadium, great marketing. Not only will no one be there, it appears Titans membership costs have not been adjusted to take into account that this is a home game that few members will attend.
^^^^ great to hear from you MT. I was worried something had happened to you.
You bring up a great point re The Membership Prices too. If the Club are expecting us to go then they are kidding themselves. On a Thursday to Dodge City and back and then work/school on Friday.
No all is good Bods.
The membership issue will not really bother me as I am scaling right back this year from 4 Titanium West memberships to most likely a 4 game pass. I just couldn't justify handing over $1100 each year to watch rubbish and little seems to change year on year. If they turn in to something serious I'll be fully back. This is a thread about the draw and I don't want to derail it.
Another clanger is our Warriors game - one of the best drawers at 6pm Friday. Crazy.
Geezus, that is a shockin' draw for us at home. Nothing interesting until Round 10 when we host the Doggies, and doesn't really fire up until we host the Cowboys in Round 12 (2nd of June).
And that's the bye round so all the rep players will be out I'm guessing.
So for the game against the Sharks at Suncorp do we still pay if we are full season members?
Well this is a situation where I’d be wanting the Members Alliance to step up on our behalf because this Thursday Cronulla Suncorp situation is totally unacceptable.
It must be moved back to ROBINA.
I agree. Surely one game like this taken away from the "Magic Round" doesn't matter. There will be this massive build up to the Magic Round and it will start with a whimper of probably less than 10k.
I had a better look at our draw earlier and we have THREE home games at 6pm on Fridays (7 overall). That is a stitch up. Get the CEO on to it - oh do we have one.
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
import json
import re
from codequick import Listitem, Resolver, Route
import urlquick
from resources.lib import download
from resources.lib.menu_utils import item_post_treatment
# TO DO
# Fix Download Mode
URL_ROOT = 'http://lesargonautes.telequebec.tv'
URL_VIDEOS = URL_ROOT + '/Episodes'
URL_STREAM_DATAS = 'https://mnmedias.api.telequebec.tv/api/v2/media/mediaUid/%s'
URL_STREAM = 'https://mnmedias.api.telequebec.tv/m3u8/%s.m3u8'
# VideoId
@Route.register
def website_root(plugin, item_id, **kwargs):
    """Build the root menu: one entry per season found on the episodes page."""
    page = urlquick.get(URL_VIDEOS)
    season_pattern = re.compile(r'li path\=\"(.*?)\"')
    for season_title in season_pattern.findall(page.text):
        entry = Listitem()
        entry.label = season_title
        entry.set_callback(list_videos,
                           item_id=item_id,
                           season_title=season_title)
        item_post_treatment(entry)
        yield entry
@Route.register
def list_videos(plugin, item_id, season_title, **kwargs):
    """List every episode of the given season with title, artwork and plot."""
    resp = urlquick.get(URL_VIDEOS)
    # The page has one <li path="..."> element per season; restrict parsing
    # to the subtree of the requested season.
    root = resp.parse("li", attrs={"path": season_title})
    for video_datas in root.iterfind(".//li[@class='episode']"):
        # Compose "Title - Episode N" from two separate page elements.
        video_title = video_datas.find(".//div[@class='title']").text.strip(
        ) + ' - Episode ' + video_datas.find(
            ".//span[@path='Number']").text.strip()
        video_image = video_datas.find(".//img[@class='screen']").get('src')
        video_plot = video_datas.find(".//div[@class='summary']").text.strip()
        # MediaUid is the identifier expected by the media API (see
        # URL_STREAM_DATAS / get_video_url).
        video_id = video_datas.find(".//input[@path='MediaUid']").get('value')
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = item.art['landscape'] = video_image
        item.info['plot'] = video_plot
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_id=video_id)
        item_post_treatment(item, is_playable=True, is_downloadable=True)
        yield item
@Resolver.register
def get_video_url(plugin,
                  item_id,
                  video_id,
                  download_mode=False,
                  **kwargs):
    """Get video URL and start video player.

    Resolves the MediaUid to an m3u8 stream URL via the media API.
    Returns the stream URL, a download result when download_mode is set,
    or False when no media id is available.
    """
    # Generalized from `video_id == ''`: treat any falsy id (empty string,
    # None) as "no media available" and notify the user.
    if not video_id:
        plugin.notify('ERROR', plugin.localize(30716))
        return False
    # SECURITY NOTE: verify=False disables TLS certificate validation for
    # this request; kept for site compatibility but worth revisiting.
    resp = urlquick.get(URL_STREAM_DATAS % video_id, verify=False)
    json_parser = json.loads(resp.text)
    final_video_url = URL_STREAM % json_parser['media']['mediaId']
    if download_mode:
        return download.download_video(final_video_url)
    return final_video_url
|
Keep in touch with Into The Gloss!
Want more ITG? Sign up for Into The Gloss stories, Glossier product info and launches, and event invites delivered right to your inbox. Unsubscribe anytime.
|
#!/usr/bin/env python3
"""
Created on 4 Mar 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_core.aqcsv.specification.country_iso import CountryISO
from scs_core.aqcsv.specification.country_numeric import CountryNumeric
from scs_core.data.json import JSONify
# --------------------------------------------------------------------------------------------------------------------
# Exercise the CountryISO / CountryNumeric specification lookups: list all
# instances, look up one country by each key type, and confirm that both
# lookups resolve to equal country objects.

print("list ISO...")
for country in CountryISO.instances():
    print(country)
print("-")

print("find ISO...")
# Alpha-3 country code ("TUN" — Tunisia).
iso = "TUN"
iso_country = CountryISO.instance(iso)
print("iso:%s country:%s" % (iso, iso_country))
print(JSONify.dumps(iso_country))
print("-")

print("list Numeric...")
for country in CountryNumeric.instances():
    print(country)
print("-")

print("find Numeric...")
# Numeric country code (788 — Tunisia), matching the ISO lookup above.
numeric = 788
numeric_country = CountryNumeric.instance(numeric)
print("numeric:%s country:%s" % (numeric, numeric_country))
print(JSONify.dumps(numeric_country))
print("-")

print("equality...")
# Both lookups refer to the same country, so these should compare equal.
equality = iso_country == numeric_country
print("iso_country == numeric_country: %s" % equality)
print("-")
|
And what better way to chill out than a trip to a luxurious spa?
We guarantee you’ll be relaxed after a pampering session in this GORGE place. It has the most amazing scenery and it’s in the sunny south east. What’s not to love?
Spa treatments, a luxurious hotel and a wildlife park nearby? Ideal.
The beautiful Lyrath Estate is the best backdrop for a getaway. You can enjoy a treatment or two in the fabulous spa and then have a potter around the idyllic gardens. Afternoon sorted.
With the prettiest of sea views, this spa is sure to make your Mammy one happy lady.
The view of the Wicklow Mountains from this tranquil haven makes it the ideal place for a relaxing spa trip. We promise you won’t be disappointed!
If you really want your Mammy to be treated like the queen she truly is, then your best option is Lough Eske Castle. Heaven on earth.
This five-star resort is one of the most amazing places in Ireland to enjoy a bit of relaxation.
If you’re living in the West, Delphi Resort should be right up your street. The spa uses eco-friendly products in all of its treatments and there’s outdoor activities too if you’re feeling energetic.
Last but certainly not least is the Dunboyne Castle Hotel. The outdoor jacuzzi will provide the most amazing views while you take some well deserved ‘me time’.
|
# Script to evaluate the performance of the clustering algorithm.
import argparse
from itertools import combinations
from collections import defaultdict
def count_correct_pairs(cluster, labels_lookup):
    """
    Count, over every unordered pair of items in ``cluster``, how many pairs
    share the same ground-truth label, and how many pairs exist in total.

    Returns a (matching_pairs, total_pairs) tuple.
    """
    matching = 0
    examined = 0
    for first, second in combinations(cluster, 2):
        examined += 1
        if labels_lookup[first] == labels_lookup[second]:
            matching += 1
    return matching, examined
def calculate_pairwise_pr(clusters, labels_lookup):
    """
    Compute pairwise precision and recall of a clustering against the
    ground-truth labels in ``labels_lookup``.

    Returns a (precision, recall) tuple.
    """
    # Precision numerator/denominator: same-label pairs vs all pairs that
    # the clustering placed together.
    correct_pairs = 0
    total_pairs = 0
    for members in clusters:
        matched, possible = count_correct_pairs(members, labels_lookup)
        correct_pairs += matched
        total_pairs += possible
    # Recall denominator: group item ids by true label and count every pair
    # that could have been recovered.
    gt_clusters = defaultdict(list)
    for row_no, label in labels_lookup.items():
        gt_clusters[label].append(row_no)
    true_pairs = sum(
        len(items) * (len(items) - 1) / 2.0
        for items in gt_clusters.values())
    print("Correct Pairs that are in the same cluster:{}".format(correct_pairs))
    print("Total pairs as per the clusters created: {}".format(total_pairs))
    print("Total possible true pairs:{}".format(true_pairs))
    precision = float(correct_pairs) / total_pairs
    recall = float(correct_pairs) / true_pairs
    return precision, recall
if __name__ == '__main__':
    # Bug fix: this previously constructed argparse.ArgumentError() — an
    # exception type, not a parser — which raised a TypeError before any
    # argument could be parsed. ArgumentParser is the intended class.
    parser = argparse.ArgumentParser(
        description='Evaluate the performance of the clustering algorithm.')
    parser.add_argument('-c', '--clusters',
                        help='List of lists where each list is a cluster')
    parser.add_argument('-l', '--labels',
                        help='List of labels associated with each vector.')
|
As the Brexit process moves forward and insurance companies prepare for the UK’s divorce from the European Union, stakeholders in the US are also getting ready for a new era of political and economic relations between the two countries.
The American Insurance Association (AIA), the trade association for leaders in the P/C insurance industry, announced on September 12 that it will be joining the new US-UK Financial and Related Professional Services Coalition in order to foster discussion from both sides of the pond on trade and investment, among other issues.
The formal coalition has only been in the works for the past few months, but the AIA decided that it would be a smart move to have more processes and structures in place for US and UK insurance industries to coordinate their communications with their governments and with each other, added Simchak.
The coalition has put forward three main themes: promote deeper regulatory cooperation for the benefit of market participants, clients, and investors; maximize cross-border market access; and recognize the important role that the changing technological landscape plays in financial services.
“That’s an emerging area with both governments right now, so we thought it would be a useful purpose for this coalition to communicate with our governments about the best way to regulate fintech and insurtech, and whether there can be some transatlantic or US-UK regulatory cooperation on fintech and insurtech,” said Simchak.
The uncertainty in what a post-Brexit world will look like is one of the challenges that Simchak predicts will impact the coalition, but likewise underscores its importance.
|
"""
Functions for doing geometry calculations in the various types of coordinates
shared between client and server.
"""
import heapq
import math
from src.shared.config import CHUNK_SIZE, BUILD_SIZE
from src.shared.exceptions import NoPathToTargetError
from src.shared.logconfig import newLogger
log = newLogger(__name__)
BUILDS_PER_CHUNK = CHUNK_SIZE / BUILD_SIZE
# Costs used by pathfinding code.
# Measure distances in unit coordinates.
ORTHOGONAL_COST = CHUNK_SIZE
DIAGONAL_COST = int(ORTHOGONAL_COST * math.sqrt(2))
def findPath(gameState, srcPos, destPos):
    """
    Compute and return a path from srcPos to destPos, avoiding any obstacles.

    The returned path will be a list of waypoints such that a unit at srcPos
    could travel by straight line to each of the waypoints in order and thus
    get to destPos without hitting any obstacles.

    The search is A* over whole chunks (the heuristic is Euclidean distance,
    see _heuristicDistance); the resulting chunk path is then converted to
    unit-coordinate waypoints at chunk centers.

    :param gameState: Provides world size (sizeInChunks) and the
        inBounds/isPassable queries used during the search.
    :param srcPos: Starting Coord (unit coordinates).
    :param destPos: Target Coord (unit coordinates).
    :raises NoPathToTargetError: If srcPos lies outside the world or no
        route to destPos exists.
    """
    log.debug("Searching for path from %s to %s", srcPos, destPos)
    chunkWidth, chunkHeight = gameState.sizeInChunks
    # Value larger than any actual distance.
    farFarAway = ORTHOGONAL_COST**2 * chunkWidth * chunkHeight
    srcChunk = srcPos.chunk
    destChunk = destPos.chunk
    srcCX, srcCY = srcChunk
    destCX, destCY = destChunk
    # Make sure we're starting within the world.
    if not (0 <= srcCX < chunkWidth and 0 <= srcCY < chunkHeight):
        raise NoPathToTargetError("Starting point {} is outside the world."
                                  .format(srcPos))
    # If the source and dest points are in the same chunk, there's no point
    # doing a chunk-based search to find a path, because the result will be
    # trivial. Just go straight to the dest.
    if srcChunk == destChunk:
        return [destPos]
    # This list actually serves 2 purposes. First, it keeps track of which
    # chunks have been visited already. Second, for those that have been
    # visited, it tracks which chunk came before it in the shortest path from
    # the srcChunk to it.
    parents = [[None for _y in range(chunkHeight)]
               for _x in range(chunkWidth)]
    # Set to True for a node once we know we've found a shortest path to it, so
    # that we don't keep checking new paths to that node.
    nodeFinalized = [[False for _y in range(chunkHeight)]
                     for _x in range(chunkWidth)]
    # Shortest distance to each node from the start.
    distanceFromStart = [[farFarAway for _y in range(chunkHeight)]
                         for _x in range(chunkWidth)]
    distanceFromStart[srcCX][srcCY] = 0
    # Priority queue of chunks that we still need to search outward from, where
    # priority = distance from start + heuristic distance to end.
    chunksToCheck = []
    heapq.heappush(chunksToCheck, (_heuristicDistance(srcChunk, destChunk),
                                   srcChunk))
    while len(chunksToCheck) > 0:
        _, currChunk = heapq.heappop(chunksToCheck)
        log.debug("Pathfinding: search out from %s", currChunk)
        if currChunk == destChunk:
            break
        cx, cy = currChunk
        if nodeFinalized[cx][cy]:
            # Already expanded from this node; don't do it again.
            # (Stale duplicate queue entries are expected: a chunk is pushed
            # again each time a shorter path to it is found.)
            continue
        nodeFinalized[cx][cy] = True
        log.debug("Pathfinding: checking neighbors.")
        for addlDist, neighbor in _getValidNeighbors(currChunk, gameState):
            log.debug("Pathfinding: trying %s", neighbor)
            nx, ny = neighbor
            neighborStartDist = distanceFromStart[cx][cy] + addlDist
            if neighborStartDist < distanceFromStart[nx][ny]:
                log.debug("Pathfinding: found shorter path to neighbor.")
                distanceFromStart[nx][ny] = neighborStartDist
                parents[nx][ny] = currChunk
                neighborFwdDist = _heuristicDistance(neighbor, destChunk)
                neighborEstCost = neighborStartDist + neighborFwdDist
                heapq.heappush(chunksToCheck, (neighborEstCost, neighbor))
    # Dest never reached: either it's outside the world or the search
    # exhausted all reachable chunks without assigning it a parent.
    if (not _chunkInBounds(gameState, destChunk)) or \
            parents[destCX][destCY] is None:
        raise NoPathToTargetError("No path exists from {} to {}."
                                  .format(srcPos, destPos))
    # Build the list of waypoints backward, by following the trail of parents
    # all the way from dest to source.
    lim = chunkWidth * chunkHeight
    waypoints = []
    currChunk = destChunk
    while currChunk != srcChunk:
        waypoints.append(currChunk)
        cx, cy = currChunk
        currChunk = parents[cx][cy]
        assert currChunk is not None
        # If there's a bug, crash rather than hanging (it's easier to debug).
        lim -= 1
        assert lim >= 0, "Infinite loop detected in findPath"
    # Reverse the list of waypoints, since currently it's backward.
    waypoints.reverse()
    # Now convert the chunk coordinates to unit coordinates.
    waypoints = [Coord.fromCBU(chunk=chunk).chunkCenter for chunk in waypoints]
    # Note: The very first waypoint is still valid, because it's in a chunk
    # orthogonally adjacent to the chunk containing the source point, so
    # there's definitely not an obstacle in between.
    # We still need to correct the last waypoint, which is currently the center
    # of the dest chunk rather than the actual dest point. Note that we already
    # handled the case where srcChunk == destChunk, so waypoints can't be
    # empty.
    waypoints[-1] = destPos
    return waypoints
def _heuristicDistance(chunkA, chunkB):
    """
    Return a heuristic estimate of the distance between chunk A and chunk B,
    in *unit coordinates*.
    """
    ax, ay = chunkA
    bx, by = chunkB
    # Euclidean distance, with chunk deltas scaled to unit coordinates.
    return int(math.hypot(ORTHOGONAL_COST * (bx - ax),
                          ORTHOGONAL_COST * (by - ay)))
# Helper function for findPath.
def _getValidNeighbors(chunkPos, gameState):
    """
    Yield (cost, neighborChunk) pairs for each of the 8 chunks adjacent to
    chunkPos that are inside the world and passable. Diagonal neighbors cost
    DIAGONAL_COST, orthogonal neighbors ORTHOGONAL_COST.
    """
    x, y = chunkPos
    # The 8 neighbors, separated into those orthogonally adjaent and those
    # diagonally adjacent. Within each category, the particular neighbors are
    # in random order.
    diagonals = [
        (x - 1, y + 1),  # northwest
        (x + 1, y - 1),  # southeast
        (x - 1, y - 1),  # southwest
        (x + 1, y + 1),  # northeast
    ]
    orthogonals = [
        (x, y - 1),  # south
        (x, y + 1),  # north
        (x + 1, y),  # east
        (x - 1, y),  # west
    ]
    # Try diagonals first, so that when crossing a non-square rectangle we do
    # the diagonal part of the path before the orthogonal part.
    for neighbor in diagonals:
        if _chunkInBounds(gameState, neighbor) and \
                _chunkIsPassable(gameState, neighbor):
            # Check that the other two corners of the square are passable, so
            # we don't try to move through zero-width spaces in cases like:
            #    @@ B
            #    @@/
            #     /@@
            #    A @@
            nx, ny = neighbor
            if _chunkIsPassable(gameState, (x, ny)) and \
                    _chunkIsPassable(gameState, (nx, y)):
                yield (DIAGONAL_COST, neighbor)
    for neighbor in orthogonals:
        if _chunkInBounds(gameState, neighbor) and \
                _chunkIsPassable(gameState, neighbor):
            yield (ORTHOGONAL_COST, neighbor)
def _chunkInBounds(gameState, chunkPos):
    """Return True if the chunk index lies inside the world."""
    coord = Coord.fromCBU(chunk=chunkPos)
    return gameState.inBounds(coord)
def _chunkIsPassable(gameState, chunkPos):
    """Return True if the chunk contains no obstacle."""
    coord = Coord.fromCBU(chunk=chunkPos)
    return gameState.isPassable(coord)
class AbstractCoord(object):
    """
    Common base for Coord (an absolute position) and Distance (a
    displacement), both measured in unit coordinates. Coordinates are
    hierarchical: a chunk is CHUNK_SIZE units, a build cell is BUILD_SIZE
    units.
    """

    def __init__(self, uPos):
        super(AbstractCoord, self).__init__()
        # x and y are stored in unit coordinates.
        self.x, self.y = uPos

    @classmethod
    def fromUnit(cls, unit):
        """Construct from an (x, y) pair in unit coordinates."""
        return cls(unit)

    @classmethod
    def fromCBU(cls, chunk=(0,0), build=(0,0), unit=(0,0)):
        """
        Construct from mixed chunk/build/unit components; each component is
        scaled to unit coordinates and the three are summed.
        """
        cx, cy = chunk
        bx, by = build
        ux, uy = unit
        x = cx * CHUNK_SIZE + bx * BUILD_SIZE + ux
        y = cy * CHUNK_SIZE + by * BUILD_SIZE + uy
        return cls((x, y))

    @property
    def chunk(self):
        # Index of the chunk containing this point (floor division).
        return (self.x // CHUNK_SIZE, self.y // CHUNK_SIZE)

    @property
    def build(self):
        # Index of the build cell containing this point, in world-absolute
        # build coordinates.
        return (self.x // BUILD_SIZE, self.y // BUILD_SIZE)

    @property
    def unit(self):
        # Raw (x, y) in unit coordinates.
        return (self.x, self.y)

    @property
    def buildSub(self):
        # Build-cell index relative to the containing chunk.
        return ((self.x % CHUNK_SIZE) // BUILD_SIZE,
                (self.y % CHUNK_SIZE) // BUILD_SIZE)

    @property
    def unitSub(self):
        # Unit offset relative to the containing build cell.
        return (self.x % BUILD_SIZE, self.y % BUILD_SIZE)

    @property
    def truncToChunk(self):
        # This point rounded down to the origin corner of its chunk.
        return self.fromCBU(chunk=self.chunk)

    @property
    def truncToBuild(self):
        # This point rounded down to the origin corner of its build cell.
        return self.fromCBU(build=self.build)

    def serialize(self):
        """Return the unit coordinates as a list of two decimal strings."""
        return [str(int(x)) for x in self.unit]

    @classmethod
    def deserialize(cls, descs):
        """Inverse of serialize: build an instance from two numeric strings."""
        assert len(descs) == 2
        return cls.fromUnit(map(int, descs))

    def __repr__(self):
        return "{}({}, {})".format(type(self).__name__, self.x, self.y)

    def __str__(self):
        # Human-readable "chunk.build.unit" form for each axis.
        cx, cy = self.chunk
        bx, by = self.build
        ux, uy = self.unit
        return "({cx}.{bx}.{ux}, {cy}.{by}.{uy})".format(
            cx=cx, cy=cy, bx=bx, by=by, ux=ux, uy=uy
        )

    def __eq__(self, rhs):
        # Comparing a Coord with a Distance is a type error; comparing an
        # instance with a subclass instance is allowed (either direction).
        if not isinstance(self, type(rhs)) and not isinstance(rhs, type(self)):
            raise TypeError("Cannot compare {} with {}.".format(
                type(self), type(rhs)
            ))
        return self.x == rhs.x and self.y == rhs.y

    def __ne__(self, rhs):
        if not isinstance(self, type(rhs)) and not isinstance(rhs, type(self)):
            raise TypeError("Cannot compare {} with {}.".format(
                type(self), type(rhs)
            ))
        return self.x != rhs.x or self.y != rhs.y

    # Type rules for arithmetic (enforced in __add__/__sub__ below):
    #   Coord + Coord = err
    #   Coord + Dist  = Coord
    #   Dist  + Coord = Coord
    #   Dist  + Dist  = Dist
    #
    #   Coord - Coord = Dist
    #   Coord - Dist  = Coord
    #   Dist  - Coord = err
    #   Dist  - Dist  = Dist
    #
    #   - Coord = err
    #   - Dist  = Dist

    def __add__(self, rhs):
        if isinstance(self, Coord) and isinstance(rhs, Coord):
            raise TypeError("Cannot add two Coords.")
        elif isinstance(self, Distance) and isinstance(rhs, Distance):
            retType = Distance
        else:
            # Coord + Distance or Distance + Coord
            retType = Coord
        x = self.x + rhs.x
        y = self.y + rhs.y
        return retType((x, y))

    def __sub__(self, rhs):
        if isinstance(self, Coord) == isinstance(rhs, Coord):
            # Coord - Coord or Distance - Distance
            retType = Distance
        elif isinstance(self, Coord):
            # Coord - Distance
            retType = Coord
        else:
            # Distance - Coord
            raise TypeError("Cannot subtract Distance - Coord.")
        x = self.x - rhs.x
        y = self.y - rhs.y
        return retType((x, y))
class Distance(AbstractCoord):
    """A 2D offset between Coords; supports negation and scalar scaling."""
    def length(self):
        "Return the Euclidean length of this Distance."
        return math.hypot(self.x, self.y)
    def __neg__(self):
        # Component-wise negation; a negated offset is still an offset.
        x = - self.x
        y = - self.y
        return Distance((x, y))
    def __rmul__(self, lhs):
        """Scale by an int/float scalar, rounding each component to int."""
        # Idiom: one isinstance() call with a tuple of accepted types.
        if not isinstance(lhs, (int, float)):
            raise TypeError("Cannot multiply Distance by {}.".format(
                type(lhs)
            ))
        return Distance((int(round(self.x * lhs)), int(round(self.y * lhs))))
    def __mul__(self, rhs):
        # Scalar multiplication is commutative; delegate to __rmul__.
        return rhs * self
class Coord(AbstractCoord):
    """An absolute position in unit space."""
    @property
    def chunkCenter(self):
        """
        Return the Coord of the center of the chunk containing this Coord.
        """
        half_chunk = CHUNK_SIZE // 2
        return self.fromCBU(chunk=self.chunk, unit=(half_chunk, half_chunk))
class Rect(object):
    """Axis-aligned rectangle: an anchor Coord plus a Distance extent."""
    def __init__(self, coord, dist):
        # Validate both arguments up front; error messages are unchanged.
        checks = ((coord, Coord, "Coordinate"), (dist, Distance, "Distance"))
        for value, required, label in checks:
            if not isinstance(value, required):
                raise TypeError("Rectangle must have {}. Found {}".format(
                    label, type(value)
                ))
        self.coord = coord
        self.dist = dist
    @property
    def center(self):
        """Coord at the middle of the rectangle (components rounded by
        Distance scaling)."""
        half_extent = 0.5 * self.dist
        return self.coord + half_extent
    def serialize(self):
        """Flatten to four decimal strings: anchor pair then extent pair."""
        parts = list(self.coord.serialize())
        parts.extend(self.dist.serialize())
        return parts
    @classmethod
    def deserialize(cls, descs):
        """Inverse of serialize(); expects exactly four string values."""
        assert len(descs) == 4
        coord_part, dist_part = descs[:2], descs[2:]
        return cls(Coord.deserialize(coord_part), Distance.deserialize(dist_part))
def isRectCollision(rect1, rect2):
    """Return True when rect1 and rect2 overlap by at least one unit.

    Both arguments must be Rect instances.  Edge coordinates are converted
    to *inclusive* unit positions (hence the ``- 1``), so the interval
    overlap tests must use ``<=``/``>=``.  The previous strict ``<``/``>``
    comparisons were an off-by-one bug: rectangles overlapping by exactly
    one unit row/column -- and any rectangle of width or height 1 -- were
    never reported as colliding.
    """
    if not isinstance(rect1, Rect):
        raise TypeError("Can only compare distance between rectangles."
            "Found {}".format(
            type(rect1)
        ))
    if not isinstance(rect2, Rect):
        raise TypeError("Can only compare distance between rectangles."
            "Found {}".format(
            type(rect2)
        ))
    # Rectangle 1 -- inclusive edges.
    left1, bottom1 = rect1.coord.unit
    right1, top1 = (rect1.coord + rect1.dist).unit
    right1, top1 = right1 - 1, top1 - 1
    # Rectangle 2 -- inclusive edges.
    left2, bottom2 = rect2.coord.unit
    right2, top2 = (rect2.coord + rect2.dist).unit
    right2, top2 = right2 - 1, top2 - 1
    # Closed-interval overlap on both axes.
    return ((bottom2 <= top1 and top2 >= bottom1) and
            (left2 <= right1 and right2 >= left1))
|
This card was created as inspired by Splitcoaststampers card layout design for this week. Since it was the week of July 4th I decided to pull out this cute image from La-La Land Crafts called Saluting Luka. I am so grateful for all those men and women who dedicate their lives to keep America and the world free. This is in honor of them.
I revised the layout slightly with the placement of the small rectangles/square. I cut the three rectangle/square elements of the card from Gina K. Designs Kraft Cardstock. I used My Favorite Things A2 Stitched Rectangles and Avery Elle’s Double Pierced Square dies. I had to elongate the square to make a rectangle, and the A2 rectangle needed to be elongated as well so that it would align with the other two elements.
I stamped Luka on some Neena Classic Crest 80# Solar White Cardstock using Memento Tuxedo Black Ink. He was colored using Copic Markers (listed below). I used my Silhouette PIXScan mat to scan and cut around the image. I placed the image onto the paper side of Darice Sticky Back Foam and traced around it, fussy cut it just smaller than the trace, and adhered it to the back of the image for added dimension. Luka was the focal image and was placed on the long rectangle and adhered with Nuvo Deluxe Adhesive. For the banner across the bottom I decided to use a stars banner from Stampin’ Up/Sizzix Mini Treat Bag out of red cardstock and back it with gold glitter cardstock. I added a small square of foam to each end and placed it across the bottom of Luka.
I searched online to find another image that would complement Luka and found this really cute patriotic eagle holding an American Flag created by MarinaNeira. I saved the image, uploaded it into the Silhouette Cameo, traced around the image and cut it out to add to the lower elongated rectangle.
For the upper square I die cut a Spellbinders Stars Five out of gold glitter cardstock and added one of the red stars leftover from the banner. I scatter the three of the remaining small red hearts around the eagle.
For the sentiment I pulled out Dare 2B Artzy’s My Hero stamp set and stamped it in the lower right of the card with VersaMark Ink and heat embossed it with Ranger Fine Detail Gold Embossing Powder. I decided to add some score lines between the images and the sentiment and the card was done.
This card was made for a young woman at my church whose favorite color is purple. I used the La-La Land Crafts Darling Marci Digi Stamp and printed it on Neena Classic Crest 80# Solar White Cardstock with my laser jet printer, which allows you to color with Copic Markers without the lines bleeding.
I colored the image with the following Copic Markers: V28, V25, V22, BV17, BV 13; E49, E47, E57, E55; E11, E01, E000, R20, R21. I added white polka dots to the dress using a Sakura White Gel Pen. Once the coloring was complete I used the PIXScan Mat to cut around the image with the Silhouette Cameo.
I found some purple and pink plaid cardstock in the Recollections Bright & Basic Paper Pack and die cut it with an A2 My Favorite Things Rectangle STAX die. It was adhered to the front of a top folding A2 cardbase made from Gina K. Designs 120# White Cardstock with Kokuyo Dotliner Adhesive.
Whenever I use an image that appears to be sitting I like to have something to ground it. I die cut the La-La Land Crafts Garden Bench out of some Simon Says Stamp Woodgrain Cardstock and ink blended it with Distress Inks Brushed Corduroy and Vintage Photo. I added thin strips of 3M 1/2″ Foam Tape to the back of the bench to add dimension. The focal image was adhered to the bench with Nuvo Deluxe Adhesive, with a little foam tape behind the legs so that it would be on the same level.
The sentiment is a Penny Black die from the Awesome set. It was cut multiple times from white cardstock and adhered together with Nuvo Deluxe Adhesive. The top layers was colored using the same purple Copic Markers that had been used on the dress, blended with darkest on the bottom. It was adhered to the bottom of the plaid card front and then the bench and girl were adhered as well.
When I saw this stamp from La-La Land Crafts called Figure Skater Marci I knew it would make the perfect birthday card for Julia, who is a competitive figure skater. Love the way it turned out.
I have the digital image, so I can make it just the size I want. I printed the image with my laser printer onto Neena Classic Crest 80# Solar White Cardstock and then colored it with Copic Markers (listed below). Once I was done coloring, I then scanned and imported it into the Silhouette Cameo to cut around the image, much faster than fussy cutting.
I cut a rectangle from the same white cardstock using a My Favorite Things Stitched Rectangle STAX and then ran it through the Big Shot with the Cuttlebug Polka Dot Embossing Folder. This time I reversed the polka dots so that they were recessed. I adhered the focal image to the embossed rectangle using 3M 1/2″ Foam Tape.
An A2 top folding cardbase was made from Gina K. Designs 120# White Cardstock. To make the focal image pop on the white cardbase, I used Gina K. Designs Black 120# Cardstock, which was cut 1/8″ larger than the embossed rectangle and adhered using Nuvo Deluxe Adhesive.
The sentiment is from Simon Says Stamp Brush Stroke Messages and was stamped on Vellum with VersaMark Ink and heat embossed with Ranger Fine Detail Gold Embossing Powder then wrapped around the focal image panel and adhered to the back with Scotch Tape. Darice Sticky Back Foam was added to the back of the panel and then adhered to the cardbase with Nuvo Deluxe Adhesive.
This cute image from La-La Land Crafts Autumn Marci is great for an autumn or winter birthday card with is jacket, scarf, and gloves. Paige’s favorite color is blue and I added accents of pink. I really like the color combination.
I cut a rectangle from the same white cardstock using a My Favorite Things Stitched Rectangle STAX and then ran it through the Big Shot with the Cuttlebug Polka Dot Embossing Folder and adhered the focal image using 3M 1/2″ Foam Tape.
An A2 top folding cardbase was made from Gina K. Designs 120# White Cardstock. To make the focal image pop on the white cardbase, I looked through my scraps of paper and found this matching blue paper, which was cut 1/8″ larger than the embossed rectangle and adhered using Nuvo Deluxe Adhesive.
The sentiment is from Studio Katia’s Blooming Bunch and was stamped on Vellum with VersaMark Ink and heat embossed with Ranger Fine Detail Gold Embossing Powder then wrapped around the focal image panel and adhered to the back with Scotch Tape. Darice Sticky Back Foam was added to the back of the panel and then adhered to the cardbase with Nuvo Deluxe Adhesive.
I love making birthday cards for the young women at church. It is fun to find an image that looks similar to what they look like and color matching hair and skin tones and the clothes in their favorite colors. I really like the navy and coral combination that Hailey likes.
I stamped the image from La-La Land Crafts Love You Molli onto Neena Classic Crest 80# Solar White Cardstock with Memento Tuxedo Black Ink, which is Copic friendly and colored the image with the markers listed below.
Once I was done coloring, I then scanned and imported it into the Silhouette Cameo to cut around the image. Almost as good as a matching die, just takes a little more time, but faster than fussy cutting.
I cut a rectangle from the same white cardstock using a My Favorite Things Wonky Rectangle and then ran it through the Big Shot with the Cuttlebug Cross My Heart Embossing Folder and adhered the focal image using 3M 1/2″ Foam Tape.
An A2 top folding cardbase was made from Gina K. Designs 120# White Cardstock. To make the focal image pop on the white cardbase, I used Gina K. Designs In the Navy 120# cardstock, which was cut 1/8″ larger than the embossed rectangle and adhered using Nuvo Deluxe Adhesive.
The sentiment is from the Simon Says Stamp Brush Stroke Message set and was stamped on Vellum with VersaMark Ink and heat embossed with Ranger Fine Detail Gold Embossing Powder then wrapped around the focal image panel and adhered to the back with Scotch Tape. Darice Sticky Back Foam was added to the back of the panel and then adhered to the cardbase with Nuvo Deluxe Adhesive.
My granddaughter Emma, who is turning twelve, is quite the little artist. The cute image from La-La Land Crafts entitled Artist Marci is perfect for her. Her favorite color is aqua/teal blue and so that is what I used to color the image and stencil.
I originally made this card as a spinner, but when I was showing it to a friend the string broke and there wasn’t any way I could add new string between the front and back image because they had been adhered together with Sookwang Be Creative Tape and so I decided to make it a see through card.
This is a digital image and so it was easy to print both an original and mirror image. They were colored with Copic Markers (listed below) and then die cut using the Silhouette PIXScan Mat. As stated above the two images were adhered together with Be Creative Tape.
I pulled some striped blue cardstock from the Echo Park Hello Winter 6 x 6 Pack and die cut it with a Memory Box Stitched Rectangle that would cover a 4 1/4 x 5 1/2 top folding cardbase, made from Gina K. Designs 120# White Cardstock. The patterned paper was temporarily adhered to the front of the cardbase so a rectangle opening could be die cut through both the cardbase and the patterned paper.
Southbend Monofilament Line was adhered between the two focal images, leaving long tails. The images were adhered in the center of the small rectangle with the strings being secured with more Sookwang Be Creative Tape then more tape was added to the back of the patterned paper and it was adhered to the front of the cardbase, covering the strings and securing them in place.
The Pretty Pink Posh Scalloped Rectangle Frame was used to cut four teal blue frames. Before adhering them to the front of the card the sentiment from Mama Elephant Make a Wish was stamped with VersaMark Ink and heat embossed with Ranger Fine Detail Gold Embossing Powder. Then two frames were together with Tombo Mono Multi Glue and then 3M 1/2″ Foam Tape was cut in half so that it would be narrow enough to be placed on the back of the frame adding dimension. The other two were adhered to the inside of the card with liquid glue.
When the string broke on the spinner a piece of acetate was cut just smaller than the opening and was adhered to the front of the first frames with 1/8″ Be Creative Tape. Another teal frame was die cut and adhered over the acetate with the same tape. The focal image was run through my Xyron Sticker Maker so that it would be smooth when placed on the acetate.
Since you could see through the front of the card there wasn’t any place to stamp the sentiment and write a message, so a quarter sheet of Neena Classic Crest was cut 4 1/4″ x 5 3/4″ and the Echo Park Balloon Stencil and ink blended with Distress Oxide Inks Broken China and Peacock Feather. It was scored at 5 1/4″ and the fold was adhered to the back of the card with some Tombo Mono Multi Glue. Another frame was die cut and an edge was cut to adhere to the top of the shorter stencil piece. This allowed it to be a tri-fold where the sentiment could stamped behind the stenciled panel and a message written.
This card was made for a young woman at church whose favorite color is purple. I found a beautiful plaid from the Doodlebug Polar Pals 6 x 6 Paper Pack that made the La-La Land Crafts Birthday Cake Molli image pop on the paper. I love the way it turned out. Happy Birthday Abigail.
The image is a digital stamp and was printed on Neena Classic Crest 80# Solar White Cardstock with a LaserJet printer to ensure it wouldn’t bleed when colored with Copic Markers (colors listed below). The Silhouette Cameo PIXScan Mat was used to cut around the image once it had been colored.
The patterned paper was cut with the rectangle from Simon Says Stamp Bundle of Stitched Shapes and adhered directly to a top folding A2 cardbase made from Gina K. Designs 120# White Cardstock with Kokuyo Dotliner Adhesive.
A My Favorite Things Stitched Oval Frame was cut from white cardstock to draw attention to the focal image. To add dimension 3M 1/2″ Foam Tape was cut in half and adhered to the back and then adhered to the card front.
Foam Tape was added to the back of the focal image and before adhering it to the card front it was placed to determine where to stamp the secondary sentiment from Winnie & Walter Sentimental Birthday, which was stamped with VersaMark Ink and heat embossed with Hero Arts Detailed White Embossing Powder. The focal image was then attached by removing the backing of the foam tape and the card was done.
I’m having total hip replacement the end of October and thought that it would be best to make all the birthday cards for the young women in our ward throughout the end of the year. All the cards I made for the young women are girl images, mostly from La-La Land Crafts. This one is Fabulous Marci. This is a digital image and was printed on Neena Classic Crest 80# Solar White Cardstock and then cut with the Silhouette Cameo and the PIXScan Mat.
This is a card that was made for Jennifer. Her favorite color is blue and she has lovely long dark blond/light brown hair. It was colored with Copic Markers (listed below). To add some dimension to the card I accented the headband and roses with Silver Stickles.
To assemble the card I went through my stash of patterned paper and found this pretty Baby Blue Paisley paper from The Paper Studio. It was die cut using the largest Lawn Fawn Small Stitched Rectangle. Since the patterned paper has a little black detailing, Gina K. Designs 120# Black Cardstock was cut 1/8” larger and adhered to the back with Kokuyo Dotliner Adhesive. It was then adhered to a top folding A2 cardbase made from Gina K. Designs 120# White Cardstock with Tombo Mono Multi Glue.
The sentiment consists of a die cut from the Sizzix Celebrate pack, which was cut three times from black cardstock and adhered together using Elmer’s Craft Bond Spray Adhesive. To determine placement the focal image was laid on the patterned paper, along with the die cut word, and the secondary sentiment from Winnie & Walter The Big the Bold and Party was stamped with Hero Hues India Ink Black. The “birthday” die cut was adhered above using Ranger Multi Medium Matte. 3M ½” Foam Tape was added to the back of the focal image and then adhered to the card front and the card was done.
This card was made for Cora, who is turning twelve this year and joining the young women organization in our church. We are excited to have her join us. Her favorite color is blue.
The image for this card is La-La Land Crafts Sugar n’ Spice Marci. This is a digital image and was printed on Neena Classic Crest 80# Solar White Cardstock and then cut with the Silhouette Cameo and the PIXScan Mat. It was colored with Copic Markers (colors listed below).
To set off the image a heart was die cut from Neena Classic Crest 80# Solar White Cardstock using the Spellbinders Classic Hearts Die. It was then dry embossed with the DoCrafts Small Polka Hearts Embossing Folder. I went through my stash of cardstock and found some blue and white polka dot paper. It was die cut using the WPlus9 Gift Card Layers Rectangle Die and adhered to the front of a top folding A2 Cardbase made from Gina K. Designs 120# White Cardstock using Kokuyo Dotliner Adhesive. The heart die was adhered to the front using Tombo Mono Multi Glue.
The sentiment consists of a Happy die cut three times from 80# white cardstock using the Lawn Fawn Happy Happy Happy Stamp and Die set. The secondary sentiment is from My Favorite Things Happy Everything Stamp with VersaMark Ink and heat embossed with Gina K. Designs Detailed White Embossing Powder.
3M ½” Foam Tape was added to the back of the focal image and it was then adhered to the embossed heart and the card was done.
|
import logging
logger = logging.getLogger(__name__)
class Harmony(object):
    """Key-compatibility model based on the Circle of Fifths.

    Major keys map to 1..12 and their relative minors to 101..112.  Keys
    one step apart on the wheel, plus the relative major/minor, are
    considered harmonically compatible.
    """
    # numeric representation of the Circle of 5ths.
    # NOTE(review): 'Abm' (enharmonic with 'G#m', value 105) is absent --
    # confirm whether that spelling should be accepted.
    HARMONY = {
        'G': 1,
        'D': 2,
        'A': 3,
        'E': 4,
        'B': 5,
        'F#': 6,
        'Gb': 6,
        'Db': 7,
        'C#': 7,
        'Ab': 8,
        'Eb': 9,
        'Bb': 10,
        'F': 11,
        'C': 12,
        'Em': 101,
        'Bm': 102,
        'F#m': 103,
        'Gbm': 103,
        'Dbm': 104,
        'C#m': 104,
        'G#m': 105,
        'Ebm': 106,
        'D#m': 106,
        'A#m': 107,
        'Bbm': 107,
        'Fm': 108,
        'Cm': 109,
        'Gm': 110,
        'Dm': 111,
        'Am': 112,
    }
    def __init__(self, root_key):
        """
        :param root_key: A string value representing the root key signature for the song.
        :raises LookupError: if root_key is not a recognized key signature.
        """
        # Membership test directly on the dict (no .keys() materialization).
        if root_key not in Harmony.HARMONY:
            raise LookupError('{key} is not recognized'.format(key=root_key))
        self.root_key = root_key
        self.root_key_value = Harmony.HARMONY[self.root_key]
        # a list representing all compatible tone for a given root_key
        self.harmonies = (self._get_value(self.root_key_value) +
                          self.down_shift() + self.up_shift() + self.minor())
    def __repr__(self):
        return '<Harmony key={0.root_key} value={0.root_key_value}>'.format(self)
    @staticmethod
    def _get_value(value):
        """ performs a look-up of the HARMONY dictionary by value.
        :parameter value: An integer representing a harmonic key
        :return: A list of keys
        :rtype list:
        """
        # .items() (not Python-2-only .iteritems()) keeps this 2/3 compatible.
        return [note for note, fifth_value in Harmony.HARMONY.items() if value == fifth_value]
    def down_shift(self):
        """ Fetches the next key(s) that represents a single tone downward
        :return: A list representing a compatible key
        :rtype list:
        """
        # handle a roll over at position "1" on the wheel. in the case of 1 or 101 we down
        # shift to 12 or 112
        if self.root_key_value == 1:
            down = Harmony._get_value(12)
        elif self.root_key_value == 101:
            down = Harmony._get_value(112)
        else:
            down = Harmony._get_value(self.root_key_value - 1)
        return down
    def up_shift(self):
        """ Fetches the next key(s) that represents a single tone forward .
        :return: A list representing a group of compatible keys
        :rtype list:
        """
        # handle a rollover at the apex of the wheel . when key_value is 12 or 112
        # we shift forward to 1 (major) or 101 (minor)
        if self.root_key_value == 12:
            up = Harmony._get_value(1)
        elif self.root_key_value == 112:
            up = Harmony._get_value(101)
        else:
            up = Harmony._get_value(self.root_key_value + 1)
        return up
    def minor(self):
        """ Fetches an adjacent key on the wheel (maj -> min or min -> maj).
        :return: A list representing a group of compatible keys
        :rtype list:
        """
        # shift from major to minor
        if self.root_key_value < 100:
            return self._get_value(self.root_key_value + 100)
        # otherwise shift minor to major.
        else:
            return self._get_value(self.root_key_value - 100)
|
2nd Annual All member show & anniversary celebration!
On his knees, drained; he simply can’t continue the pursuit. The muse escapes him, again. Elusive, cruel, intoxicating. This is a celebration of the beauty of unattainable dreams and the spirits that, nevertheless, cannot resist the chase!
Friends and strangers are invited to celebrate with us and see some amazing artwork from our member artists and crafters. Original art and handmade crafts make incredible gifts for the special people in your universe.
← Sweatshop Social: DIY Night Open Studio!
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System.Core")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
### <summary>
### Demonstration of using coarse and fine universe selection together to filter down a smaller universe of stocks.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="coarse universes" />
### <meta name="tag" content="fine universes" />
class CoarseFundamentalTop3Algorithm(QCAlgorithm):
    """Holds an equal-weighted portfolio of the three securities with the
    highest daily dollar volume, rebalancing as the coarse universe changes."""

    def Initialize(self):
        """Configure dates, cash, resolution and the coarse universe filter."""
        self.SetStartDate(2014, 3, 24)   # Set Start Date
        self.SetEndDate(2014, 4, 7)      # Set End Date
        self.SetCash(50000)              # Set Strategy Cash
        # Securities added by the universe are subscribed at daily resolution.
        self.UniverseSettings.Resolution = Resolution.Daily
        # Selection function: IEnumerable<CoarseFundamental> -> IEnumerable<Symbol>.
        self.AddUniverse(self.CoarseSelectionFunction)
        self.__numberOfSymbols = 3
        self._changes = None

    def CoarseSelectionFunction(self, coarse):
        """Return the symbols of the top-N entries by daily dollar volume."""
        ranked = sorted(coarse, key=lambda entry: entry.DollarVolume, reverse=True)
        top_entries = ranked[:self.__numberOfSymbols]
        return [entry.Symbol for entry in top_entries]

    def OnData(self, data):
        """Liquidate dropped securities and equal-weight the new ones."""
        self.Log(f"OnData({self.UtcTime}): Keys: {', '.join([key.Value for key in data.Keys])}")
        # Nothing to do until the universe has actually changed.
        if self._changes is None:
            return
        # Sell anything that just left the universe.
        for removed in self._changes.RemovedSecurities:
            if removed.Invested:
                self.Liquidate(removed.Symbol)
        # 1/N allocation in each newly added security.
        for added in self._changes.AddedSecurities:
            self.SetHoldings(added.Symbol, 1 / self.__numberOfSymbols)
        self._changes = None

    def OnSecuritiesChanged(self, changes):
        """Cache universe changes until the next data event."""
        self._changes = changes
        self.Log(f"OnSecuritiesChanged({self.UtcTime}):: {changes}")

    def OnOrderEvent(self, fill):
        self.Log(f"OnOrderEvent({self.UtcTime}):: {fill}")
|
ZTE's latest attempt at breaking into the premium smartphone market is the Axon 7, and it's available for pre-order today. You have to drop $399.98 to secure your Axon 7, which offers some rather high-end specs for that price. One catch, you'll have to wait a few weeks to actually get your phone.
The Axon 7 has an aluminum frame, supposedly designed in partnership with BMW Designworks. Inside is a Snapdragon 820, 4GB of RAM, 64GB of storage, and a 3250mAh battery. ZTE also has support for HiFi audio in the Axon 7, just like last year's Axon Pro. Around back is a 20MP camera with optical/electronic stabilization and support for 4K video.
The $400 asking price is the same as the OnePlus 3, which is a solid phone. The Axon 7 seems to stack up well, at least on paper. We're in the process of reviewing this phone, if you want to wait it out. One thing I can say for certain, you should avoid ZTE's SmartPay leasing option. The Axon 7 is available on ZTE's site in gray and gold colors. Note, the gold version won't ship until September 9th.
Apparently ZTE started taking pre-orders a few weeks ago, but today is the "official" launch? ZTE is very unusual. Or maybe that blog post was not supposed to be titled "Axon 7 Officially on Sale Today!" Whatever the reason, you can still buy it if you want.
|
"""
Django settings for survivalguide project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Repository root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hxzazv7qqn-nv8c=2(9$ch-3og5tms5-hr4s1zkhyxtrs9p8to'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty is acceptable while DEBUG is True; list served hostnames in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: django-crispy-forms (form rendering) and South
    # (schema migrations, used on Django versions before 1.7).
    'crispy_forms',
    'south',
    # Project-local app.
    'talks',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'survivalguide.urls'
WSGI_APPLICATION = 'survivalguide.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# SQLite file in the repository root -- development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Project-level template directory searched by the template loaders.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
# Render crispy forms with Bootstrap 3 markup.
CRISPY_TEMPLATE_PACK = 'bootstrap3'
|
This blog will take you through the steps of maintaining a wonderful wildflower meadow over the summer months. If you are keen to find out even more about Wildflower Turf, look out for our book which will be published later on this year.
A single end of season cut is essential, however, you may feel one of the below mid-season cuts is appropriate.
This maintenance cut is a little bit of a leap of faith as the meadow may well be looking at its best right now. Cutting with a strimmer with a hedge trimmer attachment is the perfect type of tool to use taking the top layer off, to a height of 20-30cms. The purpose of the cut is to take off a large proportion of the young green material that has not experienced leaf shatter – a very effective way of depleting nutrients. You will see plenty of flowering buds under this level and the flowers will be quick to re-establish. You must be quite sensitive when forking off the material from the meadow, raking is not advised for clearance. With this cut, you will definitely lengthen the flowering period of the meadow, whilst improving the flower to grass ratio for the following season.
A result of high fertility or too much water during the establishment phase.
Clearing away the cutting on any cut is vital to deplete nutrient levels.
In some people’s eyes, senesced growth (browning vegetation and seed heads) can look scruffy. A cut at this time of year can tidy up the whole area. It will be of great benefit to the late flowering species within the meadow such as toad flax, mallow, yarrow, vetch, clover and scabious. Take the browning vegetation off to about 5-10cms. Once you have cleared the cuttings give the area a really good water. Within days you are likely to see the whole area green up again and flowering will continue until your final maintenance cut in the Autumn.
If your wildflower meadow starts to look brown/scruffy, simply give it a high cut.
Whether you have decided to make maintenance cuts earlier in the year or not, you will always need to do a final maintenance cut for the year in late summer/ early autumn after the wildflowers have set and shed their seed. This cut is essential for the health of the meadow, not only does it tidy up the area for the winter but it stops the senesced summer growth from covering the growing plant in a layer of rotting green material. If this material isn’t removed it will act as a barrier for the regrowth of the parent plant and also reintroduce a level of nutrients that is best avoided. An open sward over the winter ensures healthy, disease free plants which can benefit from what light and warmth is available to them during these months. If you have not cut the meadow previously in the year you will find that some of the stems are dry and tough, making it more difficult to cut.
Once cleared you will find that your meadow starts to grow again although how much regrowth will depend on the time of the cut, soil fertility, moisture levels and the weather. This is because some species will be quick to utilise any freshly created space, thus taking advantage of the new opportunity that they have been given. Allowing the meadow a chance to green-up ready for the winter is a good idea before it then becomes dormant with little or no growth through the winter. When spring approaches, the wildflowers are in the perfect position to develop quickly and repeat their perennial cycle thus guaranteeing a wildflower meadow year after year.
After depleting nutrients and reducing grass content, this meadow was allowed to flourish more impressively in its second year.
When planning your maintenance cut, choose a dry day, you will find it lighter and cleaner to clear the cuttings.
Be thorough with your clearance. The aim is to deplete nutrient levels to continue to keep the wildflower to be competitive and limit grasses and nutrient loving plants such as fat hen and docks. Rotting material left on site will also be a haven for pests such as slugs.
When clearing the area with a rake, it is fine to treat the ground and plants roughly, this will pull out any thatched material and provide light and air to the roots. The plants are hardy and will not be affected by some tough love.
Do take care, especially for amphibians within your meadow. A staggered cutting program will help – cutting half the area one day and then delaying the second half for a week or two will allow fauna to migrate to the uncut area. By the time of the second cut, the first cut area will have some regrowth to provide a beneficial habitat.
If you are using the cuttings for compost and they feel particularly dry, give them a soaking as they will break down better with moisture.
As a final tidy up a good rake is worthwhile, or a quick and easy option is to run a rotary mower with collectors over the area.
Once you have completely cleared the area you may find bare patches. These are perfectly acceptable. They may look unsightly for a little while, but the chances are there is something dormant underground waiting to get going again in the spring. If you do want to enhance the area with plugs you can use this sort of area to plant in as there will be less competition.
Be vigilant with leaf and fruit removal after your Autumnal cut. It will be much easier once you have removed the senesced meadow material to clear falling leaves but do not leave them to rot down and add nutrients to the soil or provide a potential risk of disease.
Cutting and removing, using an Amazone Profihopper.
Flourishing again the following June.
This entry was posted in General Interest, Maintenance, Wildflower Turf and tagged looking after a wildflower meadow, maintenance, summer, wildflower turf. Bookmark the permalink.
|
import numpy as np
from .Transform import *
class TransformObject:
    """Position / rotation / scale container that lazily rebuilds its
    world matrix (and optionally its inverse) when its state changes.

    The vector and matrix types (``Float3``, ``Float4``, ``Matrix4``,
    the ``WORLD_*`` basis constants and the ``matrix_*`` /
    ``transform_matrix`` helpers) come from the sibling ``Transform``
    module and behave like numpy arrays — in particular they support
    in-place assignment via ``arr[...] = value``, which this class uses
    throughout so that external references to ``pos``/``rot``/``scale``
    stay valid.  Rotation angles are stored in radians and normalized
    into [0, TWO_PI).
    """

    def __init__(self, local=None):
        # Quaternion buffer (x, y, z, w); reserved for a quaternion-based
        # rotation path — the active path builds the rotation matrix
        # directly from Euler angles in update_transform().
        self.quat = Float4(0.0, 0.0, 0.0, 1.0)
        # Optional base/local matrix baked into the final transform.
        self.local = local if local is not None else Matrix4()

        # Dirty flag: True when the matrix was rebuilt on the last
        # update_transform() call.
        self.updated = True

        # Orientation basis vectors (world space), refreshed whenever
        # the rotation changes.
        self.left = WORLD_LEFT.copy()
        self.up = WORLD_UP.copy()
        self.front = WORLD_FRONT.copy()

        # Current translate / rotate / scale state.
        self.pos = Float3()
        self.rot = Float3()              # pitch, yaw, roll in radians
        self.scale = Float3(1, 1, 1)

        # Previous state, used for change detection in update_transform().
        self.prev_Pos = Float3()
        self.prev_Rot = Float3()
        self.prev_Scale = Float3(1, 1, 1)

        # Cached matrices; prev_* hold last frame's results so callers
        # can compute per-frame deltas (e.g. motion vectors).
        self.rotationMatrix = Matrix4()
        self.matrix = Matrix4()
        self.inverse_matrix = Matrix4()
        self.prev_matrix = Matrix4()
        self.prev_inverse_matrix = Matrix4()

        self.update_transform(True)

    def reset_transform(self):
        """Reset position/rotation to zero and scale to one, then rebuild."""
        self.updated = True
        self.set_pos(Float3())
        self.set_rotation(Float3())
        self.set_scale(Float3(1, 1, 1))
        self.update_transform(True)

    # Translate
    def get_pos(self):
        return self.pos

    def get_pos_x(self):
        return self.pos[0]

    def get_pos_y(self):
        return self.pos[1]

    def get_pos_z(self):
        return self.pos[2]

    def set_pos(self, pos):
        # In-place assignment keeps external references to self.pos valid.
        self.pos[...] = pos

    def set_pos_x(self, x):
        self.pos[0] = x

    def set_pos_y(self, y):
        self.pos[1] = y

    def set_pos_z(self, z):
        self.pos[2] = z

    def move(self, pos):
        """Translate by a world-space offset."""
        self.pos[...] = self.pos + pos

    def move_front(self, pos):
        """Translate along the current front vector."""
        self.pos[...] = self.pos + self.front * pos

    def move_left(self, pos):
        """Translate along the current left vector."""
        self.pos[...] = self.pos + self.left * pos

    def move_up(self, pos):
        """Translate along the current up vector."""
        self.pos[...] = self.pos + self.up * pos

    def move_x(self, pos_x):
        self.pos[0] += pos_x

    def move_y(self, pos_y):
        self.pos[1] += pos_y

    def move_z(self, pos_z):
        self.pos[2] += pos_z

    # Rotation (radians; normalized into [0, TWO_PI))
    def get_rotation(self):
        return self.rot

    def get_pitch(self):
        return self.rot[0]

    def get_yaw(self):
        return self.rot[1]

    def get_roll(self):
        return self.rot[2]

    def set_rotation(self, rot):
        self.rot[...] = rot

    def set_pitch(self, pitch):
        if pitch > TWO_PI or pitch < 0.0:
            pitch %= TWO_PI
        self.rot[0] = pitch

    def set_yaw(self, yaw):
        if yaw > TWO_PI or yaw < 0.0:
            yaw %= TWO_PI
        self.rot[1] = yaw

    def set_roll(self, roll):
        if roll > TWO_PI or roll < 0.0:
            roll %= TWO_PI
        self.rot[2] = roll

    def rotation(self, rot):
        """Apply a (pitch, yaw, roll) delta."""
        self.rotation_pitch(rot[0])
        self.rotation_yaw(rot[1])
        self.rotation_roll(rot[2])

    def rotation_pitch(self, delta=0.0):
        self.rot[0] += delta
        if self.rot[0] > TWO_PI or self.rot[0] < 0.0:
            self.rot[0] %= TWO_PI

    def rotation_yaw(self, delta=0.0):
        self.rot[1] += delta
        if self.rot[1] > TWO_PI or self.rot[1] < 0.0:
            self.rot[1] %= TWO_PI

    def rotation_roll(self, delta=0.0):
        self.rot[2] += delta
        if self.rot[2] > TWO_PI or self.rot[2] < 0.0:
            self.rot[2] %= TWO_PI

    # Scale
    def get_scale(self):
        return self.scale

    def get_scale_x(self):
        return self.scale[0]

    def get_scale_y(self):
        return self.scale[1]

    # Backward-compatible alias for the historical, inconsistently
    # capitalized accessor name.
    get_scale_Y = get_scale_y

    def get_scale_z(self):
        return self.scale[2]

    def set_scale(self, scale):
        self.scale[...] = scale

    def set_scale_x(self, x):
        self.scale[0] = x

    def set_scale_y(self, y):
        self.scale[1] = y

    def set_scale_z(self, z):
        self.scale[2] = z

    def scaling(self, scale):
        """Apply an additive scale delta."""
        self.scale[...] = self.scale + scale

    # update Transform
    def update_transform(self, update_inverse_matrix=False, force_update=False):
        """Rebuild the transform matrix if pos/rot/scale changed.

        :param update_inverse_matrix: also maintain ``inverse_matrix``
            (and its previous-frame copy).
        :param force_update: rebuild even if nothing changed.
        :return: True if the matrix was rebuilt this call.
        """
        prev_updated = self.updated
        self.updated = False

        if any(self.prev_Pos != self.pos) or force_update:
            self.prev_Pos[...] = self.pos
            self.updated = True

        if any(self.prev_Rot != self.rot) or force_update:
            self.prev_Rot[...] = self.rot
            self.updated = True
            # Build the rotation matrix directly from Euler angles and
            # refresh the orientation basis vectors from its axes.
            matrix_rotation(self.rotationMatrix, *self.rot)
            matrix_to_vectors(self.rotationMatrix, self.left, self.up, self.front)

        if any(self.prev_Scale != self.scale) or force_update:
            self.prev_Scale[...] = self.scale
            self.updated = True

        # Snapshot last frame's matrices whenever this frame or the
        # previous one rebuilt them, so prev_* always lag by one update.
        if prev_updated or self.updated:
            self.prev_matrix[...] = self.matrix
            if update_inverse_matrix:
                self.prev_inverse_matrix[...] = self.inverse_matrix

        if self.updated:
            # Compose: local base, then translate/rotate/scale in place.
            self.matrix[...] = self.local
            transform_matrix(self.matrix, self.pos, self.rotationMatrix, self.scale)

            if update_inverse_matrix:
                # Analytic inverse of the TRS composition; cheaper than
                # a general np.linalg.inv on the 4x4 matrix.
                self.inverse_matrix[...] = self.local
                inverse_transform_matrix(self.inverse_matrix, self.pos, self.rotationMatrix, self.scale)
        return self.updated

    def get_transform_infos(self):
        """Return a multi-line, human-readable dump of the transform state."""
        text = "\tPosition : " + " ".join(["%2.2f" % i for i in self.pos])
        text += "\n\tRotation : " + " ".join(["%2.2f" % i for i in self.rot])
        text += "\n\tFront : " + " ".join(["%2.2f" % i for i in self.front])
        text += "\n\tLeft : " + " ".join(["%2.2f" % i for i in self.left])
        text += "\n\tUp : " + " ".join(["%2.2f" % i for i in self.up])
        text += "\n\tMatrix"
        text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[0, :]])
        text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[1, :]])
        text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[2, :]])
        text += "\n\t" + " ".join(["%2.2f" % i for i in self.matrix[3, :]])
        return text
|
Spectacular views of the Atlantic Ocean await you from the private balcony of this Tybee vacation condominium. This is a top floor unit with amenities that include use of the property's 3 swimming pools, including 1 heated and a Kid's Pool with waterfall, private beach access and The Deck beachside restaurant.
Great staff. Everything associated with our visit went great.
Perfect! The condo is absolutely beautiful and you can't beat oceanfront with a balcony.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.