text
stringlengths
29
850k
#coding=utf-8 from time import time from urlparse import urlparse from django.db import models from django.db.models.signals import post_save from django.dispatch import receiver from mezzanine.core.models import Displayable, Ownable from mezzanine.generic.models import Rating from mezzanine.generic.fields import RatingField, CommentsField class Link(Displayable, Ownable): c=(('hc','清真餐厅'),('yc','一餐厅'),('ec','二餐厅'),('sc','三餐厅'),('jby','聚博园'),('other','未分类')) canteen=models.CharField(max_length=20,choices=c,default='ec') link = models.URLField(blank=True) #这个根本不需要,不要删除吧,免得麻烦,只要不让它出现就行,完成 rating = RatingField() comments = CommentsField() solved = models.BooleanField(default=False) @models.permalink def get_absolute_url(self): return ("link_detail", (), {"slug": self.slug}) @property def domain(self): return urlparse(self.link).netloc class Profile(models.Model): user = models.OneToOneField("auth.User") website = models.URLField(blank=True) bio = models.TextField(blank=True) karma = models.IntegerField(default=0, editable=False) def __unicode__(self): return "%s (%s)" % (self.user, self.karma) @receiver(post_save, sender=Rating) def karma(sender, **kwargs): """ Each time a rating is saved, check its value and modify the profile karma for the related object's user accordingly. Since ratings are either +1/-1, if a rating is being edited, we can assume that the existing rating is in the other direction, so we multiply the karma modifier by 2. """ rating = kwargs["instance"] value = int(rating.value) if not kwargs["created"]: value *= 2 content_object = rating.content_object if rating.user != content_object.user: queryset = Profile.objects.filter(user=content_object.user) queryset.update(karma=models.F("karma") + value)
The manual wheelchair Ki Mobility Tsunami ALX, with its sleek look and ultra light rigid frame, offers a more responsive ride at an affordable price. As a 20% stiffer and more responsive chair in its price range, the Tsunami ALX stands out with an 11.2lb transport weight and as a product that doesn’t need to sacrifice quality, design or features to offer a competitive price. Its large option availability allows users to customize their ride to their needs, while the aluminum 7000 series construction brings stability and maneuverability at a low weight, allowing it to take you wherever you need to go. Just because it comes at a lower price than many competitors, doesn’t mean the manual wheelchair Ki Mobility Tsunami ALX is at all lacking. Its improved design and innovative technology make it arguably the best choice in the market for manual wheelchairs. With a wide range of styles and positioning to choose from, the Tsunami ALX comes with more front-frame bend options, tapers and colors than any other similar product in its field. A tubular component system provides an individual seat height setting without needing any extra clutter of hardware, with the feature of adjusting the seat up and down in ¼” increments to provide a sleek, clean appearance and improved stiffness. By employing the ingenious Crescent CG system, the side frame is kept free of excess hardware but remains infinitely adjustable. This makes it a comfortable ride that includes adjustable footplates, stronger and more precise wheels and precision machined aluminum caster housings, all adding to the responsiveness and durability of your ride. As experts in the field of mobility, we are proud to introduce to our customers products such as the manual wheelchair Ki Mobility Tsunami ALX, a powerful ride at an affordable price. Contact us today to find out more or visit our showroom and come test it for yourself.
#!/usr/bin/env python
"""Project bootstrap script: extend sys.path for satchmo, then dispatch to Django."""
import os.path
import sys

DIRNAME = os.path.dirname(__file__)

# trick to get the two-levels up directory, which for the "simple" project
# should be the satchmo dir
_parent = lambda x: os.path.normpath(os.path.join(x, '..'))
SATCHMO_DIRNAME = _parent(_parent(DIRNAME))
SATCHMO_APPS = os.path.join(SATCHMO_DIRNAME, 'apps')

# Make both the satchmo apps and this project directory importable.
if SATCHMO_APPS not in sys.path:
    sys.path.append(SATCHMO_APPS)
if DIRNAME not in sys.path:
    sys.path.append(DIRNAME)

try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    # NOTE: the original re-imported sys here; it is already in scope.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    from django.core.management import execute_from_command_line
    # Removed a leftover Python 2 debug print of sys.argv.
    execute_from_command_line(sys.argv)
I grabbed this book ages ago because HELLLO LOOK AT THAT COVER. It also has a really intriguing concept and I have heard nothing but good things about it. Of course though I never got around to it because so many books and so little time. I love this book, and the follow up! I completely adored Caden and Makenna, they are wonderful!
#!/usr/bin/env python
import re
import numpy

"""
@brief A-Eqdsk reader class
@version $Id$

Copyright &copy; 2006-2008, Tech-X Corporation, Boulder, CO
See LICENSE file for conditions of use.

The official document describing a-eqdsk files:
http://fusion.gat.com/THEORY/efit/a_eqdsk.html
"""


class Aeqdsk:
    """Parser for EFIT a-eqdsk files.

    Every parsed quantity is stored in self.data under its EFIT name as a
    (value, description) tuple; chord/coil signals are numpy arrays.
    """

    # Fortran write formats used by EFIT (see the a_eqdsk.html reference):
    # 1060 -- the '*' header line: time, error flags, limiter location,
    #         CO2 chord counts and the q(0) flag.
    FMT_1060 = r'^\s*\*\s*([\w\.\-]+)\s+(\d+)\s+(\d+)\s([\w]+)\s+(\d+)\s+(\d+)\s([\w ]+)\s+\d+\s+\d+\s*$'
    # 1040 -- four fixed-width E-format reals per line.
    FMT_1040 = r'^\s*' + 4 * r'([\s\-]\d+\.\d+[Ee][\+\-]\d\d)'
    # 1041 -- four integers per line (shot > 91000 extension).
    FMT_1041 = r'^' + 4 * r'\s+([ \-]\d+)'

    # Scalar groups, four per 1040-format line, in file order. A None entry
    # means that column is read but deliberately not stored (matches the
    # commented-out fields of the original reader).
    _GROUPS_BEFORE_CO2 = (
        (('tsaisq', "total chi2 from magnetic probes, flux loops, Rogowskiand external coils"),
         ('rcencm', "major radius in cm for vacuum field BCENTR"),
         ('bcentr', "vacuum toroidal magnetic field in Tesla at RCENCM"),
         ('pasmat', "measured plasma toroidal current in Ampere")),
        (('cpasma', "fitted plasma toroidal current in Ampere-turn"),
         ('rout', "major radius of geometric center in cm"),
         ('zout', "Z of geometric center in cm"),
         ('aout', "plasma minor radius in cm")),
        (('eout', "Plasma boundary elongation"),
         ('doutu', "upper triangularity"),
         ('doutl', "lower triangularity"),
         ('vout', "plasma volume in cm3")),
        (('rcurrt', "major radius in cm of current centroid"),
         ('zcurrt', "Z in cm at current centroid"),
         ('qsta', "equivalent safety factor q*"),
         ('betat', "toroidal b in %")),
        (('betap', "poloidal b with normalization average poloidal magnetic BPOLAV defined through Ampere's law"),
         ('ali', "li with normalization average poloidal magnetic defined through Ampere's law"),
         ('oleft', "plasma inner gap in cm"),
         ('oright', "plasma outer gap in cm")),
        (('otop', "plasma top gap in cm"),
         ('obott', "plasma bottom gap in cm"),
         ('qpsib', "q at 95% of poloidal flux"),
         ('vertn', "vacuum field index at current centroid")),
    )

    _GROUPS_AFTER_CO2 = (
        (('shearb', ""),
         ('bpolav', "average poloidal magnetic field in Tesla defined through Ampere's law"),
         ('s1', "Shafranov boundary line integrals"),
         ('s2', "Shafranov boundary line integrals")),
        (('s3', "Shafranov boundary line integrals"),
         ('qout', "q at plasma boundary"),
         ('olefs', ""),
         ('orighs', "outer gap of external second separatrix in cm")),
        (('otops', "top gap of external second separatrix in cm"),
         ('sibdry', ""),
         ('areao', "cross sectional area in cm2"),
         ('wplasm', "")),
        (('terror', "equilibrium convergence error"),
         ('elongm', "elongation at magnetic axis"),
         ('qqmagx', "axial safety factor q(0)"),
         ('cdflux', "computed diamagnetic flux in Volt-sec")),
        (('alpha', "Shafranov boundary line integral parameter"),
         ('rttt', "Shafranov boundary line integral parameter"),
         ('psiref', "reference poloidal flux in VS/rad"),
         ('xndnt', "vertical stability parameter, vacuum field index normalized to critical index value")),
        (('rseps1', "major radius of x point in cm"),
         ('zseps1', ""),
         ('rseps2', "major radius of x point in cm"),
         ('zseps2', "")),
        (('sepexp', "separatrix radial expansion in cm"),
         ('obots', "bottom gap of external second separatrix in cm"),
         ('btaxp', "toroidal magnetic field at magnetic axis in Tesla"),
         ('btaxv', "vacuum toroidal magnetic field at magnetic axis in Tesla")),
        (('aaq1', "minor radius of q=1 surface in cm, 100 if not found"),
         ('aaq2', "minor radius of q=2 surface in cm, 100 if not found"),
         ('aaq3', "minor radius of q=3 surface in cm, 100 if not found"),
         ('seplim', "> 0 for minimum gap in cm in divertor configurations, < 0 absolute value for minimum distance to external separatrix in limiter configurations")),
        (('rmagx', "major radius in cm at magnetic axis"),
         ('zmagx', ""),
         ('simagx', ""),
         ('taumhd', "energy confinement time in ms")),
        (('betapd', "diamagnetic poloidal b"),
         ('betatd', "diamagnetic toroidal b in %"),
         ('wplasmd', "diamagnetic plasma stored energy in Joule"),
         ('fluxx', "measured diamagnetic flux in Volt-sec")),
        (('vloopt', "measured loop voltage in volt"),
         ('taudia', "diamagnetic energy confinement time in ms"),
         ('qmerci', "Mercier stability criterion on axial q(0), q(0) > QMERCI for stability"),
         ('tavem', "average time in ms for magnetic and MSE data")),
    )

    # Trailing groups: present only in newer files, so a non-matching line is
    # skipped without error (exactly as the original reader behaved).
    _GROUPS_OPTIONAL = (
        (('pbinj', "neutral beam injection power in Watts"),
         ('rvsin', "major radius of vessel inner hit spot in cm"),
         ('zvsin', "Z of vessel inner hit spot in cm"),
         ('rvsout', "major radius of vessel outer hit spot in cm")),
        (('zvsout', "Z of vessel outer hit spot in cm"),
         ('vsurfa', "plasma surface loop voltage in volt, E EQDSK only"),
         ('wpdot', "time derivative of plasma stored energy in Watt, E EQDSK only"),
         ('wbdot', "time derivative of poloidal magnetic energy in Watt, E EQDSK only")),
        (('slantu', ""),
         ('slantl', ""),
         ('zuperts', ""),
         ('chipre', "total chi2 pressure")),
        (('cjor95', ""),
         ('pp95', "normalized P'(y) at 95% normalized poloidal flux"),
         ('ssep', ""),
         ('yyy2', "Shafranov Y2 current moment")),
        (('xnnc', ""),
         ('cprof', "current profile parametrization parameter"),
         None,  # 'oring' (not used)
         ('cjor0', "normalized flux surface average current density at 99% of normalized poloidal flux")),
        (('fexpan', "flux expansion at x point"),
         ('qqmin', "minimum safety factor qmin"),
         ('chigamt', "total chi2 MSE"),
         ('ssi01', "magnetic shear at 1% of normalized poloidal flux")),
        (('fexpvs', "flux expansion at outer lower vessel hit spot"),
         ('sepnose', "radial distance in cm between x point and external field line at ZNOSE"),
         ('ssi95', "magnetic shear at 95% of normalized poloidal flux"),
         ('rqqmin', "normalized radius of qmin , square root of normalized volume")),
        (('cjor99', ""),
         ('cj1ave', "normalized average current density in plasma outer 5% normalized poloidal flux region"),
         ('rmidin', "inner major radius in m at Z=0.0"),
         ('rmidout', "outer major radius in m at Z=0.0")),
        (('psurfa', "plasma boundary surface area in m2"),
         None, None, None),
    )

    def __init__(self):
        """Constructor."""
        self.data = {}

    @staticmethod
    def _parse_floats(line):
        """Extract every fixed-format real from a packed 1040-style line.

        Replaces the original eval() of a comma-spliced string, which would
        have executed arbitrary content read from the file.
        """
        return [float(tok) for tok in re.findall(r'\-?\d+\.\d+[Ee][\+\-]\d\d', line)]

    def _read_group(self, lines, counter, group, required):
        """Parse one 1040-format line of four reals into self.data.

        group is a sequence of four (name, description) pairs (or None to
        skip a column). Returns the index of the next unread line. If the
        line does not match: raise IOError when required, otherwise return
        counter unchanged so the same line is retried against the next
        optional group — matching the original reader's behavior. (The
        original raised plain strings, which is a TypeError on Python 2.6+.)
        """
        line = lines[counter]
        m = re.match(self.FMT_1040, line)
        if m is None:
            if required:
                raise IOError('Read error at line %d:%s' % (counter, line))
            return counter
        for pos, item in enumerate(group):
            if item is not None:
                name, desc = item
                self.data[name] = float(m.group(pos + 1)), desc
        return counter + 1

    def _read_array(self, lines, counter, count):
        """Collect `count` reals spread over consecutive lines.

        Returns (numpy array, next line index).
        """
        values = []
        while len(values) < count:
            values += self._parse_floats(lines[counter])
            counter += 1
        return numpy.array(values), counter

    def openFile(self, filename):
        """Open an a-eqdsk file and parse its content into self.data."""
        # Close the handle deterministically (original leaked it).
        with open(filename, 'r') as f:
            lines = f.readlines()

        # Scan forward to the 1060 header line:
        # time(jj),jflag(jj),lflag,limloc(jj),mco2v,mco2r,qmflag
        counter = 0
        m = None
        while m is None:
            m = re.match(self.FMT_1060, lines[counter])
            counter += 1

        self.data['time'] = float(m.group(1)), 'time ms'
        self.data['jflag'] = int(m.group(2)), '0 if error'
        self.data['lflag'] = int(m.group(3)), '>0 if error'
        self.data['limloc'] = m.group(4), 'IN/OUT/TOP/BOT: limiter inside/outside/top/bot SNT/SNB: single null top/bottom DN: double null'
        self.data['mco2v'] = int(m.group(5)), 'number of vertical CO2 density chords'
        self.data['mco2r'] = int(m.group(6)), 'number of radial CO2 density chords'
        self.data['qmflag'] = m.group(7), 'axial q(0) flag, FIX if constrained and CLC for float'

        # Mandatory scalar groups up to the CO2 chord blocks.
        for group in self._GROUPS_BEFORE_CO2:
            counter = self._read_group(lines, counter, group, required=True)

        # CO2 interferometer chords: path lengths and line-averaged densities.
        mco2v = self.data['mco2v'][0]
        arr, counter = self._read_array(lines, counter, mco2v)
        self.data['rco2v'] = arr, "path length in cm of vertical CO2 density chord"
        arr, counter = self._read_array(lines, counter, mco2v)
        self.data['dco2v'] = arr, "line average electron density in cm3 from vertical CO2 chord"
        mco2r = self.data['mco2r'][0]
        arr, counter = self._read_array(lines, counter, mco2r)
        self.data['rco2r'] = arr, "path length in cm of radial CO2 density chord"
        arr, counter = self._read_array(lines, counter, mco2r)
        self.data['dco2r'] = arr, "line average electron density in cm3 from radial CO2 chord"

        # Mandatory scalar groups between the CO2 blocks and the signal arrays.
        for group in self._GROUPS_AFTER_CO2:
            counter = self._read_group(lines, counter, group, required=True)

        # Array dimensions (1041 format), written for shots > 91000.
        line = lines[counter]
        m = re.match(self.FMT_1041, line)
        if m is None:
            raise IOError('Read error at line %d:%s' % (counter, line))
        self.data['nsilop'] = int(m.group(1)), ""
        self.data['magpri'] = int(m.group(2)), ""
        self.data['nfcoil'] = int(m.group(3)), ""
        self.data['nesum'] = int(m.group(4)), ""
        counter += 1

        # Flux-loop and magnetic-probe signals share one block of numbers.
        nsilop = self.data['nsilop'][0]
        magpri = self.data['magpri'][0]
        arr, counter = self._read_array(lines, counter, nsilop + magpri)
        self.data['csilop'] = arr[:nsilop], "computed flux loop signals in Weber"
        self.data['cmpr2'] = arr[nsilop:], ""

        arr, counter = self._read_array(lines, counter, self.data['nfcoil'][0])
        self.data['ccbrsp'] = arr, "computed external coil currents in Ampere"
        arr, counter = self._read_array(lines, counter, self.data['nesum'][0])
        self.data['eccurt'] = arr, "measured E-coil current in Ampere"

        # Optional trailing groups (newer EFIT versions).
        for group in self._GROUPS_OPTIONAL:
            counter = self._read_group(lines, counter, group, required=False)

    def getAll(self):
        """Return the whole {name: (value, description)} dictionary."""
        return self.data

    def getAllVars(self):
        """Return the names of all parsed variables."""
        return self.data.keys()

    def get(self, varname):
        """Return the (value, description) tuple for one variable."""
        return self.data[varname]
################################


def main():
    """Command-line driver: parse an a-eqdsk file and print requested variables."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename",
                      help="g-eqdsk file", default="")
    parser.add_option("-a", "--all", dest="all",
                      help="display all variables", action="store_true",)
    parser.add_option("-v", "--vars", dest="vars",
                      help="comma separated list of variables (use '-v \"*\"' for all)", default="*")
    #parser.add_option("-p", "--plot", dest="plot",
    #                  help="plot all variables", action="store_true",)
    parser.add_option("-i", "--inquire", dest="inquire",
                      help="inquire list of variables", action="store_true",)

    options, args = parser.parse_args()
    if not options.filename:
        parser.error("MUST provide filename (type -h for list of options)")

    eq = Aeqdsk()
    eq.openFile(options.filename)

    if options.inquire:
        print(eq.getAllVars())
    if options.all:
        print(eq.getAll())

    vs = eq.getAllVars()
    if options.vars != '*':
        vs = options.vars.split(',')
    for v in vs:
        # str() keeps the (value, description) tuple rendering of the original.
        print('%s: %s' % (v, str(eq.get(v))))


if __name__ == '__main__':
    main()
Pinot Bianco is one of Italy’s best-kept secrets. Its close relative, Pinot Grigio, grabs all the attention and Pinot Bianco remains out of the limelight but the smart money knows to go looking for it. In fact, Italy is one of the best sources for Pinot Bianco wines anywhere in the world. Only parts of Austria and Germany make world class Pinot Bianco (Weissburgunder) wines comparable to those of the boot. Do not make the common mistake of thinking that Alsace makes great Pinot Bianco wines too: for the most part, it does not. This is because the vast majority of wines labeled Pinot Blanc (the French version of Pinot Bianco, but the two are one and the same) are in fact not made with Pinot Bianco at all but with Auxerrois, another grape variety altogether. In fact, Pinot Blancs from Alsace are either blends of the two grapes (Auxerrois almost always dominates the blend roughly by an 80-20 margin) or even 100% Auxerrois (100% Pinot Blanc wines are extremely rare there and few are noteworthy). Believe it or not, that’s not the only weird thing in the world of the Pinots. I did not use the words “close relative” in referring to Pinot Grigio and Pinot Bianco at the beginning of this article by chance. Although I am starting to feel like someone working for Ripley’s, believe it or not (again) current scientific knowledge tells us that those two grapes are genetically identical. Wait, it gets better. Research also tells us that Pinot Nero (or Pinot Noir), is also identical to Pinot Bianco and Pinot Grigio. Clearly, that is not so, as there are obvious differences between those three grapes and their wines. This conundrum is the result of current genetic knowledge that does not allow us to discern differences that exist between the three genomes or that the genomes of the three varieties are made to work differently (just because the DNA looks alike does not mean it is made to work the same way). 
These observations have far reaching implications for many other varieties too, for example Italy’s Vermentino, Pigato and Favorita, also said to be genetically identical when the grapes don’t look anything alike and the wines don’t resemble each other much either. In any case, unlike Pinot Grigio, which has a reddish-rusty skin color and can be made in a style that brings out that pigment, Pinot Bianco yields only white wines. Pinot Biancos are characterized by delicate aromas and flavors of white flowers, pomaceous orchard fruit, chamomile and beeswax. There is also a certain mellowness of flavor that is very typical of Pinot Biancos, especially when they are made from ripe grapes with healthy sugars. Pinot Biancos are never characterized by nostril-piercing or palate-burning levels of fruit intensity or acidity; rather they are all about refinement and balance (the good ones, at least). Some consumers prefer Pinot Grigio’s more obvious size and flavors, and pass Pinot Bianco by. However, only rarely does Pinot Grigio reach the levels of refinement that Pinot Bianco is capable of. The Nals-Margreid (Nalles-Magrè in Italian) cooperative of today was originally founded in 1932 by forty-five original members as the Kellerei Nals (Cantina Nalles, in Italian). In 1985 Kellerei Nals merged with the Margreid-Entiklar coop (founded in 1954) and took the new name of Nals-Margreid-Entiklar, or Nalless-Magrè-Niclara (the Entiklar/Niclara was dropped in the mid 2000s in order to have a more manageable company name). Today the coop is located in the town of Nalles and oversees the work of 138 members who farm 170 hectares in 14 different areas within Alto Adige. To put Alto Adige’s viticulture into perspective, consider that the region has 5,300 hectares and about 5,000 wine producers, which translates into the smallest average ownership of land per producer in Italy by a wide margin. 
The most important areas in Alto Adige where Nals-Margreid sources grapes are Nalles, Sirmian, Magré and Bolzano. The coop added a new, modernist-looking winery building extension in 2011, with a spacious, truly beautiful terrace that offers gorgeous views of the surrounding mountains. In 2007, the nearby (small) Schwanburg coop closed up shop because of the death of its main owner, and so Nals-Margrei stepped in to give a hand. In taking over responsibility of Schwanburg’s vines, wines and the well being of its members and their families, the cooperative added another fifteen hectares under vine and four new members. I would have to say the Schwanburg members have fallen into very good hands. One of the strongest suits of Nals-Margreid is the team at its helm, from president Walter Schwarz, the talented and young winemaker Harald Schraffl and the ultra capable and extremely hard working general director, Gottfried Pollinger who is very much the face of the cooperative that tirelessly travels around the globe promoting it and its wines. Another important reason why the Sirmian gets right of way is to be found in its name. Sirmian is the name of the wine but also that of the area where the Pinot Bianco used to make this wine grows. Is this important, you ask? Very. Sirmian is located within the Terlano subzone of Alto Adige, where Italy’s best Pinot Bianco wines are made. Only Friuli Venezia Giulia’s producers of the Collio might be able to argue that statement. It is where the Terlano coop makes their famous Pinot Bianco Vorberg Riserva wine, Italy’s most age-worthy white wine (see the Terlano Vorberg vertical in the Vinous database. Interestingly, Nals-Margreid makes another Pinot Bianco wine called Penon from grapes grown down by the town of Magrè, in the area of Penon which takes its name from Penone, a fraction of the town of Cortaccia. It could not be anymore different than the wine made with Sirmian grapes. 
The differences are due to the combination of soil, topographical and winemaking diversity. Penon is situated just above the town of Cortaccia in the Bassa Altesina area of Alto Adige at about 600 meters above sea level in a fairly flatland setting (or at hillsides with at most a 20% gradient) and very calcareous soils. Pinot Bianco there tends to give wines that are elegant, but also very fruity. By contrast, Sirmian is located close to the town of Nalles and boasts soils characterized by morainic debris and porphyr bedrock plus gneiss, mica, and marble inclusions and very steep slopes ranging from 550-680 meters above sea level. Over the years, Sirmian’s Pinot Bianco vines have been partially replanted and so vines there average only about thirteen years of age. The Pinot Bianco Sirmian was first produced in 1971. Grape bunches used to make Sirmian are cut in half the week before color change (véraison; in the Nals area this usually happens around mid-July). It is also important to know that in some years grapes are attacked by noble rot, as the Sirmian grapes are always harvested on the later side, weather permitting. Sooner or later noble rot hits the Pinot Bianco. And despite not using any grapes affected by Botrytis cinerea to make Sirmian), removing noble rot influence is extremely hard to do because rot is not always fully visible. Therefore, in some vintages, the Pinot Bianco Sirmian can be marked by a slight, but very recognizable, touch of noble rot (such as was the case in 2013 and 1998). Normally, I am a huge proponent of a touch of noble rot in dry and especially so in sweet wines. I believe that, when present, noble rot takes many such wines to a whole different level of texture and complexity, but I absolutely get Pollinger’s point as it does not seem like, at least in Sirmian’ case, Botrytis cinerea adds that much extra. In any case, Sirmian is always very complex and mineral, and can be very shut down when young. 
The Sirmian undergoes 100% malolactic and is now aged in 20-30 HL barrels, whereas up until the end of the 1990s it was partly aged in stainless steel tanks. In my view, the best vintages are 2012 and the 1986, but the 2016 may well join those ranks. Older vintages were labeled with the Germanic “Weissburgunder” instead of the Italian “Pinot Bianco” name. All the following wines (save for the 2009 and 2008 that I bought on release and that therefore came from my own cellar) were sourced directly at the winery and tasted there with Harald Schraffl and Gottfried Pollinger in October 2017.
from aes_base import sbox
from aes_base import Rcon


def keyExpansion(key, key_size):
    """Expand a user-supplied hex key into the full AES key schedule.

    Parameters:
        key: hex string of the cipher key (spaces allowed), parsed by process_key.
        key_size: key length in bits; one of 128, 192 or 256.

    Returns:
        A list of Nb*(Nr+1) words, each a list of four ints (the round keys),
        per FIPS-197 section 5.2.

    Raises:
        ValueError: if key_size is not 128/192/256.
    """
    Nb = 4  # AES block size in 32-bit words (fixed by the standard)
    if key_size == 128:
        Nk = 4   # key length in words
        Nr = 10  # number of rounds
    elif key_size == 192:
        Nk = 6
        Nr = 12
    elif key_size == 256:
        Nk = 8
        Nr = 14
    else:
        # BUG FIX: was `raise valueError(...)` which is a NameError at runtime.
        raise ValueError("keyExpansion: bad key size")

    key = process_key(key, Nk)

    # The first Nk words of the schedule are the key itself (copied, so the
    # caller's data is never aliased).
    w = [word[:] for word in key]
    i = Nk
    while i < Nb * (Nr + 1):
        temp = w[i - 1][:]
        if i % Nk == 0:
            # Start of a new "key-length" group: rotate, substitute, add Rcon.
            temp = SubWord(RotWord(temp))
            temp[0] ^= Rcon[i // Nk]
        elif Nk > 6 and i % Nk == 4:
            # Extra SubWord step only for 256-bit keys (Nk == 8).
            temp = SubWord(temp)
        for j in range(len(temp)):
            temp[j] ^= w[i - Nk][j]
        w.append(temp[:])
        i += 1
    return w


def SubWord(word):
    """Apply the AES S-box to each byte of a 4-byte word."""
    return [sbox[byte] for byte in word]


def RotWord(word):
    """Cyclically rotate a 4-byte word left by one byte."""
    return word[1:] + word[0:1]


def process_key(key, Nk):
    """Parse a hex key string into Nk words of four byte-values each.

    Spaces are stripped first so keys may be written in grouped form.
    Exits the program with a message if the string is not valid hex of
    sufficient length (int('') / int(non-hex) both raise ValueError).
    """
    key = key.replace(" ", "")
    try:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt and
        # genuine bugs are no longer silently swallowed.
        return [[int(key[i * 8 + j * 2:i * 8 + j * 2 + 2], 16) for j in range(4)]
                for i in range(Nk)]
    except ValueError:
        print("Password must be hexadecimal.")
        exit()
This BLACK+DECKER 4 Slice Toaster Oven features exclusive Under-the-Cabinet SpaceMaker™ Design, which optimizes space with assembly and mounting. With its curved interior, you can fit up to 4 slices of bread or a 9" pizza. Cooking versatility is right on your countertop with its one touch toast, bake and keep warm controls. Finally, clean-up is a breeze with the drop-down crumb tray for a mess-free countertop. Two person installation recommended. Requires tape, ruler, electric drill with a 1/32" drill bit and a ¼" drill bit, screwdriver and protective glasses. Recommended for installation only on flat bottom cabinets (without bottom light rail molding). Several standard screw lengths are provided to fit a range of standard cabinets but may not fit all cabinet sizes.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import pytest
from snowflake.sqlalchemy import (
    AWSBucket,
    AzureContainer,
    CopyFormatter,
    CopyIntoStorage,
    CSVFormatter,
    ExternalStage,
    JSONFormatter,
    PARQUETFormatter,
)
from sqlalchemy import Column, Integer, MetaData, Sequence, String, Table
from sqlalchemy.sql import select, text


def test_external_stage(sql_compiler):
    """Check ExternalStage helpers and how a stage compiles to @[namespace.]name[/path]."""
    assert ExternalStage.prepare_namespace("something") == "something."
    assert ExternalStage.prepare_path("prefix") == "/prefix"

    # All arguments are handled
    assert (
        sql_compiler(ExternalStage(name="name", path="prefix/path", namespace="namespace"))
        == "@namespace.name/prefix/path"
    )

    # defaults don't ruin things
    assert sql_compiler(ExternalStage(name="name", path=None, namespace=None)) == "@name"


def test_copy_into_location(engine_testaccount, sql_compiler):
    """Compile COPY INTO statements for S3/Azure/stage targets and both directions
    (table -> bucket and bucket -> table), then execute them against the test
    account expecting failures for known, acceptable reasons."""
    meta = MetaData()
    conn = engine_testaccount.connect()
    food_items = Table("python_tests_foods", meta,
                       Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),
                       Column('name', String),
                       Column('quantity', Integer))
    meta.create_all(engine_testaccount)
    # Unload a whole table to S3 with KMS encryption and a customized CSV format.
    copy_stmt_1 = CopyIntoStorage(from_=food_items,
                                  into=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
                                      '1234abcd-12ab-34cd-56ef-1234567890ab'),
                                  formatter=CSVFormatter().record_delimiter('|').escape(None).null_if(['null', 'Null']))
    assert (sql_compiler(copy_stmt_1) == "COPY INTO 's3://backup' FROM python_tests_foods FILE_FORMAT=(TYPE=csv "
                                         "ESCAPE=None NULL_IF=('null', 'Null') RECORD_DELIMITER='|') ENCRYPTION="
                                         "(KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab' TYPE='AWS_SSE_KMS')")
    copy_stmt_2 = CopyIntoStorage(from_=select([food_items]).where(food_items.c.id == 1),  # Test sub-query
                                  into=AWSBucket.from_uri('s3://backup').credentials(
                                      aws_role='some_iam_role').encryption_aws_sse_s3(),
                                  formatter=JSONFormatter().file_extension('json').compression('zstd'))
    assert (sql_compiler(copy_stmt_2) == "COPY INTO 's3://backup' FROM (SELECT python_tests_foods.id, "
                                         "python_tests_foods.name, python_tests_foods.quantity FROM python_tests_foods "
                                         "WHERE python_tests_foods.id = 1) FILE_FORMAT=(TYPE=json COMPRESSION='zstd' "
                                         "FILE_EXTENSION='json') CREDENTIALS=(AWS_ROLE='some_iam_role') "
                                         "ENCRYPTION=(TYPE='AWS_SSE_S3')")
    # Azure container target with SAS-token credentials and Parquet format.
    copy_stmt_3 = CopyIntoStorage(from_=food_items,
                                  into=AzureContainer.from_uri(
                                      'azure://snowflake.blob.core.windows.net/snowpile/backup'
                                  ).credentials('token'),
                                  formatter=PARQUETFormatter().snappy_compression(True))
    assert (sql_compiler(copy_stmt_3) == "COPY INTO 'azure://snowflake.blob.core.windows.net/snowpile/backup' "
                                         "FROM python_tests_foods FILE_FORMAT=(TYPE=parquet SNAPPY_COMPRESSION=true) "
                                         "CREDENTIALS=(AZURE_SAS_TOKEN='token')")
    # Mutating the statement in place (adding MAX_FILE_SIZE) changes the compiled SQL.
    copy_stmt_3.maxfilesize(50000000)
    assert (sql_compiler(copy_stmt_3) == "COPY INTO 'azure://snowflake.blob.core.windows.net/snowpile/backup' "
                                         "FROM python_tests_foods FILE_FORMAT=(TYPE=parquet SNAPPY_COMPRESSION=true) "
                                         "MAX_FILE_SIZE = 50000000 "
                                         "CREDENTIALS=(AZURE_SAS_TOKEN='token')")
    # Reverse direction: load from the bucket into the table.
    copy_stmt_4 = CopyIntoStorage(from_=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
                                      '1234abcd-12ab-34cd-56ef-1234567890ab'),
                                  into=food_items,
                                  formatter=CSVFormatter().record_delimiter('|').escape(None).null_if(['null', 'Null']))
    assert (sql_compiler(copy_stmt_4) == "COPY INTO python_tests_foods FROM 's3://backup' FILE_FORMAT=(TYPE=csv "
                                         "ESCAPE=None NULL_IF=('null', 'Null') RECORD_DELIMITER='|') ENCRYPTION="
                                         "(KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab' TYPE='AWS_SSE_KMS')")
    copy_stmt_5 = CopyIntoStorage(from_=AWSBucket.from_uri('s3://backup').encryption_aws_sse_kms(
                                      '1234abcd-12ab-34cd-56ef-1234567890ab'),
                                  into=food_items,
                                  formatter=CSVFormatter().field_delimiter(','))
    assert (sql_compiler(copy_stmt_5) == "COPY INTO python_tests_foods FROM 's3://backup' FILE_FORMAT=(TYPE=csv "
                                         "FIELD_DELIMITER=',') ENCRYPTION="
                                         "(KMS_KEY_ID='1234abcd-12ab-34cd-56ef-1234567890ab' TYPE='AWS_SSE_KMS')")
    # Named-stage targets, without and with namespace/path.
    copy_stmt_6 = CopyIntoStorage(from_=food_items,
                                  into=ExternalStage(name="stage_name"),
                                  formatter=CSVFormatter())
    assert sql_compiler(copy_stmt_6) == "COPY INTO @stage_name FROM python_tests_foods FILE_FORMAT=(TYPE=csv)"
    copy_stmt_7 = CopyIntoStorage(from_=food_items,
                                  into=ExternalStage(name="stage_name", path="prefix/file", namespace="name"),
                                  formatter=CSVFormatter())
    assert sql_compiler(copy_stmt_7) == "COPY INTO @name.stage_name/prefix/file FROM python_tests_foods FILE_FORMAT=(TYPE=csv)"

    # NOTE Other than expect known compiled text, submit it to RegressionTests environment and expect them to fail, but
    # because of the right reasons
    try:
        acceptable_exc_reasons = {'Failure using stage area',
                                  'AWS_ROLE credentials are not allowed for this account.',
                                  'AWS_ROLE credentials are invalid'}
        for stmnt in (copy_stmt_1, copy_stmt_2, copy_stmt_3, copy_stmt_4):
            with pytest.raises(Exception) as exc:
                conn.execute(stmnt)
            if not any(map(lambda reason: reason in str(exc) or reason in str(exc.value), acceptable_exc_reasons)):
                raise Exception("Not acceptable exception: {} {}".format(str(exc), str(exc.value)))
    finally:
        # Always release the connection and drop the scratch table, even when
        # an unacceptable exception aborts the loop above.
        conn.close()
        food_items.drop(engine_testaccount)


def test_copy_into_storage_csv_extended(sql_compiler):
    """
    This test compiles the SQL to read CSV data from a stage and insert it
    into a table. The CSV formatting statements are inserted inline, i.e. no
    explicit SQL definition of that format is necessary.

    The Stage is a named stage, i.e. we assume that a CREATE STAGE statement
    was executed before. This way, the COPY INTO statement does not need to
    know any security details (credentials or tokens)
    """
    # target table definition (NB: this could be omitted for the test, since the
    # SQL statement copies the whole CSV and assumes the target structure matches)
    metadata = MetaData()
    target_table = Table(
        "TEST_IMPORT",
        metadata,
        Column("COL1", Integer, primary_key=True),
        Column("COL2", String),
    )
    # define a source stage (root path)
    root_stage = ExternalStage(
        name="AZURE_STAGE",
        namespace="ML_POC.PUBLIC",
    )
    # define a CSV formatter
    formatter = (
        CSVFormatter()
        .compression("AUTO")
        .field_delimiter(",")
        .record_delimiter(r"\n")
        .field_optionally_enclosed_by(None)
        .escape(None)
        .escape_unenclosed_field(r"\134")
        .date_format("AUTO")
        .null_if([r"\N"])
        .skip_header(1)
        .trim_space(False)
        .error_on_column_count_mismatch(True)
    )
    # define CopyInto object; reads all CSV data (=> pattern) from
    # the sub-path "testdata" beneath the root stage
    copy_into = CopyIntoStorage(
        from_=ExternalStage.from_parent_stage(root_stage, "testdata"),
        into=target_table,
        formatter=formatter
    )
    copy_into.copy_options = {"pattern": "'.*csv'", "force": "TRUE"}
    # check that the result is as expected
    result = sql_compiler(copy_into)
    expected = (
        r"COPY INTO TEST_IMPORT "
        r"FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata "
        r"FILE_FORMAT=(TYPE=csv COMPRESSION='auto' DATE_FORMAT='AUTO' "
        r"ERROR_ON_COLUMN_COUNT_MISMATCH=True ESCAPE=None "
        r"ESCAPE_UNENCLOSED_FIELD='\134' FIELD_DELIMITER=',' "
        r"FIELD_OPTIONALLY_ENCLOSED_BY=None NULL_IF=('\N') RECORD_DELIMITER='\n' "
        r"SKIP_HEADER=1 TRIM_SPACE=False) force = TRUE pattern = '.*csv'"
    )
    assert result == expected


def test_copy_into_storage_parquet_named_format(sql_compiler):
    """
    This test compiles the SQL to read Parquet data from a stage and insert
    it into a table. The source file is accessed using a SELECT statement.

    The Parquet formatting definitions are defined in a named format which
    was explicitly created before. The Stage is a named stage, i.e. we assume
    that a CREATE STAGE statement was executed before. This way, the COPY INTO
    statement does not need to know any security details (credentials or
    tokens)
    """
    # target table definition (NB: this could be omitted for the test, as long as
    # the statement is not executed)
    metadata = MetaData()
    target_table = Table(
        "TEST_IMPORT",
        metadata,
        Column("COL1", Integer, primary_key=True),
        Column("COL2", String),
    )
    # define a source stage (root path)
    root_stage = ExternalStage(
        name="AZURE_STAGE",
        namespace="ML_POC.PUBLIC",
    )
    # define the SELECT statement to access the source file.
    # we can probably defined source table metadata and use SQLAlchemy Column objects
    # instead of texts, but this seems to be the easiest way.
    sel_statement = select(
        text("$1:COL1::number"), text("$1:COL2::varchar")
    ).select_from(
        ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet")
    )
    # use an existing source format.
    formatter = CopyFormatter(format_name="parquet_file_format")
    # setup CopyInto object
    copy_into = CopyIntoStorage(
        from_=sel_statement,
        into=target_table,
        formatter=formatter
    )
    copy_into.copy_options = {"force": "TRUE"}
    # compile and check the result
    result = sql_compiler(copy_into)
    expected = (
        "COPY INTO TEST_IMPORT "
        "FROM (SELECT $1:COL1::number, $1:COL2::varchar "
        "FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet) "
        "FILE_FORMAT=(format_name = parquet_file_format) force = TRUE"
    )
    assert result == expected


def test_copy_into_storage_parquet_files(sql_compiler):
    """
    This test compiles the SQL to read Parquet data from a stage and insert
    it into a table. The source file is accessed using a SELECT statement.

    The Parquet formatting definitions are defined in a named format which
    was explicitly created before. The format is specified as a property of
    the stage, not the CopyInto object. The Stage is a named stage, i.e. we
    assume that a CREATE STAGE statement was executed before. This way, the
    COPY INTO statement does not need to know any security details
    (credentials or tokens). The FORCE option is set using the corresponding
    function in CopyInto. The FILES option is set to choose the files to
    upload
    """
    # target table definition (NB: this could be omitted for the test, as long as
    # the statement is not executed)
    metadata = MetaData()
    target_table = Table(
        "TEST_IMPORT",
        metadata,
        Column("COL1", Integer, primary_key=True),
        Column("COL2", String),
    )
    # define a source stage (root path)
    root_stage = ExternalStage(
        name="AZURE_STAGE",
        namespace="ML_POC.PUBLIC",
    )
    # define the SELECT statement to access the source file.
    # we can probably defined source table metadata and use SQLAlchemy Column objects
    # instead of texts, but this seems to be the easiest way.
    sel_statement = select(
        text("$1:COL1::number"), text("$1:COL2::varchar")
    ).select_from(
        ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet", file_format="parquet_file_format")
    )
    # setup CopyInto object
    copy_into = CopyIntoStorage(
        from_=sel_statement,
        into=target_table,
    ).force(True).files(["foo.txt", "bar.txt"])
    # compile and check the result
    result = sql_compiler(copy_into)
    expected = (
        "COPY INTO TEST_IMPORT "
        "FROM (SELECT $1:COL1::number, $1:COL2::varchar "
        "FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet "
        "(file_format => parquet_file_format)) FILES = ('foo.txt','bar.txt') "
        "FORCE = true"
    )
    assert result == expected


def test_copy_into_storage_parquet_pattern(sql_compiler):
    """
    This test compiles the SQL to read Parquet data from a stage and insert
    it into a table. The source file is accessed using a SELECT statement.

    The Parquet formatting definitions are defined in a named format which
    was explicitly created before. The format is specified as a property of
    the stage, not the CopyInto object. The Stage is a named stage, i.e. we
    assume that a CREATE STAGE statement was executed before. This way, the
    COPY INTO statement does not need to know any security details
    (credentials or tokens). The FORCE option is set using the corresponding
    function in CopyInto. The PATTERN option is set to choose multiple files
    """
    # target table definition (NB: this could be omitted for the test, as long as
    # the statement is not executed)
    metadata = MetaData()
    target_table = Table(
        "TEST_IMPORT",
        metadata,
        Column("COL1", Integer, primary_key=True),
        Column("COL2", String),
    )
    # define a source stage (root path)
    root_stage = ExternalStage(
        name="AZURE_STAGE",
        namespace="ML_POC.PUBLIC",
    )
    # define the SELECT statement to access the source file.
    # we can probably defined source table metadata and use SQLAlchemy Column objects
    # instead of texts, but this seems to be the easiest way.
    sel_statement = select(
        text("$1:COL1::number"), text("$1:COL2::varchar")
    ).select_from(
        ExternalStage.from_parent_stage(root_stage, "testdata/out.parquet", file_format="parquet_file_format")
    )
    # setup CopyInto object
    copy_into = CopyIntoStorage(
        from_=sel_statement,
        into=target_table,
    ).force(True).pattern("'.*csv'")
    # compile and check the result
    result = sql_compiler(copy_into)
    expected = (
        "COPY INTO TEST_IMPORT "
        "FROM (SELECT $1:COL1::number, $1:COL2::varchar "
        "FROM @ML_POC.PUBLIC.AZURE_STAGE/testdata/out.parquet "
        "(file_format => parquet_file_format)) FORCE = true PATTERN = '.*csv'"
    )
    assert result == expected
Geography Ph.D. student John Connors left for Tanzania this week to begin fieldwork for his dissertation. John's research explores how perceptions of ecosystem services shape food security programs and affect land change in Central Tanzania. During the first two months abroad, John will study Swahili as part of a Boren fellowship. After completing his language study, John will spend 5 more months gathering data and conducting interviews with farmers and development program representatives. This research is also funded by a Borlaug Fellowship for global food security and a Melvin Marcus fieldwork scholarship. Connors' research will examine the social and environmental impacts of a large scale agricultural development program known as Kilimo Kwanza. During his seven months abroad, John will conduct interviews with rural households and gather ground data for remote sensing analysis. The research seeks to understand how rural communities use forest resources to improve food security, and how changes in agricultural policies may impact these strategies.
__author__ = 'adria'
#!/usr/bin/python
from dataBase import *
import sys
sys.path.insert(0, '../model')  # otherwise the model package cannot be imported
from owner import *


class UserLogin:
    """Terminal-driven login helper.

    Validates an Owner (DNI + name + surname) against the users table in
    DataBase, optionally creating missing users, and tracks whether the
    current owner has successfully logged in.
    """

    def __init__(self, owner):
        # owner: Owner instance (dni, nombre, apellidos) for the current session
        self.owner = owner
        self.db = DataBase()
        self.registered = False  # whether the user has already logged in or not

    def enterLogin(self):
        """Like the 'login' method, but asks for the user data to be written in the terminal"""
        self.askUserData()
        while True:
            result = self.login()
            if result == 1:
                # DNI exists but name/surname did not match: re-prompt and retry
                self.askUserData()
            elif result == 2:
                # DNI unknown: offer to create the user (empty answer counts as yes)
                create = input("Would you like to create it?(Y/N): ")
                if create.lower() == "y" or create.lower() == "":
                    self.db.afegeixUsuari(self.owner.dni, self.owner.nombre, self.owner.apellidos)
                break
            else:
                # result == 0: login succeeded
                break

    def askUserData(self):
        """Sets the self.owner information with the parameters the user writes on the terminal"""
        while True:
            print("Insert your personal information to log in:")
            name = input("Name: ")
            surname = input("Surname: ")
            dni = input("DNI: ")
            if name and surname and dni:
                # NOTE: Owner is constructed as (dni, surname, name) — argument
                # order must match the Owner class in ../model.
                self.owner = Owner(dni, surname, name)
                break
            else:
                print("Error, one or more of the fields is empty, write it again:\n")

    def login(self, owner=None):
        """Checks if the user is on the database and logs in.

        Returns 0 on success, 1 when the DNI exists but name/surname mismatch,
        2 when no user with that DNI exists.
        """
        result = 0
        if owner is not None:
            self.owner = owner
        if self.userExists():
            if self.checkUser():
                self.registered = True
                print("You have succesfully logged in\n")
            else:
                print("Error! name or surname incorrect\n")
                result = 1
        else:
            print("Error, user with DNI "+self.owner.dni+" doesn't exist\n")
            result = 2
        return result

    def llistaDNI(self):
        """Lists all DNI's"""
        llista = []
        llistacompleta = self.db.llistaUsers()
        for user in llistacompleta:
            llista.append(user[0])  # column 0 of each user row is the DNI
        return llista

    def userExists(self, dni=None):
        """Checks if a user exists by searching the DNI in the database"""
        if dni is None:
            dni = self.owner.dni  # default to the current session's owner
        exists = False
        for dniactual in self.llistaDNI():
            if dniactual == dni:
                exists = True
        return exists

    def checkUser(self):
        """Checks if self.owner data is correct"""
        result = False
        for user in self.db.llistaUsers():
            # user row layout: (dni, name, surname)
            dni = user[0]
            name = user[1]
            surname = user[2]
            if dni == self.owner.dni:
                if name == self.owner.nombre and surname == self.owner.apellidos:
                    result = True
                break
        return result

    def isLogged(self):
        """Returns if the user is logged in or not"""
        return self.registered

    def guardaUsuari(self, owner=None):
        """Saves owner to the database if it doesn't exist"""
        if owner is None:
            owner = self.owner
        if self.userExists(owner.dni):
            print("User with DNI '"+owner.dni+"' already exists!")
        else:
            result = self.db.afegeixUsuari(owner.dni, owner.nombre, owner.apellidos)
            if result:
                print("User "+owner.nombre+" added!")
            else:
                print("User could not be added")

    def getIbanList(self):
        """Returns a list of the IBAN codes of the owners' accounts"""
        llista = self.db.llistaComptes()
        ibanList = []
        for account in llista:
            # account row: IBAN at index 0, owner DNIs from index 3 onwards
            for user in account[3:]:
                if user == self.owner.dni:
                    ibanList.append(account[0])
                    break
        return ibanList

    def getOwner(self):
        """Returns the Owner of the current session."""
        return self.owner
What shapes does a child need to know for kindergarten? In Kindergarten, children typically learn the names of basic shapes, including some 3-dimensional shapes. Before entering Kindergarten, you can encourage your child to recognize shapes such as squares, circles, triangles, and rectangles in everyday life. For example, you can tell your child that you see something in the room that looks like a circle (e.g., a clock face), then ask your child to guess what it is by naming objects that are shaped like circles. Then your child can have a turn in finding an object that is shaped like a rectangle and have you guess what it is. This will help your child to associate shapes with their names, and will get them excited about seeing mathematics all around him/her! Jennie Ito is a mother of two and a child development consultant who specializes in children’s play and toys. Before becoming a consultant for LeapFrog, she was an intern at the Smithsonian Institution in Washington, DC, and later worked as a content expert for the Association of Children’s Museums’ “Playing for Keeps” Play Initiative. Jennie earned her doctorate in developmental psychology at Queen’s University in Ontario, Canada.
from __future__ import unicode_literals
import datetime
import iso8601
import pytz
from calendar import timegm
from decimal import Decimal, localcontext
from uuid import UUID
from logging import getLogger
from pytz import BaseTzInfo
from .utils import escape, parse_array, comma_join, string_or_func, get_subclass_names
from .funcs import F, FunctionOperatorsMixin
from ipaddress import IPv4Address, IPv6Address

logger = getLogger('clickhouse_orm')


class Field(FunctionOperatorsMixin):
    '''
    Abstract base class for all field types.
    '''
    name = None # this is set by the parent model
    parent = None # this is set by the parent model
    creation_counter = 0 # used for keeping the model fields ordered
    class_default = 0 # should be overridden by concrete subclasses
    db_type = None # should be overridden by concrete subclasses

    def __init__(self, default=None, alias=None, materialized=None, readonly=None, codec=None):
        # A field may define at most one of default / alias / materialized.
        assert [default, alias, materialized].count(None) >= 2, \
            "Only one of default, alias and materialized parameters can be given"
        assert alias is None or isinstance(alias, F) or isinstance(alias, str) and alias != "",\
            "Alias parameter must be a string or function object, if given"
        assert materialized is None or isinstance(materialized, F) or isinstance(materialized, str) and materialized != "",\
            "Materialized parameter must be a string or function object, if given"
        assert readonly is None or type(readonly) is bool, "readonly parameter must be bool if given"
        assert codec is None or isinstance(codec, str) and codec != "", \
            "Codec field must be string, if given"

        # creation_counter preserves the order in which fields were declared
        # on the model class.
        self.creation_counter = Field.creation_counter
        Field.creation_counter += 1
        self.default = self.class_default if default is None else default
        self.alias = alias
        self.materialized = materialized
        # ALIAS and MATERIALIZED columns cannot be written to directly.
        self.readonly = bool(self.alias or self.materialized or readonly)
        self.codec = codec

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

    def to_python(self, value, timezone_in_use):
        '''
        Converts the input value into the expected Python data type, raising ValueError if the
        data can't be converted. Returns the converted value. Subclasses should override this.
        The timezone_in_use parameter should be consulted when parsing datetime fields.
        '''
        return value   # pragma: no cover

    def validate(self, value):
        '''
        Called after to_python to validate that the value is suitable for the field's database type.
        Subclasses should override this.
        '''
        pass

    def _range_check(self, value, min_value, max_value):
        '''
        Utility method to check that the given value is between min_value and max_value.
        '''
        if value < min_value or value > max_value:
            raise ValueError('%s out of range - %s is not between %s and %s' % (self.__class__.__name__, value, min_value, max_value))

    def to_db_string(self, value, quote=True):
        '''
        Returns the field's value prepared for writing to the database.
        When quote is true, strings are surrounded by single quotes.
        '''
        return escape(value, quote)

    def get_sql(self, with_default_expression=True, db=None):
        '''
        Returns an SQL expression describing the field (e.g. for CREATE TABLE).

        - `with_default_expression`: If True, adds default value to sql.
            It doesn't affect fields with alias and materialized values.
        - `db`: Database, used for checking supported features.
        '''
        sql = self.db_type
        args = self.get_db_type_args()
        if args:
            sql += '(%s)' % comma_join(args)
        if with_default_expression:
            sql += self._extra_params(db)
        return sql

    def get_db_type_args(self):
        """Returns field type arguments"""
        return []

    def _extra_params(self, db):
        # Builds the ALIAS / MATERIALIZED / DEFAULT / CODEC suffix of the
        # column definition. The three value sources are mutually exclusive
        # (enforced in __init__).
        sql = ''
        if self.alias:
            sql += ' ALIAS %s' % string_or_func(self.alias)
        elif self.materialized:
            sql += ' MATERIALIZED %s' % string_or_func(self.materialized)
        elif isinstance(self.default, F):
            sql += ' DEFAULT %s' % self.default.to_sql()
        elif self.default:
            default = self.to_db_string(self.default)
            sql += ' DEFAULT %s' % default
        if self.codec and db and db.has_codec_support:
            sql += ' CODEC(%s)' % self.codec
        return sql

    def isinstance(self, types):
        """
        Checks if the instance if one of the types provided or if any of the inner_field child is one of the types
        provided, returns True if field or any inner_field is one of ths provided, False otherwise

        - `types`: Iterable of types to check inclusion of instance

        Returns: Boolean
        """
        if isinstance(self, types):
            return True
        # Walk down wrapper fields (e.g. ArrayField) to check the wrapped type.
        inner_field = getattr(self, 'inner_field', None)
        while inner_field:
            if isinstance(inner_field, types):
                return True
            inner_field = getattr(inner_field, 'inner_field', None)
        return False


class StringField(Field):
    # Arbitrary-length string column; accepts str or UTF-8 bytes.
    class_default = ''
    db_type = 'String'

    def to_python(self, value, timezone_in_use):
        if isinstance(value, str):
            return value
        if isinstance(value, bytes):
            return value.decode('UTF-8')
        raise ValueError('Invalid value for %s: %r' % (self.__class__.__name__, value))


class FixedStringField(StringField):
    # Fixed-width string column; values longer than `length` bytes are rejected.

    def __init__(self, length, default=None, alias=None, materialized=None, readonly=None):
        self._length = length
        self.db_type = 'FixedString(%d)' % length
        super(FixedStringField, self).__init__(default, alias, materialized, readonly)

    def to_python(self, value, timezone_in_use):
        value = super(FixedStringField, self).to_python(value, timezone_in_use)
        # ClickHouse pads FixedString values with NUL bytes; strip them back off.
        return value.rstrip('\0')

    def validate(self, value):
        if isinstance(value, str):
            value = value.encode('UTF-8')
        # Length check is in bytes, not characters.
        if len(value) > self._length:
            raise ValueError('Value of %d bytes is too long for FixedStringField(%d)' % (len(value), self._length))


class DateField(Field):
    # ClickHouse Date range is 1970-01-01 .. 2105-12-31.
    min_value = datetime.date(1970, 1, 1)
    max_value = datetime.date(2105, 12, 31)
    class_default = min_value
    db_type = 'Date'

    def to_python(self, value, timezone_in_use):
        if isinstance(value, datetime.datetime):
            return value.astimezone(pytz.utc).date() if value.tzinfo else value.date()
        if isinstance(value, datetime.date):
            return value
        if isinstance(value, int):
            # ints are interpreted as days since the epoch
            return DateField.class_default + datetime.timedelta(days=value)
        if isinstance(value, str):
            if value == '0000-00-00':
                # ClickHouse's "zero date"
                return DateField.min_value
            return datetime.datetime.strptime(value, '%Y-%m-%d').date()
        raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))

    def validate(self, value):
        self._range_check(value, DateField.min_value, DateField.max_value)

    def to_db_string(self, value, quote=True):
        return escape(value.isoformat(), quote)


class DateTimeField(Field):
    class_default = datetime.datetime.fromtimestamp(0, pytz.utc)
    db_type = 'DateTime'

    def __init__(self, default=None, alias=None, materialized=None, readonly=None, codec=None, timezone=None):
        super().__init__(default, alias, materialized, readonly, codec)
        # assert not timezone, 'Temporarily field timezone is not supported'
        if timezone:
            # Accept either a tzinfo object or a timezone name string.
            timezone = timezone if isinstance(timezone, BaseTzInfo) else pytz.timezone(timezone)
        self.timezone = timezone

    def get_db_type_args(self):
        args = []
        if self.timezone:
            args.append(escape(self.timezone.zone))
        return args

    def to_python(self, value, timezone_in_use):
        if isinstance(value, datetime.datetime):
            # Naive datetimes are assumed to be UTC.
            return value if value.tzinfo else value.replace(tzinfo=pytz.utc)
        if isinstance(value, datetime.date):
            return datetime.datetime(value.year, value.month, value.day, tzinfo=pytz.utc)
        if isinstance(value, int):
            # ints are unix timestamps (seconds)
            return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=pytz.utc)
        if isinstance(value, str):
            if value == '0000-00-00 00:00:00':
                # ClickHouse's "zero datetime"
                return self.class_default
            if len(value) == 10:
                # A 10-digit string is treated as a unix timestamp first;
                # falls through to ISO-8601 parsing if it isn't numeric.
                try:
                    value = int(value)
                    return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=pytz.utc)
                except ValueError:
                    pass
            try:
                # left the date naive in case of no tzinfo set
                dt = iso8601.parse_date(value, default_timezone=None)
            except iso8601.ParseError as e:
                raise ValueError(str(e))

            # convert naive to aware
            if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
                dt = timezone_in_use.localize(dt)
            return dt
        raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))

    def to_db_string(self, value, quote=True):
        # Serialized as a zero-padded unix timestamp string.
        return escape('%010d' % timegm(value.utctimetuple()), quote)


class DateTime64Field(DateTimeField):
    db_type = 'DateTime64'

    def __init__(self, default=None, alias=None, materialized=None, readonly=None, codec=None, timezone=None,
                 precision=6):
        super().__init__(default, alias, materialized, readonly, codec, timezone)
        assert precision is None or isinstance(precision, int), 'Precision must be int type'
        self.precision = precision

    def get_db_type_args(self):
        args = [str(self.precision)]
        if self.timezone:
            args.append(escape(self.timezone.zone))
        return args

    def to_db_string(self, value, quote=True):
        """
        Returns the field's value prepared for writing to the database
        Returns string in 0000000000.000000 format, where remainder digits count is equal to precision
        """
        return escape(
            '{timestamp:0{width}.{precision}f}'.format(
                timestamp=value.timestamp(),
                width=11 + self.precision,
                precision=self.precision),
            quote
        )

    def to_python(self, value, timezone_in_use):
        try:
            return super().to_python(value, timezone_in_use)
        except ValueError:
            # Additionally accept fractional unix timestamps (int/float or a
            # string with a 10-digit integer part).
            if isinstance(value, (int, float)):
                return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=pytz.utc)
            if isinstance(value, str):
                left_part = value.split('.')[0]
                if left_part == '0000-00-00 00:00:00':
                    return self.class_default
                if len(left_part) == 10:
                    try:
                        value = float(value)
                        return datetime.datetime.utcfromtimestamp(value).replace(tzinfo=pytz.utc)
                    except ValueError:
                        pass
            raise


class BaseIntField(Field):
    '''
    Abstract base class for all integer-type fields.
    '''
    def to_python(self, value, timezone_in_use):
        try:
            return int(value)
        except:
            raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))

    def to_db_string(self, value, quote=True):
        # There's no need to call escape since numbers do not contain
        # special characters, and never need quoting
        return str(value)

    def validate(self, value):
        # min_value / max_value are defined by the concrete subclasses below.
        self._range_check(value, self.min_value, self.max_value)


class UInt8Field(BaseIntField):
    min_value = 0
    max_value = 2**8 - 1
    db_type = 'UInt8'


class UInt16Field(BaseIntField):
    min_value = 0
    max_value = 2**16 - 1
    db_type = 'UInt16'


class UInt32Field(BaseIntField):
    min_value = 0
    max_value = 2**32 - 1
    db_type = 'UInt32'


class UInt64Field(BaseIntField):
    min_value = 0
    max_value = 2**64 - 1
    db_type = 'UInt64'


class Int8Field(BaseIntField):
    min_value = -2**7
    max_value = 2**7 - 1
    db_type = 'Int8'


class Int16Field(BaseIntField):
    min_value = -2**15
    max_value = 2**15 - 1
    db_type = 'Int16'


class Int32Field(BaseIntField):
    min_value = -2**31
    max_value = 2**31 - 1
    db_type = 'Int32'


class Int64Field(BaseIntField):
    min_value = -2**63
    max_value = 2**63 - 1
    db_type = 'Int64'


class BaseFloatField(Field):
    '''
    Abstract base class for all float-type fields.
    '''
    def to_python(self, value, timezone_in_use):
        try:
            return float(value)
        except:
            raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))

    def to_db_string(self, value, quote=True):
        # There's no need to call escape since numbers do not contain
        # special characters, and never need quoting
        return str(value)


class Float32Field(BaseFloatField):
    db_type = 'Float32'


class Float64Field(BaseFloatField):
    db_type = 'Float64'


class DecimalField(Field):
    '''
    Base class for all decimal fields. Can also be used directly.
    '''
    def __init__(self, precision, scale, default=None, alias=None, materialized=None, readonly=None):
        assert 1 <= precision <= 38, 'Precision must be between 1 and 38'
        assert 0 <= scale <= precision, 'Scale must be between 0 and the given precision'
        self.precision = precision
        self.scale = scale
        self.db_type = 'Decimal(%d,%d)' % (self.precision, self.scale)
        with localcontext() as ctx:
            # Compute min/max at ClickHouse's maximum decimal precision so the
            # bounds themselves are exact.
            ctx.prec = 38
            self.exp = Decimal(10) ** -self.scale # for rounding to the required scale
            self.max_value = Decimal(10 ** (self.precision - self.scale)) - self.exp
            self.min_value = -self.max_value
        super(DecimalField, self).__init__(default, alias, materialized, readonly)

    def to_python(self, value, timezone_in_use):
        if not isinstance(value, Decimal):
            try:
                value = Decimal(value)
            except:
                raise ValueError('Invalid value for %s - %r' % (self.__class__.__name__, value))
        if not value.is_finite():
            # NaN / Infinity cannot be stored
            raise ValueError('Non-finite value for %s - %r' % (self.__class__.__name__, value))
        return self._round(value)

    def to_db_string(self, value, quote=True):
        # There's no need to call escape since numbers do not contain
        # special characters, and never need quoting
        return str(value)

    def _round(self, value):
        # Quantize to the field's scale.
        return value.quantize(self.exp)

    def validate(self, value):
        self._range_check(value, self.min_value, self.max_value)


class Decimal32Field(DecimalField):
    # Decimal32(scale) is Decimal(9, scale).

    def __init__(self, scale, default=None, alias=None, materialized=None, readonly=None):
        super(Decimal32Field, self).__init__(9, scale, default, alias, materialized, readonly)
        self.db_type = 'Decimal32(%d)' % scale


class Decimal64Field(DecimalField):
    # Decimal64(scale) is Decimal(18, scale).

    def __init__(self, scale, default=None, alias=None, materialized=None, readonly=None):
        super(Decimal64Field, self).__init__(18, scale, default, alias, materialized, readonly)
        self.db_type = 'Decimal64(%d)' % scale


class Decimal128Field(DecimalField):
    # Decimal128(scale) is Decimal(38, scale).

    def __init__(self, scale, default=None, alias=None, materialized=None, readonly=None):
        super(Decimal128Field, self).__init__(38, scale, default, alias, materialized, readonly)
        self.db_type = 'Decimal128(%d)' % scale


class BaseEnumField(Field):
    '''
    Abstract base class for all enum-type fields.
    '''
    def __init__(self, enum_cls, default=None, alias=None, materialized=None, readonly=None, codec=None):
        self.enum_cls = enum_cls
        if default is None:
            # Default to the first member of the enum.
            default = list(enum_cls)[0]
        super(BaseEnumField, self).__init__(default, alias, materialized, readonly, codec)

    def to_python(self, value, timezone_in_use):
        if isinstance(value, self.enum_cls):
            return value
        try:
            # Strings/bytes are tried as member names first, then as values;
            # ints are looked up by value.
            if isinstance(value, str):
                try:
                    return self.enum_cls[value]
                except Exception:
                    return self.enum_cls(value)
            if isinstance(value, bytes):
                decoded = value.decode('UTF-8')
                try:
                    return self.enum_cls[decoded]
                except Exception:
                    return self.enum_cls(decoded)
            if isinstance(value, int):
                return self.enum_cls(value)
        except (KeyError, ValueError):
            pass
        raise ValueError('Invalid value for %s: %r' % (self.enum_cls.__name__, value))

    def to_db_string(self, value, quote=True):
        return escape(value.name, quote)

    def get_db_type_args(self):
        return ['%s = %d' % (escape(item.name), item.value) for item in self.enum_cls]

    @classmethod
    def create_ad_hoc_field(cls, db_type):
        '''
        Give an SQL column description such as "Enum8('apple' = 1, 'banana' = 2, 'orange' = 3)"
        this method returns a matching enum field.
        '''
        import re
        from enum import Enum
        members = {}
        for match in re.finditer(r"'([\w ]+)' = (-?\d+)", db_type):
            members[match.group(1)] = int(match.group(2))
        enum_cls = Enum('AdHocEnum', members)
        field_class = Enum8Field if db_type.startswith('Enum8') else Enum16Field
        return field_class(enum_cls)


class Enum8Field(BaseEnumField):
    db_type = 'Enum8'


class Enum16Field(BaseEnumField):
    db_type = 'Enum16'


class ArrayField(Field):
    class_default = []

    def __init__(self, inner_field, default=None, alias=None, materialized=None, readonly=None, codec=None):
        assert isinstance(inner_field, Field), "The first argument of ArrayField must be a Field instance"
        assert not isinstance(inner_field, ArrayField), "Multidimensional array fields are not supported by the ORM"
        self.inner_field = inner_field
        super(ArrayField, self).__init__(default, alias, materialized, readonly, codec)

    def to_python(self, value, timezone_in_use):
        if isinstance(value, str):
            value = parse_array(value)
        elif isinstance(value, bytes):
            value = parse_array(value.decode('UTF-8'))
        elif not isinstance(value, (list, tuple)):
            raise ValueError('ArrayField expects list or tuple, not %s' % type(value))
        # Delegate element conversion to the wrapped field.
        return [self.inner_field.to_python(v, timezone_in_use) for v in value]

    def validate(self, value):
        for v in value:
            self.inner_field.validate(v)

    def to_db_string(self, value, quote=True):
        array = [self.inner_field.to_db_string(v, quote=True) for v in value]
        return '[' + comma_join(array) + ']'

    def get_sql(self, with_default_expression=True, db=None):
        sql = 'Array(%s)' % self.inner_field.get_sql(with_default_expression=False, db=db)
        if with_default_expression and self.codec and db and db.has_codec_support:
            sql += ' CODEC(%s)' % self.codec
        return sql


class UUIDField(Field):
    class_default = UUID(int=0)
    db_type = 'UUID'

    def to_python(self, value, timezone_in_use):
        if isinstance(value, UUID):
            return value
        elif isinstance(value, bytes):
            return UUID(bytes=value)
        elif isinstance(value, str):
            return UUID(value)
        elif isinstance(value, int):
return UUID(int=value) elif isinstance(value, tuple): return UUID(fields=value) else: raise ValueError('Invalid value for UUIDField: %r' % value) def to_db_string(self, value, quote=True): return escape(str(value), quote) class IPv4Field(Field): class_default = 0 db_type = 'IPv4' def to_python(self, value, timezone_in_use): if isinstance(value, IPv4Address): return value elif isinstance(value, (bytes, str, int)): return IPv4Address(value) else: raise ValueError('Invalid value for IPv4Address: %r' % value) def to_db_string(self, value, quote=True): return escape(str(value), quote) class IPv6Field(Field): class_default = 0 db_type = 'IPv6' def to_python(self, value, timezone_in_use): if isinstance(value, IPv6Address): return value elif isinstance(value, (bytes, str, int)): return IPv6Address(value) else: raise ValueError('Invalid value for IPv6Address: %r' % value) def to_db_string(self, value, quote=True): return escape(str(value), quote) class NullableField(Field): class_default = None def __init__(self, inner_field, default=None, alias=None, materialized=None, extra_null_values=None, codec=None): assert isinstance(inner_field, Field), "The first argument of NullableField must be a Field instance. 
Not: {}".format(inner_field) self.inner_field = inner_field self._null_values = [None] if extra_null_values: self._null_values.extend(extra_null_values) super(NullableField, self).__init__(default, alias, materialized, readonly=None, codec=codec) def to_python(self, value, timezone_in_use): if value == '\\N' or value in self._null_values: return None return self.inner_field.to_python(value, timezone_in_use) def validate(self, value): value in self._null_values or self.inner_field.validate(value) def to_db_string(self, value, quote=True): if value in self._null_values: return '\\N' return self.inner_field.to_db_string(value, quote=quote) def get_sql(self, with_default_expression=True, db=None): sql = 'Nullable(%s)' % self.inner_field.get_sql(with_default_expression=False, db=db) if with_default_expression: sql += self._extra_params(db) return sql class LowCardinalityField(Field): def __init__(self, inner_field, default=None, alias=None, materialized=None, readonly=None, codec=None): assert isinstance(inner_field, Field), "The first argument of LowCardinalityField must be a Field instance. Not: {}".format(inner_field) assert not isinstance(inner_field, LowCardinalityField), "LowCardinality inner fields are not supported by the ORM" assert not isinstance(inner_field, ArrayField), "Array field inside LowCardinality are not supported by the ORM. 
Use Array(LowCardinality) instead" self.inner_field = inner_field self.class_default = self.inner_field.class_default super(LowCardinalityField, self).__init__(default, alias, materialized, readonly, codec) def to_python(self, value, timezone_in_use): return self.inner_field.to_python(value, timezone_in_use) def validate(self, value): self.inner_field.validate(value) def to_db_string(self, value, quote=True): return self.inner_field.to_db_string(value, quote=quote) def get_sql(self, with_default_expression=True, db=None): if db and db.has_low_cardinality_support: sql = 'LowCardinality(%s)' % self.inner_field.get_sql(with_default_expression=False) else: sql = self.inner_field.get_sql(with_default_expression=False) logger.warning('LowCardinalityField not supported on clickhouse-server version < 19.0 using {} as fallback'.format(self.inner_field.__class__.__name__)) if with_default_expression: sql += self._extra_params(db) return sql # Expose only relevant classes in import * __all__ = get_subclass_names(locals(), Field)
How to Check the Compatibility of your Interior Designer in Gurgaon? Are you the owner of a big commercial space? Are you willing to renovate your residential premises for a particular occasion? At this juncture, a professional consultation with a design expert can ease your task and render you a beneficial solution. It becomes very difficult to determine which interior designer will best suit your preferences. The market is full of competition, and every second designer company claims to be better than the others. To overcome this overwhelming situation, you need to be very clear about your objective, your purpose for the interior design and, most importantly, your taste. When choosing an interior designer in the Gurgaon area, you should check the feedback on previous work performed by the interior design company you are going to opt for. This article briefly sets out some points, as instructions, which can guide you better in the selection of an interior design agency. Is an interior designer different from an interior decorator? An interior decorator takes care of installing draperies, floorings, light fixtures, wall covers, furniture etc. As times change and technology advances, interior designers are also taking part in deciding architectural criteria for the building of homes or commercial spaces, each differently as per the demand. For a reputed and advanced interior designer, it is desirable to possess expertise in home design & CAD software for preparing blueprints to present in front of clients. If you are a resident of a metropolitan city or an area nearby, and are searching for an interior designer who can renovate your building perfectly in compliance with currently prevailing trends & techniques, then you are advised to research first & get a clearer view of interior designers.
Most often an interior designer works as part of a team on a freelance basis, but sometimes they also take on responsibilities in architectural firms as well as home furnishing stores. The main objective of an accomplished interior designer is to get an overview of the exact needs of the client, the available space and the surrounding environment, and to integrate them all into the functionality as well as the aesthetics of the interior. The purpose of this article is to provide readers with brief information about the points to be taken care of while choosing an interior design for commercial or residential premises.
#!/usr/bin/env python
"""
Usage:
Shut down your registry service to avoid race conditions and possible data loss
and then run the command with an image repo like this:
delete_docker_registry_image.py --image awesomeimage --dry-run
"""
import argparse
import json
import logging
import os
import sys
import shutil
import glob

logger = logging.getLogger(__name__)


def del_empty_dirs(s_dir, top_level):
    """Recursively delete empty directories below `s_dir`.

    Returns True if `s_dir` itself is (now) empty. The directory passed with
    top_level=True is never removed, only its descendants.
    """
    b_empty = True

    for s_target in os.listdir(s_dir):
        s_path = os.path.join(s_dir, s_target)
        if os.path.isdir(s_path):
            if not del_empty_dirs(s_path, False):
                b_empty = False
        else:
            b_empty = False

    if b_empty:
        logger.debug("Deleting empty directory '%s'", s_dir)
        if not top_level:
            os.rmdir(s_dir)

    return b_empty


def get_layers_from_blob(path):
    """Parse a manifest json blob and return the set of layer digests.

    Handles both schemaVersion 1 ("fsLayers"/"blobSum") and schemaVersion 2
    ("layers"/"digest", plus the "config" blob). Returns an empty set on error.
    """
    try:
        with open(path, "r") as blob:
            data_raw = blob.read()
            data = json.loads(data_raw)
            if data["schemaVersion"] == 1:
                result = set([entry["blobSum"].split(":")[1] for entry in data["fsLayers"]])
            else:
                result = set([entry["digest"].split(":")[1] for entry in data["layers"]])
                if "config" in data:
                    result.add(data["config"]["digest"].split(":")[1])
            return result
    except Exception as error:
        # Best-effort: log and return an empty set rather than abort the run.
        logger.critical("Failed to read layers from blob:%s", error)
        return set()


def get_digest_from_blob(path):
    """Parse a link file of the form "sha256:<digest>" and return the digest.

    Returns an empty string on error.
    """
    try:
        with open(path, "r") as blob:
            return blob.read().split(":")[1]
    except Exception as error:
        logger.critical("Failed to read digest from blob:%s", error)
        return ""


def get_links(path, _filter=None):
    """Recursively walk `path` and parse every "link" file found.

    If `_filter` is given, only link files whose path contains that substring
    are considered. Returns a list of digests.
    """
    result = []

    for root, _, files in os.walk(path):
        for each in files:
            if each == "link":
                filepath = os.path.join(root, each)
                if not _filter or _filter in filepath:
                    result.append(get_digest_from_blob(filepath))

    return result


class RegistryCleanerError(Exception):
    pass


class RegistryCleaner(object):
    """Clean a docker registry's filesystem storage in place."""

    def __init__(self, registry_data_dir, dry_run=False):
        self.registry_data_dir = registry_data_dir
        if not os.path.isdir(self.registry_data_dir):
            raise RegistryCleanerError("No repositories directory found inside " \
                                       "REGISTRY_DATA_DIR '{0}'.".
                                       format(self.registry_data_dir))
        self.dry_run = dry_run

    def _delete_layer(self, repo, digest):
        """remove blob directory from filesystem"""
        path = os.path.join(self.registry_data_dir, "repositories", repo, "_layers/sha256", digest)
        self._delete_dir(path)

    def _delete_blob(self, digest):
        """remove blob directory from filesystem"""
        path = os.path.join(self.registry_data_dir, "blobs/sha256", digest[0:2], digest)
        self._delete_dir(path)

    def _blob_path_for_revision(self, digest):
        """where we can find the blob that contains the json describing this digest"""
        return os.path.join(self.registry_data_dir, "blobs/sha256",
                            digest[0:2], digest, "data")

    def _blob_path_for_revision_is_missing(self, digest):
        """for each revision, there should be a blob describing it"""
        return not os.path.isfile(self._blob_path_for_revision(digest))

    def _get_layers_from_blob(self, digest):
        """get layers from blob by digest"""
        return get_layers_from_blob(self._blob_path_for_revision(digest))

    def _delete_dir(self, path):
        """remove directory from filesystem (or just log it in dry-run mode)"""
        if self.dry_run:
            logger.info("DRY_RUN: would have deleted %s", path)
        else:
            logger.info("Deleting %s", path)
            try:
                shutil.rmtree(path)
            except Exception as error:
                logger.critical("Failed to delete directory:%s", error)

    def _delete_from_tag_index_for_revision(self, repo, digest):
        """delete revision from tag indexes"""
        paths = glob.glob(
            os.path.join(self.registry_data_dir, "repositories", repo,
                         "_manifests/tags/*/index/sha256", digest)
        )
        for path in paths:
            self._delete_dir(path)

    def _delete_revisions(self, repo, revisions, blobs_to_keep=None):
        """delete revisions from list of directories"""
        if blobs_to_keep is None:
            blobs_to_keep = []
        for revision_dir in revisions:
            digests = get_links(revision_dir)
            for digest in digests:
                self._delete_from_tag_index_for_revision(repo, digest)
                if digest not in blobs_to_keep:
                    self._delete_blob(digest)

            self._delete_dir(revision_dir)

    def _get_tags(self, repo):
        """get all tags for given repository"""
        path = os.path.join(self.registry_data_dir, "repositories", repo, "_manifests/tags")
        if not os.path.isdir(path):
            logger.critical("No repository '%s' found in repositories directory %s",
                            repo, self.registry_data_dir)
            return None
        result = []
        for each in os.listdir(path):
            filepath = os.path.join(path, each)
            if os.path.isdir(filepath):
                result.append(each)
        return result

    def _get_repositories(self):
        """get all repository repos (including two-level "org/name" repos)"""
        result = []
        root = os.path.join(self.registry_data_dir, "repositories")
        for each in os.listdir(root):
            filepath = os.path.join(root, each)
            if os.path.isdir(filepath):
                inside = os.listdir(filepath)
                if "_layers" in inside:
                    result.append(each)
                else:
                    for inner in inside:
                        result.append(os.path.join(each, inner))
        return result

    def _get_all_links(self, except_repo=""):
        """get links for every repository"""
        result = []
        repositories = self._get_repositories()
        for repo in [r for r in repositories if r != except_repo]:
            path = os.path.join(self.registry_data_dir, "repositories", repo)
            for link in get_links(path):
                result.append(link)
        return result

    def prune(self):
        """delete all empty directories in registry_data_dir"""
        del_empty_dirs(self.registry_data_dir, True)

    def _layer_in_same_repo(self, repo, tag, layer):
        """check if layer is found in other tags of same repository"""
        for other_tag in [t for t in self._get_tags(repo) if t != tag]:
            path = os.path.join(self.registry_data_dir, "repositories", repo,
                                "_manifests/tags", other_tag, "current/link")
            manifest = get_digest_from_blob(path)
            try:
                layers = self._get_layers_from_blob(manifest)
                if layer in layers:
                    return True
            except IOError:
                if self._blob_path_for_revision_is_missing(manifest):
                    # Dangling tag: its manifest blob is gone, so drop the tag.
                    # logger.warn is deprecated; use logger.warning.
                    logger.warning("Blob for digest %s does not exist. Deleting tag manifest: %s", manifest, other_tag)
                    tag_dir = os.path.join(self.registry_data_dir, "repositories", repo,
                                           "_manifests/tags", other_tag)
                    self._delete_dir(tag_dir)
                else:
                    raise
        return False

    def _manifest_in_same_repo(self, repo, tag, manifest):
        """check if manifest is found in other tags of same repository"""
        for other_tag in [t for t in self._get_tags(repo) if t != tag]:
            path = os.path.join(self.registry_data_dir, "repositories", repo,
                                "_manifests/tags", other_tag, "current/link")
            other_manifest = get_digest_from_blob(path)
            if other_manifest == manifest:
                return True

        return False

    def delete_entire_repository(self, repo):
        """delete all blobs for given repository repo"""
        logger.debug("Deleting entire repository '%s'", repo)
        repo_dir = os.path.join(self.registry_data_dir, "repositories", repo)
        if not os.path.isdir(repo_dir):
            raise RegistryCleanerError("No repository '{0}' found in repositories "
                                       "directory {1}/repositories".
                                       format(repo, self.registry_data_dir))
        links = set(get_links(repo_dir))
        all_links_but_current = set(self._get_all_links(except_repo=repo))
        for layer in links:
            if layer in all_links_but_current:
                logger.debug("Blob found in another repository. Not deleting: %s", layer)
            else:
                self._delete_blob(layer)
        self._delete_dir(repo_dir)

    def delete_repository_tag(self, repo, tag):
        """delete all blobs only for given tag of repository"""
        logger.debug("Deleting repository '%s' with tag '%s'", repo, tag)
        tag_dir = os.path.join(self.registry_data_dir, "repositories", repo, "_manifests/tags", tag)
        if not os.path.isdir(tag_dir):
            raise RegistryCleanerError("No repository '{0}' tag '{1}' found in repositories "
                                       "directory {2}/repositories".
                                       format(repo, tag, self.registry_data_dir))
        manifests_for_tag = set(get_links(tag_dir))
        revisions_to_delete = []
        blobs_to_keep = []
        layers = []
        all_links_not_in_current_repo = set(self._get_all_links(except_repo=repo))
        for manifest in manifests_for_tag:
            logger.debug("Looking up filesystem layers for manifest digest %s", manifest)

            if self._manifest_in_same_repo(repo, tag, manifest):
                logger.debug("Not deleting since we found another tag using manifest: %s", manifest)
                continue
            else:
                revisions_to_delete.append(
                    os.path.join(self.registry_data_dir, "repositories", repo,
                                 "_manifests/revisions/sha256", manifest)
                )
                if manifest in all_links_not_in_current_repo:
                    logger.debug("Not deleting the blob data since we found another repo using manifest: %s", manifest)
                    blobs_to_keep.append(manifest)

                layers.extend(self._get_layers_from_blob(manifest))

        layers_uniq = set(layers)
        for layer in layers_uniq:
            if self._layer_in_same_repo(repo, tag, layer):
                logger.debug("Not deleting since we found another tag using digest: %s", layer)
                continue

            self._delete_layer(repo, layer)
            if layer in all_links_not_in_current_repo:
                logger.debug("Blob found in another repository. Not deleting: %s", layer)
            else:
                self._delete_blob(layer)

        self._delete_revisions(repo, revisions_to_delete, blobs_to_keep)
        self._delete_dir(tag_dir)

    def delete_untagged(self, repo):
        """delete all untagged data from repo"""
        # (log message typo fixed: "utagged" -> "untagged")
        logger.debug("Deleting untagged data from repository '%s'", repo)
        repositories_dir = os.path.join(self.registry_data_dir, "repositories")
        repo_dir = os.path.join(repositories_dir, repo)
        if not os.path.isdir(repo_dir):
            raise RegistryCleanerError("No repository '{0}' found in repositories "
                                       "directory {1}/repositories".
                                       format(repo, self.registry_data_dir))
        tagged_links = set(get_links(repositories_dir, _filter="current"))
        layers_to_protect = []
        for link in tagged_links:
            layers_to_protect.extend(self._get_layers_from_blob(link))

        unique_layers_to_protect = set(layers_to_protect)
        for layer in unique_layers_to_protect:
            logger.debug("layer_to_protect: %s", layer)

        tagged_revisions = set(get_links(repo_dir, _filter="current"))

        revisions_to_delete = []
        layers_to_delete = []

        dir_for_revisions = os.path.join(repo_dir, "_manifests/revisions/sha256")
        for rev in os.listdir(dir_for_revisions):
            if rev not in tagged_revisions:
                revisions_to_delete.append(os.path.join(dir_for_revisions, rev))
                for layer in self._get_layers_from_blob(rev):
                    if layer not in unique_layers_to_protect:
                        layers_to_delete.append(layer)

        unique_layers_to_delete = set(layers_to_delete)

        self._delete_revisions(repo, revisions_to_delete)
        for layer in unique_layers_to_delete:
            self._delete_blob(layer)
            self._delete_layer(repo, layer)

    def get_tag_count(self, repo):
        """Return the number of tags for `repo`, or -1 if the tags dir is missing."""
        logger.debug("Get tag count of repository '%s'", repo)
        repo_dir = os.path.join(self.registry_data_dir, "repositories", repo)
        tags_dir = os.path.join(repo_dir, "_manifests/tags")

        if os.path.isdir(tags_dir):
            tags = os.listdir(tags_dir)
            return len(tags)
        else:
            logger.info("Tags directory does not exist: '%s'", tags_dir)
            return -1


def main():
    """cli entrypoint"""
    parser = argparse.ArgumentParser(description="Cleanup docker registry")
    parser.add_argument("-i", "--image",
                        dest="image",
                        required=True,
                        help="Docker image to cleanup")
    parser.add_argument("-v", "--verbose",
                        dest="verbose",
                        action="store_true",
                        help="verbose")
    parser.add_argument("-n", "--dry-run",
                        dest="dry_run",
                        action="store_true",
                        help="Dry run")
    parser.add_argument("-f", "--force",
                        dest="force",
                        action="store_true",
                        help="Force delete (deprecated)")
    parser.add_argument("-p", "--prune",
                        dest="prune",
                        action="store_true",
                        help="Prune")
    parser.add_argument("-u", "--untagged",
                        dest="untagged",
                        action="store_true",
                        help="Delete all untagged blobs for image")
    args = parser.parse_args()

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(u'%(levelname)-8s [%(asctime)s]  %(message)s'))
    logger.addHandler(handler)

    if args.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # make sure not to log before logging is setup. that'll hose your logging config.
    if args.force:
        logger.info(
            "You supplied the force switch, which is deprecated. It has no effect now, and the script defaults to doing what used to be only happen when force was true")

    splitted = args.image.split(":")
    if len(splitted) == 2:
        image = splitted[0]
        tag = splitted[1]
    else:
        image = args.image
        tag = None

    if 'REGISTRY_DATA_DIR' in os.environ:
        registry_data_dir = os.environ['REGISTRY_DATA_DIR']
    else:
        registry_data_dir = "/opt/registry_data/docker/registry/v2"

    try:
        cleaner = RegistryCleaner(registry_data_dir, dry_run=args.dry_run)
        if args.untagged:
            cleaner.delete_untagged(image)
        else:
            if tag:
                tag_count = cleaner.get_tag_count(image)
                if tag_count == 1:
                    cleaner.delete_entire_repository(image)
                else:
                    cleaner.delete_repository_tag(image, tag)
            else:
                cleaner.delete_entire_repository(image)

        if args.prune:
            cleaner.prune()
    except RegistryCleanerError as error:
        logger.fatal(error)
        sys.exit(1)


if __name__ == "__main__":
    main()
All conference, meetings and event planners will agree that mobile event apps are fast becoming a must-have on the event planning checklist. We recently looked at event planning statistics that show 10 reasons why every event needs a mobile event app. However, there are still organizations who are using some mix of printed paper programs, lengthy email chains with massive attachments and confusing spreadsheets to share event information with attendees and communicate with them at the event. There are many reasons organizations are sticking to paper for events including the cost, technical know-how of attendees and the added time/resources in setting up the event app. All of these reasons come down to one key issue: event planners are concerned it will be a difficult transition for everyone involved – their team, attendees and sponsors. That’s why, we’ve published a (free) eBook on how to easily transition your events from print to mobile guides. Goodbye expensive and outdated paper programs, hello instant mobile event apps! Wasting time, money and resources on paper programs or other printouts is the biggest mistake event professionals make. It’s time to bid goodbye to spending hours in preparing printed bio books and running to the printer. These hefty, expensive and time-consuming paper programs are old news! Learn how you can be a better event professional by taking your events mobile with 4 tactics in this eBook. Get your copy of the free eBook to take your events paperless with mobile event apps now. You’ll walk away with 4 tactics on how to convert your attendees into mobile event app fans. Attendees are at the core of every event so it’s crucial that they adapt and enjoy your event app. That’s why we’ve proposed 4 tactics to help you with this transition. Each of the 4 tactics includes several actionable steps you can execute on right away or as soon as you’re ready to take your events mobile. 
Try all of these ideas and action items, or create a custom plan for taking your event mobile based on your attendees. The eBook includes a plan to encourage technically-challenged attendees to adopt the event app. This is one of the top reasons we hear why event planners have not yet transitioned from print to mobile event guides. The eBook includes 4 special tips on how to convert the technically-challenged attendees. With these tips, even the late technology adopters will jump in on the event app fun. Finally, get tips to show your event sponsors the benefits of the mobile event app. Without event sponsors, you may not be able to produce your fantastic event. If your sponsors are used to print ad placements, you need to show them the benefits of featuring their brand in the event app. With the tips highlighted in this eBook, your sponsors will be hooked, and soon they'll be requesting a mobile app for the next event. Get your copy of the free eBook to take your events paperless with mobile event apps now.
####################################################################### # This file is part of JMdictDB. # Copyright (c) 2008-2010 Stuart McGraw # # JMdictDB is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, # or (at your option) any later version. # # JMdictDB is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with JMdictDB; if not, write to the Free Software Foundation, # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA ####################################################################### import sys, ply.yacc, re, unicodedata, pdb from collections import defaultdict import jellex, jdb from objects import * class ParseError (ValueError): def __init__ (self, msg, loc=None, token=None): self.args = (msg,) self.loc = loc self.token = token precedence = [] # -------------- RULES ---------------- def p_entr_1(p): '''entr : preentr''' p.lexer.begin('INITIAL') e = p[1] # The Freq objects on the readings are inependent of # those on the kanjis. The following function merges # common values. merge_freqs (e) # Set the foreign key ids since they will be used # needed by mk_restrs() below. jdb.setkeys (e, None) # The reading and sense restrictions here are simple # lists of text strings that give the allowed readings # or kanji. mk_restrs() converts those to the canonical # format which uses the index number of the disallowed # readings or kanji. 
if hasattr (e, '_rdng') and hasattr (e, '_kanj'): err = mk_restrs ("_RESTR", e._rdng, e._kanj) if err: perror (p, err, loc=False) if hasattr (e, '_sens') and hasattr (e, '_kanj'): err = mk_restrs ("_STAGK", e._sens, e._kanj) if err: perror (p, err, loc=False) if hasattr (e, '_sens') and hasattr (e, '_rdng'): err = mk_restrs ("_STAGR", e._sens, e._rdng) if err: perror (p, err, loc=False) # Note that the entry object returned may have an _XREF list # on its senses but the supplied xref records are not # complete. We do not assume database access is available # when parsing so we cannot look up the xrefs to find the # the target entry id numbers, validate that the kanji # reading (if given) are unique, or the target senses exist, # etc. It is expected that the caller will do this resolution # on the xrefs using something like jdb.resolv_xref() prior # to using the object. p[0] = e def p_preentr_1(p): '''preentr : kanjsect FF rdngsect FF senses''' p[0] = jdb.Entr(_kanj=p[1], _rdng=p[3], _sens=p[5]) def p_preentr_2(p): '''preentr : FF rdngsect FF senses''' p[0] = jdb.Entr(_rdng=p[2], _sens=p[4]) def p_preentr_3(p): '''preentr : kanjsect FF FF senses''' p[0] = jdb.Entr(_kanj=p[1], _sens=p[4]) def p_kanjsect_1(p): '''kanjsect : kanjitem''' p[0] = [p[1]] def p_kanjsect_2(p): '''kanjsect : kanjsect SEMI kanjitem''' p[0] = p[1]; p[0].append (p[3]) def p_kanjitem_1(p): '''kanjitem : krtext''' p[0] = jdb.Kanj(txt=p[1]) def p_kanjitem_2(p): '''kanjitem : krtext taglists''' kanj = jdb.Kanj(txt=p[1]) err = bld_kanj (kanj, p[2]) if err: perror (p, err) p[0] = kanj def p_rdngsect_1(p): '''rdngsect : rdngitem''' p[0] = [p[1]] def p_rdngsect_2(p): '''rdngsect : rdngsect SEMI rdngitem''' p[0] = p[1]; p[0].append (p[3]) def p_rdngitem_1(p): '''rdngitem : krtext''' p[0] = jdb.Rdng(txt=p[1]) def p_rdngitem_2(p): '''rdngitem : krtext taglists''' rdng = jdb.Rdng(txt=p[1]) err = bld_rdng (rdng, p[2]) if err: perror (p, err) p[0] = rdng def p_krtext_1(p): '''krtext : KTEXT''' p[0] = p[1] def 
p_krtext_2(p): '''krtext : RTEXT''' p[0] = p[1] def p_senses_1(p): '''senses : sense''' p[0] = [p[1]] def p_senses_2(p): '''senses : senses sense''' p[0] = p[1]; p[0].append(p[2]) def p_sense_1(p): '''sense : SNUM glosses''' sens = jdb.Sens() err = bld_sens (sens, p[2]) if err: perror (p, "Unable to build sense %s\n%s" % (p[1], err)) p[0] = sens def p_glosses_1(p): '''glosses : gloss''' p[0] = [p[1]] def p_glosses_2(p): '''glosses : glosses SEMI gloss''' p[0] = p[1]; p[0].append (p[3]) def p_gloss_1(p): '''gloss : GTEXT''' p[0] = [p[1], []] def p_gloss_2(p): '''gloss : GTEXT taglists''' p[0] = [p[1], p[2]] def p_gloss_3(p): '''gloss : taglists GTEXT''' p[0] = [p[2], p[1]] def p_gloss_4(p): '''gloss : taglists GTEXT taglists''' p[0] = [p[2], p[1] + p[3]] def p_taglists_1(p): '''taglists : taglist''' p[0] = p[1] def p_taglists_2(p): '''taglists : taglists taglist''' p[0] = p[1] p[0].extend(p[2]) def p_taglist_1(p): '''taglist : BRKTL tags BRKTR''' p[0] = p[2] def p_tags_1(p): '''tags : tagitem''' p[0] = p[1] def p_tags_2(p): '''tags : tags COMMA tagitem''' p[0] = p[1] p[0].extend (p[3]) def p_tagitem_1(p): '''tagitem : KTEXT''' p[0] = [['RESTR', None, p[1]]] def p_tagitem_2(p): '''tagitem : RTEXT''' p[0] = [['RESTR', p[1], None]] def p_tagitem_3(p): '''tagitem : TEXT''' if p[1] == 'nokanji': p[0] = [['RESTR', 'nokanji', None]] else: x = lookup_tag (p[1]) if not x: perror (p, "Unknown keyword: '%s'" % p[1]) else: p[0] = [[None, p[1]]] def p_tagitem_4(p): '''tagitem : QTEXT''' # FIXME: why isn''t a QTEXT already cleaned up by jellex? txt = jellex.qcleanup (p[1][1:-1]) # FIXME: we should check for ascii text here and treat # that as TEXT above. 
if jdb.jstr_keb (txt): p[0] = [['RESTR', None, txt]] else: p[0] = [['RESTR', txt, None]] def p_tagitem_5(p): '''tagitem : TEXT EQL TEXT''' p[0] = [tag_eql_text (p, p[1], p[3])] def p_tagitem_6(p): '''tagitem : TEXT EQL TEXT COLON''' KW = jdb.KW if p[1] != "lsrc": perror (p, "Keyword must be \"lsrc\"") la = KW.LANG.get(p[3]) if not la: perror (p, "Unrecognised language '%s'" % p[3]) p[0] = [["lsrc", None, la.id, None]] def p_tagitem_7(p): '''tagitem : TEXT EQL TEXT COLON atext''' KW = jdb.KW lsrc_flags = None; lang = None if p[1] in ["lsrc"]: la = KW.LANG.get(p[3]) if not la: if p[3] not in ('w','p','wp','pw'): perror (p, "Unrecognised language '%s'" % p[3]) else: lsrc_flags = p[3] else: lang = la.id else: perror (p, "Keyword not \"lsrc\", \"lit\", or \"expl\"") p[0] = [["lsrc", p[5], lang, lsrc_flags]] def p_tagitem_8(p): '''tagitem : TEXT EQL TEXT SLASH TEXT COLON''' KW = jdb.KW if p[1] != "lsrc": perror (p, "Keyword not \"lsrc\"") la = KW.LANG.get(p[3]) if not la: perror (p, "Unrecognised language '%s'" % p[3]) if p[5] not in ('w','p','wp','pw'): perror (p, "Bad lsrc flags '%s', must be 'w' (wasei), " "'p' (partial),or both" % p[5]) p[0] = [["lsrc", '', la.id, p[5]]] def p_tagitem_9(p): '''tagitem : TEXT EQL TEXT SLASH TEXT COLON atext''' KW = jdb.KW if p[1] != "lsrc": perror (p, "Keyword not \"lsrc\"") la = KW.LANG.get(p[3]) if not la: perror (p, "Unrecognised language '%s'" % p[3]) if p[5] not in ('w','p','wp','pw'): perror (p, "Bad lsrc flags '%s', must be 'w' (wasei), " "'p' (partial),or both" % p[5]) p[0] = [["lsrc", p[7], la.id, p[5]]] def p_tagitem_10(p): '''tagitem : TEXT EQL jrefs''' tag = p[1]; taglist = []; tagtype = 'XREF'; KW = jdb.KW for jref in p[3]: dotlist, slist, seq, corpus = jref if tag in [x.kw for x in KW.recs('XREF')]: # FIXME: instead of using XREF kw''s directly, do we want to # change to an lsrc syntax like, "xref=cf:..." # (possibly keeping "see" and "ant" as direct keywords)? 
if len (dotlist) == 1: if jdb.jstr_keb (dotlist[0]): taglist.append (['XREF', tag, None, dotlist[0], slist, seq, corpus]) else: taglist.append (['XREF', tag, dotlist[0], None, slist, seq, corpus]) elif len (dotlist) == 2: taglist.append (['XREF', tag, dotlist[1], dotlist[0], slist, seq, corpus]) elif len(dotlist) == 0: taglist.append (['XREF', tag, None, None, slist, seq, corpus]) else: perror ("No more than on kanji and one reading string can be given in an xref.") continue # The full 'jref' syntax is only used by xrefs (above) # so if we get here, complain if the 'jref' item has # any xref-specific elements. if seq or corpus or slist: perror ("Seq number, corpus, or a sense list can only be given with xref tags") # Xrefs are also the only contruct that uses the middot character # syntactically. Since we don''t have an xref, then the midots are # just characters in the text, so put the original text string back # together. txt = u'\u30FB'.join (dotlist) if tag == 'restr': if jdb.jstr_keb (txt): taglist.append (['RESTR', None, txt]) else: taglist.append (['RESTR', txt, None]) else: # This must be a tag=QTEXT contruct. 
taglist.append (tag_eql_text (p, tag, txt)) p[0] = taglist def p_atext_1(p): '''atext : TEXT''' p[0] = p[1] def p_atext_2(p): '''atext : QTEXT''' p[0] = jellex.qcleanup (p[1][1:-1]) def p_jrefs_1(p): '''jrefs : jref''' p[0] = [p[1]] def p_jrefs_2(p): '''jrefs : jrefs SEMI jref''' p[0] = p[1]; p[0].append (p[3]) def p_jref_1(p): '''jref : xrefnum''' p[0] = [[],[]] + p[1] def p_jref_2(p): '''jref : xrefnum slist''' p[0] = [[],p[2]] + p[1] def p_jref_3(p): '''jref : xrefnum DOT jitem''' p[0] = p[3] + p[1] def p_jref_4(p): '''jref : jitem''' p[0] = p[1] + [None,''] def p_jitem_1(p): '''jitem : dotlist''' p[0] = [p[1], None] def p_jitem_2(p): '''jitem : dotlist slist''' p[0] = [p[1], p[2]] def p_dotlist_1(p): '''dotlist : jtext''' p[0] = [p[1]] def p_dotlist_2(p): '''dotlist : dotlist DOT jtext''' p[0] = p[1]; p[0].append (p[3]) def p_jtext_1(p): '''jtext : KTEXT''' p[0] = p[1] def p_jtext_2(p): '''jtext : RTEXT''' p[0] = p[1] def p_jtext_3(p): '''jtext : QTEXT''' p[0] = jellex.qcleanup (p[1][1:-1]) def p_xrefnum_1(p): '''xrefnum : NUMBER''' p[0] = [toint(p[1]), ''] def p_xrefnum_2(p): '''xrefnum : NUMBER HASH''' p[0] = [toint(p[1]), None] def p_xrefnum_3(p): '''xrefnum : NUMBER TEXT''' p[0] = [toint(p[1]), p[2]] def p_slist_1(p): '''slist : BRKTL snums BRKTR''' p[0] = p[2] def p_snums_1(p): '''snums : NUMBER''' n = int(p[1]) if n<1 or n>99: perror (p, "Invalid sense number: '%s' % n") p[0] = [n] def p_snums_2(p): '''snums : snums COMMA NUMBER''' n = int(p[3]) if n<1 or n>99: perror (p, "Invalid sense number: '%s' % n") p[0] = p[1] + [n] # -------------- RULES END ---------------- def p_error (token): # Ply insists on having a p_error function that takes # exactly one argument so provide a wrapper around perror. perror (token) def perror (t_or_p, msg="Syntax Error", loc=True): # 't_or_p' is either a YaccProduction (if called from # jelparse code), a LexToken (if called by Ply), or None # (if called by Ply at end-of-text). 
if loc: errpos = -1 if t_or_p is None: errpos = None elif hasattr (t_or_p, 'stack'): # 't_or_p' is a production. Replace with a real token or # grammar symbol from the parser stack. t_or_p = t_or_p.stack[-1] # Grammar symbols will have a "endlexpos" attribute (presuming # that the parse() function was called with argument: tracking=True). if hasattr (t_or_p, 'endlexpos'): errpos = t_or_p.endlexpos # LexTokens will have a "lexpos" attribute. elif hasattr (t_or_p, 'lexpos'): errpos = t_or_p.lexpos if errpos == -1: raise ValueError ("Unable to get lexer error position. " "Was parser called with tracking=True?") t = errloc (errpos) loc_text = '\n'.join (t) else: loc_text = None raise ParseError (msg, loc_text) def errloc (errpos): # Return a list of text lines that consitute the parser # input text (or more accurately the input text to the # lexer used by the parser) with an inserted line containing # a caret character that points to the lexer position when # the error was detected. 'errpos' is the character offset # in the input text of the error, or None if the error was # at the end of input. # Note: Function create_parser() makes the parser it creates # global (in JelParser) and also make the lexer availble as # attribute '.lexer' of the parser, both of whech we rely on # here. global JelParser input = JelParser.lexer.lexdata if errpos is None: errpos = len (input) lines = input.splitlines (True) eol = 0; out = [] for line in lines: out.append (line.rstrip('\n\r')) eol += len (line) if eol >= errpos and errpos >= 0: # Calculate 'errcol', the error position relative # to the start of the current line. errcol = len(line) + errpos - eol # The line may contain double-width characters. Count # (in 'adj') the number of them that occur up to (but # not past) 'errcol'. 
adj = 0 for chr in line[:errcol]: w = unicodedata.east_asian_width (chr) if w == "W" or w == "F": adj += 1 # This assume that the width of a space is the same as # regular characters, and exactly half of a double-width # character, but that is the best we can do here. out.append ((' ' * (errcol+adj)) + '^') errpos = -1 # Ignore errpos on subsequent loops. return out def tag_eql_text (p, tag, text): # Process a tag=text syntax contructs as they are parsed. # We extract this activity into a function since, in the # "tagitem" section, we do it both for the TEXT=TEXT rule, # and TEXT=QTEXT (which is a possible condition in the # TEXT=jrefs rule.) if tag in ["note","lsrc","restr"]: if tag == "restr": if text != "nokanji": perror (p, "Bad restr value (expected \"nokanji\"): '%s'" % p[3]) r = ["RESTR", "nokanji", None] else: r = [tag, text, 1, None] else: x = lookup_tag (text, tag) if x and len(x) > 1: raise ValueError ("Unexpected return value from lookup_tag()") if x is None: perror (p, "Unknown keyword type '%s'" % tag) elif not x: perror (p, "Unknown %s keyword '%s'" % (tag,text)) else: r = x[0] return r def lookup_tag (tag, typs=None): # Lookup 'tag' (given as a string) in the keyword tables # and return the kw id number. If 'typs' is given it # should be a string or list of strings and gives the # specific KW domain(s) (e.g. FREQ, KINF, etc) that 'tag' # should be looked for in. # The return value is: # None -- A non-existent KW domain was given in'typs'. # [] -- (Empty list) The 'tag' was not found in any of # the doimains given in 'typs'. # [[typ1,id1],[typ2,id2],...] -- A list of lists. Each # item represents a domain in which 'tag' was found. # The first item of each item is a string giving # the domain name. The second item gives the id # number of that tag in the domain. In the case of # the FREQ keyword, the item will be a 3-list # consisting of "FREQ", the freq kw id, and the # a number for the freq value. E.g. 
lookup_tag('nf23') # will return [["FREQ",5,23]] (assuming that the "nf" # kw has the id value of 5 in the kwfreq table.) KW = jdb.KW matched = [] if not typs: typs = [x for x in KW.attrs()] if isinstance (typs, str): typs = [typs] for typ in typs: typ = typ.upper(); val = None if typ == "FREQ": mo = re.search (r'^([^0-9]+)(\d+)$', tag) if mo: tagbase = mo.group(1) val = int (mo.group(2)) else: tagbase = tag try: x = (getattr (KW, typ))[tagbase] except AttributeError: return None except KeyError: pass else: if not val: matched.append ([typ, x.id]) else: matched.append ([typ, x.id, val]) return matched def bld_sens (sens, glosses): # Build a sense record. 'glosses' is a list of gloss items. # Each gloss item is a 2-tuple: the first item is the gloss # record and the second, a list of sense tags. # Each of the sense tag items is an n-tuple. The first item # in an n-tuple is either a string giving the type of the tag # ('KINF', 'POS'. 'lsrc', etc) or None indicating the type was # not specified (for example, the input text contained a single # keyword like "vi" rather than "pos=vi"). The second and any # further items are dependent on the the tag type. # Our job is to iterate though this list, and put each item # on the appropriate sense list: e.g. all the "gloss" items go # into the list @{$sens->{_gloss}}, all the "POS" keyword items # go on @{$sens->{_pos}}, etc. KW = jdb.KW errs = []; sens._gloss = [] for gtxt, tags in glosses: gloss = jdb.Gloss (txt=jellex.gcleanup(gtxt)) sens._gloss.append (gloss) if tags: errs.extend (sens_tags (sens, gloss, tags)) if gloss.ginf is None: gloss.ginf = KW.GINF['equ'].id if gloss.lang is None: gloss.lang = KW.LANG['eng'].id return "\n".join (errs) def sens_tags (sens, gloss, tags): # See the comments in the "taglist" production for a description # of the format of 'taglist'. KW = jdb.KW errs = [] for t in tags: # Each tag, t, is a list where t[0] is the tag type (aka # domain) as a string, or None if it is unknown. 
There # will be one or more additional items in the list, the # numner depending on what type of tag it is. vals = None typ = t.pop(0) # Get the item type. if typ is None: # Unknown domain (that is, user gave a simple unadorned # tag like [n] rather than [pos=n]) so figure it what # domain it belongs to... # First, if we can interpret the tag as a sense tag, do so. candidates = lookup_tag (t[0], ('POS','MISC','FLD','DIAL')) if candidates and len(candidates) > 1: errs.append ( "Sense tag '%s' is ambiguous, may be either any of %s." " Please specify tag explicity, using, for instance," " \"%s=%s\"" % (t[0], ','.join([x[0] for x in candidates]), candidates[0][0], t[0])) continue if candidates: typ, t = candidates[0][0], [candidates[0][1]] if typ is None: candidates = lookup_tag (t[0], ('GINF','LANG')) if candidates: # There is currently only one ambiguity: "lit" may # be either GINF "literal" or LANG "Lithuanian". # We unilaterally choose the former interpretation # as it is much more common than the latter, and # the latter when needed can be specified as # [lang=lit]. candidate = candidates[0] typ = candidate[0]; t = [candidate[1]] if typ is None: errs.append ("Unknown tag '%s'" % t) continue if typ in ('POS','MISC','FLD','DIAL'): assert len(t)==1, "invalid length" assert type(t[0])==int, "Unresolved kw" if typ == 'POS': o = Pos(kw=t[0]) elif typ == 'MISC': o = Misc(kw=t[0]) elif typ == 'FLD': o = Fld(kw=t[0]) elif typ == 'DIAL': o = Dial(kw=t[0]) append (sens, "_"+typ.lower(), o) elif typ == 'RESTR': # We can't create real _stagk or _stagr lists here # because the readings and kanji we are given by the user # are allowed ones, but we need to store disallowed ones. # To get the disallowed ones, we need access to all the # readings/kanji for this entry and we don't have that # info at this point. So we do what checking we can. and # save the texts as given, and will fix later after the # full entry is built and we have access to the entry's # readings and kanji. 
rtxt,ktxt = t #if num or corp: if ((rtxt and ktxt) or (not rtxt and not ktxt)): errs.append ("Sense restrictions must have a " "reading or kanji (but not both): " + fmt_xitem (t)) if ktxt: append (sens, '_STAGK', ktxt) if rtxt: append (sens, '_STAGR', rtxt) elif typ == 'lsrc': wasei = t[2] and 'w' in t[2] partial = t[2] and 'p' in t[2] append (sens, '_lsrc', jdb.Lsrc(txt=t[0] or '', lang=(t[1] or lang_en), part=partial, wasei=wasei)) elif typ == 'note': if getattr (sens, 'notes', None): errs.append ("Only one sense note allowed") sens.notes = t[0] elif typ == 'XREF': kw = KW.XREF[t[0]].id t[0] = kw append (sens, '_XREF', t) elif typ == 'GINF': t = t[0] # GINF tags have only one value, the ginf code. if getattr (gloss, 'ginf', None): errs.append ( "Warning, duplicate GINF tag '%s' ignored\n" % KW.GINF[t].kw) else: gloss.ginf = t elif typ == 'LANG': t = t[0] # LANG tags have only one value, the lang code. assert isinstance(t,int) if getattr (gloss, 'lang', None): errs.append ( "Warning, duplicate LANG tag '%s' ignored\n" % KW.LANG[t].kw) else: gloss.lang = t elif typ: errs.append ("Cannot use '%s' tag in a sense" % typ) return errs def bld_rdng (r, taglist=[]): errs = []; nokanj = False for t in taglist: typ = t.pop(0) if typ is None: v = lookup_tag (t[0], ('RINF','FREQ')) if not v: typ = None errs.append ("Unknown reading tag '%s'" % t[0]) else: typ, t = v[0][0], v[0][1:] if typ == 'RINF': append (r, '_inf', jdb.Rinf(kw=t[0])) elif typ == 'FREQ': # _freq objects are referenced by both the reading and # kanji _freq lists. Since we don't have access to # the kanj here, temporarily save the freq (kw, value) # tuple in attribute "._FREQ". When the full entry is # processed, the info in here will be removed, merged # with parallel info from the kanj objects, and proper # ._freq objects created. append (r, '_FREQ', (t[0], t[1])) elif typ == 'RESTR': # We can't generate real restr records here because the real # records are the disallowed kanji. 
We have the allowed # kanji here and need the set of all kanji in order to get # the disallowed set, and we don't have that now. So we # just save the allowed kanji as given, and will convert it # after the full entry is built and we have all the info we # need. #for xitem in t[0]: # An xitem represents a reference to another entry # or other info within an entry, in textual form. It # is used for xrefs and restr info. It is a 5-seq # with the following values: # [0] -- Reading text # [1] -- Kanji text # For a reading restr, it is expected to contain only # a kanji text. rtxt,ktxt = t if rtxt == "nokanji": nokanj = True r._NOKANJI = 1 continue if rtxt: errs.append ("Reading restrictions must be kanji only: " + rtxt) append (r, "_RESTR", ktxt) if hasattr (r,'_RESTR') and nokanj: errs.append ("Can't use both kanji and \"nokanji\" in 'restr' tags") elif typ: errs.append ("Cannot use '%s' tag in a reading" % typ) return "\n".join (errs) def bld_kanj (k, taglist=[]): errs = [] for t in taglist: typ = t.pop(0) if typ is None: v = lookup_tag (t[0], ('KINF','FREQ')) if not v: perror ("Unknown kanji tag '%s'" % t[0]) # Warning: The following simply uses the first resolved tag in # the candidates list returned by lookup_tag(). This assumes # there are no possible tags that are ambiguous in the KINF and # FREQ which could cause lookup_tag() to return more than one # candidate tags. typ, t = v[0][0], v[0][1:] if typ == "KINF": append (k, "_inf", jdb.Kinf(kw=t[0])) elif typ == "FREQ": # _freq objects are referenced by both the reading and # kanji _freq lists. Since we don't have access to # the rdng here, temporarily save the freq (kw, value) # tuple in attribute "._FREQ". When the full entry is # processed, the info in here will be removed, merged # with parallel info from the rdng objects, and proper # ._freq objects created. 
append (k, "_FREQ", (t[0], t[1])) else: errs.append ("Cannot use '%s' tag in kanji section" % typ); return "\n".join (errs) def mk_restrs (listkey, rdngs, kanjs): # Note: mk_restrs() are used for all three # types of restriction info: restr, stagr, stagk. However to # simplify things, the comments and variable names assume use # with reading restrictions (restr). # # What we do is take a list of restr text items received from # a user which list the kanji (a subset of all the kanji for # the entry) that are valid with this reading, and turn it # into a list of restr records that identify the kanji that # are *invalid* with this reading. The restr records identify # kanji by id number rather than text. # # listkey -- Name of the key used to get the list of text # restr items from 'rdngs'. These are the text strings # provided by the user. Should be "_RESTR", "_STAGR", # or "_STAGK". # rdngs -- List of rdng or sens records depending on whether # we're doing restr or stagr/stagk restrictions. # kanjs -- List of the entry's kanji or reading records # depending on whether we are doing restr/stagk or stagr # restrictions. errs = [] ktxts = [x.txt for x in kanjs] for n,r in enumerate (rdngs): # Get the list of restr text strings and nokanji flag and # delete them from the rdng object since they aren't part # of the standard api. restrtxt = getattr (r, listkey, None) if restrtxt: delattr (r, listkey) nokanj = getattr (r, '_NOKANJI', None) if nokanj: delattr (r, '_NOKANJI') # Continue with next reading if nothing to be done # with this one. if not nokanj and not restrtxt: continue # bld_rdngs() guarantees that {_NOKANJI} and {_RESTR} # won't both be present on the same rdng. if nokanj and restrtxt: # Only rdng-kanj restriction should have "nokanji" tag, so # message can hardwire "reading" and "kanji" text even though # this function in also used for sens-rdng and sens-kanj # restrictions. 
errs.append ("Reading %d has 'nokanji' tag but entry has no kanji" % (n+1)) continue if nokanj: restrtxt = None z = jdb.txt2restr (restrtxt, r, kanjs, listkey.lower()) # Check for kanji erroneously in the 'restrtxt' but not in # 'kanjs'. As an optimization, we only do this check if the # number of Restr objects created (len(z)) plus the number of # 'restrtxt's are not equal to the number of 'kanjs's. (This # criterion my not be valid in some corner cases.) if restrtxt is not None and len (z) + len (restrtxt) != len (kanjs): nomatch = [x for x in restrtxt if x not in ktxts] if nomatch: if listkey == "_RESTR": not_found_in = "kanji" elif listkey == "_STAGR": not_found_in = "readings" elif listkey == "_STAGK": not_found_in = "kanji" errs.append ("restr value(s) '" + "','".join (nomatch) + "' not in the entry's %s" % not_found_in) return "\n".join (errs) def resolv_xrefs ( cur, # An open DBAPI cursor to the current JMdictDB database. entr # An entry with ._XREF tuples. ): """\ Convert any jelparser generated _XREF lists that are attached to any of the senses in 'entr' to a normal augmented xref list. An _XREF list is a list of 6-tuples: [0] -- The type of xref per id number in table kwxref. [1] -- Reading text of the xref target entry or None. [2] -- Kanji text of the target xref or None. [3] -- A list of ints specifying the target senses in in the target entry. [4] -- None or a number, either seq or entry id. [5] -- None, '', or a corpus name. None means 'number' is a entry id, '' means it is a seq number in the corpus 'entr.src', otherwise it is the name or id number of a corpus in which to try resolving the xref. 
At least one of [1], [2], or [4] must be non-None.\ """ errs = [] for s in getattr (entr, '_sens', []): if not hasattr (s, '_XREF'): continue xrefs = []; xunrs = [] for typ, rtxt, ktxt, slist, seq, corp in s._XREF: if corp == '': corp = entr.src xrf, xunr = find_xref (cur, typ, rtxt, ktxt, slist, seq, corp) if xrf: xrefs.extend (xrf) else: xunrs.append (xunr) errs.append (xunr.msg) if xrefs: s._xref = xrefs if xunrs: s._xrslv = xunrs del s._XREF return errs def find_xref (cur, typ, rtxt, ktxt, slist, seq, corp, corpcache={}, clearcache=False): xrfs = []; xunrs = None; msg = '' if clearcache: corpcache.clear() if isinstance (corp, str): if corpcache.get (corp, None): corpid = corpcache[corp] else: rs = jdb.dbread (cur, "SELECT id FROM kwsrc WHERE kw=%s", [corp]) if len(rs) != 1: raise ValueError ("Invalid corpus name: '%s'" % corp) corpid = corpcache[corp] = rs[0][0] else: corpid = corp try: xrfs = jdb.resolv_xref (cur, typ, rtxt, ktxt, slist, seq, corpid) except ValueError as e: msg = e.args[0] xunrs = jdb.Xrslv (typ=typ, ktxt=ktxt, rtxt=rtxt,tsens=None) xunrs.msg = msg return xrfs, xunrs def merge_freqs (entr): # This function is used by code that contructs Entr objects # by parsing a textual entry description. Generally such code # will parse freq (a.k.a. prio) tags for readings and kanji # individually. Before the entry is used, these independent # tags must be combined so that a rdng/kanj pairs with the # same freq tag point to a single Freq object. This function # does that merging. # It expects the entry's Rdng and Kanj objects to have a temp # attribute named "_FREQ" that contains a list of 2-tuples. # Each 2-tuple contains the freq table kw id number, and the # freq value. After merge_freqs() runs, all those .FREQ # attributes will have been deleted, and .freq attributes # created with equivalent, properly linked Freq objects. fmap = defaultdict (lambda:([list(),list()])) # Collect the info in .FREQ attributes from all the readings. 
for r in getattr (entr, '_rdng', []): for kw_val in getattr (r, '_FREQ', []): # 'kw_val' is a 2-tuple denoting the freq as a freq table # keyword id and freq value pair. rlist = fmap[(kw_val)][0] # Add 'r' to rlist if it is not there already. # Use first() as a "in" operator that uses "is" rather # than "==" as compare function. if not jdb.isin (r, rlist): rlist.append (r) if hasattr (r, '_FREQ'): del r._FREQ # Collect the info in .FREQ attributes from all the kanji. # This works on kanj's the same as above section works on # rdng's and comments above apply here too. for k in getattr (entr, '_kanj', []): for kw_val in getattr (k, '_FREQ', []): klist = fmap[(kw_val)][1] if not jdb.isin (k, klist): klist.append (k) if hasattr (k, '_FREQ'): del k._FREQ # 'fmap' now has one entry for every unique freq (kw,value) tuple # which is a pair of sets. The first set consists of all Rdng # objects that (kw,value) freq spec applies to. The second is # the set of all kanji it applies to. We take all combinations # of readings with kanji, and create a Freq object for each. errs = jdb.make_freq_objs (fmap, entr) return errs def append (sens, key, item): # Append $item to the list, @{$sens->{$key}}, creating # the latter if needed. 
v = [] try: v = getattr (sens, key) except AttributeError: setattr (sens, key, v) v.append (item) _uni_numeric = { '\uFF10':'0','\uFF11':'1','\uFF12':'2','\uFF13':'3', '\uFF14':'4','\uFF15':'5','\uFF16':'6','\uFF17':'7', '\uFF18':'8','\uFF19':'9',} def toint (s): n = int (s.translate (_uni_numeric)) return n def fmt_xitem (xitem): typ = None if len (xitem) == 6: typ = xitem.pop (0) if len (xitem) == 5: rtxt, ktxt, slist, num, corp = xitem else: rtxt, ktxt, slist, num, corp = xitem + [[], None, None] k = ktxt or ''; r = rtxt or ''; n = num or '' if num: if corp: c = ' ' + corp else: c = '#' if corp is None else '' n = n + c else: c = '' kr = k + (u'\u30FB' if k and r else '') + r t = n + (u'\u30FB' if n and kr else '') + kr s = ('[%s]' % ','.join(slist)) if slist else '' return t + s def parse_grp (grpstr): rv = []; KWGRP = jdb.KW.GRP if not grpstr.strip(): return rv # FIXME: Handle grp.notes which is currently ignored. for g in grpstr.split (';'): grp, x, ord = g.strip().partition ('.') if grp.isdigit(): grp = int(grp) grp = KWGRP[grp].id ord = int(ord) rv.append (Grp (kw=grp, ord=ord)) return rv def create_parser (lexer, toks, **args): # Set global JelParser since we need access to it # from error handling function p_error() and I don't # know any other way to make it available there. global tokens, JelParser # The tokens also have to be global because Ply # doesn't believe in user function parameters for # argument passing. tokens = toks # The following sets default keyword arguments to # to Ply's parser factory function. These are # intended to cause it to use the "jelparse_tab.py" # file that should be in sys.path somewhere (either # in the development dir's python/lib, or in the # web lib dir.) so as to prevent Ply from trying # to rebuild it, and worse, writing it like bird # droppings wherever we happen to be running. 
if 'module' not in args: args['module'] = sys.modules['jelparse'] if 'tabmodule' not in args: args['tabmodule'] = 'jelparse_tab' if 'write_tables' not in args: args['write_tables'] = 1 if 'optimize' not in args: args['optimize'] = 1 if 'debug' not in args: args['debug'] = 0 JelParser = ply.yacc.yacc (**args) JelParser.lexer = lexer # Access to lexer needed in error handler. return JelParser
Intel tel® PROSet/ Wireless Software and Drivers for Intel® WiFi Link 5100 Version: Latest ( Latest) Date: 8/ 17/. Intel( R) PRO/ Wireless 2100A LAN Mini PCI Adapter. Whereas Inspiron may change vendors components several times over the course of a single model the Latitude line generally retains identical components throughout its tel( R) PRO/ Wireless 2100 LAN 3A Mini PCI Adapter. 1 64- bit* Windows 8 32- bit* 7 more: Latest. 特殊金属加工( Ti、 Co- Ni) チタン・ コバルトニッケル ( 加工技術). Intel 82578DM Gigabit Network Connection Drivers. Mar 17, · I have recently received same message on my laptop running XP. Windows XP 32- bit Edition* Windows XP 64- bit Edition*. This download record contains the latest Intel® PROSet/ Wireless software and drivers available for Intel® WiFi Link 5100. Download the latest Intel® PROSet/ Wireless Software and drivers available for Intel® WiFi Link 5100. By contrast the Dell Inspiron is aimed at the consumer market its specifications change regularly. Intel WiFi Link 5100 AGN Drivers. Can always find a driver for your computer' s device. Check Warranty ( Current Product). Which file to choose. Intel Wireless WiFi Link 4965AGN Drivers. With wifi, Network Interface card driver is available here. Intel 82579LM Gigabit Network Connection Drivers. We are providing you latest Inspiron WLAN card driver for your amazing laptop. 53 of the Acer Aspire 1420P Laptops. Intel( R) PRO/ Wireless 2100 LAN 3B Mini PCI Adapter. Find All Intel WiFi Link 5100 AGN Drivers. I have both a laptop desktop running XP( updated) but when the media reader is plugged in the USB. 1, 32- bit* Windows 8. " OS: Windows XP Media Center Edition Intel PRO/ Wireless 3945ABG, Intel WiFi Link 5300, Intel PRO/ Wireless 2200BG, Intel WiFi Link 5100, Intel PRO/ Wireless 2915ABG, WiFi Link Driver File" Intel PRO Wireless 2200BG Intel Wireless WiFi tel PRO/ Wireless 5100 AGN LAN Driver. 
In fact ICT) est une expression, de l' audiovisuel, principalement utilisée dans le monde universitaire, pour désigner le domaine de la télématique, des multimédias, communication technologies, all you need is a working computer , c' est- à- dire les techniques de l' informatique d' Internet et des télécommunications qui permettent. Home 製品から探す 特殊金属加工( Ti、 Co- Ni). Support information for Wireless Networking. Intel pro wireless 5100 agn driver xp. All drivers available for download have been scanned by antivirus program. Intel pro wireless 5100 agn driver xp. Trying to upload photos using a media card reader. The SD card is a 1GB card. First choose your operating system then find your device name click the download work Driver Downloads - most popular Network drivers. Driver Intel Wifi Link 5100 Agn Download Windows 7 XP Vista. Date; Intel® PROSet/ Wireless Software and Drivers for Intel® WiFi Link 5100. Intel 82566DM 2 Gigabit Network Connection Drivers. Home » Intel( R) WiFi Link 5100 AGN Use the links on this page to download the latest version of Intel( R) WiFi Link 5100 AGN drivers. Intel Centrino Wireless N 135 Drivers. The Dell Latitude is a laptop family from Dell Computers, intended for business use. Drivers: Windows 8. 0 Driver version = 14. Intel pro wireless 5100 agn driver xp. The Intel( R) WiFi Link 5100 AGN device has one more Hardware IDs the list is listed below. Windows XP* : PROSet version = 15. Intel will be supplying Windows DCH Drivers for its products beginning in November. Acer Intel 5100 Wireless Adapter Driver Tracey Brown October 22 Windows XP, Acer Wireless Driver Wireless Drivers This is version 12. OS support and versions. You don' t need to buy a Chromebook to enjoy the features of Google' s desktop operating system. Drivers & Software How- tos & Solutions Documentation Diagnostics Warranty & Repair. 
Buy Intel Internal Wireless LAN Card 2200BG WM3B2200BG For Dell Inspiron: Laptop Network Adapters - FREE DELIVERY possible on eligible purchases. I' m running a HP 8530b Workstation OS XP SP3 which connects to the network via a wired connection ( 1gb) and a wireless connection using the on board wireless card. 0000> ( 8AW217WW) - [ 重要] 明确该驱动支持Windows Vista的部件信息. - ( 修正) 设备管理器中显示错误的设备名称( Intel Centrino Wireless Bluetooth 3. 0 + High Speed Virtual Adapter). Overview and product type.
"""Support tickets.""" from SoftLayer.CLI import formatting from SoftLayer import utils import click TEMPLATE_MSG = "***** SoftLayer Ticket Content ******" def get_ticket_results(mgr, ticket_id, update_count=1): """Get output about a ticket. :param integer id: the ticket ID :param integer update_count: number of entries to retrieve from ticket :returns: a KeyValue table containing the details of the ticket """ result = mgr.get_ticket(ticket_id) result = utils.NestedDict(result) table = formatting.KeyValueTable(['Name', 'Value']) table.align['Name'] = 'r' table.align['Value'] = 'l' table.add_row(['id', result['id']]) table.add_row(['title', result['title']]) if result['assignedUser']: table.add_row(['assignedUser', "%s %s" % (result['assignedUser']['firstName'], result['assignedUser']['lastName'])]) table.add_row(['createDate', result['createDate']]) table.add_row(['lastEditDate', result['lastEditDate']]) total_update_count = result['updateCount'] count = min(total_update_count, update_count) for i, update in enumerate(result['updates'][:count]): # NOTE(kmcdonald): Windows new-line characters need to be stripped out wrapped_entry = click.wrap_text(update['entry'].replace('\r', '')) table.add_row(['Update %s' % (i + 1,), wrapped_entry]) return table
How to use Soyatoo: a recipe for success, so you can have your whipped cream too — vegan-style.
#!/usr/bin/python3
"""Cryptopals set 2, challenge 12: byte-at-a-time ECB decryption (simple).

An oracle computes AES-128-ECB(attacker-bytes || unknown-string, fixed-key).
Because ECB encrypts identical plaintext blocks to identical ciphertext
blocks, the unknown suffix can be recovered one byte at a time:

1.  Discover the cipher block size by feeding the oracle growing runs of
    'A' bytes and watching the ciphertext length change.
2.  Detect that the oracle is using ECB mode.
3.  Feed an input that is one byte short of a block boundary, so the first
    unknown byte lands in the last position of a known block.
4.  Try every possible value for that last byte, compare each oracle output
    against the short-input output, and record the match.  Shift one byte
    at a time and repeat until the whole suffix is recovered.
"""

import argparse
import base64
import random
import sys

from utils.cpset2 import aes_ecb, gen_random_bytes, test_aes_ecb

random.seed(1)
GLOBAL_KEY = gen_random_bytes(16)


def is_oracle_ecb(block):
    """Return True if the oracle appears to be encrypting in ECB mode.

    Ten identical input blocks guarantee repeated ciphertext blocks under
    ECB, which test_aes_ecb() detects.  ('block' is the block size in
    bytes, discovered by get_oracle_block_size().)
    """
    return bool(test_aes_ecb('A' * block * 10))


def convert_to_bytes(text):
    """Coerce str (as UTF-8) or bytes to bytes; reject any other type."""
    if isinstance(text, str):
        return text.encode('utf-8')
    if isinstance(text, bytes):
        return text
    raise TypeError('Bad type passed to encryption_oracle')


def decrypt_ecb(block):
    """Recover the oracle's secret suffix one byte at a time.

    :param block: cipher block size in bytes
    :returns: decrypted suffix as str, or '' if padding removal fails
    """
    ans = b''
    mult = 0
    # The ciphertext length of an empty input bounds how many bytes the
    # suffix (plus its padding) can possibly contain.
    ctlen = len(base64.b64decode(encryption_oracle('')))
    while len(ans) < ctlen:
        if len(ans) % block == 0:
            # We crossed a block boundary; compare one more block below.
            mult += 1
        # Pad so the next unknown byte falls in the last slot of the
        # current comparison window.
        pad = b'A' * (block - (len(ans) % block + 1))
        oracle = encryption_oracle(pad)
        found = 0
        # BUG FIX: the original looped over range(0, 255), which skips
        # byte value 0xFF and would fail on any suffix containing it.
        for test in range(256):
            te = pad + ans + bytes([test])
            enc = encryption_oracle(te)
            if (base64.b64decode(enc)[:block * mult]
                    == base64.b64decode(oracle)[:block * mult]):
                ans += bytes([test])
                found = 1
                break
        if not found:
            # No byte matched: we have started eating into the PKCS#7
            # padding, so the real suffix has ended.
            break
    # Strip the PKCS#7 padding recovered along with the suffix.
    pad = int(ans[-1])
    if ans[-pad:] != bytes((pad,)) * pad:
        print('Issue removing final pad.')
        print('Decrypted text: ')
        print(ans)
        return ''
    return ans[:-pad].decode('utf-8')


def get_oracle_block_size():
    """Infer the cipher block size from ciphertext length transitions.

    Feeds inputs of increasing length; after the first jump in ciphertext
    length, counts how many further lengths keep the ciphertext size
    unchanged — that count is the block size.  Returns -1 on failure.
    """
    l = 0
    resize = 0
    cnt = 0
    for i in range(1, 100):
        test = b'A' * i
        tl = len(encryption_oracle(test))
        if l == 0:
            l = tl
        elif resize == 0:
            if tl != l:
                cnt = 1
                l = tl
                resize = 1
        elif l == tl:
            cnt += 1
        else:
            return cnt
    return -1


def manage_decrypt_aes_ecb():
    """Drive the attack: size the block, verify ECB mode, then decrypt."""
    bs = get_oracle_block_size()
    # BUG FIX: get_oracle_block_size() returns -1 on failure, which is
    # truthy, so the original `if bs:` never took the failure path.
    if bs > 0:
        if is_oracle_ecb(bs):
            return decrypt_ecb(bs)
    return ''


def encryption_oracle(text):
    """Return AES-128-ECB(text || unknown-string, GLOBAL_KEY).

    The unknown suffix is kept base64-encoded here so that the attack code
    never sees its plaintext directly.  aes_ecb() is presumably returning
    base64-encoded ciphertext (callers b64decode its result) — behavior
    inherited from utils.cpset2.
    """
    crypt = 'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg'
    crypt += 'aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq'
    crypt += 'dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg'
    crypt += 'YnkK'
    return aes_ecb(convert_to_bytes(text) + base64.b64decode(crypt),
                   convert_to_bytes(GLOBAL_KEY), 1)


def main():
    ans = manage_decrypt_aes_ecb()
    if ans:
        print(ans)
        return 0
    print('Fail.')
    return -1


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Uses an oracle to decrypt AES in ECB mode, one byte at '
                    'a time. This is the simple approach.'
    )
    args = parser.parse_args()
    sys.exit(main())
This minimalistic unisex bag by PB0110 is a design by Ayzit Bostan and is handmade of large leather blanks and variable straps. It can be worn in three different variations. The bag is handcrafted from vegetable tanned leather, a material which becomes even more beautiful with the passage of time and develops a unique patina.
# Welcome spoke classes # # Copyright (C) 2011-2012 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # import sys import re import os import gi gi.require_version("Gtk", "3.0") from gi.repository import Gtk from pyanaconda.ui.gui.hubs.summary import SummaryHub from pyanaconda.ui.gui.spokes import StandaloneSpoke from pyanaconda.ui.gui.utils import setup_gtk_direction, escape_markup from pyanaconda.core.async_utils import async_action_wait from pyanaconda.ui.gui.spokes.lib.lang_locale_handler import LangLocaleHandler from pyanaconda.ui.gui.spokes.lib.unsupported_hardware import UnsupportedHardwareDialog from pyanaconda import localization from pyanaconda.product import distributionText, isFinal, productName, productVersion from pyanaconda import flags from pyanaconda import geoloc from pyanaconda.core.i18n import _, C_ from pyanaconda.core.util import ipmi_abort from pyanaconda.core.constants import DEFAULT_LANG, WINDOW_TITLE_TEXT from pyanaconda.modules.common.constants.services import TIMEZONE, LOCALIZATION from pyanaconda.anaconda_loggers import get_module_logger log = get_module_logger(__name__) 
__all__ = ["WelcomeLanguageSpoke"]


class WelcomeLanguageSpoke(StandaloneSpoke, LangLocaleHandler):
    """
       .. inheritance-diagram:: WelcomeLanguageSpoke
          :parts: 3
    """
    # Glade widget wiring for this standalone spoke.
    mainWidgetName = "welcomeWindow"
    focusWidgetName = "languageEntry"
    uiFile = "spokes/welcome.glade"
    helpFile = "WelcomeSpoke.xml"
    builderObjects = ["languageStore", "languageStoreFilter", "localeStore",
                      "welcomeWindow", "betaWarnDialog"]

    # Shown before the Summary hub, ahead of any other pre-hub spokes.
    preForHub = SummaryHub
    priority = 0

    def __init__(self, *args, **kwargs):
        StandaloneSpoke.__init__(self, *args, **kwargs)
        LangLocaleHandler.__init__(self)
        # Original (untranslated) widget strings, keyed by widget,
        # so retranslate() can re-apply gettext after a language change.
        self._origStrings = {}
        # DBus proxy for the localization module.
        self._l12_module = LOCALIZATION.get_proxy()

    def apply(self):
        """Store the selected locale and, when possible, a default timezone."""
        (store, itr) = self._localeSelection.get_selected()
        if not itr:
            log.warning("No locale is selected. Skip.")
            return
        locale = store[itr][1]
        locale = localization.setup_locale(locale, self._l12_module, text_mode=False)
        self._set_lang(locale)

        # Skip timezone and keyboard default setting for kickstart installs.
        # The user may have provided these values via kickstart and if not, we
        # need to prompt for them. But do continue if geolocation-with-kickstart
        # is enabled.
        if flags.flags.automatedInstall and not geoloc.geoloc.enabled:
            return

        timezone_proxy = TIMEZONE.get_proxy()
        loc_timezones = localization.get_locale_timezones(self._l12_module.Language)
        if geoloc.geoloc.result.timezone:
            # (the geolocation module makes sure that the returned timezone is
            # either a valid timezone or None)
            log.info("using timezone determined by geolocation")
            timezone_proxy.SetTimezone(geoloc.geoloc.result.timezone)
            # Either this is an interactive install and timezone.seen propagates
            # from the interactive default kickstart, or this is a kickstart
            # install where the user explicitly requested geolocation to be used.
            # So set timezone.seen to True, so that the user isn't forced to
            # enter the Date & Time spoke to acknowledge the timezone detected
            # by geolocation before continuing the installation.
            timezone_proxy.SetKickstarted(True)
        elif loc_timezones and not timezone_proxy.Timezone:
            # no data is provided by Geolocation, try to get timezone from the
            # current language
            log.info("geolocation not finished in time, using default timezone")
            timezone_proxy.SetTimezone(loc_timezones[0])

    @property
    def completed(self):
        """Whether this spoke can be skipped entirely.

        NOTE(review): when neither condition below matches, this property
        implicitly returns None (falsy), i.e. the spoke is shown.
        """
        # Skip the welcome screen if we are in single language mode
        # If language has not been set the default language (en_US)
        # will be used for the installation and for the installed system.
        if flags.flags.singlelang:
            return True

        if flags.flags.automatedInstall and self._l12_module.LanguageKickstarted:
            return bool(self._l12_module.Language)

    def _row_is_separator(self, model, itr, *args):
        # Column 3 of the language store flags separator rows.
        return model[itr][3]

    def initialize(self):
        """Look up widgets, then preselect a language using geolocation."""
        self.initialize_start()
        self._languageStore = self.builder.get_object("languageStore")
        self._languageStoreFilter = self.builder.get_object("languageStoreFilter")
        self._languageEntry = self.builder.get_object("languageEntry")
        self._langSelection = self.builder.get_object("languageViewSelection")
        self._langSelectedRenderer = self.builder.get_object("langSelectedRenderer")
        self._langSelectedColumn = self.builder.get_object("langSelectedColumn")
        self._langView = self.builder.get_object("languageView")
        self._localeView = self.builder.get_object("localeView")
        self._localeStore = self.builder.get_object("localeStore")
        self._localeSelection = self.builder.get_object("localeViewSelection")

        LangLocaleHandler.initialize(self)

        # We need to tell the view whether something is a separator or not.
        self._langView.set_row_separator_func(self._row_is_separator, None)

        # We can use the territory from geolocation here
        # to preselect the translation, when it's available.
        #
        # But as the lookup might still be in progress we need to make sure
        # to wait for it to finish. If the lookup has already finished
        # the wait function is basically a noop.
        geoloc.geoloc.wait_for_refresh_to_finish()

        # the lookup should be done now, get the territory
        territory = geoloc.geoloc.result.territory_code

        # bootopts and kickstart have priority over geoip
        language = self._l12_module.Language
        if language and self._l12_module.LanguageKickstarted:
            locales = [language]
        else:
            locales = localization.get_territory_locales(territory) or [DEFAULT_LANG]

        # get the data models
        filter_store = self._languageStoreFilter
        store = filter_store.get_model()

        # get language codes for the locales
        langs = [localization.get_language_id(locale) for locale in locales]

        # check which of the geolocated languages have translations
        # and store the iterators for those languages in a dictionary
        langs_with_translations = {}
        itr = store.get_iter_first()
        while itr:
            row_lang = store[itr][2]
            if row_lang in langs:
                langs_with_translations[row_lang] = itr
            itr = store.iter_next(itr)

        # if there are no translations for the given locales,
        # use default
        if not langs_with_translations:
            self._set_lang(DEFAULT_LANG)
            localization.setup_locale(DEFAULT_LANG, self._l12_module, text_mode=False)
            lang_itr, _locale_itr = self._select_locale(self._l12_module.Language)
            langs_with_translations[DEFAULT_LANG] = lang_itr
            locales = [DEFAULT_LANG]

        # go over all geolocated languages in reverse order
        # and move those we have translation for to the top of the
        # list, above the separator
        for lang in reversed(langs):
            itr = langs_with_translations.get(lang)
            if itr:
                store.move_after(itr, None)
            else:
                # we don't have translation for this language,
                # so dump all locales for it
                locales = [l for l in locales if localization.get_language_id(l) != lang]

        # And then we add a separator after the selected best language
        # and any additional languages (that have translations) from geoip
        newItr = store.insert(len(langs_with_translations))
        store.set(newItr, 0, "", 1, "", 2, "", 3, True)

        # setup the "best" locale
        locale = localization.setup_locale(locales[0], self._l12_module)
        self._set_lang(locale)
        self._select_locale(self._l12_module.Language)

        # report that we are done
        self.initialize_done()

    def _retranslate_one(self, widgetName, context=None):
        """Re-apply gettext to one label widget, caching its original text."""
        widget = self.builder.get_object(widgetName)
        if not widget:
            return

        if widget not in self._origStrings:
            self._origStrings[widget] = widget.get_label()

        before = self._origStrings[widget]
        if context is not None:
            widget.set_label(C_(context, before))
        else:
            widget.set_label(_(before))

    def retranslate(self):
        """Re-translate all visible strings after a language change."""
        # Change the translations on labels and buttons that do not have
        # substitution text.
        for name in ["pickLanguageLabel", "betaWarnTitle", "betaWarnDesc"]:
            self._retranslate_one(name)

        # It would be nice to be able to read the translation context from the
        # widget, but we live in an imperfect world.
        # See also: https://bugzilla.gnome.org/show_bug.cgi?id=729066
        for name in ["quitButton", "continueButton"]:
            self._retranslate_one(name, "GUI|Welcome|Beta Warn Dialog")

        # The welcome label is special - it has text that needs to be
        # substituted.
        welcomeLabel = self.builder.get_object("welcomeLabel")
        welcomeLabel.set_text(_("WELCOME TO %(name)s %(version)s.") %
                              {"name" : productName.upper(), "version" : productVersion})  # pylint: disable=no-member

        # Retranslate the language (filtering) entry's placeholder text
        languageEntry = self.builder.get_object("languageEntry")
        if languageEntry not in self._origStrings:
            self._origStrings[languageEntry] = languageEntry.get_placeholder_text()

        languageEntry.set_placeholder_text(_(self._origStrings[languageEntry]))

        # And of course, don't forget the underlying window.
        self.window.set_property("distribution", distributionText().upper())
        self.window.retranslate()

        # Retranslate the window title text
        # - it looks like that the main window object is not yet
        #   properly initialized during the first run of the
        #   retranslate method (it is always triggered at startup)
        #   so make sure the object is actually what we think it is
        # - ignoring this run is OK as the initial title is
        #   already translated to the initial language
        if isinstance(self.main_window, Gtk.Window):
            self.main_window.set_title(_(WINDOW_TITLE_TEXT))

            # Correct the language attributes for labels
            self.main_window.reapply_language()

    def refresh(self):
        """Reset the locale selection and the language filter entry."""
        self._select_locale(self._l12_module.Language)
        self._languageEntry.set_text("")
        self._languageStoreFilter.refilter()

    def _add_language(self, store, native, english, lang):
        # Wrap the native name in a Pango span carrying its language code
        # so the correct font/shaping is used for rendering.
        native_span = '<span lang="%s">%s</span>' % \
                (escape_markup(lang), escape_markup(native))
        store.append([native_span, english, lang, False])

    def _add_locale(self, store, native, locale):
        # Strip the encoding suffix (".UTF-8" etc.) for the lang attribute.
        native_span = '<span lang="%s">%s</span>' % \
                (escape_markup(re.sub(r'\..*', '', locale)), escape_markup(native))
        store.append([native_span, locale])

    # Signal handlers.
    def on_lang_selection_changed(self, selection):
        (_store, selected) = selection.get_selected_rows()
        LangLocaleHandler.on_lang_selection_changed(self, selection)

        if not selected and hasattr(self.window, "set_may_continue"):
            self.window.set_may_continue(False)

    def on_locale_selection_changed(self, selection):
        (store, selected) = selection.get_selected_rows()
        if hasattr(self.window, "set_may_continue"):
            self.window.set_may_continue(len(selected) > 0)

        if selected:
            lang = store[selected[0]][1]
            lang = localization.setup_locale(lang)
            self._set_lang(lang)
            self.retranslate()

            # Reset the text direction
            setup_gtk_direction()

            # Redraw the window to reset the sidebar to where it needs to be
            self.window.queue_draw()

    # Override the default in StandaloneSpoke so we can display the beta
    # warning dialog first.
    def _on_continue_clicked(self, window, user_data=None):
        # Don't display the betanag dialog if this is the final release or
        # when autostep has been requested as betanag breaks the autostep logic.
        if not isFinal and not self.data.autostep.seen:
            dlg = self.builder.get_object("betaWarnDialog")
            with self.main_window.enlightbox(dlg):
                rc = dlg.run()
                dlg.hide()
            if rc != 1:
                ipmi_abort(scripts=self.data.scripts)
                sys.exit(0)

        dialog = UnsupportedHardwareDialog(self.data)
        if not dialog.supported:
            with self.main_window.enlightbox(dialog.window):
                dialog.refresh()
                rc = dialog.run()

            if rc != 1:
                ipmi_abort(scripts=self.data.scripts)
                sys.exit(0)

        StandaloneSpoke._on_continue_clicked(self, window, user_data)

    @async_action_wait
    def _set_lang(self, lang):
        """Export *lang* via $LANG, on the Gtk main loop thread.

        This is *hopefully* safe. The only threads that might be running
        outside of the GIL are those doing file operations, the Gio dbus
        proxy thread, and calls from the Gtk main loop. The file operations
        won't be doing things that may access the environment, fingers
        crossed, the GDbus thread shouldn't be doing anything weird since all
        of our dbus calls are from python and synchronous. Using
        gtk_action_wait ensures that this is Gtk main loop thread, and it's
        holding the GIL.

        There is a lot of uncertainty and weaseliness in those statements.
        This is not good code.

        We cannot get around setting $LANG. Python's gettext implementation
        differs from C in that it consults only the environment for the current
        language and not the data set via setlocale. If we want translations
        from python modules to work, something needs to be set in the
        environment when the language changes.
        """
        # pylint: disable=environment-modify
        os.environ["LANG"] = lang
LONDON, July 12 ― Roger Federer's hopes of a ninth Wimbledon title bit the dust as South African Kevin Anderson recovered from two sets down to win a quarter-final cliffhanger 2-6 6-7(5) 7-5 6-4 13-11 on a tension-filled Court One yesterday. But after squandering a match point in the 10th game of the third set Federer's game frayed at the edges and an inspired Anderson powered back to claim victory in four hours 14 minutes. It was the biggest shock in a tournament already brimming with surprises, especially as Johannesburg-born Anderson had not even won a set in their four previous meetings. While top seed Federer was only at his scintillating best in the first set, nothing could be taken away from Anderson, who will become the first male player representing South Africa to contest a Wimbledon semi-final since Kevin Curren in 1983. The 32-year-old, who reached last year's US Open final, will face big-serving American John Isner in the semis. “Down 2-0 I tried my best to keep fighting and was able to scrape through and by the end I thought I did a great job. I was in the flow of the match,” eighth seed Anderson said. “Beating Roger Federer at Wimbledon will be one I remember. As the match went on, I gave it my all. I'm very ecstatic.” It is the second time that 20-times Grand Slam champion Federer has lost at Wimbledon from two sets ahead, suffering the same fate against Jo-Wilfried Tsonga in the 2011 quarter-finals. “It just wasn't one of my best days,” Federer, who won the first set in 26 minutes, told reporters. The Swiss refused to blame his surprise defeat on the decision to play the match on Court One rather than his customary Centre Court stage at the All England Club. “I don't think it really mattered, to be honest. I had my chances and blew them, so... That's my problem really,” the 36-year-old said.
“I had my chances and I blew it.” Federer had breezed into the quarter-finals without dropping a set for the eighth time and was imperious in the opening set in which he hit 10 winners and only three unforced errors.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Optional

from airflow.models.taskinstance import SimpleTaskInstance


class CallbackRequest:
    """
    Base Class with information about the callback to be executed.

    :param full_filepath: File Path to use to run the callback
    :param msg: Additional Message that can be used for logging
    """

    def __init__(self, full_filepath: str, msg: Optional[str] = None):
        self.full_filepath = full_filepath
        self.msg = msg

    def __eq__(self, other):
        # BUG FIX: the original compared self.__dict__ == other.__dict__
        # unconditionally, which raised AttributeError for objects without
        # a __dict__ (e.g. ints) and compared unrelated types as equal by
        # attribute coincidence. Returning NotImplemented lets Python fall
        # back to its default handling for foreign types.
        if isinstance(other, CallbackRequest):
            return self.__dict__ == other.__dict__
        return NotImplemented

    def __repr__(self):
        return str(self.__dict__)


class TaskCallbackRequest(CallbackRequest):
    """
    A Class with information about the success/failure TI callback to be executed. Currently, only failure
    callbacks (when tasks are externally killed) and Zombies are run via DagFileProcessorProcess.

    :param full_filepath: File Path to use to run the callback
    :param simple_task_instance: Simplified Task Instance representation
    :param is_failure_callback: Flag to determine whether it is a Failure Callback or Success Callback
    :param msg: Additional Message that can be used for logging to determine failure/zombie
    """

    def __init__(
        self,
        full_filepath: str,
        simple_task_instance: SimpleTaskInstance,
        is_failure_callback: Optional[bool] = True,
        msg: Optional[str] = None,
    ):
        super().__init__(full_filepath=full_filepath, msg=msg)
        self.simple_task_instance = simple_task_instance
        self.is_failure_callback = is_failure_callback


class DagCallbackRequest(CallbackRequest):
    """
    A Class with information about the success/failure DAG callback to be executed.

    :param full_filepath: File Path to use to run the callback
    :param dag_id: DAG ID
    :param execution_date: Execution Date for the DagRun
    :param is_failure_callback: Flag to determine whether it is a Failure Callback or Success Callback
    :param msg: Additional Message that can be used for logging
    """

    def __init__(
        self,
        full_filepath: str,
        dag_id: str,
        execution_date: datetime,
        is_failure_callback: Optional[bool] = True,
        msg: Optional[str] = None,
    ):
        super().__init__(full_filepath=full_filepath, msg=msg)
        self.dag_id = dag_id
        self.execution_date = execution_date
        self.is_failure_callback = is_failure_callback


class SlaCallbackRequest(CallbackRequest):
    """
    A class with information about the SLA callback to be executed.

    :param full_filepath: File Path to use to run the callback
    :param dag_id: DAG ID
    :param msg: Additional Message that can be used for logging
    """

    # CONSISTENCY: accept and forward ``msg`` like the sibling request
    # classes; the new parameter defaults to None, so existing callers
    # are unaffected.
    def __init__(self, full_filepath: str, dag_id: str, msg: Optional[str] = None):
        super().__init__(full_filepath, msg=msg)
        self.dag_id = dag_id
In the above said project we have implemented a robotic drive which operates in differential mode while moving and is controlled simultaneously by a remote control. The term ‘differential’ means that the robot's turning speed is determined by the speed difference between both wheels, each on either side of your robot. For example: keep the left wheel still, and rotate the right wheel forward, and the robot will turn left. It can thus change its direction by varying the relative rate of rotation of its wheels and hence does not require an additional steering motion. The following figure is one of the simplest illustrations of the DIFFERENTIAL DRIVE ROBOT. The mounting of motors on the base in the project will be as follows: two motors will be mounted on the wheels separately (one motor per wheel). H bridges (motor drivers). Battery power source. RF modules. Controller (Arduino/MEGA2560). Base for mounting wheels and motors. Two fixed wheels and one free wheel. We used three types of transistors in the design of the H bridge. They are TIP 142, TIP 147 and 2N2222. A transistor can be used as a switch or as an amplifier depending upon the configuration we implement. In the H bridge we use all these transistors in switching mode. Following is the pictorial form of TIP 142, where B is the base of the transistor, C is the collector and E is the emitter. The above circuit reveals the internal packaging circuitry of the TIP 142, which consists of two simple PNP transistors, a diode and resistors. Following is the pictorial form of the 2N2222 and the pin configuration of the 2N2222. When there is a logic combination of 00(motor A) and 00(motor B), motor A and motor B will be static. When there is a logic combination of 10(motor A) and 00(motor B), motor A will be moving in the anticlockwise direction. When there is a logic combination of 11(motor A) and 00(motor B), motor A and motor B will be static.
When there is a logic combination of 01(motor A) and 00(motor B), motor A will be moving in the clockwise direction. When there is a logic combination of 00(motor A) and 10(motor B), motor B will be moving in the anticlockwise direction. When there is a logic combination of 00(motor A) and 11(motor B), motor A and motor B will be static. A simple DC motor typically has a stationary set of magnets in the stator and an armature with a series of two or more windings of wire wrapped in insulated stack slots around iron pole pieces (called stack teeth), with the ends of the wires terminating on a commutator. An RF module (radio frequency module) is a (usually) small electronic device used to transmit and/or receive radio signals between two devices. In an embedded system it is often desirable to communicate with another device wirelessly. We implant our RF modules, i.e. the transmitter and receiver pieces, into the controller platform to transmit and receive signals for the operations of the drive. We used AVR libraries in order to code our transmitter and receiver. In this said project, to design the transmitter and receiver and the main board of the drive, we will be working on the Arduino/MEGA2560 platform. We implement our RF modules into the controller platform to transmit and receive signals for the operations of the drive. On the receiving module we further implanted our motor drivers in order to drive our robotic body.
from werkzeug.datastructures import MultiDict

from funnel.forms import LabelForm
from funnel.models import Label


def test_new_label_get(client, new_user, new_project):
    """The new-label page renders every user-visible form field."""
    with client.session_transaction() as session:
        session['userid'] = new_user.userid
    resp = client.get(new_project.url_for('new_label'))
    label_form = LabelForm(parent=new_project, model=Label)
    for field in label_form:
        # BUG FIX: the original tested `field not in (...)`, comparing a
        # WTForms Field object against strings -- always true, so the
        # CSRF/nonce fields were never actually excluded. Compare the
        # field *name* instead.
        if field.name not in ('csrf_token', 'form_nonce'):
            assert field.name in resp.data.decode('utf-8')


def test_new_label_without_option(client, new_user, new_project):
    """Posting a plain label (no options) creates it on the project."""
    with client.session_transaction() as session:
        session['userid'] = new_user.userid
    resp_post = client.post(
        new_project.url_for('new_label'),
        data=MultiDict(
            {
                'title': "Label V1",
                'icon_emoji': "👍",
                'required': False,
                'restricted': False,
            }
        ),
        follow_redirects=True,
    )
    assert "Manage labels" in resp_post.data.decode('utf-8')
    label_v1 = Label.query.filter_by(
        title="Label V1", icon_emoji="👍", project=new_project
    ).first()
    assert label_v1 is not None


def test_new_label_with_option(client, new_user, new_project):
    """Posting a label with two options creates the label and its options."""
    with client.session_transaction() as session:
        session['userid'] = new_user.userid
    resp_post = client.post(
        new_project.url_for('new_label'),
        data=MultiDict(
            {
                # Parallel lists: index 0 is the label itself, the rest
                # are its options.
                'title': ["Label V2", "Option V21", "Option V22"],
                'icon_emoji': ["👍", "", ""],
                'required': False,
                'restricted': False,
            }
        ),
        follow_redirects=True,
    )
    assert "Manage labels" in resp_post.data.decode('utf-8')
    label_v2 = Label.query.filter_by(
        title="Label V2", icon_emoji="👍", project=new_project
    ).first()
    assert label_v2 is not None
    assert label_v2.has_options
    assert len(label_v2.options) == 2
    assert label_v2.options[0].title == "Option V21"
    assert label_v2.options[0].icon_emoji == ""
    # With no emoji, the icon falls back to the title's initials.
    assert label_v2.options[0].icon == "OV"
    assert label_v2.options[1].title == "Option V22"
    assert label_v2.options[1].icon_emoji == ""
    assert label_v2.options[1].icon == "OV"
I have put together and delivered a workshop at several business incubators recently. I have fun with it because I really enjoy watching the faces of the participants when a piece of information answers a question they have been struggling with for years. What is that question?
#Embedded file name: ACEStream\Core\DecentralizedTracking\pymdht\core\routing_table.pyo
# Kademlia-style DHT routing table (Python 2, decompiled source).
import ptime as time
import logging
logger = logging.getLogger('dht')

class PopError(Exception):
    pass


class PutError(Exception):
    pass


class SuperBucket(object):
    """A pair of buckets (main + replacement) for one log-distance index."""

    def __init__(self, index, max_nodes):
        self.index = index
        self.main = Bucket(max_nodes)
        self.replacement = Bucket(max_nodes)


class Bucket(object):
    """A bounded list of routing nodes (rnodes) with maintenance timestamps."""

    def __init__(self, max_rnodes):
        self.max_rnodes = max_rnodes
        self.rnodes = []
        self.last_maintenance_ts = time.time()
        self.last_changed_ts = 0

    def get_rnode(self, node_):
        # Returns the stored rnode equal to node_, or None implicitly.
        i = self._find(node_)
        if i >= 0:
            return self.rnodes[i]

    def add(self, rnode):
        # Caller is responsible for checking there_is_room() first.
        rnode.bucket_insertion_ts = time.time()
        self.rnodes.append(rnode)

    def remove(self, node_):
        # NOTE(review): if node_ is absent, _find returns -1 and this
        # silently deletes the last element -- confirm callers guard this.
        del self.rnodes[self._find(node_)]

    def __repr__(self):
        return '\n'.join(['b>'] + [ repr(rnode) for rnode in self.rnodes ])

    def __len__(self):
        return len(self.rnodes)

    def __eq__(self, other):
        # Buckets are equal when capacity and the ordered rnode lists match.
        if self.max_rnodes != other.max_rnodes or len(self) != len(other):
            return False
        for self_rnode, other_rnode in zip(self.rnodes, other.rnodes):
            if self_rnode != other_rnode:
                return False

        return True

    def __ne__(self, other):
        return not self == other

    def there_is_room(self, min_places = 1):
        return len(self.rnodes) + min_places <= self.max_rnodes

    def get_freshest_rnode(self):
        """Return the rnode with the largest last_seen timestamp (or None)."""
        freshest_ts = 0
        freshest_rnode = None
        for rnode in self.rnodes:
            if rnode.last_seen > freshest_ts:
                freshest_ts = rnode.last_seen
                freshest_rnode = rnode

        return freshest_rnode

    def get_stalest_rnode(self):
        """Return the rnode with the smallest last_seen timestamp (or None)."""
        oldest_ts = time.time()
        stalest_rnode = None
        for rnode in self.rnodes:
            if rnode.last_seen < oldest_ts:
                oldest_ts = rnode.last_seen
                stalest_rnode = rnode

        return stalest_rnode

    def sorted_by_rtt(self):
        return sorted(self.rnodes, key=lambda x: x.rtt)

    def _find(self, node_):
        # Linear scan; returns index of node_ in rnodes, or -1.
        for i, rnode in enumerate(self.rnodes):
            if rnode == node_:
                return i

        return -1


# 160-bit node IDs give 160 possible log-distance super-buckets.
NUM_SBUCKETS = 160
NUM_NODES = 8

class RoutingTable(object):
    """Routing table indexed by log-distance from my_node.

    Super-buckets are created lazily; lowest_index tracks the smallest
    index that has a non-empty main bucket (NUM_SBUCKETS when none do).
    NOTE(review): num_rnodes is only read here -- presumably maintained
    by callers; confirm before relying on it.
    """

    def __init__(self, my_node, nodes_per_bucket):
        self.my_node = my_node
        self.nodes_per_bucket = nodes_per_bucket
        self.sbuckets = [None] * NUM_SBUCKETS
        self.num_rnodes = 0
        self.lowest_index = NUM_SBUCKETS

    def get_sbucket(self, log_distance):
        """Return the SuperBucket at log_distance, creating it lazily."""
        index = log_distance
        if index < 0:
            raise IndexError, 'index (%d) must be >= 0' % index
        sbucket = self.sbuckets[index]
        if not sbucket:
            sbucket = SuperBucket(index, self.nodes_per_bucket[index])
            self.sbuckets[index] = sbucket
        return sbucket

    def update_lowest_index(self, index):
        """Recompute lowest_index after a change at the given index."""
        if index < self.lowest_index:
            # A lower bucket may now be occupied.
            sbucket = self.sbuckets[index]
            if sbucket and sbucket.main:
                self.lowest_index = sbucket.index
                return
        if index == self.lowest_index:
            # The lowest bucket may have emptied; scan upward for the next.
            for i in range(index, NUM_SBUCKETS):
                sbucket = self.sbuckets[i]
                if sbucket and sbucket.main:
                    self.lowest_index = i
                    return

            self.lowest_index = NUM_SBUCKETS

    def get_closest_rnodes(self, log_distance, max_rnodes, exclude_myself):
        """Collect up to max_rnodes nodes closest to the given distance.

        Scans downward from log_distance first, optionally adds my_node,
        then scans upward until max_rnodes have been gathered.
        """
        result = []
        index = log_distance
        for i in range(index, self.lowest_index - 1, -1):
            sbucket = self.sbuckets[i]
            if not sbucket:
                continue
            result.extend(sbucket.main.rnodes[:max_rnodes - len(result)])
            if len(result) == max_rnodes:
                return result

        if not exclude_myself:
            result.append(self.my_node)
        for i in range(index + 1, NUM_SBUCKETS):
            sbucket = self.sbuckets[i]
            if not sbucket:
                continue
            result.extend(sbucket.main.rnodes[:max_rnodes - len(result)])
            if len(result) == max_rnodes:
                break

        return result

    def find_next_bucket_with_room_index(self, node_ = None, log_distance = None):
        """Return the index of the next higher main bucket with room.

        NOTE(review): `log_distance or node_.log_distance(...)` falls
        through to node_ when log_distance == 0 -- confirm intended.
        Returns None implicitly when no bucket has room.
        """
        index = log_distance or node_.log_distance(self.my_node)
        for i in range(index + 1, NUM_SBUCKETS):
            sbucket = self.sbuckets[i]
            if sbucket is None or self.sbuckets[i].main.there_is_room():
                return i

    def get_main_rnodes(self):
        """Return all rnodes from every main bucket, lowest index first."""
        rnodes = []
        for i in range(self.lowest_index, NUM_SBUCKETS):
            sbucket = self.sbuckets[i]
            if sbucket:
                rnodes.extend(sbucket.main.rnodes)

        return rnodes

    def print_stats(self):
        """Print per-bucket occupancy and the total rnode count (debug aid)."""
        num_nodes = 0
        for i in range(self.lowest_index, NUM_SBUCKETS):
            sbucket = self.sbuckets[i]
            if sbucket and len(sbucket.main):
                print i, len(sbucket.main), len(sbucket.replacement)

        print 'Total:', self.num_rnodes

    def __repr__(self):
        begin = ['==============RoutingTable============= BEGIN']
        data = [ '%d %r' % (i, sbucket) for i, sbucket in enumerate(self.sbuckets) ]
        end = ['==============RoutingTable============= END']
        return '\n'.join(begin + data + end)
The feud between T.I and Kodak Black has transferred to wax. The back and forth exchange between the two rappers started when T.I responded to Kodak’s statement on Lauren London. Then a snippet came out of a diss song T.I made on Kodak. Now Kodak has made a new song called Expeditiously. Listen to it below. Video Extra #6 >>> Drake Shoots Down A Celeb!
import ast
from _ast import Call, Attribute, Name

from documents.constants import *
from documents.parsers import BaseParser, ParseError

# Name of the base class that marks a class definition as a django model.
MODEL_BASE_CLASS = "Model"

MANY_TO_MANY_FIELD = "many-to-many"
FOREIGN_KEY_FIELD = "foreign-key"

# Maps django field classes to the generic document type names.
FIELD_TYPE_MAP = {
    "PositiveIntegerField": TYPES_INTEGER,
    "IntegerField": TYPES_INTEGER,
    "CharField": TYPES_STRING,
    "EmailField": TYPES_STRING,
    "BooleanField": TYPES_BOOLEAN,
    "DateTimeField": TYPES_DATETIME,
    "DateField": TYPES_DATE,
    "TimeField": TYPES_TIME,
    "FileField": TYPES_STRING,
    "ForeignKey": FOREIGN_KEY_FIELD,
    "ManyToManyField": MANY_TO_MANY_FIELD,
    "OneToOneField": FOREIGN_KEY_FIELD,
    "FloatField": TYPES_DOUBLE
}

# Fallback type for field classes not listed in FIELD_TYPE_MAP.
DEFAULT_FIELD_TYPE = "string"


class FieldVisitor(ast.NodeVisitor):
    """
    A visitor that inspects model fields.

    Collects one dict per discovered field into ``self.fields``:
    ``{"name": ..., "type": ..., ["relationship": related model name]}``.
    """
    def __init__(self):
        self.fields = []

    def add_field(self, field_name, field_type, relationship):
        """Record a discovered field.

        ``relationship`` is the related model's name for FK/M2M fields,
        or None for plain fields (in which case the key is omitted).
        """
        field = {
            "name": field_name,
            "type": field_type
        }
        if relationship is not None:
            field["relationship"] = relationship
        self.fields.append(field)

    @staticmethod
    def _related_model_name(call):
        """Return the related model's name from an FK/M2M field call.

        Django accepts both class targets (``ForeignKey(Author)``) and
        string targets (``ForeignKey("Author")``, ``ForeignKey('self')``);
        the original implementation only handled the class form and raised
        AttributeError/IndexError on the others. Returns None when the
        target cannot be determined (e.g. only keyword arguments).
        """
        if not call.args:
            return None
        target = call.args[0]
        if isinstance(target, Name):
            return target.id
        # String reference: ast.Constant on modern pythons (``.value``),
        # ast.Str before 3.8 (``.s``).
        value = getattr(target, "value", getattr(target, "s", None))
        if isinstance(value, str):
            return value
        return None

    def visit_Assign(self, node):
        """Inspect ``name = models.SomeField(...)`` style assignments."""
        field_name = None
        field_type = None
        relationship = None

        # Only call expressions can be field declarations.
        if not isinstance(node.value, Call):
            return

        try:
            field_name = node.targets[0].id
        except AttributeError:
            # Tuple/attribute targets are not field declarations.
            return

        if isinstance(node.value.func, Attribute):
            field_type = FIELD_TYPE_MAP.get(node.value.func.attr,
                                            DEFAULT_FIELD_TYPE)
            if field_type in (MANY_TO_MANY_FIELD, FOREIGN_KEY_FIELD):
                relationship = self._related_model_name(node.value)
                if relationship is None:
                    # Relation target could not be resolved; skip the field
                    # rather than crash on the AST node.
                    return

        if field_type is not None:
            self.add_field(field_name, field_type, relationship=relationship)


class ModelVisitor(ast.NodeVisitor):
    """
    A visitor that detects django models.

    Fills ``self.models`` with ``{class name: [field dicts]}`` for every
    class whose bases include ``Model`` (as a bare name or attribute).
    """
    def __init__(self):
        self.models = {}

    def visit_ClassDef(self, node):
        base_class = None
        for base in node.bases:
            if isinstance(base, Attribute):
                base_class = base.attr   # e.g. ``models.Model``
            if isinstance(base, Name):
                base_class = base.id     # e.g. ``Model``
        if base_class == MODEL_BASE_CLASS:
            visitor = FieldVisitor()
            visitor.visit(node)
            self.models[node.name] = visitor.fields


class DjangoORMParser(BaseParser):
    """Parses django model source into normalized entity descriptions."""

    def parse(self, text):
        """Parse ``text`` and yield normalized entities.

        Raises ParseError when the source is not valid python.
        """
        try:
            node = ast.parse(text)
        except SyntaxError:
            raise ParseError
        else:
            visitor = ModelVisitor()
            visitor.visit(node)
            return self.normalize_models(visitor.models)

    def normalize_models(self, models):
        """
        The normalization process for django models (a generator).

        - Adds `id` field
        - Separates many-to-many fields into join entities
        - Converts foreign-key-fields to integer ``*_id`` columns
        """
        position_top = 0
        position_left = 0
        for model, fields in models.items():
            attributes = [{
                "name": "id",
                "type": TYPES_INTEGER,
                "is_primary_key": True
            }]
            for field in fields:
                if field.get("type") == MANY_TO_MANY_FIELD:
                    position_left += ENTITY_POSITION_LEFT_INCREASE
                    position_top += ENTITY_POSITION_TOP_INCREASE
                    yield self.m2m_to_entity(model, field,
                                             position_top, position_left)
                    continue  # skip the field addition
                elif field.get("type") == FOREIGN_KEY_FIELD:
                    # Relational columns become plain integer id columns.
                    field["name"] += "_id"
                    field["type"] = TYPES_INTEGER
                attributes.append(field)
            position_left += ENTITY_POSITION_LEFT_INCREASE
            position_top += ENTITY_POSITION_TOP_INCREASE
            yield {
                "name": model.lower(),
                "attributes": attributes,
                "position": {
                    "top": position_top,
                    "left": position_left
                }
            }

    def m2m_to_entity(self, model, field, position_top, position_left):
        """
        Returns a join-table entity for the provided m2m field: an ``id``
        column plus one foreign key to each side of the relation.
        """
        return {
            "name": model.lower() + "_" + field.get("name"),
            "position": {
                "top": position_top,
                "left": position_left
            },
            "attributes": [
                {
                    "name": "id",
                    "type": TYPES_INTEGER,
                },
                {
                    "name": model.lower() + "_id",
                    "type": TYPES_INTEGER,
                    "is_foreign_key": True,
                    "foreign_key_entity": model.lower(),
                    "foreign_key_attribute": "id"
                },
                {
                    "name": field.get("relationship").lower() + "_id",
                    "type": TYPES_INTEGER,
                    "is_foreign_key": True,
                    "foreign_key_entity": field.get("relationship").lower(),
                    "foreign_key_attribute": "id"
                }
            ]
        }
CRS Pharma 2°C - 8°C Storage Chambers are perfect for protecting the integrity of refrigerated temperature sensitive products. To find out more about our range of 2 – 8 Degree Celsius Chilled Stores call on 0800 085 2298 (UK) or 1890 929 824 (IRL). Alternatively, please complete our enquiry form and one of our experience team member will get back to you. Two refrigeration plant systems operate on a daily alternating duty standby cycle, to provide and maintain a chamber temperature of +5ºC +/-3ºC for the storage of pharmaceutical product. Duty Standby temperature control is arranged so that one unit defrosts naturally during the standby rest period while cooling is provided by the duty unit. If a high temperature alarm occurs the standby unit is activated to quickly bring the temperature back within the required range. If a low temperature alarm occurs both duty and standby cooling units are shut down unitil the temperature returns within the required range. Dual Close Control modulating cooling systems ensure that temperature fluctuations are minimized. Early warning alerts at +3ºC and +7ºC. Internal lighting including emergency lights with Minimum 300 lux lighting capacity. High quality build specification with easy open doors and non-slip floors. Local audio visual alarms for total peace of mind. CRS Pharma Solutions offer 2-8 degrees Celsius refrigerator containers, which are perfect for protecting the integrity of refrigerated temperature sensitive products. Our chilled store at 2-8 c solutions can maintain precise temperatures through Dual Close Control modulating cooling systems, which ensure temperature fluctuations are kept to a minimum. With a temperature range of 2^8ºC, our pharmaceutical storage solutions are ideal for storage and stability testing of pharmaceutical products. Our 2 to 8 degrees Celsius pharma cold storage units offer Duty Standby temperature control. 
Duty Standby temperature control means that if one unit defrosts naturally during the standby rest period, cooling will be provided by the duty unit. In the event of a high-temperature alarm, the standby unit is activated, quickly bringing the temperature back within the required range. If a low-temperature alarm occurs, both duty and standby cooling units will be shut down until the temperature returns within the prescribed range. CRS Pharma Solutions 2-8ºC chiller units feature internal lighting and emergency lights with Minimum 300 lux lighting capacity, as standard. All out pharmaceutical cold storage units boast high-quality build specification. Standard features include simple access doors, non-slip floors and easy clean, stainless steel interiors. Additionally, the units come supplied fitted with local audio-visual alarms for total peace of mind. At CRS, we pride ourselves in offering not only the best solution for our client's requirement but also a solution that is energy efficient, providing ongoing cost-savings. For pharmaceutical storage and 2-8 degrees celcius refrigerators that provide exacting temperature control with full redundancy features contact CRS Pharma Solutions. One of our experienced staff will be happy to discuss your requirements and suggest the best solution for your application.
import sim  # host simulator's scripting API (provides stats, thread, config, util)

def getScoreMetricTime(thread_id):
    """Score metric: total non-idle time this thread has run (Python 2 long)."""
    return long(sim.stats.get('thread', thread_id, 'nonidle_elapsed_time'))

def getScoreMetricInstructions(thread_id):
    """Score metric: total instructions this thread has executed."""
    return long(sim.stats.get('thread', thread_id, 'instruction_count'))

class Thread:
    """Bookkeeping wrapper around one simulated thread.

    Tracks the core it is pinned to (None when descheduled), whether it is
    runnable, and an accumulated fairness score derived from the chosen
    metric (time or instructions).
    """

    def __init__(self, thread_id, getScoreMetric):
        self.thread_id = thread_id
        # Bind the metric to this thread id once, so later calls need no args.
        self.getScoreMetric = lambda: getScoreMetric(thread_id)
        self.core = None
        self.runnable = False
        self.unscheduled = False
        self.score = 0        # Accumulated score
        self.metric_last = 0  # State at start of last interval
        # Empty affinity mask: the thread starts unscheduled.
        sim.thread.set_thread_affinity(self.thread_id, ())

    def updateScore(self):
        """Fold the metric delta since the last sample into the score."""
        metric_now = self.getScoreMetric()
        self.score += metric_now - self.metric_last
        self.metric_last = metric_now

    def setScore(self, score):
        """Overwrite the score and resynchronize the metric baseline."""
        self.score = score
        self.metric_last = self.getScoreMetric()

    def setCore(self, core_id, time = -1):
        """Pin the thread to `core_id`, or deschedule it when core_id is None."""
        self.core = core_id
        if core_id is None:
            # Capture the score earned while running before going idle.
            self.updateScore()
            self.last_scheduled_out = time
            sim.thread.set_thread_affinity(self.thread_id, ())
        else:
            self.last_scheduled_in = time
            # One-hot affinity mask selecting exactly core_id.
            sim.thread.set_thread_affinity(self.thread_id, [ c == core_id for c in range(sim.config.ncores) ])

    def __repr__(self):
        return 'Thread(%d, %s, score = %d)' % (self.thread_id, 'core = %d' % self.core if self.core is not None else 'no core', self.score)

class SchedulerLocality:
    """Fairness scheduler: periodically runs the lowest-score threads.

    Configured via a colon-separated argument string:
    ``interval_ns:scheduler_type:core_mask`` (all optional).
    """

    def setup(self, args):
        # Positional options become {0: interval, 1: type, 2: mask}.
        args = dict(enumerate((args or '').split(':')))
        interval_ns = long(args.get(0, None) or 10000000)
        scheduler_type = args.get(1, 'equal_time')
        core_mask = args.get(2, '')
        if scheduler_type == 'equal_time':
            self.getScoreMetric = getScoreMetricTime
        elif scheduler_type == 'equal_instructions':
            self.getScoreMetric = getScoreMetricInstructions
        else:
            raise ValueError('Invalid scheduler type %s' % scheduler_type)
        if core_mask:
            # Pad with zeros so indexing is safe for any core id.
            # NOTE: relies on Python 2 `map` returning a list.
            core_mask = map(int, core_mask.split(',')) + [0]*sim.config.ncores
            self.cores = [ core for core in range(sim.config.ncores) if core_mask[core] ]
        else:
            self.cores = range(sim.config.ncores)
        # Invoke self.periodic on every scheduling quantum.
        sim.util.Every(interval_ns * sim.util.Time.NS, self.periodic)
        self.threads = {}
        self.last_core = 0

    def hook_thread_start(self, thread_id, time):
        self.threads[thread_id] = Thread(thread_id, self.getScoreMetric)
        self.threads[thread_id].runnable = True
        # Initial assignment: one thread per core until cores are exhausted
        if self.last_core < len(self.cores):
            self.threads[thread_id].setCore(self.cores[self.last_core], sim.stats.time())
            self.last_core += 1
        else:
            self.threads[thread_id].setCore(None, sim.stats.time())

    def hook_thread_exit(self, thread_id, time):
        # Exiting is handled like a permanent stall.
        self.hook_thread_stall(thread_id, 'exit', time)

    def hook_thread_stall(self, thread_id, reason, time):
        if reason == 'unscheduled':
            # Ignore calls due to the thread being scheduled out
            self.threads[thread_id].unscheduled = True
        else:
            core = self.threads[thread_id].core
            self.threads[thread_id].setCore(None, time)
            self.threads[thread_id].runnable = False
            # Schedule a new thread (runnable, but not running) on this free core
            threads = [ thread for thread in self.threads.values() if thread.runnable and thread.core is None ]
            if threads:
                # Order by score: the most "starved" thread gets the core.
                threads.sort(key = lambda thread: thread.score)
                threads[0].setCore(core, time)

    def hook_thread_resume(self, thread_id, woken_by, time):
        if self.threads[thread_id].unscheduled:
            # Ignore calls due to the thread being scheduled back in
            self.threads[thread_id].unscheduled = False
        else:
            # Returning thread resumes at the minimum score so it is not
            # penalized for time spent blocked.
            self.threads[thread_id].setScore(min([ thread.score for thread in self.threads.values() ]))
            self.threads[thread_id].runnable = True
            # If there is a free core, move us there now
            used_cores = set([ thread.core for thread in self.threads.values() if thread.core is not None ])
            free_cores = set(self.cores) - used_cores
            if len(free_cores):
                self.threads[thread_id].setCore(list(free_cores)[0], time)

    def periodic(self, time, time_delta):
        """Per-quantum rebalance: run the len(cores) lowest-score threads."""
        # Update thread scores
        [ thread.updateScore() for thread in self.threads.values() if thread.core is not None ]
        # Get a list of all runnable threads
        threads = [ thread for thread in self.threads.values() if thread.runnable ]
        # Order by score
        threads.sort(key = lambda thread: thread.score)
        # Select threads to run now, one per core
        threads = threads[:len(self.cores)]
        #print ', '.join(map(repr, threads))
        # Filter out threads that are already running, and keep them on their current core
        keep_threads = [ thread for thread in threads if thread.core is not None ]
        used_cores = set([ thread.core for thread in keep_threads ])
        # Move new threads to free cores
        free_cores = set(self.cores) - used_cores
        threads = [ thread for thread in threads if thread.core is None ]
        assert(len(free_cores) >= len(threads))
        for thread, core in zip(threads, sorted(free_cores)):
            # Evict whoever currently occupies this core (if anyone).
            current_thread = [ t for t in self.threads.values() if t.core == core ]
            if current_thread:
                current_thread[0].setCore(None)
            thread.setCore(core, time)
            assert thread.runnable

sim.util.register(SchedulerLocality())
Since 1957, the Myron L® Company has designed and manufactured highly reliable analytical instruments for a wide variety of applications. Thousands of professionals around the world rely every day on the performance of our instruments. Demanding uses range from boiler water testing to ultrapure water control to medical instruments for artificial kidney machines. We are proud of the trust our handheld instruments and monitor/controllers have earned in the past. Our product line has evolved to a new level of outstanding performance and value in analytical instruments: the Ultrameter II series. While priced like affordable single-parameter instruments, the Ultrameter II does the job of three, four or even six instruments. The Myron L® Company FCE function reports FAC quickly and accurately by measuring ORP, the chemical characteristic of chlorine that directly reflects its effectivity, cross referenced with pH. Both DPD kits and colorimeters may tell the user the FAC value of the sample in the test tube, but since the chemistry of that sample is quite different from the source water being analyzed, the results are imprecisely related to actual disinfection power. The Myron L® Company FCE function measures the real, unaltered chemistry of source water, including moment-tomoment changes in that chemistry. Fast and accurate in the laboratory, both Ultrameter II models are rugged enough for daily in-line controller checks in hostile process applications. All calibrations are quickly accomplished by pressing the or keys to agree with our NIST traceable Standard Solution. When calibration is necessary, display prompts simplify pH calibration and make sure the correct buffer is being used. Plus, all parameters (excluding factory-set temperature) have an internal electronic setting that can be used for field calibration and as a check on pH/ORP sensor life. 
User adjustable conductivity/ TDS conversion ratio for greater accuracy when measuring solutions not contained in the microprocessor. bluDock™ Accessory Package includes bluDock™, Macintosh/PC application software for downloading data and printed instructions. Conductivity Standard Solutions are necessary to maintain accuracy and for periodic calibration of conductivity/TDS parameters. All Standard Solutions are NIST traceable for your complete confidence. recommended values: KCl- 7000 (7 mS), 442-3000 (TDS), or NaCl-14.0 (mS) available in 2 oz/59 ml, 1 qt/1 L, and 1 gal/3,8 L. Available in 2 oz/59 ml, 1 qt/1 L, and 1 gal/3,8 L. Certificate of NIST traceability for pH Buffer or Conductivity Standard Solutions are available (must be specified when placing solution order). Hard protective case (kit) with three buffers (pH 4, 7, and 10), one pH/ORP storage solution, and two standard solutions, (KCI-7000 and 442-3000). All bottles are 2 oz/59 ml. Soft protective case is constructed of padded Nylon and features a belt clip for hands-free mobility. Replacement pH/ORP sensor user-replaceable, features a unique/porous liquid-junction. All Myron L® Ultrameter IIs have a Two (2) Year Limited Warranty. The pH/ORP sensors have a Six (6) Month Limited Warranty. Warranty is limited to the repair or replacement of the Ultrameter II only, at our discretion. Myron L® Company assumes no other responsibility or liability.
"Tools for optimal fits to GP sweeps" from time import time import numpy as np from ..small_classes import Count from ..small_scripts import mag from ..solution_array import SolutionArray from ..exceptions import InvalidGPConstraint class BinarySweepTree: # pylint: disable=too-many-instance-attributes """Spans a line segment. May contain two subtrees that divide the segment. Attributes ---------- bounds : two-element list The left and right boundaries of the segment sols : two-element list The left and right solutions of the segment costs : array The left and right logcosts of the segment splits : None or two-element list If not None, contains the left and right subtrees splitval : None or float The worst-error point, where the split will be if tolerance is too low splitlb : None or float The cost lower bound at splitval splitub : None or float The cost upper bound at splitval """ def __init__(self, bounds, sols, sweptvar, costposy): if len(bounds) != 2: raise ValueError("bounds must be of length 2") if bounds[1] <= bounds[0]: raise ValueError("bounds[0] must be smaller than bounds[1].") self.bounds = bounds self.sols = sols self.costs = np.log([mag(sol["cost"]) for sol in sols]) self.splits = None self.splitval = None self.splitlb = None self.splitub = None self.sweptvar = sweptvar self.costposy = costposy def add_split(self, splitval, splitsol): "Creates subtrees from bounds[0] to splitval and splitval to bounds[1]" if self.splitval: raise ValueError("split already exists!") if splitval <= self.bounds[0] or splitval >= self.bounds[1]: raise ValueError("split value is at or outside bounds.") self.splitval = splitval self.splits = [BinarySweepTree([self.bounds[0], splitval], [self.sols[0], splitsol], self.sweptvar, self.costposy), BinarySweepTree([splitval, self.bounds[1]], [splitsol, self.sols[1]], self.sweptvar, self.costposy)] def add_splitcost(self, splitval, splitlb, splitub): "Adds a splitval, lower bound, and upper bound" if self.splitval: raise 
ValueError("split already exists!") if splitval <= self.bounds[0] or splitval >= self.bounds[1]: raise ValueError("split value is at or outside bounds.") self.splitval = splitval self.splitlb, self.splitub = splitlb, splitub def posy_at(self, posy, value): """Logspace interpolates between sols to get posynomial values. No guarantees, just like a regular sweep. """ if value < self.bounds[0] or value > self.bounds[1]: raise ValueError("query value is outside bounds.") bst = self.min_bst(value) lo, hi = bst.bounds loval, hival = [sol(posy) for sol in bst.sols] lo, hi, loval, hival = np.log(list(map(mag, [lo, hi, loval, hival]))) interp = (hi-np.log(value))/float(hi-lo) return np.exp(interp*loval + (1-interp)*hival) def cost_at(self, _, value, bound=None): "Logspace interpolates between split and costs. Guaranteed bounded." if value < self.bounds[0] or value > self.bounds[1]: raise ValueError("query value is outside bounds.") bst = self.min_bst(value) if bst.splitlb: if bound: if bound == "lb": splitcost = np.exp(bst.splitlb) elif bound == "ub": splitcost = np.exp(bst.splitub) else: splitcost = np.exp((bst.splitlb + bst.splitub)/2) if value <= bst.splitval: lo, hi = bst.bounds[0], bst.splitval loval, hival = bst.sols[0]["cost"], splitcost else: lo, hi = bst.splitval, bst.bounds[1] loval, hival = splitcost, bst.sols[1]["cost"] else: lo, hi = bst.bounds loval, hival = [sol["cost"] for sol in bst.sols] lo, hi, loval, hival = np.log(list(map(mag, [lo, hi, loval, hival]))) interp = (hi-np.log(value))/float(hi-lo) return np.exp(interp*loval + (1-interp)*hival) def min_bst(self, value): "Returns smallest bst around value." 
if not self.splits: return self choice = self.splits[0] if value <= self.splitval else self.splits[1] return choice.min_bst(value) def sample_at(self, values): "Creates a SolutionOracle at a given range of values" return SolutionOracle(self, values) @property def sollist(self): "Returns a list of all the solutions in an autosweep" sollist = [self.sols[0]] if self.splits: sollist.extend(self.splits[0].sollist[1:]) sollist.extend(self.splits[1].sollist[1:-1]) sollist.append(self.sols[1]) return sollist @property def solarray(self): "Returns a solution array of all the solutions in an autosweep" solution = SolutionArray() for sol in self.sollist: solution.append(sol) solution.to_arrays() return solution def save(self, filename="autosweep.p"): """Pickles the autosweep and saves it to a file. The saved autosweep is identical except for two things: - the cost is made unitless - each solution's 'program' attribute is removed Solution can then be loaded with e.g.: >>> import cPickle as pickle >>> pickle.load(open("autosweep.p")) """ import pickle pickle.dump(self, open(filename, "wb")) class SolutionOracle: "Acts like a SolutionArray for autosweeps" def __init__(self, bst, sampled_at): self.sampled_at = sampled_at self.bst = bst def __call__(self, key): return self.__getval(key) def __getitem__(self, key): return self.__getval(key) def _is_cost(self, key): if hasattr(key, "hmap") and key.hmap == self.bst.costposy.hmap: return True return key == "cost" def __getval(self, key): "Gets values from the BST and units them" if self._is_cost(key): key_at = self.bst.cost_at v0 = self.bst.sols[0]["cost"] else: key_at = self.bst.posy_at v0 = self.bst.sols[0](key) units = getattr(v0, "units", None) fit = [key_at(key, x) for x in self.sampled_at] return fit*units if units else np.array(fit) def cost_lb(self): "Gets cost lower bounds from the BST and units them" units = getattr(self.bst.sols[0]["cost"], "units", None) fit = [self.bst.cost_at("cost", x, "lb") for x in self.sampled_at] 
return fit*units if units else np.array(fit) def cost_ub(self): "Gets cost upper bounds from the BST and units them" units = getattr(self.bst.sols[0]["cost"], "units", None) fit = [self.bst.cost_at("cost", x, "ub") for x in self.sampled_at] return fit*units if units else np.array(fit) def plot(self, posys=None, axes=None): "Plots the sweep for each posy" import matplotlib.pyplot as plt from ..interactive.plot_sweep import assign_axes from .. import GPBLU if not hasattr(posys, "__len__"): posys = [posys] for i, posy in enumerate(posys): if posy in [None, "cost"]: posys[i] = self.bst.costposy posys, axes = assign_axes(self.bst.sweptvar, posys, axes) for posy, ax in zip(posys, axes): if self._is_cost(posy): # with small tol should look like a line ax.fill_between(self.sampled_at, self.cost_lb(), self.cost_ub(), facecolor=GPBLU, edgecolor=GPBLU, linewidth=0.75) else: ax.plot(self.sampled_at, self(posy), color=GPBLU) if len(axes) == 1: axes, = axes return plt.gcf(), axes def autosweep_1d(model, logtol, sweepvar, bounds, **solvekwargs): "Autosweep a model over one sweepvar" original_val = model.substitutions.get(sweepvar, None) start_time = time() solvekwargs.setdefault("verbosity", 1) solvekwargs["verbosity"] -= 1 sols = Count().next firstsols = [] for bound in bounds: model.substitutions.update({sweepvar: bound}) try: model.solve(**solvekwargs) firstsols.append(model.program.result) except InvalidGPConstraint: raise InvalidGPConstraint("only GPs can be autoswept.") sols() bst = BinarySweepTree(bounds, firstsols, sweepvar, model.cost) tol = recurse_splits(model, bst, sweepvar, logtol, solvekwargs, sols) bst.nsols = sols() # pylint: disable=attribute-defined-outside-init if solvekwargs["verbosity"] > -1: print("Solved in %2i passes, cost logtol +/-%.3g" % (bst.nsols, tol)) print("Autosweeping took %.3g seconds." 
% (time() - start_time)) if original_val: model.substitutions[sweepvar] = original_val else: del model.substitutions[sweepvar] return bst def recurse_splits(model, bst, variable, logtol, solvekwargs, sols): "Recursively splits a BST until logtol is reached" x, lb, ub = get_tol(bst.costs, bst.bounds, bst.sols, variable) tol = (ub-lb)/2.0 if tol >= logtol: model.substitutions.update({variable: x}) model.solve(**solvekwargs) bst.add_split(x, model.program.result) sols() tols = [recurse_splits(model, split, variable, logtol, solvekwargs, sols) for split in bst.splits] bst.tol = max(tols) return bst.tol bst.add_splitcost(x, lb, ub) return tol def get_tol(costs, bounds, sols, variable): # pylint: disable=too-many-locals "Gets the intersection point and corresponding bounds from two solutions." y0, y1 = costs x0, x1 = np.log(bounds) s0, s1 = [sol["sensitivities"]["variables"][variable] for sol in sols] # y0 + s0*(x - x0) == y1 + s1*(x - x1) num = y1-y0 + x0*s0-x1*s1 denom = s0-s1 # NOTE: several branches below deal with straight lines, where lower # and upper bounds are identical and so x is undefined if denom == 0: # mosek runs into this on perfect straight lines, num also equal to 0 # mosek_cli also runs into this on near-straight lines, num ~= 0 interp = -1 # flag interp as out-of bounds else: x = num/denom lb = y0 + s0*(x-x0) interp = (x1-x)/(x1-x0) ub = y0*interp + y1*(1-interp) if interp < 1e-7 or interp > 1 - 1e-7: # cvxopt on straight lines x = (x0 + x1)/2 # x is undefined? stick it in the middle! lb = ub = (y0 + y1)/2 return np.exp(x), lb, ub
Let’s talk telephones, with a big nod to Pamela Paul, editor of the New York Times Book Review, who recently wrote about phones in the Times. At the risk that you’ll go there and not return, I’m going to link to that story — though the Times’s pay wall could possibly keep you from reading it. If you go there first, please come back! Paul’s story, headlined “Don’t Call Me, I Won’t Call You,” made several points about telephones, many of them dire if you’re in that business. Telephone use has declined so much in her house and among her acquaintances, she writes, that when the phone rings, she assumes some tragedy has occurred. Calling folks on the phone whenever you feel like it is rude, Pamela Paul writes. Who knows what the person on the other end of the line is up to? You’re asking him or her to drop everything, run up a flight of stairs, or do who-knows-what to answer the ring. In many “cubicle farm” office settings — our newsroom folks call ours “podland” — where people are aligned like eggs in a carton, phone calls can be all too public and intrusive. Many times the phone doesn’t get answered because of the “Caller ID” feature. It makes it easy to “blow off” calls from unknown numbers, creditors, and elderly parents who you know very well are calling to complain that you never call them any more. No wonder, as Paul points out, a study by Nielsen Media found that U.S. spending on text messaging will exceed spending on phones — even cellular ones — within three years. I’m old enough to have grown up in the days of really cumbersome telephonic communication — though not quite all the way back to the time when you had to crank your phone to reach a central operator, who would connect your call by plugging you into another person’s slot on her switchboard. 
We did, however, have to dial “0” and ask a local operator to put us through to a long-distance operator, who would place our distant calls after we verbally told her the city, state, and number we wanted to reach. I seem to recall that if the number was busy or did not answer, that special operator would keep trying and call us back when she got through. At any rate, these transacrtions were frightfully expensive, and we made precious few of them. Our black phone was made of something called “Bakelite,” which felt like the same stuff they made bowling balls out of. It weighed as much, and it was operated with a rotary dial. You’d stick your finger into a hole above each number you were dialing and turn the dial clockwise until your finger hit a sort of curved post. Then you’d let go and listen as the dial slowly rewound, clicking as it went, over and over again until the full number had been entered and a connection had been made. This took forever if the number had a lot of 8s or 9s. I specified all that because there have been some hilarious hidden-camera peeks at today’s young people trying, and failing, to make a phone call on a rotary phone. They treat it gingerly and ineptly, as if it were an ancient instrument of torture. Our telephone “exchange” at home was WI, short for “Winton,” the street on which we lived. The full telephone number was WI-2634. After awhile, enough people were added to the WI exchange that they had to insert another number, and ours became WI-7-2634. It was the same arrangement that went into what may be the most famous U.S. phone number ever: “PEnnsylvania 6-5000.” This was — and, believe it or not, still is — the number of New York City’s Hotel Pennsylvania, and the title of a hot Glenn Miller big-band song in the 1940s. Eventually, in a development that outraged my mother and grandmother, letter designations for telephone exchanges were eliminated in favor of numbers exclusively on the dial. 
“WI” turned into the cold and impersonal “94.” The phone book — another antiquated artifact — became a sea of numbers, and soon we were forced to add area codes to many calls as well, even to people across town. Imagine finger-clickety-clicking your way through a number such as 974-679-8969 on a slow rotary dial. (1) If you picked up the phone and someone was talking, you were to immediately and gently hang up. You couldn’t make a call anyway, since there’d be no dial tone. (2) You were sternly urged to keep your calls short, especially once you heard the other party click on, wanting the line. Party lines, like pay phones, are almost all historical curiosities, of course. In fact, these days telephone calling of almost any sort has ceded the stage to texting and e-mailing. A lot of texters become two-thumb wizards. • You can send computer messages from almost anywhere and at any time and can answer them on your schedule, if you choose to reply at all. • If someone asks a substantive question in an e-mail, you can ruminate before replying. But if somebody asks you that question on the phone, you’re compelled to answer it immediately, off the top of your head, or call back. • You control the length of the reply and don’t feel obligated to “chat” or “catch up” if you don’t want to. Your message is much more likely to be brief and to the point. • Unlike phone calls, e-mails can be sent to a number of people at once if you have a global point to make. Still, we old-timers sometimes pine for the “olden” days, even remembering the times when we’d get actual, personal letters in the mail. We’d even share phone conversations with another live person. Imagine! A phone call is, as writer Lisa Birnbach told Pamela Paul, “warmer and more honest” than cold, virtual type on a screen. I write a line. His desktop blurts that he has a message from me. He replies, quick as a fox. We “chat” back and forth, electronically. And briskly, unless he throws in one of his inspired puns. 
That compels me to fire one back. And so goes the one-upsmanship until one of us concedes by simply giving up. But something is still missing in this world of texting and e-mailing and IMing. Something more than the warmth that Lisa Birnbach mentioned. I call it nuance. Tone of voice. If I told you in an e-mail that your dress the other day looked simply marvelous, you couldn’t be sure whether I was serious or being sarcastic. On the phone, you could tell by the way I spoke the word. Texting and tweeting, in particular, encourage impatience, crunching communication into a few, often-abbreviated words that rule out elegant or juicy descriptions for lack of space. So much for imagination, storytelling, and any emotion other than one that can be impersonated using exclamation points!!!! So some people still prefer to talk on the phone. They enjoy the interplay, the emotion, and those nuances. Others — and I’m one — find that conveying complicated thoughts via the keyboard actually takes more time than delivering them on the phone. Plus, the bear traps built into e-mails scare me. Ever spend an hour composing a masterpiece, only to accidently delete it? Happens to me all the time. Or, by mistake, send a catty critique of someone to that person — or the world? I asked a few friends to weigh in on the state of modern telephony — love that erudite-sounding word: tell-EFF-inny. Yup. I was asking about telephoning — by e-mail. The phone is hardly obsolete, Art contends. But it’s also true that an increasing amount of phone traffic travels over the Internet. (Vonage, Skype, Google Voice, and other VOIP — that’s Voice Over Internet Protocol — services). Decades ago, the equivalent of SMS (text) was telegrams. You had a short message to send — particularly long distance, which was expensive — you sent a wire. That business began to decline after WWII and effectively ended by sometime about 1970 or so. 
I forget to mention telegrams in my earlier reverie about communications’ good-ole-days. Human communication started in caves, around fires, and while running from saber-toothed tigers. Then came smoke signals, messages drummed on hollow logs and such. Then we talked to each other over wires. Then conversations coursed through the air. More recently, a lot of us stopped speaking and listening to the human voice whenever we could, preferring to tap out messages back and forth. Then our written chats grew shorter and shorter until just about all that was left were shorthand abbreviations and acronyms. Modern communication has become so terribly cryptic. Surrepticiously. Secretly or without someone’s knowledge.
import time
import numpy as np
import scipy.linalg as lin
import scipy.stats as stat
import matplotlib.pyplot as plt
import kernel_qp as qp


def log_likelihood(theta, t, Y):
    """Gaussian-process log-likelihood of observations Y at times t.

    theta : (Nw, 5) array -- one hyper-parameter vector per ensemble walker.
    t, Y  : observation times/values; both are flattened to 1-D here.

    Returns a length-Nw array with, for each walker,
    -0.5*N*log(2*pi) - 0.5*log|K| - 0.5*Y^T K^{-1} Y,
    where K is the covariance matrix built by kernel_qp.gamma_qp
    (presumably a quasi-periodic GP kernel -- confirm in kernel_qp).
    """
    tLen = len(t.reshape(-1))
    Nw = theta.shape[0]
    Y = np.ascontiguousarray(Y.reshape(-1))
    # One (tLen x tLen) covariance matrix per walker: K has shape (Nw, tLen, tLen).
    K = qp.gamma_qp(t, theta)
    Kinv_mu = np.empty((Nw, tLen))
    for i in range(Nw):
        # Cholesky-factor K[i] in place, then solve K[i] x = Y.
        K[i], lower = lin.cho_factor(K[i])
        Kinv_mu[i] = lin.cho_solve((K[i], lower), Y)
    muT_Kinv_mu = (Y*Kinv_mu).sum(axis=1)  # Y^T K^{-1} Y, per walker
    # After cho_factor the stored diagonal is that of the Cholesky factor,
    # so sum(log diag) = 0.5 * log|K|.
    HalfLogDetK = np.log(K.diagonal(axis1=1, axis2=2)).sum(axis=1)
    return -0.5*tLen*np.log(2*np.pi) - HalfLogDetK - 0.5*muT_Kinv_mu


def log_prior(theta):
    """Sum of independent log-priors over the 5 columns of theta (Nw, 5)."""
    out = stat.norm.logpdf(theta[:,0], loc=0.0005, scale=0.00025)  # Sc
    out += stat.norm.logpdf(theta[:,1], loc=25.3, scale=2.0)  # Pqp
    out += stat.uniform.logpdf(theta[:,2], loc=0.1, scale=1.5)  # lambda_P
    out += stat.norm.logpdf(theta[:,3], loc=50.0, scale=25.0)  # lambda_e
    out += stat.norm.logpdf(theta[:,4], loc=0.0005, scale=0.00025)  # sig
    return out.squeeze()


def log_posterior(theta, t, Y):
    """Unnormalized log-posterior: log-likelihood plus log-prior."""
    return log_likelihood(theta, t, Y) + log_prior(theta)


def proposal(size=1, a=2.0):
    """Draw stretch-move scale factors Z on [1/a, a] via inverse-transform
    sampling of g(z) proportional to 1/sqrt(z) (Goodman & Weare 2010)."""
    U = stat.uniform.rvs(size=size)
    return (U*(a - 1) + 1)**2/a


def main():
    """Run an affine-invariant ensemble MCMC over the 5 kernel
    hyper-parameters: two half-ensembles are updated alternately with the
    stretch move, then the chains and acceptance counts are saved to disk.
    """
    Nparam = 5; Ni = int(1e4)  # parameter count; number of iterations
    Nw = int(1e2)   # total number of walkers
    Nw2 = int(Nw/2)  # walkers per half-ensemble
    Ns = int(Ni*Nw)  # NOTE(review): total sample count -- computed but never used
    # Pre-draw every stretch factor, acceptance uniform and partner index.
    Z = proposal(size=(Ni, Nw2, 2))
    r = stat.uniform.rvs(size=(Ni, Nw2, 2))
    # Acceptance threshold of the stretch move:
    # accept when logP(new) - logP(old) >= log(r) - (Nparam-1)*log(Z).
    logrZNp = np.log(r*(Z**(1 - Nparam)))
    rw = stat.randint.rvs(low=0, high=Nw2, size=(Ni, Nw2, 2))
    # Initial walker positions drawn from the same distributions as log_prior.
    x01 = stat.norm.rvs(loc=0.0005, scale=0.00025, size=(Nw2, 2))  # Sc
    x02 = stat.norm.rvs(loc=25.3, scale=2.0, size=(Nw2, 2))  # Pqp
    x03 = stat.uniform.rvs(loc=0.1, scale=1.5, size=(Nw2, 2))  # lambda_P
    x04 = stat.norm.rvs(loc=50.0, scale=25.0, size=(Nw2, 2))  # lambda_e
    x05 = stat.norm.rvs(loc=0.0005, scale=0.00025, size=(Nw2, 2))  # sig2
    theta0 = np.array([x01, x02, x03, x04, x05])  # current state, (Nparam, Nw2, 2)
    theta = np.zeros((Nparam, Ni, Nw2, 2))  # full chain history
    acpt = np.zeros(Z.shape)  # per-proposal acceptance indicators
    fname = r'C:\Users\Nicholas\Documents\Walsworth_group\solar_RVs\solarSindexCutDaily.txt'
    data = np.loadtxt(fname, skiprows=1)
    t = data[:,0]
    Y = data[:,2]
    for l in range(0, Ni):
        if l % 1 == 0:  # NOTE(review): l % 1 is always 0, so this prints every iteration
            print(100*l/Ni, '% complete')
        # --- update half-ensemble 0, stretching toward random partners in half 1 ---
        j = rw[l,:,0]
        thetaP = theta0[:,j,1] + Z[l,:,0]*(theta0[:,:,0] - theta0[:,j,1])
        logPthetaP = log_posterior(thetaP.T, t, Y)
        logPtheta0 = log_posterior(theta0[:,:,0].T, t, Y)
        idx = (logPthetaP - logPtheta0) >= logrZNp[l,:,0]  # accepted walkers
        acpt[l,idx,0] += 1
        theta[:,l, idx,0] = thetaP[:,idx]
        theta[:,l,~idx,0] = theta0[:,~idx,0]
        theta0[:,:,0] = theta[:,l,:,0]  # commit before updating the other half
        # --- update half-ensemble 1, stretching toward random partners in half 0 ---
        j = rw[l,:,1]
        thetaP = theta0[:,j,0] + Z[l,:,1]*(theta0[:,:,1] - theta0[:,j,0])
        logPthetaP = log_posterior(thetaP.T, t, Y)
        logPtheta0 = log_posterior(theta0[:,:,1].T, t, Y)
        idx = (logPthetaP - logPtheta0) >= logrZNp[l,:,1]
        acpt[l,idx,1] += 1
        theta[:,l, idx,1] = thetaP[:,idx]
        theta[:,l,~idx,1] = theta0[:,~idx,1]
        theta0[:,:,1] = theta[:,l,:,1]
    # Persist chains and acceptance history (NumPy appends ".npy").
    fname = r'C:\Users\Nicholas\Documents\Walsworth_group\solar_RVs\S_samples20'
    np.save(fname, theta)
    fname = r'C:\Users\Nicholas\Documents\Walsworth_group\solar_RVs\accept20'
    np.save(fname, acpt)


if __name__ == '__main__':
    main()
This common approach guarantees full transparency and access to important project information for all team members – at any time and any place. Each stakeholder can get their own task-specific view of this project information. Additionally, we have implemented some tools to support the communication between all stakeholders (e-mail based discussions, task management). Use Axure’s specification software to effectively document and communicate your designs with clients, colleagues, and stakeholders. Save time and money by instantly generating a customizable functional specification in Microsoft Word format. Get Started Fast – An easy setup and intuitive interface make launching projects a snap. SharePoint Powered – Security trimming beyond documents, so users only see what they need. Track Results – Stay informed with SharePoint dashboards and notifications. Centralised repository – CaliberRM provides a central, secure repository for project requirements. By storing requirements in a common repository, the most current data is available to the people who need it, whenever they need it. Adaptable – CaliberRM is designed to bring speed and agility to the requirements process. Whether your process is highly structured or very agile, CaliberRM can be customized to support the way your teams work. It is easily customized directly through the GUI or by using helpful wizards; complex and/or proprietary scripting languages are not required. Requirements traceability across the lifecycle – CaliberRM has an open architecture that permits requirements to be linked to a variety of artifacts across the lifecycle. Whether it be a source code file or change request managed in Borland StarTeam, a test case or test set managed in Mercury interactive TestDirector, or a task from Microsoft Project, CaliberRM can link requirements to the appropriate artifacts.
Impact analysis throughout the application lifecycle – Multiple methods of traceability visualization help users immediately understand the scope of analysis required to gauge the effect of a requirements change. Traces are easily created using drag and drop and can link assets in multiple systems. Online glossaries to standardise and define terminology – Glossaries can also be used to define ambiguous and untestable terms — words that don’t belong in requirements. CaliberRM helps to refine requirements on the way in, so they don’t become costly defects in later stages. IMPROVE COLLABORATION – CaliberRM provides an enterprise repository that delivers simultaneous, secure, and live requirements data for all projects. Enjoy using the flexibility of rich-text formatting and embedded images and tables to express requirements in a way that makes the most sense for your organization. EXPAND YOUR VISION WITH DATAMART – CaliberRM Datamart is a powerful business intelligence solution that provides managers with dashboard-style reporting to measure, track, and analyze the efficiency and effectiveness of the requirements management process. MAKE MORE ACCURATE PREDICTIONS – CaliberRM ESTIMATE Professional includes powerful requirements-based estimation capabilities that help project managers plan project scope, schedule, and resources throughout the software development lifecycle with greater accuracy. ACCESS REQUIREMENTS FROM ANYWHERE – CaliberRM clients are architected to communicate efficiently via TCP/IP giving team members quick access to live requirements data from virtually anywhere, even with low bandwidth connections. REDUCE LEARNING CURVES WITH EASY-TO-USE GUI – CaliberRM makes it easier for stakeholders from many backgrounds to collaborate — increasing the amount of usable knowledge available for developers. In the event of staff turnover, CaliberRM makes it easier for new employees to get up to speed quickly, helping to reduce risks to the project schedule.
RECEIVE NOTIFICATION OF IMPORTANT CHANGES – Immediate notification tied to changes of project requirements means responsible users won’t be surprised by changes that are not evident until later in the development cycle. FOCUS ON WHAT’S IMPORTANT – Prioritize requirements by sorting and filtering on cost, priority, or other key attributes within the spreadsheet views. Views can be created based on data collected to make the management of requirements easier. VIEW COMPREHENSIVE AUDIT TRAIL AND CHANGE HISTORY – Every change in CaliberRM is automatically audited. Each change creates a unique history record, highlighting the differences between one version of a requirement and another, including the reason for the change. LEVERAGING BASELINING TO MEASURE VOLATILITY – Since CaliberRM captures versions of each requirement, it also enables baselining – the versioning of a whole set of requirements with each requirement at its specific version. This baseline provides the capability to both electronically sign and view a snapshot of requirements at a point in time. By comparing baselines in a handy side-by-side report, users can immediately see where volatility, modifications, additions, and deletions have taken place. • A multi-tier, client-server architecture that is scalable from a single user to hundreds of users with optimal performance and scalability. • A high-performance, embedded enterprise database is included. All the project data, documents, and other artifacts are stored in a single, centrally-controlled location. CaseComplete helps you gather, organize, and share use cases and requirements. The tool’s strength is its ease of use. Within minutes, you can install it, write a use case, and generate a clear, good-looking requirements document. HP Quality Center is an enterprise-ready solution whose native capabilities are growing with each release.
In addition, our open APIs allow us to enrich our solution through integrations and ensure that HP proactively supports the complex eco-system of processes, tools and applications that our customers need to manage. Indeed, industry analysts argue that HP’s Quality Center is already a disruptive force in the marketplace and will continue to be an even stronger alternative to incumbent requirements management offerings. IBM Rational RequisitePro provides requirements management, traceability, and impact analysis capabilities for project teams, primarily suited to organizations creating application software. See IBM Rational Requirements Composer to see the ‘next generation RequisitePro’. IdeaShare enables soliciting and harvesting of ideas, suggestions, and feedback to improve products and services. IdeaShare’s collaboration-centric features empower users to create, evolve, and rank ideas. Organizations can crowd-source ideas from their communities of customers, partners, or employees. IdeaShare’s novel approach to idea management is simpler, faster, and produces better results. iRise Studio – iRise Studio is a powerful, easy-to-use application definition solution used by business experts to quickly assemble functionally rich simulations of Web-based applications in a matter of hours. iRise Studio is used for new custom applications, portals, enhancements to existing systems and Web-based front-ends to packaged software. Business people can quickly lay out the page flow of simulations and create high fidelity pages that precisely mimic not only the look and feel of the final application, but the business logic and data interactions as well. iRise iDoc – Stakeholders can have fun with application simulations that encourage dialog, drive consensus and quickly iterate to specifications that act as visual blueprints for what to build. Business analysts publish simulations as interactive definition documents, or iDocs, then present them to stakeholders for review.
Rather than pages and pages of text and screen shots, stakeholders now have an interactive, functionally rich preview of how the final application will look and behave. They can test drive the simulation as if it were the real thing. Walkthrough notes help guide the review process and feedback comments can be sent to the business analyst with the click of a button. Leap SE is an advanced requirements engineering CASE tool that produces object-oriented models directly from a system requirements repository or specification (SRS). By translating English into logical models for software development, Leap SE achieves RAD from the source, dramatically shortening the systems analysis phase of software projects. Project managers can reduce their systems engineering staffing needs with Leap SE, while promoting the development of quality requirements. Twenty-two templates and a Requirement Builder are provided for fast and flexible composition. Every time a new requirement is saved, Leap SE’s object model database is updated to reflect the new entities, relationships, attributes and methods. From this database, a directory of header files can be generated at any time to give software engineering a much-needed head start on design. Moreover, it’s just one small step to importing these header files into a reverse-engineering CASE tool to quickly produce a host of class diagrams. Leap SE is fully integrated with MS Access. Data model output, in the form of SQL, can be run in the RDBMS to produce entity-relationship diagrams, tables, and relationships complete with referential integrity. Notes is not a requirements tool as such, but is very effective place to store requirements specifications. Set up a new Notes database for each specification, and give access to all the stakeholders. Use DocLinks to link to documents that have some relation to your specification – interview notes, meeting minutes. 
Attach the function point counts to the requirements, then link each of them to the appropriate design documents. Release 5 now includes a bookmark bar that creates quick links to frequently needed information and support for a universal Internet mailbox – whether they’re on a Lotus Domino server or hosted by an Internet Service Provider (ISP). DocumentCommunication is easy with Optimal Trace’s automated document generation and a selection of pre-canned templates that are fully customizable to your company—specific standards and processes to ensure a high-quality finish. Multiple formats mean all stakeholders can communicate in a way that suits each best (Word, Excel, html). Edits can be easily reversed back into the main project to keep the repository and the document in sync. Complete projects can be exported to Microsoft Project, CSV files or other formats. CollaborateOptimal Trace, the ultimate collaboration tool, provides a central secure repository for all project requirements and artifacts so the most current data is always available for live work or uploading for remote usage. Multiple users can work concurrently online and make real-time edits on the same project. Using Optimal Trace off line to capture users’ scenarios live on-site helps users see the project structure emerge. This functionality builds confidence and drives consensus and approval. The baseline capability facilitates review and final sign-off and also allows easy visibility of additions, deletions and amendments. An exclusive innovation you won’t find elsewhere, Polarion LiveDoc™ – online structured specification documents, are fast becoming the way companies of all sizes gather, author, approve, validate, and manage requirements. Grow into Polarion’s Test Management and/or enterprise ALM solutions that seamlessly tie in with your requirements data. Just add licenses to your installation… nothing to install or integrate. 
Community Edition – Perfect for students, beginners, and other non-commercial users, this free edition is the ideal introduction to UML, complete with Java forward engineering. Standard Edition – Aimed at analysts, this edition makes quick and easy work of designing and documenting models with Java reverse engineering, UMLdoc, and plug-in extensions. Professional Edition – Developers will find a full suite of powerful features such as an Eclipse integration, Java roundtrip engineering, and code generation for many other languages. Enterprise Edition – Teams of developers can use real-time collaboration functions and versioning coupled with standalone Java roundtrip engineering to develop software across the room or across the globe. Embedded Enterprise Edition – Embedded system developers can use the team capabilities of the enterprise edition with c and c++ generation specifically designed for embedded systems, along with Java reverse engineering. Competitive Product Positioning – QFD enables specific competitive positioning targets that are communicated throughout the organization and provides a shared focus for management and project team. Product Portfolio Management – Application of QFD provides a unique opportunity to not only define what the current new product should be all about but also what constitutes better as technologies improves. Technology Planning – QFD provides visibility to technology shortcomings and focus for future technology needs. Timely progress communication (to support stage-gate process) – Each step in the QFD process produces the exact data needed for decisions to be made by management at each gate in the product development process. Meaningful definition of Critical Parameters for DFSS process – The QFD process enables the project team to identify the most critical parameters needed to obtain the competitive positioning that will be critical for the success of the project. 
Data driven decisions – The QFD process limits the reliance on subjective opinions to make key decisions by letting decisions be driven by data collected from customers. Traceability of decisions and intent (requirements management) – The proper execution of QFD leaves an organization with traceability of requirements and design decisions all the way back to the initial targeted customer and business needs. Market Opportunity Maps – Produce Market Opportunity Map reports identifying the best opportunities for product improvement. Relationship Tree – Generate relationship tree diagrams showing measures for each requirement in a graphical tree and branch format. Templates – Print out and work with blank chart templates which are useful as documents-in-progress during team meetings. Printing Spreadsheets – Print out spreadsheets as they appear in QFDcapture. RaQuest is short for ‘Requirement Adjustment Quest,’ reflecting our hope that it will make system development easier. The pronunciation of RaQ means ‘easy’ or ‘happy’ in Japanese. ReqView is a simple to use requirements management tool in which you can capture structured requirements for a software or system product and manage traceability of design, tests and risks to the product requirements. ReqView is the solution for companies from medical, aerospace & defense, automotive and other industries that saves a lot of effort with documenting compliance with industry standards. Flexibly configure your project documents and traceability. ReqView scales from agile SW development projects to complex system development project adopting V-Model methodology. Define custom link types enabling easier analysis of requirements traceability. Capture a structured document in an easy-to-use tabular view displaying the document hierarchy, requirement description, attributes, discussion and traceability links. Describe requirements in a rich text editor, attach images, PDF files, or other documents. 
Setup custom attributes for requirements, test cases and risks for your process. For instance, you can track requirements status, priority, target release or describe an acceptance criteria. Copy or move objects or whole sections. Edit selected attribute values at once. Filter requirements matching an advanced logical condition evaluating requirement description, attributes and traceability links. Find a keyword by a full text search. Comment requirements and update their customer or supplier status. Customize HTML report templates and generate reports preserving displayed columns, filter, sorting and navigable traceability links. Create MS Word documents with custom title page, table of contents and paragraph styles. Link related requirements, tests, risks and other project information. Browse the requirement traceability matrix in the context of the source or target document structure. Display custom traceability columns with multi-level traceability information. Adjust templates for multi-level traceability reports to match your custom layout. Analyze traceability information including important attributes of linked objects, such as user story acceptance or test status. Visualize end-to-end traceability between business needs, requirements, tests and risks. Manage project risks using Failure Mode and Effects Analysis (FMEA) or other risk management methodology. For each potential risk capture mitigation actions. Link project risks to the related high-level business or functional requirements. Browse history of changes to track changed requirement attributes, comments or traceability links. Review changes between project versions in a unique side-by-side compare view. Manage project revisions in your favorite Version Control System (VCS). Import existing documents from Word, Excel or import ReqIF files from IBM Rational DOORS. Export a document into any structured text format, such as HTML, CSV, XML or JSON, with custom layout and formatting. 
Work anywhere on any desktop PC or Mac. Open a shared project from a network drive, lock a document for exclusive edit and update it offline. ReqView free version with limited features is available for download. A unique user interface makes it possible to systematically consider vast numbers of combinations of circumstances. In its most powerful mode of use, Statestep supports modeling system behaviour as a finite state machine – in a simple form that is immediately understood by all reviewers and allows for easy annotation with informal comments or notes. The power of TopTeam can be explained by the integral design that looks beyond just requirements. The easy to use tool, with rich features “off the shelf”, ensures requirements are really used by all disciplines that contribute to achieving the common objectives. The Verification Studio (Formerly called Requirements Quality Analyzer – RQA) allows you to define and manage the V&V of the SOI, System element or any kind of development work-product (requirements, SysML models, MODELICA models, etc.) by measuring, calculating and (eventually) improving their Quality. Poor quality of work-products during the concept and design phases of a project leads to rework, extra costs, delays and, if not detected, severe consequences. A tool to automate the routine quality inspection and analysis of many types of work-products minimizes the cost of quality appraisals, while dramatically reducing the costs of poor quality. VisibleThread develops document content analysis software that identifies defects and streamlines document compliance. The company’s analysis software enables users to scan MS Office and PDF docs for liability concerns, automatically create compliance documents, coordinate and track changes from multiple stakeholders, and provide oversight throughout proposal development and IT delivery projects. 
VisibleThread can provide specific support for those promoting Volere, by representing the template as a ‘structure outline’ in the solution. This means that docs can be proactively checked to ensure certain sections are filled in while helping guide authors (using an MS Word plug-in) to fill in content according to the core sections as recommended by the template. Intuitive models allow you to quickly capture your “current state” metrics as a baseline against which future changes can be evaluated. These “live” dynamic visual models support on-going strategic planning and control within the organization. They can also be used to provide multimedia training, executive information systems, project planning, development, and beyond.
from model.contact import Contact
import re


class ContactHelper:
    """Page-object helper for the addressbook contact pages.

    Wraps all Selenium interactions for creating, editing, deleting and
    reading contacts.  A simple in-memory cache (``contact_cache``) avoids
    re-scraping the contact list; every mutating operation invalidates it.
    """

    def __init__(self, app):
        self.app = app

    def open_home_page(self):
        """Navigate to the contact list unless it is already displayed."""
        wd = self.app.wd
        if not ((wd.current_url.endswith("addressbook/") or wd.current_url.endswith("/index.php")) and (
                len(wd.find_elements_by_link_text("Last name")) > 0)):
            wd.find_element_by_link_text("home").click()

    def change_field_value(self, field_name, text):
        """Type ``text`` into the form input named ``field_name``.

        ``text is None`` means "leave this field untouched".
        """
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def fill_contact_form(self, contact):
        """Fill every supported field of the add/edit form from ``contact``."""
        self.change_field_value("firstname", contact.firstname)
        self.change_field_value("lastname", contact.lastname)
        self.change_field_value("address", contact.address)
        self.change_field_value("home", contact.home_phone)
        self.change_field_value("mobile", contact.mobile_phone)
        self.change_field_value("work", contact.work_phone)
        self.change_field_value("fax", contact.fax)
        self.change_field_value("email", contact.email1)
        self.change_field_value("email2", contact.email2)
        self.change_field_value("email3", contact.email3)
        self.change_field_value("homepage", contact.homepage)

    def create(self, contact):
        """Create a new contact and return to the home page."""
        wd = self.app.wd
        self.open_home_page()
        # init contact creation
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contact)
        # submit contact creation
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        wd.find_element_by_link_text("home page").click()
        self.contact_cache = None  # list changed -> drop the cache

    def edit_first_contact(self, contact):
        """Convenience wrapper: edit the first contact in the list."""
        self.edit_contact_by_index(0, contact)

    def edit_contact_by_index(self, index, contact):
        """Open the edit form of the ``index``-th row and apply ``contact``."""
        wd = self.app.wd
        self.open_home_page()
        # init contact editing: the edit icon lives in the 8th table cell
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()
        self.fill_contact_form(contact)
        # submit update
        wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
        self.contact_cache = None

    def edit_contact_by_id(self, id, contact):
        """Edit the contact whose row checkbox has DOM id ``id``."""
        wd = self.app.wd
        self.open_home_page()
        # locate the row through its checkbox, then click the edit link (8th cell)
        checkbox = wd.find_element_by_id(id)
        row = checkbox.find_element_by_xpath("./../..")
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()
        self.fill_contact_form(contact)
        # submit update
        wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
        self.contact_cache = None

    def open_contact_to_edit_by_index(self, index):
        """Open the edit page of the ``index``-th contact row."""
        wd = self.app.wd
        self.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()

    def open_contact_view_by_index(self, index):
        """Open the read-only details page of the ``index``-th contact row."""
        wd = self.app.wd
        self.open_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()

    def select_contact_by_index(self, index):
        """Tick the selection checkbox of the ``index``-th contact row."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def select_contact_by_id(self, id):
        """Tick the selection checkbox whose value attribute equals ``id``."""
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()

    def delete_first_contact(self):
        """Convenience wrapper: delete the first contact in the list."""
        self.delete_contact_by_index(0)

    def delete_contact_by_index(self, index):
        """Delete the ``index``-th contact and confirm the browser alert."""
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_index(index)
        # submit deletion
        wd.find_element_by_xpath("//div/div[4]/form[2]/div[2]/input").click()
        # confirm deletion alert
        wd.switch_to_alert().accept()
        self.contact_cache = None

    def delete_contact_by_id(self, id):
        """Delete the contact with checkbox value ``id`` and confirm the alert."""
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_id(id)
        # submit deletion
        wd.find_element_by_xpath("//div/div[4]/form[2]/div[2]/input").click()
        # confirm deletion alert
        wd.switch_to_alert().accept()
        self.contact_cache = None

    def count(self):
        """Return the number of contacts currently listed on the home page."""
        wd = self.app.wd
        self.open_home_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cached result of get_contact_list(); None means "stale, re-scrape".
    contact_cache = None

    def get_contact_list(self):
        """Scrape the home-page table into a list of Contact objects (cached)."""
        if self.contact_cache is None:
            wd = self.app.wd
            self.open_home_page()
            self.contact_cache = []
            for element in wd.find_elements_by_name("entry"):
                lastname = element.find_element_by_css_selector("td:nth-child(2)").text
                firstname = element.find_element_by_css_selector("td:nth-child(3)").text
                address = element.find_element_by_css_selector("td:nth-child(4)").text
                all_emails = element.find_element_by_css_selector("td:nth-child(5)").text
                all_phones = element.find_element_by_css_selector("td:nth-child(6)").text
                homepage = element.find_element_by_css_selector("td:nth-child(10)").text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, address=address,
                                                  all_emails_from_home_page=all_emails,
                                                  all_phones_from_home_page=all_phones,
                                                  homepage=homepage, id=id))
        return self.contact_cache

    def get_contact_from_edit_page(self, index):
        """Read the full contact fields back from the edit form of row ``index``."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        home_phone = wd.find_element_by_name("home").get_attribute("value")
        mobile_phone = wd.find_element_by_name("mobile").get_attribute("value")
        work_phone = wd.find_element_by_name("work").get_attribute("value")
        email1 = wd.find_element_by_name("email").get_attribute("value")
        email2 = wd.find_element_by_name("email2").get_attribute("value")
        email3 = wd.find_element_by_name("email3").get_attribute("value")
        homepage = wd.find_element_by_name("homepage").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        return Contact(firstname=firstname, lastname=lastname, address=address,
                       home_phone=home_phone, mobile_phone=mobile_phone,
                       work_phone=work_phone, email1=email1, email2=email2,
                       email3=email3, homepage=homepage, id=id)

    def get_contact_from_view_page(self, index):
        """Parse the phone numbers from the read-only view page of row ``index``."""
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        home_phone = re.search("H: (.*)", text).group(1)
        mobile_phone = re.search("M: (.*)", text).group(1)
        work_phone = re.search("W: (.*)", text).group(1)
        return Contact(home_phone=home_phone, mobile_phone=mobile_phone,
                       work_phone=work_phone)

    def clear(self, s):
        """Strip the decoration characters "() -" that the UI adds to values."""
        # FIX: removed an unused local WebDriver lookup.
        return re.sub("[() -]", "", s)

    def merge_phones_like_on_home_page(self, contact):
        """Join the contact's phones exactly as the home page renders them."""
        return "\n".join(filter(lambda x: x != "",
                                map(lambda x: self.clear(x),
                                    filter(lambda x: x is not None,
                                           [contact.home_phone, contact.mobile_phone,
                                            contact.work_phone, contact.fax]))))

    def merge_emails_like_on_home_page(self, contact):
        """Join the contact's e-mails exactly as the home page renders them."""
        return "\n".join(filter(lambda x: x != "",
                                map(lambda x: self.clear(x),
                                    filter(lambda x: x is not None,
                                           [contact.email1, contact.email2, contact.email3]))))

    def add_contact_to_group(self, app, contact, group):
        """Select ``contact`` on the home page and add it to ``group``."""
        # FIX: removed an unused local WebDriver lookup.
        self.open_home_page()
        self.select_contact_by_id(contact.id)
        app.group.add_to_group(group)

    def remove_contact_from_group(self, app, contact, group):
        """Remove ``contact`` from ``group`` and navigate back to the group view."""
        wd = self.app.wd
        self.open_home_page()
        app.group.select_group_on_homepage_by_id(group.id)
        self.select_contact_by_id(contact.id)
        wd.find_element_by_name("remove").click()
        # FIX: look the confirmation link up once instead of twice.
        group_link = wd.find_element_by_xpath("//div/div[4]/div/i/a")
        assert ("group page \"" + group.name + "\"") == group_link.text
        group_link.click()
The course’s main topic is biomechanical assessment as an important tool for PRM Physicians. The cost of the course is 247.93 + VAT (Total 300 €) and the IBV has generated a discount coupon for 5% of the basic amount for those students who come from ESPRM (ESPRM4R2018). The platform managers are experts from the Biomechanics Institute of Valencia (IBV), and the Politecnico di Milano (POLIMI) will also cooperate in the teaching.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the LiveDelay class.
"""
from harpia.GUI.fieldtypes import *
from harpia.model.plugin import Plugin


class LiveDelay(Plugin):
    """
    Harpia plugin that delays a live image stream by a fixed number of
    frames, using a circular buffer of cloned IplImage frames in the
    generated C/OpenCV code.
    """

    # -------------------------------------------------------------------------
    def __init__(self):
        Plugin.__init__(self)
        self.frameNumber = 5  # delay length in frames == circular-buffer size

        # Code-generation target.
        # BUGFIX: these two assignments originally sat after the `return`
        # statement in generate_vars() and were therefore never executed.
        self.language = "c"
        self.framework = "opencv"

        # Appearance
        self.help = "Inserts a delay inside a live stream."
        self.label = "Live Delay"
        self.color = "250:20:30:150"
        self.in_types = ["harpia.extensions.c.ports.image"]
        self.out_types = ["harpia.extensions.c.ports.image"]
        self.group = "General"

        self.properties = [{"name": "Time (in frames)",
                            "label": "frameNumber",
                            "type": HARPIA_INT,
                            "lower": 1,
                            "upper": 200,
                            "step": 1
                            }
                           ]

        # ------------------------------C/OpenCv code--------------------------
        # Per-frame processing: overwrite the oldest slot with a clone of the
        # incoming frame, advance the ring index, and emit the now-oldest frame.
        self.codes[2] = '''
            if(block$id$_img_i0){
                cvReleaseImage(&(block$id$_buffer[i_$id$]));
                block$id$_buffer[i_$id$] = cvCloneImage(block$id$_img_i0);
                i_$id$++;
                i_$id$ %= $frameNumber$;
                block$id$_img_o0 = block$id$_buffer[i_$id$];
            }
            '''

        # Per-frame cleanup: release the consumed input frame.
        self.codes[3] = 'cvReleaseImage(&block$id$_img_i0);\n'

        # Final cleanup: release every buffered frame.
        self.codes[4] = '''
            for(i_$id$=0; i_$id$<$frameNumber$; i_$id$++)
                if(block$id$_buffer[i_$id$] != NULL)
                    cvReleaseImage(&(block$id$_buffer[i_$id$]));
            '''

    # ----------------------------------------------------------------------
    def generate_vars(self):
        """Return the C declarations/initialization for this block's state:
        input/output image pointers, the ring index and a pre-allocated,
        zeroed buffer of ``frameNumber`` 640x480 8-bit 3-channel images.

        Also normalizes ``self.frameNumber`` to an int as a side effect.
        """
        self.frameNumber = int(round(float(self.frameNumber)))
        value = \
            'IplImage * block$id$_img_i0 = NULL;\n' + \
            'IplImage * block$id$_img_o0 = NULL;\n' + \
            'int i_$id$ = 0;\n' + \
            'IplImage * block$id$_buffer[$frameNumber$] = {'
        # NULL-initializer list, one entry per buffered frame.
        value += ','.join(['NULL'] * self.frameNumber)
        value += '};\n'
        for idx in range(self.frameNumber):
            value += 'block$id$_buffer[' + str(
                idx) + '] = cvCreateImage( cvSize(640,480), 8, 3);\n'
            value += 'cvSetZero(block$id$_buffer[' + str(idx) + ']);\n'
        # Start by emitting the last buffer slot, so the first $frameNumber$
        # outputs are the pre-allocated blank frames.
        value += 'block$id$_img_o0 = block$id$_buffer[' + \
            str(self.frameNumber - 1) + '];\n'
        return value

# -----------------------------------------------------------------------------
You’re encouraged, or even required, to forecast your pipeline–what is your expected closure? Non-existent, inaccurate, or outdated information makes this a guessing game. Even recalling what deals you’ve worked on can be challenging. Making informed decisions with all the information for past, present, and future deals is much easier with Handle Advanced Search. Salesmen and managers have the power to pull current, accurate information and further filter results to get a customized view of the data. The easiest way to see your in-progress sales Opportunities is to use the pre-loaded “My Open” saved search in the Opportunities search screen. “My Open” and “My Closed” are loaded automatically for all users. You can refine and customize your Opportunity search using the advanced search fields and customize your grid. This will list your Opportunities. If you are a manager or have been given permission to view other team members’ opportunities, you can do so by searching with the [Owner] field. Using the Activity search or Opportunity search, you can create lists of active opportunities/deals. Adding fields like [Owner], [Status], [Close Date], and [Quoted] gives you the flexibility to create exactly the list you need to review for yourself or with a salesman. Customizing the results grid allows you to control what information you want to see and sort it to what works best for you. If you’ve read any of our articles or consulted with CustomerTRAX on opportunity management, you’ll know we highly recommend reviewing opportunities with your manager or with your team. Saved searches are an easy way to pull this information for these discussions. Now we have created a way for those saved searches to be emailed to you. Read More about scheduling a saved search to be delivered to you automatically.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python application to fetch, store and offline search Reddit data.

Requirements:
1. Python 2.7+
2. requests (pip install requests)
3. elasticsearch (pip install elasticsearch)
4. ElasticSearch server running (download from
   https://www.elastic.co/downloads/elasticsearch, install and start)
"""

__author__ = 'Rajendra Kumar Uppal'
__copyright__ = "Copyright 2015, Rajendra Kumar Uppal"
__credits__ = ["Fletcher Heisler", "Rajendra Kumar Uppal"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Rajendra Kumar Uppal"
__email__ = "rajen.iitd@gmail.com"
__status__ = "Production"

import requests
from elasticsearch import Elasticsearch


class Reddit():
    # Placeholder for a future Reddit API wrapper; unused for now.
    pass


def main(limit=1):
    """Fetch the all-time top /r/iama posts and index them in Elasticsearch.

    :param limit: number of posts to request from the Reddit API
                  (default 1, preserving the original behaviour; the API
                  accepts up to 100 per request).
    """
    es = Elasticsearch()

    # Request the all-time top posts from /r/iama.
    response = requests.get(
        "http://api.reddit.com/r/iama/top/?t=all&limit=%d" % limit,
        headers={"User-Agent": "TrackMaven"})
    # Fail loudly on HTTP errors instead of indexing an error payload.
    response.raise_for_status()

    # Subset of each post's fields that gets copied into the search index.
    fields = ['title', 'selftext', 'author', 'score', 'ups', 'downs',
              'num_comments', 'url', 'created']

    # Index each post as an 'iama' document in the 'reddit' index.
    for iama in response.json()['data']['children']:
        content = iama['data']
        doc = dict((field, content[field]) for field in fields)
        print(doc)
        es.index(index='reddit', doc_type='iama', body=doc)


if __name__ == '__main__':
    main()
Discover the thrills and excitement of Las Vegas each and every year with a timeshare at Vacation Village at the Jockey Club. Find timeshare resales and rentals priced up to 70% below retail costs today. What’s My Vacation Village Timeshare Worth? Sin City is front and center at this luxurious timeshare resort. The Vacation Village at the Jockey Club is located directly on The Strip, offering instant access to all of the city's best attractions. Nestled between the Bellagio and the Mandarin Oriental, you'll have spectacular views of glimmering Las Vegas and a wonderful proximity to lively casinos and world-class restaurants. Complete with a swimming pool, a health club and fitness center, and a sauna, you'll be pampered from head to toe with your very own accommodations at this establishment. The rooms at the Jockey Club include studio units as well as one and two bedroom suites, offering space and comfort for groups up to six. Whether you're seeking a getaway with your spouse, a bachelor or bachelorette party with your friends, or a family vacation to the Entertainment Capital of the World, you'll find a unit for you at the Jockey Club. Suites come equipped with recently-renovated accommodations, including full kitchens, king size beds, up-to-date entertainment systems, and more. Are you ready to guarantee yourself amazing vacations in Las Vegas for years to come? Fill out the form on this page to learn more about ownership opportunities at the Vacation Village Jockey Club resort, or call a representative at 1-877-815-4227. Buying a timeshare resale at the Jockey Club Vacation Village can earn you years of great vacations as a timeshare owner for thousands less than what others pay. Unlike traditional timeshares offered directly by the resort, timeshare resales are sold by current owners. 
What results is the same great vacation ownership accommodations priced much more affordably—you can find timeshare resales at the Jockey Club at prices up to 70% lower than retail costs. Resales at the resort come in the form of deeded fixed and floating weeks, offering a great deal of flexibility when searching for the perfect timeshare. You can also find a vacation ownership interval by price, unit size, and more to ensure that you're investing in a timeshare that's perfect for you. A timeshare resale at the resort can also unlock exclusive benefits and better vacations through the Vacation Village Resorts brand. As a Vacation Village Resorts affiliate, the Jockey Club resort offers much more than just a place to rest your head for your Vegas trips. As a timeshare resale owner, you'll be able to take advantage of some of the brand's best and most exclusive vacation benefits, which can enhance your overall vacation experience. Use your status as a timeshare owner to collect Affinity Rewards®, which provide exciting perks and extras to members. You can also exchange your Vacation Village Las Vegas timeshare and stay at other branded resorts across the U.S. The Jockey Club resort is affiliated with RCI, the world's largest timeshare exchange brand. With more than 8,000 affiliated resorts in its exchange network, you can trade your Las Vegas accommodations for stays at other resorts all over the world. The Vacation Village at the Jockey Club is also an RCI Silver Crown rated resort, noting its above average accommodations and amenities. As a Jockey Club owner, you'll be able to exchange with other resorts of similar high quality and rating. This Vacation Village Las Vegas resort combines luxurious accommodations with impeccable amenities and services to provide a timeless and unforgettable stay. On-site, you'll find the resort has everything you need to enjoy your Vegas getaway. 
Head down to the pool to swim or lounge with a book, or visit the health club to stay on top of your fitness goals while you're away. The sauna provides each guest with the opportunity to truly unwind and enjoy their escape to the desert. The resort's location is perhaps its best amenity, with a center-Strip position that simply can't be beat. Head out the front doors to discover the Bellagio fountains performing amazing shows every night. Gamble and game at popular locations like Caesars Palace, the Venetian, and Mandalay Bay, all of which are located just a short walk away. Catch an unbelievable show from acts like the Blue Man Group or Cirque du Soleil, or test your limits at the rooftop thrill rides at The Stratosphere. Get outside the city to discover the true beauty of the American Southwest, with day trip potential at Red Rock Canyon or the Hoover Dam. Already Own at Vacation Village at the Jockey Club?
# Tests for the hyperbolic functions sinh/cosh/tanh/coth.
# Written against an old SymPy API (`symbols('xy')`, `Basic.Half()`);
# modern SymPy spells these `symbols('x y')` and `Rational(1, 2)`.
from sympy import *


def test_sinh():
    """Check special values, parity and imaginary-argument reduction of sinh."""
    x, y = symbols('xy')
    k = Symbol('k', integer=True)

    # Special values and odd symmetry.
    assert sinh(nan) == nan
    assert sinh(oo) == oo
    assert sinh(-oo) == -oo

    assert sinh(0) == 0

    assert sinh(1) == sinh(1)
    assert sinh(-1) == -sinh(1)

    assert sinh(x) == sinh(x)
    assert sinh(-x) == -sinh(x)

    assert sinh(pi) == sinh(pi)
    assert sinh(-pi) == -sinh(pi)

    # Huge arguments must stay unevaluated rather than overflow.
    assert sinh(2**1024 * E) == sinh(2**1024 * E)
    assert sinh(-2**1024 * E) == -sinh(2**1024 * E)

    # sinh(I*x) == I*sin(x): zeros at integer multiples of pi*I.
    assert sinh(pi*I) == 0
    assert sinh(-pi*I) == 0
    assert sinh(2*pi*I) == 0
    assert sinh(-2*pi*I) == 0
    assert sinh(-3*10**73*pi*I) == 0
    assert sinh(7*10**103*pi*I) == 0

    assert sinh(pi*I/2) == I
    assert sinh(-pi*I/2) == -I
    assert sinh(5*pi*I/2) == I
    assert sinh(7*pi*I/2) == -I

    # Exact values at the standard angles pi/3, pi/4, pi/6.
    assert sinh(pi*I/3) == Basic.Half()*sqrt(3)*I
    assert sinh(-2*pi*I/3) == -Basic.Half()*sqrt(3)*I

    assert sinh(pi*I/4) == Basic.Half()*sqrt(2)*I
    assert sinh(-pi*I/4) == -Basic.Half()*sqrt(2)*I
    assert sinh(17*pi*I/4) == Basic.Half()*sqrt(2)*I
    assert sinh(-3*pi*I/4) == -Basic.Half()*sqrt(2)*I

    assert sinh(pi*I/6) == Basic.Half()*I
    assert sinh(-pi*I/6) == -Basic.Half()*I
    assert sinh(7*pi*I/6) == -Basic.Half()*I
    assert sinh(-5*pi*I/6) == -Basic.Half()*I

    # Non-special angle: reduces to sin but does not simplify further.
    assert sinh(pi*I/105) == sin(pi/105)*I
    assert sinh(-pi*I/105) == -sin(pi/105)*I

    assert sinh(2 + 3*I) == sinh(2 + 3*I)

    assert sinh(x*I) == sin(x)*I

    # Symbolic integer multiples of pi*I.
    assert sinh(k*pi*I) == 0
    assert sinh(17*k*pi*I) == 0

    assert sinh(k*pi*I/2) == sin(k*pi/2)*I


def test_cosh():
    """Check special values, even symmetry and imaginary-argument reduction of cosh."""
    x, y = symbols('xy')
    k = Symbol('k', integer=True)

    # Special values and even symmetry.
    assert cosh(nan) == nan
    assert cosh(oo) == oo
    assert cosh(-oo) == oo

    assert cosh(0) == 1

    assert cosh(1) == cosh(1)
    assert cosh(-1) == cosh(1)

    assert cosh(x) == cosh(x)
    assert cosh(-x) == cosh(x)

    # cosh(I*x) == cos(x).
    assert cosh(pi*I) == cos(pi)
    assert cosh(-pi*I) == cos(pi)

    assert cosh(2**1024 * E) == cosh(2**1024 * E)
    assert cosh(-2**1024 * E) == cosh(2**1024 * E)

    # Zeros at odd multiples of pi*I/2.
    # NOTE(review): the next two asserts are exact duplicates of the two
    # before them -- likely a copy/paste slip; the originals probably
    # intended 5*pi*I/2-style arguments as in test_sinh.
    assert cosh(pi*I/2) == 0
    assert cosh(-pi*I/2) == 0
    assert cosh(pi*I/2) == 0
    assert cosh(-pi*I/2) == 0
    assert cosh((-3*10**73+1)*pi*I/2) == 0
    assert cosh((7*10**103+1)*pi*I/2) == 0

    assert cosh(pi*I) == -1
    assert cosh(-pi*I) == -1
    assert cosh(5*pi*I) == -1
    assert cosh(8*pi*I) == 1

    # Exact values at the standard angles pi/3, pi/4, pi/6.
    assert cosh(pi*I/3) == Basic.Half()
    assert cosh(-2*pi*I/3) == -Basic.Half()

    assert cosh(pi*I/4) == Basic.Half()*sqrt(2)
    assert cosh(-pi*I/4) == Basic.Half()*sqrt(2)
    assert cosh(11*pi*I/4) == -Basic.Half()*sqrt(2)
    assert cosh(-3*pi*I/4) == -Basic.Half()*sqrt(2)

    assert cosh(pi*I/6) == Basic.Half()*sqrt(3)
    assert cosh(-pi*I/6) == Basic.Half()*sqrt(3)
    assert cosh(7*pi*I/6) == -Basic.Half()*sqrt(3)
    assert cosh(-5*pi*I/6) == -Basic.Half()*sqrt(3)

    # Non-special angle: reduces to cos but does not simplify further.
    assert cosh(pi*I/105) == cos(pi/105)
    assert cosh(-pi*I/105) == cos(pi/105)

    assert cosh(2 + 3*I) == cosh(2 + 3*I)

    assert cosh(x*I) == cos(x)

    # Symbolic integer multiples of pi*I.
    assert cosh(k*pi*I) == cos(k*pi)
    assert cosh(17*k*pi*I) == cos(17*k*pi)

    assert cosh(k*pi) == cosh(k*pi)


def test_tanh():
    """Check special values, parity and imaginary-argument reduction of tanh."""
    x, y = symbols('xy')
    k = Symbol('k', integer=True)

    # Special values and odd symmetry; tanh saturates at +-1 at infinity.
    assert tanh(nan) == nan
    assert tanh(oo) == 1
    assert tanh(-oo) == -1

    assert tanh(0) == 0

    assert tanh(1) == tanh(1)
    assert tanh(-1) == -tanh(1)

    assert tanh(x) == tanh(x)
    assert tanh(-x) == -tanh(x)

    assert tanh(pi) == tanh(pi)
    assert tanh(-pi) == -tanh(pi)

    assert tanh(2**1024 * E) == tanh(2**1024 * E)
    assert tanh(-2**1024 * E) == -tanh(2**1024 * E)

    # tanh(I*x) == I*tan(x): zeros at integer multiples of pi*I.
    assert tanh(pi*I) == 0
    assert tanh(-pi*I) == 0
    assert tanh(2*pi*I) == 0
    assert tanh(-2*pi*I) == 0
    assert tanh(-3*10**73*pi*I) == 0
    assert tanh(7*10**103*pi*I) == 0

    # Poles of tan: the expression stays unevaluated.
    assert tanh(pi*I/2) == tanh(pi*I/2)
    assert tanh(-pi*I/2) == -tanh(pi*I/2)
    assert tanh(5*pi*I/2) == tanh(5*pi*I/2)
    assert tanh(7*pi*I/2) == tanh(7*pi*I/2)

    # Exact values at the standard angles pi/3, pi/4, pi/6.
    assert tanh(pi*I/3) == sqrt(3)*I
    assert tanh(-2*pi*I/3) == sqrt(3)*I

    assert tanh(pi*I/4) == I
    assert tanh(-pi*I/4) == -I
    assert tanh(17*pi*I/4) == I
    assert tanh(-3*pi*I/4) == I

    assert tanh(pi*I/6) == I/sqrt(3)
    assert tanh(-pi*I/6) == -I/sqrt(3)
    assert tanh(7*pi*I/6) == I/sqrt(3)
    assert tanh(-5*pi*I/6) == I/sqrt(3)

    # Non-special angle: reduces to tan but does not simplify further.
    assert tanh(pi*I/105) == tan(pi/105)*I
    assert tanh(-pi*I/105) == -tan(pi/105)*I

    assert tanh(2 + 3*I) == tanh(2 + 3*I)

    assert tanh(x*I) == tan(x)*I

    # Symbolic integer multiples of pi*I.
    assert tanh(k*pi*I) == 0
    assert tanh(17*k*pi*I) == 0

    assert tanh(k*pi*I/2) == tan(k*pi/2)*I


def test_coth():
    """Check special values, parity and imaginary-argument reduction of coth."""
    x, y = symbols('xy')
    k = Symbol('k', integer=True)

    # Special values; coth saturates at +-1 at infinity.
    assert coth(nan) == nan
    assert coth(oo) == 1
    assert coth(-oo) == -1

    # Pole at the origin: stays unevaluated.
    assert coth(0) == coth(0)
    assert coth(1) == coth(1)
    assert coth(-1) == -coth(1)

    assert coth(x) == coth(x)
    assert coth(-x) == -coth(x)

    # coth(I*x) == -I*cot(x).
    assert coth(pi*I) == -cot(pi)*I
    assert coth(-pi*I) == cot(pi)*I

    assert coth(2**1024 * E) == coth(2**1024 * E)
    assert coth(-2**1024 * E) == -coth(2**1024 * E)

    assert coth(pi*I) == -cot(pi)*I
    assert coth(-pi*I) == cot(pi)*I
    assert coth(2*pi*I) == -cot(2*pi)*I
    assert coth(-2*pi*I) == cot(2*pi)*I
    assert coth(-3*10**73*pi*I) == cot(3*10**73*pi)*I
    assert coth(7*10**103*pi*I) == -cot(7*10**103*pi)*I

    # Zeros at odd multiples of pi*I/2.
    assert coth(pi*I/2) == 0
    assert coth(-pi*I/2) == 0
    assert coth(5*pi*I/2) == 0
    assert coth(7*pi*I/2) == 0

    # Exact values at the standard angles pi/3, pi/4, pi/6.
    assert coth(pi*I/3) == -I/sqrt(3)
    assert coth(-2*pi*I/3) == -I/sqrt(3)

    assert coth(pi*I/4) == -I
    assert coth(-pi*I/4) == I
    assert coth(17*pi*I/4) == -I
    assert coth(-3*pi*I/4) == -I

    assert coth(pi*I/6) == -sqrt(3)*I
    assert coth(-pi*I/6) == sqrt(3)*I
    assert coth(7*pi*I/6) == -sqrt(3)*I
    assert coth(-5*pi*I/6) == -sqrt(3)*I

    # Non-special angle: reduces to cot but does not simplify further.
    assert coth(pi*I/105) == -cot(pi/105)*I
    assert coth(-pi*I/105) == cot(pi/105)*I

    assert coth(2 + 3*I) == coth(2 + 3*I)

    assert coth(x*I) == -cot(x)*I

    # Symbolic integer multiples of pi*I.
    # NOTE(review): the last assert duplicates the first of these three.
    assert coth(k*pi*I) == -cot(k*pi)*I
    assert coth(17*k*pi*I) == -cot(17*k*pi)*I
    assert coth(k*pi*I) == -cot(k*pi)*I


# Inverse-function tests were never written.
#def test_asinh():

#def test_acosh():

#def test_atanh():

#def test_acoth():
Stress-free Math with The Sunshine Method! The Sunshine Method’s in-home one-on-one tutoring can help your child navigate the academic road to high school mathematics! Whether it’s pre-algebra, geometry, spatial reasoning, mathematical reasoning, fractions or anything in between, we will work with your child to create an individualized plan to propel them to numeric success! In collaboration with parents/guardians and teachers, we work together to ensure that your child is prepared to succeed, both inside and out of the classroom. Our mission is to strengthen the foundation that will lead to your child’s continued academic success, especially as they prepare to enter high school. By filling gaps in knowledge, providing 24/7 access to the entire Sunshine Method online database, and customizing lessons to your child’s current workload and upcoming tests and projects, we ensure that your child is able to make positive strides fast, and be on the best path to succeed for years to come!
from hcsvlab_robochef.annotations import * from hcsvlab_robochef.ingest_base import IngestBase from hcsvlab_robochef.rdf.map import * from hcsvlab_robochef.utils.filehandler import * from hcsvlab_robochef.utils.serialiser import * from hcsvlab_robochef.utils.statistics import * from rdf import paradisecMap from xml.etree import ElementTree as ET import codecs import mimetypes import urllib import re class EopasTestIngest(IngestBase): olac_role_map = {'annotator' : OLAC.annotator, 'author' : OLAC.author, 'compiler' : OLAC.compiler, 'consultant' : OLAC.consultant, 'data_inputter' : OLAC.data_inputter, 'depositor' : OLAC.depositor, 'developer' : OLAC.developer, 'editor' : OLAC.editor, 'illustrator' : OLAC.illustrator, 'interpreter' : OLAC.interpreter, 'interviewer' : OLAC.interviewer, 'participant' : OLAC.participant, 'performer' : OLAC.performer, 'photographer' : OLAC.photographer, 'recorder' : OLAC.recorder, 'researcher' : OLAC.researcher, 'research_participant' : OLAC.research_participant, 'responder' : OLAC.responder, 'signer' : OLAC.signer, 'singer' : OLAC.singer, 'speaker' : OLAC.speaker, 'sponsor' : OLAC.sponsor, 'transcriber' : OLAC.transcriber, 'translator' : OLAC.translator } def ingestCorpus(self, srcdir, outdir): ''' This function will initiate the ingest process for the Auslit corpus ''' print " converting corpus in", srcdir, "into normalised data in", outdir print " clearing and creating output location" self.clear_output_dir(outdir) print " processing files..." 
files_to_process = self.__get_files(srcdir) total = len(files_to_process) sofar = 0 for f in files_to_process: meta_dict = self.ingestDocument(srcdir, f) f = f.replace(srcdir, outdir, 1) try: os.makedirs(os.path.dirname(f)) except: pass (sampleid, _) = os.path.splitext(f) serialiser = MetaSerialiser() serialiser.serialise(outdir, sampleid, paradisecMap, meta_dict, True) sofar = sofar + 1 print "\033[2K ", sofar, "of", total, f, "\033[A" print "\033[2K ", total, "files processed" def setMetaData(self, rcdir): ''' Loads the meta data for use during ingest ''' pass def ingestDocument(self, srcdir, sourcepath): """ Read and process a corpus document """ xml_tree = self.__load_xml_tree(sourcepath) meta_dict = metadata.xml2tuplelist(xml_tree, ['olac', 'metadata']) self.__get_documents(meta_dict) self.__get_people(meta_dict) return meta_dict def __get_documents(self, meta_dict): for k, v in meta_dict: if k == 'tableOfContents': filetype = self.__get_type(v) file_meta = {'id' : v, 'filename' : v, 'filetype' : filetype, 'documenttitle' : v} meta_dict.append(('table_document_' + v, file_meta)) meta_dict[:] = [(k, v) for k, v in meta_dict if 'tableOfContents' not in k] def __get_people(self, meta_dict): # TODO: maybe this belongs elsewhere roles = self.olac_role_map.keys() for k, v in meta_dict: if k in roles: person = {'role' : self.olac_role_map[k], 'id' : re.sub(' ', '_', v), 'name' : v} meta_dict.append(('table_person_' + k, person)) meta_dict[:] = [(k, v) for k, v in meta_dict if k.strip() not in roles] # TODO: this could be moved to somewhere like ../utils where other modules could use it def __get_type(self, filepath): url = urllib.pathname2url(filepath) mime_type, _ = mimetypes.guess_type(url) filetype = None if mime_type: filetype = mime_type.split('/')[0].title() if not filetype or filetype == 'Application': filetype = 'Other' return filetype def __get_files(self, srcdir): ''' This function retrieves a list of files that the HCSvLab ingest should actually process 
''' filehandler = FileHandler() files = filehandler.getFiles(srcdir, r'^.+?(?:pas|box).xml$') return_files = [os.path.join(srcdir, f) for f in files] return return_files def __tuplelist2dict__(self, tuplelist): result = dict() for (k, v) in tuplelist: if k and v: result[k] = v return result def __load_xml_tree(self, sourcepath): ''' This function reads in a XML docment as a text file and converts it into an XML tree for further processing ''' fhandle = codecs.open(sourcepath, "r", "utf-8") text = fhandle.read() fhandle.close() text = text.replace('&ndash;', u"\u2013") text = text.replace('&mdash;', u"\u2014") text = text.replace('&copy;', u"\u00A9") text = text.replace('&ldquo;', u"\u201C") text = text.replace('&rdquo;', u"\u201D") text = text.replace('&emsp;', u"\u2003") text = text.replace('&eacute;', u"\u00E9") text = text.replace('&lsquo;', u"\u2018") text = text.replace('&rsquo;', u"\u2019") text = text.replace('&ecirc;', u"\u00EA") text = text.replace('&agrave;', u"\u00E0") text = text.replace('&egrave;', u"\u00E8") text = text.replace('&oelig;', u"\u0153") text = text.replace('&aelig;', u"\u00E6") text = text.replace('&hellip;', u"\u2026") return ET.fromstring(text.encode("utf-8"))
View this Used 2018 Dodge Challenger in Kernersville, NC at Kernersville Chrysler Dodge Jeep Ram. Wide Body! Navigation! Backup Camera! Leather Seats! This One Owner CarFax 2018 DODGE CHALLENGER SRT HELLCAT WIDE BODY RWD has a 6.2L V8 Hellcat engine, automatic transmission, leather seats, navigation system, rear-view camera, power windows, power door locks, dual air bags, air conditioning, push start, heated steering wheel, heated seats, cooled seats, power seats, cruise control, keyless entry, and 18,416 miles.
import sys sys.path.append('.') import sampyl as smp from sampyl.state import State from sampyl import np from sampyl.diagnostics import diagnostics import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt import seaborn as sns # correlated gaussian log likelihood def logp(x, y): icov = np.linalg.inv(np.array([[1., .8], [.8, 1.]])) d = np.array([x, y]) return -.5 * np.dot(np.dot(d, icov), d) logp_xy = lambda(th): logp(th[0], th[1]) # compare slice samplers, metropolis hastings, and the two variable # slice sampler ssamp = smp.Slice(logp, start={'x': 4., 'y': 4.} ) slice_trace = ssamp.sample(1000) met = smp.Metropolis(logp, start={'x': 4., 'y': 4.}) met_trace = met.sample(1000) bslice = smp.Slice(logp_xy, start={'th': np.array([4., 4.])}) btrace = bslice.sample(1000) # compute effective sample size based on autocorrelation slice_eff = diagnostics.compute_n_eff_acf(slice_trace.x) met_eff = diagnostics.compute_n_eff_acf(met_trace.x) b_eff = diagnostics.compute_n_eff_acf(btrace.th[:,0]) print "Slice effective sample size: %2.2f"%slice_eff print "MH effective sample size: %2.2f"%met_eff print "two var slice effective sample size: %2.2f"%b_eff print " ----- " print "Slice sampler evals per sample: ", ssamp.evals_per_sample # graphically compare samples fig, axarr = plt.subplots(1, 3, figsize=(12,4)) axarr[0].scatter(slice_trace.x, slice_trace.y) axarr[0].set_title("Slice samples") axarr[1].scatter(met_trace.x, met_trace.y) axarr[1].set_title("MH samples") axarr[2].scatter(btrace.th[:,0], btrace.th[:,1]) axarr[2].set_title("Two var Slice samples") for ax in axarr: ax.set_xlim((-4, 4)) ax.set_ylim((-4, 4)) plt.show()
A couple of posts back, I wrote about commodity theory and how the scarcity of an item makes it seem more valuable and more desirable. What I didn’t write about, however, was a possible mechanism for this interaction between scarcity and value – why would a rare thing be seen as valuable by your average consumer? Possibly it’s because people want to feel at least moderately unique, and possessing something rare is one way of feeling like you’ve achieved that. It depends on the culture you look at, of course, since the need-for-uniqueness can vary greatly depending on whether a culture is historically individualist or collectivist, although perhaps those differences are being diminished slowly through globalisation. Whatever the case, research has shown that people in more individualist societies (so-called “Western” societies, I guess) certainly react negatively to being told that they are, in fact, actually very similar to other people. Also, expressing your uniqueness via consumption is relatively safe (most people are unlikely to do something so unique that they end up going off the scale of social standards and become a social outcast) and relatively affordable (depending on what you want to buy to make yourself feel unique). The need for uniqueness feeds into consumerism by putting people on the quest to define and accentuate their uniqueness by acquiring products – the rarer, the more valuable, the better. A problem pointed out in this research paper is that, at least in American society (and probably a few others), this creates a catch-22 consumer carousel – you buy a product that is rare in order to make yourself feel unique, but chances are that through media and advertising other people will become aware of that same product and also buy it, diminishing its rarity and eroding your sense of uniqueness, putting you on a quest to find yet another item that’ll make you feel unique. 
As long as people feed this need to distinguish themselves from others and feel unique, they may very well continue to purchase extensively without actually achieving any sense of success. The 21st century consumer is a sophisticated one. Compared to past decades, especially prior to the sixties, we have greater access to information and we understand our rights better and we are more aware of our options. We’re not infallible, but we are more discerning and more informed than ever before. One topic we’re certainly more informed about is ethics. We know that various companies behave in what are commonly considered unethical ways, whether it’s unsustainable use of environmental resources or sweatshop labour or animal testing or creating pollution. If we look, we’re often able to find information about such practices. But how often do we do something about that? How often do we actually let ethical considerations shape our purchasing habits? Rather infrequently, it seems, if this research paper is anything to go by. It turns out that, despite the increased sophistication of consumers these days and the increased awareness of ethical issues, there’s very often a gap between any given consumer’s attitudes towards ethical issues and his or her actual behaviour concerning those issues: an attitude-behaviour gap, stretching between what a person knows and feels about ethical issues and problems and what they’re actually willing to do about it. A person might support an action, such as boycotting a company that has been revealed to use child labour, but whether that person actually does boycott the company is an entirely different matter. If an issue is specific and high-profile enough, sometimes action on a large scale does happen. For example, the consumer boycott of Nestlé products due to its infant formula marketing practices has been estimated as having cost the company tens of millions of dollars. 
But that’s one conspicuous, highly publicised example, and is definitely a rarity. Despite having access to information about specific ethical concerns in relation to any given company or retailer, and despite consumers almost inevitably becoming aware of such issues through the ubiquitous media, on the whole apparently we’re still just not that motivated to actually do anything about it. We disapprove of it, but we don’t act on our disapproval. A few studies have shown that, when it comes down to it, consumers simply don’t prioritise ethical and social issues when considering making a purchase, preferring to make their decisions based on price, value, quality and brand familiarity. And that’s perhaps the crux of it – when you have to consider so many other things in making a decision to purchase (and most of this blog is about the almost impossibility of the brain to make completely reasoned, unbiased, informed decisions), the additional effort of factoring in ethical considerations on top of everything else is almost overwhelming. Is it possible to bridge the gap between attitude and behaviour, even when you decide to make ethical issues a priority along with price, value, quality and brand? Some people’s socioeconomic and financial position will prohibit them from making ethical issues a priority, which is understandable. But for those of us who do have the socioeconomic privilege of being able to factor in the additional issue of ethics in our consumer decision-making: we need to think about whether our actions match up with our attitudes. The more people who do try to carry their ethical attitudes through to actual actions, the more demand there will be for ethical behaviour and social responsibility from brands and companies. So, as always, the message is: think critically. Think about how you’re thinking and be ruthless about criticising any inconsistencies – are you thinking one way but acting another? 
That’s what will truly make a discerning, informed consumer and will consequently actually make a difference in the world. The threat of something becoming rare or unavailable seems to be a powerful motivator for action – an item’s diminished availability suddenly makes it seem that much more valuable and desirable, and in the context of consumer decision-making, it may drive the desire for and purchasing of a particular item. Obviously, economists and marketers know about the effects of scarcity on the perceived value of an object, and all those cries of “limited edition!” and “limited time only!” and “only while stocks last!” are of course designed to imply scarcity and make consumers more keen to buy. Commodity theory, first proposed by psychologist T.C. Brock in 1968, characterised the phenomenon from a psychological perspective, describing the relationship between the scarcity of an item and its perceived value. Many studies over the years have supported the relationship between the two things – the decreased availability of something does indeed lead people to value that thing more highly and consider it more desirable. The psychological effects of commodity theory have been extensively exploited by fast fashion in particular (as described in this research paper, amongst many others). The entire premise of fast fashion is founded on fast production, fast responses to trends as they emerge, fast turn-over, and a pretty fast track to obsolescence for most items. Retailers thereby create an environment in which a particular item’s existence is fleeting – its shelf life is incredibly short because new items are constantly being introduced and slow-selling items are removed to make room for the new ones. 
People get to know that this is what fast fashion is about – companies like Zara and H&M and Top Shop have so wholly embraced the model that most people who shop at such places frequently enough know that if they don’t buy a particular item there and then, chances are it won’t be there when they come back the following week. It happens online too, for example, Lapin de Lune’s interaction with ASOS which she mentioned here on her Tumblr. All these retailers rely on their image of having a rapid rate of stock turn-over (supposedly in the name of staying on trend) to drive people to buy as impulsively and as frequently as possible. That’s commodity theory at play right there. The rapid turn-over of stock and incredibly short shelf-lives form the perfect situation to create the perception that an item might become scarce at any moment, and you suddenly feel that it has become that much more valuable, perhaps prompting the compulsion to purchase the item before the opportunity disappears. It is a lucrative business model, obviously, but obviously it is not a system that is particularly encouraging of thoughtful, measured, circumspect consumption and decision-making.
""" setup.py: Install nebterpolator. """ VERSION="1.0" __author__ = "Robert McGibbon and Lee-Ping Wang" __version__ = VERSION from distutils.sysconfig import get_config_var from distutils.core import setup,Extension import os import shutil import numpy import glob # Comment left here as an example # Copied from MSMBuilder 'contact' library for rapidly computing interatomic distances. # CONTACT = Extension('forcebalance/_contact_wrap', # sources = ["ext/contact/contact.c", # "ext/contact/contact_wrap.c"], # extra_compile_args=["-std=c99","-O3","-shared", # "-fopenmp", "-Wall"], # extra_link_args=['-lgomp'], # include_dirs = [numpy.get_include(), os.path.join(numpy.get_include(), 'numpy')]) def buildKeywordDictionary(): from distutils.core import Extension setupKeywords = {} setupKeywords["name"] = "nebterpolator" setupKeywords["version"] = VERSION setupKeywords["author"] = __author__ setupKeywords["author_email"] = "leeping@stanford.edu" setupKeywords["license"] = "GPL 3.0" setupKeywords["url"] = "https://github.com/rmcgibbo/nebterpolator" setupKeywords["download_url"] = "https://github.com/rmcgibbo/nebterpolator" setupKeywords["scripts"] = glob.glob("bin/*.py") + glob.glob("bin/*.sh") setupKeywords["packages"] = ["nebterpolator", "nebterpolator.io", "nebterpolator.core"] # setupKeywords["package_data"] = {"nebterpolator" : ["data/*.sh","data/uffparms.in","data/oplsaa.ff/*"]} setupKeywords["data_files"] = [] setupKeywords["ext_modules"] = [] setupKeywords["platforms"] = ["Linux"] setupKeywords["description"] = "Internal coordinate smoothing." outputString="" firstTab = 40 secondTab = 60 for key in sorted( setupKeywords.iterkeys() ): value = setupKeywords[key] outputString += key.rjust(firstTab) + str( value ).rjust(secondTab) + "\n" print "%s" % outputString return setupKeywords def main(): setupKeywords=buildKeywordDictionary() setup(**setupKeywords) if __name__ == '__main__': main()
In part one of “A seal of quality” the Divine Tastes blog praised the fact that there is such a thing as a seal of quality issued by the Israeli olive board. However, how reliable is this seal of quality? A “close source” told the Divine Tastes blog about the process of getting the seal of quality. In practice, the process is very simple, and herein lies the problem – it is too simple. In order to get the seal of quality, an olive oil producer needs to send some samples to the board for chemical and organoleptic analysis. Do these tests really take place? Let’s assume they follow all the tests to the dot. Even so, the “source” says, in practice there is very little done to ensure that standards are maintained by all companies, everywhere and at all times. To elaborate a little: once the XYZ olive oil company has submitted a genuine extra virgin olive oil for testing, it gets the seal of quality. However, what the manufacturer then really sells to the retailer, or what the retailer sells to the consumer, is a different story. As far as the “source” tells the Divine Tastes Blog, there are no frequent random shelf checkups. Is it because of a lack of manpower and funds? Politics? Or is it due to the possibility that at the time of bottling the olive oil was of extra virgin quality, but that due to oxygen, light and heat it deteriorated at a later stage – not necessarily through the fault of the manufacturer but of the retailer? The “source” tends to think it’s a mix of everything and more, and acknowledges the complex issues of the timing of tests after the initial testing, and of who bears the responsibility for ensuring that the quality does not deteriorate too fast due to bad conditions. The “source” also said that it cannot praise enough the continuing hard work and effort put in by the Israeli olive board in making Israeli olive oil the best. The Divine Tastes Blog recommends: wherever possible always look for a seal of quality; even better – taste it.
from trac.perm import PermissionCache
from trac.test import Mock, EnvironmentStub
from txomon.ticket import default_workflow, web_ui
from txomon.ticket.batch import BatchModifyModule
from txomon.ticket.model import Ticket
from trac.util.datefmt import utc

import unittest


class BatchModifyTestCase(unittest.TestCase):
    """Tests for BatchModifyModule: request parsing, list-field editing
    (assign / add / remove modes) and saving batch changes to tickets."""

    def setUp(self):
        self.env = EnvironmentStub(default_data=True,
            enable=[default_workflow.ConfigurableTicketWorkflow,
                    web_ui.TicketModule])
        self.req = Mock(href=self.env.href, authname='anonymous', tz=utc)
        self.req.session = {}
        self.req.perm = PermissionCache(self.env)

    def assertCommentAdded(self, ticket_id, comment):
        """Assert that `comment` was recorded on the given ticket."""
        ticket = Ticket(self.env, int(ticket_id))
        changes = ticket.get_changelog()
        comment_change = [c for c in changes if c[2] == 'comment'][0]
        # A changelog entry is (time, author, field, oldvalue, newvalue,
        # permanent); index 4 holds the comment text.  The previous code
        # compared index 2 -- the literal field name 'comment' -- so the
        # assertion could never detect a wrong comment text.
        self.assertEqual(comment_change[4], comment)

    def assertFieldChanged(self, ticket_id, field, new_value):
        """Assert that `field` was changed to `new_value` on the ticket."""
        ticket = Ticket(self.env, int(ticket_id))
        changes = ticket.get_changelog()
        field_change = [c for c in changes if c[2] == field][0]
        self.assertEqual(field_change[4], new_value)

    def _change_list_test_helper(self, original, new, new2, mode):
        batch = BatchModifyModule(self.env)
        return batch._change_list(original, new, new2, mode)

    def _add_list_test_helper(self, original, to_add):
        return self._change_list_test_helper(original, to_add, '', '+')

    def _remove_list_test_helper(self, original, to_remove):
        return self._change_list_test_helper(original, to_remove, '', '-')

    def _add_remove_list_test_helper(self, original, to_add, to_remove):
        return self._change_list_test_helper(original, to_add, to_remove,
                                             '+-')

    def _assign_list_test_helper(self, original, new):
        return self._change_list_test_helper(original, new, '', '=')

    def _insert_ticket(self, summary, **kw):
        """Helper for inserting a ticket into the database."""
        ticket = Ticket(self.env)
        # The summary parameter was previously accepted but silently
        # dropped; store it so the inserted ticket actually carries it.
        ticket['summary'] = summary
        for k, v in kw.items():
            ticket[k] = v
        return ticket.insert()

    def test_ignore_summary_reporter_and_description(self):
        """These cannot be added through the UI, but if somebody tries
        to build their own POST data they will be ignored."""
        batch = BatchModifyModule(self.env)
        self.req.args = {}
        self.req.args['batchmod_value_summary'] = 'test ticket'
        self.req.args['batchmod_value_reporter'] = 'anonymous'
        self.req.args['batchmod_value_description'] = 'synergize the widgets'
        values = batch._get_new_ticket_values(self.req)
        self.assertEqual(len(values), 0)

    def test_add_batchmod_value_data_from_request(self):
        batch = BatchModifyModule(self.env)
        self.req.args = {}
        self.req.args['batchmod_value_milestone'] = 'milestone1'
        values = batch._get_new_ticket_values(self.req)
        self.assertEqual(values['milestone'], 'milestone1')

    def test_selected_tickets(self):
        self.req.args = {'selected_tickets': '1,2,3'}
        batch = BatchModifyModule(self.env)
        selected_tickets = batch._get_selected_tickets(self.req)
        self.assertEqual(selected_tickets, ['1', '2', '3'])

    def test_no_selected_tickets(self):
        """If nothing is selected, the return value is the empty list."""
        self.req.args = {'selected_tickets': ''}
        batch = BatchModifyModule(self.env)
        selected_tickets = batch._get_selected_tickets(self.req)
        self.assertEqual(selected_tickets, [])

    # Assign list items

    def test_change_list_replace_empty_with_single(self):
        """Replace empty field with single item."""
        changed = self._assign_list_test_helper('', 'alice')
        self.assertEqual(changed, 'alice')

    def test_change_list_replace_empty_with_items(self):
        """Replace empty field with items."""
        changed = self._assign_list_test_helper('', 'alice, bob')
        self.assertEqual(changed, 'alice, bob')

    def test_change_list_replace_item(self):
        """Replace item with a different item."""
        changed = self._assign_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'bob')

    def test_change_list_replace_item_with_items(self):
        """Replace item with different items."""
        changed = self._assign_list_test_helper('alice', 'bob, carol')
        self.assertEqual(changed, 'bob, carol')

    def test_change_list_replace_items_with_item(self):
        """Replace items with a different item."""
        changed = self._assign_list_test_helper('alice, bob', 'carol')
        self.assertEqual(changed, 'carol')

    def test_change_list_replace_items(self):
        """Replace items with different items."""
        changed = self._assign_list_test_helper('alice, bob', 'carol, dave')
        self.assertEqual(changed, 'carol, dave')

    def test_change_list_replace_items_partial(self):
        """Replace items with different (or not) items."""
        changed = self._assign_list_test_helper('alice, bob', 'bob, dave')
        self.assertEqual(changed, 'bob, dave')

    def test_change_list_clear(self):
        """Clear field."""
        changed = self._assign_list_test_helper('alice bob', '')
        self.assertEqual(changed, '')

    # Add / remove list items

    def test_change_list_add_item(self):
        """Append additional item."""
        changed = self._add_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'alice, bob')

    def test_change_list_add_items(self):
        """Append additional items."""
        changed = self._add_list_test_helper('alice, bob', 'carol, dave')
        self.assertEqual(changed, 'alice, bob, carol, dave')

    def test_change_list_remove_item(self):
        """Remove existing item."""
        changed = self._remove_list_test_helper('alice, bob', 'bob')
        self.assertEqual(changed, 'alice')

    def test_change_list_remove_items(self):
        """Remove existing items."""
        changed = self._remove_list_test_helper('alice, bob, carol',
                                                'alice, carol')
        self.assertEqual(changed, 'bob')

    def test_change_list_remove_idempotent(self):
        """Ignore missing item to be removed."""
        changed = self._remove_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'alice')

    def test_change_list_remove_mixed(self):
        """Ignore only missing item to be removed."""
        changed = self._remove_list_test_helper('alice, bob', 'bob, carol')
        self.assertEqual(changed, 'alice')

    def test_change_list_add_remove(self):
        """Remove existing item and append additional item."""
        changed = self._add_remove_list_test_helper('alice, bob', 'carol',
                                                    'alice')
        self.assertEqual(changed, 'bob, carol')

    def test_change_list_add_no_duplicates(self):
        """Existing items are not duplicated."""
        changed = self._add_list_test_helper('alice, bob', 'bob, carol')
        self.assertEqual(changed, 'alice, bob, carol')

    def test_change_list_remove_all_duplicates(self):
        """Remove all duplicates."""
        changed = self._remove_list_test_helper('alice, bob, alice', 'alice')
        self.assertEqual(changed, 'bob')

    # Save

    def test_save_comment(self):
        """Comments are saved to all selected tickets."""
        first_ticket_id = self._insert_ticket('Test 1', reporter='joe')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]

        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, 'comment',
                                   'leave')

        self.assertCommentAdded(first_ticket_id, 'comment')
        self.assertCommentAdded(second_ticket_id, 'comment')

    def test_save_values(self):
        """Changed values are saved to all tickets."""
        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              component='foo')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]
        new_values = {'component': 'bar'}

        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, new_values, '',
                                   'leave')

        self.assertFieldChanged(first_ticket_id, 'component', 'bar')
        self.assertFieldChanged(second_ticket_id, 'component', 'bar')

    def test_action_with_state_change(self):
        """Actions can have change status."""
        self.env.config.set('ticket-workflow', 'embiggen', '* -> big')

        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              status='small')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]

        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, '',
                                   'embiggen')

        self.assertFieldChanged(first_ticket_id, 'status', 'big')
        self.assertFieldChanged(second_ticket_id, 'status', 'big')

    def test_action_with_side_effects(self):
        """Actions can have operations with side effects."""
        self.env.config.set('ticket-workflow', 'buckify', '* -> *')
        self.env.config.set('ticket-workflow', 'buckify.operations',
                            'set_owner')
        self.req.args = {}
        self.req.args['action_buckify_reassign_owner'] = 'buck'

        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              owner='foo')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]

        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, '',
                                   'buckify')

        self.assertFieldChanged(first_ticket_id, 'owner', 'buck')
        self.assertFieldChanged(second_ticket_id, 'owner', 'buck')


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(BatchModifyTestCase, 'test'))
    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='suite')
Rose d’Homme, as the name suggests, was meant by Mademoiselle Rogeon, the genius creator of Les Parfums de Rosine line, as a rose fragrance for men. I would urge and beg any woman who has decided against trying this perfume, because of the word “homme” in the title, to give it a chance. I am not prone to liking scents created for men and often find unisex scents leaning in masculine direction, but Rose d’Homme is a different story altogether. While entirely suitable for a man to wear (though I would guess an average man not interested in perfume might be put off by “rose” in the title), Rose d’Homme is a scent that any woman who likes rather dark, warm, sophisticated rose fragrances would enjoy wearing. From the briefly hesperidic (bergamot) and earthy (vetiver) beginning, to the soft, sweet caress of the middle notes of vanilla and lavender, to the exquisite honeyed yet somehow at the same time dry base of rose with a hint of blackcurrant and vague accord of mandarin and even more vague one of jasmine, Rose d’Homme is a chic, extremely attractive and incredibly well-blended fragrance. I must add that although patchouli and leather are listed among the notes, they do not make an appearance on my skin. I do not miss patchouli but am a little disappointed that leather is not evident to my nose. Trying to come up with a scent that smells even a little similar to Rose d’Homme, I realized that there wasn’t one. Voleur de Roses by L’Artisan is much more about patchouli that it is about roses, Rose de Nuit by Serge Lutens is a darker, amber-heavy chypre that has a very powdery feel to my nose, Une Rose by Frederic Malle is darker still, a woody and earthy blend with a piercing geranium note. Compared to these three rose fragrances that could be also considered unisex, Rose d’Homme, though also somewhat a nocturnal, deep scent, is much softer, more “rounded” and understated, and, to me, much more wearable and enjoyable. Rose d’Homme is available at Aedes, $98.00 for 3,4oz. 
*The photo is from Aedes.com. This is high on my wishlist and one of the best of the Rosine line, imho. Thanks for the great review! The same here, on both counts. I also think this is one of Rosine's best and I would love to eventually have a bottle. You make me want to leave this review right now and go order a bottle!!! I've always shied away from scents that are labelled for men because I think that they would be overly masculine, but I'm slowly finding out that's not the case. It was a pleasure reading this! I gave my sample to my husband. I'm going to retrieve it now. Interesting, thanks for such a great review. I would have never tried this one. La Rose and Rose d'Ete are my faves from this line. I have so many favorites in the Rosine line. I guess I would say Un Zest and Flamenca are sharing the Top 1 spot. I love Rosine. Love. I also found when I looked at Basenotes last night that I actually like a lot of fragrances that are the mixed use or masculines. This one sounds like it will be among that category when it joins my collection ASAP. I like Rose d'Homme, but it does strike me as more masculine than I would have liked for myself, because it is essentially a rose fougere, and fougere is a category that I associate mostly with masculine fragrances. I do agree that it is lovely though--crisp and warm at the same time. They should just stop doing this masculine-feminine thing. It confuses and often puts off people. I know I often overlook scents that have "homme" in the title or are said to be "for men". It is strange how I used to loathe patchouli and now, when it is in small doses, I practically cannot smell it, as if my skin absorbs it. Or – in big doses, like in Borneo or Lust – it just doesn't bother me anymore. As for lavender... I am craving it these days. Rose d'Homme is quite dry and, in places, crisp. But to me, there is 40% of dryness-crispness and 60% of softness and warmth. No, make it 35% and 75% :-) And so, I enjoy it very much.
I am not ordinarily a fan of dry scents at all. Hmmmmmmm... now that I'm learning to love the rose, maybe this is one I should investigate? I wouldn't miss the patchouli either. How vetiver-y? That's not always a note that loves me. DH wasn't crazy about the rose, but it's the lavender that isn't working for me. The vetiver note is there only briefly in the beginning. I think there is a chance you might like it!
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from random import randint
import re

import util.cfg


class Quizz:
    """Simple question/answer quizz engine for a bot channel.

    Questions are stored as a mapping {question text: answer regex} and
    persisted to extern/quizz/questions.json via util.cfg.
    """

    # Play modes; only stored on the instance, interpretation is up to
    # the caller.
    MODE_ONESHOT = 1
    MODE_REGULAR = 2
    MODE_PARTY = 3

    def __init__(self, botName, channel, defaultMode=MODE_ONESHOT):
        # botName is currently unused but kept for interface compatibility
        # with callers.
        util.cfg.default = {}
        self.questions = util.cfg.load("extern/quizz/questions.json")
        self.mode = defaultMode
        self.channel = channel
        # Question currently awaiting an answer, or None.
        self.activeQuestion = None

    def randomQuestion(self):
        """Pick a random question, mark it active and return it.

        Assumes at least one question is loaded; raises ValueError from
        randint on an empty question set.
        """
        self.activeQuestion = list(self.questions.keys())[randint(0, len(self.questions) - 1)]
        return self.activeQuestion

    def checkAnswer(self, answer):
        """Check `answer` against the active question's answer pattern.

        Returns True and clears the active question on a regex match;
        returns False otherwise (the question stays active).
        """
        # Use `is not None` rather than `!= None` (PEP 8 identity check).
        if self.activeQuestion is not None and \
                re.search(self.questions[self.activeQuestion], answer) is not None:
            self.activeQuestion = None
            return True
        return False

    def questionPending(self):
        """Is there an active question?"""
        return self.activeQuestion is not None

    def setQuestion(self, question, answerPattern=" "):
        """Store a question with its answer regex and persist all
        questions to disk."""
        self.questions[question] = answerPattern
        util.cfg.save(self.questions, "extern/quizz/questions.json")
I have a question to what, for me, is an absolute show-stopping issue. Using all methods of access, CIFS/WebDAV/web interface, to access Alfresco is there a global way (as in set it once for an entire volume) to ensure that the creation date and the modified date of any file copied from a traditional file system to Alfresco is preserved and maintained? A natural progression of that question is can the same be said when copying a file from Alfresco to a traditional file system? For any installation of Alfresco into an organization to replace windows file services there must be a ‘get comfortable’/acceptance/pilot phase, which means the solutions have to coexist. The smaller the size of the business the more true this is, since a small business is less likely to have the resources to forklift over to Alfresco. Users rely primarily on two methods to determine whether a given file is what they want, short of opening it: file name and the dates. Also, when users do their own manual backups or versioning, they use dates as a determinant. Certainly the software they may use will. While you may argue that Alfresco can do a better job than users or other software, this is a chicken and egg scenario, beginning with preservation of the vital basics: names, dates, & content. The dates (other than last accessed) of a file, unchanged by a user, have to be immutable across a network or organization. After pouring through these forums I am more confused now than before. I keep reading entries containing arguments about metadata and items external to Alfresco. Then there is the supposed need to do things one file type at a time. File system dates are metadata. They are a universal attribute of all files. Before I care about the metadata inside my word or PDF document, I need to care about what the file system says. Also I’ve heard arguments that Alfresco doesn’t handle dates external to Alfresco. Well my network is my entire problem domain, not a single product. 
If Alfresco is to be part of the solution it needs to work in that entire space rather than take a NIH approach. Can anyone help with this or clear up possible misconceptions for me. But I think its worth raising an Enhancement request for this. From looking at these forums the discussion seems to have got hung up with setting alfresco's read only properties which of course you quite rightly shouldn't be able to do. However the crucial insight for me is that the file system projections probably shouldnt be tied to the meta-data's cm:created and cm:modified properties. I found https://issues.alfresco.com/jira/browse/ENH-312 and added a link to this thread. thanks for the response. i'm looking into the link you provided. do I understand it correctly that it is still not possible to preserve the original document dates when inserting into Alfresco ? Or is there now (version 3.2 and above) a solution available ? Agreed. This might be a showstopper, having to custom develop something which is that basic. This issue is nothing short of a major show-stopper. Because of the inability to do something so basic and essential I had to walk away from the product. Now though we are up to version 3.4, where the version was 3.2 when I first asked. Has anything changed? Has this most basic need and product oversight been fixed yet? I've just tested 4.0a and it would appear this problem still exists. While I can appreciate the comment by MikeH from Oct-2010, the inability of Alfresco to preserve something as fundamental and essential as creation and modified date from the source file system is a non-starter. Is it possible I missed something in configuration? These issues have been well covered in Martin Bergljung's book Alfresco 3 Business Solutions (Packt) in the section starting pp346 on Document migration. Not saying all the answers are there but the problems are understood and some solutions provided. 
One tool you could investigate and try is the Bulk Upload Tool http://code.google.com/p/alfresco-bulk-filesystem-import/. This was changed to make it possible in, I think, 3.4. As you note above the bulk import works, as do tools like RoboCopy over CIFS. There still may be some use cases that don't or can't work; these need identifying and looking at. Ultimately the solution that worked was using a Java-based file manager called muCommander. On a fresh Yosemite install, I ran into a few hiccups. First, I needed the applicable Java version, found here. Next, OSX threw completely useless errors describing muCommander as "broken" and needing deletion or "not working on this Mac" and the like. These utterly unhelpful (deceitful, even) messages all related to the fact that OSX security settings prevented the application from running. This was easily changed in System Preferences | Security & Privacy. You should know what you are doing before changing those settings, but that doesn't mean you shouldn't be told that they are there. MuCommander allows you to mount the destination SMB share to Alfresco easily, and apparently by default preserves file timestamps. It also has a more granular, file-level left/right type GUI as well. Problem solved. Since this thread has come up again I thought I should also mention that there's an option in FileZilla to preserve original dates that works with Alfresco's FTP service. First, thanks to MRogers for the FTP tip (haven't tried it yet as I typically have FTP off on installs). Apple seems to be regularly messing with the SMB implementation in OSX, probably in attempts to make sure Macs play nice with NAS devices and Windows shares. In any case, Apple made changes in OSX 10.10.2 and as of that OS, MuCommander on Mac no longer preserves file creation metadata from legacy file systems. I now can't find any SMB or WebDAV approaches or applications to address this directly in MacOS.
import numpy as np
import numpy.linalg as npla
import scipy.linalg as la
import scipy.special as sp

from counting_statistics.lindblad_system import LindbladSystem
#from counting_statistics import optimized_funcs

class FCSSolver(LindbladSystem):
    '''
    Full counting statistics (FCS) solver for Markovian systems that can be
    expressed in Lindblad form, with counting transitions occurring to a
    single state (i.e. a single drain lead in a standard electron transport
    setup, infinite voltage bias / unidirectional transport).

    Construct either directly from a Liouvillian, jump superoperator and
    population (trace) vector, or from Hilbert-space operators via the
    `from_hilbert_space` classmethod.

    Original development notes:
    - NEED TO MAKE COMPATIBLE WITH PYTHON 2 AND 3; also need to decide how
      to deal with numpy Matrix objects as well as ndarrays.
    - Zero-frequency cumulants are generated to arbitrary order with a
      recursive scheme following Flindt et al. 2010 (could be optimized
      with numba); mean/noise/skewness are hard coded for speed and so the
      specific structure of those equations is readable.
    - finite_freq functions can almost certainly be optimized with numba
      or cython, or at least vectorized wrt the frequency values.
    - Need docs with references and examples in a jupyter notebook on
      github.  Maybe implement non-Markovian counting stats at some point.
      May want experimental sparse-matrix support, minimizing the need to
      convert to dense for the pinv operation (maybe in an FCSSolverHEOM
      class that extends HEOMSystem).
    '''
    
#     def __init__(self, H, D_ops, D_rates, jump_idx, reduce_dim=False):
#         self.__watch_variables = ['H', 'D_ops', 'D_rates', 'jump_idx', 'reduce_dim'] # could get this with inspect
#         self.__cache_is_stale = True
#         
#         LindbladSystem.__init__(self, H, D_ops, D_rates, reduce_dim=reduce_dim)
#         self.jump_idx = jump_idx
    
    def __init__(self, L, jump_op, pops, from_hilbert_space=False):
        # L: Liouvillian matrix acting on the vectorized density matrix.
        # jump_op: counting (jump) superoperator.
        # pops: vector that extracts populations/trace from a vectorized
        #       density matrix (used for normalization and expectations).
        # NOTE: these assignments run before __watch_variables exists, so
        # __setattr__ takes its AttributeError branch for them (harmless).
        self.L = L
        self.jump_op = jump_op
        self.pops = pops
        self.from_hilbert_space = from_hilbert_space
        # Attributes watched by __setattr__; assigning any of them marks
        # the cached steady state etc. as stale.  Which set is watched
        # depends on how the instance was constructed.
        self.__watch_variables = ['H', 'D_ops', 'D_rates', 'jump_idx', 'reduce_dim'] \
                                    if self.from_hilbert_space else ['L', 'jump_op', 'pops']
        self.__cache_is_stale = True
    
    @classmethod
    def from_hilbert_space(cls, H, D_ops, D_rates, jump_idx, reduce_dim=False):
        '''Alternate constructor from Hilbert-space operators.

        H: system Hamiltonian; D_ops/D_rates: Lindblad operators and rates;
        jump_idx: boolean/int mask selecting which Lindblad channels are
        counted; reduce_dim: passed through to LindbladSystem.
        '''
        # create instance of subclass
        instance = object.__new__(cls)
        # initialize superclass first to allow construction of Liouvillian etc by LindbladSystem
        super(FCSSolver, instance).__init__(H, D_ops, D_rates, reduce_dim=reduce_dim)
        instance.jump_idx = jump_idx
        # NOTE(review): instance.pops is assumed to be provided by
        # LindbladSystem.__init__ at this point -- confirm.
        L = instance.liouvillian()
        # initialize subclass
        instance.__init__(L, instance.construct_jump_operator(L), instance.pops, from_hilbert_space=True)
        return instance
    
    def __setattr__(self, name, value):
        '''Overridden to watch selected variables to trigger cache refresh.'''
        try:
            if name in self.__watch_variables:
                self.__cache_is_stale = True
        except AttributeError:
            # stop an Error being thrown when self.__watch_variables is first created on class instantiation
            # maybe throw a warning here?
            pass
        object.__setattr__(self, name, value)
    
    def refresh_cache(self):
        '''Refresh necessary quantities for counting statistics calculations.'''
        if self.from_hilbert_space:
            # Rebuild Liouvillian-level quantities from the Hilbert-space
            # definition (self.I is assumed to come from LindbladSystem).
            self.pops = self.I.flatten()
            self.L = self.liouvillian()
            self.jump_op = self.construct_jump_operator(self.L)
        # Steady state is always recomputed from the current L and pops.
        self.ss = self.stationary_state(self.L, self.pops)
        self.__cache_is_stale = False
    
    def construct_jump_operator(self, L):
        '''Sum kron(A,A) of all jump_ops.'''
        # NOTE(review): the counting superoperator is built as
        # kron(A, A); for complex jump operators kron(A, A.conj()) would
        # normally be expected -- confirm this is intentional (real D_ops).
        jump_op = np.zeros((self.sys_dim**2, self.sys_dim**2))
        for i in np.flatnonzero(self.jump_idx):
            jump_op += self.D_rates[i] * np.kron(self.D_ops[i], self.D_ops[i])
        if self.reduce_dim:
            # Remove the same rows/columns that were dropped from the
            # reduced Liouvillian; idx_to_remove is computed lazily.
            try:
                jump_op = np.delete(jump_op, self.idx_to_remove, 0)
                jump_op = np.delete(jump_op, self.idx_to_remove, 1)
            except AttributeError:
                self.idx_to_remove = self.indices_to_remove(L)
                jump_op = np.delete(jump_op, self.idx_to_remove, 0)
                jump_op = np.delete(jump_op, self.idx_to_remove, 1)
        return jump_op
    
    @staticmethod
    def stationary_state(L, pops):
        '''Compute the stationary state as the (assumed unique) nullspace
        of L via SVD, normalized so that np.dot(pops, ss) == 1.

        Should test for number of nullspaces found somewhere, possibly here,
        as the system is set up under the assumption it is fully connected
        and has a single stationary state.  Send a warning if there are
        multiple nullspaces.'''
        # calculate
        u,s,v = la.svd(L)
        # check for number of nullspaces
        # normalize
        ss = v[-1].conj() / np.dot(pops, v[-1])
        return ss
    
    def mean(self):
        # Stationary mean counting rate: <pops| jump_op |ss>.
        if self.__cache_is_stale:
            self.refresh_cache()
        return np.real(np.dot(self.pops, np.dot(self.jump_op, self.ss)))
    
    @staticmethod
    def pseudoinverse(L, freq, Q):
        # Projected resolvent R(freq) = Q (i*freq - L)^+ Q.
        return np.dot(Q, np.dot(npla.pinv(1.j*freq*np.eye(L.shape[0]) - L), Q))
    
    @staticmethod
    def Q(L, steady_state, pops):
        # Projector onto the complement of the stationary state.
        return np.eye(L.shape[0]) - np.outer(steady_state, pops)
    
    def noise(self, freq):
        # Finite-frequency noise S(freq); accepts a scalar or a sequence
        # of frequencies and returns a matching scalar or array.
        if self.__cache_is_stale:
            self.refresh_cache()
        # handle either array or scalar freq values
        scalar = False
        if np.isscalar(freq):
            scalar = True
            freq = np.array([freq])
        elif isinstance(freq, list):
            freq = np.array(freq)
        # do the calculation
        Q = self.Q(self.L, self.ss, self.pops)
        noise = np.zeros(freq.size, dtype='complex128')
        for i in range(len(freq)):
            R_plus = self.pseudoinverse(self.L, freq[i], Q)
            R_minus = self.pseudoinverse(self.L, -freq[i], Q)
            noise[i] = np.dot(self.pops, np.dot(self.jump_op \
                            + np.dot(np.dot(self.jump_op, R_plus), self.jump_op) \
                            + np.dot(np.dot(self.jump_op, R_minus), self.jump_op), self.ss))
        return np.real(noise[0] if scalar else noise)
    
    def skewness(self, freq1, freq2):
        # Finite-frequency skewness S(freq1, freq2); unlike noise() this
        # expects array-like freq1/freq2 (no scalar handling).
        if self.__cache_is_stale:
            self.refresh_cache()
        Q = self.Q(self.L, self.ss, self.pops)
        skewness = np.zeros((freq1.size, freq2.size), dtype='complex128')
        for i in range(len(freq1)):
            for j in range(len(freq2)):
                '''Currently ignoring zero-frequency limit as its a bit more
                complicated than for the noise.  This should cause a test
                failure until its fixed.'''
                if freq1[i] == 0 or freq2[j] == 0 or freq1[i] == freq2[j]:
                    continue
                R1 = self.pseudoinverse(self.L, -freq1[i], Q)
                R2 = self.pseudoinverse(self.L, freq1[i]-freq2[j], Q)
                R3 = self.pseudoinverse(self.L, freq2[j], Q)
                R4 = self.pseudoinverse(self.L, -freq2[j], Q)
                R5 = self.pseudoinverse(self.L, freq1[i], Q)
                R6 = self.pseudoinverse(self.L, freq2[j]-freq1[i], Q)
                jump_op_average = np.dot(self.pops, np.dot(self.jump_op, self.ss))
                skewness[i,j] = np.dot(self.pops, np.dot(self.jump_op \
                            + np.dot(self.jump_op, np.dot(R1+R2+R3, self.jump_op)) \
                            + np.dot(self.jump_op, np.dot(R4+R5+R6, self.jump_op)) \
                            + np.dot(np.dot(self.jump_op, R1), np.dot(self.jump_op, np.dot(R4+R6, self.jump_op))) \
                            + np.dot(np.dot(self.jump_op, R2), np.dot(self.jump_op, np.dot(R4+R5, self.jump_op))) \
                            + np.dot(np.dot(self.jump_op, R3), np.dot(self.jump_op, np.dot(R5+R6, self.jump_op))) \
                            + (-jump_op_average/(1.j*freq1[i])) * np.dot(self.jump_op, np.dot(R4-R2+R6-R3, self.jump_op)) \
                            + (jump_op_average/(1.j*freq1[i]-1.j*freq2[j])) * np.dot(self.jump_op, np.dot(R4-R1+R5-R3, self.jump_op)) \
                            + (jump_op_average/(1.j*freq2[j])) * np.dot(self.jump_op, np.dot(R6-R1+R5-R2, self.jump_op)), self.ss))
        return np.real(skewness)
    
    def second_order_fano_factor(self, freq):
        # Fano factor F2 = S(freq) / mean.
        return self.noise(freq) / self.mean()
    
    def third_order_fano_factor(self, freq1, freq2):
        # Fano factor F3 = S(freq1, freq2) / mean.
        return self.skewness(freq1, freq2) / self.mean()
    
    def binom_coeff_vector(self, n):
        '''Generates vector of binomial coefficients from m=1 to n, reversed.'''
        return sp.binom(n, range(n,0,-1))
    
    def generate_cumulant(self, n):
        '''Generates zero-frequency cumulant to arbitrary order using
        recursive scheme (Flindt et al. 2010).

        Also could use a function to generate the next level of the
        hierarchy from a previously generated set of cumulants and states
        so we don't need to start from the beginning each time.  It would
        also be cool to dynamically generate a function for the requested
        cumulant which a user can save.  Currently every time a parameter
        changes the cumulant needs to be regenerated, which is probably
        going to be quite inefficient for large cumulants.'''
        if self.__cache_is_stale:
            self.refresh_cache()
        # Zero-frequency projected pseudoinverse, reused at every level.
        R = self.pseudoinverse(self.L, 0, self.Q(self.L, self.ss, self.pops))
        bc_vector = self.binom_coeff_vector(n)
        cumulants = np.zeros(n)
        # states[m] is the m-th order auxiliary state; states[0] is ss.
        states = np.zeros((n+1, self.L.shape[0]), dtype='complex128')
        states[0] = self.ss
        
        def recursion(m, cumulants, states):
            # check n is an integer >= 1
            if m > 1:
                # get previous cumulants and states
                cumulants, states = recursion(m-1, cumulants, states)
            elif m == 1:
                # lowest level cumulant
                cumulants[0] = np.dot(self.pops, np.dot(self.jump_op, states[0]))
                states[1] = np.dot(R, np.dot(cumulants[0]*np.eye(self.L.shape[0]) - self.jump_op, states[0]))
                #print states[1] + np.dot(R, np.dot(self.jump_op, states[0]))
                return cumulants, states
            else:
                raise ValueError("Cannot calculate cumulants for n < 1")
            # calculate cumulant at current level
            #cumulants[m-1] = np.dot(self.pops, np.dot(self.jump_op, np.dot(bc_vector, states[:m])))
            for i in range(m):
                cumulants[m-1] += bc_vector[i]*np.dot(self.pops, np.dot(self.jump_op, states[i]))
            # construct 3D matrix
            #W = np.vstack([bc_vector[i]*(cumulants[i]*np.eye(self.L.shape[0]) - self.jump_op)[np.newaxis,...] for i in range(m)])
            W = np.sum([np.dot(bc_vector[i]*(cumulants[i]*np.eye(self.L.shape[0]) - self.jump_op), states[i]) for i in range(m)], axis=0)
            states[m] = np.dot(R, W)
            return cumulants, states
        
        return recursion(n, cumulants, states)
    
    def generate_fano_factor(self, n):
        # n-th order Fano factor from the recursively generated cumulant.
        return self.generate_cumulant(n)[0][n-1] / self.mean()
Clash Royale Review – A Hybrid of Card Games, Moba, and Awesome. The thing about mobile gaming is that you never really know when a big, defining game comes around. Flappy Bird takes over the world out of nowhere. Crossy Road sure was fun and well-made, but I never saw it becoming the smash hit it became. It inspired countless imitators. Candy Crush Saga and its sequels somehow became this generation’s defining match-3 game instead of Bejeweled and its brethren. And of all the competitive simulation games, Clash of Clans defined the raiding-strategy genre (and has some awesome exploits and cheats). But there are plenty of alternate universes where some other games are massive smashes instead of these. In addition, Spider Solitaire is a multiplayer game that captures the emotions of players around the world. Joining this game, your task is to use the mouse to group cards into the stacks around the board. Collect cards of the same suit from the king down the Ace to remove them from the board. For good performance in this game, players need to have good reaction skills. Click on the “play now” button to play and feel! From day 1 of its soft launch, it became clear right away that Supercell had a hit on its hands. They figured something out that countless MOBAs, first-person shooters, and any other game have failed to do. They made an intense real-time multiplayer game on mobile that engages players and makes money without feeling unfair. It’s easy to spend tons of money on Clash Royale, but you’ll sink hours into it just because you enjoy it. I go in deep about the game in an earlier article, but Clash Royale is best described as a collectible card game meets a real-time strategy game and a MOBA. You have a deck of 8 cards, with 4 in your hand at one time. You use elixir, an energy unit that recharges over time, to summon cards onto the battlefield. 
Then, you deploy them to attack the enemy’s crown towers in the 2 lanes, each having a tower, along with a center king tower. Destroy a crown tower, and you can go after the king tower. Destroy the king tower, and you win, though you have to do so in 3 minutes, with the last minute providing double elixir. Otherwise, the player who has destroyed more crown towers is the victor. If towers are tied, then there’s a 1-minute sudden death overtime where the first person to destroy a tower – crown or king – wins. The king tower does more damage to incoming units and hits harder. Besides, Checkers is an addictive game, getting a lot of love from players around the world. You need to have good skills to be able to score high in this game. Your task is to eliminate all the pieces of your opponent from the board. If you have free time, the Checkers game will be a perfect choice that we recommend. Click the “play now” button to play and feel! What’s clever about the gameplay is that it’s all so simple to learn and play with. You drop units in, and they follow their behaviors without any other command. Where you place units can be important, but not as much as the lower-level strategy of managing your deck, elixir, and current cards in relation to your opponent’s situation. You become knowledgeable about the game at a level where you feel comfortable diving in. Before long, you’re joining a clan, experimenting with decks, and getting sucked into the metagame. This happens so fast, you won’t know what hit you. And because games only take 3 or 4 minutes, you can feel like you’re getting a lot done in a short session. Compare this to many popular multiplayer games, where they can feel overwhelming for too long. Even something like Hearthstone has a steep barrier to entry today. Clash Royale limiting certain cards to a tier of the game helps a lot to make sure that you’re not getting an information overload at any point.
For the sake of being thorough here, let’s assume this is somehow the first you’ve heard of Clash Royale. The easiest way to describe it is as a collectable card game where your cards represent real-time strategy game-like units which are dropped onto MOBA-ish multi-lane battlefields with two towers and a base you need to attack while defending your own. That’s quite a mouthful, and it sounds complicated, but the magic of Clash Royale is it’s all presented in a way that I really don’t think you need to know anything about card games, RTS games, MOBAs, or the emergent strategies in any of those genres because everything has been simplified and streamlined to a masterful extent. If you’ve never played a game like that before, even as great as the Hearthstone tutorial and onboarding process is, you’re still talking more of a learning wall and less of a learning curve. Thirty cards is substantially easier to manage than Magic’s sixty, but you’ve got to either really know what you’re doing or be a supremely analytical player to be able to make heads or tails over whether running one of a particular card is doing much better or worse than running two of that same card. Just how difficult it is to create a competitive deck for most players leads to just looking up what other players are playing, copying those decks, and never really ever needing to learn how to build a deck of their own. A typical game then involves initially choosing your eight cards which hopefully meld together well in some kind of cohesive strategy with answers to the different types of threats you might come across. From there, you search for an opponent, and are matched up with someone who has a similar trophy level as you (more on this later). From there, you dump out cards, and hopefully manage to knock down one or more of their crown towers while protecting your own, and ultimately destroy your opponent’s main King tower. 
Games have a hard limit of taking absolutely no longer than four minutes, which is really just another clever wrinkle in the game. Here’s the gist- Cards are rewarded through opening chests. Every four hours, you get one free chest and you have two slots for these free chests, so to maximize your freebies you’ll want to be checking in on the game at least once every eight hours. After completing the tutorial, winning battles awards chests of different levels of rarity (rarer chests include more cards and gold) and you can hold a maximum of four of these prize chests. Silver Chests, which are the most common prize chest to come across take three hours to unlock with the Super Magical Chest, currently the best chest in the game, taking an entire day to unlock. Only one chest unlock timer can be rolling at a time, so there’s a bit of strategy involved with what you unlock and when. For instance, if you’ve got a Golden Chest in your inventory, you’ll likely want to hang on to that to unlock it overnight as that’s an eight hour timer you can have counting down while you sleep. If you have four chests in your inventory, you cannot earn more through winning games until you unlock one and thereby open up that inventory slot. Of course, you can also pay to skip any of these timers, and Clash Royale shares a similar premium currency to Clash of Clans in that they’re using Gems. Like any free to play game with timers, the amount of premium currency it takes to skip a timer scales up significantly with the amount of time remaining. Additionally, like all these games, the premium currency is doled out at regular intervals although it takes a while to accumulate any substantial number. Gems are also used to buy gold and chests from the in-game store. Chests purchased this way are opened immediately and aren’t impacted by you potentially having four chests in your inventory already. I encourage everyone to give this game a try, even if you’re a vigilant hater of free to play. 
Monetization methods aside, you’ll still be able to see what a clever formula Supercell has stumbled upon here with this hybrid of card games, strategy games, and MOBAs. Hard limits on session time make it a phenomenal game to play on the go, and it can be played in portrait mode in one hand, which only serves to make things easier. In less than a day it’s the top free app, and steadily climbing up the top grossing charts, so if you dig this style of gameplay but don’t particularly like certain things about Clash Royale (like chest timers or whatever else) just wait a while.
#!/usr/bin/env python
import os
import time
import RPi.GPIO as GPIO
from flask import Flask

app = Flask(__name__)

# Load the 1-Wire kernel modules required by the DS18B20-style sensor.
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')

tempsensor_sn = '28-000005abe684'  # Varies depending on sensor
sensor = '/sys/bus/w1/devices/' + tempsensor_sn + '/w1_slave'

# Sets pins 19(r), 21(g), and 23(b) as output pins for the RGB LED.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(19, GPIO.OUT)
GPIO.setup(21, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)


def raw_data():
    """Retrieve the raw data lines from the temperature sensor device file."""
    # 'with' guarantees the handle is closed even if readlines() raises.
    with open(sensor, 'r') as f:
        return f.readlines()


@app.route('/temp')
def get_temp():
    """Return the current Fahrenheit temperature.

    Flask view functions must return a string (or response object), not a
    float, so the reading is converted with str() before being returned.
    If the sensor output is malformed, a 503 error is returned instead of
    the previous behaviour of returning None.
    """
    data = raw_data()
    # The first line ends in 'YES' once the sensor's CRC check passes;
    # poll until a valid reading is available.
    while data[0].strip()[-3:] != 'YES':
        time.sleep(0.2)
        data = raw_data()
    temp_val = data[1].find('t=')
    if temp_val == -1:
        # Malformed sensor output: report an error rather than crashing
        # with a None return value.
        return 'Temperature reading unavailable', 503
    temp_string = data[1].strip()[temp_val + 2:]
    # Raw value is millidegrees Celsius; convert to Fahrenheit.
    temp_fahrenheit = 32.0 + ((float(temp_string) / 1000.0) * 1.8)
    return str(temp_fahrenheit)


def set_led(r, g, b):
    """Drive the three LED channel pins.

    NOTE(review): set_color() passes 0 for the channels that should light
    up, which suggests common-anode wiring (0 = on) -- confirm against the
    actual circuit.
    """
    GPIO.output(19, r)
    GPIO.output(21, g)
    GPIO.output(23, b)


def set_color(color):
    """Receive the name of a color and set the LED accordingly.

    Unknown color names are ignored (LED state is left unchanged).
    """
    colors = {
        'red': (0, 1, 1),
        'green': (1, 0, 1),
        'blue': (1, 1, 0),
        'yellow': (0, 0, 1),
        'magenta': (0, 1, 0),
        'cyan': (1, 0, 0),
        'white': (0, 0, 0),
    }
    if color in colors:
        set_led(*colors[color])


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=80, debug=False)
Archechiniscus marci. Figure from Pollock (1976). Archechiniscus is a genus of three species of marine tardigrade found in littoral habitats. They can be readily distinguished from other marine tardigrades by their unique arrangement of claws: two pairs, with the internal pair on the end of a long pair of toes but the external pair set directly onto the foot. Most of the other distinguishing features of Archechiniscus are more negative: they lack conspicuous segmentation or ornamentation. The presence of cephalic appendages marks Archechiniscus as belonging to the heterotardigrades rather than the eutardigrades; within the Heterotardigrada, it belongs to the paraphyletic 'arthrotardigrade' group. Opinions have differed as to whether it should be placed in the family Halechiniscidae or in its own separate family; Jørgensen et al. (2010) identified the broad Halechiniscidae as polyphyletic and plumped for placing Archechiniscus in its own family (though potentially as the sister group of their more restricted Halechiniscidae). As for most marine tardigrades, there doesn't appear to be a great deal of info about the lifestyle of Archechiniscus. Archechiniscus symbalanus got its name due to being collected in association with barnacles (Chang & Rho, 1998) but I don't know what it was doing there. As littoral inhabitants, Archechiniscus are resistant to a higher degree of desiccation than other marine tardigrades (Jönsson & Järemo, 2003) but do not show the extremes of resistance found in some other tardigrades (remember, not all tardigrades are resistant to adverse conditions, and not all tardigrades are resistant to the same adverse conditions). Chang, C.-Y., & H.-S. Rho. 1998. Three new tardigrade species associated with barnacles from the Thai coast of Andaman Sea. Korean Journal of Biological Sciences 2: 323-331. Jönsson, K. I., & J. Järemo. 2003. A model on the evolution of cryptobiosis. Annales Zoologici Fennici 40: 331-340. Jørgensen, A., S. Faurby, J. G. 
Hansen, N. Møbjerg & R. M. Kristensen. 2010. Molecular phylogeny of Arthrotardigrada (Tardigrada). Molecular Phylogenetics and Evolution 54 (3): 1006-1015. Pollock, L. W. 1976. Marine Flora and Fauna of the Northeastern United States. Tardigrada. NOAA: Seattle. I'm mildly chuffed that I got the order right, because now I can reveal that I don't rightly know what a 'digit' in a claw is. Is it just a spur on the claw (i.e. nonmovable?). I am in doubt because I don't know if taxonomists use 'digit' in the same way for other arthropods. For example, in spiders with comb-like structures on the tarsal claws, what do you call the individual spines of the comb? For example, in spiders with comb-like structures on the tarsal claws, what do you call the individual spines of the comb? Teeth, normally. Even if there's only one tooth. However, the tardigrade papers that I read over while researching this post referred to the ventral 'tooth' on the outer claws of this genus as a 'spur'. Ah - I have realised my error - it came from the wording of the overview paper I was reading - "The class Eutardigrada includes the unarmored orders Apochela (terrestrial) and Parachela (primarily terrestrial and freshwater, with a few marine species); their legs terminate in claws without digits". So I assumed there were 'claws with digits' and claws without. In fact, legs terminate either with claws, or with digits + claws. As for getting the order right... that was a fluke!
class ListNode:
    """Doubly linked list node; ``val`` holds the cache key."""

    def __init__(self, x):
        self.val = x
        self.pre = None
        self.next = None


class LRUCache:
    """Least-recently-used cache backed by a dict plus a doubly linked list.

    The list runs from most recently used (just after ``head``) to least
    recently used (just before ``tail``); ``head`` and ``tail`` are
    sentinel nodes so splicing never needs None checks.
    """

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.capacity = capacity
        self.size = 0
        self.head = ListNode(0)
        self.tail = ListNode(0)
        self.head.next = self.tail
        self.tail.pre = self.head
        self.data = dict()  # key -> cached value
        self.lru = dict()   # key -> list node for that key

    def insert_head(self, node):
        """Splice ``node`` in directly after the head sentinel."""
        first = self.head.next
        node.pre, node.next = self.head, first
        first.pre = node
        self.head.next = node

    def move_head(self, key):
        """Unlink the node for ``key`` and re-insert it at the front."""
        node = self.lru[key]
        node.pre.next, node.next.pre = node.next, node.pre
        self.insert_head(node)

    def delete(self):
        """Evict the least-recently-used entry (the node just before tail)."""
        victim = self.tail.pre
        victim.pre.next = self.tail
        self.tail.pre = victim.pre
        evicted_key = victim.val
        del self.data[evicted_key]
        del self.lru[evicted_key]

    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if key not in self.data:
            return -1
        self.move_head(key)
        return self.data[key]

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if key in self.data:
            # Overwrite in place and promote to most-recently-used.
            self.data[key] = value
            self.move_head(key)
            return
        self.data[key] = value
        node = ListNode(key)
        self.lru[key] = node
        if self.size == self.capacity:
            self.delete()
            self.size -= 1
        self.insert_head(node)
        self.size += 1
For Winter 2016, I created a limited-edition triptych of embroidered tops as a collaboration with Goodwin. I chose 3 poses from my Rohmer series in 3 colors inspired by Goodwin's instagram color palette, and embroidered them on the Devaki raglan crop. Exclusively available through Goodwin.
# The MIT License (MIT)
# Copyright (c) 2016-2017 HIS e. G.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import json
import datetime

# Standard HTTP reason phrases for the status codes this module emits.
# Previously every status line said "Ok" regardless of the code
# (e.g. "Status: 403 Ok"), which is misleading in logs and to clients.
_REASON_PHRASES = {
    200: "OK",
    400: "Bad Request",
    403: "Forbidden",
}


def ret200(cause):
    """Send a plain-text 200 OK CGI response with ``cause`` as the body."""
    retHttp(200, cause)


def ret403(cause):
    """Send a plain-text 403 Forbidden CGI response with ``cause`` as the body."""
    retHttp(403, cause)


def ret400(cause):
    """Send a plain-text 400 Bad Request CGI response with ``cause`` as the body."""
    retHttp(400, cause)


def retHttp(status, cause):
    """Print a plain-text CGI response to stdout.

    :param status: numeric HTTP status code
    :param cause: response body text
    """
    reason = _REASON_PHRASES.get(status, "Unknown")
    print("Status: " + str(status) + " " + reason + "\r")
    print("Content-Type: text/plain; charset='utf-8'\r")
    print("\r")
    print(cause)


def retJson(data):
    """Send a JSON object to the client via a CGI response on stdout.

    datetime/date values are serialized as ISO-8601 strings.  Any other
    non-JSON-serializable object is rendered as ``null`` because the
    ``default`` handler returns None for it (pre-existing behaviour,
    kept for compatibility).
    """
    print("Status: 200 OK\r")
    print("Content-Type: application/json; charset='utf-8'\r")
    print("\r")
    date_handler = lambda obj: (
        obj.isoformat()
        if isinstance(obj, (datetime.datetime, datetime.date))
        else None
    )
    jsonString = json.dumps(data, default=date_handler)
    print(jsonString)
Past Human is an informative and available consultant for all these drawn to the constructing sciences of genetic engineering, bioprinting, and human cloning. Illustrating the guidelines just about famous technology fiction motion pictures and novels, the writer offers a different perception into and realizing of ways genetic manipulation, cloning, and different novel bio-technologies will someday let us remodel our species. It additionally addresses the valid issues approximately “playing God”, whereas even as embracing the gains of the clinical trajectory that would bring about our transhuman destiny. Alan is a middle-aged entrepeneur in modern Toronto, who has dedicated himself to solving up a home in a bohemian local. This evidently brings him involved with the home packed with scholars and layabouts round the corner, together with a tender girl who, in a second of tension, finds to him that she has wings--wings, additionally, which develop again after each one try and minimize them off. Within the years because the occasions of famous person Wars: The Phantom risk, the Republic has endured to fall apart, and increasingly more, the Jedi are had to aid the galactic executive keep order. As megastar Wars: Episode II opens, Obi-Wan Kenobi and Anakin Skywalker have simply lower back from a undertaking on a global known as Ansion. The destiny, after the genetic experiments of the good general practitioner Patricia R. Durham, leaves all ladies on the earth with snakes rather than hair, and the original and terrifying skill to blind, or even kill, a guy with an easy stare. The birthrate is at rock bottom, the inhabitants dwindles, the world&apos;s governments are all yet disbanded. One lady walks throughout Amerika with no vacation spot. students get under the influence of alcohol and reason hassle. A guy in a bar within the mid-west stops believing in love. A radio station announces outdated physician Patricia R. Durham interviews all day, each day. 
An old decommissioned factory crushes a tourist into an eight-inch cube of flesh and bone. Fascinated by the world of coincidence and synchronicity, George Daley decides to write a book on the subject. While doing some research, the most remarkable coincidence of his life occurs—he runs into Larry Hart, the identical twin brother he never knew he had. But as George gets to know this newfound brother, strange things begin to happen.
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Support for creating Stochastic Tensors. See the @{$python/contrib.bayesflow.stochastic_tensor} guide. @@BaseStochasticTensor @@StochasticTensor @@MeanValue @@SampleValue @@value_type @@get_current_value_type """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.contrib.bayesflow.python.ops.stochastic_tensor_impl import * # pylint: enable=wildcard-import from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [ "BaseStochasticTensor", "StochasticTensor", "ObservedStochasticTensor", "MeanValue", "SampleValue", "value_type", "get_current_value_type", ] remove_undocumented(__name__, _allowed_symbols)
The industry is promising a glittering future of autonomous vehicles moving in harmony like schools of fish. That can’t happen, however, until carmakers answer the kinds of thorny philosophical questions explored in science fiction since Isaac Asimov wrote his robot series last century. For example, should a self-driving vehicle sacrifice its occupant by swerving off a cliff to avoid killing a school bus full of children? Ultimately, the problem with giving a driverless vehicle the power to make consequential decisions is that, like the robots of science fiction, a self-driving car still lacks empathy and the ability to comprehend nuance.
'''
NFI -- Silensec's Nyuki Forensics Investigator

Copyright (C) 2014  George Nicolaou (george[at]silensec[dot]com)
                    Silensec Ltd.

This file is part of Nyuki Forensics Investigator (NFI).

NFI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

NFI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with NFI.  If not, see <http://www.gnu.org/licenses/>.
'''
from IMiscSource import IMiscSource, KnownFile, ParserType, KnownFieldSQL, FieldType
from IMiscSource import KnownField, KnownFieldBin, BinaryClass, BinaryRead
from IMiscSource import KnownFieldXML, ReadTypeXML, Label
from Catalog import Catalog
import ConvertUtils

# Android password-quality constants mapped to their symbolic names
# (values match android.app.admin.DevicePolicyManager.PASSWORD_QUALITY_*).
PASSWORD_QUALITY = {
    0: "PASSWORD_QUALITY_UNSPECIFIED",
    0x8000: "PASSWORD_QUALITY_BIOMETRIC_WEAK",
    0x10000: "PASSWORD_QUALITY_SOMETHING",
    0x20000: "PASSWORD_QUALITY_NUMERIC",
    0x40000: "PASSWORD_QUALITY_ALPHABETIC",
    0x50000: "PASSWORD_QUALITY_ALPHANUMERIC",
    0x60000: "PASSWORD_QUALITY_COMPLEX"
}

def password_type_tostr(val):
    """Return the symbolic name for a password-quality value.

    ``val`` may be an int or a numeric string (as read from the settings
    database); anything unparsable or unknown yields "Unknown".
    """
    try:
        val = int(val)
    except (TypeError, ValueError):
        # Narrowed from a bare "except: pass" so genuine programming
        # errors are no longer silently swallowed.
        return "Unknown"
    return PASSWORD_QUALITY.get(val, "Unknown")

class LockSettings(IMiscSource):
    """Extracts screen-lock information from an Android settings database."""
    # Android versions this parser applies to.
    version = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
    catalog_id = Catalog.CATALOG_DEVINFO
    title = Label( "Screen Lock", "screen_lock" )
    # Location of settings.db inside an Android data image.
    relative_directories = [ "data", "com.android.providers.settings", "databases" ]
    knownfiles = {
        "settings.db": KnownFile(ParserType.TYPE_SQLITE3, {
            Label("Lock Settings", "lock_settings"): [
                KnownFieldSQL(
                    FieldType.TYPE_STR,
                    """
                    SELECT value FROM secure WHERE name = 'lock_screen_owner_info'
                    """,
                    "Owner Info", "lock_screen_owner_info",
                ),
                KnownFieldSQL(
                    FieldType.TYPE_STR,
                    """
                    SELECT value FROM secure WHERE name = 'lockscreen.password_salt'
                    """,
                    "Password Salt", "password_salt",
                ),
                KnownFieldSQL(
                    FieldType.TYPE_STR,
                    """
                    SELECT value FROM locksettings WHERE name = 'lockscreen.password_type'
                    """,
                    "Password Quality", "lockscreen.password_type",
                    converter=password_type_tostr
                ),
            ]
        })
    }
OWNER MEMBER: Shall consist of (1) an owner of rental housing or (2) a licensed Texas Real Estate Broker who manages or operates rental, multi-housing or condominium housing or (3) a management company or management company representative engaged in multi-housing or property management, ownership or owner representation or (4) a property that joins the Lubbock Apartment Association directly. All properties 50 units and over must have their own membership. 1. Maintain and operate multi-family housing communities in accordance with fair and honorable standards of competition, ever mindful of the purposes of the Lubbock Apartment Association, Inc., in compliance with the Bylaws thereof. 3. Not use the National or Texas Apartment Association logos on forms other than official National or Texas Apartment Association lease related forms or give an incorrect impression that a form is approved by the National or Texas Apartment Associations. 4. Not knowingly use any unlawfully reproduced or reprinted National or Texas Apartment Association / Lubbock Apartment Association, Inc. or Lubbockapartments.com forms or publications. 5. Not use the Lubbock Apartment Association, Inc. or Lubbockapartments.com logo in any manner without permission and while using the phrase ‘member of’ above and in conjunction with the logo. 6. Not infringe upon or violate National or Texas Apartment Association’s copyright on any official National or Texas Apartment Association lease forms. 7. Abide by all national, state and local laws of every kind and nature. 8. Promote, employ and maintain a high standard of integrity in the performance of all rental obligations and services in the operation of multi-family housing communities. 9. Strive continually to promote the education and fraternity of the membership and to promote the progress and dignity of the multi-family housing industry in creating a better image of itself in order that the public may be better served. 10. 
Seek to provide better values, so that an even greater share of the public may enjoy the many benefits of multi-family housing living. 11. Establish high ethical standards of conduct with multi-family housing rental agencies, suppliers, and others doing business with the multi-family housing industry. 12. Endeavor to expose all schemes to mislead or defraud the multi-family housing residing public and to aid in the exposure of those responsible. 13. Refrain from attempting to obtain residents, through advertising or otherwise, by means of deceptive, misleading or fraudulent statements, misrepresentations or the use of implications, unwarranted by fact or reasonable probability. Disciplinary action for any violation of the above Code of Ethics shall be delegated to and be the exclusive responsibility of the Board of Directors of the Lubbock Apartment Association, Inc. Whenever herein the singular is used, the same shall include the plural, and words of any gender, shall include each other gender. Being acquainted with the purposes of, and subscribing to the Code of Ethics of the Lubbock Apartment Association, Inc. I hereby apply for membership. I hereby certify that the above information is correct as of this date, and I agree to certify annually hereafter the number of rental units owned or managed on or before the renewal date of the membership. I certify that the above owner and/or management company are authorized to do business under the Texas Real Estate License Act and the Texas Occupations Code as relating to real property and housing. In the event of termination of membership in the Association for any reason, I agree to discontinue the use of LAA, Lubbockapartments.com, TAA and/or NAA insignia and signs in any form.
# -*- coding: utf-8 -*- # Copyright(C) 2012 Florent Fourcot # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. from weboob.capabilities.bill import Subscription from weboob.tools.browser import BasePage __all__ = ['HomePage'] class HomePage(BasePage): def on_loaded(self): pass def get_list(self): for divglobal in self.document.xpath('//div[@class="abonne"]'): for link in divglobal.xpath('.//div[@class="acceuil_btn"]/a'): login = link.attrib['href'].split('=').pop() if login.isdigit(): break divabo = divglobal.xpath('div[@class="idAbonne pointer"]')[0] owner = unicode(divabo.xpath('p')[0].text.replace(' - ', '')) phone = unicode(divabo.xpath('p/span')[0].text) self.browser.logger.debug('Found ' + login + ' as subscription identifier') self.browser.logger.debug('Found ' + owner + ' as subscriber') self.browser.logger.debug('Found ' + phone + ' as phone number') phoneplan = unicode(self.document.xpath('//div[@class="forfaitChoisi"]')[0].text.lstrip().rstrip()) self.browser.logger.debug('Found ' + phoneplan + ' as subscription type') subscription = Subscription(phone) subscription.label = phone + ' - ' + phoneplan subscription.subscriber = owner subscription._login = login yield subscription
Every work place has their own Halloween like scares. The dealership is no different. Here are just a few scary things at the dealership or in the dealer world. What’s spooks you at your dealership? Excitement filled the air as we boarded the plane for another trip to the strip for the Driving Sales Executive Summit. This event continually amazes us each year with Driving Sales recruiting some of the most progressive speakers in the space coming out to reveal industry innovations and collaborate with a wide variety of outstanding peers on the future of the automotive industry. Primed and ready to learn, collaborate and have some fun we snaked through the immense halls of the Bellagio and DSES once again did not disappoint. With re-nowned keynotes including Jared Hamilton, Brian Solis, Bryan Eisenberg, Rand Fishkin, Mike Hudson, Jack Simmons, Florian Zettelmeyer, Adam Justis and 30 break out speakers there was no shortage of quality content. The automotive industry is on the verge of some huge changes and DSES revealed some exceptional insights on where, why and how we are getting to the next level. If you attended DrivingSales Executive Summit this year you were likely saying this to yourself by the end of the conference. The vast majority of keynote presenters this year mentioned the disparity between traditional retail automotive and the consumer’s perception of a satisfying consumer experience. According to a recent AutoNews article, and much of the automotive industry, CarMax returned to buying leads this month after a 16-month break-up with AutoTrader and Cars.com. Despite the rising costs and dwindling dealership brand presence, about 40 percent of CarMax’s 141 stores are now back up and running with Cars.com. And, quite possibly though not confirmed, the same can be said for AutoTrader. Senior automotive analyst at Bloomberg Intelligence, Kevin Tynan says.. I’m not sure we buy that…. CarMax wasn’t alone in its endeavor to quit third party leads. 
AutoNation didn’t go cold turkey, but as it builds the foundation that will rely on brand awareness over third party lead providers, the giant has been pulling away from buying leads. Check out what the Refreshers think about third party leads. The U-Turn discussion is already underway over in the forums and waiting for your insight. What is missing, if anything, in the research process that is being found on AutoTrader or Cars.com – and, not on YOUR dealership website? What are you doing at your store about third party leads? Are you actually measuring their worth, and I mean, truly measuring? For the first time in seven years, I’m walking away from the fall conference season with a feeling of complete and utter hope for our industry. Never before have I seen so many dealers who were truly engaged with the speakers and each other. It’s as if we’ve hit a turning point where skepticism and confusion have been replaced by knowledge and understanding. I learned more from exchanging ideas with dealers than with other vendors. There was one negative component that still reared its ugly head as it seems to do every conference season. The fallacy of “secret sauce” getting peddled by vendors is still ever-present. This needs to change and dealers have the power to make it happen. One-Price Car Shopping. Is it Coming, or Not? One Price…Is it Coming, or Not? The short answer is yes, and it may not be as scary as you think. It seems that we’ve been talking about a revolution in automotive retail for a long time. One-price car shopping. Salespeople as order-takers. Whole teams of dealership employees left to do little more than collect leads from a computer and process financing. By some accounts, the revolution would be a total online shift to selling a car on a website or mobile device. In this vision, the future is as simple as point, click and drive. Are YOU going to #DSES 2014? 
DSES 2014 is right around the corner (this Sunday) and if there’s ONE event in the industry that I look forward to, it is DSES! UPDATE: For the 6th year in a row DSES has SOLD OUT. But if you didn’t get a seat and would very much like one? Click here to possibly get on a waiting list.
# Copyright 2020 Google LLC

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     https://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import core.schema_generator as schema_generator
# urllib.error is imported explicitly: previously the except clause relied
# on urllib.request importing urllib.error as a side effect, which is
# fragile and undocumented behaviour.
import urllib.error
import urllib.request
import time

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-s', '--SRC', type=str, help='Path to source file')
group.add_argument('-v', '--VER', type=str, help='Schema.org release number')
parser.add_argument('-o', '--OUT', type=str, help='Path to output directory', required=True)
parser.add_argument('-p', '--PKG', type=str, help='Proto package name', required=True)


def main():
    """Generate protobuf code from a given schema.

    Args:
      -h, --help  Show this help message and exit
      -s, --SRC   Path to source file
      -v, --VER   Schema.org release number
      -o, --OUT   Path to output directory
      -p, --PKG   Proto package name
    """
    args = parser.parse_args()
    src = args.SRC
    dest = args.OUT
    pkg = args.PKG
    ver = args.VER

    # Normalize the output path to end with '/'.  endswith() also guards
    # against an empty string, where dest[-1] would raise IndexError.
    if not dest.endswith('/'):
        dest = dest + '/'

    if isinstance(src, str):
        # A local source file was supplied; use it directly.
        schema = schema_generator.SchemaGenerator(src)
        schema.write_proto(dest, pkg)
    else:
        # No source file: download the requested schema.org release.
        url = ('https://raw.githubusercontent.com/schemaorg/schemaorg/master/data/releases/'
               + ver + '/schema.nt')
        # Timestamped name so concurrent runs do not clobber each other.
        name = './temp-' + str(int(time.time())) + '.nt'
        try:
            urllib.request.urlretrieve(url, name)
        except urllib.error.HTTPError:
            print('Invalid release number or check your internet connection.')
        else:
            try:
                schema = schema_generator.SchemaGenerator(name)
                schema.write_proto(dest, pkg)
            finally:
                # Always remove the downloaded temp file, even if proto
                # generation fails part-way through.
                os.remove(name)


if __name__ == '__main__':
    main()
Qualification: Candidates should have a 2-year Electrician/Wireman certificate course from a govt.-recognized ITI. How to Apply: Interested candidates may apply online through the website ojas.gujarat.gov.in or gsssb.gujarat.gov.in from 25.07.2017 to 08.08.2017 till 23:59; thereafter the website link will be disabled.
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Example DAG using PostgresToGoogleCloudStorageOperator. """ import os from airflow import models from airflow.providers.google.cloud.transfers.postgres_to_gcs import PostgresToGCSOperator from airflow.utils.dates import days_ago PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project") GCS_BUCKET = os.environ.get("GCP_GCS_BUCKET_NAME", "postgres_to_gcs_example") FILENAME = "test_file" SQL_QUERY = "select * from test_table;" with models.DAG( dag_id='example_postgres_to_gcs', schedule_interval=None, # Override to match your needs start_date=days_ago(1), tags=['example'], ) as dag: upload_data = PostgresToGCSOperator( task_id="get_data", sql=SQL_QUERY, bucket=GCS_BUCKET, filename=FILENAME, gzip=False ) upload_data_server_side_cursor = PostgresToGCSOperator( task_id="get_data_with_server_side_cursor", sql=SQL_QUERY, bucket=GCS_BUCKET, filename=FILENAME, gzip=False, use_server_side_cursor=True, )
Of all the components in a quality saddle, none is more important than the leather, and Brooks carefully examines every single piece of leather ensuring that it's the absolute highest quality. For example, while it's important that all stretch is eliminated, the flexibility of the leather must remain intact so that it can conform to the contours of your body and keep its shape for years to come. Plus, thanks to the built-in pores in its fine leather, Brooks saddles have natural ventilation and never become clammy and are always cool to ride providing top levels of comfort on even the longest rides on the warmest days. Even better, plenty of Brooks customers have used the same Brooks saddle for 30 or more years, a testament to Brooks' craftsmanship and the fine materials in their saddles.
# -*- encoding: utf-8 -*-
"""
Reference: https://dev.twitch.tv/docs/v5/reference/videos/

Copyright (C) 2016-2018 script.module.python.twitch

This file is part of script.module.python.twitch

SPDX-License-Identifier: GPL-3.0-only
See LICENSES/GPL-3.0-only for more information.
"""

from ... import keys, methods
from ...api.parameters import BroadcastType, Period, Language
from ...queries import V5Query as Qry
from ...queries import HiddenApiQuery as HQry
from ...queries import UploadsQuery as UQry
from ...queries import query


# required scope: none
@query
def by_id(video_id):
    """Return the query fetching a single video by its id."""
    q = Qry('videos/{video_id}', use_token=False)
    q.add_urlkw(keys.VIDEO_ID, video_id)
    return q


# required scope: none
@query
def get_top(limit=10, offset=0, game=None, period=Period.WEEK,
            broadcast_type=BroadcastType.HIGHLIGHT):
    """Return the query listing top videos, optionally filtered by game,
    time period and broadcast type."""
    q = Qry('videos/top', use_token=False)
    q.add_param(keys.LIMIT, limit, 10)
    q.add_param(keys.OFFSET, offset, 0)
    q.add_param(keys.GAME, game)
    q.add_param(keys.PERIOD, Period.validate(period), Period.WEEK)
    q.add_param(keys.BROADCAST_TYPE, BroadcastType.validate(broadcast_type))
    return q


# required scope: user_read
@query
def get_followed(limit=10, offset=0, broadcast_type=BroadcastType.HIGHLIGHT):
    """Return the query listing videos from channels the user follows."""
    q = Qry('videos/followed')
    q.add_param(keys.LIMIT, limit, 10)
    q.add_param(keys.OFFSET, offset, 0)
    q.add_param(keys.BROADCAST_TYPE, BroadcastType.validate(broadcast_type))
    return q


# required scope: channel_editor
@query
def create(channel_id, title, description=None, game=None, language=None, tag_list=None):
    """Return the POST query creating a new video on a channel."""
    q = Qry('videos/', method=methods.POST)
    q.add_param(keys.CHANNEL_ID, channel_id)
    q.add_param(keys.TITLE, title)
    q.add_param(keys.DESCRIPTION, description)
    q.add_param(keys.GAME, game)
    if language is not None:
        q.add_param(keys.LANGUAGE, Language.validate(language))
    q.add_param(keys.TAG_LIST, tag_list)
    return q


# required scope: channel_editor
@query
def update(video_id, title=None, description=None, game=None, language=None, tag_list=None):
    """Return the PUT query updating metadata of an existing video."""
    q = Qry('videos/{video_id}', method=methods.PUT)
    q.add_urlkw(keys.VIDEO_ID, video_id)
    q.add_param(keys.TITLE, title)
    q.add_param(keys.DESCRIPTION, description)
    q.add_param(keys.GAME, game)
    if language is not None:
        q.add_param(keys.LANGUAGE, Language.validate(language))
    q.add_param(keys.TAG_LIST, tag_list)
    return q


# required scope: channel_editor
@query
def delete(video_id):
    """Return the DELETE query removing a video."""
    q = Qry('videos/{video_id}', method=methods.DELETE)
    q.add_urlkw(keys.VIDEO_ID, video_id)
    return q


# requires upload token
@query
def upload_part(video_id, part, upload_token, content_length, data):
    """Return the PUT query uploading one binary chunk of a video.

    `part` is the 1-based chunk index; `data` is the raw chunk body.
    """
    q = UQry('upload/{video_id}', method=methods.PUT)
    q.set_headers({'Content-Length': content_length,
                   'Content-Type': 'application/octet-stream'})
    q.add_urlkw(keys.VIDEO_ID, video_id)
    q.add_param(keys.PART, part)
    q.add_param(keys.UPLOAD_TOKEN, upload_token)
    q.add_bin(data)
    return q


# requires upload token
@query
def complete_upload(video_id, upload_token):
    """Return the POST query finalizing a chunked video upload."""
    q = UQry('upload/{video_id}/complete', method=methods.POST)
    q.add_urlkw(keys.VIDEO_ID, video_id)
    q.add_param(keys.UPLOAD_TOKEN, upload_token)
    return q


# required scope: none
# undocumented / unsupported
@query
def _by_id(video_id, headers=None):
    """Hidden-API variant of by_id with optional extra request headers.

    FIX: the parameter previously defaulted to a shared mutable ``{}``
    (Python evaluates defaults once), so header mutations could leak
    between calls.  A fresh dict is now created per call; callers that
    relied on the default still get an empty header dict.
    """
    if headers is None:
        headers = {}
    q = HQry('videos/{video_id}', headers=headers, use_token=False)
    q.add_urlkw(keys.VIDEO_ID, video_id)
    return q
FabLab Valletta were deserving winners of the Growth Gurus Facebook Live Christmas competition, whereby one business was chosen to receive an industry leading website worth €10,000. We needed to create an industry leading eCommerce website that would enable anyone to seamlessly book and pay for a combination of tutors, materials, time and advanced fabrication machines. To be a “FabLab” means connecting to a global community of learners, educators, technologists, researchers, makers and innovators. We aimed to create a platform that not only showcases the essence of the FabLab brand, but one that enables individuals to fulfill their needs and build anything. An industry leading and fully responsive website with a fully integrated eCommerce system. FabLab where chosen as the winners of our Facebook Live Christmas competition which asked entrepreneurs and businesses why they should be gifted a brand new fully responsive website. The aim of FabLab Valletta is to add a layer of technology to the already existing infrastructure of culture and sustainability in the City of Valletta. To empower individuals and companies in the art of educating people of all ages to make and invent. Planning for both design and structure proved to be challenging. We needed to create a unique online experience that amplified FabLabs presence, while making sure the website was flexible enough to accommodate the innovative eCommerce solution, a first for the FabLab franchise. We structured the FabLab Valletta website to be inline with the strict search engine regulations in place, as well as easily navigable by the end user. Cybersecurity is one of the most important eCommerce features that we had to take into consideration when developing the FabLab Valletta website. Without the proper protocols in place, we would be putting FabLab’s customers at the risk of payment fraud. 
The platform we developed follows all industry standard protection rules and ensures that hackers are unable to steal user data, install malicious software or distribute malware to users. While many eCommerce websites sell imported products from foreign markets, FabLab is highly different. The platform we created provides the capability, training, and tools required to create products locally and sustainably, meaning that fewer products need to be imported into Malta creating economic value and contributing to a healthy ecosystem. FabLab informed us that customer service was at the forefront of their business model, something that we resonate here with at Growth Gurus. We developed a custom-built operations system that streamlines all communication between FabLab and their customers. A great deal of careful consideration and attention has been implemented into every part of the FabLab Valletta eCommerce website – right from the core architecture, through to the content and media used, fonts and colours, usability, eCommerce, mobile optimisation, backend, and conversion funnels with the end customers’ satisfaction at the core of it all.
#!/usr/bin/env python
import rospy
import roslib; roslib.load_manifest('ardrone_python')
from std_msgs.msg import Empty
from geometry_msgs.msg import Twist, Vector3


def _twist(forward, yaw):
    """Build a Twist with linear.x = forward and angular.z = yaw; all other
    components zero."""
    return Twist(Vector3(forward, 0, 0), Vector3(0, 0, yaw))


if __name__ == '__main__':
    rospy.init_node('example_node', anonymous=True)

    # Publishers for the AR.Drone command topics (send to quadrotor).
    vel_pub = rospy.Publisher('/cmd_vel', Twist)
    takeoff_pub = rospy.Publisher('/ardrone/takeoff', Empty)
    land_pub = rospy.Publisher('/ardrone/land', Empty)
    reset_pub = rospy.Publisher('/ardrone/reset', Empty)

    print("ready!")
    rospy.sleep(1.0)

    # Scripted demo flight: take off, fly a short dog-leg, stop, land.
    print("takeoff..")
    takeoff_pub.publish(Empty())
    rospy.sleep(5.0)

    print("flying forward..")
    vel_pub.publish(_twist(0.05, 0))
    rospy.sleep(2.0)

    print("turning around yaw axis..")
    vel_pub.publish(_twist(0, 1))
    rospy.sleep(2.0)

    print("flying forward..")
    vel_pub.publish(_twist(0.05, 0))
    rospy.sleep(2.0)

    print("stop..")
    vel_pub.publish(_twist(0, 0))
    rospy.sleep(5.0)

    print("land..")
    land_pub.publish(Empty())
    print("done!")
Antiqued brass hot air balloon dangle and glass faceted bead in frosted, opaque sky blue. Attaches to planner, binder, notebook, or Filofax with antiqued brass lobster claw clasp. Measures approx. 2" from top of clasp to bottom of balloon. You will receive one charm dangle.
""" Django settings for digimenu project. Generated by 'django-admin startproject' using Django 1.8.2. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'w%s(j9w3996gp$-djl#(@p@$^5++&)rnkr2n9&1mm_z#o-0t_v' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'digimenu.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'digimenu.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases 
DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'digimenu', 'USER': 'root', 'PASSWORD': 'a', 'HOST': 'localhost', 'PORT': '3306', } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/'
Finance for $230 per month at 3.9% APR for 72 months with $1,724 down payment. Payment expires on 04/30/19. Estimated Finance payment based upon 3.9% APR for 72 months, with a down payment of $1,724. Excludes title, taxes, registration, and license fees. Some customers will not qualify. See your authorized dealer for complete details on this and other offers. Residency restrictions may apply. Lease for $259/month for 36 months with $2,500 due at signing including $0 Security Deposit and 10,000 miles per year. Payment expires on 04/30/19. Estimated Lease payment based on 36 months and $2,500 due at lease signing. Cash due at signing includes $2,242 capitalization cost reduction, which includes $650 acquisition fee, plus a required security deposit of $0, and first month's lease payment of $259. Excludes title, taxes, registration, and license fees. Total monthly payments equal $11,536. Mileage charge of $0.20 per mile over 10,000 miles. Not all consumers will qualify. Residency restrictions may apply. * All prices are plus tax, new or transfer of tag fee, $799 Customer Service Fee and any dealer added features. All new vehicle prices include the applicable customer rebate(s), including Valued Owner Coupon, from Hyundai. On new Hyundais you may qualify for an additional rebates, including Recent College Graduate and Active/Retired Military rebate(s). See Dealer for complete details. While every effort is made to ensure the accuracy of our pricing, we are not responsible for any errors or omissions contained on these pages. Please verify any information, questions or concerns with the dealership. MPG estimates on this website are EPA estimates; your actual mileage may vary. For used vehicles, MPG estimates are EPA estimates for the vehicle when it was new. 
The EPA periodically modifies its MPG calculation methodology; all MPG estimates are based on the methodology in effect when the vehicles were new (please see the “Fuel Economy” portion of the EPA’s website for details, including a MPG recalculation tool). The features and options listed are for the new 2019 Hyundai Accent and may not apply to this specific vehicle.
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This example shows how to use validateOnly SOAP header.

With client.validate_only enabled, Mutate calls are checked by the API
but never executed: a valid request returns silently, an invalid one
raises AdWordsRequestError.  Python 2 only (print statements,
``except X, e`` syntax).

Tags: CampaignService.mutate
Api: AdWordsOnly
"""

__author__ = 'api.kwinter@gmail.com (Kevin Winter)'

import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))

# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
from adspygoogle.adwords.AdWordsErrors import AdWordsRequestError

# Placeholder: replace with a real ad group id before running.
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'


def main(client, ad_group_id):
  # Initialize appropriate service with validate only flag enabled.
  client.validate_only = True
  ad_group_ad_service = client.GetAdGroupAdService(version='v201309')

  # Construct operations to add a text ad.
  operations = [{
      'operator': 'ADD',
      'operand': {
          'xsi_type': 'AdGroupAd',
          'adGroupId': ad_group_id,
          'ad': {
              'xsi_type': 'TextAd',
              'url': 'http://www.example.com',
              'displayUrl': 'example.com',
              'description1': 'Visit the Red Planet in style.',
              'description2': 'Low-gravity fun for everyone!',
              'headline': 'Luxury Cruise to Mars'
          }
      }
  }]
  ad_group_ad_service.Mutate(operations)
  # No error means the request is valid.

  # Now let's check an invalid ad using a very long line to trigger an error.
  # (description2 exceeds the AdWords length limit for description lines.)
  operations = [{
      'operator': 'ADD',
      'operand': {
          'xsi_type': 'AdGroupAd',
          'adGroupId': ad_group_id,
          'ad': {
              'xsi_type': 'TextAd',
              'url': 'http://www.example.com',
              'displayUrl': 'example.com',
              'description1': 'Visit the Red Planet in style.',
              'description2': 'Low-gravity fun for all astronauts in orbit',
              'headline': 'Luxury Cruise to Mars'
          }
      }
  }]
  try:
    ad_group_ad_service.Mutate(operations)
  except AdWordsRequestError, e:
    # Expected path: validation rejects the over-long description.
    print 'Validation correctly failed with \'%s\'.' % str(e)

  print
  print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                             client.GetOperations()))


if __name__ == '__main__':
  # Initialize client object.
  client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
  main(client, ad_group_id)
Why is it important to have Directors and Officers Liability Insurance? What would happen if a member of your board were sued for some type of negligence? Are you covered for all of the additional risks that are present when you host a special event’s needs? If your basic business insurance policies exclude coverage for certain types of lawsuits, one accident or lawsuit could force you to close. An independent insurance agent can help you uncover all of the risks to your non-profit organization. You may find you need a variety of non-profit insurance policies to obtain a comprehensive portfolio of coverage. A basic non-profit organization insurance plan includes commercial general liability insurance to protect from lawsuits and property insurance for any office space, equipment, and other business personal property. Large non-profits may need to cover buildings that they own, vehicles and other forms of business property. You might even need coverage for special events and a variety of additional non-profit liability insurance policies. Commercial General Liability (CGL) insurance is essential for any business, especially for any business or organization that regularly associates with the general public, on or off-premises. CGL insurance protects your non-profit from lawsuits related to third-party bodily injury or property damage. This can be as simple as a slip, trip, or fall in your office or serious injury or death caused by your organization. If such an incident occurs, your CGL policy covers medical expenses, costs to repair or replace damaged property, attorney fees, court costs, and any settlements or judgments that you must pay. CGL insurance also covers certain personal injury claims (slander and libel) and advertising liability claims. Many small non-profits may not have a lot of business property. 
But if you have office space, office equipment, furniture, computers and other electronics you need Business Property insurance to protect your non-profit in the event of a fire, smoke damage, theft, vandalism, and certain weather events. If you own your building, your commercial property policy also covers damage to the building. Your property policy may also include Business Interruption coverage, which provides income replacement and reimburses you for certain ongoing expenses like salaries and rent. If you must close or relocate temporarily after a covered loss (e.g., a fire). If you run your non-profit out of your home, talk to your independent insurance agent about amending your homeowner’s insurance policy to include coverage for your business property and any business-related liability risks you face. You may need to purchase special coverage for home-based businesses. Lawsuits are probably the greatest risk for any non-profit. While your CGL policy offers broad coverage that protects against a wide variety of potential lawsuits, you may be exposed to other types of lawsuits that it does not cover. The directors and officers who serve your non-profit must maintain the organization’s financial stability and provide the appropriate resources and the environment in order for it to flourish. Nearly any decision that your board makes can trigger a lawsuit, which can hurt the non-profit and threaten the personal assets of the directors and officers. All of this calls for another form of non-profit liability insurance. D&O liability insurance pays for your defense costs, damages, settlements and judgments related to a covered complaint. It is generally customized for the organization and the type of work that you do. You may also want to purchase Employment Practices Liability insurance. This policy offers coverage for claims of employment-related discrimination by prospective, current or former employees. 
Your D&O policy may specifically exclude coverage for these kinds of claims, so be sure to know what is covered (and what is not) by both policies. Your non-profit organization probably has employees just like any other business. If you do, you need insurance that protects your employees from workplace injuries and illnesses. Workers compensation insurance covers medical expenses and provides income replacement if an employee is injured on the job. Many non-profits use volunteer workers to perform a wide variety of tasks. Each state has different workers compensation laws, and they vary on whether volunteers can or must be covered by workers compensation insurance. In states where volunteers cannot be covered by workers compensation, or where it is optional, non-profits may choose to purchase special accident medical reimbursement policies to cover incidents where volunteers are injured while working on their behalf. An independent agent who specializes in workers compensation insurance can help you to determine how to best protect your volunteer workers. Special events such as fundraisers, dinners, auctions or golf tournaments may play a huge role in your non-profit organization. Does your general liability insurance policy provide all the coverage you need? Many special events involve renting space and inviting people to participate. They also often involve serving alcohol and participating in athletic events and other high-risk activities like bounce houses, carnival rides or swimming. If someone is injured or property is damaged at one of these events, your CGL policy might not provide the coverage you need. Schedule an appointment with Densmore Insurance Strategies, Inc. about whether your CGL policy excludes special events. If it does, you need Special Event insurance, which is extra coverage for a one-time event or a series of events, depending on your needs. 
These policies often include Liquor Liability coverage, which protects you in the event that there is an alcohol-related injury or property damage. Commercial auto insurance: This covers vehicles that are owned or rented by your non-profit, or any personal vehicles that are driven for business purposes. Excess liability insurance: Also called Commercial Umbrella insurance, this provides additional liability protection above and beyond the liability limits of other applicable liability policies. Professional liability insurance: This offers protection for claims of professional negligence and errors and omissions. This may be particularly important if your non-profit provides counseling or other professional services. At Densmore Insurance Strategies, Inc., we specialize in Commercial General Liability Insurance Bondurant Iowa, Ankeny Iowa, Altoona Iowa, West Des Moines Iowa, Johnston Iowa, and Urbandale Iowa. We are licensed to serve Iowa, Florida, Missouri, Arizona, Texas, Tennessee, and Illinois.
import pytest

from pnc_cli.swagger_client.apis.environments_api import EnvironmentsApi
from test import testutils
import pnc_cli.user_config as uc


@pytest.fixture(scope='function', autouse=True)
def get_envs_api():
    """Bind a fresh EnvironmentsApi client to the module global before
    every test."""
    global envs_api
    envs_api = EnvironmentsApi(uc.user.get_api_client())


def test_get_all_invalid_param():
    """get_all must reject unexpected keyword arguments."""
    testutils.assert_raises_typeerror(envs_api, 'get_all')


def test_get_all():
    """Listing environments returns a content payload."""
    page = envs_api.get_all(page_index=0, page_size=1000000, sort='', q='')
    assert page.content is not None


def test_get_specific_no_id():
    """get_specific requires a non-None id."""
    testutils.assert_raises_valueerror(envs_api, 'get_specific', id=None)


def test_get_specific_invalid_param():
    """get_specific must reject unexpected keyword arguments."""
    testutils.assert_raises_typeerror(envs_api, 'get_specific', id=1)


def test_get_specific():
    """Fetching environment 1 returns data."""
    assert envs_api.get_specific(id=1).content is not None


# NOTE: create/update/delete coverage is intentionally absent because
# environment manipulation is currently disabled in PNC.  Restore the
# removed tests from version-control history if those endpoints return.
Get tickets to the show!!! Even with today’s dazzling entertainment options, a group of talented Peninsula performers wants to turn your attention to hometown center stage – live and local, gently bawdy. Vaudeville style. And it’s not just the show – the semi-annual Chicken’s Ball is celebrating its 78th year in San Carlos in 2018. Members of this passionate group say the “best kept secret” is how participating helps you discover real community in our isolating virtual world. Back stage, they say, is the stuff of local legend: Colorful creativity laced with crazy pranks, great parties and friendly competition. Lifelong friendships are made, true love found (and sometimes redirected) and talent discovered for everything from acting and dancing to costume and set design, writing and choreography. Most of the 100-plus participants today are people who fell in love with creating fun, local theatre that also helps raise money for San Carlos schools. They’ve handed that passion down generation to generation since 1940. Kathy Smallman, born in San Carlos in 1963, has been involved with the Ball since 1990, but she has been around the ball for most of her life. Her parents started with the PTA at Laureola School, one of several PTA’s that contributed everything from set design and prop painting to costumes and skit writing. “My fondest memory was the first year we participated as a family – my dad, mom and husband,” Smallman recalls. “My dad was a super set designer and always painted the curtain. Mom was extremely funny – she did everything from skits to ticket sales and production. She had a laugh. I can still hear that laugh. Lyndsey Smith, 37 years old and also a San Carlos native, got involved in 1998, inspired by her mom, Ricklay (Ricki) who started performing and designing costumes in the 1980s and still takes the stage today. 
Ditto Gayle Collins, 65, another local native who followed her parents into the show in 1982 and introduced her daughter Heather to the stage in 2006. Her husband Ron currently is a show judge. In 1939, Howard J. Demeke, a San Carlos teacher, was looking for ways to raise money for Central School, the only school in town at the time. He decided to revive the Gay ‘90s Chicken’s Ball tradition in his home town, and the first show was staged in 1940. Since then, the biennial event has raised hundreds of thousands of dollars for local schools. The group claims the title of being the longest running PTA fundraiser in the United States. Over the years, the original PTA groups that contributed scripts, production and design spun off into a number of smaller entertaining and script writing groups, some that perform “entres,” which are short song and dance numbers staged between the main skits. The groups include the Junior Matrons, Friday Night Funnies, Barbary Coasters, the Maids and Matronettes, and the Clay Pipers. Years ago, the Pipers even extended their reach into Amador City where they rented a hall, went up on Friday nights, performed on Saturday and came home on Sunday. Today, the members are hoping to attract more interest in the show, both in creating it and seeing it staged in April 2018. The theme is “Time Stands Still,” fashioned after the House of Shields, an iconic San Francisco bar on Montgomery Street where you won’t find a single clock or television. People are amazed at how fast time seems to go by these days. Chicken’s Ball enthusiasts say their “Time Stands Still” theme is a reminder of what can happen if you turn your attention from the clock and TV, slowing down a little to make friends and have fun the old-fashioned way. “We’re concerned about the future,” Smith says, “Today everybody works the lifestyle of Silicon Valley. Parents are super busy, and many don’t even know the Ball is contributing to their children’s schools. 
People are wrapped up in a virtual world. It’s hard to get their attention. “We want to get more people – especially young people – involved, let them see this is how you get to know your community. “You spend several nights a week from January to April working on the show with your Chicken’s Ball friends. They become family,” Smith says. “I can’t go downtown San Carlos anymore without seeing someone I know. That’s awesome. The 2018 show is in production now, with show dates set for the last two weeks in April. Tickets are available through the web site. Auditions for performing, production and general volunteer involvement are set for Nov. 16, 6-9 p.m. and Nov. 18, 10 a.m. to 1 p.m., at Heather School in San Carlos. Appointments are not necessary – just show up. For more information about the Ball or how to participate, visit the website: Chickensball.com, or call Sue Court, 650-207-0366, email: lacy340@aol.com.
# Written by John Hoffman
# based on code by Uoti Urpala
# see LICENSE.txt for license information

# Diffie-Hellman key exchange plus RC4 stream encryption for an encrypted
# BitTorrent peer connection.  The 'req1'/'req2'/'req3', 'keyA'/'keyB' and
# VC constructs match the Message Stream Encryption (MSE/PE) handshake --
# NOTE(review): verify against the MSE spec before relying on this reading.
# Python 2 only: uses long ints, xrange, str-as-bytes and the `sha` module.
from __future__ import generators # for python 2.2
from random import randrange,randint,seed
try:
    from os import urandom
except:
    # Pre-2.4 Python has no os.urandom; fall back to the `random` module.
    # NOTE(review): this fallback is not cryptographically secure.
    seed()
    urandom = lambda x: ''.join([chr(randint(0,255)) for i in xrange(x)])
from sha import sha

try:
    True
except:
    # Ancient-Python compatibility: define the boolean constants.
    True = 1
    False = 0

try:
    from Crypto.Cipher import ARC4
    CRYPTO_OK = True
except:
    # Without PyCrypto the DH exchange still works, but payload
    # encryption cannot be enabled (see Crypto.__init__).
    CRYPTO_OK = False

KEY_LENGTH = 160   # DH private key size, in bits
DH_PRIME = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A36210000000000090563
PAD_MAX = 200 # less than protocol maximum, and later assumed to be < 256
DH_BYTES = 96      # public key / shared secret size, in bytes (768 bits)

def bytetonum(x):
    # Big-endian byte string -> long integer.
    return long(x.encode('hex'), 16)

def numtobyte(x):
    # Long integer -> fixed-width 96-byte big-endian string.
    # lstrip('0x') also strips any leading zero hex digits, but the
    # subsequent left-padding to 192 hex characters restores them, so the
    # round-trip is still correct.
    x = hex(x).lstrip('0x').rstrip('Ll')
    x = '0'*(192 - len(x)) + x
    return x.decode('hex')

class Crypto:
    # Per-connection crypto state: DH key pair, derived shared secret,
    # handshake hash blocks and (once keyed) the two RC4 streams.

    def __init__(self, initiator, disable_crypto = False):
        # initiator: True if this side opened the connection (selects which
        # RC4 stream is used for sending vs. receiving in set_skey).
        self.initiator = initiator
        self.disable_crypto = disable_crypto
        if not disable_crypto and not CRYPTO_OK:
            raise NotImplementedError, "attempt to run encryption w/ none installed"
        # Random private key and public key 2^priv mod p for the exchange.
        self.privkey = bytetonum(urandom(KEY_LENGTH/8))
        self.pubkey = numtobyte(pow(2, self.privkey, DH_PRIME))
        self.keylength = DH_BYTES
        self._VC_pattern = None   # lazily computed by VC_pattern()

    def received_key(self, k):
        # Complete the DH exchange with the peer's public key k, then derive
        # the handshake hash blocks from the shared secret S.
        self.S = numtobyte(pow(bytetonum(k), self.privkey, DH_PRIME))
        self.block3a = sha('req1'+self.S).digest()
        self.block3bkey = sha('req3'+self.S).digest()
        self.block3b = None

    def _gen_block3b(self, SKEY):
        # XOR of sha('req2'+SKEY) with sha('req3'+S): lets the receiver
        # identify which stream key (torrent) is meant without SKEY being
        # sent in the clear.
        a = sha('req2'+SKEY).digest()
        return ''.join([ chr(ord(a[i])^ord(self.block3bkey[i])) for i in xrange(20) ])

    def test_skey(self, s, SKEY):
        # Return True if candidate stream key SKEY matches the received
        # block s; on success, key the RC4 streams (unless crypto disabled).
        block3b = self._gen_block3b(SKEY)
        if block3b != s:
            return False
        self.block3b = block3b
        if not self.disable_crypto:
            self.set_skey(SKEY)
        return True

    def set_skey(self, SKEY):
        # Derive the two directional RC4 keys from S and SKEY and install
        # encrypt/decrypt callables for this side's role.
        if not self.block3b:
            self.block3b = self._gen_block3b(SKEY)
        crypta = ARC4.new(sha('keyA'+self.S+SKEY).digest())
        cryptb = ARC4.new(sha('keyB'+self.S+SKEY).digest())
        # 'keyA' stream is used by the initiator for sending, 'keyB' by the
        # responder; each side decrypts with the other's stream.
        if self.initiator:
            self.encrypt = crypta.encrypt
            self.decrypt = cryptb.decrypt
        else:
            self.encrypt = cryptb.encrypt
            self.decrypt = crypta.decrypt
        self.encrypt('x'*1024) # discard first 1024 bytes
        self.decrypt('x'*1024)

    def VC_pattern(self):
        # Verification constant: 8 zero bytes run through the decrypt RC4
        # stream, cached; used to locate the start of the encrypted payload.
        if not self._VC_pattern:
            self._VC_pattern = self.decrypt('\x00'*8)
        return self._VC_pattern

    def read(self, s):
        # Decrypt incoming data and pass it to the installed raw reader.
        self._read(self.decrypt(s))

    def write(self, s):
        # Encrypt outgoing data and pass it to the installed raw writer.
        self._write(self.encrypt(s))

    def setrawaccess(self, _read, _write):
        # Install the underlying transport callbacks used by read()/write().
        self._read = _read
        self._write = _write

    def padding(self):
        # Random handshake padding, 16 to PAD_MAX-1 bytes long.
        return urandom(randrange(PAD_MAX-16)+16)
The quick-and-dirty synopsis: photographers everywhere have been paranoid about Facebook stealing their photos and supposedly absorbing the copyrights once they're posted on their site. They also think that by writing a one line passage on their wall, it mitigates their terms of service, thus bypassing the end-user-licensing agreement. Time has revealed that Facebook needs to have the clause in their terms, otherwise images can't be posted or shared on their site, and it doesn't even come close to addressing copyright. Just know this photographers, Facebook doesn't absorb your copyright once you post something on Facebook.
# -*- coding: utf-8 -*-
# Copyright (C) 2015-16 Red Hat, Inc.
# This file is part of the Infinity Note Compiler.
#
# The Infinity Note Compiler is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# The Infinity Note Compiler is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infinity Note Compiler.  If not, see
# <http://www.gnu.org/licenses/>.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from . import logger
from . import operations
from .types import INTTYPE
import inspect

# The primary goal of these optimizations is to reduce the instruction
# count, to aid consumers using interpreters to execute notes.  The
# secondary goal of these optimizations is to reduce the size of the
# bytecode in cases where this does not conflict with the primary goal
# of reducing instruction count.

class Optimizer(object):
    """Base class for all optimizers.
    """

    def visit_toplevel(self, toplevel):
        # Visit every function in the compilation unit.
        for node in toplevel.functions:
            node.accept(self)

    def debug_print_hit(self, location):
        """Log that the calling optimization fired at *location*.

        The optimization name is recovered from the caller's code
        object, with any "__" (name-mangling) and "try_" prefixes
        stripped for readability.
        """
        if self.debug_print.is_enabled:
            optimization = inspect.stack()[1][0].f_code.co_name
            for prefix in ("__", "try_"):
                if optimization.startswith(prefix):
                    optimization = optimization[len(prefix):]
            self.debug_print("%s: %s\n" % (location.fileline, optimization))

class BlockOptimizer(Optimizer):
    """Optimizations performed before serialization.
    """

    debug_print = logger.debug_printer_for("blockopt")

    def visit_function(self, function):
        # Walk the basic-block graph from the entry block, optimizing
        # each block once, then try to merge equivalent blocks.
        self.visited = {}
        function.entry_block.accept(self)
        self.try_combine_blocks(self.visited.keys())

    def try_combine_blocks(self, blocks):
        # Process blocks in index order (deterministic), repeating
        # until no further pair of equivalent blocks can be merged.
        blocks = sorted((block.index, block) for block in blocks)
        blocks = [block for index, block in blocks]
        while self.__try_combine_blocks(blocks):
            pass

    def __try_combine_blocks(self, blocks):
        # Find one pair of equivalent blocks, redirect every edge
        # into the second onto the first, and drop the second.
        # Returns True if a merge happened (caller loops).
        for i in range(len(blocks)):
            block_1 = blocks[i]
            for j in range(i + 1, len(blocks)):
                block_2 = blocks[j]
                if block_2.is_equivalent_to(block_1):
                    self.debug_print(
                        ("Blocks #%d and #%d are equivalent, "
                         + "removing #%d\n") % (block_1.index,
                                                block_2.index,
                                                block_2.index))
                    for block in blocks:
                        block.replace_exit(block_2, block_1)
                    blocks.remove(block_2)
                    return True
        return False

    def visit_basicblock(self, block):
        # Depth-first traversal; self.visited guards against cycles.
        if self.visited.get(block, False):
            return
        self.visited[block] = True
        self.try_all_optimizations(block)
        if self.debug_print.is_enabled:
            self.debug_print(str(block) + "\n\n")
        for block in block.exits:
            block.accept(self)

    def try_all_optimizations(self, block):
        self.try_eliminate_cmp_bra_const_const(block)
        self.try_eliminate_lit0_cmp_before_bra(block)
        self.try_reverse_branch_exits(block)
        self.try_peephole(block, self.try_eliminate_identity_math, 2)
        self.try_peephole(block, self.try_use_plus_uconst, 2)

    def __tecbcc_helper(self, block):
        """Helper for try_eliminate_cmp_bra_const_const.

        If *block* has exactly one predecessor and begins by loading
        an integer constant, return that constant's value; otherwise
        return None.
        """
        if len(block.entries) != 1:
            return
        if len(block.ops) < 2:
            return
        if not block.ops[0].is_load_constant:
            return
        constant = block.ops[0]
        if constant.type.basetype != INTTYPE:
            return
        return constant.value

    def try_eliminate_cmp_bra_const_const(self, block):
        """Optimize cases where the blocks following a conditional
        branch load the constants that the comparison pushed to
        the stack.

        This is relevant for libpthread notes.  All the libthread_db
        functions that the libpthread notes replace return a td_err_e
        error code defined as:

          typedef enum
          {
            TD_OK,  /* No error.  */
            TD_ERR, /* General error.  */
            ...     /* Specific errors.  */
          } td_err_e;

        Some libthread_db functions call proc_service functions which
        return a similar ps_err_e error code:

          typedef enum
          {
            PS_OK,  /* No error.  */
            PS_ERR, /* General error.  */
            ...     /* Specific errors.  */
          } ps_err_e;

        Note that TD_OK == PS_OK == 0 and TD_ERR == PS_ERR == 1.

        This optimizer replaces code of the following pattern:

            call        /* Some proc_service function.  */
            load PS_OK  /* == 0 */
            bne fail
            load TD_OK  /* == 0 */
            return
          fail:
            load TD_ERR /* == 1 */
            return

        With this:

            call        /* Some proc_service function.  */
            load PS_OK
            ne
        """
        # Does the block end with "comparison, branch"?
        if len(block.ops) < 2:
            return
        if not block.ops[-1].is_branch:
            return
        if not block.ops[-2].is_comparison:
            return

        # Do the successors start with "const 0" and "const 1"?
        constants = list(map(
            self.__tecbcc_helper,
            (block.nobranch_exit, block.branched_exit)))
        if 0 not in constants or 1 not in constants:
            return

        # Are the successors otherwise the same?
        # The first op (the constant load) and the last op are not
        # compared here -- NOTE(review): presumably the last ops are
        # covered by the s1.exits != s2.exits check above; confirm.
        s1, s2 = block.exits
        if s1.exits != s2.exits:
            return
        if len(s1.ops) != len(s2.ops):
            return
        for op1, op2 in list(zip(s1.ops, s2.ops))[1:-1]:
            if not op1.is_equivalent_to(op2):
                return

        self.debug_print_hit(block.ops[-1])

        # Reverse the comparison if necessary
        if constants == [1, 0]:
            block.ops[-2].reverse()

        # Lose one of the successor blocks (doesn't matter which)
        dead_block = block.exits.pop()
        dead_block.entries.remove(block)
        assert not dead_block.entries

        # Reduce the branch to a goto
        block.ops[-1] = operations.SyntheticGoto(block.ops[-1])

        # Move to the remaining successor and drop the ConstOp.
        # This messes with the types a bit (what was an INTTYPE
        # is now a BOOLTYPE) but that doesn't matter once it's
        # bytecode.
        [block] = block.exits
        removed_op = block.ops.pop(0)
        assert removed_op.is_load_constant

    def try_eliminate_lit0_cmp_before_bra(self, block):
        """Fold "load 0; eq/ne; branch" into a bare branch, reversing
        the branch's exits for the "eq" case."""
        # Does the block end with "load 0, {eq,ne}, branch"?
        if len(block.ops) < 3:
            return
        if not block.ops[-1].is_branch:
            return
        if not block.ops[-2].is_comparison:
            return
        if block.ops[-2].dwarfname not in ("eq", "ne"):
            return
        if not block.ops[-3].is_load_constant:
            return
        if block.ops[-3].value != 0:
            return

        self.debug_print_hit(block.ops[-2])

        # Reverse the branch if necessary
        if block.ops[-2].dwarfname == "eq":
            block.exits.reverse()

        # Remove the load and the comparison
        removed_op = block.ops.pop(-3)
        assert removed_op.is_load_constant
        removed_op = block.ops.pop(-2)
        assert removed_op.is_comparison

    def try_reverse_branch_exits(self, block):
        """If the fall-through successor immediately jumps away while
        the branched successor does not, reverse the comparison and
        swap the exits so the straight-line path falls through."""
        # Does the block end with "compare, branch"?
        if len(block.ops) < 2:
            return
        if not block.ops[-1].is_branch:
            return
        if not block.ops[-2].is_comparison:
            return

        # Does the nobranch case immediately jump somewhere?
        tmp = block.nobranch_exit.first_op
        if not (tmp.is_goto or tmp.is_return):
            return

        # Does the branch case NOT immediately jump somewhere?
        tmp = block.branched_exit.first_op
        if tmp.is_goto or tmp.is_return:
            return

        self.debug_print_hit(block.ops[-2])

        # Reverse both the comparison and the branch
        block.ops[-2].reverse()
        block.exits.reverse()

    def try_peephole(self, block, action, size):
        """Repeatedly run the *size*-op peephole *action* over *block*
        until it stops firing.  The scan restarts from the top after
        every hit because *action* mutates block.ops."""
        start = 0
        while True:
            start = self.__try_peephole(block, action, size)
            if start is None:
                break

    def __try_peephole(self, block, action, size):
        """Helper for try_peephole.

        Returns the index at which *action* fired, or None.
        NOTE(review): range(len(ops) - size) never examines the
        window ending on the final op; presumably safe because blocks
        end with a branch/goto/return -- confirm.
        """
        for index in range(len(block.ops) - size):
            if action(block, index):
                return index

    # Right-hand identity element for each binary operation: the
    # constant is pushed immediately before the operation, so it is
    # the top of stack, i.e. the right operand.
    IDENTITIES = {
        "plus": 0, "minus": 0, "mul": 1, "div": 1,
        "shl": 0, "shr": 0, "shra": 0,
        "or": 0, "xor": 0}

    def try_eliminate_identity_math(self, block, index):
        """Peephole: drop "load <identity>; <op>" pairs (e.g. "+ 0",
        "* 1") which leave the stack unchanged."""
        if not block.ops[index].is_load_constant:
            return False
        opname = getattr(block.ops[index + 1], "dwarfname", None)
        if opname is None:
            return False
        identity = self.IDENTITIES.get(opname, None)
        if identity is None:
            return False
        if block.ops[index].value != identity:
            return False

        self.debug_print_hit(block.ops[index + 1])

        # Remove the operations
        removed_op = block.ops.pop(index + 1)
        assert removed_op.dwarfname == opname
        removed_op = block.ops.pop(index)
        assert removed_op.is_load_constant
        return True

    def try_use_plus_uconst(self, block, index):
        """Peephole: fold "load <non-negative const>; plus" into a
        single DW_OP_plus_uconst-style operation."""
        if not block.ops[index].is_load_constant:
            return False
        if block.ops[index].value < 0:
            return False
        if not block.ops[index + 1].is_add:
            return False

        self.debug_print_hit(block.ops[index])

        # Insert the plus_uconst
        block.ops[index] = operations.PlusUConst(block.ops[index])

        # Remove the add
        removed_op = block.ops.pop(index + 1)
        assert removed_op.is_add
        return True

class StreamOptimizer(Optimizer):
    """Optimizations performed after serialization.
    """

    debug_print = logger.debug_printer_for("streamopt")

    def debug_print_stream(self, stream):
        self.debug_print("%s\n" % stream)

    def visit_function(self, function):
        function.ops.accept(self)

    def visit_operationstream(self, stream):
        # Run the three stream cleanups to a fixed point; each one
        # returns True after making a single change.
        while True:
            if self.try_remove_multijump(stream):
                continue
            if self.try_remove_goto_next(stream):
                continue
            if self.try_remove_unreachable_code(stream):
                continue
            break

    def try_remove_multijump(self, stream):
        """Retarget a jump whose destination is itself a goto, so the
        jump goes straight to the final destination."""
        for index, op in stream.items():
            target = stream.jumps.get(op, None)
            if target is None:
                continue
            if not target.is_goto:
                continue

            self.debug_print_hit(op)
            stream.retarget_jump(op, stream.jumps[target])
            self.debug_print_stream(stream)
            return True
        return False

    def try_remove_goto_next(self, stream):
        """Remove a goto whose target is simply the next operation."""
        for index, op in stream.items():
            if index + 1 == len(stream.ops):
                continue
            if not op.is_goto:
                continue
            if stream.labels.get(op, None) is not None:
                continue
            if stream.jumps[op] is not stream.ops[index + 1]:
                continue

            self.debug_print_hit(op)
            stream.remove_by_index_op(index, op)
            self.debug_print_stream(stream)
            return True
        return False

    def try_remove_unreachable_code(self, stream):
        """Remove an unlabelled operation that directly follows an
        unconditional goto (nothing can reach it)."""
        last_was_goto = False
        for index, op in stream.items():
            if last_was_goto:
                if stream.labels.get(op, None) is None:
                    self.debug_print_hit(op)
                    stream.remove_by_index_op(index, op)
                    self.debug_print_stream(stream)
                    return True
            last_was_goto = op.is_goto
        return False
As part of their annual programme of work, they will be planning engagement and project activities around the health and care areas that matter most to people. They want to hear what residents in Milton Keynes think about their current priorities. Tell them if you think these areas should be a focus of their work for the coming year, or not. They also want to know about any specific issues you would like them to explore in the coming year. It could be care homes, access to mental health, social isolation, access to healthcare in your school, or whatever’s affecting you most when it comes to your health and care needs. Please complete this survey and distribute to your networks, patients and service users.
# -*- coding: utf-8 -*-
"""
/***************************************************************************
 Name                 : RT Geosisma Offline
 Description          : Geosisma Offline Plugin
 Date                 : October 21, 2011
 copyright            : (C) 2013 by Luigi Pirelli (Faunalia)
 email                : luigi.pirelli@faunalia.it
 ***************************************************************************/

 Works done from Faunalia (http://www.faunalia.it) with funding from Regione
 Toscana - Servizio Sismico (http://www.rete.toscana.it/sett/pta/sismica/)

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from collections import OrderedDict

from dlgSelectRequest_ui import Ui_Dialog
from ArchiveManager import ArchiveManager

class DlgSelectRequest(QDialog, Ui_Dialog):
    """Modal dialog listing archived survey requests in a table and
    letting the user pick one.  After exec, the selection is exposed
    via self.currentRequestId / self.currentRequest."""

    # signals
    loadRequestsDone = pyqtSignal()   # emitted after records are loaded from the archive
    loadTableDone = pyqtSignal()      # emitted after the table widget is (re)populated

    def __init__(self, currentRequestId=None, parent=None):
        """currentRequestId: id of the request to pre-select, or None."""
        QDialog.__init__(self, parent)
        self.currentRequestId = currentRequestId
        self.currentRequest = None
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setupUi(self)
        self.buttonBox.button(QDialogButtonBox.Close).setText(self.tr("Ignora"))

        # Wire the load -> populate -> preselect pipeline via signals.
        self.loadRequestsDone.connect(self.updateButtonsState)
        self.loadRequestsDone.connect(self.loadTable)
        self.requestsTableWidget.itemSelectionChanged.connect(self.updateButtonsState)
        self.loadTableDone.connect(self.selectCurrentRequest)
        self.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self.setCurrentRequest)

        self.loadRequests()
        self.loadTable()

    def loadRequests(self):
        """Fetch all archived requests into self.records."""
        self.records = ArchiveManager.instance().loadRequests()
        self.loadRequestsDone.emit()

    def loadTable(self):
        """Rebuild the table widget from self.records."""
        self.requestsTableWidget.setSortingEnabled(True)

        # organize columns: key -> (header label, hidden flag)
        Hide = True
        Show = False
        columns = OrderedDict()
        columns['id'] = ( self.tr(u'id'), Show )
        columns['event_id'] = ( self.tr(u'Evento'), Show )
        columns['s1prov'] = ( self.tr(u'Provincia'), Show )
        columns['s1com'] = ( self.tr(u'Comune'), Show )
        columns['s1loc'] = ( self.tr(u'Localitá'), Show )
        columns['s1via'] = ( self.tr(u'Via'), Show )
        columns['s1civico'] = ( self.tr(u'Civico'), Show )
        columns['s1catpart1'] = ( self.tr(u'Particella'), Show )
        columns['s1catfoglio'] = ( self.tr(u'Foglio'), Show )
        columns['created'] = ( self.tr(u'Data di creazione'), Show )
        columns['number'] = ( self.tr(u'Squadra'), Show )
        columns['team_id'] = ( self.tr(u'Id della Squadra'), Hide )
        columns['s1name'] = ( self.tr(u'Richiesto da'), Show )

        # set table size
        self.requestsTableWidget.clear()
        self.requestsTableWidget.setRowCount( len(self.records) )
        self.requestsTableWidget.setColumnCount( len(columns) )

        # resizing mode of column
        header = self.requestsTableWidget.horizontalHeader()
        header.setResizeMode(QHeaderView.ResizeToContents)

        # fill the table
        self.requestsTableWidget.setHorizontalHeaderLabels( [val[0] for val in columns.values()] )
        for row, record in enumerate(self.records):
            for column, columnKey in enumerate(columns.keys()):
                item = QTableWidgetItem()
                # Store numbers as ints so column sorting is numeric.
                try:
                    value = int(record[columnKey])
                except:
                    value = str(record[columnKey])
                item.setData(Qt.DisplayRole, value)
                # add the whole record in the first "id" column so the
                # selection handler can retrieve it later
                if columnKey == "id":
                    item.setData(Qt.UserRole, record)
                self.requestsTableWidget.setItem(row, column, item )

        # column to be shown
        for index, key in enumerate(columns):
            self.requestsTableWidget.setColumnHidden(index, columns[key][1])

        self.loadTableDone.emit()

    def selectCurrentRequest(self):
        """Re-select the row matching self.currentRequestId, if any."""
        if self.currentRequestId is None:
            return
        for row in range( self.requestsTableWidget.rowCount() ):
            item = self.requestsTableWidget.item(row, 0)
            if str(self.currentRequestId) == item.text():
                self.requestsTableWidget.selectRow(row)
                break

    def setCurrentRequest(self):
        """Record the currently selected row as the chosen request
        (called when Ok is clicked)."""
        selectedItems = self.requestsTableWidget.selectedItems()
        if len(selectedItems) == 0:
            self.currentRequestId = None
            self.currentRequest = None
            return
        # assume that only one row is selected => get row from an element
        row = selectedItems[0].row()
        item = self.requestsTableWidget.item(row, 0) # assume id is the first column
        self.currentRequestId = item.text()
        self.currentRequest = item.data(Qt.UserRole)

    def updateButtonsState(self):
        """Enable Ok only when a row is selected."""
        if len(self.records) > 0:
            enabled = True
            if len(self.requestsTableWidget.selectedItems()) == 0:
                enabled = False
            self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled)
Add yourself if you are coming. Please take up any item that you can. Also add if you want anything. This page was last edited on 4 February 2010, at 10:37.
""" This file helped clarify for me how Python handles class and instance variables. Instances of a class refer to the class value until modified on the instance This file can simply be ran from the console: python class_variable_test.py """ class SomeClass(): a_field = None def main(): print("classfield: " + str(SomeClass.a_field)) # None instanceOne = SomeClass() print("instance_field_1: " + str(instanceOne.a_field)) # None instanceOne.a_field = "One" print("instance_field_1: " + instanceOne.a_field) # One print("classfield: " + str(SomeClass.a_field)) # None SomeClass.a_field = "Classfield" print("classfield: " + SomeClass.a_field) # Classfield print("instance_field_1: " + instanceOne.a_field) # One instanceTwo = SomeClass() print("instance_field_2: " + instanceTwo.a_field) # Classfield SomeClass.a_field = "Classfield_Other" print("instance_field_2: " + instanceTwo.a_field) # Classfield_Other instanceTwo.a_field = "Two" print("instance_field_2: " + instanceTwo.a_field) # Two print("classfield: " + SomeClass.a_field) # Classfield if __name__ == '__main__': main()
I landed at Brive airport in the Dordogne valley for the second time in a year. During my first visit, I had been told about prehistoric caves and paintings. The story of the caves captured my mind and inspired me to come back to the area and see them with my own eyes. This trip meant something different to me. I was in search of something unknown. I was looking to connect with those people who created the art works in the Vézère Valley. I was very curious to find out about the past. We are often so occupied in our day-to-day routines, so busy worrying about paying bills and other ordinary matters that we forget about where we came from. We can study the marks left by our ancestors and it might be a gateway to the future and understanding our complex universe. The mystery of our life on earth gets even more complicated with new findings going back to 1.7 million years. Having spent the night in the city of Sarlat, my journey to the cave of Combarelles near the small town of Les Eyzies-de-Tayac (pictured above), in the heart of the Vézère Valley, took only 20 minutes. The cave used to be inhabited by Cro-Magnon people (described by scientists as early modern humans) approximately 11,000 – 13,000 years ago. The cave contains prehistoric drawings of animals and symbols on sandy uneven walls. We walked through a very narrow passage approximately 1 metre wide. The guide told us that the height of the original cave was very low. It had been excavated and enlarged to make it possible for visitors to walk through more easily. This means that the artists had to crawl inside the cave in absolute darkness holding some kind of lighting in order to make their engravings. Scientists officially discovered the cave in 1901. The discovery was an explosive moment in the world of prehistory, revealing nearly 800 drawings, which could be divided into over 100 distinct sets. 
There are no colour paintings in this cave, but traces of dye indicate that they may have originally been in colour. These revelations show the intellectual and technical abilities of those prehistoric artists. They were perfect humans, as intelligent and capable as people today, but their world and surroundings were very simple. Their abstract creations are open to all kinds of interpretations. We are not certain what their intention was and whether there is any meaning behind it. Whatever it is, they are communicating with future generations in various ways through their sketches. Was the cave some kind of sanctuary or temple? Was it an art gallery? It is a puzzle, which might be solved by deeper research in several caves in the region. As my guide pointed out various figures and scratches, I walked through the narrow passages of the cave and observed engravings of a wild horse, a cave bear, a cave lion and a mammoth. Some of the animals do not exist in our time. I questioned the integrity and originality of what we saw. The guide patiently explained about claims and discoveries by top archeologists and many findings, several testimonies and items that are displayed in the National Museum of Prehistory in Les Eyzies. I was in deep thought and sensed that I was connecting with those artists, even that their soul was present. I felt their presence along with their work. The air inside the cave was very fresh and pleasant, even better than outside. I thought the air was controlled by a special air conditioning system. But the guide told me that the air is natural. She explained that the 300-metre long cave was formed by an underground river. The same river is still running, creating continuous fresh air. The cave has been flooded twice in recent years. My visit was only a taste of one of the many caves in France that have been named UNESCO World Heritage Sites.
Next I am very keen to visit the caves of Font-de-Gaume and Lascaux II near Les Eyzies to explore further into prehistoric art. The tourist offices of Sarlat (www.sarlat-tourisme.com) and les Eyzies (www.lascaux-dordogne.com) can give you directions to all the things to see and do in Dordogne Valley and Vézère Valley.
from django.db import migrations, models
from django.db.models import deletion
from django.utils import timezone


class Migration(migrations.Migration):
    # Initial schema for the "clubadm" app: User, Mail, Member and
    # Season models.  NOTE(review): the giftee/santa relation and
    # gift_sent/gift_received fields suggest a Secret-Santa style
    # gift exchange -- confirm against the app's views.
    # NOTE(review): the help_text "уходит вархив" on Season.ship_by is
    # missing a space ("в архив"); fixing it would require a follow-up
    # migration, so it is left byte-identical here.

    initial = True

    dependencies = [
    ]

    operations = [
        # Site account: Habr-style username plus OAuth access token.
        migrations.CreateModel(
            name="User",
            fields=[
                ("id", models.AutoField(
                    auto_created=True, primary_key=True, serialize=False,
                    verbose_name="ID")),
                ("username", models.CharField(
                    max_length=25, unique=True,
                    verbose_name="имя пользователя")),
                ("access_token", models.CharField(
                    blank=True, max_length=40,
                    verbose_name="токен доступа")),
                ("is_oldfag", models.BooleanField(
                    default=False, verbose_name="старый участник",
                    help_text="Отметьте, чтобы снять ограничение кармы.")),
                ("is_banned", models.BooleanField(
                    default=False, verbose_name="забанен")),
                ("first_login", models.DateTimeField(
                    default=timezone.now, verbose_name="первый вход")),
                ("last_login", models.DateTimeField(
                    blank=True, null=True, verbose_name="последний вход")),
            ],
            options={
                "verbose_name": "пользователь",
                "verbose_name_plural": "пользователи",
                "ordering": ["username"],
            },
        ),
        # Message between two members; sender/recipient FKs are added
        # below once Member exists.
        migrations.CreateModel(
            name="Mail",
            fields=[
                ("id", models.AutoField(
                    auto_created=True, primary_key=True, serialize=False,
                    verbose_name="ID")),
                ("body", models.TextField(max_length=400)),
                ("send_date", models.DateTimeField(
                    db_index=True, default=timezone.now)),
                ("read_date", models.DateTimeField(
                    blank=True, db_index=True, null=True)),
            ],
            options={
                "ordering": ["send_date"],
            },
        ),
        # A user's participation in one season: postal details plus
        # gift shipment/receipt timestamps and the giftee link.
        migrations.CreateModel(
            name="Member",
            fields=[
                ("id", models.AutoField(
                    auto_created=True, primary_key=True, serialize=False,
                    verbose_name="ID")),
                ("fullname", models.CharField(
                    max_length=80, verbose_name="полное имя")),
                ("postcode", models.CharField(
                    max_length=20, verbose_name="индекс")),
                ("address", models.TextField(
                    max_length=200, verbose_name="адрес")),
                ("gift_sent", models.DateTimeField(
                    blank=True, db_index=True, null=True,
                    verbose_name="подарок отправлен")),
                ("gift_received", models.DateTimeField(
                    blank=True, db_index=True, null=True,
                    verbose_name="подарок получен")),
                # One-to-one to the member this member gives to; the
                # reverse accessor "santa" is who gives to this member.
                ("giftee", models.OneToOneField(
                    blank=True, null=True, on_delete=deletion.CASCADE,
                    related_name="santa", to="clubadm.Member",
                    verbose_name="получатель подарка")),
            ],
            options={
                "verbose_name": "участник",
                "verbose_name_plural": "участники",
                "ordering": ["season", "fullname"],
            },
        ),
        # One yearly exchange round, keyed by year.
        migrations.CreateModel(
            name="Season",
            fields=[
                ("year", models.IntegerField(
                    primary_key=True, serialize=False, verbose_name="год")),
                ("gallery", models.URLField(
                    blank=True, verbose_name="пост хвастовства подарками")),
                ("signups_start", models.DateField(
                    verbose_name="начало регистрации")),
                ("signups_end", models.DateField(
                    verbose_name="жеребьевка адресов")),
                ("ship_by", models.DateField(
                    help_text="После этой даты сезон закрывается и уходит "
                              "вархив.",
                    verbose_name="последний срок отправки подарка")),
            ],
            options={
                "verbose_name": "сезон",
                "verbose_name_plural": "сезоны",
                "ordering": ["year"],
                "get_latest_by": "year",
            },
        ),
        # FKs added after all models exist to avoid ordering problems.
        migrations.AddField(
            model_name="member",
            name="season",
            field=models.ForeignKey(
                on_delete=deletion.CASCADE, to="clubadm.Season",
                verbose_name="сезон"),
        ),
        migrations.AddField(
            model_name="member",
            name="user",
            field=models.ForeignKey(
                on_delete=deletion.CASCADE, to="clubadm.User",
                verbose_name="пользователь"),
        ),
        migrations.AddField(
            model_name="mail",
            name="recipient",
            field=models.ForeignKey(
                on_delete=deletion.CASCADE, related_name="+",
                to="clubadm.Member"),
        ),
        migrations.AddField(
            model_name="mail",
            name="sender",
            field=models.ForeignKey(
                on_delete=deletion.CASCADE, related_name="+",
                to="clubadm.Member"),
        ),
        # A user may join each season at most once.
        migrations.AlterUniqueTogether(
            name="member",
            unique_together=set([("user", "season")]),
        ),
    ]
Locals will benefit from a better, safer, less congested Hall Road and Western Port Highway, with two new major upgrades from a re-elected Andrews Labor Government. The $169 million Hall Road investment will see major improvements for local families, including duplicating 5.2 kilometres from McCormicks Road to Cranbourne-Frankston Road, doubling the existing two lanes to four. The project will also see construction of new traffic lights at the intersections of McCormicks Road, Taylors Road, Western Port Highway and Evans Road, and an upgrade of the intersection at Cranbourne-Frankston Road, helping to address what the Frankston Standard Leader has dubbed a “notorious bottleneck”. Benefiting local pedestrians and cyclists, the investment will also see new shared paths on both sides of Hall Road, as well as the installation of new street lighting, safety barriers, road signage and landscaping. A re-elected Labor Government will also invest $54.3 million to remove two congested roundabouts at Ballarto Road and Cranbourne-Frankston Road on the Western Port Highway and replace them with new traffic lights. Under a re-elected Labor Government, planning and pre-construction work on both projects will begin in 2019, with major works kickstarting in 2020. Upgrades to the Western Port Highway will be finished by 2022, while Hall Road will be complete by 2023. Today’s announcement builds on the Andrews Labor Government’s massive investments to ease local congestion, including widening Thompsons Road with more than 10 kilometres of new lanes, the removal of the level crossing near Merinda Park Station, duplicating Lathams Road and building the Mordialloc Freeway. It stands in contrast to the Liberals, who want to flood Melbourne’s outer suburbs with over a million new residents by fast-tracking 290,000 new housing lots. Only Labor will deliver the big improvements that matter to local families, including upgrading Hall Road and Western Port Highway.
'''Statistics of mimic2 database''' from get_sample import Mimic2 from mutil import Graph import numpy as np from matplotlib.pyplot import waitforbuttonpress mimic2 = Mimic2() graph = Graph() def readmission_statisic(): l_id = mimic2.subject_with_chf(max_seq=1) total_admission = 0 alive_on_disch = 0 death_on_disch = 0 readm_and_death_within_th = 0 readm_and_no_death_within_th = 0 no_readm_death_within_th = 0 no_readm_no_death_within_th = 0 duration_th = 30 readm_after_th = 0 no_readm_after_th = 0 l_adm_duration = [] l_readm_duration = [] l_death_duration = [] l_n_admission = [] for id in l_id: subject = mimic2.patient(id) death_dt = subject[0][3] admissions = mimic2.admission(id) l_n_admission.append(len(admissions)) total_admission += len(admissions) for idx, adm in enumerate(admissions): admit_dt = admissions[idx][2] disch_dt = admissions[idx][3] adm_duratrion = (disch_dt - admit_dt).days l_adm_duration.append(adm_duratrion) if death_dt is not None: death_duration = (death_dt - disch_dt).days else: death_duration = np.inf l_death_duration.append(death_duration) if idx < len(admissions) - 1: next_adm_dt = admissions[idx + 1][2] readm_duration = (next_adm_dt - disch_dt).days else: readm_duration = np.inf l_readm_duration.append(readm_duration) # counter if death_duration < 1: death_on_disch += 1 else: alive_on_disch += 1 if death_duration <= duration_th and readm_duration <= duration_th: readm_and_death_within_th += 1 elif death_duration > duration_th and readm_duration <= duration_th: readm_and_no_death_within_th += 1 elif death_duration <= duration_th and readm_duration > duration_th: no_readm_death_within_th += 1 else: no_readm_no_death_within_th += 1 if readm_duration is np.inf: no_readm_after_th += 1 else: readm_after_th += 1 n_subject = len(l_n_admission) l_death_or_readm_duration = [] for idx in range(len(l_readm_duration)): l_death_or_readm_duration.append(min(l_death_duration[idx], l_readm_duration[idx])) print "Total subject: %d" % n_subject print "Total 
admission: %d" % total_admission print "Mean Admission Length: %f" % np.mean(l_adm_duration) print "Median Admission Length: %f" % np.median(l_adm_duration) print "Death discharge: %d" % death_on_disch print "Alive discharge: %d" % alive_on_disch print "__Within %d days__" % duration_th print "Readm / Death: %d" % readm_and_death_within_th print "Readm / no Death: %d" % readm_and_no_death_within_th print "no Readm / Death: %d" % no_readm_death_within_th print "no Readm / no Death: %d" % no_readm_no_death_within_th print "__After %d days__" % duration_th print "Readm: %d" % readm_after_th print "No Readm: %d" % no_readm_after_th print "Histogram of #admissions per subject" hist, bins = np.histogram(l_adm_duration, bins=range(0, 32)) graph.bar_histogram(hist, bins, "Number of Patients", "Admission Duration", True) print "Histogram of #admissions per subject" hist, bins = np.histogram(l_n_admission, bins=range(1, max(l_n_admission) + 1)) graph.bar_histogram(hist, bins, "Number of Patients", "Recorded admissions per patient", True) print "Histogram of readmission duration" hist, bins = np.histogram(l_readm_duration, bins=range(1, 602, 30)) graph.bar_histogram(hist, bins, "Number of readmissions", "Duration between discharge and readmission", False) hist, bins = np.histogram(l_readm_duration, bins=range(1, 32, 1)) graph.bar_histogram(hist, bins, "Number of readmissions", "Duration between discharge and readmission", True) print "Histogram of death duration" hist, bins = np.histogram(l_death_duration, bins=range(1, 601, 30)) graph.bar_histogram(hist, bins, "Number of deaths", "Duration between discharge and death", False) hist, bins = np.histogram(l_death_duration, bins=range(1, 32, 1)) graph.bar_histogram(hist, bins, "Number of readmissions", "Duration between discharge and death", True) print "Histogram of death or readdm duration" hist, bins = np.histogram(l_death_or_readm_duration, bins=range(1, 602, 30)) graph.bar_histogram(hist, bins, "Number of deaths", "Duration 
between discharge and death or readmission", False, filename="DorR_600") hist, bins = np.histogram(l_death_or_readm_duration, bins=range(1, 32, 1)) graph.bar_histogram(hist, bins, "Number of readmissions", "Duration between discharge and death or readmission", True, filename="DorR_30") if __name__ == '__main__': readmission_statisic() waitforbuttonpress()
A Love Letter to Indie Bookstores. Thank you for your love of books, of stories, of community. Thank you for standing strong in the face of e-books and on-line selling. Thank you for not only knowing what book I am trying to describe but offering another book I might like as well. Thank you for being a third place for us to gather, allowing neighborhoods to thrive. I love how you support writers, how you give them a platform to connect with their readers. I love how you respect your customers by knowing us, knowing books and knowing the communities you are part of. I love how when I walk into an indie bookstore anywhere in the world, my soul feels like it has come home. Things are going good right now. Great even. My almost 30-year marriage is strong. Our daughters will both be done with college by this time next year and are making their way out into the world. I love creating and holding space for my yoga students. I am connected to my writing on a daily basis. I’ve lost over 30 pounds and feel amazing and strong and my back keeps getting stronger every single day. My best friend of over 30 years moved less than a half an hour away so we get to play whenever we want. I have a beautiful community of women to lift me up toward my best self. So, why is there this sense of unease lurking at the corners of my life? This sense of don’t get too happy, too content because that’s when the rug will get pulled out from under you. It happens. I know it happens. Loved ones die, marriages end, diagnoses are made. It happens all the time so it’s only a matter of time before it happens to me, so I better be prepared. Better be vigilant and not get too complacent or too smug in my life, in my joy. The first time I heard Brené Brown describe “foreboding joy” I felt chills. That’s me. I do that. All the time. Of course, I never connected it to vulnerability. I didn’t know that the antidote to it is gratitude.
Now, when I feel the shadow of foreboding joy hovering over me, I take a breath and practice gratitude. I lean into the joy. I make a conscious decision to choose joy in that moment. Holding Space for all of Me. When we gather in sacred circles, we hold space for each other. We allow others to have their feelings without intruding on them with offers of tissues, or our own reactions to their feelings—even supportive responses. This got me thinking about how I do or don’t hold space for myself. I don’t hold space for myself when I keep myself so busy or so distracted or numb that I can’t feel what needs to be felt. I don’t hold space for myself when I berate myself for falling short of my own high expectations. I don’t hold space for myself when I drink or eat my way out of loneliness or anxiety or anger. I do hold space for myself when I have compassion when I have chosen to drink or eat my way out of an uncomfortable emotion. I do hold space for myself when I ask for what I need rather than feeling resentful when my (unspoken) needs aren’t met. I do hold space for myself when I make time for self-care through yoga, walking, meditation, writing, reading. I hold space for myself when I allow myself to see and feel all that needs to be seen and felt rather than banishing it to the shadows. I hold space for myself by inviting each guest in, welcoming each and every one. the music of the wind. Today I walked unplugged and listened to the breathings of my heart. to the beauty of life that surrounds me. When I was little, my fears were tangible things: tornados, house fire, being kidnapped, snakes. These things never happened but I was aware of them and knew that they frightened me. As an adult, fear is a much more nebulous thing. Sure, I still have concrete fears but it’s the subconscious fears that trip me up. If you’ve ever taken any kind of therapy, read any self-help or spiritual book, you’ve probably heard that most of our challenges come from fear. 
Most negative emotions can be traced back to fear. Angry? Dig deeper to find what you’re afraid of and using anger to mask. Procrastinating? Pause and try to unearth what fear lies beneath the procrastination. Our human brains are wired for fear. Our survival depends on it. Fear alerts us to the danger around us, triggering our fight or flight response. These days our fight or flight can be triggered due to the constant state of stress we are under. This is where yoga and writing come in for me. They allow me to track fear. They keep me grounded in the present where everything is okay. Yoga allows me to stay present to exactly what is happening in my body and in my mind. Writing allows me to stay present to what I’m really thinking and feeling beneath the surface. Both yoga and writing do not permit me to hide. They require me to dive deep and that is where I discover the fear that is holding me back. Once I am aware of it, I can release it. Once that happens my life expands. And I expand to fill it. The Solace (and Necessity) of Walking. I’ve started walking about three to four miles several times a week. I take my dog, a podcast and head out either through our neighborhood or to the parks nearby. Walking has become my antidote to the constant barrage of awful news. I get outside, into the real world, away from the on-line world that feels like an echo chamber of doom. I step into the sunshine, into the fresh air, see the beautiful sky, the trees and feel a certain solace. It reminds me that there is more going on than just what I see in the news or on-line. So many writers are proponents of walking. Julia Cameron suggests walks as one of the tools for creative recovery. Listening to my podcast doesn’t leave me free from all worldly engagements but it does leave me not quite as tethered to them. It’s not surprising that writers are especially drawn to walks. 
I think it provides a necessary complement to all the sitting, to the stagnation we can begin to feel in our bodies and our minds. Walking is helpful to literal digestion but I think it also helps me digest emotions, news, ideas. I digest what I am reading, what I am writing. Not only that, but walking seems to stimulate our creative juices. A study from Stanford University found that walking led to more creative thinking than sitting did. Exorcising my demons is exactly what it feels like. And that is both a solace and a necessity. What If It Isn’t Up to Me? Rachel Brathen talks honestly about her back pain and her struggles with blaming herself when it goes out every 3-4 months. This time it happened when she picked up her baby girl. When she saw her chiropractor he said that of course she had to pick up her baby and suggested that maybe her back going out or not going out wasn’t up to her. If it’s not up to her then maybe it’s not up to me, either. Maybe it’s not up to me when my own back goes out. Maybe it’s not up to me to make sure everyone is happy around me whether it’s my family, friends or students. Up to me, me, me. I hear those words and I feel I can take a deep breath for the first time ever. What is up to me? Surrender the need to fix and just be present to what is. “Fear compass.” I heard this term on NPR this morning and it reverberated through my whole being like a tuning fork. It got me thinking not only about fear as a compass, but any strong emotion. They all reveal something. Envy reveals what I desire for myself. Anger reveals where a boundary has been breached. Fear reveals what is important to me. Liz Gilbert’s suggestion to live life with curiosity rather than fear also resonates with me. Wonder becomes a door into and through fear. I wonder if this agent is a good fit for my novel, instead of only focusing on if they will like it. 
I wonder what happens in this next scene, instead of being paralyzed into writing nothing because I have no idea. I wonder if I could be a yoga teacher, instead of letting anxiety about my looming empty nest crush me. Wondering if I could combine writing with yoga led me to find my authentic voice and create a sacred space for students to find theirs. So, it seems that fear points me in the direction of curiosity, leading me to live a creative life in awe of the wonder around and within me. Where does fear point you? When I First Heard about Columbine. There are certain events that I will always remember where I was when I heard about them. I was on the school bus when I heard about the death of John Lennon. I was standing in the kitchen when a friend called to tell me about the (first) plane flying into the World Trade Center on 9/11. I was in my bedroom, packing to fly out to my sister, when a nurse called to tell me that my brother-in-law had died from his injuries sustained in the car accident. On April 20, 1999, I was on a pay phone at the Mabel Dodge Luhan House in Taos, New Mexico, attending my first ever writing retreat with Natalie Goldberg. It had taken a lot of convincing on my husband’s part to get me to go. I objected that we couldn’t afford it, that even if we could afford it there were better things to spend that money on, that I couldn’t leave him with the girls for a whole week, that my oldest had to go to Kindergarten Round-up. My husband assured me that none of those excuses were actually based in reality, so I went. Obviously, this was before cell phones, texting and FaceTime. I tried to call once a day from the pay phone. The work was intense. Because I was so secluded from the world, I dove deep into the process of writing, filling three entire notebooks in seven days. On April 20, after I spoke with both of my daughters, my husband got back on the phone. I heard something in his voice. When I asked what was wrong, he didn’t want to tell me. 
He didn’t want to burden me with it, knowing that I was pretty much off the media grid and hadn’t yet heard. After some prodding he told me about the shooting at Columbine. Even just hearing about it, without seeing the constant barrage of images on a TV, was chilling. As the news began to spread throughout the rest of the participants, a palpable heaviness descended on the retreat. That night the wind was intense at the base of the mountain. Natalie shared that Native Americans believe that wind like that is carrying spirits into the afterlife. When I got to the airport a couple days later, the images were splashed over all the televisions. I remember watching the line of kids, hands on their heads walking out of the school and thinking it didn’t seem real. There was a subdued quality to the crowds of passengers huddled around the images. My youngest remembers being on lockdown in kindergarten. A real lockdown, not a drill. She remembers hiding behind a desk and seeing the silhouette of a man slide past the window and she didn’t know if it was a police officer or the bad guy. Last month she ended up on lockdown again, this time up at Central Michigan University when a shooter was at-large. It turned out that he didn’t have a gun with him while on the run after shooting and killing his parents in his dorm. Nobody knew how bad the carnage would be. We didn’t know because we’ve seen how bad it has been. I pray that the next life-changing event I remember will soon be the passage of sensible, national gun control. Because we know how bad the carnage has been. Shame on us for ever letting it happen again.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

from tests import mkstemp, TestCase, get_data_path

import os

from quodlibet import formats
from quodlibet import config

from shutil import copyfileobj


class TestMetaDataBase(TestCase):
    # Path to the reference audio file (without extension); the generated
    # subclasses below set ``ext`` to select a concrete format.
    base = get_data_path("silence-44-s")

    def setUp(self):
        """Copy the base silent file to a temp name/location and load it."""
        config.init()
        fd, self.filename = mkstemp(suffix=self.ext, text=False)
        # Fix: the source handle was previously opened but never closed
        # (resource leak). Context managers close both handles, even if
        # the copy raises.
        with os.fdopen(fd, 'wb') as dst:
            with open(self.base + self.ext, 'rb') as src:
                copyfileobj(src, dst)
        self.song = formats.MusicFile(self.filename)

    def tearDown(self):
        """Delete the temp file and reset global config state."""
        os.remove(self.filename)
        del self.filename
        del self.song
        config.quit()


class _TestMetaDataMixin(object):
    """Format-agnostic metadata read/write assertions, mixed into the
    per-extension test cases generated at module import time below."""

    def test_base_data(self):
        # Values baked into the reference file.
        self.failUnlessEqual(self.song['artist'], 'piman\njzig')
        self.failUnlessEqual(self.song['album'], 'Quod Libet Test Data')
        self.failUnlessEqual(self.song['title'], 'Silence')

    def test_mutability(self):
        # Internal ('=') and combined ('~') tags are read-only; anything
        # else, including unknown tags, should be writable.
        self.failIf(self.song.can_change('=foo'))
        self.failIf(self.song.can_change('foo~bar'))
        self.failUnless(self.song.can_change('artist'))
        self.failUnless(self.song.can_change('title'))
        self.failUnless(self.song.can_change('tracknumber'))
        self.failUnless(self.song.can_change('somebadtag'))
        self.failUnless(self.song.can_change('some%punctuated:tag.'))

    def _test_tag(self, tag, values, remove=True):
        """Write each value, reload from disk and verify the round-trip;
        optionally also verify the tag can be deleted again."""
        self.failUnless(self.song.can_change(tag))
        for value in values:
            self.song[tag] = value
            self.song.write()
            written = formats.MusicFile(self.filename)
            self.failUnlessEqual(written[tag], value)
        if remove:
            del self.song[tag]
            self.song.write()
            deleted = formats.MusicFile(self.filename)
            self.failIf(tag in deleted)

    def test_artist(self):  # a normalish tag
        self._test_tag('artist', [u'me', u'you\nme',
            u'\u6d5c\u5d0e\u3042\u3086\u307f'])

    def test_date(self):  # unusual special handling for mp3s
        self._test_tag('date', [u'2004', u'2005', u'2005-06-12'], False)

    def test_genre(self):  # unusual special handling for mp3s
        self._test_tag('genre', [u'Pop', u'Rock\nClassical', u'Big Bird',
            u'\u30a2\u30cb\u30e1\u30b5\u30f3\u30c8\u30e9'])

    def test_odd_performer(self):
        values = [u"A Person", u"Another"]
        self._test_tag("performer:vocals", values)
        self._test_tag("performer:guitar", values)

    def test_wackjob(self):  # undefined tag
        self._test_tag('wackjob', [u'Jelly\nDanish', u'Muppet',
            u'\u30cf\u30f3\u30d0\u30fc\u30ac\u30fc'])


# Tags exercised generically (single- and multi-value) for every format.
tags = ['album', 'arranger', 'artist', 'author', 'comment', 'composer',
    'conductor', 'copyright', 'discnumber', 'encodedby', 'genre', 'isrc',
    'language', 'license', 'lyricist', 'organization', 'performer', 'title',
    'tracknumber', 'version', 'xyzzy_undefined_tag', 'musicbrainz_trackid',
    'releasecountry']

# Generate one TestCase per supported extension for which a reference file
# exists, adding a single-value and a multi-value round-trip test for each
# tag above (artist/date/genre already have dedicated tests).
for ext in formats.loaders.keys():
    if os.path.exists(TestMetaDataBase.base + ext):
        extra_tests = {}
        for tag in tags:
            if tag in ['artist', 'date', 'genre']:
                continue

            # ``tag=tag`` binds the per-iteration value as a default,
            # avoiding the late-binding closure pitfall.
            def _test_tag(self, tag=tag):
                self._test_tag(tag, [u'a'])
            extra_tests['test_tag_' + tag] = _test_tag

            def _test_tags(self, tag=tag):
                self._test_tag(tag, [u'b\nc'])
            extra_tests['test_tags_' + tag] = _test_tags

        name = 'MetaData' + ext
        testcase = type(
            name, (TestMetaDataBase, _TestMetaDataMixin), extra_tests)
        testcase.ext = ext
        globals()[name] = testcase
unac-ncrb.org seeks to ensure that all content and information published at this Web site is current and accurate. The information at unac-ncrb.org does not in any way constitute legal or professional advice and unac-ncrb.org cannot be held liable for actions arising from its use. In addition, unac-ncrb.org cannot be held responsible for the contents of any externally linked pages.
#!/usr/bin/env python
# coding=UTF-8
# Title: handler.py
# Description: This file contains all tornado.web.RequestHandler classes used in this application
# Author David Nellessen <david.nellessen@familo.net>
# Date: 12.01.15
# Note:
# ==============================================================================

# Import modules
from tornado import web, gen, escape
from tornado.escape import utf8
import logging
import phonenumbers
import pygeoip
from tornado.iostream import StreamClosedError


class BaseHandler(web.RequestHandler):
    """
    A base handler providing localization features, phone number validation
    and formation as well as use of service limitation based on IP addresses.

    It also implements support for JSONP (for cross-domain requests).
    """
    # Whether to guess the country for numbers without an international
    # prefix (see parse_phonenumber); fallback country code otherwise.
    guess_country = True
    default_country = 'DE'

    def __init__(self, application, request, **kwargs):
        super(BaseHandler, self).__init__(application, request, **kwargs)
        self.counter = {}

    def write(self, chunk):
        """
        Overwrites the default write method to support JSONP: if a
        'callback' query parameter is given, the response is wrapped in a
        call to that function.
        """
        if self._finished:
            raise RuntimeError("Cannot write() after finish(). May be caused "
                               "by using async operations without the "
                               "@asynchronous decorator.")
        if isinstance(chunk, dict):
            chunk = escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        callback = self.get_argument('callback', None)
        if callback:
            # Wrap JSON payload for cross-domain <script> consumption.
            chunk = callback + '(' + chunk + ');'
        chunk = utf8(chunk)
        self._write_buffer.append(chunk)

    def get_browser_locale_code(self):
        """
        Determines the user's locale from the ``Accept-Language`` header.

        This is similar to tornado.web.get_browser_locale except it returns
        the code and not a Locale instance. Also this will return a result
        whether a translation for this language was loaded or not.

        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
        """
        if "Accept-Language" in self.request.headers:
            languages = self.request.headers["Accept-Language"].split(",")
            locales = []
            for language in languages:
                parts = language.strip().split(";")
                if len(parts) > 1 and parts[1].startswith("q="):
                    try:
                        score = float(parts[1][2:])
                    except (ValueError, TypeError):
                        score = 0.0
                else:
                    score = 1.0
                locales.append((parts[0], score))
            if locales:
                # Highest q-value first; return the most preferred code.
                locales.sort(key=lambda pair: pair[1], reverse=True)
                logging.debug(locales)
                codes = [l[0] for l in locales]
                return codes[0]
        return self.__class__.default_country

    def get_user_country_by_ip(self):
        """
        Determines the user's country by his IP-address. This will return
        the country code or None if not found.
        """
        # Fix: 'country' was previously unbound (NameError) when both the
        # IPv4 and IPv6 lookups raised GeoIPError.
        country = None
        try:
            country = self.application.geo_ip.country_code_by_addr(
                self.request.remote_ip)
        except pygeoip.GeoIPError:
            try:
                country = self.application.geo_ipv6.country_code_by_addr(
                    self.request.remote_ip)
            except pygeoip.GeoIPError:
                pass
        if not country:
            logging.warning('Could not locate country for '
                            + self.request.remote_ip)
            return None
        else:
            logging.debug('Determined country by IP address: ' + country)
            return country

    def parse_phonenumber(self, number):
        """
        Validates and parses a phonenumber. It will return a phone number
        object or False if parsing failed.

        If the phone number is not given in full international notion the
        country will be guessed if the class attribute guess_country is
        True. Guessing will be done as follows:

        1. If a query string parameter 'country' is given as a country code
           (i.e. 'US', 'DE', ...) it will be used.
        2. If no parameter country is given the country will be determined
           by the remote IP address.
        3. Otherwise the country determined by the request header
           Accept-Language will be used.
        4. As a fall-back the classes default_country attribute will be used.
        """
        try:
            # Succeeds only for numbers in full international notation.
            return phonenumbers.parse(number)
        except phonenumbers.NumberParseException:
            # Get the country code to use for phone number parsing.
            if self.__class__.guess_country:
                country_code = self.get_argument('country', None)
                if country_code is None:
                    country_code = self.get_user_country_by_ip()
                if country_code is None:
                    code = self.get_browser_locale_code().replace('-', '_')
                    parts = code.split('_')
                    if len(parts) > 1:
                        country_code = parts[1]
                if country_code is None:
                    country_code = self.__class__.default_country
                country_code = country_code.upper()
                logging.debug("Final country code: " + country_code)
            else:
                country_code = self.__class__.default_country

            # Parse the phone number into international notion.
            try:
                number_parsed = phonenumbers.parse(number, country_code)
                return number_parsed
            except phonenumbers.NumberParseException:
                return False

    @gen.coroutine
    def limit_call(self, chash=None, amount=2, expire=10):
        """
        Use this function to limit user requests. Returns True if this
        function was called less than 'amount' times in the last 'expire'
        seconds with the same value 'chash' and the same remote IP address
        or False otherwise.
        """
        key = 'limit_call_' + chash + '_' + self.request.remote_ip
        redis = self.application.redis
        try:
            current_value = yield gen.Task(redis.get, key)
        except StreamClosedError:
            # Redis connection dropped: reconnect once and retry.
            yield gen.Task(self.application.redis_reconnect)
            redis = self.application.redis
            current_value = yield gen.Task(redis.get, key)
        if current_value is not None and int(current_value) >= amount:
            logging.info('Call Limitation acceded: ' + key)
            raise gen.Return(False)
        else:
            yield gen.Task(redis.incr, key)
            if not current_value:
                # First call within the window: start the expiry timer.
                yield gen.Task(redis.expire, key, expire)
            raise gen.Return(True)


class DLRHandler(web.RequestHandler):
    """
    Handles delivery receipts.
    """

    def get(self):
        """
        All delivery receipts will be send as HTTP-GET requests.
        """
        # TODO: Parse request!
        logging.info('Received DLR. Not yet parsed though.')


class NumberValidationHandler(BaseHandler):
    """
    Validates a phone number.
    """
    # Per-IP rate limit: at most limit_amount calls per limit_expires seconds.
    limit_amount = 10
    limit_expires = 3600

    @gen.coroutine
    def get(self):
        """
        Validates a phone number given as the query string parameter
        'number'.

        If the phone number is not given in full international notion the
        country will be guessed if the class attribute guess_country is
        True. Guessing will be done as follows:

        1. If a query string parameter 'country' is given as a country code
           (i.e. 'US', 'DE', ...) it will be used.
        2. If no parameter country is given the country will be determined
           by the remote IP address.
        3. Otherwise the country determined by the request header
           Accept-Language will be used.
        4. As a fall-back the classes attribute default_country will be used.
        """
        # Limit calls.
        if self.limit_amount and not (yield self.limit_call(
                'number_validation', self.limit_amount, self.limit_expires)):
            #raise web.HTTPError(403, 'Number Validation request limit acceded')
            self.finish({'status': 'error', 'error': 'limit_acceded'})
            return

        # Decode request's query string parameters.
        number = self.get_argument('number', None)
        if not number:
            self.finish({'status': 'error', 'error': 'number_missing'})
            return
        logging.debug('Received number {} for validation'.format(number))
        numberobj = self.parse_phonenumber(number)
        if numberobj:
            number = phonenumbers.format_number(
                numberobj, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
        else:
            number = False
        self.finish({'status': 'ok', 'number': number})


class SimpleMessageHandler(BaseHandler):
    # Message text and sender id used for every outgoing SMS; rate limit
    # as in NumberValidationHandler.
    message = 'This is an Example Message'
    sender = 'Put a sender title or number here'
    limit_amount = 10
    limit_expires = 3600

    @gen.coroutine
    def get(self):
        """Send the configured message to the phone number given as the
        'receiver' query parameter (validated and rate-limited)."""
        # Limit calls.
        if self.limit_amount and not (yield (self.limit_call(
                'example_handler', self.limit_amount, self.limit_expires))):
            self.finish({'status': 'error', 'error': 'limit_acceded'})
            return

        # Get receiver's phone number as 'receiver' parameter.
        receiver = self.get_argument('receiver', None)
        if not receiver:
            self.finish({'status': 'error', 'error': 'receiver_missing'})
            return

        # Parse the given phone number.
        receiverobj = self.parse_phonenumber(receiver)
        if not receiverobj:
            self.finish({'status': 'error', 'error': 'receiver_validation'})
            return

        # Format numbers for processing and displaying.
        receiver_nice = phonenumbers.format_number(
            receiverobj, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
        receiver = phonenumbers.format_number(
            receiverobj, phonenumbers.PhoneNumberFormat.E164)

        # Send message to receiver.
        result = yield gen.Task(self.application.nexmo_client.send_message,
                                self.__class__.sender, receiver,
                                self.__class__.message)

        # Process result.
        if result:
            self.finish({'status': 'ok', 'message': 'Message sent',
                         'number': receiver_nice})
        else:
            self.finish({'status': 'error', 'error': 'nexmo_error',
                         'message': 'Nexmo Service Error',
                         'number': receiver_nice})
Love the choice of colors and how you fussy cut the images! The use of vellum is awesome! Nicely done!