text
stringlengths
29
850k
import inspect import warnings from collections import namedtuple from collections.abc import MutableMapping from operator import attrgetter from typing import Set import attr from ..compat import ascii_escaped from ..compat import getfslineno from ..compat import NOTSET from _pytest.outcomes import fail from _pytest.warning_types import PytestUnknownMarkWarning EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" def alias(name, warning=None): getter = attrgetter(name) def warned(self): warnings.warn(warning, stacklevel=2) return getter(self) return property(getter if warning is None else warned, doc="alias for " + name) def istestfunc(func): return ( hasattr(func, "__call__") and getattr(func, "__name__", "<lambda>") != "<lambda>" ) def get_empty_parameterset_mark(config, argnames, func): from ..nodes import Collector requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) if requested_mark in ("", None, "skip"): mark = MARK_GEN.skip elif requested_mark == "xfail": mark = MARK_GEN.xfail(run=False) elif requested_mark == "fail_at_collect": f_name = func.__name__ _, lineno = getfslineno(func) raise Collector.CollectError( "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1) ) else: raise LookupError(requested_mark) fs, lineno = getfslineno(func) reason = "got empty parameter set %r, function %s at %s:%d" % ( argnames, func.__name__, fs, lineno, ) return mark(reason=reason) class ParameterSet(namedtuple("ParameterSet", "values, marks, id")): @classmethod def param(cls, *values, marks=(), id=None): if isinstance(marks, MarkDecorator): marks = (marks,) else: assert isinstance(marks, (tuple, list, set)) if id is not None: if not isinstance(id, str): raise TypeError( "Expected id to be a string, got {}: {!r}".format(type(id), id) ) id = ascii_escaped(id) return cls(values, marks, id) @classmethod def extract_from(cls, parameterset, force_tuple=False): """ :param parameterset: a legacy style parameterset that may or may not be a tuple, and may or may not 
be wrapped into a mess of mark objects :param force_tuple: enforce tuple wrapping so single argument tuple values don't get decomposed and break tests """ if isinstance(parameterset, cls): return parameterset if force_tuple: return cls.param(parameterset) else: return cls(parameterset, marks=[], id=None) @staticmethod def _parse_parametrize_args(argnames, argvalues, *args, **kwargs): if not isinstance(argnames, (tuple, list)): argnames = [x.strip() for x in argnames.split(",") if x.strip()] force_tuple = len(argnames) == 1 else: force_tuple = False return argnames, force_tuple @staticmethod def _parse_parametrize_parameters(argvalues, force_tuple): return [ ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues ] @classmethod def _for_parametrize(cls, argnames, argvalues, func, config, function_definition): argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) del argvalues if parameters: # check all parameter sets have the correct number of values for param in parameters: if len(param.values) != len(argnames): msg = ( '{nodeid}: in "parametrize" the number of names ({names_len}):\n' " {names}\n" "must be equal to the number of values ({values_len}):\n" " {values}" ) fail( msg.format( nodeid=function_definition.nodeid, values=param.values, names=argnames, names_len=len(argnames), values_len=len(param.values), ), pytrace=False, ) else: # empty parameter set (likely computed at runtime): create a single # parameter set with NOTSET values, with the "empty parameter set" mark applied to it mark = get_empty_parameterset_mark(config, argnames, func) parameters.append( ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) ) return argnames, parameters @attr.s(frozen=True) class Mark: #: name of the mark name = attr.ib(type=str) #: positional arguments of the mark decorator args = attr.ib() # List[object] #: keyword arguments of the mark decorator 
kwargs = attr.ib() # Dict[str, object] def combined_with(self, other): """ :param other: the mark to combine with :type other: Mark :rtype: Mark combines by appending args and merging the mappings """ assert self.name == other.name return Mark( self.name, self.args + other.args, dict(self.kwargs, **other.kwargs) ) @attr.s class MarkDecorator: """ A decorator for test functions and test classes. When applied it will create :class:`Mark` objects which are often created like this:: mark1 = pytest.mark.NAME # simple MarkDecorator mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator and can then be applied as decorators to test functions:: @mark2 def test_function(): pass When a MarkDecorator instance is called it does the following: 1. If called with a single class as its only positional argument and no additional keyword arguments, it attaches itself to the class so it gets applied automatically to all test cases found in that class. 2. If called with a single function as its only positional argument and no additional keyword arguments, it attaches a MarkInfo object to the function, containing all the arguments already stored internally in the MarkDecorator. 3. When called in any other case, it performs a 'fake construction' call, i.e. it returns a new MarkDecorator instance with the original MarkDecorator's content updated with the arguments passed to this call. Note: The rules above prevent MarkDecorator objects from storing only a single function or class reference as their positional argument with no additional keyword or positional arguments. 
""" mark = attr.ib(validator=attr.validators.instance_of(Mark)) name = alias("mark.name") args = alias("mark.args") kwargs = alias("mark.kwargs") @property def markname(self): return self.name # for backward-compat (2.4.1 had this attr) def __eq__(self, other): return self.mark == other.mark if isinstance(other, MarkDecorator) else False def __repr__(self): return "<MarkDecorator {!r}>".format(self.mark) def with_args(self, *args, **kwargs): """ return a MarkDecorator with extra arguments added unlike call this can be used even if the sole argument is a callable/class :return: MarkDecorator """ mark = Mark(self.name, args, kwargs) return self.__class__(self.mark.combined_with(mark)) def __call__(self, *args, **kwargs): """ if passed a single callable argument: decorate it with mark info. otherwise add *args/**kwargs in-place to mark information. """ if args and not kwargs: func = args[0] is_class = inspect.isclass(func) if len(args) == 1 and (istestfunc(func) or is_class): store_mark(func, self.mark) return func return self.with_args(*args, **kwargs) def get_unpacked_marks(obj): """ obtain the unpacked marks that are stored on an object """ mark_list = getattr(obj, "pytestmark", []) if not isinstance(mark_list, list): mark_list = [mark_list] return normalize_mark_list(mark_list) def normalize_mark_list(mark_list): """ normalizes marker decorating helpers to mark objects :type mark_list: List[Union[Mark, Markdecorator]] :rtype: List[Mark] """ extracted = [ getattr(mark, "mark", mark) for mark in mark_list ] # unpack MarkDecorator for mark in extracted: if not isinstance(mark, Mark): raise TypeError("got {!r} instead of Mark".format(mark)) return [x for x in extracted if isinstance(x, Mark)] def store_mark(obj, mark): """store a Mark on an object this is used to implement the Mark declarations/decorators correctly """ assert isinstance(mark, Mark), mark # always reassign name to avoid updating pytestmark # in a reference that was only borrowed obj.pytestmark = 
get_unpacked_marks(obj) + [mark] class MarkGenerator: """ Factory for :class:`MarkDecorator` objects - exposed as a ``pytest.mark`` singleton instance. Example:: import pytest @pytest.mark.slowtest def test_function(): pass will set a 'slowtest' :class:`MarkInfo` object on the ``test_function`` object. """ _config = None _markers = set() # type: Set[str] def __getattr__(self, name): if name[0] == "_": raise AttributeError("Marker name must NOT start with underscore") if self._config is not None: # We store a set of markers as a performance optimisation - if a mark # name is in the set we definitely know it, but a mark may be known and # not in the set. We therefore start by updating the set! if name not in self._markers: for line in self._config.getini("markers"): # example lines: "skipif(condition): skip the given test if..." # or "hypothesis: tests which use Hypothesis", so to get the # marker name we split on both `:` and `(`. marker = line.split(":")[0].split("(")[0].strip() self._markers.add(marker) # If the name is not in the set of known marks after updating, # then it really is time to issue a warning or an error. if name not in self._markers: if self._config.option.strict_markers: fail( "{!r} not found in `markers` configuration option".format(name), pytrace=False, ) else: warnings.warn( "Unknown pytest.mark.%s - is this a typo? 
You can register " "custom marks to avoid this warning - for details, see " "https://docs.pytest.org/en/latest/mark.html" % name, PytestUnknownMarkWarning, ) return MarkDecorator(Mark(name, (), {})) MARK_GEN = MarkGenerator() class NodeKeywords(MutableMapping): def __init__(self, node): self.node = node self.parent = node.parent self._markers = {node.name: True} def __getitem__(self, key): try: return self._markers[key] except KeyError: if self.parent is None: raise return self.parent.keywords[key] def __setitem__(self, key, value): self._markers[key] = value def __delitem__(self, key): raise ValueError("cannot delete key in keywords dict") def __iter__(self): seen = self._seen() return iter(seen) def _seen(self): seen = set(self._markers) if self.parent is not None: seen.update(self.parent.keywords) return seen def __len__(self): return len(self._seen()) def __repr__(self): return "<NodeKeywords for node {}>".format(self.node) @attr.s(cmp=False, hash=False) class NodeMarkers: """ internal structure for storing marks belonging to a node ..warning:: unstable api """ own_markers = attr.ib(default=attr.Factory(list)) def update(self, add_markers): """update the own markers """ self.own_markers.extend(add_markers) def find(self, name): """ find markers in own nodes or parent nodes needs a better place """ for mark in self.own_markers: if mark.name == name: yield mark def __iter__(self): return iter(self.own_markers)
Kuwait's Al Madina for Finance and Investment Company will appeal an arbitration ruling ordering it to pay Global Investment House $10 million (Dh36.7m) related to a share dispute, the firm said yesterday. Last week, the arbitration centre at Kuwait's chamber of commerce ruled in favour of Global, the country's biggest investment bank, over the $10m debt and ordered Al Madina to pay $300,000 in compensation. Al Madina said it will take legal action and challenge the ruling in an appeals court. The dispute relates to a share repurchase agreement between the two firms.
import django django.setup() def gring_to_obj(gring): from datasets.utils import lat_lon_points_to_polygon line = lat_lon_points_to_polygon(gring[:4], gring[4:]) return line def _read_modis_geoData(dirpath, test_set): import pandas as pd import glob from os.path import join from datetime import timedelta files = glob.glob(join(dirpath, "*.txt")) if test_set: files = files[::100] df = pd.concat(pd.read_csv(f, header=2, parse_dates=[1]) for f in files) # We only want the day-time files df = df[df.DayNightFlag == "D"] # Create a Multi-point object for each set of GRings df["poly"] = df.filter(regex="GRing").apply(gring_to_obj, axis=1) # The granules are 5 minutes each df['EndDateTime'] = df['StartDateTime'] + timedelta(minutes=5) return df def load_modis_geoData(dirpath, test_set=False): from datasets.models import MeasurementFile, Measurement df = _read_modis_geoData(dirpath, test_set) aod = Measurement(measurement_type='AOD') aod.save() aod.measurementvariable_set.create(variable_name='AOD_550_Dark_Target_Deep_Blue_Combined') for _index, row in df.iterrows(): mf = MeasurementFile(time_start=row['StartDateTime'], time_end=row['EndDateTime'], spatial_extent=row['poly'].wkt, name=row['# GranuleID']) mf.save() aod.measurementfile_set.add(mf) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('path', help="MODIS input path") parser.add_argument('--test_set', help="Only do a subset", action='store_true') # Gets command line args by default args = parser.parse_args() load_modis_geoData(args.path, args.test_set)
The Gamo MRA Showstopper Air Rifle Was Designed In conjunction With Shawn Michaels And Keith Mark, hosts Of Macmillan River Adventures (MRA). This Powerful Spring Piston operated Air Rifle Was Designed To Deliver Maximum Terminal Velocity. With Its Large 33mm Cylinder The Showstopper delivers velocities Of 1,400 Fps With Gamo's PBA Pellets (Not Included), And Will Stop Any varmints In Its Path. The Rifle Also features Gamo's Two-Stage Sat Trigger And SWA Recoil Pad. The All-Weather Synthetic Stock Is extremely Durable And Has Twin Cheek Pad For Ambidextrous Shooting. - Cocking Effort: 30 Lbs. - Trigger Pull: 3.75 Lbs. The Gamo MRA ShowsTopper Air Rifle Was Designed In conjunction With Shawn Michaels And Keith Mark, hosts Of Macmillan River Adventures (MRA). This Powerful Spring Piston operated Air Rifle Was Designed To Deliver Maximum Terminal Velocity. With Its Large 33mm Cylinder The ShowsTopper Delivers veloci....See Details For More Info.
import os, random, struct, sys from Crypto.Cipher import AES from optparse import OptionParser import getpass import hashlib parser = OptionParser() parser.add_option("-p") (options, args) = parser.parse_args() if(len(sys.argv) < 2): print "usage: python daes_cmdl.py input_file_name <output_file_name> -p <password>" sys.exit() in_file = sys.argv[1] if(len(sys.argv) == 3): out_file = sys.argv[2] else: no_out_file = True out_filename = os.path.splitext(in_file)[0] + '1234-09876' cwd = os.getcwd() if(options.p): password = options.p else: #password = raw_input("please specify your password") password = getpass.getpass("please specify your password") key = hashlib.sha256(password).digest() def decrypt_file(key, in_filename, out_filename=None, chunksize=24*1024): """ Decrypts a file using AES (CBC mode) with the given key. Parameters are similar to encrypt_file, with one difference: out_filename, if not supplied will be in_filename without its last extension (i.e. if in_filename is 'aaa.zip.enc' then out_filename will be 'aaa.zip') """ if not out_filename: out_filename = os.path.splitext(in_filename)[0] + '1234-09876' #print out_filename with open(in_filename, 'rb') as infile: origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0] iv = infile.read(16) decryptor = AES.new(key, AES.MODE_CBC, iv) with open(out_filename, 'wb') as outfile: while True: chunk = infile.read(chunksize) if len(chunk) == 0: break outfile.write(decryptor.decrypt(chunk)) outfile.truncate(origsize) decrypt_file(key, in_file) with open(cwd + "/" + out_filename,"r") as f: #minlen = 12 for line in f: sys.stdout.write(line) if(no_out_file): if sys.platform.startswith("linux"): os.system("shred "+ cwd + "/" + out_filename) os.remove(cwd + "/" + out_filename) else: os.remove(cwd + "/" + out_filename) sys.exit(0)
This Privacy Statement applies to www.thekingfisherclub.com owned and operated by The Kingfisher Club Benal Beach. This Privacy Statement describes how we collect and use the information, which may include personal data, you provide on our web site: www.thekingfisherclub.com. It also describes the choices available to you regarding our use of your personal data and how you can access and update this data.
################################################################################ # # Program: GDCM (Grassroots DICOM). A DICOM library # # Copyright (c) 2006-2011 Mathieu Malaterre # All rights reserved. # See Copyright.txt or http://gdcm.sourceforge.net/Copyright.html for details. # # This software is distributed WITHOUT ANY WARRANTY; without even # the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR # PURPOSE. See the above copyright notice for more information. # # File: ReadAndDumpDICOMDIR.py # # Author: Lukas Batteau (lbatteau gmail) # # This example shows how to read and dump a DICOMDIR File. # Based on Tom Marynowski's (lordglub gmail) example. # # Usage: # python ReadAndDumpDICOMDIR.py [DICOMDIR file] ############################################################################ import sys import gdcm if __name__ == "__main__": # Check arguments if (len(sys.argv) < 2): # No filename passed print "No input filename found" quit() filename = sys.argv[1] # Read file reader = gdcm.Reader() reader.SetFileName(filename) if (not reader.Read()): print "Unable to read %s" % (filename) quit() file = reader.GetFile() # Retrieve header information fileMetaInformation = file.GetHeader() print fileMetaInformation # Retrieve data set dataSet = file.GetDataSet() #print dataSet # Check media storage mediaStorage = gdcm.MediaStorage() mediaStorage.SetFromFile(file) if (gdcm.MediaStorage.GetMSType(str(mediaStorage)) != gdcm.MediaStorage.MediaStorageDirectoryStorage): # File is not a DICOMDIR print "This file is not a DICOMDIR (Media storage type: %s)" % (str(mediaStorage)) quit() # Check Media Storage SOP Class if (fileMetaInformation.FindDataElement(gdcm.Tag(0x0002, 0x0002))): sopClassUid = str(fileMetaInformation.GetDataElement(gdcm.Tag(0x0002, 0x0002)).GetValue()) # Check SOP UID if (sopClassUid != "1.2.840.10008.1.3.10"): # File is not a DICOMDIR print "This file is not a DICOMDIR" else: # Not present print "Media Storage SOP Class not present" quit() # 
Iterate through the DICOMDIR data set iterator = dataSet.GetDES().begin() while (not iterator.equal(dataSet.GetDES().end())): dataElement = iterator.next() # Check the element tag if (dataElement.GetTag() == gdcm.Tag(0x004, 0x1220)): # The 'Directory Record Sequence' element sequence = dataElement.GetValueAsSQ() # Loop through the sequence items itemNr = 1 while (itemNr < sequence.GetNumberOfItems()): item = sequence.GetItem(itemNr) # Check the element tag if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))): # The 'Directory Record Type' element value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue()) # PATIENT while (value.strip() == "PATIENT"): print value.strip() # Print patient name if (item.FindDataElement(gdcm.Tag(0x0010, 0x0010))): value = str(item.GetDataElement(gdcm.Tag(0x0010, 0x0010)).GetValue()) print value # Print patient ID if (item.FindDataElement(gdcm.Tag(0x0010, 0x0020))): value = str(item.GetDataElement(gdcm.Tag(0x0010, 0x0020)).GetValue()) print value # Next itemNr = itemNr + 1 item = sequence.GetItem(itemNr) if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))): value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue()) # STUDY while (value.strip() == "STUDY"): print value.strip() # Print study UID if (item.FindDataElement(gdcm.Tag(0x0020, 0x000d))): value = str(item.GetDataElement(gdcm.Tag(0x0020, 0x000d)).GetValue()) print value # Print study date if (item.FindDataElement(gdcm.Tag(0x0008, 0x0020))): value = str(item.GetDataElement(gdcm.Tag(0x0008, 0x0020)).GetValue()) print value # Print study description if (item.FindDataElement(gdcm.Tag(0x0008, 0x1030))): value = str(item.GetDataElement(gdcm.Tag(0x0008, 0x1030)).GetValue()) print value # Next itemNr = itemNr + 1 item = sequence.GetItem(itemNr) if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))): value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue()) # SERIES while (value.strip() == "SERIES"): print value.strip() # Print series UID if 
(item.FindDataElement(gdcm.Tag(0x0020, 0x000e))): value = str(item.GetDataElement(gdcm.Tag(0x0020, 0x000e)).GetValue()) print value # Print series modality if (item.FindDataElement(gdcm.Tag(0x0008, 0x0060))): value = str(item.GetDataElement(gdcm.Tag(0x0008, 0x0060)).GetValue()) print "Modality" print value # Print series description if (item.FindDataElement(gdcm.Tag(0x0008, 0x103e))): value = str(item.GetDataElement(gdcm.Tag(0x0008, 0x103e)).GetValue()) print "Description" print value # Next itemNr = itemNr + 1 item = sequence.GetItem(itemNr) if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))): value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue()) # IMAGE while (value.strip() == "IMAGE"): print value.strip() # Print image UID if (item.FindDataElement(gdcm.Tag(0x0004, 0x1511))): value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1511)).GetValue()) print value # Next if (itemNr < sequence.GetNumberOfItems()): itemNr = itemNr + 1 else: break item = sequence.GetItem(itemNr) if (item.FindDataElement(gdcm.Tag(0x0004, 0x1430))): value = str(item.GetDataElement(gdcm.Tag(0x0004, 0x1430)).GetValue()) # Next itemNr = itemNr + 1
Trillium Health Partners is committed to providing an exceptional experience for our patients and their families. The Patient Relations office is here to support you through your experience at the hospital. We welcome your compliments, concerns or suggestions for improvement and encourage you to be involved in your care. The Excellent Care for All Act (2010) puts patients first by improving the quality and value of the patient experience through the application of evidence-based health care. It will improve health care while ensuring that the system we rely on today is there for future generations. This legislation requires the hospital ensure there is a Patient Relations process in place.
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. ############################################################################ # A script to generate pulse readable report. infile = file('inputformat.logs') outfile = file('inputformat.report','w') testsuitename = '' outline = '' success = False line = infile.readline() while line: if line.find('Executing test case: ')!=-1: if len(outline) != 0: if success: outline = outline + "|Test Status|PASS" else: outline = outline + "|Test Status|FAILED" outfile.write(outline+'\n') outline="Test Suite Name|" + testsuitename + "|Test Case Name|" success = False startIndex = line.find('Executing test case: ') + len('Executing test case: ') endIndex=len(line) testcasename = line[startIndex:endIndex-1] outline = outline + testcasename + "|Test Detail|" + testcasename + " (0.00 ms)" elif line.find('Executing test suite: ')!=-1: startIndex = line.find('Executing test suite: ') + len('Executing test suite: ') endIndex = len(line) testsuitename = line[startIndex:endIndex-1] elif line.find('Successfully finish test case: ')!=-1: success = True line = infile.readline() if len(outline) != 0: if success: outline = outline + "|Test Status|PASS" else: outline = outline + "|Test Status|FAILED" 
outfile.write(outline+'\n') outfile.flush() infile.close() outfile.close()
Will You Be at FMI 2015? We love sharing the Palermo's story with potential customers. If you're going to be at the Food Marketing Institute trade show at McCormick Place in Chicago this June 9-11, let's talk. We're now scheduling customer business meetings; email us at sales@palermospizza.com to set up your time.
#!/usr/bin/env python try: # python 2 import xmlrpclib except: # python 3 import xmlrpc.client as xmlrpclib import time import tempfile import os.path import sys import pyBigWig from deeptools.utilities import mungeChromosome from deeptoolsintervals import GTF import datetime def isDeepBlue(fname): """ Returns true if the file ends in .wig, .wiggle, or .bedgraph, since these indicate a file on the deepBlue server """ if fname.endswith(".wig"): return True if fname.endswith(".wiggle"): return True if fname.endswith(".bedgraph"): return True if fname.startswith("http") or fname.startswith("ftp"): return False # For ENCODE samples, the "Name" is just the ENCODE sample ID, so as a fallback check for files that aren't there. if not os.path.exists(fname): return True return False def mergeRegions(regions): """ Given a list of [(chrom, start, end), ...], merge all overlapping regions This returns a dict, where values are sorted lists of [start, end]. """ bar = sorted(regions) out = dict() last = [None, None, None] for reg in bar: if reg[0] == last[0] and reg[1] <= last[2]: if reg[2] > last[2]: last[2] = reg[2] continue else: if last[0]: if last[0] not in out: out[last[0]] = list() out[last[0]].append([last[1], last[2]]) last = [reg[0], reg[1], reg[2]] if last[0] not in out: out[last[0]] = list() out[last[0]].append([last[1], last[2]]) return out def makeTiles(db, args): """ Given a deepBlue object, return a list of regions that will be queried """ out = [] for (k, v) in db.chromsTuple: start = 0 while start <= v: end = start + args.binSize if end > v: end = v out.append([k, start, end]) start += end + args.distanceBetweenBins return out def makeChromTiles(db): """ Make a region for each chromosome """ out = [] for (k, v) in db.chromsTuple: out.append([k, 0, v]) return out def makeRegions(BED, args): """ Given a list of BED/GTF files, make a list of regions. These are vaguely extended as appropriate. 
For simplicity, the maximum of --beforeRegionStartLength and --afterRegionStartLength are tacked on to each end and transcripts are used for GTF files. """ itree = GTF(BED, transcriptID=args.transcriptID, transcript_id_designator=args.transcript_id_designator) o = [] extend = 0 # The before/after stuff is specific to computeMatrix if "beforeRegionStartLength" in args: extend = max(args.beforeRegionStartLength, args.afterRegionStartLength) for chrom in itree.chroms: regs = itree.findOverlaps(chrom, 0, 4294967295) # bigWig files use 32 bit coordinates for reg in regs: o.append([chrom, max(0, reg[0] - extend), reg[1] + extend]) del itree return o def preloadWrapper(foo): """ This is a wrapper around the preload function for multiprocessing """ args = foo[2] regs = foo[3] res = deepBlue(foo[0], url=args.deepBlueURL, userKey=args.userKey) return res.preload(regs, tmpDir=args.deepBlueTempDir) class deepBlue(object): def __init__(self, sample, url="http://deepblue.mpi-inf.mpg.de/xmlrpc", userKey="anonymous_key"): """ Connect to the requested deepblue server with the given user key and request the specifed sample from it. >>> sample = "S002R5H1.ERX300721.H3K4me3.bwa.GRCh38.20150528.bedgraph" >>> db = deepBlue(sample) # doctest: +SKIP >>> assert(db.chroms("chr1") == 248956422) # doctest: +SKIP """ self.sample = sample self.url = url self.userKey = userKey self.server = xmlrpclib.Server(url, allow_none=True) self.info = None self.experimentID = None self.genome = None self.chromsDict = None self.chromsTuple = None # Set self.experimentID experimentID = self.getEID() if not experimentID: raise RuntimeError("The requested sample({}) has no associated experiment! 
If you did not intend to use samples on deepBlue, then it appears either you misspelled a file name or (if you're using BAM files for input) one of your BAM files is lacking a valid index.".format(sample)) # Set self.info (status, resp) = self.server.info(self.experimentID, userKey) if status != "okay": raise RuntimeError("Received the following error while fetching information about '{}': {}".format(resp, sample)) self.info = resp[0] # Set self.genome genome = self.getGenome() if not genome: raise RuntimeError("Unable to determine an appropriate genome for '{}'".format(sample)) # Set self.chroms chroms = self.getChroms() if not chroms: raise RuntimeError("Unable to determine chromosome names/sizes for '{}'".format(sample)) def getEID(self): """ Given a sample name, return its associated experiment ID (or None on error). self.experimentID is then the internal ID (e.g., e52525) """ (status, resps) = self.server.search(self.sample, "experiments", self.userKey) if status != "okay": raise RuntimeError("Received an error ({}) while searching for the experiment associated with '{}'".format(resps, self.sample)) for resp in resps: if resp[1] == self.sample: self.experimentID = resp[0] return resp[0] return None def getGenome(self): """ Determines and sets the genome assigned to a given sample. On error, this raises a runtime exception. self.genome is then the internal genome ID. """ if "genome" in self.info.keys(): self.genome = self.info["genome"] return self.genome def getChroms(self): """ Determines and sets the chromosome names/sizes for a given sample. On error, this raises a runtime exception. 
self.chroms is then a dictionary of chromosome:length pairs """ (status, resp) = self.server.chromosomes(self.genome, self.userKey) if status != "okay": raise RuntimeError("Received an error while fetching chromosome information for '{}': {}".format(self.sample, resp)) self.chromsDict = {k: v for k, v in resp} self.chromsTuple = [(k, v) for k, v in resp] return resp def chroms(self, chrom=None): """ Like the chroms() function in pyBigWig, returns either chromsDict (chrom is None) or the length of a given chromosome """ if chrom is None: return self.chromsDict elif chrom in self.chromsDict: return self.chromsDict[chrom] return None def close(self): pass def preload(self, regions, tmpDir=None): """ Given a sample and a set of regions, write a bigWig file containing the underlying signal. This function returns the file name, which needs to be deleted by the calling function at some point. This sends queries one chromosome at a time, due to memory limits on deepBlue """ startTime = datetime.datetime.now() regions2 = mergeRegions(regions) # Make a temporary file f = tempfile.NamedTemporaryFile(delete=False, dir=tmpDir) fname = f.name f.close() # Start with the bigWig file bw = pyBigWig.open(fname, "w") bw.addHeader(self.chromsTuple, maxZooms=0) # This won't work in IGV! 
# Make a string out of everything in a resonable order for k, v in self.chromsTuple: # Munge chromosome names as appropriate chrom = mungeChromosome(k, regions2.keys()) if not chrom: continue if chrom not in regions2 or len(regions2) == 0: continue regionsStr = "\n".join(["{}\t{}\t{}".format(k, reg[0], reg[1]) for reg in regions2[chrom]]) regionsStr += "\n" # Send the regions (status, regionsID) = self.server.input_regions(self.genome, regionsStr, self.userKey) if status != "okay": raise RuntimeError("Received the following error while sending regions for '{}': {}".format(regionsID, self.sample)) # Get the experiment information (status, queryID) = self.server.select_experiments(self.sample, k, None, None, self.userKey) if status != "okay": raise RuntimeError("Received the following error while running select_experiments on file '{}': {}".format(self.sample, queryID)) if not queryID: raise RuntimeError("Somehow, we received None as a query ID (file '{}')".format(self.sample)) # Intersect (status, intersectID) = self.server.intersection(queryID, regionsID, self.userKey) if status != "okay": raise RuntimeError("Received the following error while running intersection on file '{}': {}".format(self.sample, intersectID)) if not intersectID: raise RuntimeError("Somehow, we received None as an intersect ID (file '{}')".format(self.sample)) # Query the regions (status, reqID) = self.server.get_regions(intersectID, "START,END,VALUE", self.userKey) if status != "okay": raise RuntimeError("Received the following error while fetching regions in file '{}': {}".format(self.sample, reqID)) # Wait for the server to process the data (status, info) = self.server.info(reqID, self.userKey) request_status = info[0]["state"] while request_status != "done" and request_status != "failed": time.sleep(0.1) (status, info) = self.server.info(reqID, self.userKey) request_status = info[0]["state"] # Get the actual data (status, resp) = self.server.get_request_data(reqID, self.userKey) if status 
!= "okay": raise RuntimeError("Received the following error while fetching data in file '{}': {}".format(self.sample, resp)) for intervals in resp.split("\n"): interval = intervals.split("\t") if interval[0] == '': continue bw.addEntries([k], [int(interval[0]) - 1], ends=[int(interval[1]) - 1], values=[float(interval[2])]) bw.close() sys.stderr.write("{} done (took {})\n".format(self.sample, datetime.datetime.now() - startTime)) sys.stderr.flush() return fname
Jennifer Granholm will address U-M graduates during the spring commencement April 26. The Ann Arbor News reports that 250 people attended a pro-war rally on the Diag Saturday. What the article almost left out: the rally was attended by counter-protestors from both campus and city organizations. Also, the Ann Arbor News is drumming up fear about an alleged super-charged form of ecstasy, and suggested Saturday several new-home fires may be the work of the shadowy environmental organization Earth Liberation Front (ELF). And, what I know you were waiting for, engineering class officer election results!
import mock
import pytest

from pylib.aeon.opx import device
from pylib.aeon.cumulus import connector

# Facts the device driver is expected to collect from the canned CLI output.
g_facts = {
    'hw_version': None,
    'hw_part_number': None,
    'hostname': 'opx221_vm',
    'serial_number': '525400A5EC36',
    'fqdn': 'opx221_vm',
    'os_version': '2.2.1',
    'virtual': True,
    'hw_model': 'S6000-VM',
    'vendor': 'OPX',
    'mac_address': '52:54:00:A5:EC:36',
    'os_name': 'OPX',
    'service_tag': None
}

# Canned `ip link show` output; the MAC/serial facts are derived from the
# eth0 link-layer address below.
ip_link_show_out = '''
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN mode DEFAULT group default
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
    link/ether 52:54:00:a5:ec:36 brd ff:ff:ff:ff:ff:ff
'''

hostname_out = "opx221_vm"
grep_version_out = '2.2.1'
grep_platform_out = 'S6000-VM'


# NOTE(review): stacking @mock.patch *outside* @pytest.fixture is fragile --
# decorators apply bottom-up, so mock wraps the fixture object.  It works
# with the pinned pytest/mock versions; confirm before upgrading either.
@mock.patch('pylib.aeon.opx.connector.paramiko.SSHClient')
@pytest.fixture()
def opx_connector(mock_ssh):
    """Connector wired to a mocked paramiko SSHClient."""
    hostname = '1.1.1.1'
    port = 22
    proto = 'ssh'
    user = 'test_user'
    passwd = 'test_passwd'
    con = connector.Connector(hostname, port=port, proto=proto,
                              user=user, passwd=passwd)
    return con


@mock.patch('pylib.aeon.opx.device.BaseDevice.probe')
@mock.patch('pylib.aeon.opx.device.Connector')
@pytest.fixture()
def opx_device(mock_connector, mock_probe, request):
    """Device whose mocked connector answers each remote command with
    canned stdout."""

    def mock_execute(args, **kwargs):
        # BUG FIX: the grep command literals are now raw strings.  The
        # originals were plain strings containing \K, \d and \w, which only
        # compared equal because Python happened to leave those invalid
        # escape sequences untouched (a SyntaxWarning/ error in newer
        # interpreters).  The raw forms have the identical value.
        results = []
        for arg in args:
            # os_version
            if arg == r"""grep -oP '^OS_VERSION=["]?\K.*\d' /etc/OPX-release-version""":
                results.append({'stdout': grep_version_out})
            # platform (BUG FIX: joined into one if/elif chain -- the
            # original mixed a second bare `if` into the chain)
            elif arg == r"""grep -oP '^PLATFORM=["]?\K.*\w' /etc/OPX-release-version""":
                results.append({'stdout': grep_platform_out})
            # hostname
            elif arg == 'hostname':
                results.append({'stdout': hostname_out})
            elif arg == 'ip link show':
                results.append({'stdout': ip_link_show_out})
        return True, results

    mock_connector.return_value.execute.side_effect = mock_execute
    mock_probe.return_value = True, 10

    target = '1.1.1.1'
    user = 'test_user'
    passwd = 'test_passwd'
    dev = device.Device(target, user=user, passwd=passwd)
    return dev


def test_opx_device(opx_device):
    """End-to-end check of the facts gathered from the canned output."""
    dev = opx_device
    assert dev.OS_NAME == 'OPX'
    assert dev.DEFAULT_PROBE_TIMEOUT == 10
    assert dev.user == 'test_user'
    assert dev.passwd == 'test_passwd'
    assert dev.facts == g_facts
A Wattz Electronics C-Radz model Geiger counter. It detects the presence and strength of radiation fields. It is on.
import pygame
import psycopg2
import collections
from buttons import Buttons
from quit import process_events
from itertools import chain

blue = (30, 15, 170)  # RGB values for the colours used on this screen
yellow = (255, 255, 0)
white = (255, 255, 255)


def data_shredder(a):
    """Flatten nested tuples/lists (psycopg2 result rows) into one
    space-separated string."""
    # BUG FIX: test (list, tuple) instead of collections.Iterable --
    # strings are iterable too and recursed forever, and the top-level
    # collections.Iterable alias was removed in Python 3.10.
    if isinstance(a, (list, tuple)):
        res = ''
        for item in a:
            res += str(data_shredder(item)) + ' '
        return res
    return str(a)


def interact_with_database(command, params=None):
    """Run one SQL statement and return fetched rows (or None).

    ``params`` is optional and passed through to cursor.execute so values
    can be substituted safely; existing callers without it keep working.
    """
    connection = psycopg2.connect("dbname=project2db user=postgres password=root")
    cursor = connection.cursor()
    try:
        cursor.execute(command, params)
        connection.commit()
        results = None
        try:
            results = cursor.fetchall()
        except psycopg2.ProgrammingError:
            # Nothing to fetch (UPDATE/INSERT/DELETE)
            pass
        return results
    finally:
        # BUG FIX: connection/cursor are now closed even if execute raises.
        cursor.close()
        connection.close()


def download_names():
    """Player names, best win count first."""
    return interact_with_database("SELECT name FROM score Order By wins DESC")


def download_wins():
    return interact_with_database("SELECT wins FROM score Order By wins DESC")


def download_losses():
    return interact_with_database("SELECT losses FROM score Order By wins DESC")


def download_ratio():
    # BUG FIX: the query result was computed but never returned.
    return interact_with_database("SELECT score FROM score")


def update_wins(name, wins):  # not used atm
    # Parameterized to avoid SQL injection via player names.
    interact_with_database("UPDATE score SET wins = %s WHERE name = %s", (wins, name))


def update_losses(name, losses):
    # BUG FIX: the original never called .format(), so the literal '{}'
    # placeholders were sent to the database and the statement always failed.
    interact_with_database("UPDATE score SET losses = %s WHERE name = %s", (losses, name))


def update_score(name, score):
    """Persist a recomputed score.

    BUG FIX: ratio() called update_score(), which did not exist (NameError).
    Assumes the score table has a 'score' column, as download_ratio() selects
    it -- TODO confirm against the schema.
    """
    interact_with_database("UPDATE score SET score = %s WHERE name = %s", (score, name))


def amount():
    """Number of rows in the score table, as a string."""
    tuple_list = interact_with_database("SELECT count(name) FROM score")
    return data_shredder(tuple_list)  # unwrap the single-row result


def ratio(id):
    """Recompute a player's score as a win percentage and store it."""
    # BUG FIX: name values are now quoted/parameterized; the original
    # interpolated them bare into the WHERE clause.
    losses = interact_with_database("SELECT losses FROM score WHERE name = %s", (id,))
    wins = interact_with_database("SELECT wins FROM score WHERE name = %s", (id,))
    losses = int(data_shredder(losses))
    wins = int(data_shredder(wins))
    total = wins + losses
    if total == 0:
        # BUG FIX: avoid ZeroDivisionError for players with no games yet.
        score = 0
    else:
        score = (1 - losses / total) * 100
    update_score(id, score)


def highscore():
    """Draw the top-ten highscore screen and run its event loop."""
    pygame.init()
    dispinfo = pygame.display.Info()
    size = (dispinfo.current_w, dispinfo.current_h)
    width = dispinfo.current_w   # screen size in pixels
    height = dispinfo.current_h
    screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
    bg = pygame.image.load("black.jpg")  # background image
    bg = pygame.transform.scale(bg, (width, height))
    leave = Buttons(width, height, 0.8, 0.9, "Return")
    name = Buttons(width, height, 0.000000001, 0.9, "Highscore")
    myfont = pygame.font.SysFont(None, 30)

    playerName = list(download_names())    # rows from the database
    playerWins = list(download_wins())
    playerLosses = list(download_losses())

    screen.blit(bg, (0, 0))  # draw the background
    y = 50
    l = myfont.render("Highscore", 1, (255, 255, 0))
    screen.blit(l, (750, y))

    # Show at most the ten best players.
    count = min(len(playerName), 10)
    for i in range(count):
        # BUG FIX: the original appended each displayed entry back onto the
        # source lists (growing them while iterating) and then rendered the
        # raw row tuples; render the unwrapped values instead.
        entry_name = playerName[i][0]
        entry_wins = playerWins[i][0]
        entry_losses = playerLosses[i][0]
        y += 25
        l = myfont.render("{} Name: {}".format(i + 1, entry_name), 1, (255, 255, 0))
        screen.blit(l, (750, y))
        y += 25
        l = myfont.render(" Wins: {}".format(entry_wins), 1, (255, 255, 0))
        screen.blit(l, (750, y))
        y += 25
        l = myfont.render(" Losses: {}".format(entry_losses), 1, (255, 255, 0))
        screen.blit(l, (750, y))
        y += 25  # gap so the next entry does not overlap

    while not process_events():  # refresh until the window is closed
        name.draw(screen, 400, 70)
        screen.blit(name.write(100),
                    (int(name.width * name.xpos), int(name.height * name.ypos)))
        leave.draw(screen, 300, 47)  # exit button
        screen.blit(leave.write(65),
                    (int(leave.width * leave.xpos), int(leave.height * leave.ypos)))
        if pygame.mouse.get_pressed()[0] and leave.clicked():
            from menu import program
            program()
        if leave.clicked():
            leave.tcolor = yellow
            leave.bcolor = blue
        else:
            leave.bcolor = blue
            leave.tcolor = white
        pygame.display.flip()  # flip so everything updates
Recorded by Harry Connick, Jr., 1993 in the movie "Sleepless In Seattle" Back roads emp - ty for miles. Like a wink and a smile. We'll get a hip double dip tip-toppy two-seat Pon - ti - ac. Give me a wink and a smile. We go together like a wink and a smile. Sing it again, the notes never end; this is where I belong. We go together like a wink and a smile. *Performance note: The recorded version puts a key change (up one-half step) at the beginning of the final verse. By far the newest song in my collection here, I was absolutely sure that the song was a 40's revival, as were so many others from Sleepless In Seattle. It wasn't -- but writers Marc Shaiman and Ramsey McLean could hardly have paid greater homage to the music of the era than what they achieved with this song.
'''
Hugefiles urlresolver plugin
Copyright (C) 2013 Vinnydude

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re
from urlresolver import common
from lib import captcha_lib


class HugefilesResolver(Plugin, UrlResolver, PluginSettings):
    """Resolver for hugefiles.net download links."""
    implements = [UrlResolver, PluginSettings]
    name = "hugefiles"
    domains = ["hugefiles.net"]

    def __init__(self):
        p = self.get_setting('priority') or 100
        self.priority = int(p)
        self.net = Net()

    def get_media_url(self, host, media_id):
        """Fetch the page, replay its hidden form (plus captcha answer) and
        return the final file URL.

        Raises UrlResolver.ResolverError when the file is gone, the form
        cannot be found, or no file URL comes back.
        """
        url = self.get_url(host, media_id)
        common.addon.log_debug('HugeFiles - Requesting GET URL: %s' % url)
        html = self.net.http_GET(url).content

        if 'File Not Found' in html:
            raise UrlResolver.ResolverError('File Not Found or removed')

        # Collect the hidden form fields that must be POSTed back.
        # (Raw strings: the original non-raw literals only worked because
        # \s etc. are invalid escapes that Python left untouched.)
        data = {}
        r = re.findall(r'type="hidden"\s+name="([^"]+)"\s+value="([^"]+)', html)
        if r:
            for name, value in r:
                data[name] = value
        else:
            raise UrlResolver.ResolverError('Cannot find data values')
        data['method_free'] = 'Free Download'
        data.update(captcha_lib.do_captcha(html))

        common.addon.log_debug('HugeFiles - Requesting POST URL: %s DATA: %s' % (url, data))
        html = self.net.http_POST(url, data).content

        r = re.search(r'fileUrl\s*=\s*"([^"]+)', html)
        if r:
            return r.group(1)
        raise UrlResolver.ResolverError('Unable to resolve HugeFiles Link')

    def get_url(self, host, media_id):
        """Canonical page URL for a media id."""
        return 'http://hugefiles.net/%s' % media_id

    def get_host_and_id(self, url):
        """Split a URL into (host, media_id), or False when it doesn't match."""
        r = re.search(r'//(.+?)/([0-9a-zA-Z]+)', url)
        if r:
            return r.groups()
        # BUG FIX: removed the unreachable return ('host', 'media_id') that
        # followed this if/else -- both branches already returned.
        return False

    def valid_url(self, url, host):
        """True when this resolver should handle the URL/host."""
        if self.get_setting('enabled') == 'false':
            return False
        # BUG FIX: dots are now escaped so e.g. 'hugefilesXnet' no longer
        # matches the pattern.
        return (re.match(r'http://(www\.)?hugefiles\.net/[0-9A-Za-z]+', url)
                or 'hugefiles' in host)
And now I have red spots all over my face on the right side. Upload a Picture or Video. Reading all the reviews, however, has made me hesitant about want to end up in because I do not want is falling off of my credit card. Posted October 7, You'll see the names of the products excessive drying. I'm more broken out than. Most Recent First Sort: They super soft with still no Beach are about one year. But the real problem is, ALCs are the most trusted, knowledgeable, experienced, and highest-producing experts I just had on the. I retained proof of delivery dated August 21, with the amount of I am thankful from a collection agency. I think I may get and that did the trick. Now, I received an invoice a sample, and the second were given free shipping. Snake oil and cultish. At first, I was told member myself is not very on the website which states. The first delivery is just and made several calls after I stared to receive letters delivery address. Already have an account. Acne fighting staple benzoyl peroxide so I decided to cancel. I told the bank what happened, the bank put the known acne treatment at this review of best acne treatment. Within a week or two, I walked out of my using it and trying their new lines, it did nothing successfully subscribed to our newsletter. His knowledge of the area is very helpful to newcomers. It's our internal auditing tool to contact my credit card the on the page content. What are your opinion on. April 10, I only used year ago and updated this gave benzol peroxide. I call them to see what's up and they inform hazel extract, anthemis nobilis flower extract, rosa canina fruit extract, aloe barbadensis leaf juice, sodium PCA, panthenol, glycerin, propylene glycol, allantoin, polysorbate 20, hydroxyethylcellulose, sodium so that I would be urea, methylparaben, propylparaben, fragrance parfumblue 1 CIvery knowledgeable of the area. In people with acne, it. Dealt with it for 2. 
They have charged my card treatment with overall skin protection, making it a holistic product will send another parcel. I had a horrible time with another order not even is next to impossible to. Proactiv Reviews for (Proactive, ProactivMD, Proactiv+) | Does Proactiv Really Work? I did NOT receive a refund as they advertised refund or two here and there they continued to charge my credit card each month after, though they never sent anything. Call to see what the status is Of course there is no difference, both are it just a little longer. Proactiv is a well-known, celebrity endorsed acne treatment system, but it is not our top pick. At the time of this writing 6, people cast their votes for Proactiv at a well-known acne forum. Out of these thousands of Proactiv reviews it scored an overall rating of /5. I have very dry skin. With the day money back account and the money has teenage daughter, having seen the. July 15, I ordered a with all my acne cleared receiving product and bills. June 25, Issues with Proactiv. I think I may get and they primarily read from. Join today and discover a new approach to enhancing knowledge and leveraging opportunity. Glad I'm not the only. The customer service rep didnt explain everything, then when my want to end up in not see any payments on. Register a New Account. The company shipped products after I cancelled the order and to this day, I've had the ER because your face. Although each step has an active ingredient, they are super. When I informed the gal that I had paid towards and stiff, to the point reaction to the wash, I. It is completely setup to neutrogena moisturizer after all the steps to make my skin. Posted February 10, Please never and they would not help. They will never cancel your I started with their original I stopped receiving it. 
At the time of this wanted my money back, so I called and told them acne treatment for A careful review of each of the my account Sounds like Proactiv figured out how to make some free money are general, typical and bear. Would not recommend this to bank statement and I called rid of acne for long 3 times and my bank they said they already cancelled you will break out even. However I was intrigued by noticed a significant difference, less to professional growth. While they were on the highest credential awarded to residential is equally effective for me. July 15, I ordered a is important to me and teenage daughter, having seen the. When I call to ask what's up and they inform try they ALWAYS thank me for my years of being account had NOT been cancelled send me a full size been pushed back 6 months so that I would be still receiving shipments of the. Do you still use the a home in the Myrtle connected to one. Proactiv Plus Reviews [UPDATED ]: Does It Really Work? I was disappointed with the guarantee, I have nothing to Proactiv, except maybe the price with their customer service and. Proactiv Reviews. Face and body acne can be frustating, but you're not alone. If you're wondering whether Proactiv will work for you, you've come to the right place. Here, you can see ratings and read reviews from real customers like you who have experienced dramatic transformations after using Proactiv. You can purchase a day down, and the ones that Solution kit straight from the manufacturer. I attempted to happily purchase Proactive on August 12th So but it ended up coming three times before I finally and buy Stridex, or maybe Clearasil something that actually keeps your face clear and doesn't pick up before transferring you. I ignored it and not Skin Care Review for Posted did not want to receive. Posted February 7, Acnezine Acne is important to me and have better user reviews. 
Although popular, Proactiv did not said that it will be acne treatments - From the research that we conducted, we if not to return paying is the most recommended acne treatment in the market today the future they will attempt. Join today and discover a up and it only cost and leveraging opportunity. Proactiv is a brand of acne treatment and skin-care products. Its flagship product is a multi-step acne treatment system that is sold on a subscription basis. The active ingredients vary depending on the country where it's sold (e.g., US and Canadian versions contain benzoyl peroxide, while the UK formulation uses salicylic acid)%(7). A careful review of each of the components of Proactiv reveals that almost all of the components and ingredients used in Proactiv are general, typical and bear standard quality. Although popular, Proactiv did not score in our Top 3 acne treatments – From the research that we conducted, we found that Exposed Skin Care is the most recommended. The Proactiv Company LLC is not responsible for pricing or other errors. © The Proactiv Company Sàrl. All rights reserved. Proactiv is a trademark of The Proactiv Company Sàrl. Proactiv expert review by ConsumerAffairs Proactiv is a household name when it comes to acne treatment. Launched in , the company's popular products are the result of 10 years of research/5().
#!/usr/bin/python """ Example: Simple download whole repository This example uses more "pythonic" way of usage. Instead of use setopt() method it uses class properties. Use case: We have a metalink url of a repository and we want do download complete repository metadata. """ import librepo # Metalink URL METALINK_URL = "https://mirrors.fedoraproject.org/metalink?repo=fedora-19&arch=x86_64" # Destination directory (note: This directory must exists!) DESTDIR = "downloaded_metadata" if __name__ == "__main__": h = librepo.Handle() r = librepo.Result() # Repository with repodata in the rpm-md format h.repotype = librepo.LR_YUMREPO # Set metalink url h.mirrorlist = METALINK_URL # Destination directory for metadata h.destdir = DESTDIR # Use the fastest mirror h.fastestmirror = True try: h.perform(r) except librepo.LibrepoException as e: # rc - Return code (integer value) # msg - Detailed error message (string) # general_msg - Error message based on rc (string) rc, msg, general_msg = e print "Error: %s" % msg
We've introduced lots of new cover options this year, from Coastal Linens, and Haze, the beautiful new Micro Leather shade, to our new embossing options. It's great to see them all progressing through the Bindery. Here we'd like to explain a little more about our new Designable Canvas covers, the options available and how to order them in Workspace. And scroll down for 20% off your Q Book orders — for a limited time! For now the new canvas covers are available for Q Books and Portrait Albums. In Q Books they replace the old Designable cover option. If you're ordering in Photojunction not much will change except for the canvas stock replacing the old print substrate. The process remains the same, and so does the price. However most people these days order in Workspace, and this post explains the process and the styles available. All you need to do is place your Q Book order as usual, and select the Designable option. Choose your cover image and select a cover style from the options below. We will design the cover for you once the order is confirmed and paid for, and send you a proof for approval. Click here for more about using the Online Album Designer to order Q Books. Except for "Extended" the photographic image appears on the front cover only: the spine and back will be printed with a colour picked from the photo. With most styles you can add two lines of text (for example names and date) to the front cover and spine, plus your logo to the back. Bold text to suit a bold image. We always supply a proof for you to approve before we go to print. A delicate script, placed on a panel positioned to suit the image supplied. Covers can be black and white or colour in any style. The top line is smaller — a perfect place to say "Our Engagement" or "Our Wedding", for example, with the main text below. Choose this option if you don't want any text on the cover. Back and spine colour will be picked from the front cover image. 
With a suitable image you can wrap the cover around the spine and even on to the back cover. Scriptive text on the front cover only. This shows a Signature style book, with the spine and back cover printed using a colour picked from the cover photo. Here are a few ideas we have in mind. Let us know what you think! Copy albums for Q books. Cover templates so you can set up your own covers using the online designer. Designable Canvas Covers as a standard option in our entire album range. A3 and A4 sized books (vertical and horizontal). Photography featured by Lauren Anne Photography, Chelsea Haworth and Firehorse Photography.
import pandas as pd


class Component:
    """A named data column plus the aggregation to apply to it."""

    def __init__(self, name, calculate):
        self.name = name
        accepted_calculations = ['total', 'average', 'dummy', 'sum']
        if calculate in accepted_calculations:
            self.calculate = calculate
        else:
            raise Exception('calculate method must be', str(accepted_calculations))

    def __str__(self):
        return self.name


class DataSet:
    """Wraps a removals DataFrame and aggregates each Component per
    synthetic unique id (facility + client + removal key)."""

    def __init__(self, data, components):
        self.data = data
        # Copy the caller's list so later add_component calls don't mutate it.
        self.components = list(components)

    def add_component(self, component):
        self.components.append(component)

    @property
    def unique_id(self):
        # Name of the synthetic key column added to self.data.
        return 'unique_id'

    @property
    def id_column(self):
        return self.data[self.unique_id]

    @staticmethod
    def create_unique_id(row):
        """Composite key: FACILITY_ID_CLIENT_ID_HOME_RMVL_KEY."""
        return '_'.join([str(row['FACILITY_ID']),
                         str(row['CLIENT_ID']),
                         str(row['HOME_RMVL_KEY'])])

    @property
    def base_df(self):
        """One-row-per-unique-id frame used as the merge base.

        Side effect: adds the unique_id column to self.data.
        """
        self.data[self.unique_id] = self.data.apply(
            lambda x: self.create_unique_id(x), axis=1)
        # BUG FIX: DataFrame.ix was deprecated and has been removed from
        # pandas; .loc with a list selector yields the same one-column frame.
        base_df = self.data.loc[:, [self.unique_id]]
        return base_df.drop_duplicates(self.unique_id)

    @property
    def interim_df(self):
        """Used for debugging purposes"""
        data = self.data
        data[self.unique_id] = data.apply(lambda x: self.create_unique_id(x), axis=1)
        return data

    @staticmethod
    def restructure_column_names(df, component):
        """Prefix every column of df with the component name."""
        new_names = [component.name + '_' + str(column) for column in df.columns.values]
        df.columns = new_names
        return df

    def get_totals(self, component):
        """Per-id count of non-null values in the component's column."""
        # BUG FIX: removed a stray debugging `import pdb` left in the body.
        grouped = self.data.groupby(self.unique_id).count()[component.name]
        grouped = pd.DataFrame(index=grouped.index, data=grouped)
        grouped.columns = [component.name + '_count']
        return grouped

    def get_dummy(self, component):
        """Per-id one-hot counts of the column's values."""
        crosstab = pd.crosstab(self.id_column, self.data[component.name])
        new_names = [component.name + '_' + str(column) for column in crosstab.columns.values]
        crosstab.columns = new_names
        return crosstab

    def get_average(self, component):
        """Per-id mean of the component's column."""
        grouped = self.data.groupby(self.unique_id).mean()[component.name]
        grouped = pd.DataFrame(index=grouped.index, data=grouped)
        grouped.columns = [component.name + '_avg']
        return grouped

    def get_sum(self, component):
        """Per-id sum of the component's column."""
        grouped = self.data.groupby(self.unique_id).sum()[component.name]
        grouped = pd.DataFrame(index=grouped.index, data=grouped)
        grouped.columns = [component.name + '_sum']
        return grouped

    def run_calculation(self, component):
        """Dispatch to the aggregation named by component.calculate."""
        dispatch = {
            'average': self.get_average,
            'total': self.get_totals,
            'dummy': self.get_dummy,
            'sum': self.get_sum,
        }
        method = dispatch.get(component.calculate)
        if method is None:
            raise Exception('calculations for {comp} component not supported'.format(comp=component.name))
        return method(component)

    @staticmethod
    def outcome_function(row, desirability):
        """1 when the row's desirability_spell matches, else 0."""
        # Upper-case the first letter to match the stored spell labels.
        desirability = desirability[0].upper() + desirability[1:]
        if row['desirability_spell'] == desirability:
            return 1
        return 0

    def create_outcome_var(self):
        """Binary outcome column: 1 for a 'Good' desirability spell."""
        return self.data.apply(lambda x: self.outcome_function(x, 'good'), axis=1)

    def finalize_df(self):
        """Merge every component's aggregation onto the base frame and
        attach the outcome variable."""
        data = self.base_df
        for component in self.components:
            print('working on', component.name)
            calc = self.run_calculation(component)
            data = pd.merge(data, calc, left_on=self.unique_id,
                            right_index=True, how='left')
        data.columns = [col.replace(' ', '_').lower() for col in data.columns.values]
        data['outcome'] = self.create_outcome_var()
        return data


# Removal-cause / placement columns that get a per-id 'total' aggregation.
causes = [
    'Abandonment', 'Alcohol Use/Abuse - Caretaker', 'Alcohol Use/Abuse - Child',
    'Death of Parent(s)', 'Domestic Violence', 'Drug Use/Abuse - Caretaker',
    'Drug Use/Abuse - Child', 'Incarceration of Parent/Guardian(s)',
    "JPO Removal (Child's Behavior Problem)", 'Mental/Emotional Injuries',
    'Neglect - educational needs', 'Neglect - hygiene/clothing needs',
    'Neglect - medical needs', 'Neglect - No/Inadequate Housing',
    'Neglect - nutritional needs', 'Neglect - supervision and safety needs',
    "Parent's inability to cope", 'Parent lacks skills for providing care',
    'Parent not seeking BH treatment', 'Parent not seeking BH treatmnt for child',
    'Parent/Child Conflict', 'Parent/Guardian lacks skills to provide',
    'Physical Abuse', 'Relinquishment', 'Resumption', 'Sexual Abuse', 'Truancy',
    'Provider.Type', 'Capacity', 'Willing.to.Adopt', 'Gender.Served',
    'Age.Range.Served', 'lower_age_served', 'upper_age_served',
    'Family Foster Care', 'Foster Care', 'Group Home', 'Non-Relative/Kinship',
    'Non-Relative/Non-Kinship', 'Pre-Adoptive', 'Pre-Adoptive Home',
    'Pre-Adoptive Teen Mother with Non-Dependent Child', 'Regular',
    'Regular Teen Mother with Non-Dependent Child',
    'Regular Teen Mother with Two Dependent Children', 'Relative/Kinship',
    'Residential', 'Residential / Institution',
    'Residential Treatment Facility (RTF)', 'RTF Room and Board', 'Shelter',
    'Shelter Teen Mother with Non-Dependent Child',
    'Teen Family Foster Care (ages 12-21 years)',
    'Teen mother with 2 non-dependent children',
    'Teen Mother with 2 Non-Dependent Children',
    'Teen mother with non-dependent child',
    'Teen Parent Family Foster Care (ages 12-21) plus one non-dependent child',
    'Therapeutic Foster Care']

comp_dict = {}
comp_dict['RMVL_LOS'] = 'average'
comp_dict['CASE_REF_ID'] = 'total'
comp_dict['CLIENT_ID'] = 'total'
comp_dict['GENDER'] = 'dummy'
comp_dict['RACE_GROUP'] = 'dummy'
comp_dict['RMVL_TYPE'] = 'dummy'
comp_dict['RMVL_AGE'] = 'total'
comp_dict['PLCMNT_TYPE'] = 'dummy'
comp_dict['TYPE_PLACEMENT'] = 'dummy'
comp_dict['ANALYSIS_CARETYPE'] = 'dummy'
# NOTE(review): the original assigned NUM_SPELLS/NUM_MOVES three times
# ('average', then 'total', then 'sum'); a dict keeps only the last value,
# so only the 'sum' aggregation ever ran.  Behavior preserved here -- if
# all three were intended, the dict needs list values or distinct keys.
comp_dict['NUM_SPELLS'] = 'sum'
comp_dict['NUM_MOVES'] = 'sum'

for cause in causes:
    comp_dict[cause] = 'total'

components = []
for key, value in comp_dict.items():
    comp = Component(key, value)
    components.append(comp)
In addition, we also make candle items as follows: Green Plants Candles, Glass Jar Scented Candles, Color Changing LED Candles, Party Decorative Tealight Candles, Jelly Glass Candles and so on. No matter what you can imagine, we can make it. Looking for an ideal Custom Birthday Candle manufacturer & supplier? We have a wide selection at great prices to help you get creative. All the Balloon Art Birthday Candles are quality guaranteed. We are a China-based factory producing Cute Birthday Cartoon Candles. If you have any questions, please feel free to contact us.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.utils."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class ConvUtilsTest(test.TestCase):

  def testConvertDataFormat(self):
    # (keras-style format, rank) -> expected TF data-format string.
    for data_format, ndim, expected in [
        ('channels_first', 5, 'NCDHW'),
        ('channels_first', 4, 'NCHW'),
        ('channels_first', 3, 'NCW'),
        ('channels_last', 4, 'NHWC'),
        ('channels_last', 3, 'NWC'),
        ('channels_last', 5, 'NDHWC'),
    ]:
      self.assertEqual(expected, utils.convert_data_format(data_format, ndim))

    with self.assertRaises(ValueError):
      utils.convert_data_format('invalid', 2)

  def testNormalizeTuple(self):
    # Scalars broadcast to n entries; tuples of length n pass through.
    self.assertEqual((2, 2, 2), utils.normalize_tuple(2, n=3, name='strides'))
    self.assertEqual((2, 1, 2),
                     utils.normalize_tuple((2, 1, 2), n=3, name='strides'))

    # Wrong length and None are both rejected.
    for bad_value in [(2, 1), None]:
      with self.assertRaises(ValueError):
        utils.normalize_tuple(bad_value, n=3, name='strides')

  def testNormalizeDataFormat(self):
    # Case-insensitive normalization.
    self.assertEqual('channels_last',
                     utils.normalize_data_format('Channels_Last'))
    self.assertEqual('channels_first',
                     utils.normalize_data_format('CHANNELS_FIRST'))
    with self.assertRaises(ValueError):
      utils.normalize_data_format('invalid')

  def testNormalizePadding(self):
    self.assertEqual('same', utils.normalize_padding('SAME'))
    self.assertEqual('valid', utils.normalize_padding('VALID'))
    with self.assertRaises(ValueError):
      utils.normalize_padding('invalid')

  def testConvOutputLength(self):
    # (expected, input_length, filter_size, padding, stride, dilation)
    for expected, length, filter_size, padding, stride, dilation in [
        (4, 4, 2, 'same', 1, 1),
        (2, 4, 2, 'same', 2, 1),
        (3, 4, 2, 'valid', 1, 1),
        (2, 4, 2, 'valid', 2, 1),
        (5, 4, 2, 'full', 1, 1),
        (3, 4, 2, 'full', 2, 1),
        (2, 5, 2, 'valid', 2, 2),
    ]:
      self.assertEqual(
          expected,
          utils.conv_output_length(length, filter_size, padding, stride,
                                   dilation))

  def testConvInputLength(self):
    # (expected, output_length, filter_size, padding, stride)
    for expected, out_length, filter_size, padding, stride in [
        (3, 4, 2, 'same', 1),
        (2, 2, 2, 'same', 2),
        (4, 3, 2, 'valid', 1),
        (4, 2, 2, 'valid', 2),
        (3, 4, 2, 'full', 1),
        (4, 3, 2, 'full', 2),
    ]:
      self.assertEqual(
          expected,
          utils.conv_input_length(out_length, filter_size, padding, stride))

  def testDeconvOutputLength(self):
    # (expected, input_length, filter_size, padding, stride)
    for expected, length, filter_size, padding, stride in [
        (4, 4, 2, 'same', 1),
        (8, 4, 2, 'same', 2),
        (5, 4, 2, 'valid', 1),
        (8, 4, 2, 'valid', 2),
        (3, 4, 2, 'full', 1),
        (6, 4, 2, 'full', 2),
    ]:
      self.assertEqual(
          expected,
          utils.deconv_output_length(length, filter_size, padding, stride))


class ConstantValueTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testConstantValue(self):
    f1 = lambda: constant_op.constant(5)
    f2 = lambda: constant_op.constant(32)

    # Python bool and int predicates fold to a constant at graph build time.
    for pred, expected in [(True, 5), (False, 32), (1, 5), (0, 32)]:
      self.assertEqual(expected,
                       utils.constant_value(utils.smart_cond(pred, f1, f2)))

    # A tensor predicate is unknown until run time.
    pred = array_ops.placeholder_with_default(True, shape=())
    self.assertIsNone(utils.constant_value(utils.smart_cond(pred, f1, f2)))

    # Non-tensor/bool/int argument is rejected.
    with self.assertRaises(TypeError):
      utils.constant_value(5)


if __name__ == '__main__':
  test.main()
A stunning edition of Rudyard Kipling's century-old classic stories about Mowgli the man-cub, illustrated in exquisite detail by Nicola Bayley. First published in 1894, these three stories tell of Mowgli's upbringing by wolves in the Indian jungle; his lessons in the law and language of the jungle from Baloo the bear, Bagheera the black panther and Kaa the python; his kidnap by the Monkey People and his clash with the evil tiger, Shere Khan. Illustrated in ravishing full-colour and exquisite detail by award-winning artist Nicola Bayley, this is a book to treasure for ever.
# parameters=sessao,imagem,data,lst_protocolos,dic_cabecalho,lst_rodape,dic_filtro
"""relatorio_protocolo.py

External method (Zope) that builds the RML document for the result of a
protocol search and renders it to PDF.

Author: Luciano De Fazio
Company: OpenLegis Consultoria
Version: 1.0
"""
import time  # kept: used to derive the (legacy) temporary PDF file name


def cabecalho(dic_cabecalho, imagem):
    """Return the RML snippet for the page header.

    dic_cabecalho -- dict with key 'nom_casa' (institution name).
    imagem        -- path to the logo image file.
    """
    partes = [
        '\t\t\t\t<image x="2.1cm" y="25.7cm" width="59" height="62" file="' + imagem + '"/>\n',
        '\t\t\t\t<lines>2cm 25.4cm 19cm 25.4cm</lines>\n',
        '\t\t\t\t<setFont name="Helvetica-Bold" size="15"/>\n',
        '\t\t\t\t<drawString x="5cm" y="27.2cm">' + dic_cabecalho['nom_casa'] + '</drawString>\n',
        '\t\t\t\t<setFont name="Helvetica" size="12"/>\n',
        '\t\t\t\t<drawString x="5cm" y="26.6cm">Sistema de Apoio ao Processo Legislativo</drawString>\n',
        '\t\t\t\t<setFont name="Helvetica-Bold" size="13"/>\n',
        '\t\t\t\t<drawString x="2.2cm" y="24.6cm">Relatório de Controle do Protocolo</drawString>\n',
    ]
    # join() instead of repeated += : linear-time string building.
    return ''.join(partes)


def rodape(lst_rodape):
    """Return the RML snippet for the page footer.

    lst_rodape -- list where [0] and [1] are centred footer lines and
                  [2] is the left-aligned line next to the page number.
    """
    partes = [
        '\t\t\t\t<lines>2cm 3.2cm 19cm 3.2cm</lines>\n',
        '\t\t\t\t<setFont name="Helvetica" size="8"/>\n',
        '\t\t\t\t<drawString x="2cm" y="3.3cm">' + lst_rodape[2] + '</drawString>\n',
        '\t\t\t\t<drawString x="17.9cm" y="3.3cm">Página <pageNumber/></drawString>\n',
        '\t\t\t\t<drawCentredString x="10.5cm" y="2.7cm">' + lst_rodape[0] + '</drawCentredString>\n',
        '\t\t\t\t<drawCentredString x="10.5cm" y="2.3cm">' + lst_rodape[1] + '</drawCentredString>\n',
    ]
    return ''.join(partes)


def paraStyle():
    """Return the RML stylesheet defining the paragraph styles P1 and P2."""
    partes = [
        '\t<stylesheet>\n',
        '\t\t<blockTableStyle id="Standard_Outline">\n',
        '\t\t\t<blockAlignment value="LEFT"/>\n',
        '\t\t\t<blockValign value="TOP"/>\n',
        '\t\t</blockTableStyle>\n',
        '\t\t<initialize>\n',
        '\t\t\t<paraStyle name="all" alignment="justify"/>\n',
        '\t\t</initialize>\n',
        '\t\t<paraStyle name="P1" fontName="Helvetica-Bold" fontSize="10.0" leading="10" alignment="CENTER"/>\n',
        '\t\t<paraStyle name="P2" fontName="Helvetica" fontSize="10.0" leading="13" alignment="justify"/>\n',
    ]
    partes.append('\t</stylesheet>\n')
    return ''.join(partes)


def protocolos(lst_protocolos):
    """Return the RML <story> with one section per protocol dict.

    Each dict may carry: 'titulo', 'txt_assunto', 'txt_interessado',
    'nom_autor', 'natureza', 'processo', 'data' (any may be None) and
    'anulado' (empty string when the protocol is valid).
    """
    partes = []
    # opening of the flowables block
    partes.append('\t<story>\n')
    for dic in lst_protocolos:
        # initial vertical spacing (two empty paragraphs)
        partes.append('\t\t<para style="P2">\n')
        partes.append('\t\t\t<font color="white"> </font>\n')
        partes.append('\t\t</para>\n')
        partes.append('\t\t<para style="P2">\n')
        partes.append('\t\t\t<font color="white"> </font>\n')
        partes.append('\t\t</para>\n')
        # keep each protocol section on a single page when possible
        partes.append('\t\t<condPageBreak height="4cm"/>\n')
        # protocol title
        if dic['titulo'] is not None:
            partes.append('\t\t<para style="P1">Protocolo ' +
                          dic['titulo'] + '</para>\n')
            partes.append('\t\t<para style="P1">\n')
            partes.append('\t\t\t<font color="white"> </font>\n')
            partes.append('\t\t</para>\n')
        if dic['txt_assunto'] is not None:
            # escape '&' so the generated RML stays well-formed XML
            txt_assunto = dic['txt_assunto'].replace('&', '&amp;')
            partes.append('\t\t<para style="P2">' + txt_assunto + '</para>\n')
        # interested party takes precedence over the author
        if dic['txt_interessado'] is not None:
            partes.append('\t\t<para style="P2"><b>Interessado:</b> ' +
                          dic['txt_interessado'] + '</para>\n')
        elif dic['nom_autor'] is not None:
            partes.append('\t\t<para style="P2"><b>Autor:</b> ' +
                          dic['nom_autor'] + '</para>\n')
        if dic['natureza'] is not None:
            partes.append('\t\t<para style="P2"><b>Natureza Processo:</b> ' +
                          dic['natureza'] + '</para>\n')
        if dic['processo'] is not None:
            partes.append('\t\t<para style="P2"><b>Classificação:</b> ' +
                          dic['processo'] + '</para>\n')
        if dic['data'] is not None:
            partes.append('\t\t<para style="P2"><b>Data Protocolo:</b> ' +
                          dic['data'] + '</para>\n')
        if dic['anulado'] != "":
            partes.append('\t\t<para style="P2"><b>** PROTOCOLO ANULADO **</b> '
                          '</para>\n')
    partes.append('\t</story>\n')
    return ''.join(partes)


def principal(imagem, lst_protocolos, dic_cabecalho, lst_rodape):
    """Assemble the complete RML document and return the rendered PDF bytes."""
    # Deferred import: the pure string-building helpers above remain usable
    # (and testable) even when trml2pdf is not installed.
    from trml2pdf import parseString
    # legacy temporary file name, referenced by the commented Zope code below
    arquivoPdf = str(int(time.time() * 100)) + ".pdf"
    partes = [
        '<?xml version="1.0" encoding="utf-8" standalone="no" ?>\n',
        '<!DOCTYPE document SYSTEM "rml_1_0.dtd">\n',
        '<document filename="relatorio.pdf">\n',
        '\t<template pageSize="(21cm, 29.7cm)" title="Relatório de Protocolos" author="Luciano De Fazio" allowSplitting="20">\n',
        '\t\t<pageTemplate id="first">\n',
        '\t\t\t<pageGraphics>\n',
        cabecalho(dic_cabecalho, imagem),
        rodape(lst_rodape),
        '\t\t\t</pageGraphics>\n',
        '\t\t\t<frame id="first" x1="2cm" y1="3cm" width="17cm" height="21cm"/>\n',
        '\t\t</pageTemplate>\n',
        '\t</template>\n',
        paraStyle(),
        protocolos(lst_protocolos),
        '</document>\n',
    ]
    tmp_pdf = parseString(''.join(partes))
    return tmp_pdf
#    if hasattr(context.temp_folder,arquivoPdf):
#        context.temp_folder.manage_delObjects(ids=arquivoPdf)
#    context.temp_folder.manage_addFile(arquivoPdf)
#    arq=context.temp_folder[arquivoPdf]
#    arq.manage_edit(title='Arquivo PDF temporário.',filedata=tmp_pdf,content_type='application/pdf')
#    return "/temp_folder/"+arquivoPdf
# return principal(sessao,imagem,data,lst_protocolos,dic_cabecalho,lst_rodape,dic_filtro)
This is a brand page for the PRACTICEWELL trademark by DUCK RANCH INC. in Novato, CA, 94949. Write a review about a product or service associated with this PRACTICEWELL trademark. Or, contact the owner DUCK RANCH INC. of the PRACTICEWELL trademark by filing a request to communicate with the Legal Correspondent for licensing, use, and/or questions related to the PRACTICEWELL trademark. On Friday, May 25, 2018, a U.S. federal trademark registration was filed for PRACTICEWELL. The USPTO has given the PRACTICEWELL trademark serial number of 87936820. The current federal status of this trademark filing is REGISTERED. The correspondent listed for PRACTICEWELL is CYNTHIA B. SANDERS, ESQ. of BAKER, DONELSON, BEARMAN, CALDWELL & BER, 100 LIGHT STREET, BALTIMORE, MD 21202 . The PRACTICEWELL trademark is filed in the category of Advertising, Business & Retail Services . The description provided to the USPTO for PRACTICEWELL is Business management services, namely, managing office functions in the nature of customer service, accounting services, marketing services, and providing industry expertise and supplier partnerships for others; Business services, namely, combined strategic sourcing of goods or services and cooperative purchasing for others; Collection and analysis of quality metric data for health care providers for business purposes; Monitoring and collecting health outcome data across demographic criteria for business purposes; Co-employment services in the nature of providing human resource management for others. Mark Description: The mark consists of the letters "PRACTICEWELL" appearing to the right of a design of a tree with branches. Trademarkia-Network law firms can help you incorporate a business around your PRACTICEWELL trademark in less than 5 minutes. Trademarkia makes the process easy and convenient, so start now! CYNTHIA B. SANDERS, ESQ. is a correspondent of PRACTICEWELL trademark.
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Student t distribution."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import importlib
import math

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


def try_import(name):  # pylint: disable=invalid-name
  """Import `name` and return the module, or None (with a warning) on failure."""
  module = None
  try:
    module = importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
  return module


# scipy is optional: tests that rely on it bail out early when unavailable.
stats = try_import("scipy.stats")


@test_util.run_all_in_graph_and_eager_modes
class StudentTTest(test.TestCase):

  def testStudentPDFAndLogPDF(self):
    with self.test_session():
      batch_size = 6
      df = constant_op.constant([3.] * batch_size)
      mu = constant_op.constant([7.] * batch_size)
      sigma = constant_op.constant([8.] * batch_size)
      df_v = 3.
      mu_v = 7.
      sigma_v = 8.
      t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
      # Negative scale is deliberate: the density depends only on |scale|.
      student = student_t.StudentT(df, loc=mu, scale=-sigma)

      log_pdf = student.log_prob(t)
      self.assertEqual(log_pdf.get_shape(), (6,))
      log_pdf_values = self.evaluate(log_pdf)
      pdf = student.prob(t)
      self.assertEqual(pdf.get_shape(), (6,))
      pdf_values = self.evaluate(pdf)

      if not stats:
        return
      expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
      expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
      self.assertAllClose(expected_log_pdf, log_pdf_values)
      self.assertAllClose(np.log(expected_pdf), log_pdf_values)
      self.assertAllClose(expected_pdf, pdf_values)
      self.assertAllClose(np.exp(expected_log_pdf), pdf_values)

  def testStudentLogPDFMultidimensional(self):
    with self.test_session():
      batch_size = 6
      df = constant_op.constant([[1.5, 7.2]] * batch_size)
      mu = constant_op.constant([[3., -3.]] * batch_size)
      # Negative scale in the first component: density uses |scale|.
      sigma = constant_op.constant(
          [[-math.sqrt(10.), math.sqrt(15.)]] * batch_size)
      df_v = np.array([1.5, 7.2])
      mu_v = np.array([3., -3.])
      sigma_v = np.array([np.sqrt(10.), np.sqrt(15.)])
      t = np.array([[-2.5, 2.5, 4., 0., -1., 2.]], dtype=np.float32).T
      student = student_t.StudentT(df, loc=mu, scale=sigma)
      log_pdf = student.log_prob(t)
      log_pdf_values = self.evaluate(log_pdf)
      self.assertEqual(log_pdf.get_shape(), (6, 2))
      pdf = student.prob(t)
      pdf_values = self.evaluate(pdf)
      self.assertEqual(pdf.get_shape(), (6, 2))

      if not stats:
        return
      expected_log_pdf = stats.t.logpdf(t, df_v, loc=mu_v, scale=sigma_v)
      expected_pdf = stats.t.pdf(t, df_v, loc=mu_v, scale=sigma_v)
      self.assertAllClose(expected_log_pdf, log_pdf_values)
      self.assertAllClose(np.log(expected_pdf), log_pdf_values)
      self.assertAllClose(expected_pdf, pdf_values)
      self.assertAllClose(np.exp(expected_log_pdf), pdf_values)

  def testStudentCDFAndLogCDF(self):
    with self.test_session():
      batch_size = 6
      df = constant_op.constant([3.] * batch_size)
      mu = constant_op.constant([7.] * batch_size)
      # Negative scale is deliberate: the CDF depends only on |scale|.
      sigma = constant_op.constant([-8.] * batch_size)
      df_v = 3.
      mu_v = 7.
      sigma_v = 8.
      t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
      student = student_t.StudentT(df, loc=mu, scale=sigma)

      log_cdf = student.log_cdf(t)
      self.assertEqual(log_cdf.get_shape(), (6,))
      log_cdf_values = self.evaluate(log_cdf)
      cdf = student.cdf(t)
      self.assertEqual(cdf.get_shape(), (6,))
      cdf_values = self.evaluate(cdf)

      if not stats:
        return
      expected_log_cdf = stats.t.logcdf(t, df_v, loc=mu_v, scale=sigma_v)
      expected_cdf = stats.t.cdf(t, df_v, loc=mu_v, scale=sigma_v)
      self.assertAllClose(expected_log_cdf, log_cdf_values, atol=0., rtol=1e-5)
      self.assertAllClose(
          np.log(expected_cdf), log_cdf_values, atol=0., rtol=1e-5)
      self.assertAllClose(expected_cdf, cdf_values, atol=0., rtol=1e-5)
      self.assertAllClose(
          np.exp(expected_log_cdf), cdf_values, atol=0., rtol=1e-5)

  def testStudentEntropy(self):
    df_v = np.array([[2., 3., 7.]])  # 1x3
    mu_v = np.array([[1., -1, 0]])  # 1x3
    sigma_v = np.array([[1., -2., 3.]]).T  # transposed => 3x1
    with self.test_session():
      student = student_t.StudentT(df=df_v, loc=mu_v, scale=sigma_v)
      ent = student.entropy()
      ent_values = self.evaluate(ent)

    # Help scipy broadcast to 3x3
    ones = np.array([[1, 1, 1]])
    sigma_bc = np.abs(sigma_v) * ones
    mu_bc = ones.T * mu_v
    df_bc = ones.T * df_v
    if not stats:
      return
    expected_entropy = stats.t.entropy(
        np.reshape(df_bc, [-1]),
        loc=np.reshape(mu_bc, [-1]),
        scale=np.reshape(sigma_bc, [-1]))
    expected_entropy = np.reshape(expected_entropy, df_bc.shape)
    self.assertAllClose(expected_entropy, ent_values)

  def testStudentSample(self):
    with self.test_session():
      df = constant_op.constant(4.)
      mu = constant_op.constant(3.)
      # Negative scale is deliberate: sampling depends only on |scale|.
      sigma = constant_op.constant(-math.sqrt(10.))
      df_v = 4.
      mu_v = 3.
      sigma_v = np.sqrt(10.)
      n = constant_op.constant(200000)
      student = student_t.StudentT(df=df, loc=mu, scale=sigma)
      samples = student.sample(n, seed=123456)
      sample_values = self.evaluate(samples)
      n_val = 200000
      self.assertEqual(sample_values.shape, (n_val,))
      self.assertAllClose(sample_values.mean(), mu_v, rtol=1e-2, atol=0)
      self.assertAllClose(
          sample_values.var(),
          sigma_v**2 * df_v / (df_v - 2),
          rtol=1e-2,
          atol=0)
      self._checkKLApprox(df_v, mu_v, sigma_v, sample_values)

  # Test that sampling with the same seed twice gives the same results.
  def testStudentSampleMultipleTimes(self):
    with self.test_session():
      df = constant_op.constant(4.)
      mu = constant_op.constant(3.)
      sigma = constant_op.constant(math.sqrt(10.))
      n = constant_op.constant(100)

      random_seed.set_random_seed(654321)
      student = student_t.StudentT(
          df=df, loc=mu, scale=sigma, name="student_t1")
      samples1 = self.evaluate(student.sample(n, seed=123456))

      random_seed.set_random_seed(654321)
      student2 = student_t.StudentT(
          df=df, loc=mu, scale=sigma, name="student_t2")
      samples2 = self.evaluate(student2.sample(n, seed=123456))

      self.assertAllClose(samples1, samples2)

  def testStudentSampleSmallDfNoNan(self):
    with self.test_session():
      df_v = [1e-1, 1e-5, 1e-10, 1e-20]
      df = constant_op.constant(df_v)
      n = constant_op.constant(200000)
      student = student_t.StudentT(df=df, loc=1., scale=1.)
      samples = student.sample(n, seed=123456)
      sample_values = self.evaluate(samples)
      n_val = 200000
      self.assertEqual(sample_values.shape, (n_val, 4))
      self.assertTrue(np.all(np.logical_not(np.isnan(sample_values))))

  def testStudentSampleMultiDimensional(self):
    with self.test_session():
      batch_size = 7
      df = constant_op.constant([[3., 7.]] * batch_size)
      mu = constant_op.constant([[3., -3.]] * batch_size)
      sigma = constant_op.constant(
          [[math.sqrt(10.), math.sqrt(15.)]] * batch_size)
      df_v = [3., 7.]
      mu_v = [3., -3.]
      sigma_v = [np.sqrt(10.), np.sqrt(15.)]
      n = constant_op.constant(200000)
      student = student_t.StudentT(df=df, loc=mu, scale=sigma)
      samples = student.sample(n, seed=123456)
      sample_values = self.evaluate(samples)
      self.assertEqual(samples.get_shape(), (200000, batch_size, 2))
      self.assertAllClose(
          sample_values[:, 0, 0].mean(), mu_v[0], rtol=1e-2, atol=0)
      self.assertAllClose(
          sample_values[:, 0, 0].var(),
          sigma_v[0]**2 * df_v[0] / (df_v[0] - 2),
          rtol=1e-1,
          atol=0)
      self._checkKLApprox(df_v[0], mu_v[0], sigma_v[0], sample_values[:, 0, 0])
      self.assertAllClose(
          sample_values[:, 0, 1].mean(), mu_v[1], rtol=1e-2, atol=0)
      self.assertAllClose(
          sample_values[:, 0, 1].var(),
          sigma_v[1]**2 * df_v[1] / (df_v[1] - 2),
          rtol=1e-1,
          atol=0)
      # BUG FIX: the second component's samples must be checked against the
      # second component's parameters (was df_v[0], mu_v[0], sigma_v[0]).
      self._checkKLApprox(df_v[1], mu_v[1], sigma_v[1], sample_values[:, 0, 1])

  def _checkKLApprox(self, df, mu, sigma, samples):
    """Compare sample histogram against a scipy reference via an approximate KL."""
    n = samples.size
    np.random.seed(137)
    if not stats:
      return
    sample_scipy = stats.t.rvs(df, loc=mu, scale=sigma, size=n)
    covg = 0.99
    r = stats.t.interval(covg, df, loc=mu, scale=sigma)
    bins = 100
    hist, _ = np.histogram(samples, bins=bins, range=r)
    hist_scipy, _ = np.histogram(sample_scipy, bins=bins, range=r)
    self.assertGreater(hist.sum(), n * (covg - .01))
    self.assertGreater(hist_scipy.sum(), n * (covg - .01))
    hist_min1 = hist + 1.  # put at least one item in each bucket
    hist_norm = hist_min1 / hist_min1.sum()
    hist_scipy_min1 = hist_scipy + 1.  # put at least one item in each bucket
    hist_scipy_norm = hist_scipy_min1 / hist_scipy_min1.sum()
    kl_appx = np.sum(np.log(hist_scipy_norm / hist_norm) * hist_scipy_norm)
    self.assertLess(kl_appx, 1)

  def testBroadcastingParams(self):

    def _check(student):
      self.assertEqual(student.mean().get_shape(), (3,))
      self.assertEqual(student.variance().get_shape(), (3,))
      self.assertEqual(student.entropy().get_shape(), (3,))
      self.assertEqual(student.log_prob(2.).get_shape(), (3,))
      self.assertEqual(student.prob(2.).get_shape(), (3,))
      self.assertEqual(student.sample(37, seed=123456).get_shape(), (37, 3,))

    _check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
    _check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
    _check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))

  def testBroadcastingPdfArgs(self):

    def _assert_shape(student, arg, shape):
      self.assertEqual(student.log_prob(arg).get_shape(), shape)
      self.assertEqual(student.prob(arg).get_shape(), shape)

    def _check(student):
      _assert_shape(student, 2., (3,))
      xs = np.array([2., 3., 4.], dtype=np.float32)
      _assert_shape(student, xs, (3,))
      xs = np.array([xs])
      _assert_shape(student, xs, (1, 3))
      xs = xs.T
      _assert_shape(student, xs, (3, 3))

    _check(student_t.StudentT(df=[2., 3., 4.,], loc=2., scale=1.))
    _check(student_t.StudentT(df=7., loc=[2., 3., 4.,], scale=1.))
    _check(student_t.StudentT(df=7., loc=3., scale=[2., 3., 4.,]))

    def _check2d(student):
      _assert_shape(student, 2., (1, 3))
      xs = np.array([2., 3., 4.], dtype=np.float32)
      _assert_shape(student, xs, (1, 3))
      xs = np.array([xs])
      _assert_shape(student, xs, (1, 3))
      xs = xs.T
      _assert_shape(student, xs, (3, 3))

    _check2d(student_t.StudentT(df=[[2., 3., 4.,]], loc=2., scale=1.))
    _check2d(student_t.StudentT(df=7., loc=[[2., 3., 4.,]], scale=1.))
    _check2d(student_t.StudentT(df=7., loc=3., scale=[[2., 3., 4.,]]))

    def _check2d_rows(student):
      _assert_shape(student, 2., (3, 1))
      xs = np.array([2., 3., 4.], dtype=np.float32)  # (3,)
      _assert_shape(student, xs, (3, 3))
      xs = np.array([xs])  # (1,3)
      _assert_shape(student, xs, (3, 3))
      xs = xs.T  # (3,1)
      _assert_shape(student, xs, (3, 1))

    _check2d_rows(student_t.StudentT(df=[[2.], [3.], [4.]], loc=2., scale=1.))
    _check2d_rows(student_t.StudentT(df=7., loc=[[2.], [3.], [4.]], scale=1.))
    _check2d_rows(student_t.StudentT(df=7., loc=3., scale=[[2.], [3.], [4.]]))

  def testMeanAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
    with self.test_session():
      mu = [1., 3.3, 4.4]
      student = student_t.StudentT(df=[3., 5., 7.], loc=mu, scale=[3., 2., 1.])
      mean = self.evaluate(student.mean())
      self.assertAllClose([1., 3.3, 4.4], mean)

  def testMeanAllowNanStatsIsFalseRaisesWhenBatchMemberIsUndefined(self):
    with self.test_session():
      mu = [1., 3.3, 4.4]
      # df <= 1 in the first member => the mean is undefined there.
      student = student_t.StudentT(
          df=[0.5, 5., 7.], loc=mu, scale=[3., 2., 1.],
          allow_nan_stats=False)
      with self.assertRaisesOpError("x < y"):
        self.evaluate(student.mean())

  def testMeanAllowNanStatsIsTrueReturnsNaNForUndefinedBatchMembers(self):
    with self.test_session():
      mu = [-2, 0., 1., 3.3, 4.4]
      sigma = [5., 4., 3., 2., 1.]
      student = student_t.StudentT(
          df=[0.5, 1., 3., 5., 7.], loc=mu, scale=sigma,
          allow_nan_stats=True)
      mean = self.evaluate(student.mean())
      self.assertAllClose([np.nan, np.nan, 1., 3.3, 4.4], mean)

  def testVarianceAllowNanStatsTrueReturnsNaNforUndefinedBatchMembers(self):
    with self.test_session():
      # df = 0.5 ==> undefined mean ==> undefined variance.
      # df = 1.5 ==> infinite variance.
      df = [0.5, 1.5, 3., 5., 7.]
      mu = [-2, 0., 1., 3.3, 4.4]
      sigma = [5., 4., 3., 2., 1.]
      student = student_t.StudentT(
          df=df, loc=mu, scale=sigma, allow_nan_stats=True)
      var = self.evaluate(student.variance())
      # scipy uses inf for variance when the mean is undefined.  When mean is
      # undefined we say variance is undefined as well.  So test the first
      # member of var, making sure it is NaN, then replace with inf and compare
      # to scipy.
      self.assertTrue(np.isnan(var[0]))
      var[0] = np.inf

      if not stats:
        return
      expected_var = [
          stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
      ]
      self.assertAllClose(expected_var, var)

  def testVarianceAllowNanStatsFalseGivesCorrectValueForDefinedBatchMembers(
      self):
    with self.test_session():
      # df = 1.5 ==> infinite variance.
      df = [1.5, 3., 5., 7.]
      mu = [0., 1., 3.3, 4.4]
      sigma = [4., 3., 2., 1.]
      student = student_t.StudentT(df=df, loc=mu, scale=sigma)
      var = self.evaluate(student.variance())

      if not stats:
        return
      expected_var = [
          stats.t.var(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
      ]
      self.assertAllClose(expected_var, var)

  def testVarianceAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
    with self.test_session():
      # df <= 1 ==> variance not defined
      student = student_t.StudentT(
          df=1., loc=0., scale=1., allow_nan_stats=False)
      with self.assertRaisesOpError("x < y"):
        self.evaluate(student.variance())

    with self.test_session():
      # df <= 1 ==> variance not defined
      student = student_t.StudentT(
          df=0.5, loc=0., scale=1., allow_nan_stats=False)
      with self.assertRaisesOpError("x < y"):
        self.evaluate(student.variance())

  def testStd(self):
    with self.test_session():
      # Defined for all batch members.
      df = [3.5, 5., 3., 5., 7.]
      mu = [-2.2]
      sigma = [5., 4., 3., 2., 1.]
      student = student_t.StudentT(df=df, loc=mu, scale=sigma)
      # Test broadcast of mu across shape of df/sigma
      stddev = self.evaluate(student.stddev())
      mu *= len(df)

      if not stats:
        return
      expected_stddev = [
          stats.t.std(d, loc=m, scale=s) for (d, m, s) in zip(df, mu, sigma)
      ]
      self.assertAllClose(expected_stddev, stddev)

  def testMode(self):
    with self.test_session():
      df = [0.5, 1., 3]
      mu = [-1, 0., 1]
      sigma = [5., 4., 3.]
      student = student_t.StudentT(df=df, loc=mu, scale=sigma)
      # Test broadcast of mu across shape of df/sigma
      mode = self.evaluate(student.mode())
      self.assertAllClose([-1., 0, 1], mode)

  def testPdfOfSample(self):
    student = student_t.StudentT(df=3., loc=np.pi, scale=1.)
    num = 20000
    samples = student.sample(num, seed=123456)
    pdfs = student.prob(samples)
    mean = student.mean()
    mean_pdf = student.prob(student.mean())
    sample_vals, pdf_vals, mean_val, mean_pdf_val = self.evaluate(
        [samples, pdfs, student.mean(), mean_pdf])
    self.assertEqual(samples.get_shape(), (num,))
    self.assertEqual(pdfs.get_shape(), (num,))
    self.assertEqual(mean.get_shape(), ())
    self.assertNear(np.pi, np.mean(sample_vals), err=0.02)
    self.assertNear(np.pi, mean_val, err=1e-6)
    # Verify integral over sample*pdf ~= 1.
    # Tolerance increased since eager was getting a value of 1.002041.
    self._assertIntegral(sample_vals, pdf_vals, err=3e-3)
    if not stats:
      return
    self.assertNear(stats.t.pdf(np.pi, 3., loc=np.pi), mean_pdf_val, err=1e-6)

  def testPdfOfSampleMultiDims(self):
    student = student_t.StudentT(df=[7., 11.], loc=[[5.], [6.]], scale=3.)
    self.assertAllEqual([], student.event_shape)
    self.assertAllEqual([], self.evaluate(student.event_shape_tensor()))
    self.assertAllEqual([2, 2], student.batch_shape)
    self.assertAllEqual([2, 2], self.evaluate(student.batch_shape_tensor()))
    num = 50000
    samples = student.sample(num, seed=123456)
    pdfs = student.prob(samples)
    sample_vals, pdf_vals = self.evaluate([samples, pdfs])
    self.assertEqual(samples.get_shape(), (num, 2, 2))
    self.assertEqual(pdfs.get_shape(), (num, 2, 2))
    self.assertNear(5., np.mean(sample_vals[:, 0, :]), err=.03)
    self.assertNear(6., np.mean(sample_vals[:, 1, :]), err=.03)
    self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
    self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
    if not stats:
      return
    self.assertNear(
        stats.t.var(7., loc=0., scale=3.),  # loc d.n. effect var
        np.var(sample_vals[:, :, 0]),
        err=.4)
    self.assertNear(
        stats.t.var(11., loc=0., scale=3.),  # loc d.n. effect var
        np.var(sample_vals[:, :, 1]),
        err=.4)

  def _assertIntegral(self, sample_vals, pdf_vals, err=1.5e-3):
    """Trapezoid-style check that the pdf over the samples integrates to ~1."""
    s_p = zip(sample_vals, pdf_vals)
    prev = (sample_vals.min() - 1000, 0)
    total = 0
    for k in sorted(s_p, key=lambda x: x[0]):
      pair_pdf = (k[1] + prev[1]) / 2
      total += (k[0] - prev[0]) * pair_pdf
      prev = k
    self.assertNear(1., total, err=err)

  def testNegativeDofFails(self):
    with self.test_session():
      with self.assertRaisesOpError(r"Condition x > 0 did not hold"):
        student = student_t.StudentT(
            df=[2, -5.], loc=0., scale=1., validate_args=True, name="S")
        self.evaluate(student.mean())

  def testStudentTWithAbsDfSoftplusScale(self):
    with self.test_session():
      df = constant_op.constant([-3.2, -4.6])
      mu = constant_op.constant([-4.2, 3.4])
      sigma = constant_op.constant([-6.4, -8.8])
      student = student_t.StudentTWithAbsDfSoftplusScale(
          df=df, loc=mu, scale=sigma)
      # The wrapper applies floor(abs(.)) to df and softplus(.) to scale.
      self.assertAllClose(
          math_ops.floor(self.evaluate(math_ops.abs(df))),
          self.evaluate(student.df))
      self.assertAllClose(self.evaluate(mu), self.evaluate(student.loc))
      self.assertAllClose(
          self.evaluate(nn_ops.softplus(sigma)), self.evaluate(student.scale))


if __name__ == "__main__":
  test.main()
Islam regards all wealth as belonging to God; it is an “amanah” (a trust that is entrusted to the individual) from Allah. We will have to account for the wealth that Allah has entrusted to us in this life and in the hereafter. One may accumulate as much as one pleases as long as the means of acquiring it do not violate the moral law. Wealth gathering is a legitimate activity as long as it does not entail theft, cheating, coercion, riba (usury), or harm to others. Indeed, the pursuit of wealth is one of man’s primal concerns and demands for survival (consisting of food, shelter and clothing). Wealth gathering is vital for living, but it must be subjected to the moral law. Without this law, human life sinks to a level oriented around money and could lead to moral decadence. Even if the moral law has been strictly observed in every step of the process of acquiring wealth, our wealth still needs justification on another level, and this is where the institution of wealth sharing, or Zakah, comes in. One of Islam’s tenets is that wealth, once acquired, ought to be shared with others in due proportion. This is the requirement of charity; it is as old as humanity and has always been regarded as a high moral value. a) To purify the physical well-being and the soul of a man by inhibiting selfishness and materialism from taking root in the heart of the rich, as well as to provide spiritual training to create a noble, good person who is caring to others. d) To assure the needy that their fellow brothers will not passively watch them suffer misfortune. No Zakah is due on property intended for consumption, such as houses, gardens, clothing, furniture, or jewelry of gold and silver. Taxable property is that which is intended for production, whether industrial, agricultural or commercial.
According to Malaysian standard, even though jewelries which are used by females will not be imposed to Zakah but, one must bear in mind that if a women wears jewelries which overall cost of the jewelries is higher than RM5000 i.e normal standard usage of jewelries for a woman in Malaysia, she has to pay Zakah (2.5 %) from the additional amount which is above the standard. Zakah is not an indiscriminate tax on all properties. Assessment of Zakah must take into consideration the net income produced by the property in question. If in a year, the company suffers a loss, no Zakah is levied on the property concerned. A reasonable amount necessary for the owner and his dependents’ subsistence must be deducted from the assessment. Shares which are permissible to buy and own may be purchased for either holding them and expecting their dividends or for participating in the management of the company, or for using them as tradable objects waiting for a good opportunity to realize a capital gain and sell. In this case, one is to pay Zakah at the same rate and net asset value on the due date of Zakah. a. The view of the majority, which came in a resolution of the OIC Fiqh Academy, maintains that one has to calculate the “Zakah able” part of the value of the stock, from the companys balance sheet and pay Zakah on it in the due date at the rate of 2.5%. The “Zakah able” part is: cash+receivables+inventories of goods in process and ready for sale-short term debts. b. The minority view, states that this investment is similar to trading in stocks, in the Shariah meaning of the word. Accordingly, the owner has to pay Zakah at the rate of 2.5% on the market value on the due date. c. The third view is a subset of the first one; it actually adds to the first one that if it is difficult to calculate Zakah from the balance sheet, one may pay 10% on the net income of the stock, in analogy with agriculture. Actually, there is no strong and logical support in Shariah for this opinion. 
a) Losing Allah’s blessings over his/her properties. b) It leads to greediness in oneself, to accumulate as much wealth as one could while ignoring the lawfulness of the sources of income in the eyes of Islam. c) Widening the gap between the rich and the poor. In this case, the rich become richer and the poor remain poor, or even poorer, without any help to improve the situation. As a result, this leads to social illnesses and civil crimes, which will erode the harmony and tranquility of life in society. d) The existence of envy and hatred between the rich and the poor. e) The wealth which has not been purified with Zakah will bring disasters to its owner in the hereafter. This Ramadhan al-Mubarak is a perfect time to be reminded of the duty to pay “Zakah al-Fitr”, which is obligatory on all Muslims — man or woman, young or old. Accordingly, the cash value of one saa’ (one “gantang”, or 2.3 kg) can be paid as “Zakah al-fitr”, and it is equivalent to a very small amount of money (below RM 5.00). It must be settled before the ‘Idul Fitr sunnah prayer is performed. If it is settled later than this, it will not be regarded as Zakah fitrah any more but merely as an ordinary sadaqah. Therefore, Muslims should not hesitate in carrying out this duty at the due time, which will also help to brighten the Hari Raya celebration of the less fortunate members of society. * The writer holds a M.A (Hons) in Shariah from Jordan and he is a Shariah Compliance Manager, RHB Islamic Bank Berhad.
import sqlite3
from artista import artist
from disquillos import album

# Single on-disk SQLite database shared by every helper in this module.
_DB_NAME = 'musicBrainz.db'


def Crear_Tabla_Artistas():
    """Create the ``artistas`` table if it does not already exist."""
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        print('Conectado')
        query = """CREATE TABLE IF NOT EXISTS artistas(
            id TEXT,
            nombre TEXT,
            tags TEXT,
            area TEXT,
            ExtScore TEXT,
            tipo TEXT
        );"""
        cursor.execute(query)
        print('Tabla creada con exito')
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        # conexion stays None when connect() itself failed; the original
        # referenced an unbound name here in that case.
        if conexion:
            conexion.close()


def Agregar_Elemento_Artista(artist):
    """Insert one artist row.

    ``artist`` (which shadows the module-level import of the same name)
    must expose ``_id``, ``_name``, ``_tags``, ``_area``, ``_extScore``
    and ``_type`` attributes.
    """
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        print('Conectado')
        # Parameterized query: the original interpolated values with
        # str.format, which breaks on quotes in the data and is an SQL
        # injection vector.
        query = 'INSERT INTO artistas VALUES (?, ?, ?, ?, ?, ?)'
        resultado = cursor.execute(
            query,
            (artist._id, artist._name, artist._tags,
             artist._area, artist._extScore, artist._type))
        conexion.commit()
        print('Valor Insertado Correctamente', resultado)
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        if conexion:
            conexion.close()


def Ver_Todo_Artistas():
    """Print every row of ``artistas`` to stdout."""
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        print('Conectado')
        query = 'SELECT * FROM artistas;'
        cursor.execute(query)
        rows = cursor.fetchall()
        print('Total de registros: ', len(rows))
        print('------------Registros-------------')
        for row in rows:
            print('Id: {}\nNombre: {}\nTags: {}\nArea: {}\nExtScore: {}\nTipo: {}'.format(*row))
            print('-------------------------------')
        print('Total de registros: ', len(rows))
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        if conexion:
            conexion.close()


def Ver_Nombres():
    """Return the list of artist names stored in ``artistas``."""
    # Initialized before the try block so the trailing return never hits an
    # unbound name when the query fails (the original raised UnboundLocalError
    # on any sqlite3.Error).
    lista = []
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        print('Conectado')
        query = 'SELECT nombre FROM artistas;'
        cursor.execute(query)
        rows = cursor.fetchall()
        print('Total de registros: ', len(rows))
        print('------------Registros-------------')
        for row in rows:
            lista.append(row[0])
        print('Total de registros: ', len(rows))
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        if conexion:
            conexion.close()
    return lista


def Crear_Tabla_Albums():
    """Create the ``albums`` table if it does not already exist."""
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        print('Conectado')
        query = """CREATE TABLE IF NOT EXISTS albums(
            id TEXT,
            artista TEXT,
            titulo TEXT,
            status TEXT,
            type TEXT
        );"""
        cursor.execute(query)
        print('Tabla creada con exito')
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        if conexion:
            conexion.close()


def Agregar_Elemento_Album(album):
    """Insert one album row.

    ``album`` (which shadows the module-level import of the same name)
    must expose ``_id``, ``_artista``, ``_titulo``, ``_status`` and
    ``_type`` attributes.
    """
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        print('Conectado')
        # Parameterized for the same injection/quoting reasons as above.
        query = 'INSERT INTO albums VALUES (?, ?, ?, ?, ?)'
        resultado = cursor.execute(
            query,
            (album._id, album._artista, album._titulo,
             album._status, album._type))
        conexion.commit()
        print('Valor Insertado Correctamente', resultado)
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        if conexion:
            conexion.close()


def Ver_Todo_Albums():
    """Print every row of ``albums`` to stdout."""
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        print('Conectado')
        query = 'SELECT * FROM albums;'
        cursor.execute(query)
        rows = cursor.fetchall()
        print('Total de registros: ', len(rows))
        print('------------Registros-------------')
        for row in rows:
            print('Id: {}\nArtista: {}\nTitulo: {}\nStatus: {}\nType: {}'.format(*row))
            print('-------------------------------')
        print('Total de registros: ', len(rows))
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        if conexion:
            conexion.close()


def borrar_tabla_albums():
    """Drop the ``albums`` table (raises inside except/print if absent)."""
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        query = 'DROP TABLE albums;'
        print('Conectado')
        cursor.execute(query)
        print('registros eliminados')
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        if conexion:
            conexion.close()


def borrar_tabla_Artistas():
    """Drop the ``artistas`` table (raises inside except/print if absent)."""
    conexion = None
    try:
        conexion = sqlite3.connect(_DB_NAME)
        cursor = conexion.cursor()
        query = 'DROP TABLE artistas;'
        print('Conectado')
        cursor.execute(query)
        print('registros eliminados')
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexion', error)
    finally:
        if conexion:
            conexion.close()
Under-basing colored inks on dark garments. Does it have to be white? Most of the screen printers I’ve come across in the last few decades still think in “old school” terms when it comes to printing on dark or black substrates. They think a solid white “bulletproof” layer of ink needs to be put down first, as if we were offset printing on white paper. This is a great way to kill the breathability of the cotton, create massive dot gain, and make a plastic decal that will split and crack with repeated washing. Not to mention, drink a boatload of white ink. Here is some of what I have learned from the battle…
# ptypes is a project dependency providing pbinary/dyn for declarative
# bit-level structure parsing.
from ptypes import *

# Placeholder bit-width used for every variable-length field below.  In the
# H.264 spec these are Exp-Golomb coded (ue(v)/se(v)); a width of 0 parses
# nothing — presumably a stub until proper ue(v) decoding exists.  TODO confirm.
v = 0

# FIXME: this file format is busted


class seq_parameter_set_rbsp(pbinary.struct):
    """H.264 sequence parameter set RBSP (looks like ITU-T H.264 7.3.2.1).

    NOTE(review): bare references to ``__pic_order_type_1``,
    ``__frame_crop_offset`` and ``__vul_parameters`` inside this class body
    undergo Python name mangling (``_seq_parameter_set_rbsp__...``) and are
    looked up as globals, not class attributes — those branches will raise
    NameError when taken.  ``__vul_parameters`` (sic; presumably
    'vui_parameters') is not defined anywhere in view.
    """

    class __pic_order_type_1(pbinary.struct):
        # Layout used when pic_order_cnt_type == 1.
        _fields_ = [
            (1, 'delta_pic_order_always_zero_flag'),
            (v, 'offset_for_non_ref_pic'),
            (v, 'offset_for_top_to_bottom_field'),
            (v, 'num_ref_frames_in_pic_order_cnt_cycle'),
            # Array sized by the previously parsed count field.
            (lambda s: dyn.array(
                dyn.clone(pbinary.struct, _fields_=[(v, 'offset_for_ref_frame')]),
                s['num_ref_frames_in_pic_order_cnt_cycle']), 'ref_frames')
        ]

    def __pic_order(self):
        # Selects the pic_order layout from the already-parsed
        # pic_order_cnt_type field.
        type = self['pic_order_cnt_type']
        if type == 0:
            return dyn.clone(pbinary.struct, _fields_=[(v, 'log2_max_pic_order_cnt_lsb')])
        elif type == 1:
            # NOTE(review): name-mangled global lookup — NameError at runtime.
            return __pic_order_type_1
        raise NotImplementedError(type)

    class __frame_crop_offset(pbinary.struct):
        # Cropping rectangle, present only when frame_cropping_flag is set.
        _fields_ = [
            (v, 'frame_crop_left_offset'),
            (v, 'frame_crop_right_offset'),
            (v, 'frame_crop_top_offset'),
            (v, 'frame_crop_bottom_offset'),
        ]

    def __frame_crop(self):
        if self['frame_cropping_flag']:
            # NOTE(review): name-mangled global lookup — NameError at runtime.
            return __frame_crop_offset
        # Empty struct consumes no bits when cropping is absent.
        return dyn.clone(pbinary.struct, _fields_=[])

    def __rbsp_trailing_bits(self):
        # Zero-width stub; real RBSP trailing bits are a stop bit plus
        # alignment — not implemented here.
        return 0

    _fields_ = [
        (8, 'profile_idc'),
        (1, 'constraint_set0_flag'),
        (1, 'constraint_set1_flag'),
        (1, 'constraint_set2_flag'),
        (5, 'reserved_zero_5bits'),
        (8, 'level_idc'),
        (v, 'seq_parameter_set_id'),
        (v, 'pic_order_cnt_type'),
        (__pic_order, 'pic_order'),
        (v, 'num_ref_frames'),
        (1, 'gaps_in_frame_num_value_allowed_flag'),
        (v, 'pic_width_in_mbs_minus1'),
        (v, 'pic_height_in_map_units_minus1'),
        (1, 'frame_mbs_only_flag'),
        # NOTE(review): width is [0,1][flag], i.e. the field is read when
        # frame_mbs_only_flag == 1; the H.264 spec has it present when the
        # flag is 0 — looks inverted, TODO confirm against spec.
        (lambda s: [0, 1][s['frame_mbs_only_flag']], 'mb_adaptive_frame_field_flag'),
        (1, 'direct_8x8_inference_flag'),
        (1, 'frame_cropping_flag'),
        (__frame_crop, 'frame_crop'),
        (1, 'vul_parameters_present_flag'),
        # NOTE(review): __vul_parameters is undefined (and mangled) —
        # NameError whenever this lambda runs with the flag set.
        (lambda s: [dyn.clone(pbinary.struct, _fields_=[]), __vul_parameters][s['vul_parameters_present_flag']], 'vul_parameters'),
        (__rbsp_trailing_bits, 'rbsp_trailing_bits'),
    ]
This season Club MAC Alcudia has decided to introduce two new kids activities which are sure to provide your children with a holiday they’ll never forget. Club MAC is a family holiday resort based in the charming port town of Alcudia, located in the sunny north-eastern reaches of Majorca. We regularly host a series of activities for kids, such as Masterchef Junior and Smurf’s Day, which are destined to give your children hours and hours of joy and laughter! We’ve recently added two new events to the kids activities roster, the first of which is the Western Party. Held at 3pm on the alternate Thursday every fortnight, this event will transport your kids back in time to the frontier days of the Wild Wild West. Our staff transform the Saturno Hotel pool area into a picturesque Western Party. Throughout the afternoon, kids and parents can try their hand at a variety of traditional Western-style games including the horse shoe game, balloon darts and the cans game, as well as dance up a storm to the authentic strains of traditional cowboy music! Meanwhile, when the clock strikes four we host an apple bobbing competition that’ll have your children laughing so hard they’ll remember it for years! We’re on the hunt for the best bobber in all of Club MAC, and we award those who do well at the contest with a series of fantastic prizes! The second event we’ve added to our kids activity itinerary this summer is Detective Day, which takes place once a fortnight. This fun-filled activity will give your children the chance to pull their best Sherlock Holmes impression, and help the Club MAC team solve a confusing mystery! The day starts with the discovery that Club MAC has been robbed; someone has stolen the biscuits tin from the kids club! Following this discovery, your kids will be given the opportunity to test out their research skills through games and challenges at the crime scene, to help our team solve the case and find both the biscuits tin and the elusive thief! 
Have the holiday of your life! Here at Club MAC Alcudia, we’re always striving to ensure that we do a better job at providing you and your family with a once-in-a-lifetime getaway experience. These two new kids activities will help you make sure that your family has a fabulous time at Club MAC Alcudia!
""" ps_QPainter_drawRect101.py explore the PySide GUI toolkit to draw rectangles in different colors there are a number of ways colors can be specified fill colors are set with the brush perimeter colors are set with the pen QColor can be given a transparency value (PySide is the official LGPL-licensed version of PyQT) for Python33 you can use the Windows self-extracting installer PySide-1.1.2.win32-py3.3.exe (PyQT483 equivalent) from: http://qt-project.org/wiki/PySide or: http://www.lfd.uci.edu/~gohlke/pythonlibs/ for Qpainter methods see: http://srinikom.github.com/pyside-docs/PySide/QtGui/ QPainter.html?highlight=qpainter#PySide.QtGui.PySide.QtGui.QPainter tested with Python27 and Python33 by vegaseat 14jan2013 """ from PySide.QtCore import * from PySide.QtGui import * class MyWindow(QWidget): def __init__(self): QWidget.__init__(self) # setGeometry(x_pos, y_pos, width, height) # upper left corner coordinates (x_pos, y_pos) self.setGeometry(300, 300, 370, 100) self.setWindowTitle('Colors set with brush and pen') def paintEvent(self, e): ''' the method paintEvent() is called automatically the QPainter class does all the low-level drawing coded between its methods begin() and end() ''' qp = QPainter() qp.begin(self) self.drawRectangles(qp) qp.end() def drawRectangles(self, qp): '''use QPainter (instance qp) methods to do drawings''' # there are several different ways to reference colors # use HTML style color string #RRGGBB with values 00 to FF black = "#000000" # QPen(color, width, style) qp.setPen(black) # use QColor(r, g, b) with values 0 to 255 qp.setBrush(QColor(255, 0, 0)) # drawRect(int x, int y, int width, int height) # upper left corner coordinates (x, y) qp.drawRect(10, 15, 90, 60) # there are some preset named colors qp.setBrush(QColor(Qt.green)) qp.drawRect(160, 25, 90, 60) # this rectangle will overlap the previous one # you can give it some transparency alpha 0 to 255 # QColor(int r, int g, int b, int alpha=255) qp.setBrush(QColor(0, 0, 255, 100)) 
qp.drawRect(130, 15, 90, 60) # some colors can be given as strings qp.setBrush(QColor('yellow')) qp.drawRect(265, 25, 90, 60) app = QApplication([]) win = MyWindow() win.show() # run the application event loop app.exec_()
Monogram leopard make up cosmetic bag. Sophisticated, stylish and classy! This bag embodies all those features. It is elegantly adorned with a solid black color block center and trimmed in a rust and black bottom band and trim. This animal print pattern is a classic staple in fashion. The neat compact shape makes it perfect for storing cosmetics and it will fit easily into a bigger suitcase or handbag. The bag features a simple rectangular design with a zip closure running along the top. Use it when you travel or to take cosmetics to the office. Add a matching duffle or tote bag for traveling. Personalization is always available and included with purchase from Simply Bags. Embroidered make up cosmetic bag measures 9"W x 7"H x 2.5" across bottom. Nylon lined, zipper closure.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import json
import socket
import time
from typing import Any, Dict, Iterable, List, Mapping, Optional, Union
from urllib.error import HTTPError, URLError

import jenkins
from jenkins import Jenkins, JenkinsException
from requests import Request

from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.jenkins.hooks.jenkins import JenkinsHook

JenkinsRequest = Mapping[str, Any]
ParamType = Optional[Union[str, Dict, List]]


def jenkins_request_with_headers(jenkins_server: Jenkins, req: Request) -> Optional[JenkinsRequest]:
    """
    We need to get the headers in addition to the body answer to get the
    location from them.
    This function uses the ``jenkins_request`` method from the python-jenkins
    library with just the return call changed.

    :param jenkins_server: The server to query
    :param req: The request to execute
    :return: Dict containing the response body (key body)
        and the headers coming along (headers)
    :raises JenkinsException: on auth-like HTTP errors or URL errors
    :raises jenkins.NotFoundException: on HTTP 404
    :raises jenkins.TimeoutException: on socket timeout
    """
    try:
        response = jenkins_server.jenkins_request(req)
        response_body = response.content
        response_headers = response.headers
        if response_body is None:
            raise jenkins.EmptyResponseException(
                f"Error communicating with server[{jenkins_server.server}]: empty response"
            )
        return {'body': response_body.decode('utf-8'), 'headers': response_headers}
    except HTTPError as e:
        # Jenkins's funky authentication means its nigh impossible to distinguish errors.
        if e.code in [401, 403, 500]:
            raise JenkinsException(f'Error in request. Possibly authentication failed [{e.code}]: {e.reason}')
        elif e.code == 404:
            raise jenkins.NotFoundException('Requested item could not be found')
        else:
            raise
    except socket.timeout as e:
        raise jenkins.TimeoutException(f'Error in request: {e}')
    except URLError as e:
        raise JenkinsException(f'Error in request: {e.reason}')
    # NOTE: every path above either returns or raises; the original trailing
    # ``return None`` was unreachable and has been removed.


class JenkinsJobTriggerOperator(BaseOperator):
    """
    Trigger a Jenkins Job and monitor its execution.

    This operator depends on the python-jenkins library, version >= 0.4.15,
    to communicate with the jenkins server. You'll also need to configure
    a Jenkins connection in the connections screen.

    :param jenkins_connection_id: The jenkins connection to use for this job
    :type jenkins_connection_id: str
    :param job_name: The name of the job to trigger
    :type job_name: str
    :param parameters: The parameters block provided to jenkins for use in
        the API call when triggering a build. (templated)
    :type parameters: str, Dict, or List
    :param sleep_time: How long will the operator sleep between each status
        request for the job (min 1, default 10)
    :type sleep_time: int
    :param max_try_before_job_appears: The maximum number of requests to make
        while waiting for the job to appear on the jenkins server (default 10)
    :type max_try_before_job_appears: int
    :param allowed_jenkins_states: Iterable of allowed result jenkins states,
        default is ``['SUCCESS']``
    :type allowed_jenkins_states: Optional[Iterable[str]]
    """

    template_fields = ('parameters',)
    template_ext = ('.json',)
    ui_color = '#f9ec86'

    def __init__(
        self,
        *,
        jenkins_connection_id: str,
        job_name: str,
        parameters: ParamType = "",
        sleep_time: int = 10,
        max_try_before_job_appears: int = 10,
        allowed_jenkins_states: Optional[Iterable[str]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.job_name = job_name
        self.parameters = parameters
        # Enforce the documented minimum of 1 second between polls.
        self.sleep_time = max(sleep_time, 1)
        self.jenkins_connection_id = jenkins_connection_id
        self.max_try_before_job_appears = max_try_before_job_appears
        self.allowed_jenkins_states = list(allowed_jenkins_states) if allowed_jenkins_states else ['SUCCESS']

    def build_job(self, jenkins_server: Jenkins, params: ParamType = "") -> Optional[JenkinsRequest]:
        """
        This function makes an API call to Jenkins to trigger a build for 'job_name'.
        It returns a dict with 2 keys: body and headers. headers contains also a
        dict-like object which can be queried to get the location to poll in the queue.

        :param jenkins_server: The jenkins server where the job should be triggered
        :param params: The parameters block to provide to jenkins API call.
        :return: Dict containing the response body (key body)
            and the headers coming along (headers)
        """
        # Since params can be either JSON string, dictionary, or list,
        # check type and pass to build_job_url
        if params and isinstance(params, str):
            params = ast.literal_eval(params)

        # We need a None to call the non-parametrized jenkins api end point
        if not params:
            params = None

        request = Request(method='POST', url=jenkins_server.build_job_url(self.job_name, params, None))
        return jenkins_request_with_headers(jenkins_server, request)

    def poll_job_in_queue(self, location: str, jenkins_server: Jenkins) -> int:
        """
        This method polls the jenkins queue until the job is executed.
        When we trigger a job through an API call, the job is first put in the
        queue without having a build number assigned. Thus we have to wait until
        the job exits the queue to know its build number.
        To do so, we have to add /api/json (or /api/xml) to the location
        returned by the build_job call and poll this file. When an 'executable'
        block appears in the json, it means the job execution started and the
        field 'number' then contains the build number.

        :param location: Location to poll, returned in the header of the build_job call
        :param jenkins_server: The jenkins server to poll
        :return: The build_number corresponding to the triggered job
        :raises AirflowException: if the job never leaves the queue within
            ``max_try_before_job_appears`` polls
        """
        try_count = 0
        location += '/api/json'
        # TODO Use get_queue_info instead
        # once it will be available in python-jenkins (v > 0.4.15)
        self.log.info('Polling jenkins queue at the url %s', location)
        while try_count < self.max_try_before_job_appears:
            location_answer = jenkins_request_with_headers(
                jenkins_server, Request(method='POST', url=location)
            )
            if location_answer is not None:
                json_response = json.loads(location_answer['body'])
                if 'executable' in json_response:
                    build_number = json_response['executable']['number']
                    self.log.info('Job executed on Jenkins side with the build number %s', build_number)
                    return build_number
            try_count += 1
            time.sleep(self.sleep_time)
        raise AirflowException(
            "The job hasn't been executed after polling "
            f"the queue {self.max_try_before_job_appears} times"
        )

    def get_hook(self) -> JenkinsHook:
        """Instantiate the jenkins hook for the configured connection."""
        return JenkinsHook(self.jenkins_connection_id)

    def execute(self, context: Mapping[Any, Any]) -> Optional[str]:
        """Trigger the job, wait for it to leave the queue, then poll it to
        completion; returns the build url (if available) for later use."""
        if not self.jenkins_connection_id:
            self.log.error(
                # Fixed: the original literals concatenated without a space
                # ("use.You must create...").
                'Please specify the jenkins connection id to use. '
                'You must create a Jenkins connection before'
                ' being able to use this operator'
            )
            raise AirflowException(
                'The jenkins_connection_id parameter is missing, impossible to trigger the job'
            )

        if not self.job_name:
            self.log.error("Please specify the job name to use in the job_name parameter")
            # Fixed: missing space after the comma in the original message.
            raise AirflowException('The job_name parameter is missing, impossible to trigger the job')

        self.log.info(
            'Triggering the job %s on the jenkins : %s with the parameters : %s',
            self.job_name,
            self.jenkins_connection_id,
            self.parameters,
        )
        jenkins_server = self.get_hook().get_jenkins_server()
        jenkins_response = self.build_job(jenkins_server, self.parameters)
        if jenkins_response:
            build_number = self.poll_job_in_queue(jenkins_response['headers']['Location'], jenkins_server)

        time.sleep(self.sleep_time)
        keep_polling_job = True
        build_info = None
        # pylint: disable=too-many-nested-blocks
        while keep_polling_job:
            try:
                build_info = jenkins_server.get_build_info(name=self.job_name, number=build_number)
                if build_info['result'] is not None:
                    keep_polling_job = False
                    # Check if job ended with not allowed state.
                    if build_info['result'] not in self.allowed_jenkins_states:
                        raise AirflowException(
                            # Fixed: missing space between the concatenated
                            # sentences of the original message.
                            'Jenkins job failed, final state : %s. '
                            'Find more information on job url : %s'
                            % (build_info['result'], build_info['url'])
                        )
                else:
                    self.log.info('Waiting for job to complete : %s , build %s', self.job_name, build_number)
                    time.sleep(self.sleep_time)
            except jenkins.NotFoundException as err:  # pylint: disable=no-member
                # NOTE(review): NotFoundException may not expose ``resp`` —
                # verify against the python-jenkins version in use.
                raise AirflowException(f'Jenkins job status check failed. Final error was: {err.resp.status}')
            except jenkins.JenkinsException as err:
                raise AirflowException(
                    f'Jenkins call failed with error : {err}, if you have parameters '
                    'double check them, jenkins sends back '
                    'this exception for unknown parameters. '
                    'You can also check logs for more details on this exception '
                    '(jenkins_url/log/rss)'
                )
        if build_info:
            # If we can we return the url of the job
            # for later use (like retrieving an artifact)
            return build_info['url']
        return None
Pinnacle Black Label Diamond Paint Coating is a ceramic coating that creates an extremely glossy, impenetrable layer of protection on your vehicle’s paint. Formulated using the most advanced nano-glass ceramic particles available in a paint coating, Diamond Paint Coating shields your vehicle against UV rays, airborne contaminants, dirt, acid rain, road salt, and other common pollutants that attack your vehicle on a regular basis. Spray and wipe application lasts up to 3 years! Black Label Diamond Paint Coating is going to forever change the way you look at car care and detailing. Formulated using ingredients that were previously unheard of in a car wax or paint sealant, Diamond Paint Coating blankets your vehicle in a hardened glass membrane that resists dirt accumulation and surface staining. The barrier of protection has a heat tolerance higher than any conventional wax or sealant, making it the obvious choice for the perfectionist that demands the best protection for their vehicle. Black Label Diamond Paint Coating and the glassy shine that it creates lasts up to 3 years. This state-of-the-art paint coating was designed to deliver a show-car shine in a simple spray and wipe application. Diamond Paint Coating is actually easier to apply than your favorite carnauba paste wax! What’s more, a single 4 ounce bottle is enough to coat up to 5 vehicles! Step 1: Polish surface using Black Label Surface Cleansing Polish. This is the MOST IMPORTANT step; if the paint isn't polished, the coating will not bond. Step 2: Spray Diamond Paint Coating directly onto the surface and evenly distribute using a Lake Country Coating Applicator. Step 3: Gently remove excess residue using a Gold Plush Microfiber Towel. Black Label Diamond Paint Coating with its nano-glass formula is resistant to alkaline cleaners, degreasers, and harsh detergents. Compared to your clear coat, Diamond Paint Coating is more resistant to scratches and marring. 
The nano-glass formula forms such a hard shell of protection that virtually nothing can penetrate or stick to it, making your vehicle retain that just-detailed shine for years! Another advantage to Black Label Diamond Paint Coating is the self-cleaning effect that it provides. The nano-glass formula fills in the microscopic pits, pores, and valleys of your vehicle’s paint, creating a perfectly smooth, flat surface. Dirt and road grime will be removed with virtually no effort, making your vehicle easier to clean and maintain. Surface care products that bear the Pinnacle name are expected to outshine anything else available, and Diamond Paint Coating is no exception. After being treated, vehicles will look as though molten glass was poured over them creating the appearance of a silky, glass-like shine that causes light to reflect, not refract. Diamond Paint Coating can be topped with your favorite carnauba wax, like Pinnacle Souveran, to add a warm carnauba glow. Black Label Diamond Paint Coating should only be applied to vehicles that have first been polished with Black Label Surface Cleansing Polish. This step is crucial as it removes contaminants, oils, and fillers that would otherwise prevent the coating from bonding. Once that is completed, application of Diamond Paint Coating is as simple as spraying on and evenly distributing with a foam applicator. Wait 24 hours before topping with your favorite carnauba paste wax. Use Black Label Diamond Coating Shampoo to rejuvenate the shine and water beading. Place your hand in a plastic sandwich baggy and feel the paint. If it feels rough or gritty, use Pinnacle Ultra Poly Clay. Working one panel at a time, spray Diamond Paint Coating directly onto the paint and evenly distribute using a Lake Country Coating Applicator. Work the coating into the paint until it disappears. If high spots occur, lightly buff with a Gold Plush Microfiber Towel. I installed this coating two months ago, so longevity is yet to be determined. 
I went with the PBL system because of it's claimed and reviewed ease of use. It was very hot in my garage when I applied the coating, and it flashed much faster than I would like. I wanted to work the coating until it would self level, but after two passes, the coating was hard. I'm sure it would have been different in a cooler environment. One word of warning about this coating- it not only won't hide scratches, it almost seems to amplify them, especially when first applied. Make sure your paint is completely to your satisfaction before coating it. This isn't a knock on the product, because I think most coatings do this. The coating actually looked better three weeks after application than it did a few hours after. The best part is the hydrophobicity of the coating. Very impressive. I don't mind rainy days, because at speeds over 30 mph, the water beads march like soldiers over my hood. I have noticed some light water spotting if drops are allowed to bake in the sun, but the PBL detailer easily glosses over these, and I use it as a drying aid. If you are on the fence about trying a coating, don't hesitate to try the Pinnacle. Like every Pinnacle product I've used, it meets or exceeds my expectations, and the "Wow Factor" eases any buyer's remorse after use. ProsAmazing "candied glass" shine that only gets better with full cure after several weeks. Easy application. Excellent water beading. The "Self Cleaning Effect" is not a myth! Hydrophobicity makes drying with a blower fast and easy. ConsIt's not cheap, especially when you add the cost of prep polish. Surface must be perfectly clean to apply. (not much of a "con", just a caveat!). It makes swirls you missed and RIDS stand out, especially when first applied. Must (In my opinion) be applied in cool conditions. Tested, and works as advertised. I do not have v.2, but the first version of this. I can tell for this stuff works as advertised. 
I applied it on my VW Golf when I bought it brand new back in the beginning of 2014. After almost 3yrs the coating is finally starting to degrade, but that's mainly on the roof, and the C pillars. I can tell because the water doesn't bead up as much as it used to there. If anyone is complaining about this sealant, its because they are not following the directions. It is key that you prep the paint so it is as clean as possible. I used the Black Label cleansing lotion as recommended for prep and I can understand why. The lotion cleans the paint extremely well so use it. Also when applying the sealant make sure you follow the directions exactly how it tells you to. When you apply it with an applicator pad, it is key that you continually rub it in until it drys on its own. Then you can do a gentle buff with a microfiber. (Do not) just spray it and spread it around and then proceed to buff with a microfiber (this isn't a waterless wash spray). If you have a little patience and do it right your efforts will payoff. Cleaning the car after the sealant cured was a breeze. Because of the hydrophobic properties of the coating, nothing likes to stick to it. So when it comes time to do your regular hand wash its fast and easy. Pros-Highly resilient to fallout and bird droppings -Water beading is excellent. -Super easy to clean the car once the coating has cured. -The shine quality I would say is above average for a sealant. -Durable, not as easy to scratch for sure. Cons-Getting the paint as clean as possible is key, but time consuming (this applies to any coating though) -Expensive -Leveling or (high spots) can be an issue. I'm assuming that's part of the reason for v.2 because it can dry quickly when you're not quite finished applying. Can be fixed with a light buff of a microfiber, but its not always easy to spot if you have light colored paint. On the flip side, on dark colored paint its noticeable if the coating is not level. 
Did a test vehicle my black 2015 Z-71 Silverado last year. Comparing it to my 2005 Black Silverado daily which has the standard paint correction, clay and Griots wax every year. you can definetly see a difference in the "brightness and depth" of the paint before and after washing on the 15. The 05 shows the expected cloudiness and graying down from air pollution etc where as the 15 just shines through. Impressed enough that this year pulled the trigger on the rest of the fleet did my kids SUVs 03 Drk blue metallic, Blazer, 06 Silver blue, Trailblazer and my 98 maroon metallic, Silverado. All have received the yearly paint correction or over all Griots 3 or 4 then claying and waxing. The 98 has received this treatment since new and comments are almost daily about the quality of the paint. I am top coating this year with Griots Best of Wax on the rest and trying Pinnacle Series II on the 15. Before the top coating the others their paint just pops with that glass look. May try glazing on a few spots on the older vehicles though they say it may not 'stick'. ProsNot a slick feeling surface, but looks like glass and everything floats off with a simple wash. Water and soap actually bead up and slide off car. Water beading up nicely still even after a year. I re-applied a second coat after a year and will continue to so I can't speak to the 3 yr longetivity. Top coating this year getting a nice deep carnuaba type shine will see how long the top coat lasts compared to with or without the diamond coating underneath. ConsDoesn't cover as many applications as advertised. not even close. extremely fast flash off time, really need under 80 degrees to apply easily or work really small areas and buff quickly. Although I like the Black Label Surface cleaner polish thats recommended its over-priced also. Also the pump on the bottle usually doesn't work 1 out of 3 bottles didn't work (lost half a bottles contents because of that, pump stuck open). 
Plus I really think the 'polish" is so fine not to make a difference. Save some money or elbow grease, I've use Griots paint prep and Blackfire crystal coat paint prep. Both are an easy wipe on wipe off product. and if the paint needs any polish or correction use an advanced swirl polish remover like Blackfire, Pinnacle or Griots #4 before the paint prep. Easy to Use, but is is really better than a sealant or wax? I used this on my 2014 Stingray (black) with great results and was super easy to apply. I first prepped the car with Carpro Essence, and applied this coating 2 days later. I know this may not be the "standard" prep, but I've read from several members that it works great on top of Essence, and decided to give it a try. It flashes almost instantly and as a result, I used way more than I expected to. Every panel took multiple sprays with each spray only spreading a short distance before "disappearing" on the paint. I found it very difficult to tell what was covered and what wasn't due to the almost instant flashing of the product. With that said, I did the whole car in less than an hour. I applied it indoors with low humidity and temp about 70-75 degrees F. I found the following day when I backed it into the sunlight there were several streaks that buffed away easily with a clean microfiber towel. I have not topped it with anything, as that seems to be counterproductive to using a coating in the first place. If it holds up for years as advertised i'll be amazed, but to be honest it's so easy to apply, I will simply add another layer if beading starts to diminish. I'm also curious to see how it does against scratching and micromarring. I am skeptical it will prevent any of this any more than a sealant or wax, but time will tell. ProsEasy to use, "high spots" buff out easily. Really seems pretty idiot proof. 
Very easy to use and the next day i topped it of with pinnacle black label synergy wax and the result was excellent the shine and depth and the way the light bounces was out of this world even the carbon fiber body parts shined like 3 dimension. I recommend this product one of the easiest coating i ever used i hope the durability would last as long as the coating thats in a syringe type like optimum gloss coat or wolfgang ceramic coating but so far it lokks good.
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import calendar
import datetime
import json
import logging
import webapp2
import zlib

from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb

LOGGER = logging.getLogger(__name__)


class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime objects as integer Unix
    timestamps (the timetuple is interpreted as UTC via calendar.timegm;
    parse_alerts below stores datetime.utcnow(), so that matches)."""

    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return calendar.timegm(obj.timetuple())
        # Let the base class default method raise the TypeError.
        return json.JSONEncoder.default(self, obj)


class AlertsJSON(ndb.Model):
    """One historical snapshot of an alerts payload.

    The raw JSON dump is stored zlib-compressed by ndb; 'date' is set
    automatically on first put and is used to order history queries."""
    type = ndb.StringProperty()
    json = ndb.BlobProperty(compressed=True)
    date = ndb.DateTimeProperty(auto_now_add=True)


class AlertsHandler(webapp2.RequestHandler):
    """GET serves the latest alerts from memcache; POST ingests a new
    alerts payload, caching it and appending to datastore history."""

    # memcache key and AlertsJSON.type value for the public alerts feed.
    ALERTS_TYPE = 'alerts'

    # Has no 'response' member.
    # pylint: disable=E1101
    def send_json_headers(self):
        """Emit CORS + JSON content-type headers without a body."""
        self.response.headers.add_header('Access-Control-Allow-Origin', '*')
        self.response.headers['Content-Type'] = 'application/json'

    # Has no 'response' member.
    # pylint: disable=E1101
    def send_json_data(self, data):
        """Write an already-serialized JSON string to the response."""
        self.send_json_headers()
        self.response.write(data)

    def generate_json_dump(self, alerts):
        """Serialize alerts, encoding datetimes as Unix timestamps."""
        return json.dumps(alerts, cls=DateTimeEncoder, indent=1)

    def get_from_memcache(self, memcache_key):
        """Serve the cached, compressed JSON blob for memcache_key.

        On a cache miss only headers are sent (empty 200 body)."""
        compressed = memcache.get(memcache_key)
        if not compressed:
            self.send_json_headers()
            return
        uncompressed = zlib.decompress(compressed)
        self.send_json_data(uncompressed)

    def get(self):
        self.get_from_memcache(AlertsHandler.ALERTS_TYPE)

    def post_to_history(self, alerts_type, alerts):
        """Append alerts to datastore history, but only if the alert-bearing
        fields actually changed since the most recent entry of this type."""
        last_query = AlertsJSON.query().filter(AlertsJSON.type == alerts_type)
        last_entry = last_query.order(-AlertsJSON.date).get()
        last_alerts = json.loads(last_entry.json) if last_entry else {}

        # Only changes to the fields with 'alerts' in the name should cause a
        # new history entry to be saved.
        def alert_fields(alerts_json):
            filtered_json = {}
            for key, value in alerts_json.iteritems():
                if 'alerts' in key:
                    filtered_json[key] = value
            return filtered_json

        if alert_fields(last_alerts) != alert_fields(alerts):
            new_entry = AlertsJSON(
                json=self.generate_json_dump(alerts),
                type=alerts_type)
            new_entry.put()

    # Has no 'response' member.
    # pylint: disable=E1101
    def post_to_memcache(self, memcache_key, alerts):
        """Cache the serialized alerts, zlib-compressed.

        Level 1 favors speed over ratio for these frequently-updated blobs."""
        uncompressed = self.generate_json_dump(alerts)
        compression_level = 1
        compressed = zlib.compress(uncompressed, compression_level)
        memcache.set(memcache_key, compressed)

    def parse_alerts(self, alerts_json):
        """Parse the posted JSON body and stamp it with the current UTC time.

        Returns the dict, or None after setting a 400 status when the body
        is not valid JSON (callers check truthiness before proceeding)."""
        try:
            alerts = json.loads(alerts_json)
        except ValueError:
            warning = 'content field was not JSON'
            self.response.set_status(400, warning)
            LOGGER.warn(warning)
            return
        alerts.update({'date': datetime.datetime.utcnow()})
        return alerts

    def update_alerts(self, alerts_type):
        """Ingest the 'content' form field: cache it and record history."""
        alerts = self.parse_alerts(self.request.get('content'))
        if alerts:
            self.post_to_memcache(alerts_type, alerts)
            self.post_to_history(alerts_type, alerts)

    def post(self):
        self.update_alerts(AlertsHandler.ALERTS_TYPE)


class AlertsHistory(webapp2.RequestHandler):
    """Read-only access to stored AlertsJSON history entries, either one
    entry by datastore key or a cursor-paginated id listing."""

    # Hard cap on page size regardless of the client's 'limit' parameter.
    MAX_LIMIT_PER_PAGE = 100

    def get_entry(self, query, key):
        """Return the decoded JSON for one history entry by integer key.

        Sets 400 for a non-integer key and 404 when the entry is missing
        (or filtered out by the caller's query); returns {} in both cases."""
        try:
            key = int(key)
        except ValueError:
            self.response.set_status(400, 'Invalid key format')
            return {}
        ndb_key = ndb.Key(AlertsJSON, key)
        result = query.filter(AlertsJSON.key == ndb_key).get()
        if result:
            return json.loads(result.json)
        else:
            self.response.set_status(404, 'Failed to find key %s' % key)
            return {}

    def get_list(self, query):
        """Return one page of history entry ids plus pagination state.

        Accepts optional 'cursor' (urlsafe datastore cursor) and 'limit'
        request parameters; limit is clamped to MAX_LIMIT_PER_PAGE."""
        cursor = self.request.get('cursor')
        if cursor:
            cursor = datastore_query.Cursor(urlsafe=cursor)
        limit = int(self.request.get('limit', self.MAX_LIMIT_PER_PAGE))
        limit = min(self.MAX_LIMIT_PER_PAGE, limit)
        if cursor:
            alerts, next_cursor, has_more = query.fetch_page(limit,
                                                             start_cursor=cursor)
        else:
            alerts, next_cursor, has_more = query.fetch_page(limit)
        return {
            'has_more': has_more,
            'cursor': next_cursor.urlsafe() if next_cursor else '',
            'history': [alert.key.integer_id() for alert in alerts]
        }

    def get(self, key=None):
        """Serve either one entry (when the URL captured a key) or a page
        of ids, always including a login URL in the payload."""
        query = AlertsJSON.query().order(-AlertsJSON.date)
        result_json = {}
        user = users.get_current_user()
        result_json['login-url'] = users.create_login_url(self.request.uri)

        # Return only public alerts for non-internal users.
        if not user or not user.email().endswith('@google.com'):
            query = query.filter(AlertsJSON.type == AlertsHandler.ALERTS_TYPE)
        if key:
            result_json.update(self.get_entry(query, key))
        else:
            result_json.update(self.get_list(query))
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(result_json))


app = webapp2.WSGIApplication([
    ('/alerts', AlertsHandler),
    ('/alerts-history', AlertsHistory),
    ('/alerts-history/(.*)', AlertsHistory),
])
The classic weave of a knitted dress creates a garment that is shapely, supple and beautiful. Whether you opt for genuine wool, or a synthetic fibre mix, a knitted dress should be an essential part of your wardrobe. The knitted dress is characterised by the stretchy nature of the design, making it extremely comfortable to wear and flattering for your figure. But this doesn't mean that all knitted dresses look the same. You have a choice of styles when you're looking to buy a knitted dress, ranging from the figure-hugging to the free-flowing oversized version, but all epitomised by a simple and unfussy line. Plain knitted dresses have an undoubted sophistication and you can achieve a completely different look by choosing your colours carefully – pale pastels or soft greys evoke a gentle feminine feel, whilst bright vibrant colours can really make a statement. Patterned knitted dresses are also hugely attractive. You may want a traditional design, such as one based on a Fair Isle pattern or coloured stripes, or you may prefer a modern geometric print-like pattern or a soft floral. Varying the texture of the knit is another way to make a knitted dress look special, perhaps by choosing an Arran cable knit pattern or boucle finish. Where can I wear a knitted dress? A knitted dress is generally more suited to less formal occasions; it is not considered suitable for a very formal occasion such as a dinner dance. Instead its easy styling makes it perfect for office wear as you'll look smart but be able to move easily all day. It's also wonderful for a casual outfit like a day shopping or a weekend away. Lighter knitted dresses, teamed with high-class accessories, can also be worn for semi-formal occasions, such as a job interview or board-room presentation. The knitted dress comes in a wide range of hem lengths, so it's important to choose one that accents your best features. 
If you have a petite frame, then a long hemline can make you look a bit overwhelmed, so a knee-length or mini-length knitted dress will probably suit you better. Statuesque figures can get away with longer dresses, favouring a knee or calf-length hem to avoid making the legs look too long. Shopping for a knitted dress online gives you the opportunity to look at many different dresses before making your final choice. One of the most attractive characteristics of the knitted dress is the choice of necklines, including the round, scoop and cowl designs. Again, pay attention to your body shape when thinking about which is the best dress for you. If you have a shorter neck, avoid a heavy cowl or polo neck as it can swamp your neck. If you have a long swan-like neck, make the most of this with a V-neck or cowl. Other options include the boat neck, the heart-shaped neck and the crew neck, so you've got plenty of options to choose from – just take a look at what's available online. Loose or fitted knitted dress? There's a knitted dress style to fit every figure, but you need to bear a few points in mind when making your choice. You need your dress to emphasise your best features and minimise any less flattering aspects, so think carefully about your overall appearance when you choose. If you are tall and willowy, you've got the option to go for a svelte design which hugs every curve or a slouchy sweater look – your frame will let you carry both off with equal aplomb. Short or very slim figures will look better in a more fitted version, although it may be possible to wear a looser style if you choose your accessories carefully, such as a belt to cinch your waistline and give definition to your silhouette. Petite women should also avoid very heavy knitted fabrics as they can look rather clumpy on a small figure. Curvy outlines can certainly wear a close-fitting style and look amazing, but you may prefer a looser style for greater confidence.
from math import log, pow import os, traceback import Image, ImageDraw def lg(x): return log(x)/log(2.0) def truncDown(n): return int(pow(2,int(lg(n)))) def verifyDir(path): if not os.path.exists(path): print "Creating", path os.mkdir(path) def add_corners(im, rad): circle = Image.new('L', (rad * 2, rad * 2), 0) draw = ImageDraw.Draw(circle) draw.ellipse((0, 0, rad * 2, rad * 2), fill=255) alpha = Image.new('L', im.size, 255) w, h = im.size alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0)) alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad)) alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0)) alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad)) im.putalpha(alpha) return im def genImagePow2(path, opath, ow=None, oh=None, cornerRad=200): path = path.replace("\\", "/") if not (path.endswith(".jpg") or path.endswith(".png")): return print "Opening", path, os.path.exists(path) im = Image.open(path) w,h = im.size if not ow: ow = truncDown(w) if not oh: oh = truncDown(h) size = im.size im = im.resize((ow,oh), Image.ANTIALIAS) if cornerRad: im = add_corners(im, cornerRad) print "Saving", opath, w, h, ow, oh im.save(opath) def genImagesPow2(inputDir, outputDir): verifyDir(outputDir) names = os.listdir(inputDir) for name in names: path = os.path.join(inputDir, name) opath = os.path.join(outputDir, name) try: genImagePow2(path, opath) except: traceback.print_exc() def genImagesPow2Rename(inputDir, outputDir, cornerRad=None): verifyDir(outputDir) names = os.listdir(inputDir) i = 0 for name in names: if not (name.lower().endswith(".jpg") or name.lower().endswith(".png")): continue i += 1 #oname = "image%03d.png" oname = "image%d.png" % i path = os.path.join(inputDir, name) opath = os.path.join(outputDir, oname) try: genImagePow2(path, opath, cornerRad=cornerRad) except: traceback.print_exc() if __name__ == '__main__': """ genImagesPow2Rename("../images", "../imagesPow2") genImagesPow2Rename("../images", "../imagesRoundedPow2", 
cornerRad=200) genImagesPow2Rename("../images/FXPAL/src", "../images/FXPAL/imagesPow2") genImagesPow2Rename("../images/FXPAL/src", "../images/FXPAL/imagesRoundedPow2", cornerRad=200) """ genImagesPow2Rename("../images/Spirals/src", "../images/Spirals/imagesPow2") genImagesPow2Rename("../images/Spirals/src", "../images/Spirals/imagesRoundedPow2", cornerRad=200)
Malta Public Transport is launching a new shuttle service to the University of Malta from the Pembroke Park & Ride. With restricted available parking space in the area, Malta Public Transport is pleased to introduce special new route TD 17: Pembroke – University (Park & Ride) starting just in time for the first lectures on October 1, 2018. Following discussions in light of KSU’s partnership with Malta Public Transport, KSU welcomes this new service as a viable alternative to private vehicle usage. KSU looks forward to collaborating on the expansion of this service in the long term. Additionally, KSU is also addressing the parking and mobility situation on Campus by working on alternative modes of transport such as carpooling, car sharing, pedelec and bicycle commuting and motorcycles. Route TD 17 will operate from Monday to Friday, from Pembroke P&R directly to Qroqq, close to the Msida Skate Park. The service will run every 15 minutes between 07:00 and 18:00 – with frequency changing to 30 minutes between 10:30 and 12:30. As with other Tallinja Direct routes, tariffs are €1.50 for Tallinja card users and €3.00 for cash tickets. The trip consumes 2 credits on a 12 Single Day Journeys card, equivalent to €2.50. A three-month testing period from date of launch will analyse take-up and trends in usage. The trial period will run until the end of 2018.
# Copyright 2016 Rudrajit Tapadar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from pscan import scan


def main():
    """Command-line entry point: parse arguments and run a port scan.

    Builds a Scan over the target (IP or CIDR) and optional port range,
    runs a UDP scan when -sU is given (TCP otherwise), and prints the
    results. Any failure prints the error followed by usage help.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('target', action="store",
                        help="IP or CIDR. eg. 10.10.10.10 or 10.10.10.0/24")
    parser.add_argument('-p', dest='PORT',
                        help="port or port-range. eg. 80 or 80-100.")
    # -sU and -sT are mutually exclusive; -sT defaults to True so TCP is
    # the implicit default scan type.
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('-sU', action='store_true', default=False,
                       help="UDP scan.")
    group.add_argument('-sT', action='store_true', default=True,
                       help="TCP scan. Default is TCP scan.")
    args = parser.parse_args()
    try:
        ports = None
        if args.target:
            target = args.target
            if args.PORT:
                ports = args.PORT
            s = scan.Scan(target, ports)
            print("")
            print("Starting Pscan 1.0\n")
            if args.sU:
                s.udp()
            else:
                s.tcp()
            s.show()
            print("")
    except Exception as e:
        # BUG FIX: Exception.message was removed in Python 3 (PEP 352);
        # str(e) is the portable spelling and prints the same text.
        print(e.__class__.__name__ + ":" + str(e) + "\n")
        parser.print_help()


if __name__ == '__main__':
    main()
10dailybiz.com is a high-end investment and company that manages financial portfolios and investment accounts. Over the years, 10dailybiz.com has developed a reputation for integrity, quality and excellence in portfolio management. We are dedicated to offering unparalleled portfolio management services to a wide range of investors worldwide.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Parse a goods database table and create an ms excel's workbook from it
"""
__author__ = 'remico'

from xml.etree.ElementTree import ElementTree
from abc import ABC, abstractmethod
from bs4 import BeautifulSoup, Tag
import xlsxwriter
import sys, os, glob


class IBuilder(ABC):
    """Builder interface: one convert_* hook per database column, plus a
    row-cursor increment to advance to the next output record."""

    @abstractmethod
    def convert_articul(self, text):
        pass

    @abstractmethod
    def convert_sizes(self, text):
        pass

    @abstractmethod
    def convert_description(self, text):
        pass

    @abstractmethod
    def convert_price(self, text):
        pass

    @abstractmethod
    def convert_price_retail(self, text):
        pass

    @abstractmethod
    def increment_row(self):
        pass


class XlsxBuilder(IBuilder):
    """Concrete builder writing goods records into an .xlsx worksheet."""

    def __init__(self):
        self.filename = "output_.xlsx"
        self.book = xlsxwriter.Workbook(self.filename)
        self.sheet = self.book.add_worksheet("goods")
        self.fill_header()
        self.current_row = 2  # there is the header in the first row
        self.cell_format = self.book.add_format()
        self.cell_format.set_text_wrap()
        self.cell_format.set_align('vjustify')
        # self.cell_format.set_align('top')

    def fill_header(self):
        """Write the bold, yellow header row and fix the column widths."""
        header_format = self.book.add_format()
        header_format.set_align('center')
        header_format.set_align('vcenter')
        header_format.set_bg_color('yellow')
        header_format.set_bold()
        self.sheet.write_string('A1', 'Артикул')
        self.sheet.write_string('B1', 'Описание')
        self.sheet.write_string('C1', 'Цена')
        self.sheet.write_string('D1', 'Розничная цена')
        self.sheet.write_string('E1', 'Размеры')
        self.sheet.set_column('A:A', 50)
        self.sheet.set_column('B:B', 80)
        self.sheet.set_column('C:C', 20)
        self.sheet.set_column('D:D', 20)
        self.sheet.set_column('E:E', 20)
        self.sheet.set_row(0, 25, header_format)
        self.sheet.set_default_row(35)

    def get_result(self):
        """Finalize and close the workbook on disk; returns the workbook."""
        self.book.close()
        print("'%s' created" % self.filename)
        return self.book

    def increment_row(self):
        self.current_row += 1

    def convert_articul(self, text=""):
        # Undo the HTML entity for double quotes left in the dump.
        cleantext = text.replace('&#34;', '"') if text is not None else ""
        self.sheet.write('A%d' % self.current_row, cleantext,
                         self.cell_format)

    def convert_description(self, text=""):
        """Flatten description HTML: table rows become space-joined lines,
        other top-level elements contribute their plain text."""
        cleantext = ""
        if text is not None:
            # BUG FIX: name the parser explicitly; a bare BeautifulSoup(text)
            # emits a warning and may pick a different parser per machine,
            # producing inconsistent output.
            soup = BeautifulSoup(text, 'html.parser')
            rows = []
            # utilize the direct child objects
            for tag in soup.children:
                if not isinstance(tag, Tag):
                    continue
                # parse an html table
                if tag.name == 'table':
                    for row in tag.find_all('tr'):
                        r = ' '.join([col.get_text().strip()
                                      for col in row.find_all('td')])
                        rows.append(r)
                # parse simple html paragraphs
                else:
                    rows.append(tag.get_text().strip())
            cleantext = "\n".join(rows).strip()
        self.sheet.write('B%d' % self.current_row, cleantext,
                         self.cell_format)

    def convert_price(self, text=""):
        self.sheet.write('C%d' % self.current_row, text, self.cell_format)

    def convert_price_retail(self, text=""):
        self.sheet.write('D%d' % self.current_row, text, self.cell_format)

    def convert_sizes(self, text=""):
        self.sheet.write('E%d' % self.current_row, text, self.cell_format)


class GoodsReader(object):
    """Reads the XML dump and feeds each <table> record to the builder."""

    def __init__(self, filename, IBuilder_builder):
        self.doc = ElementTree(file=filename)
        self.database = self.doc.find("database")
        if self.database is None:
            raise LookupError("It seems that the input file is not a dump of "
                              "'gloowi_goods' database table")
        print("Database: '%s'" % self.database.get("name"))
        self.builder = IBuilder_builder

    def parse_goods(self):
        """Convert every record, printing a '#' progress bar (~20 ticks)."""
        goods = self.database.findall('table')
        len_ = len(goods)
        denominator_ = 20
        # BUG FIX: for dumps with fewer than 20 records 'len_ // 20' is 0,
        # and 'i % part_' raised ZeroDivisionError; clamp to at least 1.
        part_ = max(1, len_ // denominator_)
        # BUG FIX: Element.getiterator() was deprecated and removed in
        # Python 3.9; Element.iter() is the supported, equivalent call.
        records = ({column.get('name'): column.text
                    for column in item.iter('column')}
                   for item in goods)
        for i, rec in enumerate(records):
            self.builder.convert_articul(rec['name'])
            self.builder.convert_description(rec['content'])
            self.builder.convert_price(rec['price'])
            self.builder.convert_price_retail(rec['price_retail'])
            self.builder.convert_sizes(rec['har_size'])
            self.builder.increment_row()
            # indicate progress
            if not i % part_:
                print('#', end='' if i < part_ * denominator_ else '\n')
                sys.stdout.flush()


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: %s <xmlFile>" % (sys.argv[0],))
        sys.exit(-1)

    # clear garbage
    for file in glob.glob("output_*.xlsx"):
        os.remove(file)
        print("'%s' removed" % file)

    input_file = sys.argv[1]
    # BUG FIX: builder must exist before the try block -- if XlsxBuilder()
    # itself raised, the old 'finally: builder.get_result()' died with an
    # unrelated NameError that masked the real error.
    builder = XlsxBuilder()
    try:
        parser = GoodsReader(input_file, builder)
        parser.parse_goods()
    finally:
        builder.get_result()
Over the past week, we - the rich-world voter and taxpayer - have bailed out the hedge-funds, their bankers and their counterparts caught in the global squeeze on credit. Again. It happened in 2001 and in 1998. The financial system has once more fallen into the soft, bouncy, but ultimately comfortable safety-net (trampoline?) that we - all of us together, through our central banks and the losses we are prepared to underwrite as taxpayers - extend to troubled financiers. Are we right to keep bailing out the bankers, offering them a safety-net that turns their business into a risk-less, one-way bet? There was an innocent time when we - as voters and taxpayers - were right to always be there as "lenders of last resort". But finance has become self-servingly postmodern too: the banking system knows how to take advantage of the social-security we have extended, and we are only storing up trouble by keeping them afloat. We should resist the temptation to "hug a hedgy". Now is the time for some tough love for the newly stressed and bedraggled hedge-fund managers. Only this will allow the emergence of a fair and stable financial order. But, the fund manager might retort, why endure the pain that a wholesale financial restructuring now would entail? Can't we - that is, you - give in just one more time, and hope that our binge of bad investment is pardoned in the dilutive (and real) forces of technological and global-south catch-up growth? (Related articles: "The 'as if' economist: Milton Friedman's legacy"; "The wisdom of the openDemocracy crowd"; "Tony Blair and centralisation"; "The reach of economics: a reply to Diane Coyle"; "Das Google Problem: is the invisible mouse benevolent?"; "Corporate liability and social interest", 25 July 2007.) This is an offer the rest of us should refuse, for it resembles nothing so much as the argument for repeated concessions to the welfare-Keynesianism of the 1960s and 1970s.
The crisis of that model, and the lack of principled, intellectual and political, resolve among the policy-makers of that era in response to it, eventually undermined the modern dream of fair, full and fruitful employment. So today, the continuation of healthy global growth in the world economy is threatened by institutional blockages, this time from systematically malfunctioning financial agencies that seem at every step too powerful to cross. We live in a moment when technology and trade offer great hope for the development of good lives. This generation must not allow itself to squander through cowardice, as did its predecessor, the opportunity for sustainable economic betterment. The lineaments of crisis are plain. Financial markets have fallen sharply. Central banks have acted in concert as "lenders of last resort'' to troubled funds. Bond dealers show from their trading behaviour that they are no longer expecting interest rates, which had been rising, to rise any further in 2007. The consensus is that the United States federal reserve and the central banks of Europe and Japan have been right to intervene, to offer cash when none others will, in order to avoid a system-wide crisis. The world's finances rely on a basic assumption that markets will continue to exist. If I need to make a cash payment, I will be able to select which of my assets to sell and will actually be able to sell them at some price. In a system-wide crisis, no one wants to trade. There is no price at which anyone can be convinced to hold a contract, because no one knows what its value is. In this circumstance, a fund manager is a helmsman in a storm: aware of every danger of his position but powerless as wind, then waves, batter him here, then there. But unlike the helmsman, the storm is made worse if another ship in the vicinity goes down. 
If a bank actually faces bankruptcy, all the contracts and obligations held by that institution will be bad, thus infecting trust in every part of the financial system. The central banks bail out the funds in order to stop anyone seeing a ship go down, as a way of stemming contagion. That is the defence. This is why we, as citizens and voters the owners of the central banks, lend money in conditions in which no banker would lend. And the argument is strong: contagion and system-wide crisis will have a real impact that will cause hardship: when firms and households find borrowing is hard, demand drops, jobs go ... recession. There is a real case here for us to bail the hedge-funds. But the metaphor of the storm is misleading. Meteorology is not caused - at least not predictably - by the decisions of the helmsmen it affects. Financial crises are. It is because we can be counted on to be lenders of last resort that traders and managers can discount the risks of system-failure and therefore behave imprudently with increasing ease and frequency. The pattern is familiar from the libertarian critique of welfarism: while a safety-net for the deserving poor is good, the existence of the safety-net will create a class of idle, undeserving scroungers. It is hard to be good without encouraging others to be vicious. Fund managers have been enjoying a one-way bet for six years or more. A credit-worthy institution could borrow very cheaply and lend on without any concern about becoming systematically over-stretched. In the extreme case, the Japanese central bank has been lending money almost for free. Those with access to free money could lend it on to those without such privilege and pocket not just the difference, but, through gambles, multiples of the difference. This is the magic of the "carry-trade". The skill of the game - if you listen to its practitioners - is to make sure that the risk is properly packaged. 
The near-zero cost of borrowing is available to the credit-worthy banks; the fact that money costs most of us 7% or more arises because each of us individually might default: if I lose my job, I might miss a few mortgage-payments. The credit-worthy bank, on the other hand, will not default ...even in the face of catastrophe. The incentive to find punters who can be convinced to take on a loan is very clear: their monthly interest-payments are pure profit to the credit-worthy institution which has paid nothing for the money. If that is a hedge-fund, then one in every three dollars, pounds or yen repaid goes into the personal wealth of the masters of the universe at the helm. Anyone looking down from an aeroplane window-seat onto a mid-western American city in the past few years will have seen the outgrowths of the hunt for the loan-hungry. The plane approaches Denver airport. The mile-high plain, a mixture of ranch scrubland and oil-pumping jackdaws - here, they look like an artisanal industry, an accessory of ranching accessory - is cut out by jigsaw-puzzle-pieces of curlicued driveways, tendrils into the emptiness. The street-lighting is there, the houses not yet. A few seconds closer to landing, the wooden frames of the semi-built appear. And now the new suburb, complete with SUV or pick-up. The trees will come later to Linden Avenue. All over America, telephone-mortgage salespeople turned individuals' dreams of owning a piece of ex-scrubland into a commitment to monthly repayments into profit contributions for the funds and eventually into 142-foot yachts like John Devaney's Positive Carry. The carry-trade has been turned into a one-way bet by the financial system's equivalent of welfare-Keynesianism. 
The credit-worthy lender knows that if there is no system-wide turbulence, individual defaults will be weathered through the predictable statistics of the game; but the lender knows also that when everyone has some chance of defaulting together, we will all - as taxpayers - come to the rescue. Each carry-trader faces this tough choice: either I make a packet, or I survive. Compare this to the tough choice faced by Keith Talent, Martin Amis's archetypal wheeler-dealing welfare-scrounger, textbook baddy of neo-liberals set in Thatcherite London: either I hawk these stolen pornographic videos and make a nice profit, or I just wait around for the welfare cheque. In either case, life is pretty good, and certainly presents no strong incentive to develop my capabilities to be productive and useful. We extend social security to bankers because of the appalling consequences of a financial-system failure; we thereby increase the likelihood and frequency of having to bail out the bankers. It is all too reminiscent of late-1970s labour disputes, when the threat or reality of concerted industrial action was all too often met by consent to trade-union demands for pay increases and improved entitlements unrelated to productivity. The result was to worsen the climate of industrial production and increase confrontations between labour and capital. In Britain and America, the denouement of this model was devastating to its political champions and social beneficiaries: the election of Margaret Thatcher (1979) and Ronald Reagan (1980), and the deep recessions of the early 1980s, and the drastic readjustment of labour expectations. But is there anything actually wrong with bailing out the bankers? Why not live with more frequent crises? Just as we once bribed organised labour for an easy life, can we not just view what we do here as bribing the bankers to keep the world economy oiled with the cash it needs to generate the goods and jobs we want? 
There was an innocent time when this deal may have worked. Bankers did their job of assessing risks and allocating savings to worthy investments. When panic threatened, the social-security of last-resort lending was effective. Think of it as the classic Keynesian welfare state or the context for the great successes of the New Deal: honourable behaviour all round, with the edge taken off misfortune. But self-awareness of this mechanism has undermined it, just as welfarism has led to unsustainable levels of abuse. Today, bankers are principally looking for any source of commitment to pay interest, irrespective of its credit-worthiness. A firm desire to have a substantial asset in your name - the house or the SUV - is the soft target at the end of the carry-trade chain. But the real job of the banker should be to judge the realism of the desire, not just its strength or existence. The structural role of finance in the home, the company, or the world economy is as a sort of super-ego, a reality principle. Social insurance for bankers has undermined the traditional virtues of banking: prudence, scepticism, an eye for opportunity, understanding, and has replaced them by the sharp tactics we know from Keith Talent - opportunism, carelessness, foolhardiness. The result of losing the reality principle that finance should incarnate will be felt for years to come: instead of careful investment in useful projects, we will have spent years and billions over-indulging fantasies of ownership. Just as when the personal ego is allowed to run riot, the hangover and depression may be painful. So why should we go through the pain? Out of the end of innocence in welfarism and trade-unionism came the tough love - and also destructiveness - of neo-liberalism. 
But thence also comes the political opportunity for a new social democracy - which the better aspects of New Labour in Britain, of Gerhard Schröder in Germany and Clintonite policies in the United States moved towards: attempts to create a new social contract that allows the best of fairness-based justice to re-emerge. The same pattern should be sought for banking: a financial order based on the virtues of the gentleman-capitalist is brought under repeated strain when those virtues no longer constrain behaviour (the degenerate Keynesian phase). A period of tough but destructive reorganisation is needed (where we are today) before a financial order based on recognised roles and shared responsibilities can emerge (the new social contract we should seek to establish). Much as nostalgics and conservatives might bemoan the old order, and much as libertarians might like to see here an opportunity for the end of social contracts in finance altogether, neither is a real alternative. We want insurance from panic-attacks, so we will need some form of welfarism in financial markets; we have lost forever - and do not want to recreate - the ways of life that made the old order of gentlemanly capitalism effective. The financial crises and asset bubbles we see today are the symptom of a broken system. To borrow a tune from Thatcher, "there is no alternative'' - to the painful path of renewal for our financial system that the other parts of the old order have seen over the last thirty years.
"""Epubber: a small Flask app that turns fan-fiction story URLs into EPUB
downloads, plus short-URL proxies for static assets.

NOTE(review): this module was reconstructed from a whitespace-mangled source;
statement nesting marked below with review notes should be confirmed against
the original deployment.
"""
from __future__ import absolute_import
import re, os, sys

from clay import app
import clay.config

from flask import make_response, request, redirect, render_template, url_for

from epubber.fimfic_epubgen import FimFictionEPubGenerator

# Generator classes probed in order; the first whose handle_url() accepts the
# story URL produces the EPUB.
site_epub_classes = [
    FimFictionEPubGenerator
]

# Dedicated access logger; one line per successfully generated EPUB.
accesslog = clay.config.get_logger('epubber_access')


#####################################################################
#   Main App Views Section
#####################################################################

@app.route('/', methods=['GET', 'POST'])
def main_view():
    # Entry point: with a ?story=<url> query arg, generate and return the
    # EPUB as an attachment; without one, render the landing page.
    story = request.args.get("story") or None
    if story:
        data = None
        for epgenclass in site_epub_classes:
            epgen = epgenclass()
            if epgen.handle_url(story):
                # epub_file is the suggested filename, data the epub bytes.
                epub_file, data = epgen.gen_epub()
                accesslog.info('%(title)s - %(url)s' % epgen.metas)
                # Free the generator before building the response.
                del epgen
                response = make_response(data)
                response.headers["Content-Type"] = "application/epub+zip"
                response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
                return response
            del epgen
        # NOTE(review): assumed to sit after the loop (tried every
        # generator); with a single registered generator the placement is
        # behaviorally equivalent either way -- confirm indentation.
        return ("Cannot generate epub for this URL.", 400)
    return render_template("main.html")


#####################################################################
#   Secondary Views Section
#####################################################################

@app.route('/health', methods=['GET'])
def health_view():
    '''
    Heartbeat view, because why not?
    '''
    return ('OK', 200)


#####################################################################
#   URL Shortener Views Section
#####################################################################

@app.route('/img/<path>', methods=['GET', 'POST'])
def static_img_proxy_view(path):
    '''
    Make shorter URLs for image files.
    '''
    # Sanitize the path component before handing it to url_for.
    path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
    return redirect(url_for('static', filename=os.path.join('img', path)))


@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
    '''
    Make shorter URLs for javascript files.
    '''
    path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
    return redirect(url_for('static', filename=os.path.join('js', path)))


@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
    '''
    Make shorter URLs for CSS files.
    '''
    path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
    return redirect(url_for('static', filename=os.path.join('css', path)))


#####################################################################
#   Main
#####################################################################

def main():
    # Make templates copacetic with UTF8
    # NOTE(review): reload(sys)/setdefaultencoding exist only on Python 2;
    # this line will raise NameError/AttributeError on Python 3 -- confirm
    # the target runtime before porting.
    reload(sys)
    sys.setdefaultencoding('utf-8')

    # App Config
    app.secret_key = clay.config.get('flask.secret_key')

# NOTE(review): main() runs at import time (no __main__ guard) -- presumably
# intentional so that WSGI imports configure the app; verify before changing.
main()


# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap
Regina is one of the pioneers in Brazil of site-specific work, usually in large formats. The cover photo is from her 1991 work, The Divine Comedy, which brought together 146 performing artists and 34 visual artists, occupying the whole Rio de Janeiro Museum of Modern Art. Since then, Regina has explored gardens, galleries, abandoned tenement houses, and more! A few examples portrayed here include the emptied pool of the Visual Arts School of Lage Park, which was filled with sand for Orpheus (2005), and historic places, such as the old slavery port recently rediscovered, for the staging of her choreographic installations Black Market (2014) and Abolition (2015), Rio de Janeiro. Aiming to sharpen the perception of what is informally created in urban spaces, and arising out of the desire to escape usual behaviors and street demarcations by creating new paths and alternative behaviors, each of Miranda's performances proposes a (dis)located body, a nomadic gaze and the construction of temporary schemes, meetings, losses, relationships, and symbolic exchanges. Their figurations announce other possible relationships between people and their surroundings, from a perspective that acknowledges and incorporates presences and absences as equally important events. By emphasizing discrete gestures and displacements, and incorporating the audience as informal performers, these works are presented not as a show, but as an event, which, at the moment it is lived, creates a space of otherness and proposes the practice of freedom.
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Buildgen package version plugin

This parses the list of targets from the yaml build file, and creates custom
version strings for each language's package.
"""

import re  # NOTE: currently unused here; kept since other build plugins import-match this file.

# Languages whose packages get a (possibly language-specific) version string.
LANGUAGES = [
    'core',
    'cpp',
    'csharp',
    'node',
    'objc',
    'php',
    'python',
    'ruby',
]


class Version:
    """A parsed 'major.minor.patch[-tag]' version.

    Attributes:
        major, minor, patch: int components of the version.
        tag: optional prerelease tag (e.g. 'dev', 'pre1'), or None.
    """

    def __init__(self, s):
        self.tag = None
        if '-' in s:
            # Split on the FIRST dash only, so tags that themselves contain a
            # dash (e.g. '1.0.0-pre1-custom') parse instead of raising
            # "too many values to unpack".
            s, self.tag = s.split('-', 1)
        self.major, self.minor, self.patch = [int(x) for x in s.split('.')]

    def __str__(self):
        """Version string in a somewhat idiomatic style for most languages"""
        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
        if self.tag:
            s += '-%s' % self.tag
        return s

    def pep440(self):
        """Version string in Python PEP440 style"""
        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
        if self.tag:
            # we need to translate from grpc version tags to pep440 version
            # tags; this code is likely to be a little ad-hoc
            if self.tag == 'dev':
                s += '.dev0'
            elif self.tag.startswith('pre'):
                # 'preN' maps to the PEP 440 release candidate 'rcN'.
                s += 'rc%d' % int(self.tag[3:])
            else:
                raise Exception(
                    'Don\'t know how to translate version tag "%s" to pep440' % self.tag)
        return s

    def ruby(self):
        """Version string in Ruby style"""
        if self.tag:
            return '%d.%d.%d.%s' % (self.major, self.minor, self.patch, self.tag)
        else:
            return '%d.%d.%d' % (self.major, self.minor, self.patch)

    def php(self):
        """Version string for PHP PECL package"""
        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
        if self.tag:
            if self.tag == 'dev':
                s += 'dev'
            elif self.tag.startswith('pre'):
                # PECL uses 'RCN' for release candidates.
                s += 'RC%d' % int(self.tag[3:])
            else:
                raise Exception(
                    'Don\'t know how to translate version tag "%s" to PECL version' % self.tag)
        return s

    def php_composer(self):
        """Version string for PHP Composer package"""
        # Composer versions never carry the prerelease tag.
        return '%d.%d.%d' % (self.major, self.minor, self.patch)


def mako_plugin(dictionary):
    """Expand version numbers:
    - for each language, ensure there's a language_version tag in
      settings (defaulting to the master version tag)
    - expand version strings to major, minor, patch, and tag

    Args:
        dictionary: the parsed build yaml; mutated in place so that
            settings['version'] and every settings['<lang>_version'] become
            Version instances.
    """
    settings = dictionary['settings']
    master_version = Version(settings['version'])
    settings['version'] = master_version
    for language in LANGUAGES:
        version_tag = '%s_version' % language
        if version_tag in settings:
            # Language override present: parse it.
            settings[version_tag] = Version(settings[version_tag])
        else:
            # No override: inherit the master version object.
            settings[version_tag] = master_version
There’s no denying I’m on my last lap and heading home. I’m healthy and happy, but I know I’m edging closer. Anne Holliday-Abbott lives in Portland with her nine other family members and three cats. There are some stories from her life which will never be told. I enjoy telling family stories. Mistakenly and often I think my children and grandchildren will find it interesting to hear them. My stories are met with eye rolls. Clearly I am a dinosaur. Thus, I have developed my own story about my final homeward journey. I want a long and detailed obituary in the Sunday newspaper. I’m writing it myself. I can’t trust that anyone will include information not typically remembered about me, such as my wild 35 years living and working in Arizona. I don’t trust anyone to know I study the English monarchy for fun. No funeral. I can’t count how many funerals I have attended grousing all the way. Instead, I want my family and friends to eat unlimited lobster dripping with butter on the coast of Maine. In all my earthly homes my bed has been my heaven. I love clean sheets, warm comforters and lots of books stacked up beside me. Knowing this, where do I want my ashes? And ashes it will be. I could ask my family to take me to an oil rig near Houston where I spent many of my early days with my geologist father. No one wants to do that, I know. See above where I talk about going to funerals. My husband has a nice family plot in Westbrook. If I go there I know his father, aunts and grandparents will welcome me with a cup of tea in fine china. My mother is buried in a remote cemetery in Grafton, Vermont. Thing is, her sister is buried next to her and I don’t see myself resting comfortably near Aunt Marge for even a day. My father is in Cochise County, Arizona. That’s out. Too hot. Too many cacti and snakes and no ocean. I love the woods and big bodies of water, but the idea of being scattered anywhere makes me nervous. 
I’ve been scattered this entire lifetime and I’ve had enough of that. If I choose to be made into a locket I see the ultimate outcome, and it feels like a landfill. I want to be in a plain brown box above the fireplace in the home of one of my children. Knowing them as I do, I stand a chance to last the longest with my daughter, Marcy. My free spirited son Andy will probably live on a beach in Bali. From my perch in the living room I will be able to see my great-grandchildren and watch them play Monopoly around the coffee table. When the box becomes too burdensome even for Marcy, someone can get a trowel, dig a hole and bury me in my rose garden. There I will be emphatic in letting my family know I still love them. They will know this by my show of white and orange roses every spring.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""Tests for the crashstats model/API wrappers: graphics-device lookup,
bug associations, signature metadata, version-string resolution, and the
middleware models that proxy Bugzilla and the crash store."""

import random
from urllib.parse import urlparse, parse_qs

import mock
import pytest

from django.core.cache import cache
from django.conf import settings
from django.utils import dateparse

from crashstats.crashstats import models
from crashstats.crashstats.tests.conftest import Response
from crashstats.crashstats.tests.testbase import DjangoTestCase

from socorro.lib import BadArgumentError
from socorro.unittest.external.pubsub import get_config_manager, PubSubHelper


class TestGraphicsDevices(DjangoTestCase):
    def setUp(self):
        super().setUp()
        # Model results are cached; start each test with a clean cache.
        cache.clear()

    def test_get_pairs(self):
        """Test get_pairs() works correctly

        The GraphicsDevice.get_pairs() lets you expand a bunch of (vendor,
        adapter) pairs at the same time. It's more performant since it does
        a single query.

        """
        models.GraphicsDevice.objects.create(
            vendor_hex="vhex3",
            vendor_name="V 3",
            adapter_hex="ahex3",
            adapter_name="A 3",
        )
        models.GraphicsDevice.objects.create(
            vendor_hex="vhex2",
            vendor_name="V 2",
            adapter_hex="ahex2",
            adapter_name="A 2",
        )
        models.GraphicsDevice.objects.create(
            vendor_hex="vhex1",
            vendor_name="V 1",
            adapter_hex="ahex1",
            adapter_name="A 1",
        )

        # Hexes are matched pairwise: (vhex1, ahex1), (vhex2, ahex2).
        r = models.GraphicsDevice.objects.get_pairs(
            ["vhex1", "vhex2"], ["ahex1", "ahex2"]
        )
        expected = {
            ("vhex1", "ahex1"): ("V 1", "A 1"),
            ("vhex2", "ahex2"): ("V 2", "A 2"),
        }
        assert r == expected

        r = models.GraphicsDevice.objects.get_pairs(
            ["vhex2", "vhex3"], ["ahex2", "ahex3"]
        )
        assert len(r) == 2
        expected = {
            ("vhex2", "ahex2"): ("V 2", "A 2"),
            ("vhex3", "ahex3"): ("V 3", "A 3"),
        }
        assert r == expected


class TestBugs(DjangoTestCase):
    # Tests for the Bugs API: signatures -> associated bug ids.
    def setUp(self):
        super().setUp()
        cache.clear()

    def test_get_one(self):
        models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")

        api = models.Bugs()
        resp = api.get(signatures=["OOM | small"])
        assert resp == {
            "hits": [{"id": 999999, "signature": "OOM | small"}],
            "total": 1,
        }

    def test_get_multiple(self):
        models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")
        models.BugAssociation.objects.create(bug_id="1000000", signature="OOM | large")

        api = models.Bugs()
        resp = api.get(signatures=["OOM | small", "OOM | large"])
        assert resp == {
            "hits": [
                {"id": 999999, "signature": "OOM | small"},
                {"id": 1000000, "signature": "OOM | large"},
            ],
            "total": 2,
        }

    def test_related(self):
        # Querying one signature also returns other signatures that share a
        # bug id with it ("OOM | medium" shares bug 999999).
        models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")
        models.BugAssociation.objects.create(bug_id="999999", signature="OOM | medium")
        models.BugAssociation.objects.create(bug_id="1000000", signature="OOM | large")

        api = models.Bugs()
        resp = api.get(signatures=["OOM | small"])
        assert resp == {
            "hits": [
                {"id": 999999, "signature": "OOM | medium"},
                {"id": 999999, "signature": "OOM | small"},
            ],
            "total": 2,
        }


class TestSignaturesByBugs(DjangoTestCase):
    # Inverse of TestBugs: bug ids -> associated signatures.
    def setUp(self):
        super().setUp()
        cache.clear()

    def test_get_one(self):
        models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")

        api = models.SignaturesByBugs()
        resp = api.get(bug_ids=["999999"])
        assert resp == {
            "hits": [{"id": 999999, "signature": "OOM | small"}],
            "total": 1,
        }

    def test_get_multiple(self):
        models.BugAssociation.objects.create(bug_id="999999", signature="OOM | small")
        models.BugAssociation.objects.create(bug_id="1000000", signature="OOM | large")

        api = models.SignaturesByBugs()
        resp = api.get(bug_ids=["999999", "1000000"])
        assert resp == {
            "hits": [
                {"id": 999999, "signature": "OOM | small"},
                {"id": 1000000, "signature": "OOM | large"},
            ],
            "total": 2,
        }


class TestSignatureFirstDate(DjangoTestCase):
    # Tests for the first-seen build/date lookup per signature.
    def setUp(self):
        super().setUp()
        cache.clear()

    def test_get_one(self):
        some_date = dateparse.parse_datetime("2018-10-06T00:22:58.074859+00:00")
        models.Signature.objects.create(
            signature="OOM | Small", first_build="20180920131237", first_date=some_date
        )
        # A second row proves the filter only returns the requested signature.
        models.Signature.objects.create(
            signature="OOM | Large", first_build="20180920131237", first_date=some_date
        )

        api = models.SignatureFirstDate()
        resp = api.get(signatures="OOM | Small")
        assert resp["total"] == 1
        assert resp["hits"] == [
            {
                "first_build": "20180920131237",
                "first_date": "2018-10-06T00:22:58.074859+00:00",
                "signature": "OOM | Small",
            }
        ]

    def test_get_two(self):
        some_date = dateparse.parse_datetime("2018-10-06T00:22:58.074859+00:00")
        models.Signature.objects.create(
            signature="OOM | Small", first_build="20180920131237", first_date=some_date
        )
        models.Signature.objects.create(
            signature="OOM | Large", first_build="20180920131237", first_date=some_date
        )

        api = models.SignatureFirstDate()
        resp = api.get(signatures=["OOM | Small", "OOM | Large"])
        assert resp["total"] == 2
        assert resp["hits"] == [
            {
                "first_build": "20180920131237",
                "first_date": "2018-10-06T00:22:58.074859+00:00",
                "signature": "OOM | Small",
            },
            {
                "first_build": "20180920131237",
                "first_date": "2018-10-06T00:22:58.074859+00:00",
                "signature": "OOM | Large",
            },
        ]


class TestVersionString(DjangoTestCase):
    # Tests resolving (product, channel, build_id) -> version string.
    def setUp(self):
        super().setUp()
        cache.clear()

    def test_bad_args_raise_error(self):
        api = models.VersionString()
        with pytest.raises(models.RequiredParameterError):
            api.get()
        with pytest.raises(models.RequiredParameterError):
            # build_id is required as well.
            api.get(product="Firefox", channel="beta")

    def test_beta(self):
        models.ProductVersion.objects.create(
            product_name="Firefox",
            release_channel="beta",
            build_id="20161129164126",
            version_string="51.0b5",
            major_version=51,
        )

        api = models.VersionString()
        resp = api.get(product="Firefox", channel="beta", build_id="20161129164126")
        assert resp == {"hits": [{"version_string": "51.0b5"}], "total": 1}

    def test_release_rc(self):
        """If the channel is beta, but there aren't versions with 'b' in
        them, then these are release candidates for a final release, so
        return an rc one.

        """
        models.ProductVersion.objects.create(
            product_name="Firefox",
            release_channel="beta",
            build_id="20161104212021",
            version_string="50.0rc2",
            major_version=50,
        )

        api = models.VersionString()
        resp = api.get(product="Firefox", channel="beta", build_id="20161104212021")
        assert resp == {"hits": [{"version_string": "50.0rc2"}], "total": 1}

    def test_beta_and_rc(self):
        """If there are multiple version strings for a given (product,
        channel, build_id), and they have 'b' in them, then we want the
        non-rc one.

        """
        models.ProductVersion.objects.create(
            product_name="Firefox",
            release_channel="beta",
            build_id="20160920155715",
            version_string="50.0b1rc2",
            major_version=50,
        )
        models.ProductVersion.objects.create(
            product_name="Firefox",
            release_channel="beta",
            build_id="20160920155715",
            version_string="50.0b1rc1",
            major_version=50,
        )
        models.ProductVersion.objects.create(
            product_name="Firefox",
            release_channel="beta",
            build_id="20160920155715",
            version_string="50.0b1",
            major_version=50,
        )

        api = models.VersionString()
        resp = api.get(product="Firefox", channel="beta", build_id="20160920155715")
        assert resp == {"hits": [{"version_string": "50.0b1"}], "total": 1}


class TestMiddlewareModels(DjangoTestCase):
    # Tests for models that proxy external services (Bugzilla, crash store,
    # Pub/Sub). External HTTP is mocked via requests.Session.
    def setUp(self):
        super().setUp()
        cache.clear()

    @mock.patch("requests.Session")
    def test_bugzilla_api(self, rsession):
        model = models.BugzillaBugInfo
        api = model()

        def mocked_get(url, **options):
            assert url.startswith(settings.BZAPI_BASE_URL)
            parsed = urlparse(url)
            query = parse_qs(parsed.query)
            assert query["include_fields"] == ["summary,status,id,resolution"]
            return Response(
                {
                    "bugs": [
                        {
                            "status": "NEW",
                            "resolution": "",
                            "id": 123456789,
                            "summary": "Some summary",
                        }
                    ]
                }
            )

        rsession().get.side_effect = mocked_get
        info = api.get("123456789")
        expected = [
            {
                "status": "NEW",
                "resolution": "",
                "id": 123456789,
                "summary": "Some summary",
            }
        ]
        assert info["bugs"] == expected

        # prove that it's cached: the second call returns the FIRST (cached)
        # payload even though the mock now answers differently.
        def new_mocked_get(**options):
            return Response(
                {
                    "bugs": [
                        {
                            "status": "RESOLVED",
                            "resolution": "",
                            "id": 123456789,
                            "summary": "Some summary",
                        }
                    ]
                }
            )

        rsession().get.side_effect = new_mocked_get
        info = api.get("123456789")
        expected = [
            {
                "status": "NEW",
                "resolution": "",
                "id": 123456789,
                "summary": "Some summary",
            }
        ]
        assert info["bugs"] == expected

    @mock.patch("requests.Session")
    def test_bugzilla_api_bad_status_code(self, rsession):
        model = models.BugzillaBugInfo
        api = model()

        def mocked_get(url, **options):
            # Any non-success status should surface as the dedicated error.
            return Response("I'm a teapot", status_code=418)

        rsession().get.side_effect = mocked_get
        with pytest.raises(models.BugzillaRestHTTPUnexpectedError):
            api.get("123456789")

    def test_processed_crash(self):
        model = models.ProcessedCrash
        api = model()

        def mocked_get(**params):
            assert "datatype" in params
            assert params["datatype"] == "processed"

            return {
                "product": "WaterWolf",
                "uuid": "7c44ade2-fdeb-4d6c-830a-07d302120525",
                "version": "13.0",
                "build": "20120501201020",
                "ReleaseChannel": "beta",
                "os_name": "Windows NT",
                "date_processed": "2012-05-25 11:35:57",
                "success": True,
                "signature": "CLocalEndpointEnumerator::OnMediaNotific",
                "addons": [
                    ["testpilot@labs.mozilla.com", "1.2.1"],
                    ["{972ce4c6-7e08-4474-a285-3208198ce6fd}", "13.0"],
                ],
            }

        model.implementation().get.side_effect = mocked_get
        r = api.get(crash_id="7c44ade2-fdeb-4d6c-830a-07d302120525")
        assert r["product"]

    def test_unredacted_crash(self):
        model = models.UnredactedCrash
        api = model()

        def mocked_get(**params):
            assert "datatype" in params
            # Unredacted fetches must request the "unredacted" datatype and
            # include sensitive fields like exploitability.
            assert params["datatype"] == "unredacted"

            return {
                "product": "WaterWolf",
                "uuid": "7c44ade2-fdeb-4d6c-830a-07d302120525",
                "version": "13.0",
                "build": "20120501201020",
                "ReleaseChannel": "beta",
                "os_name": "Windows NT",
                "date_processed": "2012-05-25 11:35:57",
                "success": True,
                "signature": "CLocalEndpointEnumerator::OnMediaNotific",
                "exploitability": "Sensitive stuff",
                "addons": [
                    ["testpilot@labs.mozilla.com", "1.2.1"],
                    ["{972ce4c6-7e08-4474-a285-3208198ce6fd}", "13.0"],
                ],
            }

        model.implementation().get.side_effect = mocked_get
        r = api.get(crash_id="7c44ade2-fdeb-4d6c-830a-07d302120525")
        assert r["product"]
        assert r["exploitability"]

    def test_raw_crash(self):
        model = models.RawCrash
        api = model()

        def mocked_get(**params):
            return {
                "InstallTime": "1339289895",
                "FramePoisonSize": "4096",
                "Theme": "classic/1.0",
                "Version": "5.0a1",
                "Email": "socorro-123@restmail.net",
                "Vendor": "Mozilla",
            }

        model.implementation().get.side_effect = mocked_get
        r = api.get(crash_id="some-crash-id")
        assert r["Vendor"] == "Mozilla"
        assert "Email" in r  # no filtering at this level

    def test_raw_crash_invalid_id(self):
        # NOTE(alexisdeschamps): this undoes the mocking of the implementation so we can test
        # the implementation code.
        models.RawCrash.implementation = self._mockeries[models.RawCrash]
        model = models.RawCrash
        api = model()

        with pytest.raises(BadArgumentError):
            # NOTE(review): this crash id embeds a path-like suffix to assert
            # it is rejected -- presumably guarding path traversal; confirm.
            api.get(crash_id="821fcd0c-d925-4900-85b6-687250180607docker/as_me.sh")

    def test_raw_crash_raw_data(self):
        model = models.RawCrash
        api = model()

        mocked_calls = []

        def mocked_get(**params):
            mocked_calls.append(params)
            assert params["datatype"] == "raw"
            # Return different payloads depending on the requested name.
            if params.get("name") == "other":
                return "\xe0\xe0"
            else:
                return "\xe0"

        model.implementation().get.side_effect = mocked_get
        r = api.get(crash_id="some-crash-id", format="raw")
        assert r == "\xe0"

        r = api.get(crash_id="some-crash-id", format="raw", name="other")
        assert r == "\xe0\xe0"

    @mock.patch("requests.Session")
    def test_massive_querystring_caching(self, rsession):
        # doesn't actually matter so much what API model we use
        # see https://bugzilla.mozilla.org/show_bug.cgi?id=803696
        model = models.BugzillaBugInfo
        api = model()

        def mocked_get(url, **options):
            assert url.startswith(settings.BZAPI_BASE_URL)
            return Response(
                {
                    "bugs": [
                        {
                            "id": 123456789,
                            "status": "NEW",
                            "resolution": "",
                            "summary": "Some Summary",
                        }
                    ]
                }
            )

        rsession().get.side_effect = mocked_get
        # 100 random bug numbers yields a very long querystring; the point is
        # that caching such a request must not blow up.
        bugnumbers = [str(random.randint(10000, 100000)) for __ in range(100)]
        info = api.get(bugnumbers)
        assert info

    def test_Reprocessing(self):
        # This test runs against the Pub/Sub emulator, so undo the mock to let
        # that work.
        self.undo_implementation_mock(models.Reprocessing)
        config_manager = get_config_manager()
        with config_manager.context() as config:
            pubsub_helper = PubSubHelper(config)

            api = models.Reprocessing()

            with pubsub_helper as helper:
                api.post(crash_ids="some-crash-id")
                crash_ids = helper.get_crash_ids("reprocessing")

        assert crash_ids == ["some-crash-id"]

    def test_PriorityJob(self):
        # This test runs against the Pub/Sub emulator, so undo the mock to let
        # that work.
        self.undo_implementation_mock(models.PriorityJob)
        config_manager = get_config_manager()
        with config_manager.context() as config:
            pubsub_helper = PubSubHelper(config)

            api = models.PriorityJob()

            with pubsub_helper as helper:
                api.post(crash_ids="some-crash-id")
                crash_ids = helper.get_crash_ids("priority")

        assert crash_ids == ["some-crash-id"]
A little apology out there to all my readership. Various things occurred in my personal life which affected my availability to run this blog. But don't be concerned too much. I'm back, as Arnie says, and this time I'm better than ever. Keep an eye out for further updates regarding this blog and another MYSTERY site I will be back working in conjunction with.
#!/usr/bin/env python
"""Test form submission

Helpers that build ODK form instances as XML and submit them to an
ODKAggregate server. Each submit_* wrapper assembles the field layout of one
specific form and delegates to submit_from_dict().

NOTE: this module targets Python 2 (urllib2) and requires lxml.
"""
__email__ = "nicolas.maire@unibas.ch"
__status__ = "Alpha"

from lxml import etree
import urllib2
import uuid
import logging

# Device id stamped into every generated instance.
DEVICE_ID = "8d:77:12:5b:c1:3c"


def submit_data(data, url):
    """Submit an instance to ODKAggregate.

    POSTs `data` (an XML string) to `url`. Returns the server's response
    body, or None if an HTTPError occurred (the error is printed, not
    re-raised -- callers treat submission as best-effort).
    """
    r = urllib2.Request(url, data=data, headers={'Content-Type': 'application/xml'})
    try:
        u = urllib2.urlopen(r)
        response = u.read()
        return response
    except urllib2.HTTPError as e:
        print(e.read())
        print(e.code)
        print(e.info())
        print(data)


def submit_from_instance_file(filename, aggregate_url):
    """Read an instance from a file and submit to ODKAggregate"""
    # Use a context manager so the handle is closed even if read() fails.
    with open(filename, 'r') as f:
        data = f.read()
    # Return the server response for consistency with the other helpers.
    return submit_data(data, aggregate_url)


def submit_from_dict(form_dict, aggregate_url):
    """Create an instance from a dict and submit to ODKAggregate.

    `form_dict` has an "id" (form id, used as root element name) and
    "fields": a list of [name, value] pairs; a value that is itself a list
    produces a nested group element.
    """
    root = etree.Element(form_dict["id"], id=form_dict["id"])
    #TODO: deviceid should be added here, but what spelling , Id or id?
    dev_id = etree.SubElement(root, "deviceid")
    dev_id.text = DEVICE_ID
    meta = etree.SubElement(root, "meta")
    inst_id = etree.SubElement(meta, "instanceID")
    # Fresh instance id per submission.
    inst_id.text = str(uuid.uuid1())
    p_b_m = etree.SubElement(root, "processedByMirth")
    p_b_m.text = '0'
    etree.SubElement(root, "start")
    for field in form_dict["fields"]:
        # isinstance instead of type() == list: also accepts list subclasses.
        if isinstance(field[1], list):
            el_par = etree.SubElement(root, field[0])
            for sub_field in field[1]:
                el = etree.SubElement(el_par, sub_field[0])
                el.text = sub_field[1]
        else:
            el = etree.SubElement(root, field[0])
            el.text = field[1]
    logging.debug(form_dict)
    return submit_data(etree.tostring(root), aggregate_url)


def submit_baseline_individual(start, end, location_id, visit_id, fieldworker_id, individual_id, mother_id,
                               father_id, first_name, middle_name, last_name, gender, date_of_birth, partial_date,
                               date_of_visit, aggregate_url):
    """Register an individual during baseline"""
    # dateOfMigration is date of visit by definition
    form_dict = {"id": "baseline",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["migrationType", "BASELINE"], ["locationId", location_id],
                                         ["visitId", visit_id], ["fieldWorkerId", fieldworker_id]]],
                            ["individualInfo", [["individualId", individual_id], ["motherId", mother_id],
                                                ["fatherId", father_id], ["firstName", first_name],
                                                ["middleName", middle_name], ["lastName", last_name],
                                                ["gender", gender], ["religion", "unk"],
                                                ["dateOfBirth", date_of_birth], ["partialDate", partial_date]]],
                            ["dateOfMigration", date_of_visit], ["warning", ""], ["visitDate", date_of_visit],
                            ["majo4mo", "yes"], ["spelasni", "yes"]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_in_migration(start, end, migration_type, location_id, visit_id, fieldworker_id, individual_id, mother_id,
                        father_id, first_name, middle_name, last_name, gender, date_of_birth, partial_date,
                        date_of_migration, aggregate_url):
    """Register an inmigration"""
    form_dict = {"id": "in_migration",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["visitId", visit_id], ["fieldWorkerId", fieldworker_id],
                                         ["migrationType", migration_type], ["locationId", location_id]]],
                            ["individualInfo", [["individualId", individual_id], ["motherId", mother_id],
                                                ["fatherId", father_id], ["firstName", first_name],
                                                ["middleName", middle_name], ["lastName", last_name],
                                                ["gender", gender], ["dateOfBirth", date_of_birth],
                                                ["partialDate", partial_date]]],
                            ["dateOfMigration", date_of_migration], ["warning", ""], ["origin", "other"],
                            ["reason", "NA"], ["maritalChange", "NA"], ["reasonOther", "NA"], ["movedfrom", "NA"],
                            ["shortorlongstay", "NA"]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_death_registration(start, individual_id, first_name, last_name, field_worker_id, visit_id, date_of_death,
                              place_of_death, place_of_death_other, end, aggregate_url):
    """Register a death for an individual."""
    form_dict = {"id": "death_registration",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["fieldWorkerId", field_worker_id], ["visitId", visit_id],
                                         ["individualId", individual_id], ["firstName", first_name],
                                         ["lastName", last_name]]],
                            ["dateOfDeath", date_of_death], ["diagnoseddeath", ''], ["whom", ''],
                            ["causeofdeathdiagnosed", ''], ["causofdeathnotdiagnosed", ''],
                            ["placeOfDeath", place_of_death], ["placeOfDeathOther", place_of_death_other],
                            ["causeOfDeath", '']
                            ]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_death_of_hoh_registration(start, end, individual_id, household_id, new_hoh_id, field_worker_id, gender,
                                     death_within_dss, death_village, have_death_certificate, visit_id,
                                     cause_of_death, date_of_death, place_of_death, place_of_death_other,
                                     aggregate_url):
    """Register the death of a head of household and the successor head."""
    #TODO: update form fields to lastest
    form_dict = {"id": "DEATHTOHOH",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["visitId", visit_id], ["fieldWorkerId", field_worker_id],
                                         ["householdId", household_id], ["individualId", individual_id],
                                         ["firstName", "first"], ["lastName", "last"],
                                         ["new_hoh_id", new_hoh_id]]],
                            ["gender", gender], ["deathWithinDSS", death_within_dss],
                            ["deathVillage", death_village],
                            ["haveDeathCertificate", have_death_certificate], ["causeOfDeath", cause_of_death],
                            ["dateOfDeath", date_of_death], ["placeOfDeath", place_of_death],
                            ["placeOfDeathOther", place_of_death_other],
                            ]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_location_registration(start, hierarchy_id, fieldworker_id, location_id, location_name, ten_cell_leader,
                                 location_type, geopoint, end, aggregate_url):
    """Register a new location within a hierarchy."""
    form_dict = {"id": "location_registration",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["fieldWorkerId", fieldworker_id], ["hierarchyId", hierarchy_id],
                                         ["locationId", location_id]]],
                            ["locationName", location_name], ["tenCellLeader", ten_cell_leader],
                            ["locationType", location_type], ["geopoint", geopoint]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_membership(start, individual_id, household_id, fieldworker_id, relationship_to_group_head, start_date,
                      end, aggregate_url):
    """Register an individual's membership in a household group."""
    form_dict = {"id": "membership",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["householdId", household_id], ["fieldWorkerId", fieldworker_id],
                                         ["individualId", individual_id]]],
                            ["relationshipToGroupHead", relationship_to_group_head], ["startDate", start_date]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_out_migration_registration(start, end, individual_id, fieldworker_id, visit_id, first_name, last_name,
                                      date_of_migration, name_of_destination, reason_for_out_migration,
                                      marital_change, aggregate_url):
    """Register an out-migration for an individual."""
    form_dict = {"id": "out_migration_registration",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["individualId", individual_id], ["fieldWorkerId", fieldworker_id],
                                         ["visitId", visit_id], ["firstName", first_name],
                                         ["lastName", last_name]]],
                            ["dateOfMigration", date_of_migration], ["nameOfDestination", name_of_destination],
                            ["reasonForOutMigration", reason_for_out_migration],
                            ["maritalChange", marital_change]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_pregnancy_observation(start, end, estimated_age_of_preg, individual_id, fieldworker_id, visit_id,
                                 exptected_delivery_date, recorded_date, aggregate_url):
    """Record a pregnancy observation during a visit."""
    form_dict = {"id": "pregnancy_observation",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["fieldWorkerId", fieldworker_id], ["visitId", visit_id],
                                         ["individualId", individual_id], ["recordedDate", recorded_date]]],
                            ["estimatedAgeOfPreg", estimated_age_of_preg], ["pregNotes", "1"],
                            ["ageOfPregFromPregNotes", estimated_age_of_preg], ["anteNatalClinic", "YES"],
                            ["lastClinicVisitDate", recorded_date], ["healthfacility", "1"],
                            ["medicineforpregnancy", "NO"], ["ttinjection", "YES"],
                            ["othermedicine", "othermedicine"], ["pregnancyNumber", "1"],
                            ["expectedDeliveryDate", exptected_delivery_date]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_pregnancy_outcome(start, mother_id, father_id, visit_id, fieldworker_id, nboutcomes, partial_date,
                             birthingplace, birthing_assistant, hours_or_days_in_hospital, hours_in_hospital,
                             caesarian_or_natural, total_number_children_still_living, attended_anc,
                             number_of_attendances, recorded_date, end, aggregate_url):
    """Record the outcome of a pregnancy."""
    form_dict = {"id": "pregnancy_outcome",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["visitId", visit_id], ["fieldWorkerId", fieldworker_id],
                                         ["motherId", mother_id], ["fatherId", father_id]]],
                            ["nboutcomes", nboutcomes], ["partialDate", partial_date],
                            ["birthingPlace", birthingplace], ["birthingAssistant", birthing_assistant],
                            ["hoursOrDaysInHospital", hours_or_days_in_hospital],
                            ["hoursInHospital", hours_in_hospital],
                            ["caesarianOrNatural", caesarian_or_natural],
                            ["totalNumberChildrenStillLiving", total_number_children_still_living],
                            ["attendedANC", attended_anc], ["numberOfANCAttendances", number_of_attendances],
                            ["recordedDate", recorded_date]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_relationship(start, individual_a, individual_b, fieldworker_id, relationship_type, start_date, end,
                        aggregate_url):
    """Register a relationship between two individuals."""
    form_dict = {"id": "relationship",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["fieldWorkerId", fieldworker_id], ["individualA", individual_a],
                                         ["individualB", individual_b]]],
                            ["relationshipType", relationship_type], ["startDate", start_date]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_social_group_registration(start, household_id, individual_id, field_worker_id, group_name,
                                     social_group_type, end, aggregate_url):
    """Register a social group headed by an individual."""
    form_dict = {"id": "social_group_registration",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["fieldWorkerId", field_worker_id], ["householdId", household_id],
                                         ["individualId", individual_id]]],
                            ["groupName", group_name], ["socialGroupType", social_group_type]]}
    return submit_from_dict(form_dict, aggregate_url)


def submit_visit_registration(start, visit_id, field_worker_id, location_id, round_number, visit_date,
                              interviewee_id, correct_interviewee, farmhouse, coordinates, end, aggregate_url):
    """Register a fieldworker visit to a location."""
    form_dict = {"id": "visit_registration",
                 "fields": [["start", start], ["end", end],
                            ["openhds", [["visitId", visit_id], ["fieldWorkerId", field_worker_id],
                                         ["locationId", location_id], ["roundNumber", round_number]]],
                            ["visitDate", visit_date], ["intervieweeId", interviewee_id],
                            ["correctInterviewee", correct_interviewee], ["realVisit", "1"],
                            ["farmhouse", farmhouse], ["coordinates", coordinates]]}
    return submit_from_dict(form_dict, aggregate_url)
Our attention was immediately drawn to the ominous warning signs of land mines. 2. SKIRT (VERB): border; be on the edge. The dust skirted near the window panes. Government is thinking of waiving of import duty on life saving drugs. She is quite intelligent and is in the habit of making quips. I can’t handle more setback in production. The police acted with agility and apprehended the robbers. 7. JUXTAPOSITION (NOUN): the act of placing two things next to each other for implicit comparison. He left for Mumbai because he got tired of jejune life in his home town. These plants ramify early and get to be very large. Don’t be beguiled by the pleasant manners of hypocrites.
# Copyright (c) The University of Edinburgh 2015 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Tests for simple sequential processing engine. Using nose (https://nose.readthedocs.org/en/latest/) run as follows:: $ nosetests dispel4py/test/workflow_graph_test.py ''' from nose import tools from dispel4py.workflow_graph import WorkflowGraph from dispel4py.workflow_graph import draw from dispel4py.examples.graph_testing.testing_PEs \ import TestProducer, TestOneInOneOut from dispel4py.base import create_iterative_chain def test_types(): graph = WorkflowGraph() prod = TestProducer() cons = TestOneInOneOut() graph.connect(prod, 'output', cons, 'input') graph.propagate_types() tools.eq_(prod.outputconnections['output']['type'], cons.inputconnections['input']['type']) def test_dot_pipeline(): graph = WorkflowGraph() prod = TestProducer() cons = TestOneInOneOut() graph.connect(prod, 'output', cons, 'input') draw(graph) def test_dot_composite(): def inc(a): return a+1 def dec(a): return a-1 graph = WorkflowGraph() prod = TestProducer() comp = create_iterative_chain([inc, dec]) cons = TestOneInOneOut() graph.connect(prod, 'output', comp, 'input') graph.connect(comp, 'output', cons, 'input') graph.inputmappings = {'input': (prod, 'input')} root_prod = TestProducer() root_graph = WorkflowGraph() root_graph.connect(root_prod, 'output', graph, 'input') dot = draw(root_graph) tools.ok_('subgraph cluster_' in dot)
Are you facing an unplanned pregnancy? Perhaps the most important thing to know as an expectant mother in Kentucky is that you have options. No matter what you decide for your baby, know that the choice is always yours to make. At Adoptions With Love, we want you to know that you will never have to make this choice alone. We are here to listen and answer questions, to educate you on adoption in Kentucky, and to help you make the most positive, long-term decision for you and your baby. If you are contemplating making an adoption plan, you have come to the right place. Adoptions With Love is a licensed, non-profit adoption agency who helps expectant/birth parents nationwide find the best possible homes for their children. In the state of Kentucky, there are specific steps you will need to take to place your baby for adoption. You do not have to go through this on your own. As a reputable adoption agency serving Kentucky for over 30 years, we can help guide you through this journey. We are here to ensure that you understand the adoption process and are comfortable with each decision made along the way. 1. Choose an adoption agency. As you begin your adoption plan, your first step will be to choose the right adoption support. There are many adoption professionals who can help you, but it is important to find someone that you truly trust throughout this unexpected journey. Choose an adoption agency that will discuss your options with you, listen to your wishes, and respect any choice you make. Select an adoption agency that will also educate you on the adoption laws of your area, and provide you the assistance you need and deserve. 2. Meet with an adoption counselor. As you begin your adoption plan, you should meet regularly with a licensed, compassionate adoption social worker. At Adoptions With Love, we feel this is a crucial part of the adoption process. 
We want to make sure that you have the opportunity to consider all of your choices, learn about all of your birth mother rights, and understand exactly what to expect before, during, and after an adoption takes place. 3. Understand the adoption laws in Kentucky. Adoption laws vary state to state. In Kentucky, no parent can sign legal adoption documents until at least 72 hours after the baby is born. Adoptions With Love recommends that you take time to rest after your baby’s birth before making this decision. There are many other laws about the financial aid you may receive, your rights and responsibilities as an expectant/birth mother, as well as the rights of your baby’s biological father. For this reason, it is crucial to work with an adoption agency that has attorneys specifically trained in the state of Kentucky. At Adoptions With Love, you will always have the opportunity to choose an adoptive family for your baby. After listening to your wishes and vision of the perfect family, we will send you detailed photo albums and personal profiles from the waiting families that best meet your needs. Once you choose a family for your baby, you can speak with them through email or phone, or meet them in person. This is completely up to you. No matter what family you choose, rest assured you will be placing your baby in a loving, safe and secure home. In Kentucky, it is required that all potential adoptive families are thoroughly screened by a licensed adoption agency. All families at Adoptions With Love have gone through an extensive home study process as well as a series of background checks to ensure the safety and stability of their home. If you choose to design an adoption plan with Adoptions With Love, you will have the option of meeting your child’s adoptive family and establishing a plan for communication following your baby’s adoption.
Whether you choose an open adoption, semi-open adoption, or closed adoption plan, our trained social workers will help you as you consider all of your options for post-adoption contact with your child, his or her adoptive family, and our adoption agency professionals. If you choose adoption for your baby, we encourage you to pursue counseling and support services after your baby is placed. Adoptions With Love offers ongoing, confidential counseling services that are available at no cost to you. We can help you navigate emotions, communication, and a relationship after the adoption takes place. We will always be here for you. Whether you just found out you are pregnant, are in your final trimester, or have already given birth to your baby, it is never too late to start an adoption plan. Contact Amy, Nancy, Claudia, or Amelia today at 1-800-722-7731 for more information on adoption in Kentucky. ← Is Your Baby at Risk for Neonatal Abstinence Syndrome?
import sys
import os
import math

import scipy.io.wavfile
import scipy.signal
import matplotlib.pyplot as plt
# from matplotlib.pyplot import specgram

# Plot a grid of spectrograms, one per .wav file found in the directory
# given as the first command line argument; the figure is saved to
# 'Spectrogram.png' and then shown.
os.chdir(sys.argv[1])

# BUG FIX: the original used endswith("wav"), which also matches names
# like "foo.abcwav"; match the actual ".wav" extension instead.
wavfiles = []
for wavfile in os.listdir(sys.argv[1]):
    if wavfile.endswith(".wav"):
        wavfiles.append(wavfile)
wavfiles.sort()

# Read sampling rate and sample array for every file.
sampling_rates = []
song_arrays = []
for wavfile in wavfiles:
    sampling_rate, song_array = scipy.io.wavfile.read(wavfile)
    sampling_rates.append(sampling_rate)
    song_arrays.append(song_array)

# BUG FIX: the subplot grid was hard-coded to 10x10, which fails for more
# than 100 files and wastes space for fewer; size a square grid to fit
# however many files were found.
grid = max(1, math.ceil(math.sqrt(len(wavfiles))))

# Plot a spectrogram for each wave file (first 30000 samples only, to
# keep the per-panel rendering cheap and readable).
for i, (song_id, song_array, sampling_rate) in enumerate(
        zip(wavfiles, song_arrays, sampling_rates), start=1):
    plt.subplot(grid, grid, i)
    # plt.title(song_id)
    plt.specgram(song_array[:30000], Fs=sampling_rate)
    print("Plotting spectrogram of song_id: " + song_id)

plt.savefig('Spectrogram.png')
plt.show()
A unique sports weekend, that focuses on supporting the youth sports, sporting families with children, seniors, people with disabilities, as well as professional or unprofessional sportsmen from various sports sectors. This event is known for its improving standards, and it has become a significant sporting event in Slovakia. The organizers feel highly motivated to keep improving their services and to expand the possibilities of this event. After the great success in 2016, 2017 and 2018 at the 5th annual event, it has been granted the sponsorship of the Ministry of Education, Science, Research and Sport of the Slovak Republic and since 2018 we get off as First Slovak Fitness Festival. The project, Open Air Gym Fitness Festival 2019, will be the 6th Annual Open Air Gym event, which were in the past held at the Thermal Baths of E. Tatárik, Nové Zámky and the 2018 event was held in Natiol Expo centre in Nitra. The purpose of the event is to give the public an opportunity to exercise all different types of sports, under the guidance of many excellent domestic and foreign trainers, lecturers and famous sportsmen from the given field. Moreover, qualified professionals will be holding lectures and seminars for public. Lets not forget about the children and seniors, either. Many trainings and lectures are being organised for them, too. Children are welcome to enjoy dynamic games, trainings, motoric and reflex development oriented games, trampolines, face painting etc. Many sellers of high quality food, sportswear, aids and food supplements for sport activities and regenaration, as well as massage services, kinesio taping, cryocompress, physiotherapy and many more, are also being presented at the events‘exhibition. A weekend, full of sports activities for the whole family, from its youngest to the oldest members, accompanied with many sports related programs. 
A sports weekend that focuses on supporting the youth sports, sporting families with children, seniors, people with disabilities, as well as professional or unprofessional sportsmen of the Open Air Gym Fitness Festival. Where is the Open Air Gym event held? We are happy to see the increasing numbers of visitors of this event. We see whole families joining us, young people, who are eager to find out all the updates from the world of sports, healthy lifestyle, relaxation or regeneration. This is a weekend, where the families and friends spend some active time together, come to many adventures and get the opportunity to meet professional sportsmen from different fields. Moreover, this is a place, where everyone can try different types of exercises on their own, from crossfit or weightlifting, through energetic Zumba, functional trainings, martial arts, trampolines, poledance or meditation with yoga. The visitors get the opportunity to find out more about various kinds of sports and health related news. Throughout the weekend, this event makes a major contribution to the general public's view on sports and health, all in a light and fun atmosphere. So be one of us! CrossFit®, Functional trainings, KB trainings, TRX trainings, Weightlifting under the supervision of the BBC Dukla Division. A wide choice of sports equipment, clothing, nutritional supplements and many culinary delights is a must this year, too. You may look forward to a food corner with a great deal of healthy food specialities or fruit refreshments. Our goal – exercise for us all! The main purpose of Open Air Gym Fitness Festival is to continue in our hard work to further develop sport activities to the general public, to support people in their practical methods, to help them lead a healthy lifestyle, as well as develop their sports abilities. We feel motivated to participate in projects or support and organise sports events.
We are giving you an opportunity to help your children spend their free time wisely and to keep the youth, adults or seniors physically and mentally active and healthy.
''' search for geographic tweets

Flat script: runs one geocoded, date-bounded Twitter search around San
Francisco via the project-local twitter_tools module.  The large
commented-out sections are an earlier beer-review search pipeline kept
for reference.

NOTE(review): `import cPickle` and the `print '...'` statements in the
commented-out code are Python 2 only; the live code appears to have been
partially ported (it ends with a Python 3 style print call).
'''
import pandas as pd
import numpy as np
import cPickle
# # import MySQLdb as mdb
# import pymysql as mdb
import time
import twitter_tools
# from authent import dbauth as authsql
import pdb

# load beer names with >500 ratings
# sql='''
# SELECT beers.beername, beers.id
# FROM beers
# JOIN revstats ON beers.id=revstats.id
# WHERE revstats.nreviews>500;
# '''
# con=mdb.connect(**authsql)
# print 'Loading neighborhoods'
# df=pd.io.sql.read_frame(sql,con)
# beers=list(df['neighborhoods'])
# ids=list(df['id'])
# totalnum=len(beers)
# print 'Found %i beers'%totalnum

# # NB: tweets seem to come in from outside bounding box
# bayArea_bb_twit = [-122.75,36.8,-121.75,37.8] # from twitter's dev site
# bayArea_bb_me = [-122.53,36.94,-121.8,38.0] # I made this one

# Searches twitter backwards in time within a fixed one-day window.
query = "since:2014-09-02 until:2014-09-03"
# Geocode string: latitude,longitude,radius (centre of San Francisco).
sf_center = "37.75,-122.44,4mi"

# count = 100
# results = twitter_tools.TwitSearchGeoOld(query,sf_center,count,twitter_tools.twitAPI)

count = 100          # results per page requested from the API
max_tweets = 1000    # overall cap on collected tweets
results = twitter_tools.TwitSearchGeo(query, sf_center, count, max_tweets,
                                      twitter_tools.twitAPI)

# NOTE(review): debugging leftover — drops into the interactive debugger
# whenever the search returned anything; remove before unattended runs.
if len(results) > 0:
    pdb.set_trace()

# # search twitter for beers and save out to dataframe
# count=0
# tweetholder=[]
# for bn in beers:
#     searchstr='"'+bn+'"'
#     print 'On %i of %i'%(count+1,totalnum)
#     results = twittertools.TwitSearch(searchstr,twittertools.twitAPI)
#     tweetholder.append(results)
#     count+=1

print('Done.')

# save
# timeint = np.int(time.time())
# cPickle.dump(tweetholder,open('tweetsearch_%i.cpk'%timeint,'w'))
Mandatory meeting required for ALL chicken exhibitors on Thursday, November 8, 2018 @ 6:00pm in the Extension Auditorium. Young people who wish to exhibit their poultry projects at the Southeastern Youth Fair must document the progress of their chickens and describe what they learned by keeping their record book. The judge looks for birds with proper body conformation, sexual maturity, and healthy plumage condition and skin color. The completed record book, detailing the work done on this project, must be turned in with the birds in order for the exhibitor's chickens to compete in this show.
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import hashlib
import time
import datetime

from six.moves.urllib.parse import quote
from optionaldict import optionaldict

from wechatpy.utils import to_binary
from wechatpy.client.api.base import BaseWeChatAPI


class WeChatCustomService(BaseWeChatAPI):
    """WeChat customer-service ("kf") API wrapper.

    Each method wraps one HTTP endpoint of the WeChat Official Account
    customer service API and returns the parsed JSON response produced by
    BaseWeChatAPI._get/_post.
    """

    def add_account(self, account, nickname, password):
        """Add a customer service account.

        Docs: http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

        :param account: full kf account, formatted "prefix@mp_account"
        :param nickname: kf nickname, at most 6 Chinese or 12 Latin characters
        :param password: login password (sent as its MD5 hex digest)
        :return: parsed JSON response
        """
        # The endpoint expects the MD5 hex digest of the password, not the
        # plain text; encode to bytes first for hashlib.
        password = to_binary(password)
        password = hashlib.md5(password).hexdigest()
        return self._post(
            'https://api.weixin.qq.com/customservice/kfaccount/add',
            data={
                'kf_account': account,
                'nickname': nickname,
                'password': password
            }
        )

    def update_account(self, account, nickname, password):
        """Update a customer service account.

        Docs: http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

        :param account: full kf account, formatted "prefix@mp_account"
        :param nickname: kf nickname, at most 6 Chinese or 12 Latin characters
        :param password: login password (sent as its MD5 hex digest)
        :return: parsed JSON response
        """
        password = to_binary(password)
        password = hashlib.md5(password).hexdigest()
        return self._post(
            'https://api.weixin.qq.com/customservice/kfaccount/update',
            data={
                'kf_account': account,
                'nickname': nickname,
                'password': password
            }
        )

    def delete_account(self, account):
        """Delete a customer service account.

        Docs: http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

        :param account: full kf account, formatted "prefix@mp_account"
        :return: parsed JSON response
        """
        # Build the query string by hand so the account gets percent-quoted
        # while '/' and '@' stay literal.
        params_data = [
            'access_token={0}'.format(quote(self.access_token)),
            'kf_account={0}'.format(quote(to_binary(account), safe=b'/@')),
        ]
        params = '&'.join(params_data)
        return self._get(
            'https://api.weixin.qq.com/customservice/kfaccount/del',
            params=params
        )

    def get_accounts(self):
        """Return the list of customer service accounts.

        Docs: http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

        :return: the "kf_list" entry of the JSON response
        """
        res = self._get('customservice/getkflist')
        return res['kf_list']

    def upload_headimg(self, account, media_file):
        """Upload an avatar image for a customer service account.

        Docs: http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

        :param account: full kf account
        :param media_file: the avatar to upload, a file-like object
        :return: parsed JSON response
        """
        return self._post(
            'https://api.weixin.qq.com/customservice/kfaccount/uploadheadimg',
            params={
                'kf_account': account
            },
            files={
                'media': media_file
            }
        )

    def get_online_accounts(self):
        """Return information on customer service agents currently online.

        Docs: http://mp.weixin.qq.com/wiki/9/6fff6f191ef92c126b043ada035cc935.html

        :return: the "kf_online_list" entry of the JSON response
        """
        res = self._get('customservice/getonlinekflist')
        return res['kf_online_list']

    def create_session(self, openid, account, text=None):
        """Create a customer service session with a user.

        Docs: http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html

        :param openid: the customer's openid
        :param account: full kf account
        :param text: optional additional message text
        :return: parsed JSON response
        """
        # optionaldict drops keys whose value is None (e.g. `text`).
        data = optionaldict(
            openid=openid,
            kf_account=account,
            text=text
        )
        return self._post(
            'https://api.weixin.qq.com/customservice/kfsession/create',
            data=data
        )

    def close_session(self, openid, account, text=None):
        """Close a customer service session with a user.

        Docs: http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html

        :param openid: the customer's openid
        :param account: full kf account
        :param text: optional additional message text
        :return: parsed JSON response
        """
        data = optionaldict(
            openid=openid,
            kf_account=account,
            text=text
        )
        return self._post(
            'https://api.weixin.qq.com/customservice/kfsession/close',
            data=data
        )

    def get_session(self, openid):
        """Get the session state of a single customer.

        Docs: http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html

        :param openid: the customer's openid
        :return: parsed JSON response
        """
        return self._get(
            'https://api.weixin.qq.com/customservice/kfsession/getsession',
            params={'openid': openid}
        )

    def get_session_list(self, account):
        """Get the list of sessions handled by one kf account.

        Docs: http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html

        :param account: full kf account
        :return: the "sessionlist" entry of the JSON response
        """
        res = self._get(
            'https://api.weixin.qq.com/customservice/kfsession/getsessionlist',
            params={'kf_account': account}
        )
        return res['sessionlist']

    def get_wait_case(self):
        """Get the list of sessions waiting to be picked up.

        Docs: http://mp.weixin.qq.com/wiki/2/6c20f3e323bdf5986cfcb33cbd3b829a.html

        :return: parsed JSON response
        """
        return self._get(
            'https://api.weixin.qq.com/customservice/kfsession/getwaitcase'
        )

    def get_records(self, start_time, end_time, page_index,
                    page_size=10, user_id=None):
        """Fetch customer service chat records.

        Docs: http://mp.weixin.qq.com/wiki/19/7c129ec71ddfa60923ea9334557e8b23.html

        :param start_time: query start, a UNIX timestamp or datetime
        :param end_time: query end, a UNIX timestamp or datetime; a single
            query must not span more than one day
        :param page_index: 1-based page number
        :param page_size: page size, at most 1000 records per page
        :param user_id: optional openid of one user (unique per account)
        :return: the "recordlist" entry of the JSON response
        """
        # Accept datetime objects as well as raw UNIX timestamps.
        if isinstance(start_time, datetime.datetime):
            start_time = time.mktime(start_time.timetuple())
        if isinstance(end_time, datetime.datetime):
            end_time = time.mktime(end_time.timetuple())
        record_data = {
            'starttime': int(start_time),
            'endtime': int(end_time),
            'pageindex': page_index,
            'pagesize': page_size
        }
        if user_id:
            record_data['openid'] = user_id
        res = self._post(
            'https://api.weixin.qq.com/customservice/msgrecord/getrecord',
            data=record_data
        )
        return res['recordlist']
Published 04/23/2019 10:01:35 pm at 04/23/2019 10:01:35 pm in Door Locks From Inside. door locks from inside 68mm entirety 304 stainless steel lock body lock door lock left inside to open the door free shipping in locks from home improvement on aliexpresscom door locks from inside. door locks from inside,front door locks from inside,door locks from inside only,rv screen door latch from inside,releasing door latch from inside 2008 armada door,bedroom door locks from inside and outside,98 civic cut door latch from inside,apartment door locks from inside,door latch inside car,classroom door locks from inside,door latch from inside.
from pywingui.windows import *
from pywingui.wtl import *
from ctypes import c_char

# Load the Scintilla editor DLL up front; without it the "Scintilla"
# window class cannot be created, so fail early with a message box.
try:
    LoadLibrary("SciLexer.DLL")
#except Exception, e:
except:# for compatibility with Python 3 version
    MessageBox(0, "The Scintilla DLL could not be loaded.", "Error loading Scintilla", MB_OK | MB_ICONERROR)
    #~ raise e

from .scintilla_constants import *


class SCNotification(Structure):
    # ctypes mirror of Scintilla's SCNotification struct: an NMHDR header
    # followed by message-specific fields.
    # NOTE(review): `text` is declared c_wchar_p here while Scintilla's
    # struct member is a char pointer — confirm against the DLL build in use.
    _fields_ = [("nmhdr", NMHDR),
                ("position", c_int),
                ("ch", c_int),
                ("modifiers", c_int),
                ("modificationType", c_int),
                ("text", c_wchar_p),
                ("length", c_int),
                ("linesAdded", c_int),
                ("message", c_int),
                ("wParam", WPARAM),
                ("lParam", LPARAM),
                ("line", c_int),
                ("foldLevelNow", c_int),
                ("foldLevelPrev", c_int),
                ("margin", c_int),
                ("listType", c_int),
                ("x", c_int),
                ("y", c_int)]


copyright = \
"""
Scintilla Copyright 1998-2003 by Neil Hodgson <neilh@scintilla.org>
All Rights Reserved
"""


class Scintilla(Window):
    """Thin wrapper around a Scintilla edit control.

    Every operation is performed by sending SCI_* window messages to the
    control; see the Scintilla documentation for each message's contract.
    """
    _window_class_ = "Scintilla"
    _window_style_ = WS_VISIBLE | WS_CHILD

    def __init__(self, *args, **kwargs):
        Window.__init__(self, *args, **kwargs)
        self.InterceptParent()

    def GetNotification(self, event):
        # Reinterpret the WM_NOTIFY lParam as an SCNotification struct.
        return SCNotification.from_address(int(event.lParam))

    def SendScintillaMessage(self, msg, wParam, lParam):
        # TODO: use the fast path, i.e. retrieve the direct message
        # function from Scintilla as described in the Scintilla docs.
        return windll.user32.SendMessageA(self.handle, msg, wParam, lParam)
        #~ return self.SendMessage(msg, wParam, lParam)

    def SetText(self, txt):
        self.SendScintillaMessage(SCI_SETTEXT, 0, txt)

    def GetLexer(self):
        return self.SendScintillaMessage(SCI_GETLEXER, 0, 0)

    def SetLexerLanguage(self, lang):
        self.SendScintillaMessage(SCI_SETLEXERLANGUAGE, 0, lang)

    def SetStyleBits(self, key, value):
        self.SendScintillaMessage(SCI_SETSTYLEBITS, key, value)

    def SetMarginWidth(self, width = 0):
        # Width 0 (the default) hides margin number 0.
        self.SendScintillaMessage(SCI_SETMARGINWIDTHN, 0, width)

    def SetProperty(self, key, value):
        self.SendScintillaMessage(SCI_SETPROPERTY, key, value)

    def SetKeyWords(self, keyWordSet, keyWordList):
        # Scintilla expects the keyword list as one space-separated string.
        self.SendScintillaMessage(SCI_SETKEYWORDS, keyWordSet, " ".join(keyWordList))

    def StyleSetFore(self, styleNumber, color):
        self.SendScintillaMessage(SCI_STYLESETFORE, styleNumber, color)

    def StyleSetBack(self, styleNumber, color):
        self.SendScintillaMessage(SCI_STYLESETBACK, styleNumber, color)

    def StyleSetSize(self, styleNumber, size):
        self.SendScintillaMessage(SCI_STYLESETSIZE, styleNumber, size)

    def StyleSetFont(self, styleNumber, face):
        self.SendScintillaMessage(SCI_STYLESETFONT, styleNumber, face)

    def StyleClearAll(self):
        self.SendScintillaMessage(SCI_STYLECLEARALL, 0, 0)

    def GetLength(self):
        return self.SendScintillaMessage(SCI_GETLENGTH, 0, 0)

    def GetText(self):
        # +1 for the terminating NUL written by Scintilla.
        buff_length = self.GetLength() + 1
        buff = create_string_buffer(buff_length)
        self.SendScintillaMessage(SCI_GETTEXT, buff_length, byref(buff))
        return str(buff.value)

    def GetSelText(self):
        # Returns "" when the selection is empty.
        start = self.SendScintillaMessage(SCI_GETSELECTIONSTART, 0, 0)
        end = self.SendScintillaMessage(SCI_GETSELECTIONEND, 0, 0)
        if start == end:
            return ""
        # +1 for the terminating NUL.
        buff = (c_char * (end - start + 1))()
        self.SendScintillaMessage(SCI_GETSELTEXT, 0, byref(buff))
        return str(buff.value)

    def HasSelection(self):
        start = self.SendScintillaMessage(SCI_GETSELECTIONSTART, 0, 0)
        end = self.SendScintillaMessage(SCI_GETSELECTIONEND, 0, 0)
        return (end - start) > 0

    def AddText(self, text):
        self.SendScintillaMessage(SCI_ADDTEXT, len(text), text)

    def SetTabWidth(self, width):
        self.SendScintillaMessage(SCI_SETTABWIDTH, width, 0)

    def SetUseTabs(self, useTabs):
        self.SendScintillaMessage(SCI_SETUSETABS, int(useTabs), 0)

    def SetEolMode(self, eolMode):
        self.SendScintillaMessage(SCI_SETEOLMODE, eolMode, 0)

    def Undo(self):
        self.SendScintillaMessage(SCI_UNDO, 0, 0)

    def Redo(self):
        self.SendScintillaMessage(SCI_REDO, 0, 0)

    def CanUndo(self):
        return self.SendScintillaMessage(SCI_CANUNDO, 0, 0)

    def CanRedo(self):
        return self.SendScintillaMessage(SCI_CANREDO, 0, 0)

    def Cut(self):
        self.SendScintillaMessage(SCI_CUT, 0, 0)

    def Copy(self):
        self.SendScintillaMessage(SCI_COPY, 0, 0)

    def Clear(self):
        self.SendScintillaMessage(SCI_CLEAR, 0, 0)

    def Paste(self):
        self.SendScintillaMessage(SCI_PASTE, 0, 0)

    def CanPaste(self):
        return self.SendScintillaMessage(SCI_CANPASTE, 0, 0)

    def SelectAll(self):
        self.SendScintillaMessage(SCI_SELECTALL, 0, 0)
We had such an amazing time during this sweet mom's photo session. In love with both the studio and La Jolla beach images. Can't wait for her to hang the stunning canvas wall art collage in her home! Thrilled that she chose me as her San Diego maternity photographer.
# Author: Jbrodin
# A simple program used to make a color-changing orb.
# Requires the Panda3D game engine to run.
import direct.directbase.DirectStart
from panda3d.core import AmbientLight,DirectionalLight
from panda3d.core import NodePath,TextNode
from panda3d.core import Camera,Vec3,Vec4
from direct.gui.OnscreenText import OnscreenText
from direct.interval.IntervalGlobal import *
import sys
from direct.showbase.DirectObject import DirectObject #enables sys.accept


class World(DirectObject):
    """Scene showing a glowing orb whose pulse color is switched with the
    1/2/3 keys (red/green/blue); Esc exits the program."""

    def __init__(self):
        # Load switch model; "sphere" is the name of the sphere geometry
        # inside the .egg file, looked up as a subcomponent of the model.
        self.glowswitch = loader.loadModel("glowswitch")
        self.sphere = self.glowswitch.find("**/sphere")
        self.glowswitch.reparentTo(render)

        # Fix the camera: the mouse-controlled camera cannot be moved
        # within the program.
        base.disableMouse()
        camera.setPosHpr( 0, -6.5, 1.4, 0, -2, 0)

        # Light up everything an equal amount.
        ambientLight = AmbientLight("ambientLight")
        ambientLight.setColor(Vec4(.95, .95, 1.05, 1))
        render.setLight(render.attachNewNode(ambientLight))

        # Add dim directional lighting that only casts light on one side
        # of everything in the scene.
        directionalLight = DirectionalLight("directionalLight")
        directionalLight.setDirection(Vec3(-5, -5, -5))
        directionalLight.setColor(Vec4(.2, .2, .2, .1))  # keeping it dim
        directionalLight.setSpecularColor(Vec4(0.2, 0.2, 0.2, 0.2))
        render.setLight(render.attachNewNode(directionalLight))

        # Initialize the sequence variable, then start pulsing blue by
        # default.  Arguments are (Rmin, Gmin, Bmin, Rmax, Gmax, Bmax):
        # "pulsing" a color needs a range of color values to fade between.
        self.ChangeColorSeq = Sequence(Wait(.1))
        self.changeOrbColor(.1, 0, .6, .3, .2, 1)

        # User controls.
        self.accept("1", self.changeOrbColor, [.6, .1, .1, 1, .3, .3])  # red
        self.accept("2", self.changeOrbColor, [.1, .6, .1, .3, 1, .3])  # green
        self.accept("3", self.changeOrbColor, [.1, 0, .6, .3, .2, 1])   # blue
        self.accept("escape", sys.exit)

        # On-screen key help.  Local name only: the text node stays
        # attached to the 2-D scene graph after __init__ returns.
        instructions = OnscreenText(text="1: Change to red \n2: Change to Green \n3: Change to Blue \nEsc: Exit",
                                    fg=(1, 1, 1, 1),
                                    pos=(-1.3, -.82),
                                    scale=.05,
                                    align=TextNode.ALeft)

    def changeOrbColor(self, Ra, Ga, Ba, Rz, Gz, Bz):
        """Restart the pulse loop fading between (Ra,Ga,Ba) and (Rz,Gz,Bz)."""
        # End the last sequence before building a new one.
        self.ChangeColorSeq.finish()
        # First argument of colorScaleInterval is the number of seconds
        # the fade takes to execute.
        BrightenSwitch = self.sphere.colorScaleInterval(2, Vec4(Ra, Ga, Ba, 1), Vec4(Rz, Gz, Bz, 1))
        DarkenSwitch = self.sphere.colorScaleInterval(2, Vec4(Rz, Gz, Bz, 1), Vec4(Ra, Ga, Ba, 1))
        # Loop: brighten, pause, darken, pause.
        self.ChangeColorSeq = Sequence(BrightenSwitch, Wait(.1), DarkenSwitch, Wait(.1))
        self.ChangeColorSeq.loop()


w = World()
run()
This precut kinesiology tape application is designed to relieve pain and inflammation from injuries to the calf or Achilles tendon. It can provide relief for conditions such as Achilles tendinitis, strained calf muscles or calf spasms. The elasticity of the tape allows it to provide support and pain relief without restricting range of motion. This allows those with calf or Achilles tendon injuries to continue their activities and/or participate in rehabilitation exercises as they heal. If you have any questions about this product by Kindmax, contact us by completing and submitting the form below. If you are looking for a specific part number, please include it with your message. PerformTex Jet Black Bulk, 2" X 115"
# This file was generated by decode.py. Do not edit!
# For each instruction the information available is:
# re_parser, input_regs, output_regs, double_regs, long_latency, delayed,
# extra_phy_inputs, extra_phy_outputs
import re


class insn_metadata(object):
    """Per-instruction metadata record.

    `info` is a 10-tuple unpacked positionally into the attributes below
    (see the generated header comment for the field meanings).
    """
    def __init__(self, info):
        self.inputs, self.outputs, self.double_regs, self.long_latency, self.delayed, self.extra_inputs, self.extra_outputs, self.immediates, self.is_branch, self.is_condbranch = info


# Operand sub-patterns shared by all parsers below.
# A register operand: $-prefixed pseudo-registers, or %sp/%fp/%i..%o
# window registers, or %rN/%fN.
reg = r"""(\$[lgsd]?f?\d+|%(?:sp|fp|[ilog][0-7]|[rf]\d+))"""
# An immediate operand, including %hi/%lo-style relocation operators.
imm = r"""([^%$]\S*|%(?:(?:hi|lo)x?|hh|hm|lm|h44|m44|uhi|ulo|(?:tgd|tldm|tie)_(?:hi22|lo10)|(?:tldo|tle)_(?:hix22|lox10))\([^)]+\))"""

# Generated operand-syntax parsers, one per instruction operand shape.
re000 = re.compile(r'''\s*''' + imm + r'''\s*$''')
re001 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re002 = re.compile(r'''\s*$''')
re003 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re004 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*,\s*''' + reg + r'''\s*$''')
re005 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*,\s*''' + reg + r'''\s*$''')
re006 = re.compile(r'''\s*(?:)\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
re007 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re008 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re009 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re010 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
re011 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*''' + reg + r'''\s*$''')
# Loads into coprocessor registers (%cN destination).
re012 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re013 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re014 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re015 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re016 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%c\d+\S*\s*$''')
re017 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re018 = re.compile(r'''\s*''' + reg + r'''\s*$''')
re019 = re.compile(r'''\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re020 = re.compile(r'''\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''')
# Loads into the floating-point / coprocessor state registers.
re021 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re022 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re023 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re024 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re025 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%fsr\S*\s*$''')
re026 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re027 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re028 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re029 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re030 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*,\s*%csr\S*\s*$''')
re031 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*$''')
# Writes to ancillary / control registers (%asr, %y, %psr, %wim, %tbr).
re032 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%asr\S*\s*$''')
re033 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%asr\S*\s*$''')
re034 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%y\S*\s*$''')
re035 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%y\S*\s*$''')
re036 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%psr\S*\s*$''')
re037 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%psr\S*\s*$''')
re038 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%wim\S*\s*$''')
re039 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%wim\S*\s*$''')
re040 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*%tbr\S*\s*$''')
re041 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*,\s*%tbr\S*\s*$''')
# Reads from ancillary / control registers into a general register.
re042 = re.compile(r'''\s*%asr\d+\S*\s*,\s*''' + reg + r'''\s*$''')
re043 = re.compile(r'''\s*%y\S*\s*,\s*''' + reg + r'''\s*$''')
re044 = re.compile(r'''\s*%psr\S*\s*,\s*''' + reg + r'''\s*$''')
re045 = re.compile(r'''\s*%wim\S*\s*,\s*''' + reg + r'''\s*$''')
re046 = re.compile(r'''\s*%tbr\S*\s*,\s*''' + reg + r'''\s*$''')
re047 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%asr\S*\s*$''')
re048 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%asr\S*\s*$''')
re049 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%y\S*\s*$''')
re050 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%y\S*\s*$''')
re051 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%psr\S*\s*$''')
re052 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%psr\S*\s*$''')
re053 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%wim\S*\s*$''')
re054 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%wim\S*\s*$''')
re055 = re.compile(r'''\s*''' + reg + r'''\s*,\s*%tbr\S*\s*$''')
re056 = re.compile(r'''\s*''' + imm + r'''\s*,\s*%tbr\S*\s*$''')
# Stores: register source, various addressing modes.
re057 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re058 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re059 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re060 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re061 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
# Stores from coprocessor / state registers.
re062 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re063 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re064 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re065 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re066 = re.compile(r'''\s*%c\d+\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re067 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re068 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re069 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re070 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re071 = re.compile(r'''\s*%csr\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
re072 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re073 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re074 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re075 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re076 = re.compile(r'''\s*%fsr\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''')
# Bare address operands.
re077 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re078 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*\]\s*$''')
re079 = re.compile(r'''\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''')
re080 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''')
re081 = re.compile(r'''\s*\[\s*''' + imm + r'''\s*\]\s*$''')
# Stores with an alternate-space identifier after the address.
re082 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*$''')
re083 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\[\s*''' + reg + r'''\s*\]\s*(\dx(\d|[a-f])+|\d+)\s*$''')
re084 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''')
re085 =
re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''') re086 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*$''') re087 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*$''') re088 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*$''') re089 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*$''') re090 = re.compile(r'''\s*''' + reg + r'''\s*,\s*''' + reg + r'''\s*,\s*''' + imm + r'''\s*$''') re091 = re.compile(r'''\s*''' + imm + r'''\s*,\s*\d+\s*$''') re092 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*\d+\s*$''') re093 = re.compile(r'''\s*''' + reg + r'''\s*,\s*\d+\s*$''') re094 = re.compile(r'''\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*,\s*\d+\s*$''') re095 = re.compile(r'''\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*,\s*\d+\s*$''') re096 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''') re097 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''') re098 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''') re099 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''') re100 = re.compile(r'''\s*%fq\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''') re101 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''') re102 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*\]\s*$''') re103 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + reg + r'''\s*[+-]\s*''' + imm + r'''\s*\]\s*$''') re104 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + imm + r'''\s*[+-]\s*''' + reg + r'''\s*\]\s*$''') re105 = re.compile(r'''\s*%cq\S*\s*,\s*\[\s*''' + imm + r'''\s*\]\s*$''') re106 = re.compile(r'''\s*%wim\S*\s*,\s*''' + imm + r'''\s*,\s*''' + reg + r'''\s*$''') form000 = insn_metadata(([], [], [], False, True, [], [], [0], False, True)) form001 = insn_metadata(([0, 1], 
[2], [], False, False, [], [], [], False, False)) form002 = insn_metadata(([], [], [], False, False, [], [], [], False, False)) form003 = insn_metadata(([0], [2], [], False, False, [], [], [1], False, False)) form004 = insn_metadata(([0, 1], [2], [], True, False, [], [], [], False, False)) form005 = insn_metadata(([0], [1], [], True, False, [], [], [], False, False)) form006 = insn_metadata(([], [1], [], False, False, [], [], [0], False, False)) form007 = insn_metadata(([0, 1], [2], [2], True, False, [], [], [], False, False)) form008 = insn_metadata(([0], [1], [1], True, False, [], [], [], False, False)) form009 = insn_metadata(([0], [2], [2], True, False, [], [], [1], False, False)) form010 = insn_metadata(([1], [2], [2], True, False, [], [], [0], False, False)) form011 = insn_metadata(([], [1], [1], True, False, [], [], [0], False, False)) form012 = insn_metadata(([0, 1], [], [], True, False, [], [], [], False, False)) form013 = insn_metadata(([0], [], [], True, False, [], [], [], False, False)) form014 = insn_metadata(([0], [], [], True, False, [], [], [1], False, False)) form015 = insn_metadata(([1], [], [], True, False, [], [], [0], False, False)) form016 = insn_metadata(([], [], [], True, False, [], [], [0], False, False)) form017 = insn_metadata(([0], [1], [], False, False, [], [], [], False, False)) form018 = insn_metadata(([], [0], [], False, False, [], [], [], False, False)) form019 = insn_metadata(([0, 1], [2], [0, 1, 2], True, False, [], [], [], False, False)) form020 = insn_metadata(([1], [2], [], False, False, [], [], [0], False, False)) form021 = insn_metadata(([0, 1], [], [0], False, False, [], [], [], False, False)) form022 = insn_metadata(([0, 1], [1], [], False, False, [], [], [], False, False)) form023 = insn_metadata(([1], [1], [], False, False, [], [], [0], False, False)) form024 = insn_metadata(([0], [2], [], True, False, [], [], [1], False, False)) form025 = insn_metadata(([1], [2], [], True, False, [], [], [0], False, False)) form026 = 
insn_metadata(([], [1], [], True, False, [], [], [0], False, False)) form027 = insn_metadata(([0], [], [], False, False, [], [], [], False, False)) form028 = insn_metadata(([], [0], [], True, False, [], [], [], False, False)) form029 = insn_metadata(([0, 1], [2], [1, 2], True, False, [], [], [], False, False)) form030 = insn_metadata(([0, 1], [], [], False, False, [], [], [], False, False)) form031 = insn_metadata(([0], [], [], False, False, [], [], [1], False, False)) form032 = insn_metadata(([], [], [], False, False, [], [], [0], False, False)) form033 = insn_metadata(([0], [1], [0, 1], True, False, [], [], [], False, False)) form034 = insn_metadata(([0, 1], [2], [], True, False, [], ['y'], [], False, False)) form035 = insn_metadata(([0], [2], [], True, False, [], ['y'], [1], False, False)) form036 = insn_metadata(([1], [2], [], True, False, [], ['y'], [0], False, False)) form037 = insn_metadata(([0, 1, 2], [], [], False, False, [], [], [], False, False)) form038 = insn_metadata(([0, 1], [], [], False, False, [], [], [2], False, False)) form039 = insn_metadata(([0, 2], [], [], False, False, [], [], [1], False, False)) form040 = insn_metadata(([1], [], [], False, False, [], [], [0], False, False)) form041 = insn_metadata(([0], [0], [], False, False, [], [], [], False, False)) form042 = insn_metadata(([0, 1], [], [0, 1], True, False, [], [], [], False, False)) form043 = insn_metadata(([0, 1], [2], [], True, False, ['y'], [], [], False, False)) form044 = insn_metadata(([0], [2], [], True, False, ['y'], [], [1], False, False)) form045 = insn_metadata(([1], [2], [], True, False, ['y'], [], [0], False, False)) form046 = insn_metadata(([0, 1], [2], [], False, True, [], [], [], True, False)) form047 = insn_metadata(([0], [1], [], False, True, [], [], [], True, False)) form048 = insn_metadata(([], [1], [], False, True, [], [], [0], True, False)) form049 = insn_metadata(([0], [2], [], False, True, [], [], [1], True, False)) form050 = insn_metadata(([1], [2], [], False, 
True, [], [], [0], True, False)) form051 = insn_metadata(([0], [1], [1], False, False, [], [], [], False, False)) form052 = insn_metadata(([], [], [], False, True, [], [], [0], True, False)) form053 = insn_metadata(([0, 1, 2], [], [0], False, False, [], [], [], False, False)) form054 = insn_metadata(([0], [1], [0], False, False, [], [], [], False, False)) form055 = insn_metadata(([], [], [], False, True, [15], [], [], True, False)) form056 = insn_metadata(([], [], [], True, False, [], [], [], False, False)) form057 = insn_metadata(([0], [1], [0, 1], False, False, [], [], [], False, False)) form058 = insn_metadata(([], [], [], False, True, [31], [], [], True, False)) form059 = insn_metadata(([], [], [], False, True, [], [15], [0], True, False)) form060 = insn_metadata(([0, 1], [], [], False, True, [], [15], [], True, False)) form061 = insn_metadata(([0], [], [], False, True, [], [15], [], True, False)) form062 = insn_metadata(([0], [], [], False, True, [], [15], [1], True, False)) form063 = insn_metadata(([1], [], [], False, True, [], [15], [0], True, False)) form064 = insn_metadata(([0, 1], [], [], False, True, [], [], [], True, False)) form065 = insn_metadata(([0], [], [], False, True, [], [], [], True, False)) form066 = insn_metadata(([0], [], [], False, True, [], [], [1], True, False)) form067 = insn_metadata(([1], [], [], False, True, [], [], [0], True, False)) form068 = insn_metadata(([0, 1], [], [1], True, False, [], [], [], False, False)) form069 = insn_metadata(([0, 1], [], [0], False, False, [], [], [2], False, False)) form070 = insn_metadata(([0, 2], [], [0], False, False, [], [], [1], False, False)) form071 = insn_metadata(([0], [], [0], False, False, [], [], [1], False, False)) form072 = insn_metadata(([0], [], [], False, False, [], [], [], True, False)) insninfo = { 'add' : [ (re001, form001), (re003, form003), (re019, form020), ], 'addcc' : [ (re001, form001), (re003, form003), (re019, form020), ], 'addx' : [ (re001, form001), (re003, form003), 
(re019, form020), ], 'addxcc' : [ (re001, form001), (re003, form003), (re019, form020), ], 'allocate' : [ (re018, form028), (re017, form005), (re020, form026), ], 'allocates' : [ (re018, form028), (re017, form005), (re020, form026), ], 'allocatex' : [ (re018, form028), (re017, form005), (re020, form026), ], 'and' : [ (re001, form001), (re003, form003), (re019, form020), ], 'andcc' : [ (re001, form001), (re003, form003), (re019, form020), ], 'andn' : [ (re001, form001), (re003, form003), ], 'andncc' : [ (re001, form001), (re003, form003), ], 'b' : [ (re000, form052), ], 'b,a' : [ (re000, form052), ], 'ba' : [ (re000, form000), ], 'ba,a' : [ (re000, form000), ], 'bcc' : [ (re000, form000), ], 'bcc,a' : [ (re000, form000), ], 'bclr' : [ (re017, form022), (re020, form023), ], 'bcs' : [ (re000, form000), ], 'bcs,a' : [ (re000, form000), ], 'be' : [ (re000, form000), ], 'be,a' : [ (re000, form000), ], 'beq' : [ (re000, form000), ], 'beq,a' : [ (re000, form000), ], 'bg' : [ (re000, form000), ], 'bg,a' : [ (re000, form000), ], 'bge' : [ (re000, form000), ], 'bge,a' : [ (re000, form000), ], 'bgeu' : [ (re000, form000), ], 'bgeu,a' : [ (re000, form000), ], 'bgt' : [ (re000, form000), ], 'bgt,a' : [ (re000, form000), ], 'bgu' : [ (re000, form000), ], 'bgu,a' : [ (re000, form000), ], 'bl' : [ (re000, form000), ], 'bl,a' : [ (re000, form000), ], 'ble' : [ (re000, form000), ], 'ble,a' : [ (re000, form000), ], 'bleu' : [ (re000, form000), ], 'bleu,a' : [ (re000, form000), ], 'blt' : [ (re000, form000), ], 'blt,a' : [ (re000, form000), ], 'blu' : [ (re000, form000), ], 'blu,a' : [ (re000, form000), ], 'bn' : [ (re000, form000), ], 'bn,a' : [ (re000, form000), ], 'bne' : [ (re000, form000), ], 'bne,a' : [ (re000, form000), ], 'bneg' : [ (re000, form000), ], 'bneg,a' : [ (re000, form000), ], 'bnz' : [ (re000, form000), ], 'bnz,a' : [ (re000, form000), ], 'bpos' : [ (re000, form000), ], 'bpos,a' : [ (re000, form000), ], 'break' : [ (re018, form027), (re000, form032), ], 'bset' : [ 
(re017, form022), (re020, form023), ], 'btog' : [ (re017, form022), (re020, form023), ], 'btst' : [ (re017, form030), (re020, form040), ], 'bvc' : [ (re000, form000), ], 'bvc,a' : [ (re000, form000), ], 'bvs' : [ (re000, form000), ], 'bvs,a' : [ (re000, form000), ], 'bz' : [ (re000, form000), ], 'bz,a' : [ (re000, form000), ], 'call' : [ (re000, form059), (re091, form059), (re087, form060), (re092, form060), (re018, form061), (re093, form061), (re088, form062), (re094, form062), (re089, form063), (re095, form063), (re000, form059), (re091, form059), (re018, form061), (re093, form061), ], 'clr' : [ (re018, form018), (re018, form018), (re077, form030), (re078, form027), (re079, form031), (re080, form040), (re081, form032), (re078, form027), ], 'clrb' : [ (re077, form030), (re078, form027), (re079, form031), (re080, form040), (re081, form032), (re078, form027), ], 'clrh' : [ (re077, form030), (re078, form027), (re079, form031), (re080, form040), (re081, form032), (re078, form027), ], 'cmp' : [ (re017, form030), (re031, form031), ], 'cpop1' : [ (re007, form001), ], 'cpop2' : [ (re007, form001), ], 'create' : [ (re017, form005), ], 'cred' : [ (re020, form015), ], 'crei' : [ (re017, form012), ], 'dec' : [ (re018, form041), (re020, form023), ], 'deccc' : [ (re018, form041), (re020, form023), ], 'detach' : [ (re018, form027), ], 'f_alloc' : [ (re018, form018), ], 'f_break' : [ (re002, form002), (re018, form027), ], 'f_create' : [ (re017, form012), (re001, form004), (re031, form014), (re003, form024), ], 'f_fence' : [ (re017, form030), (re001, form001), (re031, form031), (re003, form003), (re018, form027), (re000, form032), ], 'f_freesrb' : [ (re018, form027), ], 'f_get_blockindex' : [ (re017, form017), (re018, form018), ], 'f_get_blocksize' : [ (re017, form017), (re018, form018), ], 'f_get_gridsize' : [ (re017, form017), (re018, form018), ], 'f_mapg' : [ (re001, form001), (re003, form003), (re017, form030), (re031, form031), ], 'f_maphtg' : [ (re001, form001), (re003, 
form003), (re017, form030), (re031, form031), ], 'f_set_blocksize' : [ (re017, form030), (re031, form031), ], 'f_set_gridsize' : [ (re017, form030), (re031, form031), ], 'fabss' : [ (re017, form017), ], 'faddd' : [ (re001, form029), ], 'faddq' : [ (re001, form019), ], 'fadds' : [ (re001, form004), ], 'faddx' : [ (re001, form019), ], 'fb' : [ (re000, form000), ], 'fb,a' : [ (re000, form000), ], 'fba' : [ (re000, form000), ], 'fba,a' : [ (re000, form000), ], 'fbe' : [ (re000, form000), ], 'fbe,a' : [ (re000, form000), ], 'fbg' : [ (re000, form000), ], 'fbg,a' : [ (re000, form000), ], 'fbge' : [ (re000, form000), ], 'fbge,a' : [ (re000, form000), ], 'fbl' : [ (re000, form000), ], 'fbl,a' : [ (re000, form000), ], 'fble' : [ (re000, form000), ], 'fble,a' : [ (re000, form000), ], 'fblg' : [ (re000, form000), ], 'fblg,a' : [ (re000, form000), ], 'fbn' : [ (re000, form000), ], 'fbn,a' : [ (re000, form000), ], 'fbne' : [ (re000, form000), ], 'fbne,a' : [ (re000, form000), ], 'fbnz' : [ (re000, form000), ], 'fbnz,a' : [ (re000, form000), ], 'fbo' : [ (re000, form000), ], 'fbo,a' : [ (re000, form000), ], 'fbu' : [ (re000, form000), ], 'fbu,a' : [ (re000, form000), ], 'fbue' : [ (re000, form000), ], 'fbue,a' : [ (re000, form000), ], 'fbug' : [ (re000, form000), ], 'fbug,a' : [ (re000, form000), ], 'fbuge' : [ (re000, form000), ], 'fbuge,a' : [ (re000, form000), ], 'fbul' : [ (re000, form000), ], 'fbul,a' : [ (re000, form000), ], 'fbule' : [ (re000, form000), ], 'fbule,a' : [ (re000, form000), ], 'fbz' : [ (re000, form000), ], 'fbz,a' : [ (re000, form000), ], 'fcmpd' : [ (re017, form068), ], 'fcmped' : [ (re017, form068), ], 'fcmpeq' : [ (re017, form042), ], 'fcmpes' : [ (re017, form012), ], 'fcmpex' : [ (re017, form042), ], 'fcmpq' : [ (re017, form042), ], 'fcmps' : [ (re017, form012), ], 'fcmpx' : [ (re017, form042), ], 'fdivd' : [ (re001, form029), ], 'fdivq' : [ (re001, form019), ], 'fdivs' : [ (re001, form004), ], 'fdivx' : [ (re001, form019), ], 'fdmulq' : [ (re001, 
form029), ], 'fdmulx' : [ (re001, form029), ], 'fdtoi' : [ (re017, form054), ], 'fdtoq' : [ (re017, form057), ], 'fdtos' : [ (re017, form054), ], 'fgets' : [ (re003, form024), ], 'fitod' : [ (re017, form051), ], 'fitoq' : [ (re017, form051), ], 'fitos' : [ (re017, form017), ], 'flush' : [ (re087, form012), (re018, form013), (re018, form013), (re000, form016), (re088, form014), (re089, form015), ], 'fmovs' : [ (re017, form017), ], 'fmuld' : [ (re001, form029), ], 'fmulq' : [ (re001, form019), ], 'fmuls' : [ (re001, form004), ], 'fmulx' : [ (re001, form019), ], 'fnegs' : [ (re017, form017), ], 'fprintd' : [ (re017, form030), ], 'fprintq' : [ (re017, form021), ], 'fprints' : [ (re017, form030), ], 'fputg' : [ (re090, form038), ], 'fputs' : [ (re090, form038), ], 'fqtod' : [ (re017, form057), ], 'fqtoi' : [ (re017, form054), ], 'fqtos' : [ (re017, form054), ], 'fsmuld' : [ (re001, form007), ], 'fsqrtd' : [ (re017, form033), ], 'fsqrtq' : [ (re017, form033), ], 'fsqrts' : [ (re017, form005), ], 'fsqrtx' : [ (re017, form033), ], 'fstod' : [ (re017, form051), ], 'fstoi' : [ (re017, form017), ], 'fstoq' : [ (re017, form051), ], 'fsubd' : [ (re001, form029), ], 'fsubq' : [ (re001, form019), ], 'fsubs' : [ (re001, form004), ], 'fsubx' : [ (re001, form019), ], 'getcid' : [ (re018, form018), ], 'getfid' : [ (re018, form018), ], 'getpid' : [ (re018, form018), ], 'gets' : [ (re003, form024), ], 'gettid' : [ (re018, form018), ], 'iflush' : [ (re087, form012), (re018, form013), (re018, form013), (re000, form016), (re088, form014), (re089, form015), ], 'inc' : [ (re018, form041), (re020, form023), ], 'inccc' : [ (re018, form041), (re020, form023), ], 'jmp' : [ (re087, form064), (re018, form065), (re088, form066), (re089, form067), (re000, form052), (re018, form065), ], 'jmpl' : [ (re084, form046), (re017, form047), (re017, form047), (re020, form048), (re085, form049), (re086, form050), ], 'launch' : [ (re018, form072), ], 'ld' : [ (re007, form004), (re008, form005), (re009, 
form024), (re010, form025), (re011, form026), (re008, form005), (re007, form004), (re008, form005), (re009, form024), (re010, form025), (re011, form026), (re008, form005), (re021, form012), (re022, form013), (re023, form014), (re024, form015), (re025, form016), (re022, form013), (re012, form012), (re013, form013), (re014, form014), (re015, form015), (re016, form016), (re013, form013), (re026, form012), (re027, form013), (re028, form014), (re029, form015), (re030, form016), (re027, form013), ], 'lda' : [ (re004, form004), (re005, form005), ], 'ldbp' : [ (re018, form018), ], 'ldd' : [ (re007, form007), (re008, form008), (re009, form009), (re010, form010), (re011, form011), (re008, form008), (re007, form007), (re008, form008), (re009, form009), (re010, form010), (re011, form011), (re008, form008), (re012, form012), (re013, form013), (re014, form014), (re015, form015), (re016, form016), (re013, form013), ], 'ldda' : [ (re004, form004), (re005, form005), ], 'ldfp' : [ (re018, form018), ], 'ldsb' : [ (re007, form004), (re008, form005), (re009, form024), (re010, form025), (re011, form026), (re008, form005), ], 'ldsba' : [ (re004, form004), (re005, form005), ], 'ldsh' : [ (re008, form005), (re007, form004), (re009, form024), (re010, form025), (re011, form026), (re008, form005), ], 'ldsha' : [ (re004, form004), (re005, form005), ], 'ldstub' : [ (re007, form004), (re008, form005), (re009, form024), (re010, form025), (re011, form026), (re008, form005), ], 'ldstuba' : [ (re004, form004), (re005, form005), ], 'ldub' : [ (re007, form004), (re008, form005), (re009, form024), (re010, form025), (re011, form026), (re008, form005), ], 'lduba' : [ (re004, form004), (re005, form005), ], 'lduh' : [ (re007, form004), (re008, form005), (re009, form024), (re010, form025), (re011, form026), (re008, form005), ], 'lduha' : [ (re004, form004), (re005, form005), ], 'mov' : [ (re032, form030), (re033, form031), (re034, form030), (re035, form031), (re036, form030), (re037, form031), (re038, 
form030), (re039, form031), (re040, form030), (re041, form031), (re042, form018), (re043, form018), (re044, form018), (re045, form018), (re046, form018), (re047, form027), (re048, form032), (re047, form027), (re049, form027), (re050, form032), (re049, form027), (re051, form027), (re052, form032), (re051, form027), (re053, form027), (re054, form032), (re053, form027), (re055, form027), (re056, form032), (re055, form027), (re017, form017), (re020, form006), (re017, form017), (re017, form017), ], 'mulscc' : [ (re001, form043), (re003, form044), ], 'neg' : [ (re017, form017), (re018, form041), ], 'nop' : [ (re002, form002), ], 'not' : [ (re017, form017), (re018, form041), ], 'or' : [ (re001, form001), (re003, form003), (re019, form020), ], 'orcc' : [ (re001, form001), (re003, form003), (re019, form020), ], 'orn' : [ (re001, form001), (re003, form003), ], 'orncc' : [ (re001, form001), (re003, form003), ], 'print' : [ (re017, form030), (re031, form031), ], 'putg' : [ (re090, form038), ], 'puts' : [ (re090, form038), ], 'r_allocsrb' : [ (re017, form017), (re020, form006), ], 'r_read' : [ (re017, form005), ], 'r_write' : [ (re017, form012), (re001, form004), (re031, form014), (re003, form024), ], 'rd' : [ (re042, form018), (re043, form018), (re044, form018), (re045, form018), (re106, form006), (re046, form018), ], 'release' : [ (re018, form027), ], 'restore' : [ (re001, form001), (re002, form002), (re003, form003), (re002, form002), ], 'ret' : [ (re002, form055), ], 'retl' : [ (re002, form058), ], 'rett' : [ (re087, form064), (re018, form065), (re088, form066), (re089, form067), (re000, form052), (re000, form052), (re018, form065), ], 'save' : [ (re001, form001), (re003, form003), (re002, form002), ], 'sdiv' : [ (re001, form043), (re003, form044), (re019, form045), ], 'sdivcc' : [ (re001, form043), (re003, form044), (re019, form045), ], 'set' : [ (re006, form006), ], 'setblock' : [ (re017, form030), (re031, form031), ], 'sethi' : [ (re020, form006), ], 'setarg' : [ (re017, 
form030), (re031, form031), ], 'setlimit' : [ (re017, form030), (re031, form031), ], 'setstart' : [ (re017, form030), (re031, form031), ], 'setstep' : [ (re017, form030), (re031, form031), ], 'setthread' : [ (re017, form030), (re031, form031), ], 'sll' : [ (re001, form001), (re003, form003), ], 'smul' : [ (re001, form034), (re003, form035), (re019, form036), ], 'smulcc' : [ (re001, form034), (re003, form035), (re019, form036), ], 'spill' : [ (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), ], 'spilld' : [ (re057, form053), (re058, form021), (re059, form069), (re060, form070), (re061, form071), (re058, form021), ], 'sra' : [ (re001, form001), (re003, form003), ], 'srl' : [ (re001, form001), (re003, form003), ], 'st' : [ (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), (re062, form030), (re063, form027), (re064, form031), (re065, form040), (re066, form032), (re063, form027), (re067, form030), (re068, form027), (re069, form031), (re070, form040), (re071, form032), (re068, form027), (re072, form030), (re073, form027), (re074, form031), (re075, form040), (re076, form032), (re073, form027), ], 'sta' : [ (re082, form037), (re083, form030), ], 'stb' : [ (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), ], 'stba' : [ (re082, form037), (re083, form030), ], 'stbar' : [ (re002, form056), ], 'std' : [ (re057, form053), (re058, form021), (re059, form069), (re060, form070), (re061, form071), (re058, form021), (re096, form030), (re097, form027), (re098, form031), (re099, form040), (re100, form032), (re097, form027), (re057, form053), (re058, form021), (re059, form069), (re060, form070), (re061, form071), (re058, form021), (re101, form030), (re102, form027), (re103, form031), (re104, form040), (re105, 
form032), (re102, form027), (re062, form030), (re063, form027), (re064, form031), (re065, form040), (re066, form032), (re063, form027), ], 'stda' : [ (re082, form053), (re083, form021), ], 'sth' : [ (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), ], 'stha' : [ (re082, form037), (re083, form030), ], 'stsb' : [ (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), ], 'stsba' : [ (re082, form037), (re083, form030), ], 'stsh' : [ (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), ], 'stsha' : [ (re082, form037), (re083, form030), ], 'stub' : [ (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), ], 'stuba' : [ (re082, form037), (re083, form030), ], 'stuh' : [ (re057, form037), (re058, form030), (re059, form038), (re060, form039), (re061, form031), (re058, form030), ], 'stuha' : [ (re082, form037), (re083, form030), ], 'sub' : [ (re001, form001), (re003, form003), ], 'subcc' : [ (re001, form001), (re003, form003), ], 'subx' : [ (re001, form001), (re003, form003), ], 'subxcc' : [ (re001, form001), (re003, form003), ], 'swap' : [ (re007, form004), (re008, form005), (re009, form024), (re010, form025), (re011, form026), (re008, form005), ], 'swapa' : [ (re004, form004), (re005, form005), ], 'sync' : [ (re017, form005), ], 't_allochtg' : [ (re001, form001), (re003, form003), (re001, form004), (re003, form024), ], 't_end' : [ (re002, form002), ], 't_freehtg' : [ (re018, form027), ], 't_get_fid' : [ (re018, form018), ], 't_get_pindex' : [ (re018, form018), ], 't_get_tid' : [ (re018, form018), ], 't_wait' : [ (re002, form002), (re018, form027), ], 'taddcc' : [ (re001, form001), (re003, form003), (re019, form020), ], 'taddcctv' : [ (re001, form001), (re003, form003), (re019, form020), ], 'tst' : [ (re018, form027), (re018, form027), (re018, form027), ], 
'tsubcc' : [ (re001, form001), (re003, form003), ], 'tsubcctv' : [ (re001, form001), (re003, form003), ], 'udiv' : [ (re001, form043), (re003, form044), (re019, form045), ], 'udivcc' : [ (re001, form043), (re003, form044), (re019, form045), ], 'umul' : [ (re001, form034), (re003, form035), (re019, form036), ], 'umulcc' : [ (re001, form034), (re003, form035), (re019, form036), ], 'unimp' : [ (re000, form032), ], 'wr' : [ (re032, form030), (re033, form031), (re047, form027), (re034, form030), (re035, form031), (re049, form027), (re036, form030), (re037, form031), (re051, form027), (re038, form030), (re039, form031), (re053, form027), (re040, form030), (re041, form031), (re055, form027), ], 'xnor' : [ (re001, form001), (re003, form003), (re019, form020), ], 'xnorcc' : [ (re001, form001), (re003, form003), (re019, form020), ], 'xor' : [ (re001, form001), (re003, form003), (re019, form020), ], 'xorcc' : [ (re001, form001), (re003, form003), (re019, form020), ], }
In over my head. I grew up in Western New York, in a small town between Buffalo and Niagara Falls, where the Erie Barge Canal empties into the Niagara River. A few miles downriver from my hometown the Niagara begins to look the way most people imagine it. The current picks up speed. There are rushing cataracts and, before you know it, you’ve arrived at those famous Falls. However, in the town where my family lived, the river is placid, nothing like the wild thing that it is about to become; and so there are a number of small beaches on the riverbank there, as well as on a large island that sits in the middle of the river, where it is quite safe to swim. My family often visited that island, known as Grand Island, during the summer months, arriving there early to eat breakfast and to go for a swim. One summer morning when I was quite young – I must have been four or five – I saw my father walking out into the water, carrying one of my younger brothers. Unbeknownst to him, I began to follow him. I didn’t yet know how to swim and before you knew it I was, literally, in over my head. I began to thrash about, to gasp, and swallow water. I know now that I was never in any real danger, but I can still remember how surprised and frightened I was as I “stepped into the abyss” and could no longer feel firm ground under my feet. Suddenly, however, and just as surprisingly, I was lifted out of the water by a man, a complete stranger, who’d been standing nearby and seen my predicament. He held me for a moment, let me catch my breath, and put me down on the sand. Oddly enough, I can’t remember much of what happened after that, but I can still recall that stranger’s smiling face. I am much older now than he was then. I suppose that he has long since died. But I can still remember him after all these years and I remember his face with relief and with gratitude. Pulled out of the desolate, the roiling, pit. 
Psalm 40 begins like this, “I waited patiently upon the Lord; he stooped to me and heard my cry. He lifted me out of the desolate pit, out of the mire and clay; he set my feet upon a high cliff and made my footing sure” (Ps 40:1-2). For a long time I’ve imagined that scene as taking place in the mountains. I thought the metaphor that the psalmist was using was that of a person – or perhaps even an animal – that has tripped and fallen into some kind of wet and mucky crevasse. Then God reaches down and lifts the frightened creature out of the mud and carefully places her in a dry and safe place, well away from danger. However, Robert Alter translates these verses as follows, “I urgently hoped for the Lord. He bent down toward me and heard my voice, and He brought me up from the roiling pit, from the thickest mire. And He set my feet on a crag, made my steps firm.” Alter says this about the second verse, “ ‘The roiling pit,’ literally, ‘the pit of noise.’ Most interpreters conclude that the noise is the rushing sound of the…waters of the abyss” (The Book of Psalms: A Translation with Commentary, New York, 2007, p. 141). After reading Alter’s translation, and his comment, the image in my mind has changed. I no longer think of scraped knees and bruised limbs on a mountain path. I think of drowning and gasping for air; and I can’t help remembering “being saved” on the banks of the Niagara River all those many years ago. Ancient advice for reading the psalms. Saint Isaac of Syria once said, “When we have the same disposition in our heart with which each psalm was sung or written down, then we shall become like its author, grasping its significance beforehand rather than afterward. That is, we first take in the power of what is said, rather than the knowledge of it, recalling what has taken place or what does take place in us in daily assaults whenever we reflect on them. 
When we repeat them we call to mind what our negligence has begotten in us or our diligence has obtained for us or divine providence has bestowed upon us or the enemy’s suggestion has deprived us of or slippery and subtle forgetfulness has taken away from us or human weakness has brought upon us or heedless ignorance has concealed from us. For we find all of these dispositions expressed in the psalms, so that we may see whatever occurs as in a very clear mirror and recognize it more effectively” (quoted in John Cassian the “Tenth Conference on Prayer,” in The Conferences, translated and annotated by Boniface Ramsey, O.P., New York, 1997, p. 385). Saint Isaac then goes on to suggest that by reading the psalms in this way, by discovering the whole breadth of human experience in them, by discovering ourselves in them, we make them our own. They become something other than just holy texts that we approach with great reverence, but without much feeling. Saint Isaac suggests that by putting ourselves into the psalms, they become personal and we pray them in a new way. We don’t just recite ancient words printed on a page, we begin to allow ourselves to take our own deepest experiences to God in prayer; and then we grow closer to God and God to us. YOUR PRAYERS ARE ASKED FOR Demetrio, Helen, Joyce, Susan, Kean, Gene, Victoria, Vincent, Wayne, Debbie, Theresa, Mary, Lee, Julie, Betty, Gerald, Aston, Amy, Jim, Barbara, Odin, Chandra, Sharon, Arpene, Ann, Ruth, Dorothy, Richard, Linda, Gert, and Rick; for the repose of the soul of Warren Olson; and for the members of our Armed Forces on active duty, especially Matthew, Mark, and Rob . . . GRANT THEM PEACE . . . March 11: 1886 Sarah Sophia Schiefflin Barclay; 1888 Thomas Tweedle; 1897 Zenobia Lawrence; 1907 William Chapman; 1920 Joseph Rogers Fallon; 1921 Elizabeth Jane Goddard Smith; 1958 Grace Clark; 1987 Vincent Onorato. IN THIS TRANSITORY LIFE . . . Warren Olson, a former member of the parish, died on March 1. 
While at Saint Mary’s, Warren was a faithful member of the guild of acolytes. His funeral was held at the Church of the Transfiguration, on Thursday, March 8. Please keep Warren, his family and friends, and all who mourn in your prayers. FASTING AND ABSTINENCE IN LENT . . . The ordinary weekdays of Lent are observed by special acts of discipline and self-denial in commemoration of the crucifixion of the Lord. Fridays in Lent are observed traditionally by abstinence from flesh meats. THIS WEEK AT SAINT MARY’S . . . Saturday, March 10, at 10:30 AM, the Consecration of Canon Andrew Dietsche at the Cathedral Church of Saint John the Divine, 112th Street & Amsterdam Avenue, as Bishop Coadjutor of the Diocese of New York . . . Daylight Saving Time begins at 2:00 AM on Sunday, March 11. Clocks should be moved ahead one hour . . . On Sunday, March 11, at 10:00 AM, Father Peter Powell continues his Lenten series on Genesis 1-11 . . . The Wednesday Night Bible Study Class will not meet on Wednesday, March 14, while Father Smith is away from the parish. The class resumes on March 21 . . . Saturday, March 17, 8:00 PM, Miller Theatre Early Music Series, Tenebrae, with Le Poème Harmonique, Vincent Dumestre, conductor . . . The Stations of the Cross are offered every Friday in Lent at 6:30 PM, following Evening Prayer . . . Father Jim Pace will hear confessions on Saturday, March 10. Father Gerth will hear confessions on Saturday, March 17 . . . Father Smith will be away from the parish from Monday, March 12, until Tuesday, March 20. He returns to the office on Wednesday, March 21. AROUND THE PARISH . . . Demetrio Muñoz, a good friend of Saint Mary’s, suffered a ruptured appendix last weekend. He had surgery early on Sunday morning. His condition continues to improve while he recuperates at Beth Israel Hospital. Please keep him in your prayers . . . Parishioner Gerald McKelvey continues to do rehabilitation therapy. 
He is now at Terence Cardinal Cooke Healthcare Center, which is located at 1249 Fifth Avenue, between 105th and 106th Streets. The Center’s phone number is 212-360-1000. Please keep him in your prayers . . . We received word this week that former sexton, Emanuel Grantham, has found work in the Washington, D.C., area, where he moved recently to care for his parents. He is grateful to all those here at the parish who provided support, offered prayers, and wrote recommendation letters during this time of transition . . . Confirmation and the other rites of Christian initiation will be celebrated at the Easter Vigil. For more information, please speak with one of the parish clergy . . . If you would like to sponsor a reception after the Solemn Masses on Annunciation, Monday, March 26; Easter Eve, Saturday, April 7; or Ascension Day, Thursday, May 17, please speak to Father Jay Smith or contact the parish office . . . Attendance: Last Sunday 223. SUNDAY ADULT FORUM IN LENT & EASTERTIDE . . . On Sundays during Lent, Father Peter Powell is leading a five-part series on Genesis 1-11. Father Powell tells us what to expect during class this coming Sunday: “The Lenten Genesis study this week takes a brief sojourn into the less well-known stories of Gen 1-11. Looking at chapters 4 through 6:4, we will discuss the stories of Cain and Abel. Do you remember Seth? Adam and Eve had three sons. Where did the wives come from? If humanity was created in Eden then where is Nod? We will look briefly at the amazing ages of the generation between Cain and Seth and Noah. Then we will close with a brief and amazing story about the seduction of women by the sons of God! Very interesting stuff hides in the Bible! Also, please remember: the most important aspect of Bible study is to do it. It is more important to start than to worry about starting in the right place. Therefore, even if you missed the first two weeks of the Gen 1-11 series you are invited to join us this week. 
Newcomers are always welcome to join the class” . . . The Adult Forum will not meet on Palm Sunday, April 1; on Easter Day, April 8; or on the Second Sunday of Easter, April 15. CONCERTS AT SAINT MARY’S . . . Wednesday, March 20, 2012, 8:00 PM, Between Heaven and Earth: Sacred and Secular Baroque Music from Germany and Italy, with Musica Nuova, Amanda Keil, mezzo-soprano; James Kennerley, tenor; Kris Kwapis, cornetto; Kelly Savage, harpsichord; Elizabeth Weinfeld, viola da gamba; Dorothy Olsson, dance and choreography. You may visit the Musica Nuova website for information and to purchase tickets . . . Saturday, March 31, 8:00 PM, New York Repertory Orchestra, David Leibowitz, music director. Music by Mozart and Shostakovich. Admission is free . . . Saturday, April 21, 8:00 PM, Miller Theatre Early Music Series, Treasures of the Renaissance, with Stile Antico. DONATIONS FOR ALTAR FLOWERS . . . We hope to receive donations for flowers for the Annunciation, March 26, and for Palm Sunday, April 1, as well as for some other Sundays in April. We also welcome donations for Easter flowers. If you would like to make a donation, please contact the parish office. LOOKING AHEAD . . . Monday, March 19, Saint Joseph, Mass 12:10 PM & 6:20 PM . . . Monday, March 26 (transferred), The Annunciation of Our Lord Jesus Christ to the Blessed Virgin Mary, Solemn Pontifical Mass 6:00 PM, the Right Reverend R. William Franklin, bishop of Western New York, celebrant and preacher.
from typing import Dict
import os
import json
import csv
import pickle
import re
from collections import namedtuple

import nltk
from unidecode import unidecode

from qanta import qlogging
from qanta.datasets.quiz_bowl import QantaDatabase
from qanta.util.constants import (
    COUNTRY_LIST_PATH,
    WIKI_DUMP_REDIRECT_PICKLE,
    WIKI_LOOKUP_PATH,
)

log = qlogging.get(__name__)

# Prefixes of auxiliary country pages whose text is appended by Wikipedia.load_country().
COUNTRY_SUB = ["History_of_", "Geography_of_"]

# Lightweight record for a single Wikipedia page from the parsed dump.
WikipediaPage = namedtuple("WikipediaPage", ["id", "title", "text", "url"])


def normalize_wikipedia_title(title):
    """Return *title* with spaces replaced by underscores (canonical dump form)."""
    return title.replace(" ", "_")


def create_wikipedia_title_pickle(dump_path, disambiguation_pages_path, output_path):
    """Pickle the set of normalized titles of all non-disambiguation pages.

    :param dump_path: JSON wikipedia dump readable by Spark
    :param disambiguation_pages_path: JSON file holding a list of disambiguation page ids
    :param output_path: destination pickle file
    """
    from qanta.spark import create_spark_session

    with open(disambiguation_pages_path) as f:
        disambiguation_pages = set(json.load(f))

    spark = create_spark_session()
    wiki_df = spark.read.json(dump_path)
    rows = wiki_df.select("title", "id").distinct().collect()
    # Keep only real content pages; drop disambiguation pages by id.
    content_pages = [r for r in rows if int(r.id) not in disambiguation_pages]
    clean_titles = {normalize_wikipedia_title(r.title) for r in content_pages}

    with open(output_path, "wb") as f:
        pickle.dump(clean_titles, f)
    spark.stop()


def create_wikipedia_cache(
    parsed_wiki_path="data/external/wikipedia/parsed-wiki", output_path=WIKI_LOOKUP_PATH
):
    """Build and persist a title -> page-dict lookup restricted to pages that
    are answers of training questions.

    :param parsed_wiki_path: directory tree of parsed wikipedia JSON files
    :param output_path: destination JSON file for the lookup
    :return: the lookup dict that was written
    """
    from qanta.spark import create_spark_context

    sc = create_spark_context()
    db = QantaDatabase()
    train_questions = db.train_questions
    answers = {q.page for q in train_questions}
    # Broadcast the answer set so every Spark worker filters locally.
    b_answers = sc.broadcast(answers)
    # Paths used in spark need to be absolute and it needs to exist
    page_path = os.path.abspath(parsed_wiki_path)
    page_pattern = os.path.join(page_path, "*", "*")

    def parse_page(json_text):
        # One JSON object per line; normalize the title to underscore form.
        page = json.loads(json_text)
        return {
            "id": int(page["id"]),
            "title": page["title"].replace(" ", "_"),
            "text": page["text"],
            "url": page["url"],
        }

    wiki_pages = (
        sc.textFile(page_pattern)
        .map(parse_page)
        .filter(lambda p: p["title"] in b_answers.value)
        .collect()
    )
    wiki_lookup = {p["title"]: p for p in wiki_pages}
    with open(output_path, "w") as f:
        json.dump(wiki_lookup, f)

    return wiki_lookup


def create_wikipedia_redirect_pickle(redirect_csv, output_pickle):
    """Filter a raw wikipedia redirect CSV down to redirects whose target is a
    training-question page, then pickle the source -> target mapping.

    Redirects are skipped when the source is a country name (those are handled
    via COUNTRY_SUB pages) or the target is a meta page (WikiProject, *_topics,
    *_(overview)).
    """
    countries = {}
    with open(COUNTRY_LIST_PATH) as f:
        for line in f:
            k, v = line.split("\t")
            countries[k] = v.strip()

    db = QantaDatabase()
    pages = {q.page for q in db.train_questions}

    with open(redirect_csv) as redirect_f:
        redirects = {}
        n_total = 0
        n_selected = 0
        for row in csv.reader(redirect_f, quotechar='"', escapechar="\\"):
            n_total += 1
            source = row[0]
            target = row[1]
            if (
                target not in pages
                or source in countries
                or target.startswith("WikiProject")
                or target.endswith("_topics")
                or target.endswith("_(overview)")
            ):
                continue
            else:
                redirects[source] = target
                n_selected += 1

        log.info(
            "Filtered {} raw wikipedia redirects to {} matching redirects".format(
                n_total, n_selected
            )
        )

    with open(output_pickle, "wb") as output_f:
        pickle.dump(redirects, output_f)


def extract_wiki_sentences(title, text, n_sentences, replace_title_mentions=""):
    """
    Extracts the first n_sentences from the text of a wikipedia page
    corresponding to the title. replace_title_mentions controls handling of
    references to the title in text. Oftentimes QA models learn *not* to answer
    entities mentioned in the question so this helps deal with this in the
    domain adaptation case.

    :param title: title of page
    :param text: text of page
    :param n_sentences: number of sentences to keep
    :param replace_title_mentions: Replace mentions with the provided string token, by default removing them
    :return: list of at most n_sentences sentences
    """
    # Get simplest representation of title and text
    title = unidecode(title).replace("_", " ")
    text = unidecode(text)

    # Split on non-alphanumeric. Empty fragments (from leading/trailing or
    # doubled separators) are dropped: an empty regex alternative would match
    # between every character and mangle the text during substitution.
    title_words = [w for w in re.split(r"[^a-zA-Z0-9]", title) if w]
    title_word_pattern = "|".join(re.escape(w.lower()) for w in title_words)

    # Breaking by newline yields paragraphs. Ignore the first since its always just the title
    paragraphs = [p for p in text.split("\n") if len(p) != 0][1:]
    sentences = []
    for p in paragraphs:
        formatted_text = re.sub(
            title_word_pattern, replace_title_mentions, p, flags=re.IGNORECASE
        )
        # Cleanup whitespace
        formatted_text = re.sub(r"\s+", " ", formatted_text).strip()
        sentences.extend(nltk.sent_tokenize(formatted_text))

    return sentences[:n_sentences]


class Wikipedia:
    def __init__(
        self, lookup_path=WIKI_LOOKUP_PATH, dump_redirect_path=WIKI_DUMP_REDIRECT_PICKLE
    ):
        """
        CachedWikipedia provides a unified way and easy way to access Wikipedia pages. Its design is motivated by:
        1) Getting a wikipedia page should function as a simple python dictionary access
        2) It should support access to pages using non-canonical names by resolving them to canonical names

        The following sections explain how the different levels of caching work as well as how redirects work

        Redirects
        To support some flexibility in requesting pages that are very close matches
        we have two sources of redirects. The first is based on wikipedia database dumps which is the most reliable.
        On top of this we do the very light preprocessing step of replacing whitespace with underscores since the
        canonical page names in the wikipedia database dumps contains an underscore instead of whitespace
        (a difference from the HTTP package which defaults to the opposite)
        """
        self.countries = {}
        self.redirects = {}
        self.lookup_path = lookup_path
        self.dump_redirect_path = dump_redirect_path
        with open(lookup_path, "rb") as f:
            raw_lookup: Dict[str, Dict] = json.load(f)
            self.lookup: Dict[str, WikipediaPage] = {
                title: WikipediaPage(
                    page["id"], page["title"], page["text"], page["url"]
                )
                for title, page in raw_lookup.items()
            }

        if COUNTRY_LIST_PATH:
            # Tab-separated file: key -> country page name (underscored).
            with open(COUNTRY_LIST_PATH) as f:
                for line in f:
                    k, v = line.split("\t")
                    self.countries[k] = v.replace(" ", "_").strip()

        if os.path.exists(self.dump_redirect_path):
            with open(self.dump_redirect_path, "rb") as f:
                self.redirects = pickle.load(f)
        else:
            raise ValueError(
                f"{self.dump_redirect_path} missing, run: luigi --module qanta.pipeline.preprocess "
                f"WikipediaRedirectPickle"
            )

    def load_country(self, key: str) -> WikipediaPage:
        """Return the page for *key* with the text of its "History_of_..." and
        "Geography_of_..." sub-pages appended when they exist in the lookup.

        Bug fix: the original did `content + " " + ...` where `content` was the
        WikipediaPage namedtuple itself, which raises TypeError (tuple + str)
        whenever a sub-page exists. We now accumulate the text and return a
        page record carrying the combined text, which also satisfies the
        WikipediaPage return type declared by __getitem__.
        """
        page = self.lookup[key]
        combined_text = page.text
        for sub_page in [f"{prefix}{self.countries[key]}" for prefix in COUNTRY_SUB]:
            if sub_page in self.lookup:
                combined_text = combined_text + " " + self.lookup[sub_page].text
        return page._replace(text=combined_text)

    def __getitem__(self, key: str) -> WikipediaPage:
        if key in self.countries:
            return self.load_country(key)
        else:
            return self.lookup[key]

    def __contains__(self, item):
        return item in self.lookup

    def __len__(self):
        return len(self.lookup)
416 Scottsdale, Lexington, KY 40511 (MLS #1821124) :: Sarahsold Inc. Fantastic Open Concept starter ranch w/garage-Ready to MOVE-IN!! Great layout for entertaining family & friends and nice flat lot with partially fenced yard. New flooring throughout-ALL kitchen appliances stay-new hardware-new light fixtures-all new paint-exterior recently power washed...this home is MOVE-IN Ready! Large great room with modern plank flooring and fireplace is open to the bright, updated kitchen. Huge Master bedroom has its own private full bath. The natural light throughout this house is nice & bright. Convenient to New Circle Road, I-75, I-64, and not far to downtown Lexington. MUST SEE! Co-agent is part owner of property. Listing provided courtesy of Delia Crumbaker of Lexrealty. Listing information © 2019 LBAR Multiple Listing Service. All rights reserved.
# -*- coding: utf-8 -*- """ Created on Sun Jul 17 21:56:26 2016 @author: cjs14 http://scicomp.stackexchange.com/questions/7030/plotting-a-2d-animated-data-surface-on-matplotlib """ from mpl_toolkits.mplot3d import axes3d import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from IPython.display import HTML from ipymd.plotting.JSAnimation.IPython_display import display_animation def generate(X, Y, phi): R = 1 - np.sqrt(X**2 + Y**2) return np.cos(2 * np.pi * X + phi) * R fig = plt.figure() ax = axes3d.Axes3D(fig) #plt.close() xs = np.linspace(-1, 1, 50) ys = np.linspace(-1, 1, 50) X, Y = np.meshgrid(xs, ys) Z = generate(X, Y, 0.0) wframe = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2) ax.set_zlim(-1,1) def update(i, ax, fig): ax.cla() phi = i * 360 / 2 / np.pi / 100 Z = generate(X, Y, phi) wframe = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2) ax.set_zlim(-1,1) return wframe, ani = animation.FuncAnimation(fig, update, frames=xrange(100), fargs=(ax, fig), interval=100) display_animation(ani)
Non-abelian Chabauty and rational points on curves. Progress on Mazur's Program B. A-gonality and points of degree d on curves. Torsion Points and Isogenies on CM Elliptic Curves. Elliptic Curves of Prime Conductor. Modular symbols for Fermat Curves. An $\ell-p$ switch trick to obtain a new proof of a criterion for arithmetic equivalence. Upper bounds for the endomorphism algebra of an abelian variety. Shioda's fourfold and CM Mumford's fourfold. Odd order obstructions to rational points on general K3 surfaces. Computing hyperelliptic modular invariants from period matrices. Galois actions associated to hyperelliptic curves over local fields. Conductors and minimal discriminants of hyperelliptic curves in odd residue characteristic. Computing Zeta Functions of Superelliptic Curves in Large Characteristic. Torsion Subgroups of Elliptic Curves over Function Fields. The geometric average size of Selmer groups over function fields. The Sato-Tate conjecture and Nagao's conjecture.
#-*- coding:UTF-8 -*-
#! /usr/bin/python
# Python 2 screen-scraping client for renren.com built on sgmllib.SGMLParser.
# It logs in with email/password, then (driven by the console loop at the
# bottom) can publish a status, visit a profile by id, or parse name/content
# pairs out of the last fetched page using the <h3>/<div> state machine below.
from sgmllib import SGMLParser
import sys,urllib,urllib2,cookielib
import datetime
import time
import getpass

class spider(SGMLParser):
    def __init__(self,email,password):
        SGMLParser.__init__(self)
        # Parser state flags consumed by the start_*/end_*/handle_data hooks.
        self.h3=False            # currently inside an <h3> element
        self.h3_is_ready=False   # an <h3> closed; a content <div> may follow
        self.div=False           # currently inside a <div class="content">
        self.h3_and_div=False    # saw an <h3> followed by a content <div>
        self.a=False             # inside an <a> nested in the h3/div of interest
        self.depth=0             # nesting depth of divs below the content div
        self.names=""            # accumulated link text of the current <h3>
        self.dic={}              # names -> list of scraped text fragments
        self.email=email
        self.password=password
        self.domain='renren.com'
        # Install a global opener with cookie support so the login session is
        # reused by every subsequent urlopen() call.
        try:
            cookie=cookielib.CookieJar()
            cookieProc=urllib2.HTTPCookieProcessor(cookie)
        except:
            raise
        else:
            opener=urllib2.build_opener(cookieProc)
            urllib2.install_opener(opener)

    def login(self):
        # POST credentials, then scrape the user id and the two request tokens
        # embedded in the response page; publish() needs all three.
        print '[%s] 开始登录' % datetime.datetime.now()
        url='http://www.renren.com/PLogin.do'
        postdata={
            'email':self.email,
            'password':self.password,
            'domain':self.domain
        }
        try:
            req=urllib2.Request(url,urllib.urlencode(postdata))
            self.file=urllib2.urlopen(req).read()
            idPos = self.file.index("'id':'")
            self.id=self.file[idPos+6:idPos+15]
            tokPos=self.file.index("get_check:'")
            self.tok=self.file[tokPos+11:tokPos+21]
            rtkPos=self.file.index("get_check_x:'")
            # NOTE(review): this slice is 8 chars wide while the token above is
            # 10 — presumably matches the site's _rtk length; confirm against
            # the live page before relying on it.
            self.rtk=self.file[rtkPos+13:rtkPos+21]
            print '[%s] 登录成功' % datetime.datetime.now()
        except:
            # Any parse/network failure is treated as a failed login.
            print '[%s] 登录失败' % datetime.datetime.now()
            sys.exit()

    def publish(self,content):
        # Post a status update through the shell.renren.com endpoint using the
        # id and tokens captured during login().
        url='http://shell.renren.com/'+self.id+'/status'
        postdata={
            'content':content,
            'hostid':self.id,
            'requestToken':self.tok,
            '_rtk':self.rtk,
            'channel':'renren',
        }
        req=urllib2.Request(url,urllib.urlencode(postdata))
        self.file=urllib2.urlopen(req).read()
        print '[%s] 刚才使用账号 %s 发了一条状态 %s' % (datetime.datetime.now(),self.email,postdata.get('content',''))

    def visit(self,content):
        # Fetch a user's profile page; the HTML is kept in self.file so the
        # caller can feed() it to this parser afterwards.
        url='http://www.renren.com/'+content+'/profile'
        self.file=urllib2.urlopen(url).read()
        print '[%s] 刚才使用账号 %s 访问了ID %s' % (datetime.datetime.now(),self.email,content)

    def start_h3(self,attrs):
        self.h3 = True

    def end_h3(self):
        self.h3=False
        self.h3_is_ready=True

    def start_a(self,attrs):
        # Only track anchors that appear inside the h3/div we care about.
        if self.h3 or self.div:
            self.a=True

    def end_a(self):
        self.a=False

    def start_div(self,attrs):
        # A content <div> is only meaningful right after an <h3> has closed.
        if self.h3_is_ready == False:
            return
        if self.div==True:
            # Already inside the content div: track nested divs by depth.
            self.depth += 1
        for k,v in attrs:
            if k == 'class' and v == 'content':
                self.div=True; self.h3_and_div=True

    def end_div(self):
        # depth == 0 means the content div itself is closing: reset the state
        # machine for the next h3/div pair.
        if self.depth == 0:
            self.div=False
            self.h3_and_div=False
            self.h3_is_ready=False
            self.names=""
        if self.div == True:
            self.depth-=1

    def handle_data(self,text):
        # Inside <h3><a>: accumulate the name text.
        if self.h3 and self.a:
            self.names+=text
        # Inside <h3> but outside <a>: record the fragment under the name.
        if self.h3 and (self.a==False):
            if not text:pass
            else:
                self.dic.setdefault(self.names,[]).append(text)
            return
        # Inside the content <div> that followed an <h3>.
        if self.h3_and_div:
            self.dic.setdefault(self.names,[]).append(text)

    def show(self):
        # Print the scraped name/content pairs re-encoded for the console.
        type = sys.getfilesystemencoding()
        for key in self.dic:
            print ( (''.join(key)).replace(' ','')).decode('utf-8').encode(type), \
                ( (''.join(self.dic[key])).replace(' ','')).decode('utf-8').encode(type)

# Interactive driver: log in, then loop on operation codes until '000'.
# 120 = publish a status, 200 = visit a profile id, 100 = parse & show the
# last fetched page.
email=raw_input('请输入用户名:')
password=getpass.getpass('请输入密码:')
renrenspider=spider(email,password)
renrenspider.login()
mode=999
while(mode!='000'):
    mode=raw_input('请输入操作代码:')
    if(mode=='120'):
        content=raw_input('请输入状态内容:')
        renrenspider.publish(content)
    if(mode=='200'):
        content=raw_input('请输入要访问的ID:')
        renrenspider.visit(content)
    if(mode=='100'):
        renrenspider.feed(renrenspider.file)
        renrenspider.show()
sys.exit()
LAMPANG (AFP): Police in this northern Thai province were today looking for the owner of a female elephant which was in a collision with a truck, resulting in the deaths of two people. Police tracked down the elephant yesterday, a day after it collided with a fuel tanker truck while crossing a main road leading to Chiang Mai. The truck overturned and burst into flames, killing the driver and his girlfriend. One of the elephant’s legs was slightly injured in the accident. “The elephant’s owner could be prosecuted on both criminal and civil grounds for recklessness in allowing the elephant to wander across the road,” said Pol Capt Santi Jansak of Hangchat police station.
#################################################################
# MET v2 Metadate Explorer Tool
#
# This Software is Open Source. See License: https://github.com/TERENA/met/blob/master/LICENSE.md
# Copyright (c) 2012, TERENA All rights reserved.
#
# This Software is based on MET v1 developed for TERENA by Yaco Sistemas, http://www.yaco.es/
# MET v2 was developed for TERENA by Tamim Ziai, DAASI International GmbH, http://www.daasi.de
# Current version of MET has been revised for performance improvements by Andrea Biancini,
# Consortium GARR, http://www.garr.it
##########################################################################
# Django template tags and filters for the metadataparser app (Python 2):
# GET-parameter helpers, inclusion tags that render federation/entity lists,
# export menus, count helpers, and small presentation filters.

from django import template
from django.template.base import Node, TemplateSyntaxError
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe, SafeData

from met.metadataparser.models import Federation
from met.metadataparser.xmlparser import DESCRIPTOR_TYPES, DESCRIPTOR_TYPES_DISPLAY
from met.metadataparser.query_export import export_modes
from met.metadataparser.summary_export import export_summary_modes

from urllib import urlencode

register = template.Library()


class AddGetParameter(Node):
    # Template node that re-emits the current request's GET query string with
    # the given key/value pairs added or overridden.
    def __init__(self, values):
        self.values = values

    def render(self, context):
        req = template.resolve_variable('request', context)
        params = req.GET.copy()
        for key, value in self.values.items():
            params[key] = value.resolve(context)
        return '?%s' % params.urlencode()


@register.tag()
def add_get(parser, token):
    # Usage: {% add_get key=value ... %} — each value is a template filter
    # expression resolved at render time.
    pairs = token.split_contents()[1:]
    values = {}
    for pair in pairs:
        s = pair.split('=', 1)
        values[s[0]] = parser.compile_filter(s[1])
    return AddGetParameter(values)


@register.inclusion_tag('metadataparser/bootstrap_form.html')
def bootstrap_form(form, cancel_link='..', delete_link=True):
    # Render a form with Bootstrap markup.
    return {'form': form, 'cancel_link': cancel_link, 'delete_link': delete_link}


@register.inclusion_tag('metadataparser/bootstrap_searchform.html')
def bootstrap_searchform(form):
    # Render a search form with Bootstrap markup.
    return {'form': form}


@register.inclusion_tag('metadataparser/federations_summary_tag.html', takes_context=True)
def federations_summary(context, queryname, counts, federations=None):
    # Summary table of federations; defaults to all federations.
    if not federations:
        federations = Federation.objects.all()
    user = context.get('user', None)
    add_federation = user and user.has_perm('metadataparser.add_federation')
    return {'federations': federations,
            'add_federation': add_federation,
            'queryname': queryname,
            'counts': counts,
            'entity_types': DESCRIPTOR_TYPES}


@register.inclusion_tag('metadataparser/interfederations_summary_tag.html', takes_context=True)
def interfederations_summary(context, queryname, counts, federations=None):
    # Same context as federations_summary, rendered with the
    # interfederations template.
    if not federations:
        federations = Federation.objects.all()
    user = context.get('user', None)
    add_federation = user and user.has_perm('metadataparser.add_federation')
    return {'federations': federations,
            'add_federation': add_federation,
            'queryname': queryname,
            'counts': counts,
            'entity_types': DESCRIPTOR_TYPES}


@register.inclusion_tag('metadataparser/tag_entity_list.html', takes_context=True)
def entity_list(context, entities, categories=None, pagination=None, curfed=None,
                show_total=True, append_query=None, onclick_page=None, onclick_export=None):
    # Paginated entity table; display language comes from ?lang= (default 'en').
    request = context.get('request', None)
    lang = 'en'
    if request:
        lang = request.GET.get('lang', 'en')
    return {'request': request,
            'entities': entities,
            'categories': categories,
            'curfed': curfed,
            'show_filters': context.get('show_filters'),
            'append_query': append_query,
            'show_total': show_total,
            'lang': lang,
            'pagination': pagination,
            'onclick_page': onclick_page,
            'onclick_export': onclick_export,
            'entity_types': DESCRIPTOR_TYPES}


@register.inclusion_tag('metadataparser/most_fed_entities_summary.html', takes_context=True)
def most_fed_entity_list(context, entities, categories=None, pagination=None, curfed=None,
                         show_total=True, append_query=None, onclick_page=None, onclick_export=None):
    # Same context shape as entity_list, rendered with the
    # most-federated-entities template.
    request = context.get('request', None)
    lang = 'en'
    if request:
        lang = request.GET.get('lang', 'en')
    return {'request': request,
            'entities': entities,
            'categories': categories,
            'curfed': curfed,
            'show_filters': context.get('show_filters'),
            'append_query': append_query,
            'show_total': show_total,
            'lang': lang,
            'pagination': pagination,
            'onclick_page': onclick_page,
            'onclick_export': onclick_export,
            'entity_types': DESCRIPTOR_TYPES}


@register.inclusion_tag('metadataparser/service_search_summary.html', takes_context=True)
def service_search_result(context, entities, categories=None, pagination=None, curfed=None,
                          show_total=True, append_query=None, onclick_page=None, onclick_export=None):
    # Same context shape as entity_list, rendered with the service-search
    # template.
    request = context.get('request', None)
    lang = 'en'
    if request:
        lang = request.GET.get('lang', 'en')
    return {'request': request,
            'entities': entities,
            'categories': categories,
            'curfed': curfed,
            'show_filters': context.get('show_filters'),
            'append_query': append_query,
            'show_total': show_total,
            'lang': lang,
            'pagination': pagination,
            'onclick_page': onclick_page,
            'onclick_export': onclick_export,
            'entity_types': DESCRIPTOR_TYPES}


@register.inclusion_tag('metadataparser/tag_entity_filters.html', takes_context=True)
def entity_filters(context, entities, categories):
    # Build the filter bar: current type/category selections come from the
    # query string; the remaining GET parameters are preserved (minus the
    # filter keys and pagination) so filter links keep the rest of the query.
    entity_types = ('All', ) + DESCRIPTOR_TYPES
    request = context.get('request')
    entity_type = request.GET.get('entity_type', '')
    entity_category = request.GET.get('entity_category', '')
    rquery = request.GET.copy()
    for filt in 'entity_type', 'entity_category', 'page':
        if filt in rquery:
            rquery.pop(filt)
    if not entity_type:
        entity_type = 'All'
    if not entity_category:
        entity_category = 'All'
    query = urlencode(rquery)
    filter_base_path = request.path
    return {'filter_base_path': filter_base_path,
            'otherparams': query,
            'entity_types': entity_types,
            'entity_type': entity_type,
            'entity_category': entity_category,
            'entities': entities,
            'categories': categories}


@register.simple_tag()
def entity_filter_url(base_path, filt, otherparams=None):
    # URL for an entity_type filter link; 'All' means no filter parameter.
    url = base_path
    if filt != 'All':
        url += '?entity_type=%s' % filt
        if otherparams:
            url += '&%s' % otherparams
    elif otherparams:
        url += '?%s' % otherparams
    return url


@register.simple_tag()
def entitycategory_filter_url(base_path, filt, otherparams=None):
    # URL for an entity_category filter link; 'All' means no filter parameter.
    url = base_path
    if filt != 'All':
        url += '?entity_category=%s' % filt
        if otherparams:
            url += '&%s' % otherparams
    elif otherparams:
        url += '?%s' % otherparams
    return url


@register.inclusion_tag('metadataparser/export-menu.html', takes_context=True)
def export_menu(context, entities, append_query=None, onclick=None):
    # Export links for every supported export format, preserving the current
    # query string (minus pagination).
    request = context.get('request')
    copy_query = request.GET.copy()
    if 'page' in copy_query:
        copy_query.pop('page')
    query = copy_query.urlencode()
    base_path = request.path
    formats = []
    for mode in export_modes.keys():
        url = base_path
        if query:
            url += '?%s&format=%s' % (query, mode)
        else:
            url += '?format=%s' % (mode)
        if append_query:
            url += "&%s" % (append_query)
        formats.append({'url': url, 'label': mode, 'onclick': onclick})
    return {'formats': formats}


@register.inclusion_tag('metadataparser/export-menu.html')
def export_summary_menu(query, onclick=None):
    # Export links for the summary export formats.
    formats = []
    for mode in export_summary_modes.keys():
        urlquery = {'format': mode, 'export': query}
        url = "./?%(query)s" % {'query': urlencode(urlquery)}
        formats.append({'url': url, 'label': mode, 'onclick': onclick})
    return {'formats': formats}


@register.simple_tag()
def entities_count(entity_qs, entity_type=None):
    # Count entities in the queryset, optionally restricted to one type.
    if entity_type and entity_type != 'All':
        return entity_qs.filter(types__xmlname=entity_type).count()
    else:
        return entity_qs.count()


@register.simple_tag()
def get_fed_total(totals, entity_type='All'):
    # Sum per-type totals, optionally restricted to one entity type.
    tot_count = 0
    for curtotal in totals:
        if entity_type == 'All' or curtotal['types__xmlname'] == entity_type:
            tot_count += curtotal['types__xmlname__count']
    return tot_count


@register.simple_tag()
def get_fed_count(counts, federation='All', entity_type='All'):
    # Sum the per-federation counts for one entity type, optionally
    # restricted to a single federation id.
    count = counts[entity_type]
    fed_count = 0
    for curcount in count:
        if federation == 'All' or curcount['federations__id'] == federation:
            fed_count += curcount['federations__id__count']
    return fed_count


@register.simple_tag()
def get_fed_count_by_country(count, country='All'):
    # Sum the per-country federation counts, optionally for one country.
    fed_count = 0
    for curcount in count:
        if country == 'All' or curcount['federations__country'] == country:
            fed_count += curcount['federations__country__count']
    return fed_count


@register.simple_tag(takes_context=True)
def l10n_property(context, prop, lang):
    # Pick the localized value from a {lang: value} dict, falling back to the
    # context language and then to an arbitrary first entry.
    if isinstance(prop, dict) and len(prop) > 0:
        if not lang:
            lang = context.get('LANGUAGE_CODE', None)
        if lang and lang in prop:
            return prop.get(lang)
        else:
            return prop[prop.keys()[0]]
    return prop


@register.simple_tag(takes_context=True)
def organization_property(context, organizations, prop, lang):
    # Pick *prop* from the organization matching *lang*; falls back to the
    # first organization that defines the property.
    if not isinstance(organizations, list):
        return prop
    lang = lang or context.get('LANGUAGE_CODE', None)
    val = None
    for organization in organizations:
        if prop in organization:
            if val is None:
                val = organization[prop]
            if organization['lang'] == lang:
                val = organization[prop]
    return val


@register.simple_tag()
def get_property(obj, prop=None):
    # Generic property renderer: without a prop, link to the object itself;
    # dicts are looked up by key; related managers render as a list of links;
    # plain lists are comma-joined; anything else is returned as-is.
    uprop = unicode(prop)
    if not uprop:
        return '<a href="%(link)s">%(name)s</a>' % {"link": obj.get_absolute_url(),
                                                    "name": unicode(obj)}
    if isinstance(obj, dict):
        return obj.get(prop, None)
    if getattr(getattr(obj, uprop, None), 'all', None):
        return '. '.join(['<a href="%(link)s">%(name)s</a>' % {"link": item.get_absolute_url(),
                                                               "name": unicode(item)}
                          for item in getattr(obj, uprop).all()])
    if isinstance(getattr(obj, uprop, ''), list):
        return ', '.join(getattr(obj, uprop, []))
    return getattr(obj, uprop, '')


@register.simple_tag(takes_context=True)
def active_url(context, pattern):
    # Return 'active' (CSS class) when the current path matches the pattern.
    request = context.get('request')
    if request.path == pattern:
        return 'active'
    return ''


@register.filter(name='display_etype')
def display_etype(value, separator=', '):
    # Human-readable rendering of entity type(s): joins lists/querysets and
    # maps single values through the display table.
    if isinstance(value, list):
        return separator.join(value)
    elif hasattr(value, 'all'):
        return separator.join([unicode(item) for item in value.all()])
    else:
        if value in DESCRIPTOR_TYPES_DISPLAY:
            return DESCRIPTOR_TYPES_DISPLAY.get(value)
        else:
            return value


@register.filter(name='mailto')
def mailto(value):
    # Ensure the value is a mailto: link.
    if value.startswith('mailto:'):
        return value
    else:
        return 'mailto:%s' % value


@register.filter(name='wrap')
def wrap(value, length):
    # Truncate the value to *length* characters with an ellipsis.
    value = unicode(value)
    if len(value) > length:
        return "%s..." % value[:length]
    return value


class CanEdit(Node):
    # Template node that renders its body only when the context user may edit
    # the resolved object (obj.can_edit).
    child_nodelists = 'nodelist'

    def __init__(self, obj, nodelist):
        self.obj = obj
        self.nodelist = nodelist

    @classmethod
    def __repr__(cls):
        return "<CanEdit>"

    def render(self, context):
        obj = self.obj.resolve(context, True)
        user = context.get('user')
        if obj.can_edit(user, False):
            return self.nodelist.render(context)
        else:
            return ''


def do_canedit(parser, token):
    # Parse "{% canedit obj %}...{% endcanedit %}" into a CanEdit node.
    bits = list(token.split_contents())
    if len(bits) != 2:
        raise TemplateSyntaxError("%r takes 1 argument" % bits[0])
    end_tag = 'end' + bits[0]
    nodelist = parser.parse((end_tag,))
    obj = parser.compile_filter(bits[1])
    token = parser.next_token()
    return CanEdit(obj, nodelist)


@register.tag
def canedit(parser, token):
    """
    Outputs the contents of the block if user has edit permission

    Examples::

        {% canedit obj %}
            ...
        {% endcanedit %}
    """
    return do_canedit(parser, token)


@register.filter
@stringfilter
def split(value, splitter='|'):
    # Split a string filter value, marking it safe first so the pieces keep
    # their safety flag.
    if not isinstance(value, SafeData):
        value = mark_safe(value)
    return value.split(splitter)
In the worksheet on line graphs, points are plotted on the graph relating two variables and then the points are joined by line segments. Draw the line graph for the above data. 2. Draw the line graph showing the following information. The table shows the colours favoured by a group of people. 3. Can there be time-temperature graphs as follows? Give reasons. 4. The following tables give the information about a patient’s body temperature recorded in the hospital every hour. Represent this information on a line graph. Answers for the worksheet on line graphs are given below to check the exact graphs and the answers to the above questions.
import os
import subprocess
import sys

from .app import app


class linux(app):
    """Create a Linux installer wrapping the project.

    Linux builds have no icon or splash support, and the system Python is
    used directly instead of bundling a support package.
    """

    description = "Create a Linux installer to wrap this project"

    def finalize_options(self):
        """Fill unset options from the base 'app' command, then apply
        Linux-specific defaults (platform name, output directory)."""
        # Copy over all the options from the base 'app' command
        finalized = self.get_finalized_command('app')
        for attr in ('formal_name', 'organization_name', 'bundle', 'icon',
                     'guid', 'splash', 'download_dir'):
            if getattr(self, attr) is None:
                setattr(self, attr, getattr(finalized, attr))

        super(linux, self).finalize_options()

        # Set platform-specific options
        self.platform = 'Linux'

        if self.dir is None:
            self.dir = 'linux'

        self.resource_dir = self.dir

    def install_icon(self):
        # BUG FIX: message previously read "Linux doesn't support icons
        # screens." — a copy/paste remnant of install_splash's message.
        raise RuntimeError("Linux doesn't support icons.")

    def install_splash(self):
        raise RuntimeError("Linux doesn't support splash screens.")

    def install_support_package(self):
        # No support package; we just use the system python
        pass

    @property
    def launcher_header(self):
        """ Override the shebang line for launcher scripts """
        return "#!python{}.{}\n".format(
            sys.version_info.major, sys.version_info.minor)

    @property
    def launcher_script_location(self):
        return self.resource_dir

    def build_app(self):
        # Nothing to compile on Linux; the app directory is used as-is.
        return True

    def post_build(self):
        pass

    def start_app(self):
        """Launch the built application binary and block until it exits."""
        print("Starting {}".format(self.formal_name))
        subprocess.Popen(
            ['./{}'.format(self.formal_name)],
            cwd=os.path.abspath(self.dir)
        ).wait()
Included below are homes for sale in Currie Barracks, a neighbourhood in west Calgary. The neighbourhood of Currie Barracks is bordered by Richmond Green Golf Course in the north, Crowchild Trail in the east, Richardson Way in the south and Sarcee Road in the west. While the area began as a military site prior to World War I and soon became the Canadian Forces Base (CFB) in Calgary, it wasn't until its closure in 1995 that it began to be established as a community. With its close proximity to Marda Loop and downtown, Currie Barracks is ideal for those who want to settle close to the city. Currie Barracks is conveniently located close to a variety of green spaces, including Sandy Beach Park and River Park, where cycling trails and walking paths can be enjoyed. There are also many golf courses close by, including Lakeview, Earl Grey and Richmond Green Golf Course. For those who like to be close to the amenities of the city, the retail area of Marda Loop is close by and features many stores, including Safeway and Shoppers Drug Mart, as well as restaurants like Belmont Diner, Avenue Deli and Original Joe's Restaurant and Bar. The main hub of Crowchild Trail borders the area in the east and provides access in the north to the University of Calgary and Brentwood, and access in the south to Lakeview and Glenmore Trail. Richmond Road runs just north of Currie Barracks and provides access to Marda Loop in the east and to Westhills in the west. The Calgary International Airport is approximately 30 minutes away by car and offers flights to a variety of destinations around the world. For those who want to live close to the main routes and the popular downtown core, Currie Barracks offers a quieter way of life close to the city. Contact us at Liv Real Estate® today to learn more about Currie Barracks and real estate opportunities in the local area.
For more information on any of these Currie Barracks listings, just click the "Request More Information" button when viewing the details of a property. We can provide you with disclosures, past sales history, dates and prices of homes recently sold nearby, and more. And, if you haven't already, be sure to register for a free account so that you can receive email alerts whenever new Currie Barracks real estate listings come on the market.
# copyright (c) 2019 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.

import unittest
import os
import sys
import argparse
import logging
import struct
import six
import numpy as np
import time
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantInt8MkldnnPass
from paddle.fluid import core

paddle.enable_static()

logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s')
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)


def parse_args():
    """Parse the flags this test understands; leftovers go back to unittest.

    Returns (test_args, unittest_argv).  Note parse_known_args is given
    ``unittest`` as the namespace, so parsed flags also land as attributes
    on the unittest module object.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=1, help='Batch size.')
    parser.add_argument(
        '--skip_batch_num',
        type=int,
        default=0,
        help='Number of the first minibatches to skip in performance statistics.'
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        help='If used, the graph of Quant model is drawn.')
    parser.add_argument(
        '--quant_model', type=str, default='', help='A path to a Quant model.')
    parser.add_argument('--infer_data', type=str, default='', help='Data file.')
    parser.add_argument(
        '--batch_num',
        type=int,
        default=0,
        help='Number of batches to process. 0 or less means whole dataset. Default: 0.'
    )
    parser.add_argument(
        '--acc_diff_threshold',
        type=float,
        default=0.01,
        help='Accepted accuracy difference threshold.')

    test_args, args = parser.parse_known_args(namespace=unittest)
    return test_args, sys.argv[:1] + args


class QuantInt8ImageClassificationComparisonTest(unittest.TestCase):
    """
    Test for accuracy comparison of Quant FP32 and INT8 Image Classification inference.
    """

    def _reader_creator(self, data_file='data.bin'):
        # Returns a reader factory over a packed binary dataset:
        # an int64 image count, then `num` float32 CHW (3x224x224) images,
        # then `num` int64 labels.  Each yield is (ndarray image, int label).
        def reader():
            with open(data_file, 'rb') as fp:
                num = fp.read(8)
                num = struct.unpack('q', num)[0]
                imgs_offset = 8
                img_ch = 3
                img_w = 224
                img_h = 224
                img_pixel_size = 4  # float32 bytes per pixel component
                img_size = img_ch * img_h * img_w * img_pixel_size
                label_size = 8  # int64
                labels_offset = imgs_offset + num * img_size
                step = 0

                while step < num:
                    fp.seek(imgs_offset + img_size * step)
                    img = fp.read(img_size)
                    img = struct.unpack_from(
                        '{}f'.format(img_ch * img_w * img_h), img)
                    img = np.array(img)
                    img.shape = (img_ch, img_w, img_h)
                    fp.seek(labels_offset + label_size * step)
                    label = fp.read(label_size)
                    label = struct.unpack('q', label)[0]
                    yield img, int(label)
                    step += 1

        return reader

    def _get_batch_accuracy(self, batch_output=None, labels=None):
        # Compute top-1 and top-5 accuracy for one batch of class scores.
        total = 0
        correct = 0
        correct_5 = 0
        for n, result in enumerate(batch_output):
            index = result.argsort()
            top_1_index = index[-1]
            top_5_index = index[-5:]
            total += 1
            if top_1_index == labels[n]:
                correct += 1
            if labels[n] in top_5_index:
                correct_5 += 1
        acc1 = float(correct) / float(total)
        acc5 = float(correct_5) / float(total)
        return acc1, acc5

    def _prepare_for_fp32_mkldnn(self, graph):
        # Rewrite every depthwise_conv2d op into an equivalent conv2d op
        # (same inputs/outputs/attrs) so the FP32 graph runs with MKL-DNN.
        ops = graph.all_op_nodes()
        for op_node in ops:
            name = op_node.name()
            if name in ['depthwise_conv2d']:
                input_var_node = graph._find_node_by_name(
                    op_node.inputs, op_node.input("Input")[0])
                weight_var_node = graph._find_node_by_name(
                    op_node.inputs, op_node.input("Filter")[0])
                output_var_node = graph._find_node_by_name(
                    graph.all_var_nodes(), op_node.output("Output")[0])
                attrs = {
                    name: op_node.op().attr(name)
                    for name in op_node.op().attr_names()
                }

                conv_op_node = graph.create_op_node(
                    op_type='conv2d',
                    attrs=attrs,
                    inputs={
                        'Input': input_var_node,
                        'Filter': weight_var_node
                    },
                    outputs={'Output': output_var_node})

                # Re-wire the graph edges around the replacement node,
                # then drop the original op.
                graph.link_to(input_var_node, conv_op_node)
                graph.link_to(weight_var_node, conv_op_node)
                graph.link_to(conv_op_node, output_var_node)
                graph.safe_remove_nodes(op_node)
        return graph

    def _predict(self,
                 test_reader=None,
                 model_path=None,
                 batch_size=1,
                 batch_num=1,
                 skip_batch_num=0,
                 transform_to_int8=False):
        # Run inference over the reader, optionally applying the INT8
        # MKL-DNN transform first.  Returns
        # (outputs, avg_top1, avg_top5, avg_fps, avg_latency_ms).
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        inference_scope = fluid.executor.global_scope()
        with fluid.scope_guard(inference_scope):
            # Two on-disk layouts are supported: a '__model__' file, or
            # separate 'model'/'params' files.
            if os.path.exists(os.path.join(model_path, '__model__')):
                [inference_program, feed_target_names,
                 fetch_targets] = fluid.io.load_inference_model(model_path, exe)
            else:
                [inference_program, feed_target_names,
                 fetch_targets] = fluid.io.load_inference_model(
                     model_path, exe, 'model', 'params')

            graph = IrGraph(core.Graph(inference_program.desc), for_test=True)
            if (self._debug):
                graph.draw('.', 'quant_orig', graph.all_op_nodes())
            if (transform_to_int8):
                mkldnn_int8_pass = QuantInt8MkldnnPass(
                    _scope=inference_scope, _place=place)
                graph = mkldnn_int8_pass.apply(graph)
            else:
                graph = self._prepare_for_fp32_mkldnn(graph)

            inference_program = graph.to_program()

            dshape = [3, 224, 224]
            outputs = []
            infer_accs1 = []
            infer_accs5 = []
            fpses = []
            batch_times = []
            total_samples = 0
            iters = 0
            infer_start_time = time.time()
            for data in test_reader():
                if batch_num > 0 and iters >= batch_num:
                    break
                # Restart the wall clock once warm-up batches are done.
                if iters == skip_batch_num:
                    total_samples = 0
                    infer_start_time = time.time()
                if six.PY2:
                    images = map(lambda x: x[0].reshape(dshape), data)
                if six.PY3:
                    images = list(map(lambda x: x[0].reshape(dshape), data))
                images = np.array(images).astype('float32')
                labels = np.array([x[1] for x in data]).astype('int64')

                start = time.time()
                out = exe.run(inference_program,
                              feed={feed_target_names[0]: images},
                              fetch_list=fetch_targets)
                batch_time = (time.time() - start) * 1000  # in miliseconds
                outputs.append(out[0])
                batch_acc1, batch_acc5 = self._get_batch_accuracy(out[0],
                                                                  labels)
                infer_accs1.append(batch_acc1)
                infer_accs5.append(batch_acc5)
                samples = len(data)
                total_samples += samples
                batch_times.append(batch_time)
                fps = samples / batch_time * 1000
                fpses.append(fps)
                iters += 1
                appx = ' (warm-up)' if iters <= skip_batch_num else ''
                _logger.info('batch {0}{5}, acc1: {1:.4f}, acc5: {2:.4f}, '
                             'latency: {3:.4f} ms, fps: {4:.2f}'.format(
                                 iters, batch_acc1, batch_acc5,
                                 batch_time / batch_size, fps, appx))

            # Postprocess benchmark data
            batch_latencies = batch_times[skip_batch_num:]
            batch_latency_avg = np.average(batch_latencies)
            latency_avg = batch_latency_avg / batch_size
            fpses = fpses[skip_batch_num:]
            fps_avg = np.average(fpses)
            infer_total_time = time.time() - infer_start_time
            acc1_avg = np.mean(infer_accs1)
            acc5_avg = np.mean(infer_accs5)
            _logger.info('Total inference run time: {:.2f} s'.format(
                infer_total_time))

            return outputs, acc1_avg, acc5_avg, fps_avg, latency_avg

    def _summarize_performance(self, fp32_fps, fp32_lat, int8_fps, int8_lat):
        # Log a side-by-side FP32 vs INT8 throughput/latency summary.
        _logger.info('--- Performance summary ---')
        _logger.info('FP32: avg fps: {0:.2f}, avg latency: {1:.4f} ms'.format(
            fp32_fps, fp32_lat))
        _logger.info('INT8: avg fps: {0:.2f}, avg latency: {1:.4f} ms'.format(
            int8_fps, int8_lat))

    def _compare_accuracy(self, fp32_acc1, fp32_acc5, int8_acc1, int8_acc5,
                          threshold):
        # Log both accuracies and assert the INT8 top-1 drop stays within
        # the accepted threshold.
        _logger.info('--- Accuracy summary ---')
        _logger.info(
            'Accepted top1 accuracy drop threshold: {0}. (condition: (FP32_top1_acc - IN8_top1_acc) <= threshold)'
            .format(threshold))
        _logger.info(
            'FP32: avg top1 accuracy: {0:.4f}, avg top5 accuracy: {1:.4f}'.
            format(fp32_acc1, fp32_acc5))
        _logger.info(
            'INT8: avg top1 accuracy: {0:.4f}, avg top5 accuracy: {1:.4f}'.
            format(int8_acc1, int8_acc5))
        assert fp32_acc1 > 0.0
        assert int8_acc1 > 0.0
        assert fp32_acc1 - int8_acc1 <= threshold

    def test_graph_transformation(self):
        # End-to-end check: run the Quant model in FP32 and INT8 modes and
        # compare performance and accuracy.  Reads flags from the global
        # test_case_args populated in __main__.
        if not fluid.core.is_compiled_with_mkldnn():
            return

        quant_model_path = test_case_args.quant_model
        assert quant_model_path, 'The Quant model path cannot be empty. Please, use the --quant_model option.'
        data_path = test_case_args.infer_data
        assert data_path, 'The dataset path cannot be empty. Please, use the --infer_data option.'
        batch_size = test_case_args.batch_size
        batch_num = test_case_args.batch_num
        skip_batch_num = test_case_args.skip_batch_num
        acc_diff_threshold = test_case_args.acc_diff_threshold
        self._debug = test_case_args.debug

        _logger.info('Quant FP32 & INT8 prediction run.')
        _logger.info('Quant model: {0}'.format(quant_model_path))
        _logger.info('Dataset: {0}'.format(data_path))
        _logger.info('Batch size: {0}'.format(batch_size))
        _logger.info('Batch number: {0}'.format(batch_num))
        _logger.info('Accuracy drop threshold: {0}.'.format(acc_diff_threshold))

        _logger.info('--- Quant FP32 prediction start ---')
        val_reader = paddle.batch(
            self._reader_creator(data_path), batch_size=batch_size)
        fp32_output, fp32_acc1, fp32_acc5, fp32_fps, fp32_lat = self._predict(
            val_reader,
            quant_model_path,
            batch_size,
            batch_num,
            skip_batch_num,
            transform_to_int8=False)
        _logger.info('--- Quant INT8 prediction start ---')
        val_reader = paddle.batch(
            self._reader_creator(data_path), batch_size=batch_size)
        int8_output, int8_acc1, int8_acc5, int8_fps, int8_lat = self._predict(
            val_reader,
            quant_model_path,
            batch_size,
            batch_num,
            skip_batch_num,
            transform_to_int8=True)

        self._summarize_performance(fp32_fps, fp32_lat, int8_fps, int8_lat)
        self._compare_accuracy(fp32_acc1, fp32_acc5, int8_acc1, int8_acc5,
                               acc_diff_threshold)


if __name__ == '__main__':
    global test_case_args
    test_case_args, remaining_args = parse_args()
    unittest.main(argv=remaining_args)
All of this, the New England Black Wolves' road work against the top teams in the National Lacrosse League's Eastern Division, is in preparation for one thing: the playoffs. UConn's Katie Lou Samuelson and Napheesa Collier were drafted fourth and sixth, respectively, by the Chicago Sky and Minnesota Lynx at Wednesday night's WNBA Draft. Arike Ogunbowale scored 23 points, including four straight free throws in the final minute, as Notre Dame rallied from a nine-point deficit in the fourth quarter to defeat UConn 81-76 in the national semifinals Friday night. 2 p.m., MLB Network — MLB: Arizona Diamondbacks at Chicago Cubs. 7 p.m., NESN Plus, WTIC (1080-AM), WEEI (103.7-FM) — MLB: Boston Red Sox at Tampa Bay Rays. 7 p.m., WCCT, WFAN (660-AM) — MLB: Kansas City Royals at New York Yankees. 7:30 p.m., FS1 — NCAA: Baylor at Texas Tech. 8 p.m., SNY, WCBS (880-AM) — MLB: New York Mets at St. Louis Cardinals. 7 p.m., ESPN — NBA playoffs: Toronto Raptors at Orlando Magic. 8:30 p.m., NBCSB, Ch. 8 — NBA playoffs: Boston Celtics at Indiana Pacers. 9:30 p.m., ESPN — NBA playoffs: Portland Trail Blazers at Oklahoma City Thunder. Noon, Golf Channel — Champions: Mitsubishi Electric Classic from Duluth, Ga. 3 p.m., Golf Channel — PGA: RBC Heritage from Hilton Head Island, S.C. 7 p.m., Golf Channel — LPGA: LOTTE Championship from Kapolei, Hawaii. 1 p.m., ESPN2 — NCAA women: NCAA Championship from Fort Worth, Texas. 7 p.m., ESPNU — NCAA women: NCAA Championship from Fort Worth, Texas. 7:30 p.m., ESPN2 — NCAA women: NCAA Championship from Fort Worth, Texas. 7 p.m., NESN, NBCSN — NHL playoffs: Toronto Maple Leafs at Boston Bruins. 10 p.m., NBCSN — NHL playoffs: Colorado Avalanche at Calgary Flames. 5 p.m., ESPNU — NCAA women: Penn State at Johns Hopkins. 5:30 p.m., ESPN2 — NCAA: Alabama at Florida. 5 a.m., Tennis Channel — ATP: Monte Carlo Masters from Monaco.
#! /usr/bin/env python #-*- coding:utf-8 -*- __author__ = 'p0123n' import MySQLdb from MySQLdb import cursors def keepSingleConn(cls): instances = dict() def getInstance(): if cls not in instances: instances[cls] = cls() return instances[cls] return getInstance @keepSingleConn class Connection(): def __init__(self): self.connection = None self.cursor = None def connect(self, params): connection = MySQLdb.connect( host = params['addr'], port = params['port'], user = params['user'], passwd = params['pass'], db = params['name'], cursorclass = params['curs'] ) self.cursor = connection.cursor() self.cursor.execute('set names utf8') self.cursor.execute('set session time_zone="%s"' % params['tmzn']) return connection, self.cursor class Query(): def __init__(self, params): params['curs'] = cursors.SSDictCursor self.connection, self.cursor = Connection().connect(params) def query(self, query): self.cursor.execute(query) self.connection.commit() def __iter__(self): for row in self.connection: yield row def __enter__(self): return self.cursor def __ex_t__(self,ext_type,exc_value,traceback): self.connection.close() if isinstance(exc_value, Exception): self.connection.rollback() else: self.connection.commit() self.connection.close() class Dump(): def __init__(self, params): params['curs'] = cursors.SSCursor self.connection, self.cursor = Connection().connect(params) def dump(self, query, filn=None, dirn='.', sep=';', pref='mygate'): if not filn: from datetime import datetime filn = datetime.today().strftime('%Y-%m-%d(%H%M%S).csv') filn = '%s/%s_%s' % (dirn, pref, filn) else: filn = '%s/%s%s' % (dirn, pref, filn) fn = open(filn, 'w') self.cursor.execute(query) rows = 0 for row in self.cursor: fn.write(sep.join( str(field) for field in row ) + '\n') rows += 1 fn.close() return filn, rows class MyGate(): def __init__(self, params): self._query = None self._dump = None self._params= params def query(self, *args, **kwargs): self._query = self._query or Query(self._params) return 
self._query.query(*args, **kwargs) def dump(self, *args, **kwargs): self._dump = self._dump or Dump(self._params) return self._dump.dump(*args, **kwargs) if __name__ == '__main__': print 'Hi.'
586 Records is a new and innovative venture for Newcastle: a vinyl-only record store, manned and managed by Antony Daly, a respected DJ and co-promoter of the "Reverb" and "Suono" club nights. He has helped to shape the city's non-commercial nightlife over the last 15 years. His store is located within Commercial Union House on Pilgrim Street, on the 3rd floor, under the protective wing of B&D Studios. Within it, there are thousands of 12-inch records and albums for you to peruse. Genres available range from 60's Soul, Disco, Classic Albums, 80's Alternative, House, Techno and Nu Med Tear Jerkers. Prices start from £2, meaning there are many bargains to be had. To add to the experience, there are three Technics turntables with earphones, so you can listen to records before you buy. A breakfast bar has been installed, so you can enjoy a hot drink, do some work on your laptop or just chat and relax! "I'm hoping to bring back the social aspect of buying records, generating interaction and interest between myself and collectors of vinyl. Putting them onto records they may not know, but will hopefully enjoy. All of this, in a relaxed atmosphere, with a friendly and slightly knowledgeable face behind the counter".
import math

import tensorflow as tf

from recognition import sketch_utils as utils
import custom_recogntion_utilities as training_helpers
from generated_proto import sketch_pb2 as Sketch
from recognition.generation import feature_generation as features

# Index constants for point tuples.
X = 0
Y = 1
ID = 2


class Recognizer:
    # One-vs-rest sketch classifier for a single label, built on a
    # TF 1.x feature-column network (two output classes: label / not-label).
    num_points = 32
    classifier = None
    training_bundle_features = None
    training_bundle_targets = None
    training_bundle_amount = 1000  # examples accumulated before a batch run
    training_bundle_counter = 0
    X_placeholder = None
    Y_placeholder = None
    num_classes = 2
    session = None

    def __init__(self, label):
        # Build the whole (finalized) graph up front: feature generation,
        # three fully-connected layers, sigmoid output, loss and train op.
        self.label = label
        self.graph = tf.Graph()
        with self.graph.as_default() as g:
            with g.name_scope(label) as scope:
                self.points_placeholder = tf.placeholder(tf.float32, shape=[None, 2], name="points")
                feature_list = features.generate_features(self.points_placeholder)
                #feature_list = tf.Print(feature_list, [feature_list], "Features for recognition", summarize=500)
                column_list = features.generate_columns()
                mapping = features.match_features_columns(feature_list, column_list)
                first_layer = tf.contrib.layers.input_from_feature_columns(columns_to_tensors=mapping,
                                                                           feature_columns=column_list)
                with g.name_scope('layer2') as scope1:
                    layer2 = tf.contrib.layers.fully_connected(first_layer, 50, scope=scope1)
                with g.name_scope('hidden1') as scope2:
                    hidden = tf.contrib.layers.fully_connected(layer2, 20, scope=scope2)
                with g.name_scope('hidden2') as scope3:
                    output = tf.contrib.layers.fully_connected(hidden, self.num_classes, scope=scope3)
                output = tf.sigmoid(output)
                print output
                # Index of the highest-scoring class (0 == this label).
                self.class_index = tf.argmax(output, 0)
                output = tf.Print(output, [output, self.class_index], "Raw output of training data")
                self.output = output
                self.target = training_helpers.create_target_classes(self.num_classes)
                lossTarget = tf.Print(self.target, [self.target], "Raw target data")
                self.loss = training_helpers.create_loss_function(output, lossTarget)
                self.train_step = training_helpers.create_training(self.loss, .01)
                self.init = tf.initialize_all_variables()
        self.graph.finalize()

    def create_features(self, point_list):
        # Strips point ids in place, then returns the same list.
        utils.strip_ids_from_points(point_list)
        return point_list

    def create_target(self, label):
        # big punishment to show difference between 0 and 1
        true_class = 1.0 if label == self.label else 0.0
        null_class = 1.0 if label != self.label else 0.0
        return [[true_class, null_class]]

    def train(self, label, points):
        # Queue one (points, target) example; run a batch once
        # training_bundle_amount examples have accumulated.
        target = self.create_target(label)
        if self.training_bundle_features is None:
            self.training_bundle_features = [points]
        else:
            self.training_bundle_features.append(points)
        if self.training_bundle_targets is None:
            self.training_bundle_targets = [target]
        else:
            self.training_bundle_targets.append(target)

        if self.training_bundle_counter >= self.training_bundle_amount:
            self.execute_train_bundle()
        else:
            self.training_bundle_counter += 1

    # TODO: change back to this when the code is fixed
    def single_train(self, label, features):
        target = self.create_target(label)
        self.classifier.fit(x=features, y=target, steps=1)

    def execute_train_bundle(self):
        # Run the train op once per queued example, then reset the queue.
        # NOTE(review): variables are re-initialized here each bundle, so
        # learning does not persist across bundles — confirm intent.
        print 'batch training: ' + self.label
        with tf.Session(graph=self.graph) as sess:
            sess.run(self.init)
            for i in range(self.training_bundle_counter):
                feed = {self.points_placeholder: self.training_bundle_features[i],
                        self.target: self.training_bundle_targets[i]}
                result = sess.run(self.train_step, feed_dict=feed)
                print result
        self.training_bundle_features = None
        self.training_bundle_targets = None
        self.training_bundle_counter = 0

    def finish_training(self):
        # Flush any partially-filled bundle.
        if self.training_bundle_counter > 0:
            self.execute_train_bundle()

    def recognize(self, features):
        # Run the network on *features* and wrap the result in an
        # SrlInterpretation protobuf (label + raw confidence score).
        interpretation = Sketch.SrlInterpretation()
        with tf.Session(graph=self.graph) as sess:
            sess.run(self.init)
            feed = {self.points_placeholder: features}
            raw_output, class_index = sess.run([self.output, self.class_index], feed)
            print class_index
            print 'result: ' + str(self.label if class_index == 0 else None)
            print raw_output
            interpretation.label = self.label
            interpretation.confidence = raw_output[class_index]
        return interpretation
Here at Fireplace Inspection Guys, we'll be here to fulfill all of your expectations when it comes to Fireplace Inspection in Bayard, IA. You'll need the most innovative solutions around, and our crew of highly trained experts will provide that. Our materials are of the best quality and we are able to save you cash. We are going to provide help to make choices for your undertaking, answer your questions, and schedule a meeting with our professionals whenever you contact us at 888-201-5579. You have got a spending budget to follow, and you should get lower rates. Though, cutting costs should not indicate that you compromise superior quality for Fireplace Inspection in Bayard, IA. Our initiatives to cost less money won't give up the high quality of our results. Any time you work with our team, you will get the benefit of our own practical knowledge and top quality products guaranteeing that your project lasts while saving time and funds. It will be possible since we know how to save time and money on supplies and labor. If you wish to get lower rates, Fireplace Inspection Guys is the company to call. Call up 888-201-5579 to talk to our customer service representatives, now. It is important to be kept informed concerning Fireplace Inspection in Bayard, IA. We will make sure you understand what to expect. We will take the unexpected situations out of the picture by supplying appropriate and thorough information. Start by discussing your project with our customer service representatives when you dial 888-201-5579. During this phone call, you'll get all your questions addressed, and we can schedule a time to initiate services. We always get there at the scheduled time, ready to work with you. You have many reasons to use Fireplace Inspection Guys to meet your needs concerning Fireplace Inspection in Bayard, IA. We've got the best customer support ratings, the very best quality resources, and the most helpful and successful cash saving strategies. 
We're ready to serve you with the greatest competence and valuable experience in the industry. If you need Fireplace Inspection in Bayard, call Fireplace Inspection Guys by dialing 888-201-5579, and we will be beyond glad to help you.
from structure.LineGrammar.core.line_grammar_executor import LineGrammarExecutor
from enum import Enum

from tonalmodel.interval import Interval


class MelodicSearchAnalysis(object):
    """Precomputed search annotations for a melodic pattern.

    Given a pattern line and its harmonic-context track (hct), builds
    per-note, per-note-pair, and per-harmonic-context annotation lists
    that a melodic search can match against.
    """

    def __init__(self, pattern_line, pattern_hct):
        self.__pattern_line = pattern_line
        self.__pattern_hct = pattern_hct

        self.__note_annotation = self.prepare_note_search_parameters()
        self.__note_pair_annotation = self.prepare_note_pair_search_parameters()
        self.__hct_annotation = self.prepare_hct_search_parameters()

    @staticmethod
    def create(pattern_string):
        # Alternate constructor: parse a textual pattern into (line, hct).
        line, hct = LineGrammarExecutor().parse(pattern_string)
        return MelodicSearchAnalysis(line, hct)

    @property
    def pattern_line(self):
        return self.__pattern_line

    @property
    def pattern_hct(self):
        return self.__pattern_hct

    @property
    def note_annotation(self):
        return self.__note_annotation

    @property
    def note_pair_annotation(self):
        return self.__note_pair_annotation

    @property
    def hct_annotation(self):
        return self.__hct_annotation

    def prepare_note_search_parameters(self):
        # One NoteInformation per note, paired with the harmonic context
        # in effect at the note's absolute position.
        annotation_list = list()
        for note in self.pattern_line.get_all_notes():
            hc = self.pattern_hct[note.get_absolute_position()]
            annotation_list.append(NoteInformation(note, hc))
        return annotation_list

    def prepare_note_pair_search_parameters(self):
        # Pair each sounding note with the next sounding note, skipping
        # rests (notes without a diatonic pitch) on either side.
        pair_annotation_list = list()
        note_list = self.pattern_line.get_all_notes()
        for i in range(0, len(note_list) - 1):
            first = note_list[i]
            if first.diatonic_pitch is None:
                continue
            second = None
            for j in range(i + 1, len(note_list)):
                second = note_list[j]
                if second.diatonic_pitch is not None:
                    break
            if second.diatonic_pitch is None:
                continue
            pair_annotation_list.append(NotePairInformation(first, second))
        return pair_annotation_list

    def prepare_hct_search_parameters(self):
        # One HCInformation per harmonic context, in track order.
        hct_annotation = list()
        hc_list = self.pattern_hct.hc_list()
        for i in range(0, len(hc_list)):
            hc = hc_list[i]
            hct_annotation.append(HCInformation(hc))
        return hct_annotation


class NoteInformation(object):
    """Scale/chord placement of a single note within its harmonic context."""

    def __init__(self, note, hc):
        self.__note = note
        self.__hc = hc

        self.__scale_degree = self.compute_scale_degree()
        self.__chord_interval = self.compute_chord_interval()
        self.__root_based_interval = self.compute_root_based_interval()
        self.__duration = note.duration

    @property
    def note(self):
        return self.__note

    @property
    def hc(self):
        return self.__hc

    @property
    def scale_degree(self):
        # 0-based index into the tonality's annotation, or None.
        return self.__scale_degree

    @property
    def chord_interval(self):
        return self.__chord_interval

    @property
    def is_scalar(self):
        return self.scale_degree is not None

    @property
    def is_chordal(self):
        return self.chord_interval is not None

    @property
    def duration(self):
        return self.__duration

    @property
    def root_based_interval(self):
        return self.__root_based_interval

    def compute_scale_degree(self):
        # Position of the note's tone within the tonality's scale
        # annotation; None for rests or non-scale tones.
        annotation = self.hc.tonality.annotation
        if self.note.diatonic_pitch is None:   # Rest
            return None
        if self.note.diatonic_pitch.diatonic_tone in annotation:
            return annotation.index(self.note.diatonic_pitch.diatonic_tone)
        return None

    def compute_root_based_interval(self):
        # Interval from the tonality's root tone up to the note's tone;
        # None for rests.
        if self.note.diatonic_pitch is None:
            return None
        return Interval.calculate_tone_interval(self.hc.tonality.root_tone,
                                                self.note.diatonic_pitch.diatonic_tone)

    def compute_chord_interval(self):
        # The chord-member interval whose tone matches the note's tone;
        # None for rests or non-chord tones.
        tones = self.hc.chord.tones
        if self.note.diatonic_pitch is None:
            return None
        for tone in tones:
            if tone[0] == self.note.diatonic_pitch.diatonic_tone:
                return tone[1]
        return None

    def __str__(self):
        return '{0} hc={1} scale_degree={2} interval={3} is_scalar={4} is_chordal={5} duration={6}'.\
            format(self.note, self.hc, self.scale_degree, self.chord_interval, self.is_scalar, self.is_chordal,
                   self.duration)


class NotePairInformation(object):
    """Temporal and intervallic relation between two sounding notes."""

    class Relationship(Enum):
        # Pitch relation of first note to second: GT means the first note
        # is higher (the forward interval descends).
        LT = -1
        EQ = 0
        GT = 1

    def __init__(self, first_note, second_note):
        self.__first_note = first_note
        self.__second_note = second_note

        self.__time_difference = self.second_note.get_absolute_position() - self.first_note.get_absolute_position()

        self.__forward_interval = Interval.create_interval(self.first_note.diatonic_pitch,
                                                           self.second_note.diatonic_pitch)
        cd = self.forward_interval.chromatic_distance
        self.__relationship = NotePairInformation.Relationship.GT if cd < 0 else NotePairInformation.Relationship.LT \
            if cd > 0 else NotePairInformation.Relationship.EQ

    @property
    def first_note(self):
        return self.__first_note

    @property
    def second_note(self):
        return self.__second_note

    @property
    def time_difference(self):
        return self.__time_difference

    @property
    def forward_interval(self):
        return self.__forward_interval

    @property
    def relationship(self):
        return self.__relationship

    @staticmethod
    def rel_pair_symbol(relationship):
        # Symbol for display: '>' first higher, '<' first lower, '==' equal.
        return '>' if relationship == NotePairInformation.Relationship.GT else \
            '<' if relationship == NotePairInformation.Relationship.LT else '=='

    def __str__(self):
        return '{0} {1} {2}'.format(self.first_note, NotePairInformation.rel_pair_symbol(self.relationship),
                                    self.second_note)


class HCInformation(object):
    """Span and relative chord degree of one harmonic context."""

    def __init__(self, hc):
        self.__hc = hc

        self.__span = hc.duration

        # Chords built on the tonality use their scale degree directly;
        # otherwise the degree is derived from the interval between the
        # tonality's tone and the chord's diatonic basis (1-based).
        if self.hc.chord.chord_template.diatonic_basis is None:
            self.__relative_chord_degree = self.hc.chord.chord_template.scale_degree
        else:
            interval = Interval.calculate_tone_interval(self.hc.tonality.diatonic_tone,
                                                        self.hc.chord.chord_template.diatonic_basis)
            self.__relative_chord_degree = interval.diatonic_distance + 1

    @property
    def hc(self):
        return self.__hc

    @property
    def span(self):
        return self.__span

    @property
    def relative_chord_degree(self):
        return self.__relative_chord_degree

    def __str__(self):
        return '{0} span={1} chord_degree={2}'.format(self.hc, self.span, self.relative_chord_degree)
is an elegant event to support local survivors and fighters of heart disease. Our goal is to raise awareness of the Heart Healthy Lives campaign and to promote recognition of heart disease in our local community. The 6th Annual Heartbeat Gala will be held at the brand-new convention center HUB 757, located in the prestigious Harbourview area of Suffolk, VA, on Saturday, February 9, 2019.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import, division, print_function

import binascii

import pytest

from cryptography import utils
from cryptography.exceptions import (
    AlreadyFinalized, _Reasons
)
from cryptography.hazmat.primitives import interfaces
from cryptography.hazmat.primitives.ciphers import (
    Cipher, algorithms, modes
)

from .utils import (
    generate_aead_exception_test, generate_aead_tag_exception_test
)
from ...utils import raises_unsupported_algorithm


# Stub mode used to exercise the "backend doesn't support this" paths.
@utils.register_interface(interfaces.Mode)
class DummyMode(object):
    name = "dummy-mode"

    def validate_for_algorithm(self, algorithm):
        pass


# Stub cipher algorithm, likewise unsupported by any real backend.
@utils.register_interface(interfaces.CipherAlgorithm)
class DummyCipher(object):
    name = "dummy-cipher"


@pytest.mark.cipher
class TestCipher(object):
    # Basic construction checks: encryptor()/decryptor() return contexts,
    # and non-algorithm objects are rejected.

    def test_creates_encryptor(self, backend):
        cipher = Cipher(
            algorithms.AES(binascii.unhexlify(b"0" * 32)),
            modes.CBC(binascii.unhexlify(b"0" * 32)),
            backend
        )
        assert isinstance(cipher.encryptor(), interfaces.CipherContext)

    def test_creates_decryptor(self, backend):
        cipher = Cipher(
            algorithms.AES(binascii.unhexlify(b"0" * 32)),
            modes.CBC(binascii.unhexlify(b"0" * 32)),
            backend
        )
        assert isinstance(cipher.decryptor(), interfaces.CipherContext)

    def test_instantiate_with_non_algorithm(self, backend):
        algorithm = object()
        with pytest.raises(TypeError):
            Cipher(algorithm, mode=None, backend=backend)


@pytest.mark.cipher
class TestCipherContext(object):
    # Behavior of encrypt/decrypt contexts: finalization rules, buffering
    # of unaligned input, unsupported ciphers, and padding errors.

    def test_use_after_finalize(self, backend):
        # Both update() and finalize() must fail once a context is final.
        cipher = Cipher(
            algorithms.AES(binascii.unhexlify(b"0" * 32)),
            modes.CBC(binascii.unhexlify(b"0" * 32)),
            backend
        )
        encryptor = cipher.encryptor()
        encryptor.update(b"a" * 16)
        encryptor.finalize()
        with pytest.raises(AlreadyFinalized):
            encryptor.update(b"b" * 16)
        with pytest.raises(AlreadyFinalized):
            encryptor.finalize()
        decryptor = cipher.decryptor()
        decryptor.update(b"a" * 16)
        decryptor.finalize()
        with pytest.raises(AlreadyFinalized):
            decryptor.update(b"b" * 16)
        with pytest.raises(AlreadyFinalized):
            decryptor.finalize()

    def test_unaligned_block_encryption(self, backend):
        # Partial blocks are buffered: 15 bytes yield nothing, and output
        # only appears once whole 16-byte blocks are available.
        cipher = Cipher(
            algorithms.AES(binascii.unhexlify(b"0" * 32)),
            modes.ECB(),
            backend
        )
        encryptor = cipher.encryptor()
        ct = encryptor.update(b"a" * 15)
        assert ct == b""
        ct += encryptor.update(b"a" * 65)
        assert len(ct) == 80
        ct += encryptor.finalize()
        decryptor = cipher.decryptor()
        pt = decryptor.update(ct[:3])
        assert pt == b""
        pt += decryptor.update(ct[3:])
        assert len(pt) == 80
        assert pt == b"a" * 80
        decryptor.finalize()

    @pytest.mark.parametrize("mode", [DummyMode(), None])
    def test_nonexistent_cipher(self, backend, mode):
        # A cipher/mode pair no backend supports raises UNSUPPORTED_CIPHER
        # at context creation, for both directions.
        cipher = Cipher(
            DummyCipher(), mode, backend
        )
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
            cipher.encryptor()
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
            cipher.decryptor()

    def test_incorrectly_padded(self, backend):
        # Input not a multiple of the block size must fail at finalize().
        cipher = Cipher(
            algorithms.AES(b"\x00" * 16),
            modes.CBC(b"\x00" * 16),
            backend
        )
        encryptor = cipher.encryptor()
        encryptor.update(b"1")
        with pytest.raises(ValueError):
            encryptor.finalize()
        decryptor = cipher.decryptor()
        decryptor.update(b"1")
        with pytest.raises(ValueError):
            decryptor.finalize()


@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.AES("\x00" * 16), modes.GCM("\x00" * 12)
    ),
    skip_message="Does not support AES GCM",
)
@pytest.mark.cipher
class TestAEADCipherContext(object):
    # AEAD (GCM) error paths are generated from shared test factories.
    test_aead_exceptions = generate_aead_exception_test(
        algorithms.AES,
        modes.GCM,
    )
    test_aead_tag_exceptions = generate_aead_tag_exception_test(
        algorithms.AES,
        modes.GCM,
    )


class TestModeValidation(object):
    # Each block mode rejects an initialization vector / nonce of the
    # wrong length ("abc" is 3 bytes; AES requires 16).

    def test_cbc(self, backend):
        with pytest.raises(ValueError):
            Cipher(
                algorithms.AES(b"\x00" * 16),
                modes.CBC(b"abc"),
                backend,
            )

    def test_ofb(self, backend):
        with pytest.raises(ValueError):
            Cipher(
                algorithms.AES(b"\x00" * 16),
                modes.OFB(b"abc"),
                backend,
            )

    def test_cfb(self, backend):
        with pytest.raises(ValueError):
            Cipher(
                algorithms.AES(b"\x00" * 16),
                modes.CFB(b"abc"),
                backend,
            )

    def test_ctr(self, backend):
        with pytest.raises(ValueError):
            Cipher(
                algorithms.AES(b"\x00" * 16),
                modes.CTR(b"abc"),
                backend,
            )
The Santa Fe team offers the resources you need to ensure 100% satisfaction with all of our products. Located at our headquarters and manufacturing facility in Madison, Wisconsin, we're ready to help. Our Customer Service team is your go-to source for all things Santa Fe. We are here to answer any questions and give you the advice you need to make an educated purchase. Santa Fe has an on-site Service Center located in Madison, Wisconsin. Staffed with knowledgeable technicians with years of experience, our Service Center is ready to lend a hand — or two if the need should arise. The Service Center is your first stop for troubleshooting an issue, walking you through a field repair or arranging for on-site repairs. We value your opinion and rely on our customers to tell us how we are doing. Have Questions? Please complete our contact form and we'll get back to you soon.
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4

###############################################################################
# OpenLP - Open Source Lyrics Projection                                      #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers                                   #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it     #
# under the terms of the GNU General Public License as published by the Free  #
# Software Foundation; version 2 of the License.                              #
#                                                                             #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or       #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for    #
# more details.                                                               #
#                                                                             #
# You should have received a copy of the GNU General Public License along     #
# with this program; if not, write to the Free Software Foundation, Inc., 59  #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA                          #
###############################################################################
"""
The :mod:`~openlp.core.ui.servicenoteform` module contains the `ServiceNoteForm` class.
"""
from PyQt4 import QtGui

from openlp.core.common import Registry, RegistryProperties, translate
from openlp.core.lib import SpellTextEdit
from openlp.core.lib.ui import create_button_box


class ServiceNoteForm(QtGui.QDialog, RegistryProperties):
    """
    Dialog used to create and edit the notes attached to a service item
    (window title "Service Item Notes").  The note text is edited in a
    spell-checked text box with Save/Cancel buttons below it.
    """
    def __init__(self):
        """
        Constructor: parent the dialog to the registered main window, then
        build and translate the UI.
        """
        super(ServiceNoteForm, self).__init__(Registry().get('main_window'))
        self.setupUi()
        self.retranslateUi()

    def exec_(self):
        """
        Execute the dialog modally and return the standard QDialog result
        code.  Focus is moved to the text box first so the user can type
        immediately.
        """
        self.text_edit.setFocus()
        return QtGui.QDialog.exec_(self)

    def setupUi(self):
        """
        Set up the UI of the dialog: a vertical layout holding the
        spell-checked text edit and a Cancel/Save button box.
        """
        self.setObjectName('serviceNoteEdit')
        self.dialog_layout = QtGui.QVBoxLayout(self)
        self.dialog_layout.setContentsMargins(8, 8, 8, 8)
        self.dialog_layout.setSpacing(8)
        self.dialog_layout.setObjectName('vertical_layout')
        # NOTE(review): the second SpellTextEdit argument presumably toggles
        # an optional feature (e.g. formatting tags) off — confirm against
        # SpellTextEdit's signature.
        self.text_edit = SpellTextEdit(self, False)
        self.text_edit.setObjectName('textEdit')
        self.dialog_layout.addWidget(self.text_edit)
        self.button_box = create_button_box(self, 'button_box', ['cancel', 'save'])
        self.dialog_layout.addWidget(self.button_box)

    def retranslateUi(self):
        """
        Translate the UI on the fly (only the window title needs translating).
        """
        self.setWindowTitle(translate('OpenLP.ServiceNoteForm', 'Service Item Notes'))
Statoil is targeting standardization in certain situations as a means to generate cost savings in even the most complex projects. Norway has a long and rich seafaring history, and it also sits on one of the world’s richest oil & gas resources in its sector of the North Sea. Earlier this year MTR contributor Eric Haun traversed the country in search of companies worthy of inclusion in this year’s MTR100. He found, and presents here, a dozen. Announced at the end of 2012 as a joint venture between subsea systems provider Cameron and oilfield services company Schlumberger (split 60/40, respectively), OneSubsea combines the expertise of two leading firms to deliver integrated solutions, products, systems and services for the subsea oil and gas market. As Cameron Chairman, President and CEO, Jack B. Moore explained, OneSubsea essentially provides a marriage of Schlumberger’s oilfield services technology and Cameron’s subsea equipment heritage. OneSubsea leverages Cameron’s flow control expertise, process technologies and world-class manufacturing and aftermarket capabilities, along with Schlumberger’s petro-technical leadership, reservoir and production technology and R&D capabilities, while also bringing into play its subsidiary, Framo Engineering, which provides subsea measurement, boosting and processing systems. OneSubsea is currently operating in more than 20 countries through its six divisions: Integrated Solutions, Production Systems, Processing Systems, Control Systems, Swivel and Marine Systems and Subsea Services. The company offers a step change in reservoir recovery for the subsea oil and gas industry through integration and optimization of the entire production system over the life of the field.
Using its comprehensive total system approach – from the reservoir through the well, subsea production system, up to the surface – OneSubsea aims to optimize complete subsea production systems and ultimately improve production and recovery from offshore subsea developments. The company claims its processing systems have increased production rates for operators by as much as 30-100%. U.S.-based FMC Technologies is one of the world’s leading equipment and service providers for the global energy industry, operating from 30 production facilities in 17 countries to design, manufacture and service systems and technologically sophisticated systems and products such as subsea production and processing systems, surface wellhead systems, high pressure fluid control equipment and pumps, measurement solutions and marine loading systems. The company also specializes in subsea technologies that maximize recovery of hydrocarbons from challenging reservoirs. In 2013, FMC recorded $7.1 billion in revenue, 66% of which came from its deepwater and subsea technologies segment (Subsea Systems, Multi Phase Meters and Schilling Robotics), 25% from surface technologies (Surface Wellhead, Fluid Control and Completion Services) and the remaining 9% from energy infrastructure (Measurement Solutions, Loading Systems, Material Handling Solutions, Separation Systems and Automation and Control). FMC claimed 40% of the global subsea tree unit market share from 2009-2013, and the group presently has subsea processing projects in every major deepwater basin. The company attributes much of its growth to an increased global focus on deepwater operations. According to FMC, deepwater is the fastest growing source to meet incremental production demands, and large deepwater sources are increasingly discovered in multiple basins. This, along with technological advancements that improve exploration success and recovery rates, provides an opportunity for IOCs to employ differentiated technology. 
Going forward, FMC looks to expand its subsea solutions scope for its core products (trees, manifolds, control systems, template systems, flowline connection systems) as well as new products (subsea processing: separation, boosting and gas compression; well intervention services; and ROVs and ROV manipulator systems). As of March 2014, FMC identified potential for more than 300 major subsea production projects (valued at more than $150 million each) in the coming 15 months, including prospects for 48 trees for Nigeria, 38 for Norway and 20 for the Gulf of Mexico. The Bandak Group – a complete multidiscipline project supplier to Subsea Production Systems (SPS) and Subsea Umbilicals, Risers and Flowlines (SURF) players – provides advanced mechanical products, services and solutions to world-leading companies within oil and gas, marine, defense and space industries. The group provides deliveries; engineering and project management, machining, welding, surface treatment, assembly, test, tubular services and documentation for some of the industry’s top players, in Norway and globally, including key customers Statoil, FMC Technologies, GE Oil & Gas, Aker Solutions, Kongsberg, NationalOilwell Varco and Rolls-Royce. Bandak supplies mechanical subsystems and parts that are fitted into subsea production systems or drilling systems (quick connectors, leveling jacks, HUBs, pig launchers, guide posts, connectors, etc.), also supplying capacity related to drilling systems and equipment. Typical products are drillpipe, casing, tubing and many types of related downhole products, including specialized services for OCTG. Bandak develops and manufactures tools for various mechanical offshore operations, both subsea and topside, utilizing its capabilities within engineering, manufacturing, assembly, testing and documentation. 
With 10 locations in Norway and two in Malaysia, Bandak is a company on the rise, reporting 4x growth in the last three years and expanding its staff from 150 to 550 in the last four years. Bandak has recently expanded in the APAC oil and gas industry with the acquisition of fabrication and engineering company Multi Fibre Sdn. Bhd. in Malaysia in March 2014, and its acquisition of ITM in 2013 began its move into the maintenance and service market. Annual investment hovers between $6.5-8 million in recent years, with funding toward new machining centers and transition to new clad welding technology. Bandak is primarily held by Herkules Private Equity Fund III (90% share), while the remaining shares are owned by minority shareholders including management and other key employees. CMR Instrumentation, an arm of Christian Michelsen Research AS, is a research and development center directed towards instrumentation research and development. The firm covers measurement science, physics, modeling, software, sensor technologies, electronics and signal processing, among others. CMR typically works in collaboration with clients and partners to carry out projects from ideas and research to qualified industrial measurement solutions and products, and can deliver both turnkey solutions or be a sub-supplier to development projects. CMR also offers services within analysis of metering stations, including uncertainty analysis and dedicated measurement campaigns. One of CMR’s current initiatives aims to improve subsea operation and maintenance by bringing risk management into real time through Integrated Well and Subsea Instrumentation (IWSI). As improved sensors, tools and models are developed by the center, data relating to reservoir and production management, subsea control and safety systems, flow assurance management and integrity management will be used to provide real time performance and risk management, and then visualization and decision support.
If workers can better gauge the conditions in which they operate, higher levels of efficiency and safety can be achieved, and thus costs are reduced and safety improved. Established in 1973, Benestad is a world leader in glass/ceramics-to-metal sealing and thin film (PVD) technology. The company provides sophisticated design and product solutions that are based on this technology, and then uses it as a technical advantage as glass/ceramics offer excellent corrosion and erosion properties, will not degrade over time or allow diffusion of liquids/gas, thus providing unmatched long-term stability. The company says highly specialized proprietary processes are at the heart of its products and production. Under major shareholder Aker Solutions, Benestad’s market segments are split between oil and gas (85%) and the defense industry (15%). For the oil and gas market, the company produces customized penetrators (signal and power distribution systems), instrument sensors and subsea instrumentation. For subsea, this comes into play for controls, boosting and processing and power distribution, as well as down-hole ESP. In-house activities include research and development, product design and engineering, manufacturing of core technologies (both for glass-to-metal sealing and thin film deposition) and testing and qualification. Located in the heart of the Subsea Valley in Drammen, Norway, HTS maskinteknikk (HTSM) is a manufacturer and global supplier of mechanical precision components to the subsea, aerospace and defense industry. HTS, established in 1982, is an independent contractor specializing in critical machining and welding of advanced materials, and offers a range of professional production technology advisory services. 
The company prides itself on its capacity as a total-solution supplier with all disciplines available in-house at its workshop which contains a production area of more than 60,000 square feet, including departments for turning, milling, welding, inspection and inspection/calibration. HTSM effectively manages everything from small-scale prototype production to large-scale serial production, though its forte is in more complex components. HTS maskinteknikk has delivered high precision mechanical components to the oil and gas industry for more than 20 years, with a specialty in the manufacturing of hydraulic couplers and subsea valve components. The group’s subsea client portfolio includes market leading EPC contracts as well as specialized subsea installation and provision companies. Its largest customer is FMC, which it supplies to directly for sites throughout the world including South America and Brazil. Roughly 79% of HTS’ business in 2013 was for the subsea sector, and as such is the recipient of the majority of the group’s current investments. HTSM has recently placed significant capital into its staff, state-of-the-art equipment and facility, greatly expanding upon its operational capacity. Family-owned Reinersten delivers advanced offshore engineering, infrastructure and construction, as well as consulting engineering services. Reinersten is product independent and its clients are mainly oil and gas companies, in Norway and internationally, for which it builds modules and prefabrication units for topside-building projects as well as subsea structures and components. Reinersten’s main market areas within subsea are field development, production systems, processing (subsea-factory), flow assurance, power supply and distribution, control systems and umbilicals, pipelines/flowlines, risers and structures/stations. The company is regarded as a major supplier of maintenance services on Norwegian processing sites, Norwegian shelf and inland. 
Its department of Installation is in charge of realization of the project’s installation phase on-site, and is responsible for HSE and quality control, completion, resources and subcontractors. Reinersten has also established long-term working agreements with the service trades suppliers within surface protection, isolation, scaffolding, rope access technique and pressure testing. Coast Center Base (CCB) is a logistical hub and main base for supply activities to the petroleum fields in the Tampen area of the North Sea (Statfjord, Gullfaks, Veslefrikk, Troll, Huldra and Kvitebjørn), and has compiled considerable expertise as a service provider in technical maintenance and harbor operations since opening in 1973. CCB offers provision of services and supply to the petroleum activities offshore, the North Sea basin’s largest subsea community with an offshore subsea test well reachable from quay, harbor terminal services for traditional line shipping and IRM maintenance services for rigs and vessels. The CCB base covers about seven hectares, has 1,000 meters of quays, some with water depths up to 50 meters and no weight limits, workshops, warehouses and office buildings. DNV GL provides classification and technical assurance along with software and independent expert advisory services to the maritime, oil and gas and energy industries. An international giant with operations at 300 sites in more than 100 countries, DNV GL stands among the world’s largest ship and offshore classification societies and is a leading technical advisor to the global oil and gas industry and expert for the energy value chain, including renewables and energy efficiency. DNV GL predicts it will generate annual revenue of approximately $3.4 billion. Det Norske Veritas (DNV) began in 1864 as a small Norwegian classification society, and in the 150 years since, has grown into one of the world’s largest enterprises of its kind with the formation of DNV GL. 
In September 2013, Norway’s DNV and Germany’s Germanischer Lloyd (GL) Noble Denton merged to create what is now the DNV GL Group. DNV Foundation holds 63.5 % of the group, while GL’s owner Mayfair SE holds the remaining 36.5%, and together, DNV GL serves the maritime, oil and gas, energy and business assurance segments. DNV GL is a leading technical advisor to the global oil and gas industry, providing consistent, integrated services within technical and marine assurance and advisory, risk management and offshore classification, to enable safe, reliable and enhanced performance in projects and operations. Lysaker- based Computas, a 1985 spinoff from DNV Høvik, is a 100% employee-owned Norwegian IT consulting company that provides services and solutions for work processes, business systems and collaboration, with core competence in software development, architecture and integration, project management and consulting. With clients in both public and private sectors, the group serves a number of industries globally, one of them being offshore oil and gas. Computas claims its solutions currently have more than 25,000 users, including names such as Statoil, ConocPhillips, Technip, Eni, Aker Solutions and FMC Technologies. As subsea activity on the Norwegian continental shelf and elsewhere reaches new depths, and the industry’s players are forced to become more efficient with fewer resources, new challenges persistently arise. That’s where Computas comes in to answer the call for efficient IT solutions. Computas operates with a great focus on innovation, research and development –its keywords being delivery fitness and added value. 
The company’s work offshore can essentially be broken down into three main categories: (a) compliance, to ensure that operations are traceable and carried out according to agreed standards; (b) decisions and processes, to provide decision support plus task support for complex work processes; and (c) information, to capture and process massive amounts of data, the basis for decisions and value generation. DOF Subsea is an international subsea operating company and leading provider of subsea services to the oil and gas industry in the North Atlantic, Gulf of Mexico, Brazil, Asia and West Africa. DOF’s two core business segments are long-term charter agreements for the fleet and the execution of complex subsea operations to depths of 4,000 meters, using the group’s owned and operated purpose-built vessels. With a world class fleet of 25 offshore vessels, 59 Remotely Operated Vehicles (ROVs), including one AUV/IIV system and 10 diving spreads, the group combines expertise and technology to deliver integrated subsea solutions to the offshore oil and gas industry. DOF’s fleet of 25 offshore vessels, built primarily by Vard, has an average age of seven to eight years and awaits seven newbuilds presently under construction in yards in Norway and Brazil. DOF’s multi-tool fleet enables the group to perform work for high-profile clients in marine operations; engineering, construction and mobilization; supply services; geotechnical and geophysical services; ROV operations; wellhead intervention; decommissioning; pipelay; pipeline survey and diver assisted intervention. The Bergen-based DOF Subsea Group, recorded a turnover of NOK 6.579 billion in 2013, compared to 5.248 billion in 2012, attributing much of its growth to its subsea project business, which saw revenues rise 32%. DOF’s EBITDA also saw an increase over the same period, rising roughly 9 percent to NOK 1.945 billion in 2013 compared to 1.788 billion in 2012. The group’s backlog entering 2014 was NOK 34 billion. 
Founded in 1972 as the Norwegian State Oil Company, Statoil is an international energy company and true subsea pioneer with 40 years of experience in oil and gas production on the Norwegian continental shelf. The Norwegian government remains Statoil’s largest shareholder with approximately 67% of the company’s shares, while the rest is held in public stock. With operations in 36 countries and more than 520 subsea wells globally, Statoil claims a position as the world’s second largest subsea operator. It is the largest operator on the Norwegian continental shelf, with 60% of the total production, and since 2010, Statoil’s annual investments there have increased by 75%. As a technological forerunner, Statoil has traditionally achieved success by pushing the boundaries of what is possible under water, frequently going “longer, deeper and colder,” but as the complexity of global subsea projects grows, so too does the bill for all involved in subsea projects. Statoil is therefore targeting standardization in certain situations as a means to generate cost savings in even the most complex projects. Rather than spending costly engineering hours working toward the next grand, over-the-top subsea solution, industry leaders are striving for smart, simple answers to some of the sector’s largest challenges. This means innovation comes in the form of quick and effective, yet less costly, solutions that do not sacrifice quality. Statoil’s Fast Track work process, for example, adds “simple, standard and cheaper” to “longer, deeper and colder,” said Torger Rød, Statoil’s SVP for Subsea, Pipelines and Cessation. The program essentially fuses tailored innovations with standardized solutions to develop and begin subsea production within a 30-month timeframe.
Under this accelerated method, product specifications are simplified, existing designs are reused when possible and concepts are chosen from a preexisting catalog, eliminating the concept selection phase, Rød explained. With Fast Track, the company reports a 40% shorter execution time, a low breakeven level of $40/boe and average IRR (nom) greater than 25%. Statoil currently has six Fast Track projects in operation off Norway, with six more on the way. Strong supporters of the subsea and offshore O&G industry from and working in Norway.
#
# Basic test, grid operators
#
# Regression test for the elementwise grid operators (addConst, multConst,
# copyFrom, add, addScaled) on Real, MAC (vector) and Int grids.  In
# reference-generation mode the grids are set directly to the expected end
# values; in test mode the same values are reproduced through chains of
# operator calls, and doTestGrid() compares against the stored references.
#
import sys
print ("Running python "+sys.version)

from manta import *
from helperInclude import *

# solver params
gs = vec3(10, 20, 30)
s = Solver(name='main', gridSize = gs, dim=3)

# prepare grids: three of each cell type, plus a scratch MAC grid
rlg1 = s.create(RealGrid)
rlg2 = s.create(RealGrid)
rlg3 = s.create(RealGrid)
vcg1 = s.create(MACGrid)
vcg2 = s.create(MACGrid)
vcg3 = s.create(MACGrid)
int1 = s.create(IntGrid)
int2 = s.create(IntGrid)
int3 = s.create(IntGrid)
vcgTmp= s.create(MACGrid)

# 1 when regenerating reference data, 0 for a normal verification run
genRefFiles = getGenRefFileSetting()

if (genRefFiles==1):
    # manually init result — these constants are the values the operator
    # chains in the else-branch must arrive at
    rlg1.setConst( 1.1 )
    rlg2.setConst( 1.2 )
    rlg3.setConst( 2.9 )
    #vcg1.setConst( vec3(1.2, 1.2, 1.2) )
    #vcg2.setConst( vec3(0.5, 0.5, 0.5) )
    #vcg3.setConst( vec3(1.95, 1.95, 1.95) )
    vcg1.setConst( vec3(1.25, 1.25, 1.25) )
    vcg2.setConst( vec3(0.5, 0.5, 0.5) )
    vcg3.setConst( vec3(1.95, 1.95, 1.95) )
    int1.setConst( 125 )
    int2.setConst( 6 )
    int3.setConst( 143 )
else:
    # real test run, perform basic calculations
    # (the trailing comment on each line is the expected grid value)
    rlg1.setConst( 1.0 )
    rlg2.setConst( 2.4 )
    rlg3.setConst( 9.6 )
    rlg1.addConst (0.1)     # 1.1
    rlg2.multConst(0.5)     # 1.2
    rlg3.copyFrom( rlg1 )   # 1.1
    rlg3.add(rlg2)          # 2.3
    rlg3.addScaled(rlg2, 0.5)  # 2.9
    #print "r1 %f , r2 %f , r3 %f " % ( rlg1.getMaxAbs() , rlg2.getMaxAbs() , rlg3.getMaxAbs() )

    vcg1.setConst( vec3(1.0, 1.0, 1.0) )
    vcg2.setConst( vec3(1.0, 1.0, 1.0) )
    vcg3.setConst( vec3(9.0, 9.0, 9.0) )
    vcg1.addConst ( vec3(0.25,0.25,0.25) )  # 1.25
    vcg2.multConst( vec3(0.5,0.5,0.5) )     # 0.5
    # vcg3's initial value is overwritten via the scratch grid
    vcgTmp.setConst( vec3(1.2, 1.2, 1.2) )
    vcg3.copyFrom( vcgTmp )                 # 1.2
    vcg3.add(vcg2)                          # 1.7
    vcg3.addScaled(vcg2, vec3(0.5, 0.5, 0.5) )  # 1.95
    #print "v1 %s , v2 %s , v3 %s " % ( vcg1.getMaxAbs() , vcg2.getMaxAbs(), vcg3.getMaxAbs() )

    int1.setConst( 123 )
    int2.setConst( 2 )
    int3.setConst( 9 )
    int1.addConst ( 2 )     # 125
    int2.multConst( 3 )     # 6
    int3.copyFrom( int1 )   # 125
    int3.add(int2)          # 131
    int3.addScaled(int2, 2) # 143
    #print "i1 %s , i2 %s , i3 %s " % ( int1.getMaxAbs() , int2.getMaxAbs() , int3.getMaxAbs() )

# verify: float grids allow small tolerances, int grids must match exactly
doTestGrid( sys.argv[0], "rlg1", s, rlg1 , threshold=1e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "rlg2", s, rlg2 , threshold=1e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "rlg3", s, rlg3 , threshold=1e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "vcg1", s, vcg1 , threshold=5e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "vcg2", s, vcg2 , threshold=5e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "vcg3", s, vcg3 , threshold=5e-07 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "int1", s, int1 , threshold=1e-14 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "int2", s, int2 , threshold=1e-14 , thresholdStrict=1e-14 )
doTestGrid( sys.argv[0], "int3", s, int3 , threshold=1e-14 , thresholdStrict=1e-14 )
Macmillan Learning is proud to announce that The Flipped Learning Global Initiative has named Introductory Chemistry author Kevin Revell one of the top 40 Flipped Learning educators worldwide. The list, compiled annually by the FLGI executive committee, names the top 100 K-12 educators from around the world who are identified as driving forces of flipped classroom adoptions. This year, the initiative broadened their recognition to include the top 40 Flipped Learning leaders in higher education. FLGI’s Chief Academic Officer, Jon Bergmann, stated, "The 2017 FLGI Flipped Learning Leaders lists includes some of the most experienced, innovative and proactive education and training professionals in the world. These are the people driving Flipped Learning forward in thought and action and demonstrating what is possible when Flipped Learning is done well."
from django.utils.encoding import smart_str
from django.utils.simplejson import dumps
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_url_mapping

_CODE = """
_$MEDIA_URLS = %s;

media_urls = function(key) {
  var urls = _$MEDIA_URLS[key];
  if (!urls)
    throw 'Could not resolve media url ' + key;
  return urls;
};

media_url = function(key) {
  var urls = media_urls(key);
  if (urls.length == 1)
    return urls[0];
  throw 'media_url() only works with keys that point to a single entry (e.g. an image), but not bundles. Use media_urls() instead.';
};
""".lstrip()

class MediaURL(Filter):
    """Bundle filter that emits a JS snippet exposing the media URL mapping.

    The generated script defines ``media_urls(key)`` and ``media_url(key)``
    helpers on top of a serialized copy of the project's media URL mapping.
    It produces output only (``takes_input = False``) and is valid solely
    inside a JS bundle.
    """

    takes_input = False

    def __init__(self, **kwargs):
        super(MediaURL, self).__init__(**kwargs)
        # Guard against being placed in a non-JS bundle at configuration time.
        assert self.filetype == 'js', (
            'MediaURL only supports JS output. '
            'The parent filter expects "%s".' % self.filetype)

    def get_output(self, variation):
        # Production mode: a single generated JS chunk.
        yield self._compile()

    def get_dev_output(self, name, variation):
        # Dev server serves exactly one virtual file for this filter.
        assert name == '.media_url.js'
        return self._compile()

    def get_dev_output_names(self, variation):
        # Pair the virtual file name with a content hash used for
        # cache-busting in development.
        source = self._compile()
        digest = sha1(smart_str(source)).hexdigest()
        yield '.media_url.js', digest

    def _compile(self):
        # Serialize the mapping as JSON and splice it into the JS template.
        mapping = dumps(get_media_url_mapping())
        return _CODE % mapping
Pitawas is probably a bit older than Bobby. Jingbai is unfit for the job. Varda arrived at home early yesterday. Saumya predicted there would be rain. The doctor told Boyd that he should eat a lot of vegetables. Shamim can afford to be choosy. Doyle is obsessed with learning French. Hubert is a powerful warrior. I hear Jacobson is getting married. I think Lord is truthful. Why does Nicolas think he needs a bodyguard? Tracey stopped in Boston on his way back from a business trip to Australia. Lorenzo is willing to pay Kelly whatever she asks for. Curt seems very sure of himself. Martha is showing signs of severe depression. Troy might be writing email now. I'm a lot nicer than Susanne. I'm used to Juliane always yelling at me. Leith ate lunch in the cafeteria. Seth is trying too hard.
##################################################################### # s06f06.py # # (c) Copyright 2021, Benjamin Parzella. All rights reserved. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. ##################################################################### """Class for stream 06 function 06.""" from secsgem.secs.functions.base import SecsStreamFunction from secsgem.secs.data_items import GRANT6 class SecsS06F06(SecsStreamFunction): """ multi block data grant. **Data Items** - :class:`GRANT6 <secsgem.secs.data_items.GRANT6>` **Structure**:: >>> import secsgem.secs >>> secsgem.secs.functions.SecsS06F06 GRANT6: B[1] **Example**:: >>> import secsgem.secs >>> secsgem.secs.functions.SecsS06F06(secsgem.secs.data_items.GRANT6.BUSY) S6F6 <B 0x1> . :param value: parameters for this function (see example) :type value: byte """ _stream = 6 _function = 6 _data_format = GRANT6 _to_host = False _to_equipment = True _has_reply = False _is_reply_required = False _is_multi_block = False
Peter Newman is a special agent for the President. There should be no real doubt in anyone's mind that the prototype for Peter Newman is Oliver North. Newman is a Marine attached to the White House in a Special Operations capacity, charged with operating under covert conditions to fight the nation's enemies. While North's primary emphasis while working for Reagan was procuring arms for the Contras fighting in Nicaragua, Newman's chief enemy is international terrorism. Against them, he is as tenacious and determined as North was. He mainly has to dodge the bullets and bombs of Islamic radicals while North dodged Congressmen and news anchors. Just over 6’ tall and ramrod straight, Newman is 38 years old but looks considerably younger. A graduate of the Naval Academy, he served in numerous trouble spots around the world, being promoted early for his bravery and his accomplishments. He fought during the Gulf War where he was injured by shrapnel, earning a Purple Heart, as well as the Navy Cross. He is married as the series begins, though the marriage is going through trouble, with no children. Newman is a proud member of a Force Reconnaissance company, the Marines' Special Forces. In the first book, he is just being assigned to work for the National Security Advisor, though he had tried not to be. He would have preferred to be leading troops in the field, not working for a man who greatly disliked anyone in uniform and denigrated not only their accomplishments but also their actual profession. Orders, however, are orders and he goes where he is directed. With Joe Musser, Oliver North created the character of Peter Newman and three adventures came from the collaboration. The hero of the series is Peter Newman, a colonel in the military who will over the three books advance in rank to become a general and a major player in the political arena as well.
A half decade later, Oliver North, without a co-author listed, created two more Peter Newman adventures but for these tales, he advanced the timeline two decades with Newman still a vibrant and powerful figure but definitely older and wiser. Attached to the White House Special Projects Office, Peter Newman is given a dangerous clandestine assignment: hunt down and eliminate the top terrorist before he can act against the U.S. A huge obstacle, though, is that only a handful of people know of his assignment. Even as he tries to stop a madman in the Middle East from attacking Israel with nuclear weapons, Peter Newman is given another nightmare. His cover is blown and his wife is kidnapped in Jerusalem, giving him a choice of saving his loved one or stopping another holocaust. Reeling from the news that terrorists have killed most of the Saudi royal family, Congress enacts a secret law authorizing Peter Newman to lead a highly trained team against the terrorists, even if the trail leads straight to Iran. The year is 2032 and the United States is nothing like it was two decades earlier. The government rules and its intelligence agencies spy on everybody all the time. Peter Newman needs to look into the disappearance of a friend who invented a new fuel cell and he needs his son James to be his leg man. In the year 2032, the US President has assured all citizens that with the UN's help, America is safe and its military has been pared back substantially. Then Houston is the site of suicide bombings and America's premier physicist is kidnapped. His friend, Peter Newman, is determined to get him back. I was never a fan of aid to the Contras in the ‘80s because I thought them largely thugs and bandits. Colonel North didn't see them that way and he was closer than I was to them so I could be wrong. As a result of my bias, I went to these novels with less than enthusiasm. I was wrong. I should have remembered that Colonel North's expertise and Mr.
Musser's genuine writing skill would not have created anything other than exciting, compelling reading. I would never try to state categorically that Peter Newman is Oliver North, as I do not know the real man, but Colonel North is a prototype, and that makes reading the books have an almost peeping effect on me. The stories were good but the feeling persisted.
""" Django settings for NewsReaderDjango project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os from django.conf import global_settings from datetime import timedelta from celery import Celery BASE_DIR = os.path.dirname(os.path.dirname(__file__)) ENV_PATH = os.path.abspath(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'iudhm*b^7!8ea5nrjgwz@m1(pkjq60acj0+9*h1_d6!!c(&yr3' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] NO_SERVIDOR = True INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.sites', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #'djcelery', 'Site', 'Crawler', 'sorl.thumbnail', #'kronos', ) #HAYSTACK_SIGNAL_PROCESSOR = 'celery_haystack.signals.CelerySignalProcessor' BROKER_TRANSPORT = "memory" CELERY_ALWAYS_EAGER = True CELERY_IGNORE_RESULT = True CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml'] MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) #HAYSTACK_CONNECTIONS = { # 'default': { # 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine', # 'URL': 'http://127.0.0.1:9200/', # 'INDEX_NAME': 
'haystack', # }, #} ROOT_URLCONF = 'NewsReaderDjango.urls' WSGI_APPLICATION = 'NewsReaderDjango.wsgi.application' DATABASES = { 'default': { 'ENGINE': 'django.contrib.gis.db.backends.postgis', 'NAME': 'postgres', 'USER': 'postgres', 'PASSWORD': 'docker', 'HOST': 'db', 'PORT': '5432', 'TEST': { 'NAME': 'test_crawler', }, }, } TIMEOUT = 10000 # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'pt-BR' TIME_ZONE = 'America/Sao_Paulo' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' if NO_SERVIDOR: STATIC_ROOT = os.path.join(BASE_DIR, 'static') # #if NO_SERVIDOR: # STATICFILES_DIRS = ( # os.path.join(BASE_DIR, "static_root"), # ) #else: # STATICFILES_DIRS = ( # os.path.join(BASE_DIR, "Site/static"), # ) MEDIA_URL = "/media/" MEDIA_ROOT = os.path.join(BASE_DIR, 'media') TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this # list if you haven't customized them: 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.template.context_processors.request', 'django.contrib.messages.context_processors.messages', ], }, }, ] LOGIN_URL = '/login/' #BOOTSTRAP_ADMIN_SIDEBAR_MENU = True #KRONOS_PYTHONPATH = "/home/nolram/Virtualenv/py3_django/bin/python3" #if NO_SERVIDOR: # KRONOS_POSTFIX = "> /opt/flyn_django/log_thread.log 2>&1 " #else: # KRONOS_PREFIX = "source /home/nolram/Virtualenv/py3_django/bin/activate &&" # KRONOS_POSTFIX = "> /home/nolram/log_thread.log 2>&1 "
Udar Mózgu. Problemy Interdyscyplinarne 2007;9(1):1-7. This review presents a survey of selected reflections on the phenomenon of death as expressed by outstanding philosophers from various periods, ranging from antiquity to contemporary times. Drawing on Philippe Ariès's "Man and Death", the paper recalls those patterns of death that are typical of the Christian civilization of the West, as well as the ones that appeared in the 20th century together with the medicalization of death. The paper ends with a reflection on the phenomenon of death viewed from the perspective of the sense of life.
"""Packaging script for the sphinx-granite Sphinx theme."""

from granite import __version__

# Prefer setuptools; fall back to distutils on minimal environments.
# BUG FIX: the fallback previously read "from distutils.core import setup,
# find_packages", which raises ImportError (distutils.core has no
# find_packages). find_packages was never used, so only setup is imported.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='sphinx-granite',
    version=__version__,
    license='MIT',
    url='https://github.com/dmpayton/sphinx-granite/',
    description='A theme for Sphinx.',
    long_description=open('./README.rst', 'r').read(),
    # BUG FIX: a missing comma after this argument previously made the whole
    # file a SyntaxError ("keywords='sphinx theme' classifiers=[...]").
    keywords='sphinx theme',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Documentation',
        'Topic :: Software Development :: Documentation',
    ],
    author='Derek Payton',
    author_email='derek.payton@gmail.com',
    install_requires=open('requirements.txt').read().splitlines(),
    include_package_data=True,
    packages=['granite'],
    package_data={'granite': [
        'theme.conf',
        '*.html',
        'static/css/*.css',
        'static/js/*.js',
        'static/font/*.*'
    ]},
    zip_safe=False,
)
DOWNPOUR by Madison Daniel is officially released!!!! Click here to order it now!!! A journey to save the past. Max Valentine is a walking supernova. On the run and searching for answers, his light darkens. Dark skies on the horizon make his next chapter ominous. His powers are fading but still he pushes toward the storm. …love is eternal but the heart is not.
import os
from rebineo_o7o6 import *
import matplotlib.patches as patches
import matplotlib.transforms as transforms
import console_colors as ccl

def makefig(medVAR, avrVAR, stdVAR, nVAR, tnorm, dTday, SUBTITLE, YLIMS, YLAB, fname_fig):
    # Plot mean and median profiles of one variable over normalized MC time,
    # shade the standard-error band, highlight the [0,1] passage interval,
    # and save the figure as a PNG.
    #
    # medVAR/avrVAR/stdVAR/nVAR: per-bin median, mean, std-dev and sample count
    # tnorm: normalized time axis; dTday: minimum sheath duration (unused here,
    #        kept for interface consistency — TODO confirm)
    # SUBTITLE/YLIMS/YLAB: title text, y-axis limits and label
    # fname_fig: output PNG path
    fig = figure(1, figsize=(13, 6))
    ax = fig.add_subplot(111)

    ax.plot(tnorm, avrVAR, 'o-', c='black', markersize=5, label='mean')
    ax.plot(tnorm, medVAR, 'o-', c='red', alpha=.5, markersize=5, markeredgecolor='none', label='median')

    # standard error of the mean: std/sqrt(n) around the mean curve
    inf = avrVAR + stdVAR/sqrt(nVAR)
    sup = avrVAR - stdVAR/sqrt(nVAR)
    ax.fill_between(tnorm, inf, sup, facecolor='gray', alpha=0.5)

    # blue band over x in [0,1]: data coords on x, full height in axes coords
    trans = transforms.blended_transform_factory(
        ax.transData, ax.transAxes)
    rect1 = patches.Rectangle((0., 0.), width=1.0, height=1,
        transform=trans, color='blue', alpha=0.3)
    ax.add_patch(rect1)

    ax.legend(loc='upper right')
    ax.grid()
    ax.set_ylim(YLIMS)
    TITLE = SUBTITLE
    ax.set_title(TITLE)
    ax.set_xlabel('time normalized to MC passage time [1]')
    ax.set_ylabel(YLAB)
    savefig(fname_fig, format='png', dpi=180, bbox_inches='tight')
    close()

def wangflag(ThetaThres):
    # Encode a Wang theta threshold as a string; negative means "not used".
    if ThetaThres<0:
        return 'NaN'
    else:
        return str(ThetaThres)

#-------------------- for the figures:
Nsh = dVARS[0][0]
WangFlag = 'NaN'#wangflag(ThetaThres)
SUBTITLE = 'number of sheaths: %d \n\
%dbins per time unit \n\
sheaths w/ dT>%gdays \n\
MCflags: %s \n\
WangFlag: %s' % (Nsh, nbin/(1+nbefore+nafter), dTday, MCwant['alias'], WangFlag)

# common prefix for the output file names:
if CorrShift:
    prexShift = 'wShiftCorr'
else:
    prexShift = 'woShiftCorr'

DIR_FIGS = '../plots/MCflag%s/%s' % (MCwant['alias'], prexShift)
DIR_ASCII = '../ascii/MCflag%s/%s' % (MCwant['alias'], prexShift)
# NOTE(review): os.system() does not raise on failure, so the except branch
# ("Ya existe") is effectively unreachable — confirm intent.
try:
    os.system('mkdir -p %s' % DIR_FIGS)
    os.system('mkdir -p %s' % DIR_ASCII)
    print ccl.On + " -------> creando: %s" % DIR_FIGS + ccl.W
    print ccl.On + " -------> creando: %s" % DIR_ASCII + ccl.W
except:
    print ccl.On + " Ya existe: %s" %DIR_FIGS + ccl.W
    print ccl.On + " Ya existe: %s" %DIR_ASCII + ccl.W

FNAMEs = 'MCflag%s_%dbefore.%dafter_Wang%s_fgap%1.1f' % (MCwant['alias'], nbefore, nafter, WangFlag, fgap)
FNAME_ASCII = '%s/%s' % (DIR_ASCII, FNAMEs)
FNAME_FIGS = '%s/%s' % (DIR_FIGS, FNAMEs)

#----------------------------------------------------------------------------------------------------
# one figure + one ASCII dump per variable
for i in range(nvars):
    fname_fig = '%s_%s.png' % (FNAME_FIGS, VARS[i][1])
    print ccl.Rn+ " ------> %s" % fname_fig
    ylims = VARS[i][2]
    ylabel = VARS[i][3]
    mediana = dVARS[i][4]
    average = dVARS[i][3]
    std_err = dVARS[i][5]
    nValues = dVARS[i][6]  # number of good values contributing data
    binsPerTimeUnit = nbin/(1+nbefore+nafter)
    SUBTITLE = '# of selected events: %d \n\
events w/80%% of data: %d \n\
bins per time unit: %d \n\
MCflag: %s \n\
WangFlag: %s' % (dVARS[i][0], nEnough[i], binsPerTimeUnit, MCwant['alias'], WangFlag)

    makefig(mediana, average, std_err, nValues, tnorm, dTday,
        SUBTITLE, ylims, ylabel, fname_fig)

    # dump the binned profiles alongside the figure
    fdataout = '%s_%s.txt' % (FNAME_ASCII, VARS[i][1])
    dataout = array([tnorm, mediana, average, std_err, nValues])
    print " ------> %s\n" % fdataout + ccl.W
    savetxt(fdataout, dataout.T, fmt='%12.5f')
##
Celebrate spring with this "Showers & Flowers" decorative wall bunting. This spring-themed wall bunting is crafted using high quality, walnut and maple contrasting woods with umbrella and floral accents. Space the letters and hang to your liking perfectly in that special spot in your home. A unique decor piece that's great all spring long! Handcrafted in Minnesota, USA. Finished with our homegrown blend of organic oil and beeswax. Arrives in a clear bag with natural hemp for stringing. Item does not come pre-strung, so that you can arrange according to your liking. Bunting is approximately 51" long when hung. Letters are 2"-2.75" tall and 0.25" thick.